Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "19 patches.

  Subsystems affected by this patch series: MAINTAINERS, ipc, fork,
  checkpatch, lib, and mm (memcg, slub, pagemap, madvise, migration,
  hugetlb)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  include/linux/log2.h: add missing () around n in roundup_pow_of_two()
  mm/khugepaged.c: fix khugepaged's request size in collapse_file
  mm/hugetlb: fix a race between hugetlb sysctl handlers
  mm/hugetlb: try preferred node first when alloc gigantic page from cma
  mm/migrate: preserve soft dirty in remove_migration_pte()
  mm/migrate: remove unnecessary is_zone_device_page() check
  mm/rmap: fixup copying of soft dirty and uffd ptes
  mm/migrate: fixup setting UFFD_WP flag
  mm: madvise: fix vma use-after-free
  checkpatch: fix the usage of capture group ( ... )
  fork: adjust sysctl_max_threads definition to match prototype
  ipc: adjust proc_ipc_sem_dointvec definition to match prototype
  mm: track page table modifications in __apply_to_page_range()
  MAINTAINERS: IA64: mark Status as Odd Fixes only
  MAINTAINERS: add LLVM maintainers
  MAINTAINERS: update Cavium/Marvell entries
  mm: slub: fix conversion of freelist_corrupted()
  mm: memcg: fix memcg reclaim soft lockup
  memcg: fix use-after-free in uncharge_batch
Author: Linus Torvalds
Date:   2020-09-05 13:28:40 -07:00
commit 7514c0362f

14 changed files with 129 additions and 67 deletions

--- a/MAINTAINERS
+++ b/MAINTAINERS

@@ -1694,7 +1694,6 @@ F: arch/arm/mach-cns3xxx/
 ARM/CAVIUM THUNDER NETWORK DRIVER
 M: Sunil Goutham <sgoutham@marvell.com>
-M: Robert Richter <rrichter@marvell.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Supported
 F: drivers/net/ethernet/cavium/thunder/
@@ -3948,8 +3947,8 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/carl9170
 F: drivers/net/wireless/ath/carl9170/

 CAVIUM I2C DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Supported
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
 W: http://www.marvell.com
 F: drivers/i2c/busses/i2c-octeon*
 F: drivers/i2c/busses/i2c-thunderx*
@@ -3964,8 +3963,8 @@ W: http://www.marvell.com
 F: drivers/net/ethernet/cavium/liquidio/

 CAVIUM MMC DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Supported
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
 W: http://www.marvell.com
 F: drivers/mmc/host/cavium*
@@ -3977,9 +3976,9 @@ W: http://www.marvell.com
 F: drivers/crypto/cavium/cpt/

 CAVIUM THUNDERX2 ARM64 SOC
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
+S: Odd Fixes
 F: Documentation/devicetree/bindings/arm/cavium-thunder2.txt
 F: arch/arm64/boot/dts/cavium/thunder2-99xx*
@@ -4258,6 +4257,8 @@ S: Maintained
 F: .clang-format

 CLANG/LLVM BUILD SUPPORT
+M: Nathan Chancellor <natechancellor@gmail.com>
+M: Nick Desaulniers <ndesaulniers@google.com>
 L: clang-built-linux@googlegroups.com
 S: Supported
 W: https://clangbuiltlinux.github.io/
@@ -6191,16 +6192,15 @@ F: drivers/edac/highbank*
 EDAC-CAVIUM OCTEON
 M: Ralf Baechle <ralf@linux-mips.org>
-M: Robert Richter <rrichter@marvell.com>
 L: linux-edac@vger.kernel.org
 L: linux-mips@vger.kernel.org
 S: Supported
 F: drivers/edac/octeon_edac*

 EDAC-CAVIUM THUNDERX
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
 L: linux-edac@vger.kernel.org
-S: Supported
+S: Odd Fixes
 F: drivers/edac/thunderx_edac*

 EDAC-CORE
@@ -6208,7 +6208,7 @@ M: Borislav Petkov <bp@alien8.de>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 M: Tony Luck <tony.luck@intel.com>
 R: James Morse <james.morse@arm.com>
-R: Robert Richter <rrichter@marvell.com>
+R: Robert Richter <rric@kernel.org>
 L: linux-edac@vger.kernel.org
 S: Supported
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/ras/ras.git edac-for-next
@@ -8272,7 +8272,7 @@ IA64 (Itanium) PLATFORM
 M: Tony Luck <tony.luck@intel.com>
 M: Fenghua Yu <fenghua.yu@intel.com>
 L: linux-ia64@vger.kernel.org
-S: Maintained
+S: Odd Fixes
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux.git
 F: Documentation/ia64/
 F: arch/ia64/
@@ -13446,10 +13446,10 @@ F: Documentation/devicetree/bindings/pci/axis,artpec*
 F: drivers/pci/controller/dwc/*artpec*

 PCIE DRIVER FOR CAVIUM THUNDERX
-M: Robert Richter <rrichter@marvell.com>
+M: Robert Richter <rric@kernel.org>
 L: linux-pci@vger.kernel.org
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Supported
+S: Odd Fixes
 F: drivers/pci/controller/pci-thunder-*

 PCIE DRIVER FOR HISILICON
@@ -17237,8 +17237,8 @@ S: Maintained
 F: drivers/net/thunderbolt.c

 THUNDERX GPIO DRIVER
-M: Robert Richter <rrichter@marvell.com>
-S: Maintained
+M: Robert Richter <rric@kernel.org>
+S: Odd Fixes
 F: drivers/gpio/gpio-thunderx.c

 TI AM437X VPFE DRIVER

--- a/include/linux/log2.h
+++ b/include/linux/log2.h

@@ -173,7 +173,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
 #define roundup_pow_of_two(n) \
 ( \
     __builtin_constant_p(n) ? ( \
-        (n == 1) ? 1 : \
+        ((n) == 1) ? 1 : \
         (1UL << (ilog2((n) - 1) + 1)) \
     ) : \
     __roundup_pow_of_two(n) \
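
Since the fix above is a single pair of parentheses, here is a minimal
user-space demonstration (hypothetical, not part of the patch) of what goes
wrong without them: a macro argument that binds more loosely than '=='
gets re-associated during textual expansion.

    #include <stdio.h>

    #define BROKEN_IS_ONE(n) (n == 1)   /* old form from the hunk above */
    #define FIXED_IS_ONE(n)  ((n) == 1) /* parenthesized, as in the fix */

    int main(void)
    {
        unsigned long x = 9;
        /* BROKEN_IS_ONE(x & 7) expands to (x & 7 == 1); '==' binds
         * tighter than '&', so this is x & (7 == 1), i.e. 9 & 0. */
        printf("broken: %d\n", BROKEN_IS_ONE(x & 7)); /* prints 0 */
        printf("fixed:  %d\n", FIXED_IS_ONE(x & 7));  /* prints 1 */
        return 0;
    }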

--- a/ipc/ipc_sysctl.c
+++ b/ipc/ipc_sysctl.c

@@ -85,7 +85,7 @@ static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
 }

 static int proc_ipc_sem_dointvec(struct ctl_table *table, int write,
-    void __user *buffer, size_t *lenp, loff_t *ppos)
+    void *buffer, size_t *lenp, loff_t *ppos)
 {
     int ret, semmni;
     struct ipc_namespace *ns = current->nsproxy->ipc_ns;

--- a/kernel/fork.c
+++ b/kernel/fork.c

@@ -3014,7 +3014,7 @@ int unshare_files(struct files_struct **displaced)
 }

 int sysctl_max_threads(struct ctl_table *table, int write,
-           void __user *buffer, size_t *lenp, loff_t *ppos)
+           void *buffer, size_t *lenp, loff_t *ppos)
 {
     struct ctl_table t;
     int ret;

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c

@@ -1250,21 +1250,32 @@ static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
         int nid, nodemask_t *nodemask)
 {
     unsigned long nr_pages = 1UL << huge_page_order(h);
+    if (nid == NUMA_NO_NODE)
+        nid = numa_mem_id();

 #ifdef CONFIG_CMA
     {
         struct page *page;
         int node;

-        for_each_node_mask(node, *nodemask) {
-            if (!hugetlb_cma[node])
-                continue;
-
-            page = cma_alloc(hugetlb_cma[node], nr_pages,
-                     huge_page_order(h), true);
+        if (hugetlb_cma[nid]) {
+            page = cma_alloc(hugetlb_cma[nid], nr_pages,
+                     huge_page_order(h), true);
             if (page)
                 return page;
         }
+
+        if (!(gfp_mask & __GFP_THISNODE)) {
+            for_each_node_mask(node, *nodemask) {
+                if (node == nid || !hugetlb_cma[node])
+                    continue;
+
+                page = cma_alloc(hugetlb_cma[node], nr_pages,
+                         huge_page_order(h), true);
+                if (page)
+                    return page;
+            }
+        }
     }
 #endif
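
In the hunk above, the old loop walked *nodemask in node-id order and
returned the first CMA area with room, so a gigantic page could land
off-node even when the preferred node's CMA area had free space. The new
code tries hugetlb_cma[nid] first and scans the remaining nodes only when
the caller did not demand __GFP_THISNODE.
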
@@ -3454,6 +3465,22 @@ static unsigned int allowed_mems_nr(struct hstate *h)
 }

 #ifdef CONFIG_SYSCTL
+static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
+                      void *buffer, size_t *length,
+                      loff_t *ppos, unsigned long *out)
+{
+    struct ctl_table dup_table;
+
+    /*
+     * In order to avoid races with __do_proc_doulongvec_minmax(), we
+     * can duplicate the @table and alter the duplicate of it.
+     */
+    dup_table = *table;
+    dup_table.data = out;
+
+    return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
+}
+
 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
              struct ctl_table *table, int write,
              void *buffer, size_t *length, loff_t *ppos)
@@ -3465,9 +3492,8 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
     if (!hugepages_supported())
         return -EOPNOTSUPP;

-    table->data = &tmp;
-    table->maxlen = sizeof(unsigned long);
-    ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+    ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+                         &tmp);
     if (ret)
         goto out;
@@ -3510,9 +3536,8 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
     if (write && hstate_is_gigantic(h))
         return -EINVAL;

-    table->data = &tmp;
-    table->maxlen = sizeof(unsigned long);
-    ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
+    ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
+                         &tmp);
     if (ret)
         goto out;
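
Both handlers previously redirected table->data and table->maxlen in the
*shared* ctl_table before calling proc_doulongvec_minmax(), so concurrent
writers could read each other's stack addresses. A minimal sketch of the
hazard and of the stack-duplicate fix, with hypothetical names:

    #include <stdio.h>

    struct table { long *data; };       /* stands in for struct ctl_table */
    static struct table shared_table;   /* one instance shared by callers */

    static void handler_racy(long *tmp)
    {
        shared_table.data = tmp;   /* another thread may overwrite this */
        *shared_table.data = 42;   /* ... before the write lands here   */
    }

    static void handler_fixed(long *tmp)
    {
        /* private stack copy, as proc_hugetlb_doulongvec_minmax() makes */
        struct table dup = shared_table;

        dup.data = tmp;
        *dup.data = 42;            /* always writes to our own tmp */
    }

    int main(void)
    {
        long a = 0, b = 0;

        handler_racy(&a);
        handler_fixed(&b);
        printf("%ld %ld\n", a, b);
        return 0;
    }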

--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c

@@ -1709,7 +1709,7 @@ static void collapse_file(struct mm_struct *mm,
                 xas_unlock_irq(&xas);
                 page_cache_sync_readahead(mapping, &file->f_ra,
                               file, index,
-                              PAGE_SIZE);
+                              end - index);
                 /* drain pagevecs to help isolate_lru_page() */
                 lru_add_drain();
                 page = find_lock_page(mapping, index);
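
The bug fixed above is a unit mismatch: the final argument of
page_cache_sync_readahead() is a count of pages, not bytes (signature as
found in pagemap.h of this era; verify against your tree):

    /*
     * void page_cache_sync_readahead(struct address_space *mapping,
     *                                struct file_ra_state *ra,
     *                                struct file *filp, pgoff_t index,
     *                                unsigned long req_count);
     *
     * Passing PAGE_SIZE (4096) therefore asked for 4096 pages, i.e.
     * 16 MiB with 4 KiB pages, while 'end - index' is the HPAGE_PMD_NR
     * (512) pages actually spanned by the huge page being collapsed.
     */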

--- a/mm/madvise.c
+++ b/mm/madvise.c

@@ -289,9 +289,9 @@ static long madvise_willneed(struct vm_area_struct *vma,
      */
     *prev = NULL;	/* tell sys_madvise we drop mmap_lock */
     get_file(file);
-    mmap_read_unlock(current->mm);
     offset = (loff_t)(start - vma->vm_start)
             + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+    mmap_read_unlock(current->mm);
     vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
     fput(file);
     mmap_read_lock(current->mm);
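
The reordering matters because 'vma' is only guaranteed to stay alive
while mmap_lock is held; once mmap_read_unlock() runs, a concurrent unmap
can free it. The invariant, sketched as comments:

    /*
     *  mmap_read_lock(mm);
     *  vma = find_vma(mm, addr);
     *  offset = vma->vm_pgoff;    // OK: lock held, vma cannot go away
     *  mmap_read_unlock(mm);
     *  use(vma->vm_start);        // BUG: vma may already be freed
     */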

--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c

@@ -6774,6 +6774,9 @@ static void uncharge_batch(const struct uncharge_gather *ug)
     __this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
     memcg_check_events(ug->memcg, ug->dummy_page);
     local_irq_restore(flags);
+
+    /* drop reference from uncharge_page */
+    css_put(&ug->memcg->css);
 }

 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
@@ -6797,6 +6800,9 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
             uncharge_gather_clear(ug);
         }
         ug->memcg = page->mem_cgroup;
+
+        /* pairs with css_put in uncharge_batch */
+        css_get(&ug->memcg->css);
     }

     nr_pages = compound_nr(page);
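
The pairing above closes a use-after-free window: the uncharges performed
while batching can drop the last page-held references to the memcg, after
which uncharge_batch() would update statistics through a dangling
ug->memcg. Taking a css reference when the gatherer first latches onto a
memcg, and dropping it after the final event update, pins the memcg for
exactly the lifetime of the batch.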

--- a/mm/memory.c
+++ b/mm/memory.c

@@ -73,6 +73,7 @@
 #include <linux/numa.h>
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
+#include <linux/vmalloc.h>

 #include <trace/events/kmem.h>

@@ -83,6 +84,7 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>

+#include "pgalloc-track.h"
 #include "internal.h"

 #if defined(LAST_CPUPID_NOT_IN_PAGE_FLAGS) && !defined(CONFIG_COMPILE_TEST)
@@ -2206,7 +2208,8 @@ EXPORT_SYMBOL(vm_iomap_memory);

 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                   unsigned long addr, unsigned long end,
-                  pte_fn_t fn, void *data, bool create)
+                  pte_fn_t fn, void *data, bool create,
+                  pgtbl_mod_mask *mask)
 {
     pte_t *pte;
     int err = 0;
@@ -2214,7 +2217,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,

     if (create) {
         pte = (mm == &init_mm) ?
-            pte_alloc_kernel(pmd, addr) :
+            pte_alloc_kernel_track(pmd, addr, mask) :
             pte_alloc_map_lock(mm, pmd, addr, &ptl);
         if (!pte)
             return -ENOMEM;
@@ -2235,6 +2238,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                 break;
         }
     } while (addr += PAGE_SIZE, addr != end);
+    *mask |= PGTBL_PTE_MODIFIED;

     arch_leave_lazy_mmu_mode();
@@ -2245,7 +2249,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,

 static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
                   unsigned long addr, unsigned long end,
-                  pte_fn_t fn, void *data, bool create)
+                  pte_fn_t fn, void *data, bool create,
+                  pgtbl_mod_mask *mask)
 {
     pmd_t *pmd;
     unsigned long next;
@@ -2254,7 +2259,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
     BUG_ON(pud_huge(*pud));

     if (create) {
-        pmd = pmd_alloc(mm, pud, addr);
+        pmd = pmd_alloc_track(mm, pud, addr, mask);
         if (!pmd)
             return -ENOMEM;
     } else {
@@ -2264,7 +2269,7 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
         next = pmd_addr_end(addr, end);
         if (create || !pmd_none_or_clear_bad(pmd)) {
             err = apply_to_pte_range(mm, pmd, addr, next, fn, data,
-                         create);
+                         create, mask);
             if (err)
                 break;
         }
@@ -2274,14 +2279,15 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,

 static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
                   unsigned long addr, unsigned long end,
-                  pte_fn_t fn, void *data, bool create)
+                  pte_fn_t fn, void *data, bool create,
+                  pgtbl_mod_mask *mask)
 {
     pud_t *pud;
     unsigned long next;
     int err = 0;

     if (create) {
-        pud = pud_alloc(mm, p4d, addr);
+        pud = pud_alloc_track(mm, p4d, addr, mask);
         if (!pud)
             return -ENOMEM;
     } else {
@@ -2291,7 +2297,7 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,
         next = pud_addr_end(addr, end);
         if (create || !pud_none_or_clear_bad(pud)) {
             err = apply_to_pmd_range(mm, pud, addr, next, fn, data,
-                         create);
+                         create, mask);
             if (err)
                 break;
         }
@@ -2301,14 +2307,15 @@ static int apply_to_pud_range(struct mm_struct *mm, p4d_t *p4d,

 static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
                   unsigned long addr, unsigned long end,
-                  pte_fn_t fn, void *data, bool create)
+                  pte_fn_t fn, void *data, bool create,
+                  pgtbl_mod_mask *mask)
 {
     p4d_t *p4d;
     unsigned long next;
     int err = 0;

     if (create) {
-        p4d = p4d_alloc(mm, pgd, addr);
+        p4d = p4d_alloc_track(mm, pgd, addr, mask);
         if (!p4d)
             return -ENOMEM;
     } else {
@@ -2318,7 +2325,7 @@ static int apply_to_p4d_range(struct mm_struct *mm, pgd_t *pgd,
         next = p4d_addr_end(addr, end);
         if (create || !p4d_none_or_clear_bad(p4d)) {
             err = apply_to_pud_range(mm, p4d, addr, next, fn, data,
-                         create);
+                         create, mask);
             if (err)
                 break;
         }
@@ -2331,8 +2338,9 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                  void *data, bool create)
 {
     pgd_t *pgd;
-    unsigned long next;
+    unsigned long start = addr, next;
     unsigned long end = addr + size;
+    pgtbl_mod_mask mask = 0;
     int err = 0;

     if (WARN_ON(addr >= end))
@@ -2343,11 +2351,14 @@ static int __apply_to_page_range(struct mm_struct *mm, unsigned long addr,
         next = pgd_addr_end(addr, end);
         if (!create && pgd_none_or_clear_bad(pgd))
             continue;
-        err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create);
+        err = apply_to_p4d_range(mm, pgd, addr, next, fn, data, create, &mask);
         if (err)
             break;
     } while (pgd++, addr = next, addr != end);

+    if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
+        arch_sync_kernel_mappings(start, start + size);
+
     return err;
 }
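
The *_alloc_track() helpers used above behave like pmd_alloc(), pud_alloc()
and p4d_alloc(), but additionally record in *mask which page-table levels
were freshly populated. That lets __apply_to_page_range() do what the
vmalloc core already does: notify architectures that keep per-process
copies of the kernel page tables whenever a top-level entry changes. A
sketch of the pattern, as comments:

    /*
     *  pgtbl_mod_mask mask = 0;
     *
     *  pmd = pmd_alloc_track(mm, pud, addr, &mask);
     *      // sets PGTBL_PUD_MODIFIED in mask if a new PMD page had
     *      // to be installed into the PUD entry
     *  ...
     *  if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
     *      arch_sync_kernel_mappings(start, start + size);
     */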

--- a/mm/migrate.c
+++ b/mm/migrate.c

@@ -246,13 +246,13 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
         else if (pte_swp_uffd_wp(*pvmw.pte))
             pte = pte_mkuffd_wp(pte);

-        if (unlikely(is_zone_device_page(new))) {
-            if (is_device_private_page(new)) {
-                entry = make_device_private_entry(new, pte_write(pte));
-                pte = swp_entry_to_pte(entry);
-                if (pte_swp_uffd_wp(*pvmw.pte))
-                    pte = pte_mkuffd_wp(pte);
-            }
+        if (unlikely(is_device_private_page(new))) {
+            entry = make_device_private_entry(new, pte_write(pte));
+            pte = swp_entry_to_pte(entry);
+            if (pte_swp_soft_dirty(*pvmw.pte))
+                pte = pte_swp_mksoft_dirty(pte);
+            if (pte_swp_uffd_wp(*pvmw.pte))
+                pte = pte_swp_mkuffd_wp(pte);
         }

 #ifdef CONFIG_HUGETLB_PAGE
@@ -2427,10 +2427,17 @@ again:
             entry = make_migration_entry(page, mpfn &
                              MIGRATE_PFN_WRITE);
             swp_pte = swp_entry_to_pte(entry);
-            if (pte_soft_dirty(pte))
-                swp_pte = pte_swp_mksoft_dirty(swp_pte);
-            if (pte_uffd_wp(pte))
-                swp_pte = pte_swp_mkuffd_wp(swp_pte);
+            if (pte_present(pte)) {
+                if (pte_soft_dirty(pte))
+                    swp_pte = pte_swp_mksoft_dirty(swp_pte);
+                if (pte_uffd_wp(pte))
+                    swp_pte = pte_swp_mkuffd_wp(swp_pte);
+            } else {
+                if (pte_swp_soft_dirty(pte))
+                    swp_pte = pte_swp_mksoft_dirty(swp_pte);
+                if (pte_swp_uffd_wp(pte))
+                    swp_pte = pte_swp_mkuffd_wp(swp_pte);
+            }
             set_pte_at(mm, addr, ptep, swp_pte);

             /*
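
Both migrate.c hunks, and the rmap.c hunk that follows, turn on the same
distinction: once a pte has been replaced by a migration or device-private
swap entry it is no longer a present pte, and its auxiliary bits live in
different positions, so they must be read with the swap-pte accessors:

    /*
     *  present pte:  pte_soft_dirty(pte),     pte_uffd_wp(pte)
     *  swap pte:     pte_swp_soft_dirty(pte), pte_swp_uffd_wp(pte)
     *
     * Querying a swap pte with the present-pte helpers reads the wrong
     * bits, silently dropping soft-dirty and uffd-wp state across
     * migration.
     */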

--- a/mm/rmap.c
+++ b/mm/rmap.c

@@ -1511,9 +1511,14 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
              */
             entry = make_migration_entry(page, 0);
             swp_pte = swp_entry_to_pte(entry);
-            if (pte_soft_dirty(pteval))
+
+            /*
+             * pteval maps a zone device page and is therefore
+             * a swap pte.
+             */
+            if (pte_swp_soft_dirty(pteval))
                 swp_pte = pte_swp_mksoft_dirty(swp_pte);
-            if (pte_uffd_wp(pteval))
+            if (pte_swp_uffd_wp(pteval))
                 swp_pte = pte_swp_mkuffd_wp(swp_pte);
             set_pte_at(mm, pvmw.address, pvmw.pte, swp_pte);

             /*

--- a/mm/slub.c
+++ b/mm/slub.c

@@ -672,12 +672,12 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
 }

 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
-                   void *freelist, void *nextfree)
+                   void **freelist, void *nextfree)
 {
     if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
-        !check_valid_pointer(s, page, nextfree)) {
-        object_err(s, page, freelist, "Freechain corrupt");
-        freelist = NULL;
+        !check_valid_pointer(s, page, nextfree) && freelist) {
+        object_err(s, page, *freelist, "Freechain corrupt");
+        *freelist = NULL;
         slab_fix(s, "Isolate corrupted freechain");
         return true;
     }
@@ -1494,7 +1494,7 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node,
                             int objects) {}

 static bool freelist_corrupted(struct kmem_cache *s, struct page *page,
-                   void *freelist, void *nextfree)
+                   void **freelist, void *nextfree)
 {
     return false;
 }
@@ -2184,7 +2184,7 @@ static void deactivate_slab(struct kmem_cache *s, struct page *page,
          * 'freelist' is already corrupted.  So isolate all objects
          * starting at 'freelist'.
          */
-        if (freelist_corrupted(s, page, freelist, nextfree))
+        if (freelist_corrupted(s, page, &freelist, nextfree))
             break;

         do {
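
The signature change is the whole fix: the old 'freelist = NULL' only
cleared the callee's copy of the pointer, so deactivate_slab() kept
walking the corrupted chain. A stand-alone illustration with hypothetical
names:

    #include <stdio.h>

    static void clear_by_value(int *p)  { p = NULL; }  /* no effect outside */
    static void clear_by_ref(int **p)   { *p = NULL; } /* caller sees NULL  */

    int main(void)
    {
        int x = 1;
        int *list = &x;

        clear_by_value(list);
        printf("%p\n", (void *)list);  /* still &x */
        clear_by_ref(&list);
        printf("%p\n", (void *)list);  /* now (nil) */
        return 0;
    }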

--- a/mm/vmscan.c
+++ b/mm/vmscan.c

@@ -2615,6 +2615,14 @@ static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
         unsigned long reclaimed;
         unsigned long scanned;

+        /*
+         * This loop can become CPU-bound when target memcgs
+         * aren't eligible for reclaim - either because they
+         * don't have any reclaimable pages, or because their
+         * memory is explicitly protected. Avoid soft lockups.
+         */
+        cond_resched();
+
         mem_cgroup_calculate_protection(target_memcg, memcg);

         if (mem_cgroup_below_min(memcg)) {
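
This is the standard remedy for soft lockups in unbounded kernel loops:
when every memcg in the hierarchy is ineligible, each iteration does no
reclaim work and never sleeps, so on non-preemptible kernels nothing else
gets the CPU. The pattern, in miniature:

    /*
     *  for_each_candidate(c) {
     *      cond_resched();    // yield once per pass, even if ...
     *      if (!eligible(c))  // ... the body below does nothing
     *          continue;
     *      reclaim(c);
     *  }
     */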

--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl

@@ -2639,8 +2639,8 @@ sub process {
 # Check if the commit log has what seems like a diff which can confuse patch
     if ($in_commit_log && !$commit_log_has_diff &&
-        (($line =~ m@^\s+diff\b.*a/[\w/]+@ &&
-          $line =~ m@^\s+diff\b.*a/([\w/]+)\s+b/$1\b@) ||
+        (($line =~ m@^\s+diff\b.*a/([\w/]+)@ &&
+          $line =~ m@^\s+diff\b.*a/[\w/]+\s+b/$1\b@) ||
          $line =~ m@^\s*(?:\-\-\-\s+a/|\+\+\+\s+b/)@ ||
          $line =~ m/^\s*\@\@ \-\d+,\d+ \+\d+,\d+ \@\@/)) {
         ERROR("DIFF_IN_COMMIT_MSG",