Merge tag 'mm-stable-2022-10-08' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton: - Yu Zhao's Multi-Gen LRU patches are here. They've been under test in linux-next for a couple of months without, to my knowledge, any negative reports (or any positive ones, come to that). - Also the Maple Tree from Liam Howlett. An overlapping range-based tree for vmas. It is apparently slightly more efficient in its own right, but is mainly targeted at enabling work to reduce mmap_lock contention. Liam has identified a number of other tree users in the kernel which could be beneficially converted to maple trees. Yu Zhao has identified a hard-to-hit but "easy to fix" lockdep splat at [1]. This has yet to be addressed due to Liam's unfortunately timed vacation. He is now back and we'll get this fixed up. - Dmitry Vyukov introduces KMSAN: the Kernel Memory Sanitizer. It uses clang-generated instrumentation to detect used-uninitialized bugs down to the single bit level. KMSAN keeps finding bugs. New ones, as well as the legacy ones. - Yang Shi adds a userspace mechanism (madvise) to induce a collapse of memory into THPs. - Zach O'Keefe has expanded Yang Shi's madvise(MADV_COLLAPSE) to support file/shmem-backed pages. - userfaultfd updates from Axel Rasmussen - zsmalloc cleanups from Alexey Romanov - cleanups from Miaohe Lin: vmscan, hugetlb_cgroup, hugetlb and memory-failure - Huang Ying adds enhancements to NUMA balancing memory tiering mode's page promotion, with a new way of detecting hot pages. - memcg updates from Shakeel Butt: charging optimizations and reduced memory consumption. - memcg cleanups from Kairui Song. - memcg fixes and cleanups from Johannes Weiner. - Vishal Moola provides more folio conversions - Zhang Yi removed ll_rw_block() :( - migration enhancements from Peter Xu - migration error-path bugfixes from Huang Ying - Aneesh Kumar added ability for a device driver to alter the memory tiering promotion paths. For optimizations by PMEM drivers, DRM drivers, etc. - vma merging improvements from Jakub Matěn. 
- NUMA hinting cleanups from David Hildenbrand. - xu xin added additional userspace visibility into KSM merging activity. - THP & KSM code consolidation from Qi Zheng. - more folio work from Matthew Wilcox. - KASAN updates from Andrey Konovalov. - DAMON cleanups from Kaixu Xia. - DAMON work from SeongJae Park: fixes, cleanups. - hugetlb sysfs cleanups from Muchun Song. - Mike Kravetz fixes locking issues in hugetlbfs and in hugetlb core. Link: https://lkml.kernel.org/r/CAOUHufZabH85CeUN-MEMgL8gJGzJEWUrkiM58JkTbBhh-jew0Q@mail.gmail.com [1] * tag 'mm-stable-2022-10-08' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (555 commits) hugetlb: allocate vma lock for all sharable vmas hugetlb: take hugetlb vma_lock when clearing vma_lock->vma pointer hugetlb: fix vma lock handling during split vma and range unmapping mglru: mm/vmscan.c: fix imprecise comments mm/mglru: don't sync disk for each aging cycle mm: memcontrol: drop dead CONFIG_MEMCG_SWAP config symbol mm: memcontrol: use do_memsw_account() in a few more places mm: memcontrol: deprecate swapaccounting=0 mode mm: memcontrol: don't allocate cgroup swap arrays when memcg is disabled mm/secretmem: remove reduntant return value mm/hugetlb: add available_huge_pages() func mm: remove unused inline functions from include/linux/mm_inline.h selftests/vm: add selftest for MADV_COLLAPSE of uffd-minor memory selftests/vm: add file/shmem MADV_COLLAPSE selftest for cleared pmd selftests/vm: add thp collapse shmem testing selftests/vm: add thp collapse file and tmpfs testing selftests/vm: modularize thp collapse memory operations selftests/vm: dedup THP helpers mm/khugepaged: add tracepoint to hpage_collapse_scan_file() mm/madvise: add file and shmem support to MADV_COLLAPSE ...
This commit is contained in:
@@ -16,6 +16,7 @@
|
||||
struct ctl_table;
|
||||
struct user_struct;
|
||||
struct mmu_gather;
|
||||
struct node;
|
||||
|
||||
#ifndef CONFIG_ARCH_HAS_HUGEPD
|
||||
typedef struct { unsigned long pd; } hugepd_t;
|
||||
@@ -114,6 +115,12 @@ struct file_region {
|
||||
#endif
|
||||
};
|
||||
|
||||
struct hugetlb_vma_lock {
|
||||
struct kref refs;
|
||||
struct rw_semaphore rw_sema;
|
||||
struct vm_area_struct *vma;
|
||||
};
|
||||
|
||||
extern struct resv_map *resv_map_alloc(void);
|
||||
void resv_map_release(struct kref *ref);
|
||||
|
||||
@@ -126,7 +133,7 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
|
||||
long min_hpages);
|
||||
void hugepage_put_subpool(struct hugepage_subpool *spool);
|
||||
|
||||
void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
|
||||
void hugetlb_dup_vma_private(struct vm_area_struct *vma);
|
||||
void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
|
||||
int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
|
||||
int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
|
||||
@@ -214,6 +221,14 @@ struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
|
||||
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
|
||||
pgd_t *pgd, int flags);
|
||||
|
||||
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
|
||||
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
|
||||
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
|
||||
void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
|
||||
int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
|
||||
void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
|
||||
void hugetlb_vma_lock_release(struct kref *kref);
|
||||
|
||||
int pmd_huge(pmd_t pmd);
|
||||
int pud_huge(pud_t pud);
|
||||
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
|
||||
@@ -225,7 +240,7 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
|
||||
|
||||
#else /* !CONFIG_HUGETLB_PAGE */
|
||||
|
||||
static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
|
||||
static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
|
||||
{
|
||||
}
|
||||
|
||||
@@ -336,6 +351,31 @@ static inline int prepare_hugepage_range(struct file *file,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
|
||||
{
|
||||
}
|
||||
|
||||
static inline int pmd_huge(pmd_t pmd)
|
||||
{
|
||||
return 0;
|
||||
@@ -665,7 +705,7 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
|
||||
nodemask_t *nmask, gfp_t gfp_mask);
|
||||
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
|
||||
unsigned long address);
|
||||
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
|
||||
int hugetlb_add_to_page_cache(struct page *page, struct address_space *mapping,
|
||||
pgoff_t idx);
|
||||
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
|
||||
unsigned long address, struct page *page);
|
||||
@@ -935,6 +975,11 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
void hugetlb_register_node(struct node *node);
|
||||
void hugetlb_unregister_node(struct node *node);
|
||||
#endif
|
||||
|
||||
#else /* CONFIG_HUGETLB_PAGE */
|
||||
struct hstate {};
|
||||
|
||||
@@ -1109,6 +1154,14 @@ static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
|
||||
pte_t *ptep, pte_t pte)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void hugetlb_register_node(struct node *node)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void hugetlb_unregister_node(struct node *node)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_HUGETLB_PAGE */
|
||||
|
||||
static inline spinlock_t *huge_pte_lock(struct hstate *h,
|
||||
@@ -1123,14 +1176,10 @@ static inline spinlock_t *huge_pte_lock(struct hstate *h,
|
||||
|
||||
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
|
||||
extern void __init hugetlb_cma_reserve(int order);
|
||||
extern void __init hugetlb_cma_check(void);
|
||||
#else
|
||||
static inline __init void hugetlb_cma_reserve(int order)
|
||||
{
|
||||
}
|
||||
static inline __init void hugetlb_cma_check(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
|
||||
|
||||
Reference in New Issue
Block a user