Merge tag 'mm-stable-2022-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull MM updates from Andrew Morton:
"Most of the MM queue. A few things are still pending.
Liam's maple tree rework didn't make it. This has resulted in a few
other minor patch series being held over for next time.
Multi-gen LRU still isn't merged as we were waiting for mapletree to
stabilize. The current plan is to merge MGLRU into -mm soon and to
later reintroduce mapletree, with a view to hopefully getting both
into 6.1-rc1.
Summary:
- The usual batches of cleanups from Baoquan He, Muchun Song, Miaohe
Lin, Yang Shi, Anshuman Khandual and Mike Rapoport
- Some kmemleak fixes from Patrick Wang and Waiman Long
- DAMON updates from SeongJae Park
- memcg debug/visibility work from Roman Gushchin
- vmalloc speedup from Uladzislau Rezki
- more folio conversion work from Matthew Wilcox
- enhancements for coherent device memory mapping from Alex Sierra
- addition of shared pages tracking and CoW support for fsdax, from
Shiyang Ruan
- hugetlb optimizations from Mike Kravetz
- Mel Gorman has contributed some pagealloc changes to improve
latency and realtime behaviour.
- mprotect soft-dirty checking has been improved by Peter Xu
- Many other singleton patches all over the place"
[ XFS merge from hell as per Darrick Wong in
https://lore.kernel.org/all/YshKnxb4VwXycPO8@magnolia/ ]
* tag 'mm-stable-2022-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (282 commits)
tools/testing/selftests/vm/hmm-tests.c: fix build
mm: Kconfig: fix typo
mm: memory-failure: convert to pr_fmt()
mm: use is_zone_movable_page() helper
hugetlbfs: fix inaccurate comment in hugetlbfs_statfs()
hugetlbfs: cleanup some comments in inode.c
hugetlbfs: remove unneeded header file
hugetlbfs: remove unneeded hugetlbfs_ops forward declaration
hugetlbfs: use helper macro SZ_1{K,M}
mm: cleanup is_highmem()
mm/hmm: add a test for cross device private faults
selftests: add soft-dirty into run_vmtests.sh
selftests: soft-dirty: add test for mprotect
mm/mprotect: fix soft-dirty check in can_change_pte_writable()
mm: memcontrol: fix potential oom_lock recursion deadlock
mm/gup.c: fix formatting in check_and_migrate_movable_page()
xfs: fail dax mount if reflink is enabled on a partition
mm/memcontrol.c: remove the redundant updating of stats_flush_threshold
userfaultfd: don't fail on unrecognized features
hugetlb_cgroup: fix wrong hugetlb cgroup numa stat
...
commit 6614a3c316
380 changed files with 7173 additions and 3224 deletions
@@ -161,24 +161,6 @@
  * attempts to write to the page.
  */
-	/* xwr */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_READONLY	/* write to priv pg -> copy & make writable */
-#define __P011	PAGE_READONLY	/* ditto */
-#define __P100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __P101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __P110	PAGE_COPY_EXEC
-#define __P111	PAGE_COPY_EXEC
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED	/* we don't have (and don't need) write-only */
-#define __S011	PAGE_SHARED
-#define __S100	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
-#define __S101	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
-#define __S110	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
-#define __S111	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
 
 #define pgd_ERROR(e)	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
 #if CONFIG_PGTABLE_LEVELS == 4
 #define pud_ERROR(e)	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
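[ Note: for context, the generic code used to pack these per-arch
__P/__S macros into a 16-entry table in mm/mmap.c, indexed by the low
four VM flag bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED). A minimal
sketch of that pre-series arrangement, shown here for reference only:

pgprot_t protection_map[16] __ro_after_init = {
	/* __Pxxx entries cover private (CoW) mappings, __Sxxx shared ones */
	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
};

The per-arch protection_map[] added in the last hunk of this commit
replaces exactly this table. ]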
@@ -139,6 +139,10 @@ retry:
 	if (fault_signal_pending(fault, regs))
 		return;
 
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		/*
 		 * We ran out of memory, or some other thing happened
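[ Note: the new VM_FAULT_COMPLETED code means handle_mm_fault()
finished the fault entirely, including dropping mmap_lock, so the
arch handler must return before any path that would unlock or retry.
A sketch of the common handler tail this series establishes; the
names (mm, vma, flags, the retry label) follow the usual arch
fault-handler shape rather than any single file:

	fault = handle_mm_fault(vma, address, flags, regs);

	/* Fatal signal: on this path the core code already dropped the lock. */
	if (fault_signal_pending(fault, regs))
		return;

	/* Fully completed, mmap_lock already released: must not unlock. */
	if (fault & VM_FAULT_COMPLETED)
		return;

	/* On retry the core code dropped mmap_lock too; "retry" re-takes it. */
	if (unlikely(fault & VM_FAULT_RETRY)) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;
	}

	/* Only on this path does the handler still hold mmap_lock. */
	mmap_read_unlock(mm);
]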
@@ -273,7 +273,7 @@ static int __init gate_vma_init(void)
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
-	gate_vma.vm_page_prot = __P101;
+	gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);
 
 	return 0;
 }
@@ -490,3 +490,29 @@ void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
 	__remove_pages(start_pfn, nr_pages, altmap);
 }
 #endif
+
+static const pgprot_t protection_map[16] = {
+	[VM_NONE]					= PAGE_NONE,
+	[VM_READ]					= PAGE_READONLY,
+	[VM_WRITE]					= PAGE_READONLY,
+	[VM_WRITE | VM_READ]				= PAGE_READONLY,
+	[VM_EXEC]					= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+							   _PAGE_AR_X_RX),
+	[VM_EXEC | VM_READ]				= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+							   _PAGE_AR_RX),
+	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
+	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
+	[VM_SHARED]					= PAGE_NONE,
+	[VM_SHARED | VM_READ]				= PAGE_READONLY,
+	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
+	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
+	[VM_SHARED | VM_EXEC]				= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+							   _PAGE_AR_X_RX),
+	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+							   _PAGE_AR_RX),
+	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+							   _PAGE_AR_RWX),
+	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
+							   _PAGE_AR_RWX)
+};
+DECLARE_VM_GET_PAGE_PROT
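[ Note: DECLARE_VM_GET_PAGE_PROT supplies the architecture's
vm_get_page_prot() on top of the table above. Per the generic helper
this series adds in include/linux/pgtable.h, it roughly expands to:

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* Only the four protection-relevant flag bits index the table. */
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);

so callers see the same vm_flags-to-pgprot lookup as before, with the
table now owned by each architecture. ]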