Allow mm-Support-soft-dirty-flag patches on all kernels as long as the patches are available.

Effectively allows 5.11 and up, while it was bound to 5.11 and 5.12 before.

Also fix the first patch on 5.14, thanks to @Tatsh - Fixes https://github.com/Frogging-Family/linux-tkg/issues/298
This commit is contained in:
Tk-Glitch
2021-08-31 22:13:08 +02:00
parent 6d3d3e78e3
commit b5da16cc45
3 changed files with 26 additions and 27 deletions

View File

@@ -587,7 +587,7 @@ case $_basever in
          'a557b342111849a5f920bbe1c129f3ff1fc1eff62c6bd6685e0972fc88e39911'
          '92e5f35da18fcabfa87a4543b0d1daafff56f3c523a4c516e2600df9de231fd0'
          '7fb1104c167edb79ec8fbdcde97940ed0f806aa978bdd14d0c665a1d76d25c24'
-         'b1c6599d0e1ac9b66898d652ed99dae3fb8676d840a43ffa920a78d96e0521be'
+         '1b656ad96004f27e9dc63d7f430b50d5c48510d6d4cd595a81c24b21adb70313'
          'b0319a7dff9c48b2f3e3d3597ee154bf92223149a633a8b7ce4026252db86da6')
        ;;
esac

View File

@@ -420,11 +420,9 @@ _tkg_srcprep() {
     _msg="Applying misc additions patch" && _tkg_patcher
   fi
-  if [ "$_basever" = "511" ] || [ "$_basever" = "512" ]; then
-    _msg="Applying patches for WRITE_WATCH support in Wine"
-    tkgpatch="$srcdir/0001-mm-Support-soft-dirty-flag-reset-for-VA-range.patch" && _tkg_patcher
-    tkgpatch="$srcdir/0002-mm-Support-soft-dirty-flag-read-with-reset.patch" && _tkg_patcher
-  fi
+  _msg="Applying patches for WRITE_WATCH support in Wine"
+  tkgpatch="$srcdir/0001-mm-Support-soft-dirty-flag-reset-for-VA-range.patch" && _tkg_patcher
+  tkgpatch="$srcdir/0002-mm-Support-soft-dirty-flag-read-with-reset.patch" && _tkg_patcher
   # prjc/bmq patch rev
   if [ "$_basever" = "58" ] || [ "$_basever" = "57" ]; then

View File

@@ -12,18 +12,18 @@ index 3cec6fbef725..7c7865028f10 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1032,6 +1032,8 @@ enum clear_refs_types {
 struct clear_refs_private {
 	enum clear_refs_types type;
+	unsigned long start, end;
+	bool clear_range;
 };
 #ifdef CONFIG_MEM_SOFT_DIRTY
@@ -1125,6 +1127,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
+	BUG_ON(addr < cp->start || end > cp->end);
+
 	ptl = pmd_trans_huge_lock(pmd, vma);
@@ -32,11 +32,11 @@ index 3cec6fbef725..7c7865028f10 100644
@@ -1181,9 +1185,11 @@ static int clear_refs_test_walk(unsigned long start, unsigned long end,
 	struct clear_refs_private *cp = walk->private;
 	struct vm_area_struct *vma = walk->vma;
-	if (vma->vm_flags & VM_PFNMAP)
+	if (!cp->clear_range && (vma->vm_flags & VM_PFNMAP))
 		return 1;
+	BUG_ON(start < cp->start || end > cp->end);
+
 	/*
@@ -55,7 +55,7 @@ index 3cec6fbef725..7c7865028f10 100644
+	bool clear_range;
 	int itype;
 	int rv;
@@ -1218,12 +1226,34 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		count = sizeof(buffer) - 1;
 	if (copy_from_user(buffer, buf, count))
@@ -94,13 +94,13 @@ index 3cec6fbef725..7c7865028f10 100644
+		start = 0;
+		end = -1UL;
+	}
 	task = get_proc_task(file_inode(file));
 	if (!task)
@@ -1235,41 +1265,87 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 		.type = type,
 	};
-	if (mmap_write_lock_killable(mm)) {
-		count = -EINTR;
-		goto out_mm;
@@ -140,7 +140,7 @@ index 3cec6fbef725..7c7865028f10 100644
+			mmap_write_unlock(mm);
+			goto out_mm;
 		}
 	if (type == CLEAR_REFS_SOFT_DIRTY) {
-		for (vma = mm->mmap; vma; vma = vma->vm_next) {
-			if (!(vma->vm_flags & VM_SOFTDIRTY))
@@ -204,41 +204,42 @@ index 3cec6fbef725..7c7865028f10 100644
 #define PM_SOFT_DIRTY		BIT_ULL(55)
 #define PM_MMAP_EXCLUSIVE	BIT_ULL(56)
+#define PM_SOFT_DIRTY_PAGE	BIT_ULL(57)
 #define PM_UFFD_WP		BIT_ULL(57)
 #define PM_FILE			BIT_ULL(61)
 #define PM_SWAP			BIT_ULL(62)
 #define PM_PRESENT		BIT_ULL(63)
@@ -1373,13 +1450,13 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
@@ -1373,11 +1450,11 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 		flags |= PM_PRESENT;
 		page = vm_normal_page(vma, addr, pte);
 		if (pte_soft_dirty(pte))
-			flags |= PM_SOFT_DIRTY;
+			flags |= PM_SOFT_DIRTY | PM_SOFT_DIRTY_PAGE;
 		if (pte_uffd_wp(pte))
 			flags |= PM_UFFD_WP;
 	} else if (is_swap_pte(pte)) {
 		swp_entry_t entry;
 		if (pte_swp_soft_dirty(pte))
-			flags |= PM_SOFT_DIRTY;
+			flags |= PM_SOFT_DIRTY | PM_SOFT_DIRTY_PAGE;
 		if (pte_swp_uffd_wp(pte))
 			flags |= PM_UFFD_WP;
 		entry = pte_to_swp_entry(pte);
 		if (pm->show_pfn)
@@ -1500,7 +1500,7 @@
 			frame = swp_type(entry) |
@@ -1424,7 +1501,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		flags |= PM_PRESENT;
 		if (pmd_soft_dirty(pmd))
-			flags |= PM_SOFT_DIRTY;
+			flags |= PM_SOFT_DIRTY | PM_SOFT_DIRTY_PAGE;
 		if (pmd_uffd_wp(pmd))
 			flags |= PM_UFFD_WP;
 		if (pm->show_pfn)
 			frame = pmd_pfn(pmd) +
 				((addr & ~PMD_MASK) >> PAGE_SHIFT);
@@ -1442,7 +1519,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	}
 	flags |= PM_SWAP;
 	if (pmd_swp_soft_dirty(pmd))
-		flags |= PM_SOFT_DIRTY;
+		flags |= PM_SOFT_DIRTY | PM_SOFT_DIRTY_PAGE;
 	if (pmd_swp_uffd_wp(pmd))
 		flags |= PM_UFFD_WP;
 	VM_BUG_ON(!is_pmd_migration_entry(pmd));
 	page = migration_entry_to_page(entry);
 }
--
2.30.2