author     Luis Claudio R. Goncalves <lgoncalv@redhat.com>  2021-06-08 19:30:43 -0300
committer  Luis Claudio R. Goncalves <lgoncalv@redhat.com>  2021-06-08 19:30:43 -0300
commit     67cea00c1bbcb07168e0b66c5b5aa077e190a39b (patch)
tree       2a8e214f7cf83f797b5c5fb7a00236b37b01970f /mm
parent     9c284551d0fd10f94b51ebe4cbaa31789c260b6f (diff)
parent     a6b2dae3ee3a3f9f4db3fab5b4b9e493fecf4acd (diff)
download   linux-rt-67cea00c1bbcb07168e0b66c5b5aa077e190a39b.tar.gz
Merge tag 'v4.14.235' into v4.14-rt
Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c      11
-rw-r--r--  mm/userfaultfd.c   2
-rw-r--r--  mm/vmstat.c        3
3 files changed, 9 insertions, 7 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 777ad49dbd46..e59e0f7ed562 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3802,8 +3802,7 @@ retry:
* handling userfault. Reacquire after handling
* fault to make calling code simpler.
*/
- hash = hugetlb_fault_mutex_hash(h, mapping, idx,
- address);
+ hash = hugetlb_fault_mutex_hash(h, mapping, idx);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
ret = handle_userfault(&vmf, VM_UFFD_MISSING);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
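The hunk above keeps the existing pattern: the per-bucket fault mutex is dropped before calling handle_userfault(), which may block, and the same bucket is retaken afterwards with the saved hash so the caller can unlock as usual; only the hash computation loses its (already unused) address argument. Below is a minimal userspace sketch of that unlock/call/relock pattern, with a pthread mutex standing in for an entry of hugetlb_fault_mutex_table and slow_handler() as a purely illustrative stand-in for handle_userfault(); none of these names are kernel APIs.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a call that may block for a long time (handle_userfault here). */
static int slow_handler(void)
{
        return 0;
}

static int fault_path(void)
{
        int ret;

        pthread_mutex_lock(&bucket_lock);
        /* ... fast-path work under the bucket lock ... */

        /* Drop the lock across the potentially blocking call ... */
        pthread_mutex_unlock(&bucket_lock);
        ret = slow_handler();
        /* ... and retake the same lock so calling code can keep unlocking it. */
        pthread_mutex_lock(&bucket_lock);

        pthread_mutex_unlock(&bucket_lock);
        return ret;
}

int main(void)
{
        printf("fault_path() -> %d\n", fault_path());
        return 0;
}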
@@ -3916,7 +3915,7 @@ backout_unlocked:
#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
- pgoff_t idx, unsigned long address)
+ pgoff_t idx)
{
unsigned long key[2];
u32 hash;
@@ -3924,7 +3923,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
key[0] = (unsigned long) mapping;
key[1] = idx;
- hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
+ hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
return hash & (num_fault_mutexes - 1);
}
@@ -3934,7 +3933,7 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
* return 0 and avoid the hashing overhead.
*/
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
- pgoff_t idx, unsigned long address)
+ pgoff_t idx)
{
return 0;
}
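Both variants of hugetlb_fault_mutex_hash() now key only on the mapping and the page index: the SMP version feeds the two words to jhash2() and masks the result with num_fault_mutexes - 1 (the added parentheses around sizeof(u32) are cosmetic), while the UP stub returns 0 because there is only one mutex. The removed address parameter was already unused in the body shown. A rough userspace sketch of the same idea follows, with a toy mixing function standing in for jhash2() and hypothetical names throughout; it is not the kernel implementation.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 64            /* must be a power of two for the mask */

/* Toy stand-in for the kernel's jhash2(); only the shape of the call matters. */
static uint32_t toy_hash_words(const uint32_t *k, size_t nwords, uint32_t seed)
{
        uint32_t h = seed ^ 0x9e3779b9u;

        while (nwords--) {
                h ^= *k++;
                h *= 0x85ebca6bu;
                h ^= h >> 13;
        }
        return h;
}

/* Same keying as the patched function: the mapping pointer and the page index. */
static uint32_t fault_mutex_hash(const void *mapping, unsigned long idx)
{
        unsigned long key[2] = { (unsigned long)mapping, idx };

        return toy_hash_words((const uint32_t *)key,
                              sizeof(key) / sizeof(uint32_t), 0) &
               (NUM_FAULT_MUTEXES - 1);
}

int main(void)
{
        static int dummy_mapping;       /* stands in for a struct address_space */

        /* The same (mapping, idx) pair always selects the same bucket. */
        printf("bucket=%u\n", fault_mutex_hash(&dummy_mapping, 42));
        printf("bucket=%u\n", fault_mutex_hash(&dummy_mapping, 42));
        return 0;
}

Masking with (table size - 1), as the kernel code does, only yields a valid index because the table size is a power of two; with an arbitrary size a modulo would be needed instead.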
@@ -3979,7 +3978,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
- hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
+ hash = hugetlb_fault_mutex_hash(h, mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
entry = huge_ptep_get(ptep);
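As the comment in this hunk says, the hash picks which mutex in hugetlb_fault_mutex_table to take before touching the page cache, so two CPUs faulting the same (mapping, idx) serialize rather than both trying to instantiate the page. The small pthread demo below illustrates that usage under the same assumptions as the sketch above; bucket_for() is a deliberately trivial stand-in for the real hash and every name is illustrative.

#include <pthread.h>
#include <stdio.h>

#define NUM_FAULT_MUTEXES 8             /* power of two, as in the kernel table */

static pthread_mutex_t fault_mutex_table[NUM_FAULT_MUTEXES];
static int page_instantiated;           /* stand-in for "page already in the cache" */

/* Hypothetical bucket choice; the kernel hashes (mapping, idx) with jhash2(). */
static unsigned int bucket_for(unsigned long idx)
{
        return idx & (NUM_FAULT_MUTEXES - 1);
}

static void *faulting_thread(void *arg)
{
        unsigned long idx = *(unsigned long *)arg;
        unsigned int hash = bucket_for(idx);

        pthread_mutex_lock(&fault_mutex_table[hash]);
        if (!page_instantiated) {
                /* Only the thread that wins the bucket allocates the page. */
                page_instantiated = 1;
                printf("idx %lu: instantiated the page\n", idx);
        } else {
                printf("idx %lu: already present, no duplicate allocation\n", idx);
        }
        pthread_mutex_unlock(&fault_mutex_table[hash]);
        return NULL;
}

int main(void)
{
        pthread_t t1, t2;
        unsigned long idx = 42;          /* both threads fault the same page index */

        for (int i = 0; i < NUM_FAULT_MUTEXES; i++)
                pthread_mutex_init(&fault_mutex_table[i], NULL);

        pthread_create(&t1, NULL, faulting_thread, &idx);
        pthread_create(&t2, NULL, faulting_thread, &idx);
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
}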
diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c
index d3b4a78d79b6..ee8a68863089 100644
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -272,7 +272,7 @@ retry:
*/
idx = linear_page_index(dst_vma, dst_addr);
mapping = dst_vma->vm_file->f_mapping;
- hash = hugetlb_fault_mutex_hash(h, mapping, idx, dst_addr);
+ hash = hugetlb_fault_mutex_hash(h, mapping, idx);
mutex_lock(&hugetlb_fault_mutex_table[hash]);
err = -ENOMEM;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 1c81bc249ca3..0b099c61e7ec 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1325,6 +1325,9 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
list_for_each(curr, &area->free_list[mtype])
freecount++;
seq_printf(m, "%6lu ", freecount);
+ spin_unlock_irq(&zone->lock);
+ cond_resched();
+ spin_lock_irq(&zone->lock);
}
seq_putc(m, '\n');
}
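The vmstat.c change breaks up a potentially long walk over the per-zone free lists by dropping zone->lock, giving other work a chance to run via cond_resched(), and retaking the lock before the next migratetype. Here is a rough userspace sketch of that unlock/yield/relock pattern, with a pthread mutex standing in for the zone spinlock, sched_yield() standing in for cond_resched(), and all other names invented for illustration.

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_LISTS 4                      /* stand-in for the migratetype loop */

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pretend each list walk is long enough to matter. */
static unsigned long count_free(int list)
{
        return 1000UL * (list + 1);
}

static void show_free_counts(void)
{
        pthread_mutex_lock(&zone_lock);
        for (int list = 0; list < NR_LISTS; list++) {
                unsigned long freecount = count_free(list);

                printf("%6lu ", freecount);

                /* Let other work run between long list walks, then retake
                 * the lock so the next walk still sees a consistent list. */
                pthread_mutex_unlock(&zone_lock);
                sched_yield();
                pthread_mutex_lock(&zone_lock);
        }
        pthread_mutex_unlock(&zone_lock);
        printf("\n");
}

int main(void)
{
        show_free_counts();
        return 0;
}

Dropping the lock between list walks means the printed counts are no longer a single atomic snapshot across migratetypes, which is an acceptable trade-off for a diagnostic interface in exchange for not holding a spinlock with interrupts disabled for the whole walk.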