ANDROID: mm: avoid using vmacache in lockless vma search
When searching for a VMA under RCU protection, vmacache should be avoided because
a race with munmap() might result in finding a vma and placing it into
vmacache after munmap() removed that vma and called vmacache_invalidate().
Once that vma is freed, vmacache will be left with an invalid vma pointer.
Bug: 257443051
Change-Id: I62438305fcf5139974f4f7d3bae5b22c74084a59
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 21c8954d..fa42ef4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2734,6 +2734,8 @@ extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#define expand_upwards(vma, address) (0)
#endif
+extern struct vm_area_struct *find_vma_from_tree(struct mm_struct *mm,
+ unsigned long addr);
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * __find_vma(struct mm_struct * mm, unsigned long addr);
extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
diff --git a/mm/memory.c b/mm/memory.c
index 6519783..3172677 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -216,7 +216,7 @@ struct vm_area_struct *get_vma(struct mm_struct *mm, unsigned long addr)
struct vm_area_struct *vma;
rcu_read_lock();
- vma = __find_vma(mm, addr);
+ vma = find_vma_from_tree(mm, addr);
if (vma) {
if (vma->vm_start > addr ||
!atomic_inc_unless_negative(&vma->file_ref_count))
diff --git a/mm/mmap.c b/mm/mmap.c
index aa61bcb..6966807 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2279,16 +2279,10 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
EXPORT_SYMBOL(get_unmapped_area);
-/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
-struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)
+struct vm_area_struct *find_vma_from_tree(struct mm_struct *mm, unsigned long addr)
{
struct rb_node *rb_node;
- struct vm_area_struct *vma;
-
- /* Check the cache first. */
- vma = vmacache_find(mm, addr);
- if (likely(vma))
- return vma;
+ struct vm_area_struct *vma = NULL;
rb_node = mm->mm_rb.rb_node;
@@ -2306,6 +2300,21 @@ struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)
rb_node = rb_node->rb_right;
}
+ return vma;
+}
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)
+{
+ struct vm_area_struct *vma;
+
+ /* Check the cache first. */
+ vma = vmacache_find(mm, addr);
+ if (likely(vma))
+ return vma;
+
+ vma = find_vma_from_tree(mm, addr);
+
if (vma)
vmacache_update(addr, vma);
return vma;
diff --git a/mm/nommu.c b/mm/nommu.c
index bbe7b1a..f90afb1 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -659,6 +659,22 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
vm_area_free(vma);
}
+struct vm_area_struct *find_vma_from_tree(struct mm_struct *mm, unsigned long addr)
+{
+ struct vm_area_struct *vma;
+
+ /* trawl the list (there may be multiple mappings in which addr
+ * resides) */
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (vma->vm_start > addr)
+ return NULL;
+ if (vma->vm_end > addr)
+ return vma;
+ }
+
+ return NULL;
+}
+
/*
* look up the first VMA in which addr resides, NULL if none
* - should be called with mm->mmap_lock at least held readlocked
@@ -667,23 +683,16 @@ struct vm_area_struct *__find_vma(struct mm_struct *mm, unsigned long addr)
{
struct vm_area_struct *vma;
- /* check the cache first */
+ /* Check the cache first. */
vma = vmacache_find(mm, addr);
if (likely(vma))
return vma;
- /* trawl the list (there may be multiple mappings in which addr
- * resides) */
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if (vma->vm_start > addr)
- return NULL;
- if (vma->vm_end > addr) {
- vmacache_update(addr, vma);
- return vma;
- }
- }
+ vma = find_vma_from_tree(mm, addr);
- return NULL;
+ if (vma)
+ vmacache_update(addr, vma);
+ return vma;
}
EXPORT_SYMBOL(__find_vma);