/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include "binder_alloc.h"
#include "binder_trace.h"

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_USER_ERROR         = 1U << 0,
        BINDER_DEBUG_OPEN_CLOSE         = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC       = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info_ratelimited(x); \
        } while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
        return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return (u8 *)alloc->buffer +
                        alloc->buffer_size - (u8 *)buffer->data;
        return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
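
/*
 * Note: a binder_buffer does not record its own size. Its extent is implied
 * by the gap between buffer->data and the start of the next buffer on
 * alloc->buffers (or the end of the mmap'ed region for the last buffer),
 * which is exactly what binder_alloc_buffer_size() computes above.
 */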

static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: add free buffer, size %zd, at %pK\n",
                           alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer->data < buffer->data)
                        p = &parent->rb_left;
                else if (new_buffer->data > buffer->data)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}
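
/*
 * The two red-black trees serve different lookups: free_buffers is keyed by
 * buffer size so that allocation can do a best-fit search, while
 * allocated_buffers is keyed by buffer address so that a user-supplied
 * pointer can be mapped back to its binder_buffer when it is freed.
 */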

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        void *kern_ptr;

        kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (kern_ptr < buffer->data)
                        n = n->rb_left;
                else if (kern_ptr > buffer->data)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer when in use by kernel or
                         * after it's already been freed.
                         */
                        if (!buffer->allow_user_free)
                                return ERR_PTR(-EPERM);
                        buffer->allow_user_free = 0;
                        return buffer;
                }
        }
        return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:    binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer. Search the rb tree for the buffer
 * that matches the user data pointer.
 *
 * Return: Pointer to the buffer if found, NULL if there is no match, or
 * ERR_PTR(-EPERM) if the buffer is not currently allowed to be freed
 * by userspace.
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
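
/*
 * The user-space address handed in by the caller (e.g. the BC_FREE_BUFFER
 * path in binder.c) is translated to the matching kernel address by
 * subtracting alloc->user_buffer_offset, since the same physical pages are
 * mapped at both addresses. Clearing allow_user_free here means a second
 * free attempt on the same buffer fails with -EPERM instead of touching a
 * buffer that is already back under kernel control.
 */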

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end)
{
        void *page_addr;
        unsigned long user_page_addr;
        struct binder_lru_page *page;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = NULL;
        bool need_mm = false;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: %s pages %pK-%pK\n", alloc->pid,
                           allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (allocate == 0)
                goto free_range;

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (!page->page_ptr) {
                        need_mm = true;
                        break;
                }
        }

        if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
                mm = alloc->vma_vm_mm;

        if (mm) {
                down_read(&mm->mmap_sem);
                vma = alloc->vma;
        }

        if (!vma && need_mm) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                                   alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;
                bool on_lru;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                if (page->page_ptr) {
                        trace_binder_alloc_lru_start(alloc, index);

                        on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
                        WARN_ON(!on_lru);

                        trace_binder_alloc_lru_end(alloc, index);
                        continue;
                }

                if (WARN_ON(!vma))
                        goto err_page_ptr_cleared;

                trace_binder_alloc_page_start(alloc, index);
                page->page_ptr = alloc_page(GFP_KERNEL |
                                            __GFP_HIGHMEM |
                                            __GFP_ZERO);
                if (!page->page_ptr) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                               alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                page->alloc = alloc;
                INIT_LIST_HEAD(&page->lru);

                ret = map_kernel_range_noflush((unsigned long)page_addr,
                                               PAGE_SIZE, PAGE_KERNEL,
                                               &page->page_ptr);
                flush_cache_vmap((unsigned long)page_addr,
                                 (unsigned long)page_addr + PAGE_SIZE);
                if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
                               alloc->pid, page_addr);
                        goto err_map_kernel_failed;
                }
                user_page_addr =
                        (uintptr_t)page_addr + alloc->user_buffer_offset;
                ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }

                if (index + 1 > alloc->pages_high)
                        alloc->pages_high = index + 1;

                trace_binder_alloc_page_end(alloc, index);
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                bool ret;
                size_t index;

                index = (page_addr - alloc->buffer) / PAGE_SIZE;
                page = &alloc->pages[index];

                trace_binder_free_lru_start(alloc, index);

                ret = list_lru_add(&binder_alloc_lru, &page->lru);
                WARN_ON(!ret);

                trace_binder_free_lru_end(alloc, index);
                continue;

err_vm_insert_page_failed:
                unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
                __free_page(page->page_ptr);
                page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
                ;
        }
err_no_vma:
        if (mm) {
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}
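
/*
 * Pages are managed lazily: binder_update_page_range() only allocates and
 * maps the pages backing the requested [start, end) range, and "freeing" a
 * range does not return pages to the system right away. Instead the pages
 * are parked on binder_alloc_lru, so a later allocation covering the same
 * range can simply pull them back off the LRU, while the shrinker callback
 * (binder_alloc_free_page() below) can reclaim them under memory pressure.
 */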


static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
                                        struct vm_area_struct *vma)
{
        if (vma)
                alloc->vma_vm_mm = vma->vm_mm;
        /*
         * If we see that alloc->vma is not NULL, the buffer data structures
         * are set up completely. See the smp_rmb() side in
         * binder_alloc_get_vma(). We also want to guarantee that the new
         * alloc->vma_vm_mm is always visible if alloc->vma is set.
         */
        smp_wmb();
        alloc->vma = vma;
}

static inline struct vm_area_struct *binder_alloc_get_vma(
                struct binder_alloc *alloc)
{
        struct vm_area_struct *vma = NULL;

        if (alloc->vma) {
                /* Look at description in binder_alloc_set_vma */
                smp_rmb();
                vma = alloc->vma;
        }
        return vma;
}
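
/*
 * The smp_wmb()/smp_rmb() pair above orders the store of alloc->vma_vm_mm
 * against the store of alloc->vma: a reader that observes a non-NULL
 * alloc->vma through binder_alloc_get_vma() is then guaranteed to also see
 * the matching vma_vm_mm and the rest of the mmap-time setup.
 */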

static struct binder_buffer *binder_alloc_new_buf_locked(
                                struct binder_alloc *alloc,
                                size_t data_size,
                                size_t offsets_size,
                                size_t extra_buffers_size,
                                int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (!binder_alloc_get_vma(alloc)) {
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf, no vma\n",
                                   alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid size %zd-%zd\n",
                                   alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: got transaction with invalid extra_buffers_size %zd\n",
                                   alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: binder_alloc_buf size %zd failed, no async space left\n",
                                   alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        /* Pad 0-size buffers so they get assigned unique addresses */
        size = max(size, sizeof(void *));

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "%d: binder_alloc_buf size %zd failed, no address space\n",
                                   alloc->pid, size);
                binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                                   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                                   total_alloc_size, allocated_buffers,
                                   largest_alloc_size, total_free_size,
                                   free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                           alloc->pid, size, buffer, buffer_size);

        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
        WARN_ON(n && buffer_size != size);
        end_page_addr =
                (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1,
                        (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                        end_page_addr);
        if (ret)
                return ERR_PTR(ret);

        if (buffer_size != size) {
                struct binder_buffer *new_buffer;

                new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
                if (!new_buffer) {
                        pr_err("%s: %d failed to alloc new buffer struct\n",
                               __func__, alloc->pid);
                        goto err_alloc_buf_struct_failed;
                }
                new_buffer->data = (u8 *)buffer->data + size;
                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        buffer->allow_user_free = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_alloc_buf size %zd got %pK\n",
                           alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_alloc_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }
        return buffer;

err_alloc_buf_struct_failed:
        binder_update_page_range(alloc, 0,
                                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                                 end_page_addr);
        return ERR_PTR(-ENOMEM);
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or an ERR_PTR() on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
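
/*
 * Illustrative caller sketch (not taken from binder.c; proc, tr, extra_size
 * and is_async are placeholder names): the transaction path asks for one
 * buffer sized for data, offsets and any extra objects, then checks for an
 * ERR_PTR before copying anything into it:
 *
 *      buffer = binder_alloc_new_buf(&proc->alloc, tr->data_size,
 *                                    tr->offsets_size, extra_size, is_async);
 *      if (IS_ERR(buffer))
 *              return PTR_ERR(buffer);
 */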

static void *buffer_start_page(struct binder_buffer *buffer)
{
        return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}

static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
        return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        bool to_free = true;
        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = binder_buffer_prev(buffer);
        BUG_ON(!prev->free);
        if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
                to_free = false;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer->data, prev->data);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = binder_buffer_next(buffer);
                if (buffer_start_page(next) == buffer_start_page(buffer)) {
                        to_free = false;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid,
                                           buffer->data,
                                           next->data);
                }
        }

        if (PAGE_ALIGNED(buffer->data)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer start %pK is page aligned\n",
                                   alloc->pid, buffer->data);
                to_free = false;
        }

        if (to_free) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
                                   alloc->pid, buffer->data,
                                   prev->data, next ? next->data : NULL);
                binder_update_page_range(alloc, 0, buffer_start_page(buffer),
                                         buffer_start_page(buffer) + PAGE_SIZE);
        }
        list_del(&buffer->entry);
        kfree(buffer);
}
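
/*
 * A binder_buffer's first page may be shared with the tail of the previous
 * buffer or the head of the next one. binder_delete_free_buffer() therefore
 * only releases that page when neither neighbour uses it and the buffer
 * start is not page aligned (an aligned start means the page is already
 * covered by the normal range free in binder_free_buf_locked()).
 */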

static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                           alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON(buffer->data < alloc->buffer);
        BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                                   "%d: binder_free_buf size %zd async free %zd\n",
                                   alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                                 (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                                 (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK));

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = binder_buffer_next(buffer);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = binder_buffer_prev(buffer);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}
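
/*
 * Freeing coalesces adjacent free buffers so the free list does not
 * fragment: if the following buffer is free it is deleted and absorbed into
 * this one, and if the preceding buffer is free this one is deleted and the
 * predecessor (now covering both ranges) is reinserted into free_buffers.
 */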

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:  binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma:   vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
                while (CACHE_COLOUR(
                                (vma->vm_start ^ (uint32_t)alloc->buffer))) {
                        pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
                                __func__, alloc->pid, vma->vm_start,
                                vma->vm_end, alloc->buffer);
                        vma->vm_start += PAGE_SIZE;
                }
        }
#endif
        alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
                ret = -ENOMEM;
                failure_string = "alloc buffer struct";
                goto err_alloc_buf_struct_failed;
        }

        buffer->data = alloc->buffer;
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        binder_alloc_set_vma(alloc, vma);
        mmgrab(alloc->vma_vm_mm);

        return 0;

err_alloc_buf_struct_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        vfree(alloc->buffer);
        alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                           "%s: %d %lx-%lx %s failed %d\n", __func__,
                           alloc->pid, vma->vm_start, vma->vm_end,
                           failure_string, ret);
        return ret;
}
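
/*
 * After a successful mmap the same physical pages are visible at two
 * addresses: the kernel virtual range starting at alloc->buffer (obtained
 * from get_vm_area()) and the process' vma starting at vma->vm_start.
 * user_buffer_offset records the constant difference between the two, which
 * is what lets the allocator convert user pointers to kernel pointers and
 * back without any per-buffer bookkeeping.
 */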


void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;
        struct binder_buffer *buffer;

        buffers = 0;
        mutex_lock(&alloc->mutex);
        BUG_ON(alloc->vma);

        while ((n = rb_first(&alloc->allocated_buffers))) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        while (!list_empty(&alloc->buffers)) {
                buffer = list_first_entry(&alloc->buffers,
                                          struct binder_buffer, entry);
                WARN_ON(!buffer->free);

                list_del(&buffer->entry);
                WARN_ON_ONCE(!list_empty(&alloc->buffers));
                kfree(buffer);
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void *page_addr;
                        bool on_lru;

                        if (!alloc->pages[i].page_ptr)
                                continue;

                        on_lru = list_lru_del(&binder_alloc_lru,
                                              &alloc->pages[i].lru);
                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%s: %d: page %d at %pK %s\n",
                                           __func__, alloc->pid, i, page_addr,
                                           on_lru ? "on lru" : "active");
                        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
                        __free_page(alloc->pages[i].page_ptr);
                        page_count++;
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);
        if (alloc->vma_vm_mm)
                mmdrop(alloc->vma_vm_mm);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                           "%s: %d buffers %d, pages %d\n",
                           __func__, alloc->pid, buffers, page_count);
}
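
/*
 * By the time the deferred release runs, binder_alloc_vma_close() has
 * already cleared alloc->vma (hence the BUG_ON above), so only the
 * kernel-side mapping and the page array remain to be torn down. Pages
 * still parked on the global LRU are taken off it before they are returned
 * to the system.
 */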

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc)
{
        struct binder_lru_page *page;
        int i;
        int active = 0;
        int lru = 0;
        int free = 0;

        mutex_lock(&alloc->mutex);
        for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                page = &alloc->pages[i];
                if (!page->page_ptr)
                        free++;
                else if (list_empty(&page->lru))
                        active++;
                else
                        lru++;
        }
        mutex_unlock(&alloc->mutex);
        seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
        seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru that @item belongs to
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock,
                                       void *cb_arg)
        __must_hold(lock)
{
        struct mm_struct *mm = NULL;
        struct binder_lru_page *page = container_of(item,
                                                    struct binder_lru_page,
                                                    lru);
        struct binder_alloc *alloc;
        uintptr_t page_addr;
        size_t index;
        struct vm_area_struct *vma;

        alloc = page->alloc;
        if (!mutex_trylock(&alloc->mutex))
                goto err_get_alloc_mutex_failed;

        if (!page->page_ptr)
                goto err_page_already_freed;

        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
        vma = binder_alloc_get_vma(alloc);
        if (vma) {
                if (!mmget_not_zero(alloc->vma_vm_mm))
                        goto err_mmget;
                mm = alloc->vma_vm_mm;
                if (!down_write_trylock(&mm->mmap_sem))
                        goto err_down_write_mmap_sem_failed;
        }

        list_lru_isolate(lru, item);
        spin_unlock(lock);

        if (vma) {
                trace_binder_unmap_user_start(alloc, index);

                zap_page_range(vma,
                               page_addr + alloc->user_buffer_offset,
                               PAGE_SIZE);

                trace_binder_unmap_user_end(alloc, index);

                up_write(&mm->mmap_sem);
                mmput(mm);
        }

        trace_binder_unmap_kernel_start(alloc, index);

        unmap_kernel_range(page_addr, PAGE_SIZE);
        __free_page(page->page_ptr);
        page->page_ptr = NULL;

        trace_binder_unmap_kernel_end(alloc, index);

        spin_lock(lock);
        mutex_unlock(&alloc->mutex);
        return LRU_REMOVED_RETRY;

err_down_write_mmap_sem_failed:
        mmput_async(mm);
err_mmget:
err_page_already_freed:
        mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
        return LRU_SKIP;
}
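
/*
 * The shrinker path is strictly opportunistic: alloc->mutex and mmap_sem
 * are only ever trylocked here, and the callback bails out with LRU_SKIP
 * rather than blocking, because it can run in reclaim context while the
 * owning process is busy inside the allocator. Only once all locks are
 * held is the page unmapped from both the user vma (zap_page_range()) and
 * the kernel range, and then returned to the page allocator.
 */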

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret = list_lru_count(&binder_alloc_lru);
        return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        unsigned long ret;

        ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
                            NULL, sc->nr_to_scan);
        return ret;
}

static struct shrinker binder_shrinker = {
        .count_objects = binder_shrink_count,
        .scan_objects = binder_shrink_scan,
        .seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
        INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
        int ret = list_lru_init(&binder_alloc_lru);

        if (ret == 0) {
                ret = register_shrinker(&binder_shrinker);
                if (ret)
                        list_lru_destroy(&binder_alloc_lru);
        }
        return ret;
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc:  binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes:  bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
                                struct binder_buffer *buffer,
                                binder_size_t offset, size_t bytes)
{
        size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

        return buffer_size >= bytes &&
                offset <= buffer_size - bytes &&
                IS_ALIGNED(offset, sizeof(u32)) &&
                !buffer->free &&
                (!buffer->allow_user_free || !buffer->transaction);
}
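
/*
 * Note the order of the size checks above: "buffer_size >= bytes" is tested
 * first so that "buffer_size - bytes" cannot underflow, which makes the
 * combined "offset <= buffer_size - bytes" test safe against offset/bytes
 * values that would otherwise wrap around.
 */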

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc:         binder_alloc for this proc
 * @buffer:        binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp:        address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
                                          struct binder_buffer *buffer,
                                          binder_size_t buffer_offset,
                                          pgoff_t *pgoffp)
{
        binder_size_t buffer_space_offset = buffer_offset +
                (buffer->data - alloc->buffer);
        pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
        size_t index = buffer_space_offset >> PAGE_SHIFT;
        struct binder_lru_page *lru_page;

        lru_page = &alloc->pages[index];
        *pgoffp = pgoff;
        return lru_page->page_ptr;
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc:         binder_alloc for this proc
 * @buffer:        binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from:          userspace pointer to source buffer
 * @bytes:         bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
                                 struct binder_buffer *buffer,
                                 binder_size_t buffer_offset,
                                 const void __user *from,
                                 size_t bytes)
{
        if (!check_buffer(alloc, buffer, buffer_offset, bytes))
                return bytes;

        while (bytes) {
                unsigned long size;
                unsigned long ret;
                struct page *page;
                pgoff_t pgoff;
                void *kptr;

                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
                kptr = kmap(page) + pgoff;
                ret = copy_from_user(kptr, from, size);
                kunmap(page);
                if (ret)
                        return bytes - size + ret;
                bytes -= size;
                from += size;
                buffer_offset += size;
        }
        return 0;
}
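
/*
 * The backing pages are allocated with __GFP_HIGHMEM, so the copy helpers
 * above and below work one page at a time: binder_alloc_get_page() resolves
 * the struct page and intra-page offset for the current buffer_offset, the
 * chunk size is clamped to the end of that page, and the page is mapped
 * with kmap()/kmap_atomic() only for the duration of the copy.
 */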

static void binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
                                        bool to_buffer,
                                        struct binder_buffer *buffer,
                                        binder_size_t buffer_offset,
                                        void *ptr,
                                        size_t bytes)
{
        /* All copies must be 32-bit aligned and 32-bit size */
        BUG_ON(!check_buffer(alloc, buffer, buffer_offset, bytes));

        while (bytes) {
                unsigned long size;
                struct page *page;
                pgoff_t pgoff;
                void *tmpptr;
                void *base_ptr;

                page = binder_alloc_get_page(alloc, buffer,
                                             buffer_offset, &pgoff);
                size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
                base_ptr = kmap_atomic(page);
                tmpptr = base_ptr + pgoff;
                if (to_buffer)
                        memcpy(tmpptr, ptr, size);
                else
                        memcpy(ptr, tmpptr, size);
                /*
                 * kunmap_atomic() takes care of flushing the cache
                 * if this device has VIVT cache arch
                 */
                kunmap_atomic(base_ptr);
                bytes -= size;
                pgoff = 0;
                ptr = ptr + size;
                buffer_offset += size;
        }
}

void binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
                                 struct binder_buffer *buffer,
                                 binder_size_t buffer_offset,
                                 void *src,
                                 size_t bytes)
{
        binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
                                    src, bytes);
}

void binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
                                   void *dest,
                                   struct binder_buffer *buffer,
                                   binder_size_t buffer_offset,
                                   size_t bytes)
{
        binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
                                    dest, bytes);
}
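
/*
 * binder_alloc_copy_to_buffer() and binder_alloc_copy_from_buffer() are thin
 * wrappers that only fix the copy direction; both rely on
 * binder_alloc_do_buffer_copy() for the page-by-page walk. These
 * kernel-to-kernel copies can use kmap_atomic() because memcpy() never
 * sleeps, unlike the userspace copy above, which may fault and therefore
 * uses kmap().
 */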