/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
        BINDER_DEBUG_OPEN_CLOSE         = 1U << 1,
        BINDER_DEBUG_BUFFER_ALLOC       = 1U << 2,
        BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
                   uint, 0644);

#define binder_alloc_debug(mask, x...) \
        do { \
                if (binder_alloc_debug_mask & mask) \
                        pr_info(x); \
        } while (0)

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
                                       struct binder_buffer *buffer)
{
        if (list_is_last(&buffer->entry, &alloc->buffers))
                return alloc->buffer +
                       alloc->buffer_size - (void *)buffer->data;
        return (size_t)list_entry(buffer->entry.next,
                        struct binder_buffer, entry) - (size_t)buffer->data;
}
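
/*
 * Buffer bookkeeping note: buffers are carved back to back out of the single
 * mmap'ed region, so a buffer's size is implied by its neighbour. It runs
 * from buffer->data either to the start of the next struct binder_buffer in
 * alloc->buffers or, for the last buffer, to the end of the region.
 */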

static void binder_insert_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->free_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;
        size_t buffer_size;
        size_t new_buffer_size;

        BUG_ON(!new_buffer->free);

        new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: add free buffer, size %zd, at %pK\n",
                           alloc->pid, new_buffer_size, new_buffer);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);

                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (new_buffer_size < buffer_size)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
                struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
        struct rb_node **p = &alloc->allocated_buffers.rb_node;
        struct rb_node *parent = NULL;
        struct binder_buffer *buffer;

        BUG_ON(new_buffer->free);

        while (*p) {
                parent = *p;
                buffer = rb_entry(parent, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (new_buffer < buffer)
                        p = &parent->rb_left;
                else if (new_buffer > buffer)
                        p = &parent->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_buffer->rb_node, parent, p);
        rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
                struct binder_alloc *alloc,
                uintptr_t user_ptr)
{
        struct rb_node *n = alloc->allocated_buffers.rb_node;
        struct binder_buffer *buffer;
        struct binder_buffer *kern_ptr;

        kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
                - offsetof(struct binder_buffer, data));

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(buffer->free);

                if (kern_ptr < buffer)
                        n = n->rb_left;
                else if (kern_ptr > buffer)
                        n = n->rb_right;
                else {
                        /*
                         * Guard against user threads attempting to
                         * free the buffer twice
                         */
                        if (buffer->free_in_progress) {
                                pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
                                       alloc->pid, current->pid, (u64)user_ptr);
                                return NULL;
                        }
                        buffer->free_in_progress = 1;
                        return buffer;
                }
        }
        return NULL;
}
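
/*
 * Note on the kern_ptr calculation above: every page is mapped at both a
 * kernel address and a user address that differ by the constant
 * alloc->user_buffer_offset, so subtracting that offset and the offset of
 * the data[] member from the user pointer recovers the struct binder_buffer
 * header; the rb-tree walk then confirms it names a live allocation.
 */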

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:      binder_alloc for this proc
 * @user_ptr:   User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer. Also marks the buffer so that a concurrent attempt to free the
 * same buffer twice is detected and rejected.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                                                   uintptr_t user_ptr)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
        mutex_unlock(&alloc->mutex);
        return buffer;
}

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
                                    void *start, void *end,
                                    struct vm_area_struct *vma)
{
        void *page_addr;
        unsigned long user_page_addr;
        struct page **page;
        struct mm_struct *mm;

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                           "%d: %s pages %pK-%pK\n", alloc->pid,
                           allocate ? "allocate" : "free", start, end);

        if (end <= start)
                return 0;

        trace_binder_update_page_range(alloc, allocate, start, end);

        if (vma)
                mm = NULL;
        else
                mm = get_task_mm(alloc->tsk);

        if (mm) {
                down_write(&mm->mmap_sem);
                vma = alloc->vma;
                if (vma && mm != alloc->vma_vm_mm) {
                        pr_err("%d: vma mm and task mm mismatch\n",
                               alloc->pid);
                        vma = NULL;
                }
        }

        if (allocate == 0)
                goto free_range;

        if (vma == NULL) {
                pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
                       alloc->pid);
                goto err_no_vma;
        }

        for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
                int ret;

                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

                BUG_ON(*page);
                *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
                if (*page == NULL) {
                        pr_err("%d: binder_alloc_buf failed for page at %pK\n",
                               alloc->pid, page_addr);
                        goto err_alloc_page_failed;
                }
                ret = map_kernel_range_noflush((unsigned long)page_addr,
                                               PAGE_SIZE, PAGE_KERNEL, page);
                flush_cache_vmap((unsigned long)page_addr,
                                 (unsigned long)page_addr + PAGE_SIZE);
                if (ret != 1) {
                        pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
                               alloc->pid, page_addr);
                        goto err_map_kernel_failed;
                }
                user_page_addr =
                        (uintptr_t)page_addr + alloc->user_buffer_offset;
                ret = vm_insert_page(vma, user_page_addr, page[0]);
                if (ret) {
                        pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
                               alloc->pid, user_page_addr);
                        goto err_vm_insert_page_failed;
                }
                /* vm_insert_page does not seem to increment the refcount */
        }
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return 0;

free_range:
        for (page_addr = end - PAGE_SIZE; page_addr >= start;
             page_addr -= PAGE_SIZE) {
                page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
                if (vma)
                        zap_page_range(vma, (uintptr_t)page_addr +
                                alloc->user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed:
                unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
                __free_page(*page);
                *page = NULL;
err_alloc_page_failed:
                ;
        }
err_no_vma:
        if (mm) {
                up_write(&mm->mmap_sem);
                mmput(mm);
        }
        return vma ? -ENOMEM : -ESRCH;
}

struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
                                                  size_t data_size,
                                                  size_t offsets_size,
                                                  size_t extra_buffers_size,
                                                  int is_async)
{
        struct rb_node *n = alloc->free_buffers.rb_node;
        struct binder_buffer *buffer;
        size_t buffer_size;
        struct rb_node *best_fit = NULL;
        void *has_page_addr;
        void *end_page_addr;
        size_t size, data_offsets_size;
        int ret;

        if (alloc->vma == NULL) {
                pr_err("%d: binder_alloc_buf, no vma\n",
                       alloc->pid);
                return ERR_PTR(-ESRCH);
        }

        data_offsets_size = ALIGN(data_size, sizeof(void *)) +
                ALIGN(offsets_size, sizeof(void *));

        if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                "%d: got transaction with invalid size %zd-%zd\n",
                                alloc->pid, data_size, offsets_size);
                return ERR_PTR(-EINVAL);
        }
        size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
        if (size < data_offsets_size || size < extra_buffers_size) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                "%d: got transaction with invalid extra_buffers_size %zd\n",
                                alloc->pid, extra_buffers_size);
                return ERR_PTR(-EINVAL);
        }
        if (is_async &&
            alloc->free_async_space < size + sizeof(struct binder_buffer)) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                             "%d: binder_alloc_buf size %zd failed, no async space left\n",
                              alloc->pid, size);
                return ERR_PTR(-ENOSPC);
        }

        while (n) {
                buffer = rb_entry(n, struct binder_buffer, rb_node);
                BUG_ON(!buffer->free);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);

                if (size < buffer_size) {
                        best_fit = n;
                        n = n->rb_left;
                } else if (size > buffer_size)
                        n = n->rb_right;
                else {
                        best_fit = n;
                        break;
                }
        }
        if (best_fit == NULL) {
                size_t allocated_buffers = 0;
                size_t largest_alloc_size = 0;
                size_t total_alloc_size = 0;
                size_t free_buffers = 0;
                size_t largest_free_size = 0;
                size_t total_free_size = 0;

                for (n = rb_first(&alloc->allocated_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        allocated_buffers++;
                        total_alloc_size += buffer_size;
                        if (buffer_size > largest_alloc_size)
                                largest_alloc_size = buffer_size;
                }
                for (n = rb_first(&alloc->free_buffers); n != NULL;
                     n = rb_next(n)) {
                        buffer = rb_entry(n, struct binder_buffer, rb_node);
                        buffer_size = binder_alloc_buffer_size(alloc, buffer);
                        free_buffers++;
                        total_free_size += buffer_size;
                        if (buffer_size > largest_free_size)
                                largest_free_size = buffer_size;
                }
                pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
                        alloc->pid, size);
                pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
                       total_alloc_size, allocated_buffers, largest_alloc_size,
                       total_free_size, free_buffers, largest_free_size);
                return ERR_PTR(-ENOSPC);
        }
        if (n == NULL) {
                buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
                buffer_size = binder_alloc_buffer_size(alloc, buffer);
        }

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
                      alloc->pid, size, buffer, buffer_size);

        has_page_addr =
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
        if (n == NULL) {
                if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
                        buffer_size = size; /* no room for other buffers */
                else
                        buffer_size = size + sizeof(struct binder_buffer);
        }
        end_page_addr =
                (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
        if (end_page_addr > has_page_addr)
                end_page_addr = has_page_addr;
        ret = binder_update_page_range(alloc, 1,
            (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
        if (ret)
                return ERR_PTR(ret);

        rb_erase(best_fit, &alloc->free_buffers);
        buffer->free = 0;
        buffer->free_in_progress = 0;
        binder_insert_allocated_buffer_locked(alloc, buffer);
        if (buffer_size != size) {
                struct binder_buffer *new_buffer = (void *)buffer->data + size;

                list_add(&new_buffer->entry, &buffer->entry);
                new_buffer->free = 1;
                binder_insert_free_buffer(alloc, new_buffer);
        }
        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_alloc_buf size %zd got %pK\n",
                      alloc->pid, size, buffer);
        buffer->data_size = data_size;
        buffer->offsets_size = offsets_size;
        buffer->async_transaction = is_async;
        buffer->extra_buffers_size = extra_buffers_size;
        if (is_async) {
                alloc->free_async_space -= size + sizeof(struct binder_buffer);
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_alloc_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
        }
        return buffer;
}
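
/*
 * Summary of the allocation path above: a best-fit search of the free-buffer
 * rb-tree (ordered by size) picks the smallest free chunk that satisfies the
 * request, the backing pages for the new buffer are populated on demand via
 * binder_update_page_range(), and any remainder large enough to hold another
 * struct binder_buffer is split off and re-inserted as a new free buffer.
 */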

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       size of the user-supplied offsets area
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to
 * pointer-sized boundary)
 *
 * Return: The allocated buffer or an ERR_PTR-encoded errno on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
                                           size_t extra_buffers_size,
                                           int is_async)
{
        struct binder_buffer *buffer;

        mutex_lock(&alloc->mutex);
        buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
                                             extra_buffers_size, is_async);
        mutex_unlock(&alloc->mutex);
        return buffer;
}
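
/*
 * Illustrative sketch (hypothetical caller, not part of the driver): because
 * binder_alloc_new_buf() reports failure with ERR_PTR-encoded errnos rather
 * than NULL, callers are expected to test the result with IS_ERR()/PTR_ERR()
 * and release the buffer with binder_alloc_free_buf() when done with it.
 */
static void __maybe_unused binder_alloc_example_round_trip(
                struct binder_alloc *alloc)
{
        struct binder_buffer *buffer;

        /* 128 bytes of data, no offsets, no extra objects, synchronous */
        buffer = binder_alloc_new_buf(alloc, 128, 0, 0, 0);
        if (IS_ERR(buffer)) {
                /* -ESRCH, -EINVAL or -ENOSPC depending on the failure */
                pr_err("%d: example alloc failed %ld\n",
                       alloc->pid, PTR_ERR(buffer));
                return;
        }
        /* ... copy transaction data into buffer->data here ... */
        binder_alloc_free_buf(alloc, buffer);
}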

static void *buffer_start_page(struct binder_buffer *buffer)
{
        return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
        return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}

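/*
 * A free buffer's backing pages can only be released if they are not shared
 * with its neighbours in alloc->buffers: free_page_start/free_page_end below
 * track whether the first and last page of @buffer are still needed by the
 * previous or next buffer, and only the unshared span is handed to
 * binder_update_page_range() for freeing.
 */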
static void binder_delete_free_buffer(struct binder_alloc *alloc,
                                      struct binder_buffer *buffer)
{
        struct binder_buffer *prev, *next = NULL;
        int free_page_end = 1;
        int free_page_start = 1;

        BUG_ON(alloc->buffers.next == &buffer->entry);
        prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
        BUG_ON(!prev->free);
        if (buffer_end_page(prev) == buffer_start_page(buffer)) {
                free_page_start = 0;
                if (buffer_end_page(prev) == buffer_end_page(buffer))
                        free_page_end = 0;
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK share page with %pK\n",
                                   alloc->pid, buffer, prev);
        }

        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                next = list_entry(buffer->entry.next,
                                  struct binder_buffer, entry);
                if (buffer_start_page(next) == buffer_end_page(buffer)) {
                        free_page_end = 0;
                        if (buffer_start_page(next) ==
                            buffer_start_page(buffer))
                                free_page_start = 0;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                           "%d: merge free, buffer %pK share page with %pK\n",
                                           alloc->pid, buffer, next);
                }
        }
        list_del(&buffer->entry);
        if (free_page_start || free_page_end) {
                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                   "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
                                   alloc->pid, buffer, free_page_start ? "" : " end",
                                   free_page_end ? "" : " start", prev, next);
                binder_update_page_range(alloc, 0, free_page_start ?
                        buffer_start_page(buffer) : buffer_end_page(buffer),
                        (free_page_end ? buffer_end_page(buffer) :
                        buffer_start_page(buffer)) + PAGE_SIZE, NULL);
        }
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
                                   struct binder_buffer *buffer)
{
        size_t size, buffer_size;

        buffer_size = binder_alloc_buffer_size(alloc, buffer);

        size = ALIGN(buffer->data_size, sizeof(void *)) +
                ALIGN(buffer->offsets_size, sizeof(void *)) +
                ALIGN(buffer->extra_buffers_size, sizeof(void *));

        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                     "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
                      alloc->pid, buffer, size, buffer_size);

        BUG_ON(buffer->free);
        BUG_ON(size > buffer_size);
        BUG_ON(buffer->transaction != NULL);
        BUG_ON((void *)buffer < alloc->buffer);
        BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);

        if (buffer->async_transaction) {
                alloc->free_async_space += size + sizeof(struct binder_buffer);

                binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
                             "%d: binder_free_buf size %zd async free %zd\n",
                              alloc->pid, size, alloc->free_async_space);
        }

        binder_update_page_range(alloc, 0,
                (void *)PAGE_ALIGN((uintptr_t)buffer->data),
                (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
                NULL);

        rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
        buffer->free = 1;
        if (!list_is_last(&buffer->entry, &alloc->buffers)) {
                struct binder_buffer *next = list_entry(buffer->entry.next,
                                                struct binder_buffer, entry);

                if (next->free) {
                        rb_erase(&next->rb_node, &alloc->free_buffers);
                        binder_delete_free_buffer(alloc, next);
                }
        }
        if (alloc->buffers.next != &buffer->entry) {
                struct binder_buffer *prev = list_entry(buffer->entry.prev,
                                                struct binder_buffer, entry);

                if (prev->free) {
                        binder_delete_free_buffer(alloc, buffer);
                        rb_erase(&prev->rb_node, &alloc->free_buffers);
                        buffer = prev;
                }
        }
        binder_insert_free_buffer(alloc, buffer);
}

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:  binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
                           struct binder_buffer *buffer)
{
        mutex_lock(&alloc->mutex);
        binder_free_buf_locked(alloc, buffer);
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma:   vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                              struct vm_area_struct *vma)
{
        int ret;
        struct vm_struct *area;
        const char *failure_string;
        struct binder_buffer *buffer;

        mutex_lock(&binder_alloc_mmap_lock);
        if (alloc->buffer) {
                ret = -EBUSY;
                failure_string = "already mapped";
                goto err_already_mapped;
        }

        area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
        if (area == NULL) {
                ret = -ENOMEM;
                failure_string = "get_vm_area";
                goto err_get_vm_area_failed;
        }
        alloc->buffer = area->addr;
        alloc->user_buffer_offset =
                vma->vm_start - (uintptr_t)alloc->buffer;
        mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
        if (cache_is_vipt_aliasing()) {
                while (CACHE_COLOUR(
                                (vma->vm_start ^ (uint32_t)alloc->buffer))) {
                        pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
                                __func__, alloc->pid, vma->vm_start,
                                vma->vm_end, alloc->buffer);
                        vma->vm_start += PAGE_SIZE;
                }
        }
#endif
        alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
                                   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
                ret = -ENOMEM;
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
        alloc->buffer_size = vma->vm_end - vma->vm_start;

        if (binder_update_page_range(alloc, 1, alloc->buffer,
                                     alloc->buffer + PAGE_SIZE, vma)) {
                ret = -ENOMEM;
                failure_string = "alloc small buf";
                goto err_alloc_small_buf_failed;
        }
        buffer = alloc->buffer;
        INIT_LIST_HEAD(&alloc->buffers);
        list_add(&buffer->entry, &alloc->buffers);
        buffer->free = 1;
        binder_insert_free_buffer(alloc, buffer);
        alloc->free_async_space = alloc->buffer_size / 2;
        barrier();
        alloc->vma = vma;
        alloc->vma_vm_mm = vma->vm_mm;

        return 0;

err_alloc_small_buf_failed:
        kfree(alloc->pages);
        alloc->pages = NULL;
err_alloc_pages_failed:
        mutex_lock(&binder_alloc_mmap_lock);
        vfree(alloc->buffer);
        alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
        mutex_unlock(&binder_alloc_mmap_lock);
        pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
               alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
        return ret;
}
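
/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * user_buffer_offset recorded above is the constant delta between a buffer's
 * kernel mapping and the userspace mapping set up by binder_mmap(), so
 * translating a kernel data pointer into the address the client sees is a
 * single addition.
 */
static inline uintptr_t binder_alloc_example_user_addr(
                struct binder_alloc *alloc, struct binder_buffer *buffer)
{
        return (uintptr_t)buffer->data + alloc->user_buffer_offset;
}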


void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int buffers, page_count;

        BUG_ON(alloc->vma);

        buffers = 0;
        mutex_lock(&alloc->mutex);
        while ((n = rb_first(&alloc->allocated_buffers))) {
                struct binder_buffer *buffer;

                buffer = rb_entry(n, struct binder_buffer, rb_node);

                /* Transaction should already have been freed */
                BUG_ON(buffer->transaction);

                binder_free_buf_locked(alloc, buffer);
                buffers++;
        }

        page_count = 0;
        if (alloc->pages) {
                int i;

                for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
                        void *page_addr;

                        if (!alloc->pages[i])
                                continue;

                        page_addr = alloc->buffer + i * PAGE_SIZE;
                        binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
                                     "%s: %d: page %d at %pK not freed\n",
                                     __func__, alloc->pid, i, page_addr);
                        unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
                        __free_page(alloc->pages[i]);
                        page_count++;
                }
                kfree(alloc->pages);
                vfree(alloc->buffer);
        }
        mutex_unlock(&alloc->mutex);

        binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "%s: %d buffers %d, pages %d\n",
                     __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
                                struct binder_buffer *buffer)
{
        seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
                   prefix, buffer->debug_id, buffer->data,
                   buffer->data_size, buffer->offsets_size,
                   buffer->extra_buffers_size,
                   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
                                  struct binder_alloc *alloc)
{
        struct rb_node *n;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                print_binder_buffer(m, "  buffer",
                                    rb_entry(n, struct binder_buffer, rb_node));
        mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
        struct rb_node *n;
        int count = 0;

        mutex_lock(&alloc->mutex);
        for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
                count++;
        mutex_unlock(&alloc->mutex);
        return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
        WRITE_ONCE(alloc->vma, NULL);
        WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
        alloc->tsk = current->group_leader;
        alloc->pid = current->group_leader->pid;
        mutex_init(&alloc->mutex);
}