/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);
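
/*
 * The mask above is a bitwise OR of the BINDER_DEBUG_* flags declared in the
 * enum.  As a rough usage sketch (the exact sysfs path depends on how the
 * binder code is built into the kernel, so treat the module name below as an
 * assumption), the mask can typically be changed at runtime through the
 * writable module parameter created above, e.g.:
 *
 *	echo 12 > /sys/module/binder_alloc/parameters/debug_mask
 *
 * which would enable BINDER_DEBUG_BUFFER_ALLOC (1U << 2 = 4) and
 * BINDER_DEBUG_BUFFER_ALLOC_ASYNC (1U << 3 = 8).
 */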

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer +
		       alloc->buffer_size - (void *)buffer->data;
	return (size_t)binder_buffer_next(buffer) - (size_t)buffer->data;
}
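
/*
 * Illustration of the size computation above (hypothetical addresses, not
 * taken from a real trace): if a binder_buffer header lives at 0x1000, the
 * next header in alloc->buffers starts at 0x1400, and
 * offsetof(struct binder_buffer, data) happened to be 0x40, then the size
 * reported is
 *
 *	0x1400 - (0x1000 + 0x40) = 0x3c0 bytes
 *
 * i.e. the gap between this buffer's data[] array and the next header.  For
 * the last buffer in the list, the end of the mapped area is used in place
 * of the next header.
 */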

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer < buffer)
			p = &parent->rb_left;
		else if (new_buffer > buffer)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		uintptr_t user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;
	struct binder_buffer *kern_ptr;

	kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
		- offsetof(struct binder_buffer, data));

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (kern_ptr < buffer)
			n = n->rb_left;
		else if (kern_ptr > buffer)
			n = n->rb_right;
		else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer twice
			 */
			if (buffer->free_in_progress) {
				pr_err("%d:%d FREE_BUFFER u%016llx user freed buffer twice\n",
				       alloc->pid, current->pid, (u64)user_ptr);
				return NULL;
			}
			buffer->free_in_progress = 1;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc:	binder_alloc for this proc
 * @user_ptr:	User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return buffer corresponding to
 * that user pointer. Search the rb tree for buffer that matches user data
 * pointer. The buffer is marked free-in-progress, so NULL is returned if it
 * has already been handed out for freeing.
 *
 * Return: Pointer to buffer or NULL
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   uintptr_t user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
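
/*
 * Rough usage sketch (illustrative only; the real caller lives in binder.c's
 * BC_FREE_BUFFER handling and the variable names here are hypothetical): the
 * user pointer from the command stream is first translated and claimed, then
 * the buffer is released once the kernel-side bookkeeping is done:
 *
 *	buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
 *	if (buffer == NULL)
 *		break;		// bad pointer or double free
 *	...
 *	binder_alloc_free_buf(&proc->alloc, buffer);
 */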

static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
				    void *start, void *end,
				    struct vm_area_struct *vma)
{
	void *page_addr;
	unsigned long user_page_addr;
	struct page **page;
	struct mm_struct *mm;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: %s pages %pK-%pK\n", alloc->pid,
			   allocate ? "allocate" : "free", start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, allocate, start, end);

	if (vma)
		mm = NULL;
	else
		mm = get_task_mm(alloc->tsk);

	if (mm) {
		down_write(&mm->mmap_sem);
		vma = alloc->vma;
		if (vma && mm != alloc->vma_vm_mm) {
			pr_err("%d: vma mm and task mm mismatch\n",
			       alloc->pid);
			vma = NULL;
		}
	}

	if (allocate == 0)
		goto free_range;

	if (vma == NULL) {
		pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
		       alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;

		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];

		BUG_ON(*page);
		*page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
		if (*page == NULL) {
			pr_err("%d: binder_alloc_buf failed for page at %pK\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		ret = map_kernel_range_noflush((unsigned long)page_addr,
					       PAGE_SIZE, PAGE_KERNEL, page);
		flush_cache_vmap((unsigned long)page_addr,
				 (unsigned long)page_addr + PAGE_SIZE);
		if (ret != 1) {
			pr_err("%d: binder_alloc_buf failed to map page at %pK in kernel\n",
			       alloc->pid, page_addr);
			goto err_map_kernel_failed;
		}
		user_page_addr =
			(uintptr_t)page_addr + alloc->user_buffer_offset;
		ret = vm_insert_page(vma, user_page_addr, page[0]);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, user_page_addr);
			goto err_vm_insert_page_failed;
		}
		/* vm_insert_page does not seem to increment the refcount */
	}
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return 0;

free_range:
	for (page_addr = end - PAGE_SIZE; page_addr >= start;
	     page_addr -= PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (vma)
			zap_page_range(vma, (uintptr_t)page_addr +
				alloc->user_buffer_offset, PAGE_SIZE);
err_vm_insert_page_failed:
		unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
err_map_kernel_failed:
		__free_page(*page);
		*page = NULL;
err_alloc_page_failed:
		;
	}
err_no_vma:
	if (mm) {
		up_write(&mm->mmap_sem);
		mmput(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}
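
/*
 * The helper above is used in two directions by the rest of this file: with
 * allocate == 1 and a page-aligned [start, end) range it backs a buffer with
 * pages and maps them into both the kernel area and the task's vma; with
 * allocate == 0 it tears that mapping down again.  For example (mirroring
 * the calls made later in this file):
 *
 *	// back the pages covering a new buffer's data
 *	binder_update_page_range(alloc, 1,
 *		(void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr,
 *		NULL);
 *
 *	// release them again when the buffer is freed
 *	binder_update_page_range(alloc, 0,
 *		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
 *		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
 *		NULL);
 */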

struct binder_buffer *binder_alloc_new_buf_locked(struct binder_alloc *alloc,
						  size_t data_size,
						  size_t offsets_size,
						  size_t extra_buffers_size,
						  int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	void *has_page_addr;
	void *end_page_addr;
	size_t size, data_offsets_size;
	int ret;

	if (alloc->vma == NULL) {
		pr_err("%d: binder_alloc_buf, no vma\n",
		       alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));

	if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd\n",
				   alloc->pid, data_size, offsets_size);
		return ERR_PTR(-EINVAL);
	}
	size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid extra_buffers_size %zd\n",
				   alloc->pid, extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}
	if (is_async &&
	    alloc->free_async_space < size + sizeof(struct binder_buffer)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: binder_alloc_buf size %zd failed, no async space left\n",
			      alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size)
			n = n->rb_right;
		else {
			best_fit = n;
			break;
		}
	}
	if (best_fit == NULL) {
		size_t allocated_buffers = 0;
		size_t largest_alloc_size = 0;
		size_t total_alloc_size = 0;
		size_t free_buffers = 0;
		size_t largest_free_size = 0;
		size_t total_free_size = 0;

		for (n = rb_first(&alloc->allocated_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			allocated_buffers++;
			total_alloc_size += buffer_size;
			if (buffer_size > largest_alloc_size)
				largest_alloc_size = buffer_size;
		}
		for (n = rb_first(&alloc->free_buffers); n != NULL;
		     n = rb_next(n)) {
			buffer = rb_entry(n, struct binder_buffer, rb_node);
			buffer_size = binder_alloc_buffer_size(alloc, buffer);
			free_buffers++;
			total_free_size += buffer_size;
			if (buffer_size > largest_free_size)
				largest_free_size = buffer_size;
		}
		pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
			alloc->pid, size);
		pr_err("allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
		       total_alloc_size, allocated_buffers, largest_alloc_size,
		       total_free_size, free_buffers, largest_free_size);
		return ERR_PTR(-ENOSPC);
	}
	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
		      alloc->pid, size, buffer, buffer_size);

	has_page_addr =
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
	if (n == NULL) {
		if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
			buffer_size = size; /* no room for other buffers */
		else
			buffer_size = size + sizeof(struct binder_buffer);
	}
	end_page_addr =
		(void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_update_page_range(alloc, 1,
	    (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL);
	if (ret)
		return ERR_PTR(ret);

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->free_in_progress = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	if (buffer_size != size) {
		struct binder_buffer *new_buffer = (void *)buffer->data + size;

		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_alloc_buf size %zd got %pK\n",
		      alloc->pid, size, buffer);
	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->async_transaction = is_async;
	buffer->extra_buffers_size = extra_buffers_size;
	if (is_async) {
		alloc->free_async_space -= size + sizeof(struct binder_buffer);
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_alloc_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}
	return buffer;
}

/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (eg, security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel version of the buffer pointer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or an ERR_PTR()-encoded errno on failure
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
					     extra_buffers_size, is_async);
	mutex_unlock(&alloc->mutex);
	return buffer;
}
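
/*
 * Rough usage sketch (illustrative only; the real caller is the transaction
 * path in binder.c, and the variable names here are hypothetical):
 *
 *	buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
 *				      tr->offsets_size, extra_buffers_size,
 *				      !!(tr->flags & TF_ONE_WAY));
 *	if (IS_ERR(buffer))
 *		return PTR_ERR(buffer);	// -ESRCH, -EINVAL or -ENOSPC
 *
 * Note that errors come back as ERR_PTR() values, not NULL.
 */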

static void *buffer_start_page(struct binder_buffer *buffer)
{
	return (void *)((uintptr_t)buffer & PAGE_MASK);
}

static void *buffer_end_page(struct binder_buffer *buffer)
{
	return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
}
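
/*
 * Illustration (hypothetical numbers, assuming 4 KiB pages and a 0x40-byte
 * struct binder_buffer): for a header placed at kernel address 0x2ff0,
 *
 *	buffer_start_page() = 0x2ff0 & PAGE_MASK              = 0x2000
 *	buffer_end_page()   = (0x2ff0 + 0x40 - 1) & PAGE_MASK = 0x3000
 *
 * i.e. the header straddles two pages, which is exactly the case
 * binder_delete_free_buffer() below has to consider when deciding whether a
 * page can be released.
 */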

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	int free_page_end = 1;
	int free_page_start = 1;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (buffer_end_page(prev) == buffer_start_page(buffer)) {
		free_page_start = 0;
		if (buffer_end_page(prev) == buffer_end_page(buffer))
			free_page_end = 0;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %pK share page with %pK\n",
				   alloc->pid, buffer, prev);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_end_page(buffer)) {
			free_page_end = 0;
			if (buffer_start_page(next) ==
			    buffer_start_page(buffer))
				free_page_start = 0;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %pK share page with %pK\n",
					   alloc->pid, buffer, next);
		}
	}
	list_del(&buffer->entry);
	if (free_page_start || free_page_end) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			     "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
			     alloc->pid, buffer, free_page_start ? "" : " end",
			     free_page_end ? "" : " start", prev, next);
		binder_update_page_range(alloc, 0, free_page_start ?
			buffer_start_page(buffer) : buffer_end_page(buffer),
			(free_page_end ? buffer_end_page(buffer) :
			buffer_start_page(buffer)) + PAGE_SIZE, NULL);
	}
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < alloc->buffer);
	BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += size + sizeof(struct binder_buffer);

		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      alloc->pid, size, alloc->free_async_space);
	}

	binder_update_page_range(alloc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}
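
/*
 * Coalescing sketch for the function above: freeing B when its list
 * neighbours A and C are already free
 *
 *	before:  [A free][B used][C free]
 *	after:   [A free..................]
 *
 * The rb_node of the absorbed neighbour is removed from free_buffers,
 * binder_delete_free_buffer() drops the redundant header (releasing any page
 * that is no longer shared), and the surviving buffer is re-inserted into the
 * free tree with its new, larger size.
 */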

/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc:	binder_alloc for this proc
 * @buffer:	kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc:	alloc structure for this proc
 * @vma:	vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 *      0 = success
 *      -EBUSY = address space already mapped
 *      -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	struct vm_struct *area;
	const char *failure_string;
	struct binder_buffer *buffer;

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}

	area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
	if (area == NULL) {
		ret = -ENOMEM;
		failure_string = "get_vm_area";
		goto err_get_vm_area_failed;
	}
	alloc->buffer = area->addr;
	alloc->user_buffer_offset =
		vma->vm_start - (uintptr_t)alloc->buffer;
	mutex_unlock(&binder_alloc_mmap_lock);

#ifdef CONFIG_CPU_CACHE_VIPT
	if (cache_is_vipt_aliasing()) {
		while (CACHE_COLOUR(
				(vma->vm_start ^ (uint32_t)alloc->buffer))) {
			pr_info("%s: %d %lx-%lx maps %pK bad alignment\n",
				__func__, alloc->pid, vma->vm_start,
				vma->vm_end, alloc->buffer);
			vma->vm_start += PAGE_SIZE;
		}
	}
#endif
	alloc->pages = kzalloc(sizeof(alloc->pages[0]) *
				   ((vma->vm_end - vma->vm_start) / PAGE_SIZE),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}
	alloc->buffer_size = vma->vm_end - vma->vm_start;

	if (binder_update_page_range(alloc, 1, alloc->buffer,
				     alloc->buffer + PAGE_SIZE, vma)) {
		ret = -ENOMEM;
		failure_string = "alloc small buf";
		goto err_alloc_small_buf_failed;
	}
	buffer = alloc->buffer;
	INIT_LIST_HEAD(&alloc->buffers);
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;
	barrier();
	alloc->vma = vma;
	alloc->vma_vm_mm = vma->vm_mm;

	return 0;

err_alloc_small_buf_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	mutex_lock(&binder_alloc_mmap_lock);
	vfree(alloc->buffer);
	alloc->buffer = NULL;
err_get_vm_area_failed:
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
	pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
	       alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
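
/*
 * Illustration of user_buffer_offset (hypothetical addresses): if the vma
 * handed to mmap() starts at user address 0x7f0000000000 and get_vm_area()
 * returns a kernel area at 0xffffc90000400000, then
 *
 *	user_buffer_offset = 0x7f0000000000 - 0xffffc90000400000
 *
 * and every page backed by binder_update_page_range() is visible at
 *
 *	user_addr = kernel_addr + alloc->user_buffer_offset
 *
 * This is the translation binder_alloc_prepare_to_free() reverses when it
 * turns a user pointer back into a struct binder_buffer.
 */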


void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;

	BUG_ON(alloc->vma);

	buffers = 0;
	mutex_lock(&alloc->mutex);
	while ((n = rb_first(&alloc->allocated_buffers))) {
		struct binder_buffer *buffer;

		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			void *page_addr;

			if (!alloc->pages[i])
				continue;

			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				     "%s: %d: page %d at %pK not freed\n",
				     __func__, alloc->pid, i, page_addr);
			unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
			__free_page(alloc->pages[i]);
			page_count++;
		}
		kfree(alloc->pages);
		vfree(alloc->buffer);
	}
	mutex_unlock(&alloc->mutex);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}


/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	WRITE_ONCE(alloc->vma, NULL);
	WRITE_ONCE(alloc->vma_vm_mm, NULL);
}

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->tsk = current->group_leader;
	alloc->pid = current->group_leader->pid;
	mutex_init(&alloc->mutex);
}
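
/*
 * Lifecycle sketch (illustrative only; the actual calls live in binder.c):
 * binder_open() calls binder_alloc_init(&proc->alloc), binder_mmap() then
 * hands the vma to binder_alloc_mmap_handler(), after which transactions can
 * use binder_alloc_new_buf()/binder_alloc_free_buf().  When the mapping goes
 * away binder_vma_close() calls binder_alloc_vma_close(), and the remaining
 * buffers and pages are torn down later via binder_alloc_deferred_release().
 */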