// SPDX-License-Identifier: GPL-2.0-only
/* binder_alloc.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2017 Google, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/sched/mm.h>
#include <linux/module.h>
#include <linux/rtmutex.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/ratelimit.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/sizes.h>
#include "binder_alloc.h"
#include "binder_trace.h"
#include <trace/hooks/binder.h>

struct list_lru binder_alloc_lru;

static DEFINE_MUTEX(binder_alloc_mmap_lock);

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 1,
	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 2,
	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 3,
};
static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;

module_param_named(debug_mask, binder_alloc_debug_mask,
		   uint, 0644);

#define binder_alloc_debug(mask, x...) \
	do { \
		if (binder_alloc_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.next, struct binder_buffer, entry);
}

static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
{
	return list_entry(buffer->entry.prev, struct binder_buffer, entry);
}

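/*
 * A buffer's size is not stored anywhere; it is the distance from its own
 * start address to the start of the next buffer in the address-ordered
 * list (or, for the last buffer, to the end of the mapped region).
 */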
static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
				       struct binder_buffer *buffer)
{
	if (list_is_last(&buffer->entry, &alloc->buffers))
		return alloc->buffer + alloc->buffer_size - buffer->user_data;
	return binder_buffer_next(buffer)->user_data - buffer->user_data;
}

static void binder_insert_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->free_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;
	size_t buffer_size;
	size_t new_buffer_size;

	BUG_ON(!new_buffer->free);

	new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: add free buffer, size %zd, at %pK\n",
			   alloc->pid, new_buffer_size, new_buffer);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);

		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (new_buffer_size < buffer_size)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
}

static void binder_insert_allocated_buffer_locked(
		struct binder_alloc *alloc, struct binder_buffer *new_buffer)
{
	struct rb_node **p = &alloc->allocated_buffers.rb_node;
	struct rb_node *parent = NULL;
	struct binder_buffer *buffer;

	BUG_ON(new_buffer->free);

	while (*p) {
		parent = *p;
		buffer = rb_entry(parent, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (new_buffer->user_data < buffer->user_data)
			p = &parent->rb_left;
		else if (new_buffer->user_data > buffer->user_data)
			p = &parent->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_buffer->rb_node, parent, p);
	rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
}

static struct binder_buffer *binder_alloc_prepare_to_free_locked(
		struct binder_alloc *alloc,
		unsigned long user_ptr)
{
	struct rb_node *n = alloc->allocated_buffers.rb_node;
	struct binder_buffer *buffer;

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(buffer->free);

		if (user_ptr < buffer->user_data) {
			n = n->rb_left;
		} else if (user_ptr > buffer->user_data) {
			n = n->rb_right;
		} else {
			/*
			 * Guard against user threads attempting to
			 * free the buffer when in use by kernel or
			 * after it's already been freed.
			 */
			if (!buffer->allow_user_free)
				return ERR_PTR(-EPERM);
			buffer->allow_user_free = 0;
			return buffer;
		}
	}
	return NULL;
}

/**
 * binder_alloc_prepare_to_free() - get buffer given user ptr
 * @alloc: binder_alloc for this proc
 * @user_ptr: User pointer to buffer data
 *
 * Validate userspace pointer to buffer data and return the buffer
 * corresponding to that user pointer by searching the rb tree of
 * allocated buffers.
 *
 * Return: Pointer to the buffer on success, NULL if no buffer matches
 * @user_ptr, or ERR_PTR(-EPERM) if the matching buffer is not currently
 * freeable by userspace.
 */
struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
						   unsigned long user_ptr)
{
	struct binder_buffer *buffer;

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
	mutex_unlock(&alloc->mutex);
	return buffer;
}

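/*
 * "Freeing" a page range does not return the pages to the system right
 * away; each page is parked on the global binder LRU so the shrinker can
 * reclaim it under memory pressure, and it can be revived cheaply if the
 * range is reallocated before then.
 */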
static void binder_free_page_range(struct binder_alloc *alloc,
				   unsigned long start, unsigned long end)
{
	struct binder_lru_page *page;
	unsigned long page_addr;

	trace_binder_update_page_range(alloc, false, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		size_t index;
		int ret;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		trace_binder_free_lru_start(alloc, index);

		ret = list_lru_add(&binder_alloc_lru, &page->lru);
		WARN_ON(!ret);

		trace_binder_free_lru_end(alloc, index);
	}
}

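/*
 * Ensure the pages backing [start, end) are present: pages still parked
 * on the binder LRU are pulled back off it, and missing pages are freshly
 * allocated and mapped into the owning process's VMA via vm_insert_page().
 */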
static int binder_allocate_page_range(struct binder_alloc *alloc,
				      unsigned long start, unsigned long end)
{
	struct vm_area_struct *vma = NULL;
	struct binder_lru_page *page;
	struct mm_struct *mm = NULL;
	unsigned long page_addr;
	bool need_mm = false;

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: allocate pages %lx-%lx\n",
			   alloc->pid, start, end);

	if (end <= start)
		return 0;

	trace_binder_update_page_range(alloc, true, start, end);

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
		if (!page->page_ptr) {
			need_mm = true;
			break;
		}
	}

	if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
		mm = alloc->vma_vm_mm;

	if (mm) {
		mmap_write_lock(mm);
		vma = alloc->vma;
	}

	if (!vma && need_mm) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
				   alloc->pid);
		goto err_no_vma;
	}

	for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
		int ret;
		bool on_lru;
		size_t index;

		index = (page_addr - alloc->buffer) / PAGE_SIZE;
		page = &alloc->pages[index];

		if (page->page_ptr) {
			trace_binder_alloc_lru_start(alloc, index);

			on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
			WARN_ON(!on_lru);

			trace_binder_alloc_lru_end(alloc, index);
			continue;
		}

		if (WARN_ON(!vma))
			goto err_page_ptr_cleared;

		trace_binder_alloc_page_start(alloc, index);
		page->page_ptr = alloc_page(GFP_KERNEL |
					    __GFP_HIGHMEM |
					    __GFP_ZERO);
		if (!page->page_ptr) {
			pr_err("%d: binder_alloc_buf failed for page at %lx\n",
			       alloc->pid, page_addr);
			goto err_alloc_page_failed;
		}
		page->alloc = alloc;
		INIT_LIST_HEAD(&page->lru);

		ret = vm_insert_page(vma, page_addr, page->page_ptr);
		if (ret) {
			pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
			       alloc->pid, page_addr);
			goto err_vm_insert_page_failed;
		}

		if (index + 1 > alloc->pages_high)
			alloc->pages_high = index + 1;

		trace_binder_alloc_page_end(alloc, index);
	}
	if (mm) {
		mmap_write_unlock(mm);
		mmput_async(mm);
	}
	return 0;

err_vm_insert_page_failed:
	__free_page(page->page_ptr);
	page->page_ptr = NULL;
err_alloc_page_failed:
err_page_ptr_cleared:
	binder_free_page_range(alloc, start, page_addr);
err_no_vma:
	if (mm) {
		mmap_write_unlock(mm);
		mmput_async(mm);
	}
	return vma ? -ENOMEM : -ESRCH;
}

static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
					struct vm_area_struct *vma)
{
	/* pairs with smp_load_acquire in binder_alloc_get_vma() */
	smp_store_release(&alloc->vma, vma);
}

static inline struct vm_area_struct *binder_alloc_get_vma(
		struct binder_alloc *alloc)
{
	/* pairs with smp_store_release in binder_alloc_set_vma() */
	return smp_load_acquire(&alloc->vma);
}

static void debug_no_space_locked(struct binder_alloc *alloc)
{
	size_t largest_alloc_size = 0;
	struct binder_buffer *buffer;
	size_t allocated_buffers = 0;
	size_t largest_free_size = 0;
	size_t total_alloc_size = 0;
	size_t total_free_size = 0;
	size_t free_buffers = 0;
	size_t buffer_size;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		allocated_buffers++;
		total_alloc_size += buffer_size;
		if (buffer_size > largest_alloc_size)
			largest_alloc_size = buffer_size;
	}

	for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
		free_buffers++;
		total_free_size += buffer_size;
		if (buffer_size > largest_free_size)
			largest_free_size = buffer_size;
	}

	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
			   total_alloc_size, allocated_buffers,
			   largest_alloc_size, total_free_size,
			   free_buffers, largest_free_size);
}

static bool debug_low_async_space_locked(struct binder_alloc *alloc)
{
	/*
	 * Find the amount and size of buffers allocated by the current caller.
	 * The idea is that once we cross the threshold, whoever is responsible
	 * for the low async space is likely to try to send another async txn,
	 * and at some point we'll catch them in the act. This is more efficient
	 * than keeping a map per pid.
	 */
	struct binder_buffer *buffer;
	size_t total_alloc_size = 0;
	int pid = current->tgid;
	size_t num_buffers = 0;
	struct rb_node *n;

	for (n = rb_first(&alloc->allocated_buffers); n != NULL;
	     n = rb_next(n)) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		if (buffer->pid != pid)
			continue;
		if (!buffer->async_transaction)
			continue;
		total_alloc_size += binder_alloc_buffer_size(alloc, buffer);
		num_buffers++;
	}

	/*
	 * Warn if this pid has more than 50 transactions, or more than 50% of
	 * async space (which is 25% of total buffer size). Oneway spam is only
	 * detected when the threshold is exceeded.
	 */
	if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n",
				   alloc->pid, pid, num_buffers, total_alloc_size);
		if (!alloc->oneway_spam_detected) {
			alloc->oneway_spam_detected = true;
			return true;
		}
	}
	return false;
}

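/*
 * Best-fit allocation: walk the size-ordered free rb-tree for the
 * smallest free buffer that can hold @size bytes, populate the pages
 * backing it, and return any unused tail of the chosen buffer to the
 * free tree as a new free buffer.
 */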
static struct binder_buffer *binder_alloc_new_buf_locked(
				struct binder_alloc *alloc,
				size_t size,
				int is_async)
{
	struct rb_node *n = alloc->free_buffers.rb_node;
	struct binder_buffer *buffer;
	size_t buffer_size;
	struct rb_node *best_fit = NULL;
	unsigned long has_page_addr;
	unsigned long end_page_addr;
	int ret;

	trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async);

	if (is_async && alloc->free_async_space < size) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: binder_alloc_buf size %zd failed, no async space left\n",
				   alloc->pid, size);
		return ERR_PTR(-ENOSPC);
	}

	while (n) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);
		BUG_ON(!buffer->free);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);

		if (size < buffer_size) {
			best_fit = n;
			n = n->rb_left;
		} else if (size > buffer_size) {
			n = n->rb_right;
		} else {
			best_fit = n;
			break;
		}
	}

	if (unlikely(!best_fit)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf size %zd failed, no address space\n",
				   alloc->pid, size);
		debug_no_space_locked(alloc);
		return ERR_PTR(-ENOSPC);
	}

	if (n == NULL) {
		buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
		buffer_size = binder_alloc_buffer_size(alloc, buffer);
	}

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
			   alloc->pid, size, buffer, buffer_size);

	has_page_addr = (buffer->user_data + buffer_size) & PAGE_MASK;
	WARN_ON(n && buffer_size != size);
	end_page_addr = PAGE_ALIGN(buffer->user_data + size);
	if (end_page_addr > has_page_addr)
		end_page_addr = has_page_addr;
	ret = binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
					 end_page_addr);
	if (ret)
		return ERR_PTR(ret);

	if (buffer_size != size) {
		struct binder_buffer *new_buffer;

		new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
		if (!new_buffer) {
			pr_err("%s: %d failed to alloc new buffer struct\n",
			       __func__, alloc->pid);
			goto err_alloc_buf_struct_failed;
		}
		new_buffer->user_data = buffer->user_data + size;
		list_add(&new_buffer->entry, &buffer->entry);
		new_buffer->free = 1;
		binder_insert_free_buffer(alloc, new_buffer);
	}

	rb_erase(best_fit, &alloc->free_buffers);
	buffer->free = 0;
	buffer->allow_user_free = 0;
	binder_insert_allocated_buffer_locked(alloc, buffer);
	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_alloc_buf size %zd got %pK\n",
			   alloc->pid, size, buffer);
	buffer->async_transaction = is_async;
	buffer->oneway_spam_suspect = false;
	if (is_async) {
		alloc->free_async_space -= size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_alloc_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
		if (alloc->free_async_space < alloc->buffer_size / 10) {
			/*
			 * Start detecting spammers once we have less than 20%
			 * of async space left (which is less than 10% of total
			 * buffer size).
			 */
			buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc);
		} else {
			alloc->oneway_spam_detected = false;
		}
	}

	return buffer;

err_alloc_buf_struct_failed:
	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
			       end_page_addr);
	return ERR_PTR(-ENOMEM);
}

/* Calculate the sanitized total size, returns 0 for invalid request */
static inline size_t sanitized_size(size_t data_size,
				    size_t offsets_size,
				    size_t extra_buffers_size)
{
	size_t total, tmp;

	/* Align to pointer size and check for overflows */
	tmp = ALIGN(data_size, sizeof(void *)) +
		ALIGN(offsets_size, sizeof(void *));
	if (tmp < data_size || tmp < offsets_size)
		return 0;
	total = tmp + ALIGN(extra_buffers_size, sizeof(void *));
	if (total < tmp || total < extra_buffers_size)
		return 0;

	/* Pad 0-sized buffers so they get a unique address */
	total = max(total, sizeof(void *));

	return total;
}

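/*
 * Worked example, assuming a 64-bit kernel (sizeof(void *) == 8):
 * data_size = 20, offsets_size = 16 and extra_buffers_size = 0 gives
 * ALIGN(20, 8) + ALIGN(16, 8) + 0 = 24 + 16 = 40 bytes. An all-zero
 * request is padded up to sizeof(void *) = 8 bytes so that the buffer
 * still occupies space and gets a unique address.
 */
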
/**
 * binder_alloc_new_buf() - Allocate a new binder buffer
 * @alloc:              binder_alloc for this proc
 * @data_size:          size of user data buffer
 * @offsets_size:       user specified buffer offset
 * @extra_buffers_size: size of extra space for meta-data (e.g., security context)
 * @is_async:           buffer for async transaction
 *
 * Allocate a new buffer given the requested sizes. Returns
 * the kernel-space descriptor of the new buffer. The size allocated
 * is the sum of the three given sizes (each rounded up to a
 * pointer-sized boundary).
 *
 * Return: The allocated buffer or %ERR_PTR(-errno) if error
 */
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
					   size_t data_size,
					   size_t offsets_size,
					   size_t extra_buffers_size,
					   int is_async)
{
	struct binder_buffer *buffer;
	size_t size;

	/* Check binder_alloc is fully initialized */
	if (!binder_alloc_get_vma(alloc)) {
		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
				   "%d: binder_alloc_buf, no vma\n",
				   alloc->pid);
		return ERR_PTR(-ESRCH);
	}

	size = sanitized_size(data_size, offsets_size, extra_buffers_size);
	if (unlikely(!size)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: got transaction with invalid size %zd-%zd-%zd\n",
				   alloc->pid, data_size, offsets_size,
				   extra_buffers_size);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&alloc->mutex);
	buffer = binder_alloc_new_buf_locked(alloc, size, is_async);
	if (IS_ERR(buffer)) {
		mutex_unlock(&alloc->mutex);
		goto out;
	}

	buffer->data_size = data_size;
	buffer->offsets_size = offsets_size;
	buffer->extra_buffers_size = extra_buffers_size;
	buffer->pid = current->tgid;
	mutex_unlock(&alloc->mutex);

out:
	return buffer;
}

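/*
 * Page-boundary helpers used when deciding whether a dead buffer's page
 * can be released: neighboring buffers may share a page, in which case
 * the page must stay resident until neither neighbor needs it.
 */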
static unsigned long buffer_start_page(struct binder_buffer *buffer)
{
	return buffer->user_data & PAGE_MASK;
}

static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
{
	return (buffer->user_data - 1) & PAGE_MASK;
}

static void binder_delete_free_buffer(struct binder_alloc *alloc,
				      struct binder_buffer *buffer)
{
	struct binder_buffer *prev, *next = NULL;
	bool to_free = true;

	BUG_ON(alloc->buffers.next == &buffer->entry);
	prev = binder_buffer_prev(buffer);
	BUG_ON(!prev->free);
	if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
		to_free = false;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %lx share page with %lx\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data);
	}

	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		next = binder_buffer_next(buffer);
		if (buffer_start_page(next) == buffer_start_page(buffer)) {
			to_free = false;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%d: merge free, buffer %lx share page with %lx\n",
					   alloc->pid,
					   buffer->user_data,
					   next->user_data);
		}
	}

	if (PAGE_ALIGNED(buffer->user_data)) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer start %lx is page aligned\n",
				   alloc->pid, buffer->user_data);
		to_free = false;
	}

	if (to_free) {
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
				   "%d: merge free, buffer %lx do not share page with %lx or %lx\n",
				   alloc->pid, buffer->user_data,
				   prev->user_data,
				   next ? next->user_data : 0);
		binder_free_page_range(alloc, buffer_start_page(buffer),
				       buffer_start_page(buffer) + PAGE_SIZE);
	}
	list_del(&buffer->entry);
	kfree(buffer);
}

static void binder_free_buf_locked(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_alloc_buffer_size(alloc, buffer);

	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *)) +
		ALIGN(buffer->extra_buffers_size, sizeof(void *));

	binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
			   "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
			   alloc->pid, buffer, size, buffer_size);

	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON(buffer->user_data < alloc->buffer);
	BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);

	if (buffer->async_transaction) {
		alloc->free_async_space += buffer_size;
		binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
				   "%d: binder_free_buf size %zd async free %zd\n",
				   alloc->pid, size, alloc->free_async_space);
	}

	binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
			       (buffer->user_data + buffer_size) & PAGE_MASK);

	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
	buffer->free = 1;
	if (!list_is_last(&buffer->entry, &alloc->buffers)) {
		struct binder_buffer *next = binder_buffer_next(buffer);

		if (next->free) {
			rb_erase(&next->rb_node, &alloc->free_buffers);
			binder_delete_free_buffer(alloc, next);
		}
	}
	if (alloc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = binder_buffer_prev(buffer);

		if (prev->free) {
			binder_delete_free_buffer(alloc, buffer);
			rb_erase(&prev->rb_node, &alloc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(alloc, buffer);
}

static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer);
/**
 * binder_alloc_free_buf() - free a binder buffer
 * @alloc: binder_alloc for this proc
 * @buffer: kernel pointer to buffer
 *
 * Free the buffer allocated via binder_alloc_new_buf()
 */
void binder_alloc_free_buf(struct binder_alloc *alloc,
			   struct binder_buffer *buffer)
{
	/*
	 * We could eliminate the call to binder_alloc_clear_buf()
	 * from binder_alloc_deferred_release() by moving this to
	 * binder_free_buf_locked(). However, that could
	 * increase contention for the alloc mutex if clear_on_free
	 * is used frequently for large buffers. The mutex is not
	 * needed for correctness here.
	 */
	if (buffer->clear_on_free) {
		binder_alloc_clear_buf(alloc, buffer);
		buffer->clear_on_free = false;
	}
	mutex_lock(&alloc->mutex);
	binder_free_buf_locked(alloc, buffer);
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_mmap_handler() - map virtual address space for proc
 * @alloc: alloc structure for this proc
 * @vma: vma passed to mmap()
 *
 * Called by binder_mmap() to initialize the space specified in
 * vma for allocating binder buffers
 *
 * Return:
 * 0 = success
 * -EBUSY = address space already mapped
 * -ENOMEM = failed to map memory to given address space
 */
int binder_alloc_mmap_handler(struct binder_alloc *alloc,
			      struct vm_area_struct *vma)
{
	int ret;
	const char *failure_string;
	struct binder_buffer *buffer;

	if (unlikely(vma->vm_mm != alloc->vma_vm_mm)) {
		ret = -EINVAL;
		failure_string = "invalid vma->vm_mm";
		goto err_invalid_mm;
	}

	mutex_lock(&binder_alloc_mmap_lock);
	if (alloc->buffer_size) {
		ret = -EBUSY;
		failure_string = "already mapped";
		goto err_already_mapped;
	}
	alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
				   SZ_4M);
	mutex_unlock(&binder_alloc_mmap_lock);

	alloc->buffer = vma->vm_start;

	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
			       sizeof(alloc->pages[0]),
			       GFP_KERNEL);
	if (alloc->pages == NULL) {
		ret = -ENOMEM;
		failure_string = "alloc page array";
		goto err_alloc_pages_failed;
	}

	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
	if (!buffer) {
		ret = -ENOMEM;
		failure_string = "alloc buffer struct";
		goto err_alloc_buf_struct_failed;
	}

	buffer->user_data = alloc->buffer;
	list_add(&buffer->entry, &alloc->buffers);
	buffer->free = 1;
	binder_insert_free_buffer(alloc, buffer);
	alloc->free_async_space = alloc->buffer_size / 2;

	/* Signal binder_alloc is fully initialized */
	binder_alloc_set_vma(alloc, vma);

	return 0;

err_alloc_buf_struct_failed:
	kfree(alloc->pages);
	alloc->pages = NULL;
err_alloc_pages_failed:
	alloc->buffer = 0;
	mutex_lock(&binder_alloc_mmap_lock);
	alloc->buffer_size = 0;
err_already_mapped:
	mutex_unlock(&binder_alloc_mmap_lock);
err_invalid_mm:
	binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
			   "%s: %d %lx-%lx %s failed %d\n", __func__,
			   alloc->pid, vma->vm_start, vma->vm_end,
			   failure_string, ret);
	return ret;
}

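/*
 * Tear down the allocator state of a dying proc: release any buffers
 * still marked allocated (their transactions must already be gone), free
 * the buffer descriptors, and hand every resident page back to the
 * system.
 */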
void binder_alloc_deferred_release(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int buffers, page_count;
	struct binder_buffer *buffer;

	buffers = 0;
	mutex_lock(&alloc->mutex);
	BUG_ON(alloc->vma);

	while ((n = rb_first(&alloc->allocated_buffers))) {
		buffer = rb_entry(n, struct binder_buffer, rb_node);

		/* Transaction should already have been freed */
		BUG_ON(buffer->transaction);

		if (buffer->clear_on_free) {
			binder_alloc_clear_buf(alloc, buffer);
			buffer->clear_on_free = false;
		}
		binder_free_buf_locked(alloc, buffer);
		buffers++;
	}

	while (!list_empty(&alloc->buffers)) {
		buffer = list_first_entry(&alloc->buffers,
					  struct binder_buffer, entry);
		WARN_ON(!buffer->free);

		list_del(&buffer->entry);
		WARN_ON_ONCE(!list_empty(&alloc->buffers));
		kfree(buffer);
	}

	page_count = 0;
	if (alloc->pages) {
		int i;

		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			unsigned long page_addr;
			bool on_lru;

			if (!alloc->pages[i].page_ptr)
				continue;

			on_lru = list_lru_del(&binder_alloc_lru,
					      &alloc->pages[i].lru);
			page_addr = alloc->buffer + i * PAGE_SIZE;
			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
					   "%s: %d: page %d at %lx %s\n",
					   __func__, alloc->pid, i, page_addr,
					   on_lru ? "on lru" : "active");
			__free_page(alloc->pages[i].page_ptr);
			page_count++;
		}
		kfree(alloc->pages);
	}
	mutex_unlock(&alloc->mutex);
	if (alloc->vma_vm_mm)
		mmdrop(alloc->vma_vm_mm);

	binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
			   "%s: %d buffers %d, pages %d\n",
			   __func__, alloc->pid, buffers, page_count);
}

static void print_binder_buffer(struct seq_file *m, const char *prefix,
				struct binder_buffer *buffer)
{
	seq_printf(m, "%s %d: %lx size %zd:%zd:%zd %s\n",
		   prefix, buffer->debug_id, buffer->user_data,
		   buffer->data_size, buffer->offsets_size,
		   buffer->extra_buffers_size,
		   buffer->transaction ? "active" : "delivered");
}

/**
 * binder_alloc_print_allocated() - print buffer info
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 *
 * Prints information about every buffer associated with
 * the binder_alloc state to the given seq_file
 */
void binder_alloc_print_allocated(struct seq_file *m,
				  struct binder_alloc *alloc)
{
	struct rb_node *n;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		print_binder_buffer(m, "  buffer",
				    rb_entry(n, struct binder_buffer, rb_node));
	mutex_unlock(&alloc->mutex);
}

/**
 * binder_alloc_print_pages() - print page usage
 * @m:     seq_file for output via seq_printf()
 * @alloc: binder_alloc for this proc
 */
void binder_alloc_print_pages(struct seq_file *m,
			      struct binder_alloc *alloc)
{
	struct binder_lru_page *page;
	int i;
	int active = 0;
	int lru = 0;
	int free = 0;

	mutex_lock(&alloc->mutex);
	/*
	 * Make sure the binder_alloc is fully initialized, otherwise we might
	 * read inconsistent state.
	 */
	if (binder_alloc_get_vma(alloc) != NULL) {
		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
			page = &alloc->pages[i];
			if (!page->page_ptr)
				free++;
			else if (list_empty(&page->lru))
				active++;
			else
				lru++;
		}
	}
	mutex_unlock(&alloc->mutex);
	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
}

/**
 * binder_alloc_get_allocated_count() - return count of buffers
 * @alloc: binder_alloc for this proc
 *
 * Return: count of allocated buffers
 */
int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
{
	struct rb_node *n;
	int count = 0;

	mutex_lock(&alloc->mutex);
	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
		count++;
	mutex_unlock(&alloc->mutex);
	return count;
}

/**
 * binder_alloc_vma_close() - invalidate address space
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_vma_close() when releasing address space.
 * Clears alloc->vma to prevent new incoming transactions from
 * allocating more buffers.
 */
void binder_alloc_vma_close(struct binder_alloc *alloc)
{
	binder_alloc_set_vma(alloc, NULL);
}

/**
 * binder_alloc_free_page() - shrinker callback to free pages
 * @item:   item to free
 * @lru:    list_lru instance holding @item
 * @lock:   lock protecting the item
 * @cb_arg: callback argument
 *
 * Called from list_lru_walk() in binder_shrink_scan() to free
 * up pages when the system is under memory pressure.
 */
enum lru_status binder_alloc_free_page(struct list_head *item,
				       struct list_lru_one *lru,
				       spinlock_t *lock,
				       void *cb_arg)
	__must_hold(lock)
{
	struct mm_struct *mm = NULL;
	struct binder_lru_page *page = container_of(item,
						    struct binder_lru_page,
						    lru);
	struct binder_alloc *alloc;
	unsigned long page_addr;
	size_t index;
	struct vm_area_struct *vma;

	alloc = page->alloc;
	if (!mutex_trylock(&alloc->mutex))
		goto err_get_alloc_mutex_failed;

	if (!page->page_ptr)
		goto err_page_already_freed;

	index = page - alloc->pages;
	page_addr = alloc->buffer + index * PAGE_SIZE;

	mm = alloc->vma_vm_mm;
	if (!mmget_not_zero(mm))
		goto err_mmget;
	if (!mmap_read_trylock(mm))
		goto err_mmap_read_lock_failed;
	vma = vma_lookup(mm, page_addr);
	if (vma && vma != binder_alloc_get_vma(alloc))
		goto err_invalid_vma;

	list_lru_isolate(lru, item);
	spin_unlock(lock);

	if (vma) {
		trace_binder_unmap_user_start(alloc, index);

		zap_page_range(vma, page_addr, PAGE_SIZE);

		trace_binder_unmap_user_end(alloc, index);
	}
	mmap_read_unlock(mm);
	mmput_async(mm);

	trace_binder_unmap_kernel_start(alloc, index);

	__free_page(page->page_ptr);
	page->page_ptr = NULL;

	trace_binder_unmap_kernel_end(alloc, index);

	spin_lock(lock);
	mutex_unlock(&alloc->mutex);
	return LRU_REMOVED_RETRY;

err_invalid_vma:
	mmap_read_unlock(mm);
err_mmap_read_lock_failed:
	mmput_async(mm);
err_mmget:
err_page_already_freed:
	mutex_unlock(&alloc->mutex);
err_get_alloc_mutex_failed:
	return LRU_SKIP;
}

static unsigned long
binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret = list_lru_count(&binder_alloc_lru);
	return ret;
}

static unsigned long
binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long ret;

	ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
			    NULL, sc->nr_to_scan);
	return ret;
}

static struct shrinker binder_shrinker = {
	.count_objects = binder_shrink_count,
	.scan_objects = binder_shrink_scan,
	.seeks = DEFAULT_SEEKS,
};

/**
 * binder_alloc_init() - called by binder_open() for per-proc initialization
 * @alloc: binder_alloc for this proc
 *
 * Called from binder_open() to initialize binder_alloc fields for
 * new binder proc
 */
void binder_alloc_init(struct binder_alloc *alloc)
{
	alloc->pid = current->group_leader->pid;
	alloc->vma_vm_mm = current->mm;
	mmgrab(alloc->vma_vm_mm);
	mutex_init(&alloc->mutex);
	INIT_LIST_HEAD(&alloc->buffers);
}

int binder_alloc_shrinker_init(void)
{
	int ret = list_lru_init(&binder_alloc_lru);

	if (ret == 0) {
		ret = register_shrinker(&binder_shrinker);
		if (ret)
			list_lru_destroy(&binder_alloc_lru);
	}
	return ret;
}

void binder_alloc_shrinker_exit(void)
{
	unregister_shrinker(&binder_shrinker);
	list_lru_destroy(&binder_alloc_lru);
}

/**
 * check_buffer() - verify that buffer/offset is safe to access
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @offset: offset into @buffer data
 * @bytes: bytes to access from offset
 *
 * Check that the @offset/@bytes are within the size of the given
 * @buffer and that the buffer is currently active and not freeable.
 * Offsets must also be multiples of sizeof(u32). The kernel is
 * allowed to touch the buffer in two cases:
 *
 * 1) when the buffer is being created:
 *     (buffer->free == 0 && buffer->allow_user_free == 0)
 * 2) when the buffer is being torn down:
 *     (buffer->free == 0 && buffer->transaction == NULL).
 *
 * Return: true if the buffer is safe to access
 */
static inline bool check_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t offset, size_t bytes)
{
	size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);

	return buffer_size >= bytes &&
		offset <= buffer_size - bytes &&
		IS_ALIGNED(offset, sizeof(u32)) &&
		!buffer->free &&
		(!buffer->allow_user_free || !buffer->transaction);
}

/**
 * binder_alloc_get_page() - get kernel pointer for given buffer offset
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @pgoffp: address to copy final page offset to
 *
 * Lookup the struct page corresponding to the address
 * at @buffer_offset into @buffer->user_data. If @pgoffp is not
 * NULL, the byte-offset into the page is written there.
 *
 * The caller is responsible to ensure that the offset points
 * to a valid address within the @buffer and that @buffer is
 * not freeable by the user. Since it can't be freed, we are
 * guaranteed that the corresponding elements of @alloc->pages[]
 * cannot change.
 *
 * Return: struct page
 */
static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
					  struct binder_buffer *buffer,
					  binder_size_t buffer_offset,
					  pgoff_t *pgoffp)
{
	binder_size_t buffer_space_offset = buffer_offset +
		(buffer->user_data - alloc->buffer);
	pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
	size_t index = buffer_space_offset >> PAGE_SHIFT;
	struct binder_lru_page *lru_page;

	lru_page = &alloc->pages[index];
	*pgoffp = pgoff;
	return lru_page->page_ptr;
}

/**
 * binder_alloc_clear_buf() - zero out buffer
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be cleared
 *
 * memset the given buffer to 0
 */
static void binder_alloc_clear_buf(struct binder_alloc *alloc,
				   struct binder_buffer *buffer)
{
	size_t bytes = binder_alloc_buffer_size(alloc, buffer);
	binder_size_t buffer_offset = 0;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		memset(kptr, 0, size);
		kunmap(page);
		bytes -= size;
		buffer_offset += size;
	}
}

/**
 * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
 * @alloc: binder_alloc for this proc
 * @buffer: binder buffer to be accessed
 * @buffer_offset: offset into @buffer data
 * @from: userspace pointer to source buffer
 * @bytes: bytes to copy
 *
 * Copy bytes from source userspace to target buffer.
 *
 * Return: bytes remaining to be copied
 */
unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
				 struct binder_buffer *buffer,
				 binder_size_t buffer_offset,
				 const void __user *from,
				 size_t bytes)
{
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return bytes;

	while (bytes) {
		unsigned long size;
		unsigned long ret;
		struct page *page;
		pgoff_t pgoff;
		void *kptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		kptr = kmap(page) + pgoff;
		ret = copy_from_user(kptr, from, size);
		kunmap(page);
		if (ret)
			return bytes - size + ret;
		bytes -= size;
		from += size;
		buffer_offset += size;
	}
	return 0;
}

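/*
 * Common helper for copying between a kernel buffer and a binder buffer
 * in either direction. The target pages are not kernel-mapped
 * contiguously, so the copy proceeds page by page through short-lived
 * kmap_atomic() mappings.
 */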
static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
				       bool to_buffer,
				       struct binder_buffer *buffer,
				       binder_size_t buffer_offset,
				       void *ptr,
				       size_t bytes)
{
	/* All copies must be 32-bit aligned and 32-bit size */
	if (!check_buffer(alloc, buffer, buffer_offset, bytes))
		return -EINVAL;

	while (bytes) {
		unsigned long size;
		struct page *page;
		pgoff_t pgoff;
		void *tmpptr;
		void *base_ptr;

		page = binder_alloc_get_page(alloc, buffer,
					     buffer_offset, &pgoff);
		size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
		base_ptr = kmap_atomic(page);
		tmpptr = base_ptr + pgoff;
		if (to_buffer)
			memcpy(tmpptr, ptr, size);
		else
			memcpy(ptr, tmpptr, size);
		/*
		 * kunmap_atomic() takes care of flushing the cache
		 * if this device has VIVT cache arch
		 */
		kunmap_atomic(base_ptr);
		bytes -= size;
		pgoff = 0;
		ptr = ptr + size;
		buffer_offset += size;
	}
	return 0;
}

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
				struct binder_buffer *buffer,
				binder_size_t buffer_offset,
				void *src,
				size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
					   src, bytes);
}

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
				  void *dest,
				  struct binder_buffer *buffer,
				  binder_size_t buffer_offset,
				  size_t bytes)
{
	return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
					   dest, bytes);
}