FROMGIT: binder: malloc new_buffer outside of locks
Preallocate new_buffer before acquiring the alloc->mutex and hand it
down to binder_alloc_new_buf_locked(). The new buffer will be used in
the vast majority of requests (measured at 98.2% in field data). The
buffer is discarded otherwise. This change is required in preparation
for transitioning alloc->mutex into a spinlock in subsequent commits.
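Below is a minimal sketch of the pattern this patch adopts, kept separate
from the binder code itself; every name in it (struct pool, struct item,
find_best_fit(), insert_free(), item_alloc()) is a hypothetical placeholder.
It shows why the kzalloc() moves outside the lock: GFP_KERNEL may sleep, which
is fine under a mutex but not under the spinlock this series moves towards,
so the caller preallocates and the locked helper either consumes the spare
descriptor or frees it on the way out.

	/* Sketch only; placeholder names, not the binder implementation. */
	#include <linux/slab.h>
	#include <linux/mutex.h>
	#include <linux/err.h>

	struct pool { struct mutex lock; /* free-list bookkeeping */ };
	struct item { size_t size; /* list/rb-tree linkage */ };

	/* Placeholders standing in for the real free-list operations. */
	static struct item *find_best_fit(struct pool *pool, size_t size);
	static void insert_free(struct pool *pool, struct item *spare);

	/* Consumes @spare when the best-fit item must be split; otherwise
	 * frees it before returning, so the caller never touches it again. */
	static struct item *item_alloc_locked(struct pool *pool,
					      struct item *spare, size_t size)
	{
		struct item *item = find_best_fit(pool, size);

		if (!item) {
			item = ERR_PTR(-ENOSPC);
			goto out;
		}
		if (item->size != size) {
			/* Use the preallocated node for the leftover space. */
			spare->size = item->size - size;
			insert_free(pool, spare);
			item->size = size;
			spare = NULL;	/* consumed, skip the kfree() below */
		}
	out:
		kfree(spare);		/* discard if unused; kfree(NULL) is a no-op */
		return item;
	}

	static struct item *item_alloc(struct pool *pool, size_t size)
	{
		struct item *item, *spare;

		/* GFP_KERNEL may sleep, so allocate before taking the lock. */
		spare = kzalloc(sizeof(*spare), GFP_KERNEL);
		if (!spare)
			return ERR_PTR(-ENOMEM);

		mutex_lock(&pool->lock);
		item = item_alloc_locked(pool, spare, size);
		mutex_unlock(&pool->lock);

		return item;
	}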
Signed-off-by: Carlos Llamas <cmllamas@google.com>
Reviewed-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/r/20231201172212.1813387-18-cmllamas@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Bug: 254650075
(cherry picked from commit c7ac30fad18231a1637d38aa8a97d6b4788ed8ad
https://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
char-misc-next)
Change-Id: Ib7c8eb3c53e8383694a118fabc776a6a22783c75
Signed-off-by: Carlos Llamas <cmllamas@google.com>
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index eddfe6b..e0e1217 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -396,17 +396,19 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc)
return false;
}
+/* Callers preallocate @new_buffer, it is freed by this function if unused */
static struct binder_buffer *binder_alloc_new_buf_locked(
struct binder_alloc *alloc,
+ struct binder_buffer *new_buffer,
size_t size,
int is_async)
{
struct rb_node *n = alloc->free_buffers.rb_node;
- struct binder_buffer *buffer;
- size_t buffer_size;
struct rb_node *best_fit = NULL;
+ struct binder_buffer *buffer;
unsigned long has_page_addr;
unsigned long end_page_addr;
+ size_t buffer_size;
int ret;
trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async);
@@ -415,7 +417,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd failed, no async space left\n",
alloc->pid, size);
- return ERR_PTR(-ENOSPC);
+ buffer = ERR_PTR(-ENOSPC);
+ goto out;
}
while (n) {
@@ -439,7 +442,8 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
"%d: binder_alloc_buf size %zd failed, no address space\n",
alloc->pid, size);
debug_no_space_locked(alloc);
- return ERR_PTR(-ENOSPC);
+ buffer = ERR_PTR(-ENOSPC);
+ goto out;
}
if (n == NULL) {
@@ -458,22 +462,17 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
end_page_addr = has_page_addr;
ret = binder_allocate_page_range(alloc, PAGE_ALIGN(buffer->user_data),
end_page_addr);
- if (ret)
- return ERR_PTR(ret);
+ if (ret) {
+ buffer = ERR_PTR(ret);
+ goto out;
+ }
if (buffer_size != size) {
- struct binder_buffer *new_buffer;
-
- new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!new_buffer) {
- pr_err("%s: %d failed to alloc new buffer struct\n",
- __func__, alloc->pid);
- goto err_alloc_buf_struct_failed;
- }
new_buffer->user_data = buffer->user_data + size;
list_add(&new_buffer->entry, &buffer->entry);
new_buffer->free = 1;
binder_insert_free_buffer(alloc, new_buffer);
+ new_buffer = NULL;
}
rb_erase(best_fit, &alloc->free_buffers);
@@ -494,12 +493,10 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
buffer->oneway_spam_suspect = true;
}
+out:
+ /* Discard possibly unused new_buffer */
+ kfree(new_buffer);
return buffer;
-
-err_alloc_buf_struct_failed:
- binder_free_page_range(alloc, PAGE_ALIGN(buffer->user_data),
- end_page_addr);
- return ERR_PTR(-ENOMEM);
}
/* Calculate the sanitized total size, returns 0 for invalid request */
@@ -545,7 +542,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t extra_buffers_size,
int is_async)
{
- struct binder_buffer *buffer;
+ struct binder_buffer *buffer, *next;
size_t size;
/* Check binder_alloc is fully initialized */
@@ -565,8 +562,13 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
return ERR_PTR(-EINVAL);
}
+ /* Preallocate the next buffer */
+ next = kzalloc(sizeof(*next), GFP_KERNEL);
+ if (!next)
+ return ERR_PTR(-ENOMEM);
+
mutex_lock(&alloc->mutex);
- buffer = binder_alloc_new_buf_locked(alloc, size, is_async);
+ buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
if (IS_ERR(buffer)) {
mutex_unlock(&alloc->mutex);
goto out;