ANDROID: binder: fix KMI-break due to alloc->lock

Wrap 'struct binder_proc' inside 'struct binder_proc_wrap' to provide
the equivalent of alloc->lock without breaking the KMI: the new
spinlock lives in the wrapper, so the layout of the KMI-frozen
structures is preserved. Also, add convenience APIs to initialize and
acquire/release this new spinlock.
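
For reference, a minimal sketch of the wrapper this relies on. The
actual definition lands in binder_internal.h, whose hunk is not shown
here, so the exact layout is an assumption:

  /* Assumed layout: the spinlock is carved out into a wrapper so
   * that the KMI-frozen 'struct binder_proc' itself is unchanged.
   */
  struct binder_proc_wrap {
  	struct binder_proc proc;
  	spinlock_t lock;
  };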

Without this patch, the following KMI issues show up:

  type 'struct binder_proc' changed
    byte size changed from 616 to 576

  type 'struct binder_alloc' changed
    byte size changed from 152 to 112
    member 'spinlock_t lock' was added
    member 'struct mutex mutex' was removed
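
The binder_alloc_lock()/binder_alloc_unlock()/binder_alloc_trylock()/
binder_alloc_lock_init() helpers used in the hunks below could
plausibly be implemented along these lines; the container_of() chain
and the binder_alloc_to_proc_wrap() helper name are illustrative
assumptions, since the header hunk is not part of this diff:

  static inline struct binder_proc_wrap *
  binder_alloc_to_proc_wrap(struct binder_alloc *alloc)
  {
  	/* 'alloc' is embedded in binder_proc, which in turn is
  	 * embedded in binder_proc_wrap (assumed layout above).
  	 */
  	struct binder_proc *proc;

  	proc = container_of(alloc, struct binder_proc, alloc);
  	return container_of(proc, struct binder_proc_wrap, proc);
  }

  static inline void binder_alloc_lock_init(struct binder_alloc *alloc)
  {
  	spin_lock_init(&binder_alloc_to_proc_wrap(alloc)->lock);
  }

  static inline void binder_alloc_lock(struct binder_alloc *alloc)
  {
  	spin_lock(&binder_alloc_to_proc_wrap(alloc)->lock);
  }

  static inline void binder_alloc_unlock(struct binder_alloc *alloc)
  {
  	spin_unlock(&binder_alloc_to_proc_wrap(alloc)->lock);
  }

  static inline int binder_alloc_trylock(struct binder_alloc *alloc)
  {
  	return spin_trylock(&binder_alloc_to_proc_wrap(alloc)->lock);
  }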

Bug: 254650075
Change-Id: Ic31dc39fb82800a3e47be10a7873cd210f7b60be
Signed-off-by: Carlos Llamas <cmllamas@google.com>
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 8f604ea..e5d76d3 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -23,7 +23,7 @@
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
 #include <linux/sizes.h>
-#include "binder_alloc.h"
+#include "binder_internal.h"
 #include "binder_trace.h"
 #include <trace/hooks/binder.h>
 
@@ -170,9 +170,9 @@ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
 {
 	struct binder_buffer *buffer;
 
-	spin_lock(&alloc->lock);
+	binder_alloc_lock(alloc);
 	buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
-	spin_unlock(&alloc->lock);
+	binder_alloc_unlock(alloc);
 	return buffer;
 }
 
@@ -601,10 +601,10 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	if (!next)
 		return ERR_PTR(-ENOMEM);
 
-	spin_lock(&alloc->lock);
+	binder_alloc_lock(alloc);
 	buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async);
 	if (IS_ERR(buffer)) {
-		spin_unlock(&alloc->lock);
+		binder_alloc_unlock(alloc);
 		goto out;
 	}
 
@@ -612,7 +612,7 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 	buffer->offsets_size = offsets_size;
 	buffer->extra_buffers_size = extra_buffers_size;
 	buffer->pid = current->tgid;
-	spin_unlock(&alloc->lock);
+	binder_alloc_unlock(alloc);
 
 	ret = binder_install_buffer_pages(alloc, buffer, size);
 	if (ret) {
@@ -801,9 +801,9 @@ void binder_alloc_free_buf(struct binder_alloc *alloc,
 		binder_alloc_clear_buf(alloc, buffer);
 		buffer->clear_on_free = false;
 	}
-	spin_lock(&alloc->lock);
+	binder_alloc_lock(alloc);
 	binder_free_buf_locked(alloc, buffer);
-	spin_unlock(&alloc->lock);
+	binder_alloc_unlock(alloc);
 }
 
 /**
@@ -901,7 +901,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 	struct binder_buffer *buffer;
 
 	buffers = 0;
-	spin_lock(&alloc->lock);
+	binder_alloc_lock(alloc);
 	BUG_ON(alloc->vma);
 
 	while ((n = rb_first(&alloc->allocated_buffers))) {
@@ -951,7 +951,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 		}
 		kfree(alloc->pages);
 	}
-	spin_unlock(&alloc->lock);
+	binder_alloc_unlock(alloc);
 	if (alloc->vma_vm_mm)
 		mmdrop(alloc->vma_vm_mm);
 
@@ -974,7 +974,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
 	struct binder_buffer *buffer;
 	struct rb_node *n;
 
-	spin_lock(&alloc->lock);
+	binder_alloc_lock(alloc);
 	for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) {
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		seq_printf(m, "  buffer %d: %lx size %zd:%zd:%zd %s\n",
@@ -984,7 +984,7 @@ void binder_alloc_print_allocated(struct seq_file *m,
 			   buffer->extra_buffers_size,
 			   buffer->transaction ? "active" : "delivered");
 	}
-	spin_unlock(&alloc->lock);
+	binder_alloc_unlock(alloc);
 }
 
 /**
@@ -1001,7 +1001,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 	int lru = 0;
 	int free = 0;
 
-	spin_lock(&alloc->lock);
+	binder_alloc_lock(alloc);
 	/*
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
@@ -1017,7 +1017,7 @@ void binder_alloc_print_pages(struct seq_file *m,
 				lru++;
 		}
 	}
-	spin_unlock(&alloc->lock);
+	binder_alloc_unlock(alloc);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
 }
@@ -1033,10 +1033,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
 	struct rb_node *n;
 	int count = 0;
 
-	spin_lock(&alloc->lock);
+	binder_alloc_lock(alloc);
 	for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
 		count++;
-	spin_unlock(&alloc->lock);
+	binder_alloc_unlock(alloc);
 	return count;
 }
 
@@ -1081,7 +1081,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_mmget;
 	if (!mmap_read_trylock(mm))
 		goto err_mmap_read_lock_failed;
-	if (!spin_trylock(&alloc->lock))
+	if (!binder_alloc_trylock(alloc))
 		goto err_get_alloc_lock_failed;
 	if (!page->page_ptr)
 		goto err_page_already_freed;
@@ -1101,7 +1101,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	trace_binder_unmap_kernel_end(alloc, index);
 
 	list_lru_isolate(lru, item);
-	spin_unlock(&alloc->lock);
+	binder_alloc_unlock(alloc);
 	spin_unlock(lock);
 
 	if (vma) {
@@ -1121,7 +1121,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
 err_invalid_vma:
 err_page_already_freed:
-	spin_unlock(&alloc->lock);
+	binder_alloc_unlock(alloc);
 err_get_alloc_lock_failed:
 	mmap_read_unlock(mm);
 err_mmap_read_lock_failed:
@@ -1161,7 +1161,7 @@ void binder_alloc_init(struct binder_alloc *alloc)
 	alloc->pid = current->group_leader->pid;
 	alloc->vma_vm_mm = current->mm;
 	mmgrab(alloc->vma_vm_mm);
-	spin_lock_init(&alloc->lock);
+	binder_alloc_lock_init(alloc);
 	INIT_LIST_HEAD(&alloc->buffers);
 }