ANDROID: binder: fix KMI-break due to address type change

In commit ("binder: keep vma addresses type as unsigned long") the vma
address type in 'struct binder_alloc' and 'struct binder_buffer' is
changed from 'void __user *' to 'unsigned long'.

This triggers the following KMI issues:

  type 'struct binder_buffer' changed
    member changed from 'void* user_data' to 'unsigned long user_data'
      type changed from 'void*' to 'unsigned long'

  type 'struct binder_alloc' changed
    member changed from 'void* buffer' to 'unsigned long buffer'
      type changed from 'void*' to 'unsigned long'
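
For reference, a minimal before/after sketch of one of the flagged
members (surrounding fields elided):

  /* KMI-frozen layout */
  struct binder_buffer {
          ...
          void __user *user_data;
  };

  /* after the offending commit */
  struct binder_buffer {
          ...
          unsigned long user_data;
  };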

The offending commit is being backported as part of a larger patchset
from upstream in [1]. Let's fix these issues with a partial revert
that restores the original pointer types and casts to an integer type
where necessary.
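
Concretely, the struct members stay pointers and each arithmetic or
comparison site casts to 'uintptr_t', which has the same width as
'unsigned long', so the computed values are unchanged. For example,
the page index calculation becomes:

  index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;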

Note that this approach is preferred over dropping the single
KMI-breaking patch from the backport, as doing so would have created
non-trivial merge conflicts in the subsequent cherry-picks.
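
The one place where a raw address is stored back into a pointer
member is the mmap handler, which casts in the opposite direction:

  alloc->buffer = (void __user *)vma->vm_start;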

Bug: 254650075
Link: https://lore.kernel.org/all/20231201172212.1813387-1-cmllamas@google.com/ [1]
Change-Id: Ief9de717d0f34642f5954ffa2e306075a5b4e02e
Signed-off-by: Carlos Llamas <cmllamas@google.com>
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index c3162ca..8f604ea 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -135,9 +135,9 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (user_ptr < buffer->user_data) {
+		if (user_ptr < (uintptr_t)buffer->user_data) {
 			n = n->rb_left;
-		} else if (user_ptr > buffer->user_data) {
+		} else if (user_ptr > (uintptr_t)buffer->user_data) {
 			n = n->rb_right;
 		} else {
 			/*
@@ -203,7 +203,7 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
 		size_t index;
 		int ret;
 
-		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
 		if (!binder_get_installed_page(page))
@@ -252,7 +252,8 @@ static int binder_install_single_page(struct binder_alloc *alloc,
 	ret = vm_insert_page(alloc->vma, addr, page);
 	if (ret) {
 		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
-		       alloc->pid, __func__, addr - alloc->buffer, ret);
+		       alloc->pid, __func__, addr - (uintptr_t)alloc->buffer,
+		       ret);
 		__free_page(page);
 		ret = -ENOMEM;
 		goto out;
@@ -274,14 +275,14 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
 	unsigned long start, final;
 	unsigned long page_addr;
 
-	start = buffer->user_data & PAGE_MASK;
-	final = PAGE_ALIGN(buffer->user_data + size);
+	start = (uintptr_t)buffer->user_data & PAGE_MASK;
+	final = PAGE_ALIGN((uintptr_t)buffer->user_data + size);
 
 	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
 		unsigned long index;
 		int ret;
 
-		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
 		if (binder_get_installed_page(page))
@@ -312,7 +313,7 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
 		unsigned long index;
 		bool on_lru;
 
-		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
 		if (page->page_ptr) {
@@ -505,9 +506,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	 * adjacent in-use buffer. In such case, the page has been already
 	 * removed from the freelist so we trim our range short.
 	 */
-	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
-	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
-	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
+	next_used_page = ((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK;
+	curr_last_page = PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+	binder_lru_freelist_del(alloc, PAGE_ALIGN((uintptr_t)buffer->user_data),
 				min(next_used_page, curr_last_page));
 
 	rb_erase(&buffer->rb_node, &alloc->free_buffers);
@@ -624,12 +625,12 @@ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
 
 static unsigned long buffer_start_page(struct binder_buffer *buffer)
 {
-	return buffer->user_data & PAGE_MASK;
+	return (uintptr_t)buffer->user_data & PAGE_MASK;
 }
 
 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (buffer->user_data - 1) & PAGE_MASK;
+	return ((uintptr_t)buffer->user_data - 1) & PAGE_MASK;
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
@@ -687,8 +688,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 
-	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
-				(buffer->user_data + buffer_size) & PAGE_MASK);
+	binder_lru_freelist_add(alloc, PAGE_ALIGN((uintptr_t)buffer->user_data),
+				((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -841,7 +842,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 				   SZ_4M);
 	mutex_unlock(&binder_alloc_mmap_lock);
 
-	alloc->buffer = vma->vm_start;
+	alloc->buffer = (void __user *)vma->vm_start;
 
 	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
 			       sizeof(alloc->pages[0]),
@@ -940,7 +941,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
 			on_lru = list_lru_del(&binder_freelist,
 					      &alloc->pages[i].lru);
-			page_addr = alloc->buffer + i * PAGE_SIZE;
+			page_addr = (uintptr_t)alloc->buffer + i * PAGE_SIZE;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 				     "%s: %d: page %d %s\n",
 				     __func__, alloc->pid, i,
@@ -1086,7 +1087,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_page_already_freed;
 
 	index = page - alloc->pages;
-	page_addr = alloc->buffer + index * PAGE_SIZE;
+	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
 
 	vma = vma_lookup(mm, page_addr);
 	if (vma && vma != binder_alloc_get_vma(alloc))