ANDROID: binder: fix KMI-break due to address type change
In commit ("binder: keep vma addresses type as unsigned long") the vma
address type in 'struct binder_alloc' and 'struct binder_buffer' was
changed from 'void __user *' to 'unsigned long'.
This triggers the following KMI issues:
  type 'struct binder_buffer' changed
    member changed from 'void* user_data' to 'unsigned long user_data'
      type changed from 'void*' to 'unsigned long'
  type 'struct binder_alloc' changed
    member changed from 'void* buffer' to 'unsigned long buffer'
      type changed from 'void*' to 'unsigned long'
The offending commit is being backported as part of a larger patchset
from upstream in [1]. Let's fix these issues with a partial revert that
restores the original pointer types and casts to an integer type where
necessary.
Note this approach is preferred over dropping the single KMI-breaking
patch from the backport, as doing so would have created non-trivial
merge conflicts in the subsequent cherry-picks.
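To illustrate the pattern (a minimal userspace sketch, not kernel code;
'demo_buffer' and the DEMO_* macros are hypothetical stand-ins): the
member keeps its KMI-frozen pointer type, and each call site casts to
uintptr_t before doing integer arithmetic.

  #include <stdint.h>
  #include <stdio.h>

  #define DEMO_PAGE_SIZE 4096UL
  #define DEMO_PAGE_MASK (~(DEMO_PAGE_SIZE - 1))

  /* Stand-in for 'struct binder_buffer': the field stays a pointer
   * (the KMI-frozen type) instead of becoming 'unsigned long'. */
  struct demo_buffer {
          void *user_data;
  };

  int main(void)
  {
          struct demo_buffer b = { .user_data = (void *)0x7f2a10001234UL };
          size_t size = 300;

          /* The call-site pattern from this patch: cast the pointer to
           * an integer type for the page math, rather than changing
           * the member type itself. */
          uintptr_t start = (uintptr_t)b.user_data & DEMO_PAGE_MASK;
          uintptr_t final = ((uintptr_t)b.user_data + size +
                             DEMO_PAGE_SIZE - 1) & DEMO_PAGE_MASK;

          printf("start=%#lx final=%#lx\n",
                 (unsigned long)start, (unsigned long)final);
          return 0;
  }

The start/final computation above mirrors the casts added to
binder_install_buffer_pages() in the diff below.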
Bug: 254650075
Bug: 319778300
Link: https://lore.kernel.org/all/20231201172212.1813387-1-cmllamas@google.com/ [1]
Change-Id: Ief9de717d0f34642f5954ffa2e306075a5b4e02e
Signed-off-by: Carlos Llamas <cmllamas@google.com>
[cmllamas: fixed trivial conflicts]
commit d30de4c7b0 (parent 81054a4507)
5 changed files with 35 additions and 33 deletions
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2276,8 +2276,9 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 			 * Convert the address to an offset relative to
 			 * the base of the transaction buffer.
 			 */
-			fda_offset = parent->buffer - buffer->user_data +
-				fda->parent_offset;
+			fda_offset =
+			    (parent->buffer - (uintptr_t)buffer->user_data) +
+			    fda->parent_offset;
 			for (fd_index = 0; fd_index < fda->num_fds;
 			     fd_index++) {
 				u32 fd;
@@ -2795,7 +2796,7 @@ static int binder_translate_fd_array(struct list_head *pf_head,
 	 * Convert the address to an offset relative to
 	 * the base of the transaction buffer.
 	 */
-	fda_offset = parent->buffer - t->buffer->user_data +
+	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
 		fda->parent_offset;
 	sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
 		fda->parent_offset;
@@ -2870,9 +2871,8 @@ static int binder_fixup_parent(struct list_head *pf_head,
 				  proc->pid, thread->pid);
 		return -EINVAL;
 	}
-
-	buffer_offset = bp->parent_offset + parent->buffer - b->user_data;
-
+	buffer_offset = bp->parent_offset +
+			(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
 	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
 }
 
@@ -3473,7 +3473,7 @@ static void binder_transaction(struct binder_proc *proc,
 				    ALIGN(extra_buffers_size, sizeof(void *)) -
 				    ALIGN(secctx_sz, sizeof(u64));
 
-		t->security_ctx = t->buffer->user_data + buf_offset;
+		t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
 		err = binder_alloc_copy_to_buffer(&target_proc->alloc,
 						  t->buffer, buf_offset,
 						  secctx, secctx_sz);
@@ -3751,7 +3751,8 @@ static void binder_transaction(struct binder_proc *proc,
 				goto err_translate_failed;
 			}
 			/* Fixup buffer pointer to target proc address space */
-			bp->buffer = t->buffer->user_data + sg_buf_offset;
+			bp->buffer = (uintptr_t)
+				t->buffer->user_data + sg_buf_offset;
 			sg_buf_offset += ALIGN(bp->length, sizeof(u64));
 
 			num_valid = (buffer_offset - off_start_offset) /
@@ -4941,7 +4942,7 @@ skip:
 		}
 		trd->data_size = t->buffer->data_size;
 		trd->offsets_size = t->buffer->offsets_size;
-		trd->data.ptr.buffer = t->buffer->user_data;
+		trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
 		trd->data.ptr.offsets = trd->data.ptr.buffer +
 					ALIGN(t->buffer->data_size,
 					      sizeof(void *));
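Every binder.c hunk above implements the comment quoted in the hunks:
"Convert the address to an offset relative to the base of the
transaction buffer". A minimal sketch of that conversion (hypothetical
addresses, userspace C, not from the patch):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uintptr_t parent_buffer = 0x7f2a10000520UL; /* parent->buffer     */
          void *user_data = (void *)0x7f2a10000000UL; /* buffer->user_data  */
          size_t parent_offset = 0x40;                /* fda->parent_offset */

          /* Mirrors: fda_offset = (parent->buffer -
           *     (uintptr_t)buffer->user_data) + fda->parent_offset; */
          size_t fda_offset = (parent_buffer - (uintptr_t)user_data) +
                              parent_offset;

          printf("fda_offset=%#zx\n", fda_offset); /* prints 0x560 */
          return 0;
  }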
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -135,9 +135,9 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 		buffer = rb_entry(n, struct binder_buffer, rb_node);
 		BUG_ON(buffer->free);
 
-		if (user_ptr < buffer->user_data) {
+		if (user_ptr < (uintptr_t)buffer->user_data) {
 			n = n->rb_left;
-		} else if (user_ptr > buffer->user_data) {
+		} else if (user_ptr > (uintptr_t)buffer->user_data) {
 			n = n->rb_right;
 		} else {
 			/*
@@ -203,7 +203,7 @@ static void binder_lru_freelist_add(struct binder_alloc *alloc,
 		size_t index;
 		int ret;
 
-		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
 		if (!binder_get_installed_page(page))
@@ -252,7 +252,8 @@ static int binder_install_single_page(struct binder_alloc *alloc,
 	ret = vm_insert_page(alloc->vma, addr, page);
 	if (ret) {
 		pr_err("%d: %s failed to insert page at offset %lx with %d\n",
-		       alloc->pid, __func__, addr - alloc->buffer, ret);
+		       alloc->pid, __func__, addr - (uintptr_t)alloc->buffer,
+		       ret);
 		__free_page(page);
 		ret = -ENOMEM;
 		goto out;
@@ -274,14 +275,14 @@ static int binder_install_buffer_pages(struct binder_alloc *alloc,
 	unsigned long start, final;
 	unsigned long page_addr;
 
-	start = buffer->user_data & PAGE_MASK;
-	final = PAGE_ALIGN(buffer->user_data + size);
+	start = (uintptr_t)buffer->user_data & PAGE_MASK;
+	final = PAGE_ALIGN((uintptr_t)buffer->user_data + size);
 
 	for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) {
 		unsigned long index;
 		int ret;
 
-		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
 		if (binder_get_installed_page(page))
@@ -312,7 +313,7 @@ static void binder_lru_freelist_del(struct binder_alloc *alloc,
 		unsigned long index;
 		bool on_lru;
 
-		index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
 		page = &alloc->pages[index];
 
 		if (page->page_ptr) {
@@ -515,9 +516,9 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
 	 * adjacent in-use buffer. In such case, the page has been already
 	 * removed from the freelist so we trim our range short.
	 */
-	next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK;
-	curr_last_page = PAGE_ALIGN(buffer->user_data + size);
-	binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data),
+	next_used_page = ((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK;
+	curr_last_page = PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+	binder_lru_freelist_del(alloc, PAGE_ALIGN((uintptr_t)buffer->user_data),
 				min(next_used_page, curr_last_page));
 
 	rb_erase(&buffer->rb_node, &alloc->free_buffers);
@@ -634,12 +635,12 @@ out:
 
 static unsigned long buffer_start_page(struct binder_buffer *buffer)
 {
-	return buffer->user_data & PAGE_MASK;
+	return (uintptr_t)buffer->user_data & PAGE_MASK;
 }
 
 static unsigned long prev_buffer_end_page(struct binder_buffer *buffer)
 {
-	return (buffer->user_data - 1) & PAGE_MASK;
+	return ((uintptr_t)buffer->user_data - 1) & PAGE_MASK;
 }
 
 static void binder_delete_free_buffer(struct binder_alloc *alloc,
@@ -697,8 +698,8 @@ static void binder_free_buf_locked(struct binder_alloc *alloc,
 			      alloc->pid, size, alloc->free_async_space);
 	}
 
-	binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data),
-				(buffer->user_data + buffer_size) & PAGE_MASK);
+	binder_lru_freelist_add(alloc, PAGE_ALIGN((uintptr_t)buffer->user_data),
+				((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
 
 	rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
 	buffer->free = 1;
@@ -847,7 +848,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
 				   SZ_4M);
 	mutex_unlock(&binder_alloc_mmap_lock);
 
-	alloc->buffer = vma->vm_start;
+	alloc->buffer = (void __user *)vma->vm_start;
 
 	alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
 			       sizeof(alloc->pages[0]),
@@ -946,7 +947,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
 
 			on_lru = list_lru_del(&binder_freelist,
 					      &alloc->pages[i].lru);
-			page_addr = alloc->buffer + i * PAGE_SIZE;
+			page_addr = (uintptr_t)alloc->buffer + i * PAGE_SIZE;
 			binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
 					   "%s: %d: page %d %s\n",
 					   __func__, alloc->pid, i,
@@ -1092,7 +1093,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 		goto err_page_already_freed;
 
 	index = page - alloc->pages;
-	page_addr = alloc->buffer + index * PAGE_SIZE;
+	page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
 
 	vma = vma_lookup(mm, page_addr);
 	if (vma && vma != binder_alloc_get_vma(alloc))
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -54,7 +54,7 @@ struct binder_buffer {
 	size_t data_size;
 	size_t offsets_size;
 	size_t extra_buffers_size;
-	unsigned long user_data;
+	void __user *user_data;
 	int pid;
 };
 
@@ -99,7 +99,7 @@ struct binder_alloc {
 	spinlock_t lock;
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
-	unsigned long buffer;
+	void __user *buffer;
 	struct list_head buffers;
 	struct rb_root free_buffers;
 	struct rb_root allocated_buffers;
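The two binder_alloc.h hunks above are the entire KMI surface of this
patch; every other hunk is a call-site cast. The break is purely a type
change, not a layout change, as this hypothetical userspace sketch
makes explicit:

  #include <assert.h>

  /* 'buffer_v1' models the KMI-frozen pointer form restored here;
   * 'buffer_v2' models the upstream 'unsigned long' form. On LP64 the
   * two are layout-identical, but ABI monitoring compares member
   * types, not just sizes, so v2 is still flagged. */
  struct buffer_v1 { void *user_data; };
  struct buffer_v2 { unsigned long user_data; };

  static_assert(sizeof(struct buffer_v1) == sizeof(struct buffer_v2),
                "layout-identical, yet still a KMI type change");

  int main(void) { return 0; }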
--- a/drivers/android/binder_alloc_selftest.c
+++ b/drivers/android/binder_alloc_selftest.c
@@ -97,10 +97,10 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
 	unsigned long end;
 	int page_index;
 
-	end = PAGE_ALIGN(buffer->user_data + size);
-	page_addr = buffer->user_data;
+	end = PAGE_ALIGN((uintptr_t)buffer->user_data + size);
+	page_addr = (uintptr_t)buffer->user_data;
 	for (; page_addr < end; page_addr += PAGE_SIZE) {
-		page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+		page_index = (page_addr - (uintptr_t)alloc->buffer) / PAGE_SIZE;
 		if (!alloc->pages[page_index].page_ptr ||
 		    !list_empty(&alloc->pages[page_index].lru)) {
 			pr_err("expect alloc but is %s at page index %d\n",
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -352,7 +352,7 @@ TRACE_EVENT(binder_update_page_range,
 	TP_fast_assign(
 		__entry->proc = alloc->pid;
 		__entry->allocate = allocate;
-		__entry->offset = start - alloc->buffer;
+		__entry->offset = start - (uintptr_t)alloc->buffer;
 		__entry->size = end - start;
 	),
 	TP_printk("proc=%d allocate=%d offset=%zu size=%zu",