From c51b9ef3f52161ee009af34555af7719fdcf74f1 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Fri, 14 Apr 2023 10:57:26 -0400 Subject: [PATCH 01/63] maple_tree: make maple state reusable after mas_empty_area_rev() commit fad8e4291da5e3243e086622df63cb952db444d8 upstream. Stop using maple state min/max for the range by passing through pointers for those values. This will allow the maple state to be reused without resetting. Also add some logic to fail out early on searching with invalid arguments. Link: https://lkml.kernel.org/r/20230414145728.4067069-1-Liam.Howlett@oracle.com Fixes: 54a611b60590 ("Maple Tree: add new data structure") Signed-off-by: Liam R. Howlett Reported-by: Rick Edgecombe Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- lib/maple_tree.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 39f34ea7a9be..2309c4e1b58a 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4968,7 +4968,8 @@ not_found: * Return: True if found in a leaf, false otherwise. 
* */ -static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) +static bool mas_rev_awalk(struct ma_state *mas, unsigned long size, + unsigned long *gap_min, unsigned long *gap_max) { enum maple_type type = mte_node_type(mas->node); struct maple_node *node = mas_mn(mas); @@ -5033,8 +5034,8 @@ static bool mas_rev_awalk(struct ma_state *mas, unsigned long size) if (unlikely(ma_is_leaf(type))) { mas->offset = offset; - mas->min = min; - mas->max = min + gap - 1; + *gap_min = min; + *gap_max = min + gap - 1; return true; } @@ -5310,6 +5311,9 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long *pivots; enum maple_type mt; + if (min >= max) + return -EINVAL; + if (mas_is_start(mas)) mas_start(mas); else if (mas->offset >= 2) @@ -5364,6 +5368,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, { struct maple_enode *last = mas->node; + if (min >= max) + return -EINVAL; + if (mas_is_start(mas)) { mas_start(mas); mas->offset = mas_data_end(mas); @@ -5383,7 +5390,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, mas->index = min; mas->last = max; - while (!mas_rev_awalk(mas, size)) { + while (!mas_rev_awalk(mas, size, &min, &max)) { if (last == mas->node) { if (!mas_rewind_node(mas)) return -EBUSY; @@ -5398,17 +5405,9 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, if (unlikely(mas->offset == MAPLE_NODE_SLOTS)) return -EBUSY; - /* - * mas_rev_awalk() has set mas->min and mas->max to the gap values. If - * the maximum is outside the window we are searching, then use the last - * location in the search. - * mas->max and mas->min is the range of the gap. - * mas->index and mas->last are currently set to the search range. - */ - /* Trim the upper limit to the max. */ - if (mas->max <= mas->last) - mas->last = mas->max; + if (max <= mas->last) + mas->last = max; mas->index = mas->last - size + 1; return 0; From 66f13a1acf0ae80bcd5cd336a6e864c2eaff851d Mon Sep 17 00:00:00 2001 From: "Liam R. 
Howlett" Date: Fri, 14 Apr 2023 10:57:27 -0400 Subject: [PATCH 02/63] maple_tree: fix mas_empty_area() search commit 06e8fd999334bcd76b4d72d7b9206d4aea89764e upstream. The internal function of mas_awalk() was incorrectly skipping the last entry in a node, which could potentially be NULL. This is only a problem for the left-most node in the tree - otherwise that NULL would not exist. Fix mas_awalk() by using the metadata to obtain the end of the node for the loop and the logical pivot as opposed to the raw pivot value. Link: https://lkml.kernel.org/r/20230414145728.4067069-2-Liam.Howlett@oracle.com Fixes: 54a611b60590 ("Maple Tree: add new data structure") Signed-off-by: Liam R. Howlett Reported-by: Rick Edgecombe Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- lib/maple_tree.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 2309c4e1b58a..f026d5fb51ab 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -5059,10 +5059,10 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) { enum maple_type type = mte_node_type(mas->node); unsigned long pivot, min, gap = 0; - unsigned char offset; - unsigned long *gaps; - unsigned long *pivots = ma_pivots(mas_mn(mas), type); - void __rcu **slots = ma_slots(mas_mn(mas), type); + unsigned char offset, data_end; + unsigned long *gaps, *pivots; + void __rcu **slots; + struct maple_node *node; bool found = false; if (ma_is_dense(type)) { @@ -5070,13 +5070,15 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) return true; } - gaps = ma_gaps(mte_to_node(mas->node), type); + node = mas_mn(mas); + pivots = ma_pivots(node, type); + slots = ma_slots(node, type); + gaps = ma_gaps(node, type); offset = mas->offset; min = mas_safe_min(mas, pivots, offset); - for (; offset < mt_slots[type]; offset++) { - pivot = mas_safe_pivot(mas, pivots, offset, type); - if (offset && !pivot) - 
break; + data_end = ma_data_end(node, type, pivots, mas->max); + for (; offset <= data_end; offset++) { + pivot = mas_logical_pivot(mas, pivots, offset, type); /* Not within lower bounds */ if (mas->index > pivot) From a1176791ab74bed2df83e7528fcde16a7c888420 Mon Sep 17 00:00:00 2001 From: Peng Zhang Date: Tue, 11 Apr 2023 12:10:04 +0800 Subject: [PATCH 03/63] maple_tree: fix a potential memory leak, OOB access, or other unpredictable bug commit 1f5f12ece722aacea1769fb644f27790ede339dc upstream. In mas_alloc_nodes(), "node->node_count = 0" means to initialize the node_count field of the new node, but the node may not be a new node. It may be a node that existed before and node_count has a value, setting it to 0 will cause a memory leak. At this time, mas->alloc->total will be greater than the actual number of nodes in the linked list, which may cause many other errors. For example, out-of-bounds access in mas_pop_node(), and mas_pop_node() may return addresses that should not be used. Fix it by initializing node_count only for new nodes. Also, by the way, an if-else statement was removed to simplify the code. Link: https://lkml.kernel.org/r/20230411041005.26205-1-zhangpeng.00@bytedance.com Fixes: 54a611b60590 ("Maple Tree: add new data structure") Signed-off-by: Peng Zhang Reviewed-by: Liam R. 
Howlett Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- lib/maple_tree.c | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index f026d5fb51ab..9fe25ce9937b 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1293,26 +1293,21 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp) node = mas->alloc; node->request_count = 0; while (requested) { - max_req = MAPLE_ALLOC_SLOTS; - if (node->node_count) { - unsigned int offset = node->node_count; - - slots = (void **)&node->slot[offset]; - max_req -= offset; - } else { - slots = (void **)&node->slot; - } - + max_req = MAPLE_ALLOC_SLOTS - node->node_count; + slots = (void **)&node->slot[node->node_count]; max_req = min(requested, max_req); count = mt_alloc_bulk(gfp, max_req, slots); if (!count) goto nomem_bulk; + if (node->node_count == 0) { + node->slot[0]->node_count = 0; + node->slot[0]->request_count = 0; + } + node->node_count += count; allocated += count; node = node->slot[0]; - node->node_count = 0; - node->request_count = 0; requested -= count; } mas->alloc->total = allocated; From a0aa4827f79100e23b2614be155a7be5872748b7 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Tue, 18 Apr 2023 02:35:13 +0900 Subject: [PATCH 04/63] nilfs2: initialize unused bytes in segment summary blocks commit ef832747a82dfbc22a3702219cc716f449b24e4a upstream. Syzbot still reports uninit-value in nilfs_add_checksums_on_logs() for KMSAN enabled kernels after applying commit 7397031622e0 ("nilfs2: initialize "struct nilfs_binfo_dat"->bi_pad field"). This is because the unused bytes at the end of each block in segment summaries are not initialized. So this fixes the issue by padding the unused bytes with null bytes. 
Link: https://lkml.kernel.org/r/20230417173513.12598-1-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi Tested-by: Ryusuke Konishi Reported-by: syzbot+048585f3f4227bb2b49b@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?extid=048585f3f4227bb2b49b Cc: Alexander Potapenko Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/segment.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 63d96a1733b2..101f2ce6ba37 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -430,6 +430,23 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci) return 0; } +/** + * nilfs_segctor_zeropad_segsum - zero pad the rest of the segment summary area + * @sci: segment constructor object + * + * nilfs_segctor_zeropad_segsum() zero-fills unallocated space at the end of + * the current segment summary block. + */ +static void nilfs_segctor_zeropad_segsum(struct nilfs_sc_info *sci) +{ + struct nilfs_segsum_pointer *ssp; + + ssp = sci->sc_blk_cnt > 0 ? 
&sci->sc_binfo_ptr : &sci->sc_finfo_ptr; + if (ssp->offset < ssp->bh->b_size) + memset(ssp->bh->b_data + ssp->offset, 0, + ssp->bh->b_size - ssp->offset); +} + static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci) { sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; @@ -438,6 +455,7 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci) * The current segment is filled up * (internal code) */ + nilfs_segctor_zeropad_segsum(sci); sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg); return nilfs_segctor_reset_segment_buffer(sci); } @@ -542,6 +560,7 @@ static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci, goto retry; } if (unlikely(required)) { + nilfs_segctor_zeropad_segsum(sci); err = nilfs_segbuf_extend_segsum(segbuf); if (unlikely(err)) goto failed; @@ -1531,6 +1550,7 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci, nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); sci->sc_stage = prev_stage; } + nilfs_segctor_zeropad_segsum(sci); nilfs_segctor_truncate_segments(sci, sci->sc_curseg, nilfs->ns_sufile); return 0; From ec90129b91b6568aab011d954d85edbd43ea7196 Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Fri, 17 Feb 2023 17:21:54 +0100 Subject: [PATCH 05/63] kernel/sys.c: fix and improve control flow in __sys_setres[ug]id() commit 659c0ce1cb9efc7f58d380ca4bb2a51ae9e30553 upstream. Linux Security Modules (LSMs) that implement the "capable" hook will usually emit an access denial message to the audit log whenever they "block" the current task from using the given capability based on their security policy. The occurrence of a denial is used as an indication that the given task has attempted an operation that requires the given access permission, so the callers of functions that perform LSM permission checks must take care to avoid calling them too early (before it is decided if the permission is actually needed to perform the requested operation). 
The __sys_setres[ug]id() functions violate this convention by first calling ns_capable_setid() and only then checking if the operation requires the capability or not. It means that any caller that has the capability granted by DAC (task's capability set) but not by MAC (LSMs) will generate a "denied" audit record, even if is doing an operation for which the capability is not required. Fix this by reordering the checks such that ns_capable_setid() is checked last and -EPERM is returned immediately if it returns false. While there, also do two small optimizations: * move the capability check before prepare_creds() and * bail out early in case of a no-op. Link: https://lkml.kernel.org/r/20230217162154.837549-1-omosnace@redhat.com Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Ondrej Mosnacek Cc: Eric W. Biederman Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- kernel/sys.c | 69 ++++++++++++++++++++++++++++++---------------------- 1 file changed, 40 insertions(+), 29 deletions(-) diff --git a/kernel/sys.c b/kernel/sys.c index 88b31f096fb2..c85e1abf7b7c 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -664,6 +664,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) struct cred *new; int retval; kuid_t kruid, keuid, ksuid; + bool ruid_new, euid_new, suid_new; kruid = make_kuid(ns, ruid); keuid = make_kuid(ns, euid); @@ -678,25 +679,29 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) if ((suid != (uid_t) -1) && !uid_valid(ksuid)) return -EINVAL; + old = current_cred(); + + /* check for no-op */ + if ((ruid == (uid_t) -1 || uid_eq(kruid, old->uid)) && + (euid == (uid_t) -1 || (uid_eq(keuid, old->euid) && + uid_eq(keuid, old->fsuid))) && + (suid == (uid_t) -1 || uid_eq(ksuid, old->suid))) + return 0; + + ruid_new = ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) && + !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid); + euid_new = euid != (uid_t) -1 && !uid_eq(keuid, old->uid) && + !uid_eq(keuid, old->euid) && 
!uid_eq(keuid, old->suid); + suid_new = suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) && + !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid); + if ((ruid_new || euid_new || suid_new) && + !ns_capable_setid(old->user_ns, CAP_SETUID)) + return -EPERM; + new = prepare_creds(); if (!new) return -ENOMEM; - old = current_cred(); - - retval = -EPERM; - if (!ns_capable_setid(old->user_ns, CAP_SETUID)) { - if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) && - !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid)) - goto error; - if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) && - !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid)) - goto error; - if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) && - !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid)) - goto error; - } - if (ruid != (uid_t) -1) { new->uid = kruid; if (!uid_eq(kruid, old->uid)) { @@ -761,6 +766,7 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) struct cred *new; int retval; kgid_t krgid, kegid, ksgid; + bool rgid_new, egid_new, sgid_new; krgid = make_kgid(ns, rgid); kegid = make_kgid(ns, egid); @@ -773,23 +779,28 @@ long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) if ((sgid != (gid_t) -1) && !gid_valid(ksgid)) return -EINVAL; + old = current_cred(); + + /* check for no-op */ + if ((rgid == (gid_t) -1 || gid_eq(krgid, old->gid)) && + (egid == (gid_t) -1 || (gid_eq(kegid, old->egid) && + gid_eq(kegid, old->fsgid))) && + (sgid == (gid_t) -1 || gid_eq(ksgid, old->sgid))) + return 0; + + rgid_new = rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) && + !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid); + egid_new = egid != (gid_t) -1 && !gid_eq(kegid, old->gid) && + !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid); + sgid_new = sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) && + !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid); + if ((rgid_new || egid_new || sgid_new) && + !ns_capable_setid(old->user_ns, CAP_SETGID)) + return -EPERM; + new = prepare_creds(); 
if (!new) return -ENOMEM; - old = current_cred(); - - retval = -EPERM; - if (!ns_capable_setid(old->user_ns, CAP_SETGID)) { - if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) && - !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid)) - goto error; - if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) && - !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid)) - goto error; - if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) && - !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid)) - goto error; - } if (rgid != (gid_t) -1) new->gid = krgid; From f6a5f61200ff0f41137b383812ae43898480ed69 Mon Sep 17 00:00:00 2001 From: Steve Chou Date: Tue, 11 Apr 2023 11:49:28 +0800 Subject: [PATCH 06/63] tools/mm/page_owner_sort.c: fix TGID output when cull=tg is used commit 9235756885e865070c4be2facda75262dbd85967 upstream. When using cull option with 'tg' flag, the fprintf is using pid instead of tgid. It should use tgid instead. Link: https://lkml.kernel.org/r/20230411034929.2071501-1-steve_chou@pesi.com.tw Fixes: 9c8a0a8e599f4a ("tools/vm/page_owner_sort.c: support for user-defined culling rules") Signed-off-by: Steve Chou Cc: Jiajian Ye Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- tools/vm/page_owner_sort.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/vm/page_owner_sort.c b/tools/vm/page_owner_sort.c index ce860ab94162..58ebfe392402 100644 --- a/tools/vm/page_owner_sort.c +++ b/tools/vm/page_owner_sort.c @@ -847,7 +847,7 @@ int main(int argc, char **argv) if (cull & CULL_PID || filter & FILTER_PID) fprintf(fout, ", PID %d", list[i].pid); if (cull & CULL_TGID || filter & FILTER_TGID) - fprintf(fout, ", TGID %d", list[i].pid); + fprintf(fout, ", TGID %d", list[i].tgid); if (cull & CULL_COMM || filter & FILTER_COMM) fprintf(fout, ", task_comm_name: %s", list[i].comm); if (cull & CULL_ALLOCATOR) { From 3e6bd2653ff86ee31fdbb821abe05af4d309aedf Mon Sep 17 00:00:00 2001 From: Baokun Li Date: Mon, 10 Apr 2023 21:08:26 +0800 
Subject: [PATCH 07/63] writeback, cgroup: fix null-ptr-deref write in bdi_split_work_to_wbs commit 1ba1199ec5747f475538c0d25a32804e5ba1dfde upstream. KASAN report null-ptr-deref: ================================================================== BUG: KASAN: null-ptr-deref in bdi_split_work_to_wbs+0x5c5/0x7b0 Write of size 8 at addr 0000000000000000 by task sync/943 CPU: 5 PID: 943 Comm: sync Tainted: 6.3.0-rc5-next-20230406-dirty #461 Call Trace: dump_stack_lvl+0x7f/0xc0 print_report+0x2ba/0x340 kasan_report+0xc4/0x120 kasan_check_range+0x1b7/0x2e0 __kasan_check_write+0x24/0x40 bdi_split_work_to_wbs+0x5c5/0x7b0 sync_inodes_sb+0x195/0x630 sync_inodes_one_sb+0x3a/0x50 iterate_supers+0x106/0x1b0 ksys_sync+0x98/0x160 [...] ================================================================== The race that causes the above issue is as follows: cpu1 cpu2 -------------------------|------------------------- inode_switch_wbs INIT_WORK(&isw->work, inode_switch_wbs_work_fn) queue_rcu_work(isw_wq, &isw->work) // queue_work async inode_switch_wbs_work_fn wb_put_many(old_wb, nr_switched) percpu_ref_put_many ref->data->release(ref) cgwb_release queue_work(cgwb_release_wq, &wb->release_work) // queue_work async &wb->release_work cgwb_release_workfn ksys_sync iterate_supers sync_inodes_one_sb sync_inodes_sb bdi_split_work_to_wbs kmalloc(sizeof(*work), GFP_ATOMIC) // alloc memory failed percpu_ref_exit ref->data = NULL kfree(data) wb_get(wb) percpu_ref_get(&wb->refcnt) percpu_ref_get_many(ref, 1) atomic_long_add(nr, &ref->data->count) atomic64_add(i, v) // trigger null-ptr-deref bdi_split_work_to_wbs() traverses &bdi->wb_list to split work into all wbs. If the allocation of new work fails, the on-stack fallback will be used and the reference count of the current wb is increased afterwards. 
If cgroup writeback membership switches occur before getting the reference count and the current wb is released as old_wb, then calling wb_get() or wb_put() will trigger the null pointer dereference above. This issue was introduced in v4.3-rc7 (see fix tag1). Both sync_inodes_sb() and __writeback_inodes_sb_nr() calls to bdi_split_work_to_wbs() can trigger this issue. For scenarios called via sync_inodes_sb(), originally commit 7fc5854f8c6e ("writeback: synchronize sync(2) against cgroup writeback membership switches") reduced the possibility of the issue by adding wb_switch_rwsem, but in v5.14-rc1 (see fix tag2) removed the "inode_io_list_del_locked(inode, old_wb)" from inode_switch_wbs_work_fn() so that wb->state contains WB_has_dirty_io, thus old_wb is not skipped when traversing wbs in bdi_split_work_to_wbs(), and the issue becomes easily reproducible again. To solve this problem, percpu_ref_exit() is called under RCU protection to avoid race between cgwb_release_workfn() and bdi_split_work_to_wbs(). Moreover, replace wb_get() with wb_tryget() in bdi_split_work_to_wbs(), and skip the current wb if wb_tryget() fails because the wb has already been shutdown. Link: https://lkml.kernel.org/r/20230410130826.1492525-1-libaokun1@huawei.com Fixes: b817525a4a80 ("writeback: bdi_writeback iteration must not skip dying ones") Signed-off-by: Baokun Li Reviewed-by: Jan Kara Acked-by: Tejun Heo Cc: Alexander Viro Cc: Andreas Dilger Cc: Christian Brauner Cc: Dennis Zhou Cc: Hou Tao Cc: yangerkun Cc: Zhang Yi Cc: Jens Axboe Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/fs-writeback.c | 17 ++++++++++------- mm/backing-dev.c | 12 ++++++++++-- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 9958d4020771..aa33c39be182 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -974,6 +974,16 @@ restart: continue; } + /* + * If wb_tryget fails, the wb has been shutdown, skip it. 
+ * + * Pin @wb so that it stays on @bdi->wb_list. This allows + * continuing iteration from @wb after dropping and + * regrabbing rcu read lock. + */ + if (!wb_tryget(wb)) + continue; + /* alloc failed, execute synchronously using on-stack fallback */ work = &fallback_work; *work = *base_work; @@ -982,13 +992,6 @@ restart: work->done = &fallback_work_done; wb_queue_work(wb, work); - - /* - * Pin @wb so that it stays on @bdi->wb_list. This allows - * continuing iteration from @wb after dropping and - * regrabbing rcu read lock. - */ - wb_get(wb); last_wb = wb; rcu_read_unlock(); diff --git a/mm/backing-dev.c b/mm/backing-dev.c index c30419a5e119..bf5525c2e561 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -380,6 +380,15 @@ static LIST_HEAD(offline_cgwbs); static void cleanup_offline_cgwbs_workfn(struct work_struct *work); static DECLARE_WORK(cleanup_offline_cgwbs_work, cleanup_offline_cgwbs_workfn); +static void cgwb_free_rcu(struct rcu_head *rcu_head) +{ + struct bdi_writeback *wb = container_of(rcu_head, + struct bdi_writeback, rcu); + + percpu_ref_exit(&wb->refcnt); + kfree(wb); +} + static void cgwb_release_workfn(struct work_struct *work) { struct bdi_writeback *wb = container_of(work, struct bdi_writeback, @@ -402,11 +411,10 @@ static void cgwb_release_workfn(struct work_struct *work) list_del(&wb->offline_node); spin_unlock_irq(&cgwb_lock); - percpu_ref_exit(&wb->refcnt); wb_exit(wb); bdi_put(bdi); WARN_ON_ONCE(!list_empty(&wb->b_attached)); - kfree_rcu(wb, rcu); + call_rcu(&wb->rcu, cgwb_free_rcu); } static void cgwb_release(struct percpu_ref *refcnt) From a3a93b46833faf7421d4424ab78c8f0402423d04 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 1 Apr 2023 22:03:27 +0200 Subject: [PATCH 08/63] memstick: fix memory leak if card device is never registered commit 4b6d621c9d859ff89e68cebf6178652592676013 upstream. When calling dev_set_name() memory is allocated for the name for the struct device. 
Once that structure device is registered, or attempted to be registered, with the driver core, the driver core will handle cleaning up that memory when the device is removed from the system. Unfortunately for the memstick code, there is an error path that causes the struct device to never be registered, and so the memory allocated in dev_set_name will be leaked. Fix that leak by manually freeing it right before the memory for the device is freed. Cc: Maxim Levitsky Cc: Alex Dubov Cc: Ulf Hansson Cc: "Rafael J. Wysocki" Cc: Hans de Goede Cc: Kay Sievers Cc: linux-mmc@vger.kernel.org Fixes: 0252c3b4f018 ("memstick: struct device - replace bus_id with dev_name(), dev_set_name()") Cc: stable Co-developed-by: Greg Kroah-Hartman Signed-off-by: Greg Kroah-Hartman Co-developed-by: Mirsad Goran Todorovac Signed-off-by: Mirsad Goran Todorovac Link: https://lore.kernel.org/r/20230401200327.16800-1-gregkh@linuxfoundation.org Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/memstick/core/memstick.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 660df7d269fa..d410e2e78a3d 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -410,6 +410,7 @@ static struct memstick_dev *memstick_alloc_card(struct memstick_host *host) return card; err_out: host->card = old_card; + kfree_const(card->dev.kobj.name); kfree(card); return NULL; } @@ -468,8 +469,10 @@ static void memstick_check(struct work_struct *work) put_device(&card->dev); host->card = NULL; } - } else + } else { + kfree_const(card->dev.kobj.name); kfree(card); + } } out_power_off: From d9caa028d7ade869e0522078e62423f27f58e5b0 Mon Sep 17 00:00:00 2001 From: Bhavya Kapoor Date: Fri, 17 Mar 2023 14:57:11 +0530 Subject: [PATCH 09/63] mmc: sdhci_am654: Set HIGH_SPEED_ENA for SDR12 and SDR25 commit 2265098fd6a6272fde3fd1be5761f2f5895bd99a upstream. 
Timing Information in Datasheet assumes that HIGH_SPEED_ENA=1 should be set for SDR12 and SDR25 modes. But sdhci_am654 driver clears HIGH_SPEED_ENA register. Thus, Modify sdhci_am654 to not clear HIGH_SPEED_ENA (HOST_CONTROL[2]) bit for SDR12 and SDR25 speed modes. Fixes: e374e87538f4 ("mmc: sdhci_am654: Clear HISPD_ENA in some lower speed modes") Signed-off-by: Bhavya Kapoor Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230317092711.660897-1-b-kapoor@ti.com Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci_am654.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 101581d83982..8e22b375247e 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -351,8 +351,6 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) */ case MMC_TIMING_SD_HS: case MMC_TIMING_MMC_HS: - case MMC_TIMING_UHS_SDR12: - case MMC_TIMING_UHS_SDR25: val &= ~SDHCI_CTRL_HISPD; } } From 66eb772be27e228716bb81feee0400d995cbe605 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 29 Mar 2023 20:24:33 +0300 Subject: [PATCH 10/63] drm/i915: Fix fast wake AUX sync len MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit e1c71f8f918047ce822dc19b42ab1261ed259fd1 upstream. Fast wake should use 8 SYNC pulses for the preamble and 10-16 SYNC pulses for the precharge. Reduce our fast wake SYNC count to match the maximum value. We also use the maximum precharge length for normal AUX transactions. 
Cc: stable@vger.kernel.org Cc: Jouni Högander Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20230329172434.18744-1-ville.syrjala@linux.intel.com Reviewed-by: Jouni Högander (cherry picked from commit 605f7c73133341d4b762cbd9a22174cc22d4c38b) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/display/intel_dp_aux.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 48c375c65a41..7f3f2d50e6cd 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -165,7 +165,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_TIME_OUT_MAX | DP_AUX_CH_CTL_RECEIVE_ERROR | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); if (intel_tc_port_in_tbt_alt_mode(dig_port)) From bef774effb278ff0b65ea2dbaa1ab32ba6a1dc13 Mon Sep 17 00:00:00 2001 From: Alan Liu Date: Fri, 14 Apr 2023 18:39:52 +0800 Subject: [PATCH 11/63] drm/amdgpu: Fix desktop freezed after gpu-reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit c8b5a95b570949536a2b75cd8fc4f1de0bc60629 upstream. [Why] After gpu-reset, sometimes the driver fails to enable vblank irq, causing flip_done timed out and the desktop freezed. During gpu-reset, we disable and enable vblank irq in dm_suspend() and dm_resume(). Later on in amdgpu_irq_gpu_reset_resume_helper(), we check irqs' refcount and decide to enable or disable the irqs again. However, we have 2 sets of API for controling vblank irq, one is dm_vblank_get/put() and another is amdgpu_irq_get/put(). Each API has its own refcount and flag to store the state of vblank irq, and they are not synchronized. 
In drm we use the first API to control vblank irq but in amdgpu_irq_gpu_reset_resume_helper() we use the second set of API. The failure happens when vblank irq was enabled by dm_vblank_get() before gpu-reset, we have vblank->enabled true. However, during gpu-reset, in amdgpu_irq_gpu_reset_resume_helper() vblank irq's state checked from amdgpu_irq_update() is DISABLED. So finally it disables vblank irq again. After gpu-reset, if there is a cursor plane commit, the driver will try to enable vblank irq by calling drm_vblank_enable(), but the vblank->enabled is still true, so it fails to turn on vblank irq and causes flip_done can't be completed in vblank irq handler and desktop become freezed. [How] Combining the 2 vblank control APIs by letting drm's API finally calls amdgpu_irq's API, so the irq's refcount and state of both APIs can be synchronized. Also add a check to prevent refcount from being less then 0 in amdgpu_irq_put(). v2: - Add warning in amdgpu_irq_enable() if the irq is already disabled. - Call dc_interrupt_set() in dm_set_vblank() to avoid refcount change if it is in gpu-reset. v3: - Improve commit message and code comments. 
Signed-off-by: Alan Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3 +++ .../drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 17 ++++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 89011bae7588..ca5dc51600fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -653,6 +653,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, if (!src->enabled_types || !src->funcs->set) return -EINVAL; + if (WARN_ON(!amdgpu_irq_enabled(adev, src, type))) + return -EINVAL; + if (atomic_dec_and_test(&src->enabled_types[type])) return amdgpu_irq_update(adev, src, type); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index b87f50e8fa61..1ec643a0d00d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -167,10 +167,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) if (rc) return rc; - irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + if (amdgpu_in_reset(adev)) { + irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + /* During gpu-reset we disable and then enable vblank irq, so + * don't use amdgpu_irq_get/put() to avoid refcount change. + */ + if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) + rc = -EBUSY; + } else { + rc = (enable) + ? 
amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) + : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); + } - if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) - return -EBUSY; + if (rc) + return rc; skip: if (amdgpu_in_reset(adev)) From 4ac57c3fe2c0a74c6239170fc58fc824637c6015 Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 3 Apr 2023 10:13:12 -0400 Subject: [PATCH 12/63] drm/amd/display: set dcn315 lb bpp to 48 commit 6d9240c46f7419aa3210353b5f52cc63da5a6440 upstream. [Why & How] Fix a typo for dcn315 line buffer bpp. Reviewed-by: Jun Lei Acked-by: Qingqing Zhuo Signed-off-by: Dmytro Laktyushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 7dd0845d1bd9..8e416433184c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -222,7 +222,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = { .maximum_dsc_bits_per_component = 10, .dsc422_native_support = false, .is_line_buffer_bpp_fixed = true, - .line_buffer_fixed_bpp = 49, + .line_buffer_fixed_bpp = 48, .line_buffer_size_bits = 789504, .max_line_buffer_lines = 12, .writeback_interface_buffer_size_kbytes = 90, From 904e1b66854b012f4fe29f4a7cfabf726d98b4d2 Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Thu, 13 Apr 2023 16:43:47 +0200 Subject: [PATCH 13/63] drm/rockchip: vop2: fix suspend/resume commit afa965a45e01e541cdbe5c8018226eff117610f0 upstream. During a suspend/resume cycle the VO power domain will be disabled and the VOP2 registers will reset to their default values. 
After that the cached register values will be out of sync and the read/modify/write operations we do on the window registers will result in bogus values written. Fix this by re-initializing the register cache each time we enable the VOP2. With this the VOP2 will show a picture after a suspend/resume cycle whereas without this the screen stays dark. Fixes: 604be85547ce4 ("drm/rockchip: Add VOP2 driver") Cc: stable@vger.kernel.org Signed-off-by: Sascha Hauer Tested-by: Chris Morgan Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20230413144347.3506023-1-s.hauer@pengutronix.de Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 8cecf81a5ae0..86b290659bfd 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -216,6 +216,8 @@ struct vop2 { struct vop2_win win[]; }; +static const struct regmap_config vop2_regmap_config; + static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc) { return container_of(crtc, struct vop2_video_port, crtc); @@ -840,6 +842,12 @@ static void vop2_enable(struct vop2 *vop2) return; } + ret = regmap_reinit_cache(vop2->map, &vop2_regmap_config); + if (ret) { + drm_err(vop2->drm, "failed to reinit cache: %d\n", ret); + return; + } + if (vop2->data->soc_id == 3566) vop2_writel(vop2, RK3568_OTP_WIN_EN, 1); From b1644a0031cfb3ca2cbd84c92f771f8ebb62302d Mon Sep 17 00:00:00 2001 From: Sascha Hauer Date: Mon, 17 Apr 2023 14:37:47 +0200 Subject: [PATCH 14/63] drm/rockchip: vop2: Use regcache_sync() to fix suspend/resume commit b63a553e8f5aa6574eeb535a551817a93c426d8c upstream. afa965a45e01 ("drm/rockchip: vop2: fix suspend/resume") uses regmap_reinit_cache() to fix the suspend/resume issue with the VOP2 driver. 
During discussion it came up that we should rather use regcache_sync() instead. As the original patch is already applied fix this up in this follow-up patch. Fixes: afa965a45e01 ("drm/rockchip: vop2: fix suspend/resume") Cc: stable@vger.kernel.org Signed-off-by: Sascha Hauer Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20230417123747.2179695-1-s.hauer@pengutronix.de Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/rockchip/rockchip_drm_vop2.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index 86b290659bfd..3c05ce01f73b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -216,8 +216,6 @@ struct vop2 { struct vop2_win win[]; }; -static const struct regmap_config vop2_regmap_config; - static struct vop2_video_port *to_vop2_video_port(struct drm_crtc *crtc) { return container_of(crtc, struct vop2_video_port, crtc); @@ -842,11 +840,7 @@ static void vop2_enable(struct vop2 *vop2) return; } - ret = regmap_reinit_cache(vop2->map, &vop2_regmap_config); - if (ret) { - drm_err(vop2->drm, "failed to reinit cache: %d\n", ret); - return; - } + regcache_sync(vop2->map); if (vop2->data->soc_id == 3566) vop2_writel(vop2, RK3568_OTP_WIN_EN, 1); @@ -876,6 +870,8 @@ static void vop2_disable(struct vop2 *vop2) pm_runtime_put_sync(vop2->dev); + regcache_mark_dirty(vop2->map); + clk_disable_unprepare(vop2->aclk); clk_disable_unprepare(vop2->hclk); } From b75992d1f8b71e8fab0d3509e4c969ab11df5dd6 Mon Sep 17 00:00:00 2001 From: Udipto Goswami Date: Tue, 9 May 2023 20:18:36 +0530 Subject: [PATCH 15/63] UPSTREAM: usb: dwc3: debugfs: Resume dwc3 before accessing registers When the dwc3 device is runtime suspended, various required clocks are in disabled state and it is not guaranteed that access to any registers would work. 
Depending on the SoC glue, a register read could be as benign as returning 0 or be fatal enough to hang the system. In order to prevent such scenarios of fatal errors, make sure to resume dwc3 then allow the function to proceed. Fixes: 72246da40f37 ("usb: Introduce DesignWare USB3 DRD Driver") Cc: stable@vger.kernel.org #3.2: 30332eeefec8: debugfs: regset32: Add Runtime PM support Signed-off-by: Udipto Goswami Reviewed-by: Johan Hovold Tested-by: Johan Hovold Acked-by: Thinh Nguyen Link: https://lore.kernel.org/r/20230509144836.6803-1-quic_ugoswami@quicinc.com Signed-off-by: Greg Kroah-Hartman Bug: 282654910 (cherry picked from commit 614ce6a2ea50068b45339257891e51e639ac9001 usb-linus) Change-Id: Ie89d818b2d77681075cc517184ad1a5fa755dd88 Signed-off-by: Udipto Goswami --- drivers/usb/dwc3/debugfs.c | 109 +++++++++++++++++++++++++++++++++++++ 1 file changed, 109 insertions(+) diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index 850df0e6bcab..f0ffd2e5c642 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -327,6 +327,11 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused) unsigned int current_mode; unsigned long flags; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GSTS); @@ -345,6 +350,8 @@ static int dwc3_lsp_show(struct seq_file *s, void *unused) } spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -390,6 +397,11 @@ static int dwc3_mode_show(struct seq_file *s, void *unused) struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GCTL); @@ -409,6 +421,8 @@ static int dwc3_mode_show(struct seq_file *s, void *unused) seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg)); } + 
pm_runtime_put_sync(dwc->dev); + return 0; } @@ -458,6 +472,11 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) struct dwc3 *dwc = s->private; unsigned long flags; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_DCTL); @@ -488,6 +507,8 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) seq_printf(s, "UNKNOWN %d\n", reg); } + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -504,6 +525,7 @@ static ssize_t dwc3_testmode_write(struct file *file, unsigned long flags; u32 testmode = 0; char buf[32]; + int ret; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ -521,10 +543,16 @@ static ssize_t dwc3_testmode_write(struct file *file, else testmode = 0; + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; + spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_set_test_mode(dwc, testmode); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return count; } @@ -543,12 +571,18 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused) enum dwc3_link_state state; u32 reg; u8 speed; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GSTS); if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) { seq_puts(s, "Not available\n"); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); return 0; } @@ -561,6 +595,8 @@ static int dwc3_link_state_show(struct seq_file *s, void *unused) dwc3_gadget_hs_link_string(state)); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -579,6 +615,7 @@ static ssize_t dwc3_link_state_write(struct file *file, char buf[32]; u32 reg; u8 speed; + int ret; if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; @@ 
-598,10 +635,15 @@ static ssize_t dwc3_link_state_write(struct file *file, else return -EINVAL; + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; + spin_lock_irqsave(&dwc->lock, flags); reg = dwc3_readl(dwc->regs, DWC3_GSTS); if (DWC3_GSTS_CURMOD(reg) != DWC3_GSTS_CURMOD_DEVICE) { spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); return -EINVAL; } @@ -611,12 +653,15 @@ static ssize_t dwc3_link_state_write(struct file *file, if (speed < DWC3_DSTS_SUPERSPEED && state != DWC3_LINK_STATE_RECOV) { spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); return -EINVAL; } dwc3_gadget_set_link_state(dwc, state); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return count; } @@ -640,6 +685,11 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused) unsigned long flags; u32 mdwidth; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_TXFIFO); @@ -652,6 +702,8 @@ static int dwc3_tx_fifo_size_show(struct seq_file *s, void *unused) seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -662,6 +714,11 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused) unsigned long flags; u32 mdwidth; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_RXFIFO); @@ -674,6 +731,8 @@ static int dwc3_rx_fifo_size_show(struct seq_file *s, void *unused) seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -683,12 +742,19 @@ static int dwc3_tx_request_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = 
pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_TXREQQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -698,12 +764,19 @@ static int dwc3_rx_request_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_RXREQQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -713,12 +786,19 @@ static int dwc3_rx_info_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_RXINFOQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -728,12 +808,19 @@ static int dwc3_descriptor_fetch_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_DESCFETCHQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -743,12 +830,19 @@ static int dwc3_event_queue_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; u32 val; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); val = dwc3_core_fifo_space(dep, DWC3_EVENTQ); seq_printf(s, "%u\n", val); spin_unlock_irqrestore(&dwc->lock, flags); + 
pm_runtime_put_sync(dwc->dev); + return 0; } @@ -793,6 +887,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused) struct dwc3 *dwc = dep->dwc; unsigned long flags; int i; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); if (dep->number <= 1) { @@ -822,6 +921,8 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused) out: spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -834,6 +935,11 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused) u32 lower_32_bits; u32 upper_32_bits; u32 reg; + int ret; + + ret = pm_runtime_resume_and_get(dwc->dev); + if (ret < 0) + return ret; spin_lock_irqsave(&dwc->lock, flags); reg = DWC3_GDBGLSPMUX_EPSELECT(dep->number); @@ -846,6 +952,8 @@ static int dwc3_ep_info_register_show(struct seq_file *s, void *unused) seq_printf(s, "0x%016llx\n", ep_info); spin_unlock_irqrestore(&dwc->lock, flags); + pm_runtime_put_sync(dwc->dev); + return 0; } @@ -905,6 +1013,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc) dwc->regset->regs = dwc3_regs; dwc->regset->nregs = ARRAY_SIZE(dwc3_regs); dwc->regset->base = dwc->regs - DWC3_GLOBALS_REGS_START; + dwc->regset->dev = dwc->dev; root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root); dwc->debug_root = root; From 2200f18847d42b2f29a57fe3282f7e5313bc10ab Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Sat, 8 Apr 2023 21:33:48 +0100 Subject: [PATCH 16/63] UPSTREAM: MIPS: Define RUNTIME_DISCARD_EXIT in LD script commit 6dcbd0a69c84a8ae7a442840a8cf6b1379dc8f16 upstream. MIPS's exit sections are discarded at runtime as well. 
Fixes link error: `.exit.text' referenced in section `__jump_table' of fs/fuse/inode.o: defined in discarded section `.exit.text' of fs/fuse/inode.o Fixes: 99cb0d917ffa ("arch: fix broken BuildID for arm64 and riscv") Reported-by: "kernelci.org bot" Change-Id: I2acdca2a4b35005d411c57978f4765bcc936e093 Signed-off-by: Jiaxun Yang Signed-off-by: Thomas Bogendoerfer Signed-off-by: Greg Kroah-Hartman (cherry picked from commit f9a20ef5e83c4ae3a4b2deb5535b3913680768f2) Signed-off-by: Greg Kroah-Hartman --- arch/mips/kernel/vmlinux.lds.S | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 1f98947fe715..91d6a5360bb9 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -15,6 +15,8 @@ #define EMITS_PT_NOTE #endif +#define RUNTIME_DISCARD_EXIT + #include #undef mips From e0c86087dc8d4b77a1dbbf5851f72648247e0189 Mon Sep 17 00:00:00 2001 From: Alyssa Ross Date: Sun, 26 Mar 2023 18:21:21 +0000 Subject: [PATCH 17/63] UPSTREAM: purgatory: fix disabling debug info commit d83806c4c0cccc0d6d3c3581a11983a9c186a138 upstream. Since 32ef9e5054ec, -Wa,-gdwarf-2 is no longer used in KBUILD_AFLAGS. Instead, it includes -g, the appropriate -gdwarf-* flag, and also the -Wa versions of both of those if building with Clang and GNU as. As a result, debug info was being generated for the purgatory objects, even though the intention was that it not be. 
Fixes: 32ef9e5054ec ("Makefile.debug: re-enable debug info for .S files") Change-Id: I68ab786b0a713958e145908bbd26b7f842de5575 Signed-off-by: Alyssa Ross Cc: stable@vger.kernel.org Acked-by: Nick Desaulniers Signed-off-by: Masahiro Yamada Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 588d682251e64444bfab28baa86c6befb9d7ad05) Signed-off-by: Greg Kroah-Hartman --- arch/riscv/purgatory/Makefile | 4 +--- arch/x86/purgatory/Makefile | 3 +-- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/arch/riscv/purgatory/Makefile b/arch/riscv/purgatory/Makefile index dd58e1d99397..659e21862077 100644 --- a/arch/riscv/purgatory/Makefile +++ b/arch/riscv/purgatory/Makefile @@ -74,9 +74,7 @@ CFLAGS_string.o += $(PURGATORY_CFLAGS) CFLAGS_REMOVE_ctype.o += $(PURGATORY_CFLAGS_REMOVE) CFLAGS_ctype.o += $(PURGATORY_CFLAGS) -AFLAGS_REMOVE_entry.o += -Wa,-gdwarf-2 -AFLAGS_REMOVE_memcpy.o += -Wa,-gdwarf-2 -AFLAGS_REMOVE_memset.o += -Wa,-gdwarf-2 +asflags-remove-y += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x)) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 17f09dc26381..82fec66d46d2 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -69,8 +69,7 @@ CFLAGS_sha256.o += $(PURGATORY_CFLAGS) CFLAGS_REMOVE_string.o += $(PURGATORY_CFLAGS_REMOVE) CFLAGS_string.o += $(PURGATORY_CFLAGS) -AFLAGS_REMOVE_setup-x86_$(BITS).o += -Wa,-gdwarf-2 -AFLAGS_REMOVE_entry64.o += -Wa,-gdwarf-2 +asflags-remove-y += $(foreach x, -g -gdwarf-4 -gdwarf-5, $(x) -Wa,$(x)) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) From 8b5229c547a6314e015de7ea6afc27b15caa371c Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 19 Oct 2022 15:35:59 -0700 Subject: [PATCH 18/63] UPSTREAM: inet6: Remove inet6_destroy_sock() in sk->sk_prot->destroy(). commit b5fc29233d28be7a3322848ebe73ac327559cdb9 upstream. 
After commit d38afeec26ed ("tcp/udp: Call inet6_destroy_sock() in IPv6 sk->sk_destruct()."), we call inet6_destroy_sock() in sk->sk_destruct() by setting inet6_sock_destruct() to it to make sure we do not leak inet6-specific resources. Now we can remove unnecessary inet6_destroy_sock() calls in sk->sk_prot->destroy(). DCCP and SCTP have their own sk->sk_destruct() function, so we change them separately in the following patches. Change-Id: Iae566ce3b7a73584548fc88c950518b53194c5df Signed-off-by: Kuniyuki Iwashima Reviewed-by: Matthieu Baerts Signed-off-by: David S. Miller Signed-off-by: Ziyang Xuan Signed-off-by: Greg Kroah-Hartman (cherry picked from commit a8cf1141057a04e9630e73a4d543f10a939980b2) Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ping.c | 6 ------ net/ipv6/raw.c | 2 -- net/ipv6/tcp_ipv6.c | 8 +------- net/ipv6/udp.c | 2 -- net/l2tp/l2tp_ip6.c | 2 -- net/mptcp/protocol.c | 7 ------- 6 files changed, 1 insertion(+), 26 deletions(-) diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 86c26e48d065..808983bc2ec9 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c @@ -23,11 +23,6 @@ #include #include -static void ping_v6_destroy(struct sock *sk) -{ - inet6_destroy_sock(sk); -} - /* Compatibility glue so we can support IPv6 when it's compiled as a module */ static int dummy_ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) @@ -205,7 +200,6 @@ struct proto pingv6_prot = { .owner = THIS_MODULE, .init = ping_init_sock, .close = ping_close, - .destroy = ping_v6_destroy, .pre_connect = ping_v6_pre_connect, .connect = ip6_datagram_connect_v6_only, .disconnect = __udp_disconnect, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 9ee1506e23ab..4fc511bdf176 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1175,8 +1175,6 @@ static void raw6_destroy(struct sock *sk) lock_sock(sk); ip6_flush_pending_frames(sk); release_sock(sk); - - inet6_destroy_sock(sk); } static int rawv6_init_sk(struct sock *sk) diff --git a/net/ipv6/tcp_ipv6.c 
b/net/ipv6/tcp_ipv6.c index ea1ecf5fe947..81afb40bfc0b 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1951,12 +1951,6 @@ static int tcp_v6_init_sock(struct sock *sk) return 0; } -static void tcp_v6_destroy_sock(struct sock *sk) -{ - tcp_v4_destroy_sock(sk); - inet6_destroy_sock(sk); -} - #ifdef CONFIG_PROC_FS /* Proc filesystem TCPv6 sock list dumping. */ static void get_openreq6(struct seq_file *seq, @@ -2149,7 +2143,7 @@ struct proto tcpv6_prot = { .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v6_init_sock, - .destroy = tcp_v6_destroy_sock, + .destroy = tcp_v4_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 17d721a6add7..0b8127988adb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1668,8 +1668,6 @@ void udpv6_destroy_sock(struct sock *sk) udp_encap_disable(); } } - - inet6_destroy_sock(sk); } /* diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 9db7f4f5a441..5137ea1861ce 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -257,8 +257,6 @@ static void l2tp_ip6_destroy_sock(struct sock *sk) if (tunnel) l2tp_tunnel_delete(tunnel); - - inet6_destroy_sock(sk); } static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 19f35869a164..b1bbb0b75a13 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -3939,12 +3939,6 @@ static const struct proto_ops mptcp_v6_stream_ops = { static struct proto mptcp_v6_prot; -static void mptcp_v6_destroy(struct sock *sk) -{ - mptcp_destroy(sk); - inet6_destroy_sock(sk); -} - static struct inet_protosw mptcp_v6_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_MPTCP, @@ -3960,7 +3954,6 @@ int __init mptcp_proto_v6_init(void) mptcp_v6_prot = mptcp_prot; strcpy(mptcp_v6_prot.name, "MPTCPv6"); mptcp_v6_prot.slab = NULL; - mptcp_v6_prot.destroy = mptcp_v6_destroy; mptcp_v6_prot.obj_size = 
sizeof(struct mptcp6_sock); err = proto_register(&mptcp_v6_prot, 1); From 10e4c804f2f9a612d8c43d36fdde20eb7b295fc6 Mon Sep 17 00:00:00 2001 From: Cixi Geng Date: Tue, 16 May 2023 17:14:56 +0800 Subject: [PATCH 19/63] ANDROID: GKI: add symbol list file for unisoc Add abi_gki_aarch64_unisoc 4 function symbol(s) added 'struct hwspinlock* devm_hwspin_lock_request_specific(struct device*, unsigned int)' 'void sdhci_enable_v4_mode(struct sdhci_host*)' 'void sdhci_request(struct mmc_host*, struct mmc_request*)' 'int sdhci_request_atomic(struct mmc_host*, struct mmc_request*)' Bug: 282902304 Change-Id: I70eaba8be31407d3486b6e845241681c9995f27e Signed-off-by: Cixi Geng --- BUILD.bazel | 1 + android/abi_gki_aarch64.stg | 58 +++++++ android/abi_gki_aarch64_unisoc | 289 +++++++++++++++++++++++++++++++++ 3 files changed, 348 insertions(+) create mode 100644 android/abi_gki_aarch64_unisoc diff --git a/BUILD.bazel b/BUILD.bazel index ff684be5f6e9..e6272a8dba4a 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -43,6 +43,7 @@ filegroup( "android/abi_gki_aarch64_oplus", "android/abi_gki_aarch64_pixel", "android/abi_gki_aarch64_qcom", + "android/abi_gki_aarch64_unisoc", "android/abi_gki_aarch64_virtual_device", "android/abi_gki_aarch64_vivo", "android/abi_gki_aarch64_xiaomi", diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index a34e7522cb64..fb992fc1426c 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -313074,6 +313074,12 @@ function { return_type_id: 0x4585663f parameter_id: 0x31fa879c } +function { + id: 0xc2e99087 + return_type_id: 0x0ab9fa4c + parameter_id: 0x0258f96e + parameter_id: 0x4585663f +} function { id: 0xc3320c3e return_type_id: 0x4585663f @@ -332172,6 +332178,15 @@ elf_symbol { type_id: 0x9d27e8b1 full_name: "devm_hwspin_lock_register" } +elf_symbol { + id: 0xe896baa8 + name: "devm_hwspin_lock_request_specific" + is_defined: true + symbol_type: FUNCTION + crc: 0x36d01cc2 + type_id: 0xc2e99087 + full_name: 
"devm_hwspin_lock_request_specific" +} elf_symbol { id: 0xa29138c1 name: "devm_i2c_new_dummy_device" @@ -359220,6 +359235,15 @@ elf_symbol { type_id: 0x1c822746 full_name: "sdhci_enable_clk" } +elf_symbol { + id: 0x5ab300fb + name: "sdhci_enable_v4_mode" + is_defined: true + symbol_type: FUNCTION + crc: 0x80ef52da + type_id: 0x1ec711b9 + full_name: "sdhci_enable_v4_mode" +} elf_symbol { id: 0x33d40ef0 name: "sdhci_execute_tuning" @@ -359265,6 +359289,24 @@ elf_symbol { type_id: 0x1f5b92f5 full_name: "sdhci_remove_host" } +elf_symbol { + id: 0xdc85b8be + name: "sdhci_request" + is_defined: true + symbol_type: FUNCTION + crc: 0x4885a3af + type_id: 0x1dbab156 + full_name: "sdhci_request" +} +elf_symbol { + id: 0xd1dc3f24 + name: "sdhci_request_atomic" + is_defined: true + symbol_type: FUNCTION + crc: 0x30c50c36 + type_id: 0x90a203ea + full_name: "sdhci_request_atomic" +} elf_symbol { id: 0xa6a2da07 name: "sdhci_reset" @@ -380177,6 +380219,10 @@ symbols { key: "devm_hwspin_lock_register" value: 0x40c3a63b } + symbol { + key: "devm_hwspin_lock_request_specific" + value: 0xe896baa8 + } symbol { key: "devm_i2c_new_dummy_device" value: 0xa29138c1 @@ -392189,6 +392235,10 @@ symbols { key: "sdhci_enable_clk" value: 0x6febaf59 } + symbol { + key: "sdhci_enable_v4_mode" + value: 0x5ab300fb + } symbol { key: "sdhci_execute_tuning" value: 0x33d40ef0 @@ -392209,6 +392259,14 @@ symbols { key: "sdhci_remove_host" value: 0xad3b5931 } + symbol { + key: "sdhci_request" + value: 0xdc85b8be + } + symbol { + key: "sdhci_request_atomic" + value: 0xd1dc3f24 + } symbol { key: "sdhci_reset" value: 0xa6a2da07 diff --git a/android/abi_gki_aarch64_unisoc b/android/abi_gki_aarch64_unisoc new file mode 100644 index 000000000000..9f5036f9e8e8 --- /dev/null +++ b/android/abi_gki_aarch64_unisoc @@ -0,0 +1,289 @@ +[abi_symbol_list] +# commonly used symbols + alt_cb_patch_nops + arm64_use_ng_mappings + clk_disable + clk_enable + clk_get_rate + clk_prepare + clk_unprepare + __const_udelay + 
debugfs_create_dir + debugfs_create_file + debugfs_remove + _dev_err + dev_err_probe + dev_get_regmap + _dev_info + devm_clk_get + devm_gpiochip_add_data_with_key + devm_ioremap_resource + devm_kmalloc + devm_platform_ioremap_resource + devm_regulator_register + devm_request_threaded_irq + devm_spi_register_controller + _dev_warn + gpiochip_disable_irq + gpiochip_enable_irq + gpiochip_get_data + gpiochip_irq_relres + gpiochip_irq_reqres + handle_bad_irq + handle_edge_irq + handle_level_irq + irq_get_irq_data + __irq_resolve_mapping + kfree + __kmalloc + kmalloc_caches + kmalloc_trace + ktime_get + ktime_get_mono_fast_ns + __list_add_valid + __list_del_entry_valid + log_post_read_mmio + log_post_write_mmio + log_read_mmio + log_write_mmio + memcpy + memstart_addr + module_layout + __mutex_init + mutex_lock + mutex_unlock + of_alias_get_id + of_device_get_match_data + of_property_read_variable_u32_array + __platform_driver_register + platform_driver_unregister + platform_get_irq + platform_get_resource + __pm_runtime_disable + pm_runtime_enable + pm_runtime_set_autosuspend_delay + __pm_runtime_set_status + __pm_runtime_suspend + __pm_runtime_use_autosuspend + _printk + put_device + __put_task_struct + _raw_spin_lock_irqsave + _raw_spin_unlock_irqrestore + regmap_read + regmap_update_bits_base + regmap_write + regulator_disable_regmap + regulator_enable_regmap + regulator_get_voltage_sel_regmap + regulator_is_enabled_regmap + regulator_list_voltage_linear + regulator_set_voltage_sel_regmap + seq_lseek + seq_printf + seq_puts + seq_read + sg_next + single_open + single_release + __spi_alloc_controller + __stack_chk_fail + strcmp + usleep_range_state + +# required by clk-sprd.ko + clk_hw_get_num_parents + clk_hw_get_parent + clk_hw_is_enabled + __clk_mux_determine_rate + device_node_to_regmap + devm_clk_hw_register + devm_of_clk_add_hw_provider + __devm_regmap_init_mmio_clk + divider_get_val + divider_recalc_rate + divider_round_rate_parent + of_clk_hw_onecell_get + 
of_device_is_compatible + of_find_property + of_get_parent + syscon_regmap_lookup_by_phandle + __udelay + +# required by gpio-eic-sprd.ko + generic_handle_irq + gpiochip_find + +# required by gpio-pmic-eic-sprd.ko + _find_next_bit + handle_nested_irq + +# required by gpio-sprd.ko + generic_handle_domain_irq + __platform_driver_probe + +# required by mmc_hsq.ko + finish_wait + init_wait_entry + __init_waitqueue_head + mmc_cqe_request_done + prepare_to_wait_event + queue_work_on + _raw_spin_lock_irq + _raw_spin_unlock_irq + schedule + schedule_timeout + system_wq + __wake_up + +# required by pwm-sprd.ko + clk_bulk_disable + clk_bulk_enable + clk_bulk_prepare + clk_bulk_unprepare + devm_clk_bulk_get + of_property_read_string_helper + pwmchip_add + pwmchip_remove + +# required by sc2730-regulator.ko + generic_file_llseek + regulator_map_voltage_linear + simple_attr_open + simple_attr_read + simple_attr_release + simple_attr_write + +# required by sdhci-sprd.ko + clk_round_rate + devm_pinctrl_get + mmc_of_parse + mmc_regulator_set_vqmmc + mmc_request_done + pinctrl_lookup_state + pinctrl_select_state + pm_runtime_force_resume + pm_runtime_force_suspend + __sdhci_add_host + sdhci_cleanup_host + sdhci_enable_clk + sdhci_enable_v4_mode + sdhci_pltfm_free + sdhci_pltfm_init + sdhci_remove_host + sdhci_request + sdhci_request_atomic + sdhci_reset + sdhci_runtime_resume_host + sdhci_runtime_suspend_host + sdhci_set_bus_width + sdhci_setup_host + +# required by spi-sprd-adi.ko + _dev_emerg + devm_hwspin_lock_request_specific + __hwspin_lock_timeout + __hwspin_unlock + of_get_next_child + of_get_property + of_hwspin_lock_get_id + register_restart_handler + strncmp + unregister_restart_handler + +# required by spi-sprd.ko + clk_set_parent + complete + dma_release_channel + dma_request_chan + __init_swait_queue_head + __pm_runtime_resume + spi_controller_suspend + spi_finalize_current_transfer + wait_for_completion + +# required by sprd-sc27xx-spi.ko + device_set_wakeup_capable + 
device_wakeup_enable + devm_of_platform_populate + devm_regmap_add_irq_chip + __devm_regmap_init + driver_unregister + irq_set_irq_wake + __spi_register_driver + spi_sync + +# required by sprd_hwspinlock.ko + devm_add_action + devm_hwspin_lock_register + +# required by sprd_power_manager.ko + del_timer + fortify_panic + init_timer_key + jiffies + jiffies_to_msecs + mod_timer + __msecs_to_jiffies + __pm_relax + __pm_stay_awake + register_pm_notifier + register_reboot_notifier + snprintf + strnlen + unregister_pm_notifier + unregister_reboot_notifier + wakeup_source_add + wakeup_source_create + +# required by system_heap.ko + __alloc_pages + dma_buf_export + dma_heap_add + dma_heap_get_dev + dma_heap_get_name + dma_map_sgtable + dma_set_coherent_mask + dma_set_mask + dma_sync_sg_for_cpu + dma_sync_sg_for_device + dma_unmap_sg_attrs + __free_pages + remap_pfn_range + sg_alloc_table + sg_free_table + __sg_page_iter_next + __sg_page_iter_start + vfree + vmalloc + vmap + vunmap + +# required by ums512-clk.ko + clk_fixed_factor_ops + device_get_match_data + devm_reset_controller_register + +# required by unisoc-iommu.ko + blocking_notifier_call_chain + blocking_notifier_chain_register + dma_alloc_attrs + dma_free_attrs + gen_pool_add_owner + gen_pool_alloc_algo_owner + gen_pool_avail + gen_pool_create + gen_pool_destroy + gen_pool_free_owner + __get_free_pages + ioremap_prot + iounmap + kimage_voffset + kmalloc_large + memset32 + memset + of_address_to_resource + of_count_phandle_with_args + of_match_node + __of_parse_phandle_with_args + of_property_read_string From 8cc757d50bbe3ddb19ae64a615912d64b8a0c85d Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 2 May 2023 10:25:24 +0200 Subject: [PATCH 20/63] UPSTREAM: netfilter: nf_tables: deactivate anonymous set from preparation phase commit c1592a89942e9678f7d9c8030efa777c0d57edab upstream. Toggle deleted anonymous sets as inactive in the next generation, so users cannot perform any update on it. 
Clear the generation bitmask in case the transaction is aborted. The following KASAN splat shows a set element deletion for a bound anonymous set that has been already removed in the same transaction. [ 64.921510] ================================================================== [ 64.923123] BUG: KASAN: wild-memory-access in nf_tables_commit+0xa24/0x1490 [nf_tables] [ 64.924745] Write of size 8 at addr dead000000000122 by task test/890 [ 64.927903] CPU: 3 PID: 890 Comm: test Not tainted 6.3.0+ #253 [ 64.931120] Call Trace: [ 64.932699] [ 64.934292] dump_stack_lvl+0x33/0x50 [ 64.935908] ? nf_tables_commit+0xa24/0x1490 [nf_tables] [ 64.937551] kasan_report+0xda/0x120 [ 64.939186] ? nf_tables_commit+0xa24/0x1490 [nf_tables] [ 64.940814] nf_tables_commit+0xa24/0x1490 [nf_tables] [ 64.942452] ? __kasan_slab_alloc+0x2d/0x60 [ 64.944070] ? nf_tables_setelem_notify+0x190/0x190 [nf_tables] [ 64.945710] ? kasan_set_track+0x21/0x30 [ 64.947323] nfnetlink_rcv_batch+0x709/0xd90 [nfnetlink] [ 64.948898] ? 
nfnetlink_rcv_msg+0x480/0x480 [nfnetlink] Bug: 282877000 Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman Signed-off-by: Lee Jones Change-Id: I536b7fbec55a5b37a57546023891a3dcfeb2c24b --- include/net/netfilter/nf_tables.h | 1 + net/netfilter/nf_tables_api.c | 12 ++++++++++++ net/netfilter/nft_dynset.c | 2 +- net/netfilter/nft_lookup.c | 2 +- net/netfilter/nft_objref.c | 2 +- 5 files changed, 16 insertions(+), 3 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 6bacbf57ac17..a1ccf1276f3e 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -614,6 +614,7 @@ struct nft_set_binding { }; enum nft_trans_phase; +void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set); void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding, enum nft_trans_phase phase); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 12d815b9aa13..f82a61d92aa9 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -4936,12 +4936,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, } } +void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set) +{ + if (nft_set_is_anonymous(set)) + nft_clear(ctx->net, set); + + set->use++; +} +EXPORT_SYMBOL_GPL(nf_tables_activate_set); + void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *binding, enum nft_trans_phase phase) { switch (phase) { case NFT_TRANS_PREPARE: + if (nft_set_is_anonymous(set)) + nft_deactivate_next(ctx->net, set); + set->use--; return; case NFT_TRANS_ABORT: diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 6983e6ddeef9..e65a83328b55 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx, { struct 
nft_dynset *priv = nft_expr_priv(expr); - priv->set->use++; + nf_tables_activate_set(ctx, priv->set); } static void nft_dynset_destroy(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index d9ad1aa81856..68a5dea80548 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c @@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx, { struct nft_lookup *priv = nft_expr_priv(expr); - priv->set->use++; + nf_tables_activate_set(ctx, priv->set); } static void nft_lookup_destroy(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index 5d8d91b3904d..7f8e480b6be5 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c @@ -184,7 +184,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx, { struct nft_objref_map *priv = nft_expr_priv(expr); - priv->set->use++; + nf_tables_activate_set(ctx, priv->set); } static void nft_objref_map_destroy(const struct nft_ctx *ctx, From e1dc9c79c2356ba2a5cc8611ec8f69195f8a547b Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 19 Oct 2022 15:36:00 -0700 Subject: [PATCH 21/63] UPSTREAM: dccp: Call inet6_destroy_sock() via sk->sk_destruct(). commit 1651951ebea54970e0bda60c638fc2eee7a6218f upstream. After commit d38afeec26ed ("tcp/udp: Call inet6_destroy_sock() in IPv6 sk->sk_destruct()."), we call inet6_destroy_sock() in sk->sk_destruct() by setting inet6_sock_destruct() to it to make sure we do not leak inet6-specific resources. DCCP sets its own sk->sk_destruct() in the dccp_init_sock(), and DCCPv6 socket shares it by calling the same init function via dccp_v6_init_sock(). To call inet6_sock_destruct() from DCCPv6 sk->sk_destruct(), we export it and set dccp_v6_sk_destruct() in the init function. Change-Id: I1aa8f30c780796bb5d446874bb44113783d6460a Signed-off-by: Kuniyuki Iwashima Signed-off-by: David S. 
Miller Signed-off-by: Ziyang Xuan Signed-off-by: Greg Kroah-Hartman (cherry picked from commit a530b33fe98691b88c46128e9c07019696ab247e) Signed-off-by: Greg Kroah-Hartman --- net/dccp/dccp.h | 1 + net/dccp/ipv6.c | 15 ++++++++------- net/dccp/proto.c | 8 +++++++- net/ipv6/af_inet6.c | 1 + 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h index 7dfc00c9fb32..9ddc3a9e89e4 100644 --- a/net/dccp/dccp.h +++ b/net/dccp/dccp.h @@ -278,6 +278,7 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, int dccp_rcv_established(struct sock *sk, struct sk_buff *skb, const struct dccp_hdr *dh, const unsigned int len); +void dccp_destruct_common(struct sock *sk); int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized); void dccp_destroy_sock(struct sock *sk); diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 7a736c352dc4..b9d7c3dd1cb3 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -1004,6 +1004,12 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { .sockaddr_len = sizeof(struct sockaddr_in6), }; +static void dccp_v6_sk_destruct(struct sock *sk) +{ + dccp_destruct_common(sk); + inet6_sock_destruct(sk); +} + /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. 
*/ @@ -1016,17 +1022,12 @@ static int dccp_v6_init_sock(struct sock *sk) if (unlikely(!dccp_v6_ctl_sock_initialized)) dccp_v6_ctl_sock_initialized = 1; inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops; + sk->sk_destruct = dccp_v6_sk_destruct; } return err; } -static void dccp_v6_destroy_sock(struct sock *sk) -{ - dccp_destroy_sock(sk); - inet6_destroy_sock(sk); -} - static struct timewait_sock_ops dccp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct dccp6_timewait_sock), }; @@ -1049,7 +1050,7 @@ static struct proto dccp_v6_prot = { .accept = inet_csk_accept, .get_port = inet_csk_get_port, .shutdown = dccp_shutdown, - .destroy = dccp_v6_destroy_sock, + .destroy = dccp_destroy_sock, .orphan_count = &dccp_orphan_count, .max_header = MAX_DCCP_HEADER, .obj_size = sizeof(struct dccp6_sock), diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 85e35c5e8890..a06b5641287a 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -171,12 +171,18 @@ const char *dccp_packet_name(const int type) EXPORT_SYMBOL_GPL(dccp_packet_name); -static void dccp_sk_destruct(struct sock *sk) +void dccp_destruct_common(struct sock *sk) { struct dccp_sock *dp = dccp_sk(sk); ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk); dp->dccps_hc_tx_ccid = NULL; +} +EXPORT_SYMBOL_GPL(dccp_destruct_common); + +static void dccp_sk_destruct(struct sock *sk) +{ + dccp_destruct_common(sk); inet_sock_destruct(sk); } diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index fb1bf6eb0ff8..b5309ae87fd7 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -114,6 +114,7 @@ void inet6_sock_destruct(struct sock *sk) inet6_cleanup_sock(sk); inet_sock_destruct(sk); } +EXPORT_SYMBOL_GPL(inet6_sock_destruct); static int inet6_create(struct net *net, struct socket *sock, int protocol, int kern) From 90f84684abeeb6a227c5b53ecb05308ecdc523be Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Wed, 19 Oct 2022 15:36:01 -0700 Subject: [PATCH 22/63] UPSTREAM: sctp: Call inet6_destroy_sock() via sk->sk_destruct(). 
commit 6431b0f6ff1633ae598667e4cdd93830074a03e8 upstream. After commit d38afeec26ed ("tcp/udp: Call inet6_destroy_sock() in IPv6 sk->sk_destruct()."), we call inet6_destroy_sock() in sk->sk_destruct() by setting inet6_sock_destruct() to it to make sure we do not leak inet6-specific resources. SCTP sets its own sk->sk_destruct() in the sctp_init_sock(), and SCTPv6 socket reuses it as the init function. To call inet6_sock_destruct() from SCTPv6 sk->sk_destruct(), we set sctp_v6_destruct_sock() in a new init function. Change-Id: Ie3beb7e182e26def3fb7b50fef029fd5a8c94fc7 Signed-off-by: Kuniyuki Iwashima Signed-off-by: David S. Miller Signed-off-by: Ziyang Xuan Signed-off-by: Greg Kroah-Hartman (cherry picked from commit a09b9383b7495681c9bae41752ee456cf42e41f0) Signed-off-by: Greg Kroah-Hartman --- net/sctp/socket.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 507b2ad5ef7c..17185200079d 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5102,13 +5102,17 @@ static void sctp_destroy_sock(struct sock *sk) } /* Triggered when there are no references on the socket anymore */ -static void sctp_destruct_sock(struct sock *sk) +static void sctp_destruct_common(struct sock *sk) { struct sctp_sock *sp = sctp_sk(sk); /* Free up the HMAC transform. 
*/ crypto_free_shash(sp->hmac); +} +static void sctp_destruct_sock(struct sock *sk) +{ + sctp_destruct_common(sk); inet_sock_destruct(sk); } @@ -9431,7 +9435,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, sctp_sk(newsk)->reuse = sp->reuse; newsk->sk_shutdown = sk->sk_shutdown; - newsk->sk_destruct = sctp_destruct_sock; + newsk->sk_destruct = sk->sk_destruct; newsk->sk_family = sk->sk_family; newsk->sk_protocol = IPPROTO_SCTP; newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; @@ -9666,11 +9670,20 @@ struct proto sctp_prot = { #if IS_ENABLED(CONFIG_IPV6) -#include -static void sctp_v6_destroy_sock(struct sock *sk) +static void sctp_v6_destruct_sock(struct sock *sk) { - sctp_destroy_sock(sk); - inet6_destroy_sock(sk); + sctp_destruct_common(sk); + inet6_sock_destruct(sk); +} + +static int sctp_v6_init_sock(struct sock *sk) +{ + int ret = sctp_init_sock(sk); + + if (!ret) + sk->sk_destruct = sctp_v6_destruct_sock; + + return ret; } struct proto sctpv6_prot = { @@ -9680,8 +9693,8 @@ struct proto sctpv6_prot = { .disconnect = sctp_disconnect, .accept = sctp_accept, .ioctl = sctp_ioctl, - .init = sctp_init_sock, - .destroy = sctp_v6_destroy_sock, + .init = sctp_v6_init_sock, + .destroy = sctp_destroy_sock, .shutdown = sctp_shutdown, .setsockopt = sctp_setsockopt, .getsockopt = sctp_getsockopt, From 6bf110bb7a6c92f38a6a4c99a102823d032fd8ae Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 23 Apr 2023 09:56:20 -0700 Subject: [PATCH 23/63] UPSTREAM: gcc: disable '-Warray-bounds' for gcc-13 too commit 0da6e5fd6c3726723e275603426e09178940dace upstream. We started disabling '-Warray-bounds' for gcc-12 originally on s390, because it resulted in some warnings that weren't realistically fixable (commit 8b202ee21839: "s390: disable -Warray-bounds"). 
That s390-specific issue was then found to be less common elsewhere, but generic (see f0be87c42cbd: "gcc-12: disable '-Warray-bounds' universally for now"), and then later expanded the version check was expanded to gcc-11 (5a41237ad1d4: "gcc: disable -Warray-bounds for gcc-11 too"). And it turns out that I was much too optimistic in thinking that it's all going to go away, and here we are with gcc-13 showing all the same issues. So instead of expanding this one version at a time, let's just disable it for gcc-11+, and put an end limit to it only when we actually find a solution. Yes, I'm sure some of this is because the kernel just does odd things (like our "container_of()" use, but also knowingly playing games with things like linker tables and array layouts). And yes, some of the warnings are likely signs of real bugs, but when there are hundreds of false positives, that doesn't really help. Oh well. Change-Id: Ie09553dec193c593f34169f835a876ea81b7cb6d Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman (cherry picked from commit a93c20f5832221c2bf5f80199c4eaebc0ba28e16) Signed-off-by: Greg Kroah-Hartman --- init/Kconfig | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index 12568107badf..1c860bacf753 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -892,18 +892,14 @@ config CC_IMPLICIT_FALLTHROUGH default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5) default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough) -# Currently, disable gcc-11,12 array-bounds globally. -# We may want to target only particular configurations some day. +# Currently, disable gcc-11+ array-bounds globally. +# It's still broken in gcc-13, so no upper bound yet. 
config GCC11_NO_ARRAY_BOUNDS def_bool y -config GCC12_NO_ARRAY_BOUNDS - def_bool y - config CC_NO_ARRAY_BOUNDS bool - default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS - default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS + default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS # # For architectures that know their GCC __int128 support is sound From 690f3e949db8678e28f41f6a8ce4bb94f148d350 Mon Sep 17 00:00:00 2001 From: Soumya Negi Date: Sun, 9 Apr 2023 19:12:04 -0700 Subject: [PATCH 24/63] UPSTREAM: Input: pegasus-notetaker - check pipe type when probing commit b3d80fd27a3c2d8715a40cbf876139b56195f162 upstream. Fix WARNING in pegasus_open/usb_submit_urb Syzbot bug: https://syzkaller.appspot.com/bug?id=bbc107584dcf3262253ce93183e51f3612aaeb13 Warning raised because pegasus_driver submits transfer request for bogus URB (pipe type does not match endpoint type). Add sanity check at probe time for pipe value extracted from endpoint descriptor. Probe will fail if sanity check fails. 
Reported-and-tested-by: syzbot+04ee0cb4caccaed12d78@syzkaller.appspotmail.com Change-Id: Iedb606676db9329b44bc530edf017c80f4fa4263 Signed-off-by: Soumya Negi Link: https://lore.kernel.org/r/20230404074145.11523-1-soumya.negi97@gmail.com Signed-off-by: Dmitry Torokhov Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 342c1db4fa8c005d96efaf78cc87e3876b55cfe3) Signed-off-by: Greg Kroah-Hartman --- drivers/input/tablet/pegasus_notetaker.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c index d836d3dcc6a2..a68da2988f9c 100644 --- a/drivers/input/tablet/pegasus_notetaker.c +++ b/drivers/input/tablet/pegasus_notetaker.c @@ -296,6 +296,12 @@ static int pegasus_probe(struct usb_interface *intf, pegasus->intf = intf; pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress); + /* Sanity check that pipe's type matches endpoint's type */ + if (usb_pipe_type_check(dev, pipe)) { + error = -EINVAL; + goto err_free_mem; + } + pegasus->data_len = usb_maxpacket(dev, pipe); pegasus->data = usb_alloc_coherent(dev, pegasus->data_len, GFP_KERNEL, From 0c69b18d8e6c48efb6f4b12cc1fce8d8e64a7d20 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 29 Mar 2023 07:35:32 +0300 Subject: [PATCH 25/63] UPSTREAM: iio: adc: at91-sama5d2_adc: fix an error code in at91_adc_allocate_trigger() commit 73a428b37b9b538f8f8fe61caa45e7f243bab87c upstream. The at91_adc_allocate_trigger() function is supposed to return error pointers. Returning a NULL will cause an Oops. 
Fixes: 5e1a1da0f8c9 ("iio: adc: at91-sama5d2_adc: add hw trigger and buffer support") Change-Id: I31db683d8467f130b1795093287e0eacee2a776a Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/5d728f9d-31d1-410d-a0b3-df6a63a2c8ba@kili.mountain Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman (cherry picked from commit f8c3eb751a9bdbd1371da17f856d030bcde91f8e) Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/at91-sama5d2_adc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index 870f4cb60923..3ad5678f2613 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c @@ -1409,7 +1409,7 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *indio, trig = devm_iio_trigger_alloc(&indio->dev, "%s-dev%d-%s", indio->name, iio_device_id(indio), trigger_name); if (!trig) - return NULL; + return ERR_PTR(-ENOMEM); trig->dev.parent = indio->dev.parent; iio_trigger_set_drvdata(trig, indio); From fe43fe9cce4e37043f17b5c9281653e8e7649fb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexis=20Lothor=C3=A9?= Date: Tue, 4 Apr 2023 15:31:02 +0200 Subject: [PATCH 26/63] UPSTREAM: fpga: bridge: properly initialize bridge device before populating children MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit dc70eb868b9cd2ca01313e5a394e6ea001d513e9 upstream. The current code path can lead to warnings because of uninitialized device, which contains, as a consequence, uninitialized kobject. The uninitialized device is passed to of_platform_populate, which will at some point, while creating child device, try to get a reference on uninitialized parent, resulting in the following warning: kobject: '(null)' ((ptrval)): is not initialized, yet kobject_get() is being called. The warning is observed after migrating a kernel 5.10.x to 6.1.x. 
Reverting commit 0d70af3c2530 ("fpga: bridge: Use standard dev_release for class driver") seems to remove the warning. This commit aggregates device_initialize() and device_add() into device_register() but this new call is done AFTER of_platform_populate Fixes: 0d70af3c2530 ("fpga: bridge: Use standard dev_release for class driver") Change-Id: I4726f74122c2755c1b14b66f40250306b1a796b2 Signed-off-by: Alexis Lothoré Acked-by: Xu Yilun Link: https://lore.kernel.org/r/20230404133102.2837535-2-alexis.lothore@bootlin.com Signed-off-by: Xu Yilun Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 71b6df69f17e5dc31aa25a8d292980aabc8a703c) Signed-off-by: Greg Kroah-Hartman --- drivers/fpga/fpga-bridge.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c index 727704431f61..13918c8c839e 100644 --- a/drivers/fpga/fpga-bridge.c +++ b/drivers/fpga/fpga-bridge.c @@ -360,7 +360,6 @@ fpga_bridge_register(struct device *parent, const char *name, bridge->dev.parent = parent; bridge->dev.of_node = parent->of_node; bridge->dev.id = id; - of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev); ret = dev_set_name(&bridge->dev, "br%d", id); if (ret) @@ -372,6 +371,8 @@ fpga_bridge_register(struct device *parent, const char *name, return ERR_PTR(ret); } + of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev); + return bridge; error_device: From 7caae9e684f0394865a2418afb3ae9d775af0a20 Mon Sep 17 00:00:00 2001 From: Daniel Baluta Date: Wed, 5 Apr 2023 12:26:55 +0300 Subject: [PATCH 27/63] UPSTREAM: ASoC: SOF: pm: Tear down pipelines only if DSP was active commit 0b186bb06198653d74a141902a7739e0bde20cf4 upstream. With PCI if the device was suspended it is brought back to full power and then suspended again. This doesn't happen when device is described via DT. We need to make sure that we tear down pipelines only if the device was previously active (thus the pipelines were setup). 
Otherwise, we can break the use_count: [ 219.009743] sof-audio-of-imx8m 3b6e8000.dsp: sof_ipc3_tear_down_all_pipelines: widget PIPELINE.2.SAI3.IN is still in use: count -1 and after this everything stops working. Fixes: d185e0689abc ("ASoC: SOF: pm: Always tear down pipelines before DSP suspend") Reviewed-by: Pierre-Louis Bossart Reviewed-by: Ranjani Sridharan Change-Id: Ic7f724954ede17a021a486dba6c9eed9e3354438 Signed-off-by: Daniel Baluta Link: https://lore.kernel.org/r/20230405092655.19587-1-daniel.baluta@oss.nxp.com Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 7a6593b5d7ad19ef61f5199e3c1420c829529b5d) Signed-off-by: Greg Kroah-Hartman --- sound/soc/sof/pm.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c index 8722bbd7fd3d..26ffcbb6e30f 100644 --- a/sound/soc/sof/pm.c +++ b/sound/soc/sof/pm.c @@ -183,6 +183,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend) const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg; pm_message_t pm_state; u32 target_state = snd_sof_dsp_power_target(sdev); + u32 old_state = sdev->dsp_power_state.state; int ret; /* do nothing if dsp suspend callback is not set */ @@ -192,7 +193,12 @@ static int sof_suspend(struct device *dev, bool runtime_suspend) if (runtime_suspend && !sof_ops(sdev)->runtime_suspend) return 0; - if (tplg_ops && tplg_ops->tear_down_all_pipelines) + /* we need to tear down pipelines only if the DSP hardware is + * active, which happens for PCI devices. 
if the device is + * suspended, it is brought back to full power and then + * suspended again + */ + if (tplg_ops && tplg_ops->tear_down_all_pipelines && (old_state == SOF_DSP_PM_D0)) tplg_ops->tear_down_all_pipelines(sdev, false); if (sdev->fw_state != SOF_FW_BOOT_COMPLETE) From 9a9b52eec789daf148ea8c7aeec5163a4383a69b Mon Sep 17 00:00:00 2001 From: Nikita Zhandarovich Date: Mon, 17 Apr 2023 06:32:42 -0700 Subject: [PATCH 28/63] UPSTREAM: ASoC: fsl_asrc_dma: fix potential null-ptr-deref commit 86a24e99c97234f87d9f70b528a691150e145197 upstream. dma_request_slave_channel() may return NULL which will lead to NULL pointer dereference error in 'tmp_chan->private'. Correct this behaviour by, first, switching from deprecated function dma_request_slave_channel() to dma_request_chan(). Secondly, enable sanity check for the resuling value of dma_request_chan(). Also, fix description that follows the enacted changes and that concerns the use of dma_request_slave_channel(). Fixes: 706e2c881158 ("ASoC: fsl_asrc_dma: Reuse the dma channel if available in Back-End") Co-developed-by: Natalia Petrova Change-Id: I2ae5ca9cbb40afd7c5f066e8ed984cf4f35c832e Signed-off-by: Nikita Zhandarovich Acked-by: Shengjiu Wang Link: https://lore.kernel.org/r/20230417133242.53339-1-n.zhandarovich@fintech.ru Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 6cb818ed5f08777e971cddda21a632490083aa1e) Signed-off-by: Greg Kroah-Hartman --- sound/soc/fsl/fsl_asrc_dma.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/sound/soc/fsl/fsl_asrc_dma.c b/sound/soc/fsl/fsl_asrc_dma.c index 3b81a465814a..05a7d1588d20 100644 --- a/sound/soc/fsl/fsl_asrc_dma.c +++ b/sound/soc/fsl/fsl_asrc_dma.c @@ -209,14 +209,19 @@ static int fsl_asrc_dma_hw_params(struct snd_soc_component *component, be_chan = soc_component_to_pcm(component_be)->chan[substream->stream]; tmp_chan = be_chan; } - if (!tmp_chan) - tmp_chan = dma_request_slave_channel(dev_be, tx ? 
"tx" : "rx"); + if (!tmp_chan) { + tmp_chan = dma_request_chan(dev_be, tx ? "tx" : "rx"); + if (IS_ERR(tmp_chan)) { + dev_err(dev, "failed to request DMA channel for Back-End\n"); + return -EINVAL; + } + } /* * An EDMA DEV_TO_DEV channel is fixed and bound with DMA event of each * peripheral, unlike SDMA channel that is allocated dynamically. So no * need to configure dma_request and dma_request2, but get dma_chan of - * Back-End device directly via dma_request_slave_channel. + * Back-End device directly via dma_request_chan. */ if (!asrc->use_edma) { /* Get DMA request of Back-End */ From c45eb7457f2286536fd0bec9b5ac9415abaf1837 Mon Sep 17 00:00:00 2001 From: Chancel Liu Date: Tue, 18 Apr 2023 17:42:59 +0800 Subject: [PATCH 29/63] UPSTREAM: ASoC: fsl_sai: Fix pins setting for i.MX8QM platform commit 238787157d83969e5149c8e99787d5d90e85fbe5 upstream. SAI on i.MX8QM platform supports the data lines up to 4. So the pins setting should be corrected to 4. Fixes: eba0f0077519 ("ASoC: fsl_sai: Enable combine mode soft") Change-Id: Iab8b402f688a8289606e979b7ea6c0a07006a235 Signed-off-by: Chancel Liu Acked-by: Shengjiu Wang Reviewed-by: Iuliana Prodan Link: https://lore.kernel.org/r/20230418094259.4150771-1-chancel.liu@nxp.com Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 1831d8cbaea8c168ba1ff9ad0b2ab3879bc76b40) Signed-off-by: Greg Kroah-Hartman --- sound/soc/fsl/fsl_sai.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c index df7c0bf37245..6d88af5b287f 100644 --- a/sound/soc/fsl/fsl_sai.c +++ b/sound/soc/fsl/fsl_sai.c @@ -1541,7 +1541,7 @@ static const struct fsl_sai_soc_data fsl_sai_imx8qm_data = { .use_imx_pcm = true, .use_edma = true, .fifo_depth = 64, - .pins = 1, + .pins = 4, .reg_offset = 0, .mclk0_is_mclk1 = false, .flags = 0, From b0b7c6147e554273a1552f4b2a2580606fc01828 Mon Sep 17 00:00:00 2001 From: Ekaterina Orlova Date: Fri, 21 Apr 2023 15:35:39 +0100 
Subject: [PATCH 30/63] UPSTREAM: ASN.1: Fix check for strdup() success commit 5a43001c01691dcbd396541e6faa2c0077378f48 upstream. It seems there is a misprint in the check of strdup() return code that can lead to NULL pointer dereference. Found by Linux Verification Center (linuxtesting.org) with SVACE. Fixes: 4520c6a49af8 ("X.509: Add simple ASN.1 grammar compiler") Change-Id: I625ab151cdb5da606d1b3ae364c90e4b0f6f9dc5 Signed-off-by: Ekaterina Orlova Cc: David Woodhouse Cc: James Bottomley Cc: Jarkko Sakkinen Cc: keyrings@vger.kernel.org Cc: linux-kbuild@vger.kernel.org Link: https://lore.kernel.org/r/20230315172130.140-1-vorobushek.ok@gmail.com/ Signed-off-by: David Howells Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman (cherry picked from commit ab91b09f399fe50de43c36549ee2c72b66ca1d3b) Signed-off-by: Greg Kroah-Hartman --- scripts/asn1_compiler.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/asn1_compiler.c b/scripts/asn1_compiler.c index 71d4a7c87900..c3e501451b41 100644 --- a/scripts/asn1_compiler.c +++ b/scripts/asn1_compiler.c @@ -625,7 +625,7 @@ int main(int argc, char **argv) p = strrchr(argv[1], '/'); p = p ? p + 1 : argv[1]; grammar_name = strdup(p); - if (!p) { + if (!grammar_name) { perror(NULL); exit(1); } From fea91b573ab9efe98961324279afa332aeb8ee50 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Wed, 29 Mar 2023 20:24:33 +0300 Subject: [PATCH 31/63] UPSTREAM: drm/i915: Fix fast wake AUX sync len MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit e1c71f8f918047ce822dc19b42ab1261ed259fd1 upstream. Fast wake should use 8 SYNC pulses for the preamble and 10-16 SYNC pulses for the precharge. Reduce our fast wake SYNC count to match the maximum value. We also use the maximum precharge length for normal AUX transactions. 
Cc: stable@vger.kernel.org Cc: Jouni Högander Change-Id: Iaa1ca424e1758a938aa960891141715c3ec46e14 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20230329172434.18744-1-ville.syrjala@linux.intel.com Reviewed-by: Jouni Högander (cherry picked from commit 605f7c73133341d4b762cbd9a22174cc22d4c38b) Signed-off-by: Jani Nikula Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 66eb772be27e228716bb81feee0400d995cbe605) Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/display/intel_dp_aux.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 48c375c65a41..7f3f2d50e6cd 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -165,7 +165,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp, DP_AUX_CH_CTL_TIME_OUT_MAX | DP_AUX_CH_CTL_RECEIVE_ERROR | (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | - DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) | + DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(24) | DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); if (intel_tc_port_in_tbt_alt_mode(dig_port)) From 5d61392e80b01c433c6e8a9e8d8c31c0eee2e7e1 Mon Sep 17 00:00:00 2001 From: Alan Liu Date: Fri, 14 Apr 2023 18:39:52 +0800 Subject: [PATCH 32/63] UPSTREAM: drm/amdgpu: Fix desktop freezed after gpu-reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit c8b5a95b570949536a2b75cd8fc4f1de0bc60629 upstream. [Why] After gpu-reset, sometimes the driver fails to enable vblank irq, causing flip_done timed out and the desktop freezed. During gpu-reset, we disable and enable vblank irq in dm_suspend() and dm_resume(). Later on in amdgpu_irq_gpu_reset_resume_helper(), we check irqs' refcount and decide to enable or disable the irqs again. However, we have 2 sets of API for controling vblank irq, one is dm_vblank_get/put() and another is amdgpu_irq_get/put(). 
Each API has its own refcount and flag to store the state of vblank irq, and they are not synchronized. In drm we use the first API to control vblank irq but in amdgpu_irq_gpu_reset_resume_helper() we use the second set of API. The failure happens when vblank irq was enabled by dm_vblank_get() before gpu-reset, we have vblank->enabled true. However, during gpu-reset, in amdgpu_irq_gpu_reset_resume_helper() vblank irq's state checked from amdgpu_irq_update() is DISABLED. So finally it disables vblank irq again. After gpu-reset, if there is a cursor plane commit, the driver will try to enable vblank irq by calling drm_vblank_enable(), but the vblank->enabled is still true, so it fails to turn on vblank irq and causes flip_done can't be completed in vblank irq handler and desktop become freezed. [How] Combining the 2 vblank control APIs by letting drm's API finally calls amdgpu_irq's API, so the irq's refcount and state of both APIs can be synchronized. Also add a check to prevent refcount from being less then 0 in amdgpu_irq_put(). v2: - Add warning in amdgpu_irq_enable() if the irq is already disabled. - Call dc_interrupt_set() in dm_set_vblank() to avoid refcount change if it is in gpu-reset. v3: - Improve commit message and code comments. 
Change-Id: I44a9569645ce40ea1cf98fce4741c9e683160220 Signed-off-by: Alan Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman (cherry picked from commit bef774effb278ff0b65ea2dbaa1ab32ba6a1dc13) Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 3 +++ .../drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 17 ++++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 89011bae7588..ca5dc51600fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -653,6 +653,9 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, if (!src->enabled_types || !src->funcs->set) return -EINVAL; + if (WARN_ON(!amdgpu_irq_enabled(adev, src, type))) + return -EINVAL; + if (atomic_dec_and_test(&src->enabled_types[type])) return amdgpu_irq_update(adev, src, type); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index b87f50e8fa61..1ec643a0d00d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -167,10 +167,21 @@ static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable) if (rc) return rc; - irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + if (amdgpu_in_reset(adev)) { + irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + /* During gpu-reset we disable and then enable vblank irq, so + * don't use amdgpu_irq_get/put() to avoid refcount change. + */ + if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) + rc = -EBUSY; + } else { + rc = (enable) + ? 
amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id) + : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id); + } - if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) - return -EBUSY; + if (rc) + return rc; skip: if (amdgpu_in_reset(adev)) From 26283282a1c77fce843f8505b68778e680b213ed Mon Sep 17 00:00:00 2001 From: Dmytro Laktyushkin Date: Mon, 3 Apr 2023 10:13:12 -0400 Subject: [PATCH 33/63] UPSTREAM: drm/amd/display: set dcn315 lb bpp to 48 commit 6d9240c46f7419aa3210353b5f52cc63da5a6440 upstream. [Why & How] Fix a typo for dcn315 line buffer bpp. Reviewed-by: Jun Lei Acked-by: Qingqing Zhuo Change-Id: I6f5408cd982d1ff478e6fb2982bd90c97184e692 Signed-off-by: Dmytro Laktyushkin Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman (cherry picked from commit 4ac57c3fe2c0a74c6239170fc58fc824637c6015) Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c index 7dd0845d1bd9..8e416433184c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c @@ -222,7 +222,7 @@ struct _vcs_dpi_ip_params_st dcn3_15_ip = { .maximum_dsc_bits_per_component = 10, .dsc422_native_support = false, .is_line_buffer_bpp_fixed = true, - .line_buffer_fixed_bpp = 49, + .line_buffer_fixed_bpp = 48, .line_buffer_size_bits = 789504, .max_line_buffer_lines = 12, .writeback_interface_buffer_size_kbytes = 90, From 467d3baa5d70bb17784aae9e88ccc623cf2c88f9 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 24 Oct 2022 23:11:25 +0300 Subject: [PATCH 34/63] BACKPORT: overflow: Introduce overflows_type() and castable_to_type() Implement a robust overflows_type() macro to test if a variable or constant value would overflow another variable or type. 
This can be used as a constant expression for static_assert() (which requires a constant expression[1][2]) when used on constant values. This must be constructed manually, since __builtin_add_overflow() does not produce a constant expression[3]. Additionally adds castable_to_type(), similar to __same_type(), but for checking if a constant value would overflow if cast to a given type. Add unit tests for overflows_type(), __same_type(), and castable_to_type() to the existing KUnit "overflow" test: [16:03:33] ================== overflow (21 subtests) ================== ... [16:03:33] [PASSED] overflows_type_test [16:03:33] [PASSED] same_type_test [16:03:33] [PASSED] castable_to_type_test [16:03:33] ==================== [PASSED] overflow ===================== [16:03:33] ============================================================ [16:03:33] Testing complete. Ran 21 tests: passed: 21 [16:03:33] Elapsed time: 24.022s total, 0.002s configuring, 22.598s building, 0.767s running [1] https://en.cppreference.com/w/c/language/_Static_assert [2] C11 standard (ISO/IEC 9899:2011): 6.7.10 Static assertions [3] https://gcc.gnu.org/onlinedocs/gcc/Integer-Overflow-Builtins.html 6.56 Built-in Functions to Perform Arithmetic with Overflow Checking Built-in Function: bool __builtin_add_overflow (type1 a, type2 b, Cc: Luc Van Oostenryck Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Tom Rix Cc: Daniel Latypov Cc: Vitor Massaru Iha Cc: "Gustavo A. R. 
Silva" Cc: Jani Nikula Cc: Mauro Carvalho Chehab Cc: linux-hardening@vger.kernel.org Cc: llvm@lists.linux.dev Co-developed-by: Gwan-gyeong Mun Signed-off-by: Gwan-gyeong Mun Signed-off-by: Kees Cook Link: https://lore.kernel.org/r/20221024201125.1416422-1-gwan-gyeong.mun@intel.com Bug: 279506910 (cherry picked from commit 4b21d25bf519c9487935a664886956bb18f04f6d) Change-Id: I20aff9de6b82a2f5203367d30555f904681a5b7b Signed-off-by: Elliot Berman --- drivers/gpu/drm/i915/i915_user_extensions.c | 2 +- drivers/gpu/drm/i915/i915_utils.h | 4 - include/linux/compiler.h | 1 + include/linux/overflow.h | 47 +++ lib/Makefile | 1 + lib/overflow_kunit.c | 381 ++++++++++++++++++++ 6 files changed, 431 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_user_extensions.c b/drivers/gpu/drm/i915/i915_user_extensions.c index c822d0aafd2d..e3f808372c47 100644 --- a/drivers/gpu/drm/i915/i915_user_extensions.c +++ b/drivers/gpu/drm/i915/i915_user_extensions.c @@ -51,7 +51,7 @@ int i915_user_extensions(struct i915_user_extension __user *ext, return err; if (get_user(next, &ext->next_extension) || - overflows_type(next, ext)) + overflows_type(next, uintptr_t)) return -EFAULT; ext = u64_to_user_ptr(next); diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 6c14d13364bf..67a66d4d5c70 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -111,10 +111,6 @@ bool i915_error_injected(void); #define range_overflows_end_t(type, start, size, max) \ range_overflows_end((type)(start), (type)(size), (type)(max)) -/* Note we don't consider signbits :| */ -#define overflows_type(x, T) \ - (sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T)) - #define ptr_mask_bits(ptr, n) ({ \ unsigned long __v = (unsigned long)(ptr); \ (typeof(ptr))(__v & -BIT(n)); \ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 973a1bfd7ef5..947a60b801db 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h 
@@ -236,6 +236,7 @@ static inline void *offset_to_ptr(const int *off) * bool and also pointer types. */ #define is_signed_type(type) (((type)(-1)) < (__force type)1) +#define is_unsigned_type(type) (!is_signed_type(type)) /* * This is needed in functions which generate the stack canary, see diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 1d3be1a2204c..0e33b5cbdb9f 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -128,6 +128,53 @@ static inline bool __must_check __must_check_overflow(bool overflow) (*_d >> _to_shift) != _a); \ })) +#define __overflows_type_constexpr(x, T) ( \ + is_unsigned_type(typeof(x)) ? \ + (x) > type_max(typeof(T)) : \ + is_unsigned_type(typeof(T)) ? \ + (x) < 0 || (x) > type_max(typeof(T)) : \ + (x) < type_min(typeof(T)) || (x) > type_max(typeof(T))) + +#define __overflows_type(x, T) ({ \ + typeof(T) v = 0; \ + check_add_overflow((x), v, &v); \ +}) + +/** + * overflows_type - helper for checking the overflows between value, variables, + * or data type + * + * @n: source constant value or variable to be checked + * @T: destination variable or data type proposed to store @x + * + * Compares the @x expression for whether or not it can safely fit in + * the storage of the type in @T. @x and @T can have different types. + * If @x is a constant expression, this will also resolve to a constant + * expression. + * + * Returns: true if overflow can occur, false otherwise. + */ +#define overflows_type(n, T) \ + __builtin_choose_expr(__is_constexpr(n), \ + __overflows_type_constexpr(n, T), \ + __overflows_type(n, T)) + +/** + * castable_to_type - like __same_type(), but also allows for casted literals + * + * @n: variable or constant value + * @T: variable or data type + * + * Unlike the __same_type() macro, this allows a constant value as the + * first argument. If this value would not overflow into an assignment + * of the second argument's type, it returns true. 
Otherwise, this falls + * back to __same_type(). + */ +#define castable_to_type(n, T) \ + __builtin_choose_expr(__is_constexpr(n), \ + !__overflows_type_constexpr(n, T), \ + __same_type(n, T)) + /** * size_mul() - Calculate size_t multiplication with saturation at SIZE_MAX * @factor1: first factor diff --git a/lib/Makefile b/lib/Makefile index 59bd7c2f793a..889f96b10f12 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -377,6 +377,7 @@ obj-$(CONFIG_CMDLINE_KUNIT_TEST) += cmdline_kunit.o obj-$(CONFIG_SLUB_KUNIT_TEST) += slub_kunit.o obj-$(CONFIG_MEMCPY_KUNIT_TEST) += memcpy_kunit.o obj-$(CONFIG_IS_SIGNED_TYPE_KUNIT_TEST) += is_signed_type_kunit.o +CFLAGS_overflow_kunit.o = $(call cc-disable-warning, tautological-constant-out-of-range-compare) obj-$(CONFIG_OVERFLOW_KUNIT_TEST) += overflow_kunit.o CFLAGS_stackinit_kunit.o += $(call cc-disable-warning, switch-unreachable) obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index b8556a2e7bb1..dcd3ba102db6 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -736,6 +736,384 @@ static void overflow_size_helpers_test(struct kunit *test) #undef check_one_size_helper } +static void overflows_type_test(struct kunit *test) +{ + int count = 0; + unsigned int var; + +#define __TEST_OVERFLOWS_TYPE(func, arg1, arg2, of) do { \ + bool __of = func(arg1, arg2); \ + KUNIT_EXPECT_EQ_MSG(test, __of, of, \ + "expected " #func "(" #arg1 ", " #arg2 " to%s overflow\n",\ + of ? 
"" : " not"); \ + count++; \ +} while (0) + +/* Args are: first type, second type, value, overflow expected */ +#define TEST_OVERFLOWS_TYPE(__t1, __t2, v, of) do { \ + __t1 t1 = (v); \ + __t2 t2; \ + __TEST_OVERFLOWS_TYPE(__overflows_type, t1, t2, of); \ + __TEST_OVERFLOWS_TYPE(__overflows_type, t1, __t2, of); \ + __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, t2, of); \ + __TEST_OVERFLOWS_TYPE(__overflows_type_constexpr, t1, __t2, of);\ +} while (0) + + TEST_OVERFLOWS_TYPE(u8, u8, U8_MAX, false); + TEST_OVERFLOWS_TYPE(u8, u16, U8_MAX, false); + TEST_OVERFLOWS_TYPE(u8, s8, U8_MAX, true); + TEST_OVERFLOWS_TYPE(u8, s8, S8_MAX, false); + TEST_OVERFLOWS_TYPE(u8, s8, (u8)S8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u8, s16, U8_MAX, false); + TEST_OVERFLOWS_TYPE(s8, u8, S8_MAX, false); + TEST_OVERFLOWS_TYPE(s8, u8, -1, true); + TEST_OVERFLOWS_TYPE(s8, u8, S8_MIN, true); + TEST_OVERFLOWS_TYPE(s8, u16, S8_MAX, false); + TEST_OVERFLOWS_TYPE(s8, u16, -1, true); + TEST_OVERFLOWS_TYPE(s8, u16, S8_MIN, true); + TEST_OVERFLOWS_TYPE(s8, u32, S8_MAX, false); + TEST_OVERFLOWS_TYPE(s8, u32, -1, true); + TEST_OVERFLOWS_TYPE(s8, u32, S8_MIN, true); +#if BITS_PER_LONG == 64 + TEST_OVERFLOWS_TYPE(s8, u64, S8_MAX, false); + TEST_OVERFLOWS_TYPE(s8, u64, -1, true); + TEST_OVERFLOWS_TYPE(s8, u64, S8_MIN, true); +#endif + TEST_OVERFLOWS_TYPE(s8, s8, S8_MAX, false); + TEST_OVERFLOWS_TYPE(s8, s8, S8_MIN, false); + TEST_OVERFLOWS_TYPE(s8, s16, S8_MAX, false); + TEST_OVERFLOWS_TYPE(s8, s16, S8_MIN, false); + TEST_OVERFLOWS_TYPE(u16, u8, U8_MAX, false); + TEST_OVERFLOWS_TYPE(u16, u8, (u16)U8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u16, u8, U16_MAX, true); + TEST_OVERFLOWS_TYPE(u16, s8, S8_MAX, false); + TEST_OVERFLOWS_TYPE(u16, s8, (u16)S8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u16, s8, U16_MAX, true); + TEST_OVERFLOWS_TYPE(u16, s16, S16_MAX, false); + TEST_OVERFLOWS_TYPE(u16, s16, (u16)S16_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u16, s16, U16_MAX, true); + TEST_OVERFLOWS_TYPE(u16, u32, U16_MAX, 
false); + TEST_OVERFLOWS_TYPE(u16, s32, U16_MAX, false); + TEST_OVERFLOWS_TYPE(s16, u8, U8_MAX, false); + TEST_OVERFLOWS_TYPE(s16, u8, (s16)U8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s16, u8, -1, true); + TEST_OVERFLOWS_TYPE(s16, u8, S16_MIN, true); + TEST_OVERFLOWS_TYPE(s16, u16, S16_MAX, false); + TEST_OVERFLOWS_TYPE(s16, u16, -1, true); + TEST_OVERFLOWS_TYPE(s16, u16, S16_MIN, true); + TEST_OVERFLOWS_TYPE(s16, u32, S16_MAX, false); + TEST_OVERFLOWS_TYPE(s16, u32, -1, true); + TEST_OVERFLOWS_TYPE(s16, u32, S16_MIN, true); +#if BITS_PER_LONG == 64 + TEST_OVERFLOWS_TYPE(s16, u64, S16_MAX, false); + TEST_OVERFLOWS_TYPE(s16, u64, -1, true); + TEST_OVERFLOWS_TYPE(s16, u64, S16_MIN, true); +#endif + TEST_OVERFLOWS_TYPE(s16, s8, S8_MAX, false); + TEST_OVERFLOWS_TYPE(s16, s8, S8_MIN, false); + TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s16, s8, (s16)S8_MIN - 1, true); + TEST_OVERFLOWS_TYPE(s16, s8, S16_MAX, true); + TEST_OVERFLOWS_TYPE(s16, s8, S16_MIN, true); + TEST_OVERFLOWS_TYPE(s16, s16, S16_MAX, false); + TEST_OVERFLOWS_TYPE(s16, s16, S16_MIN, false); + TEST_OVERFLOWS_TYPE(s16, s32, S16_MAX, false); + TEST_OVERFLOWS_TYPE(s16, s32, S16_MIN, false); + TEST_OVERFLOWS_TYPE(u32, u8, U8_MAX, false); + TEST_OVERFLOWS_TYPE(u32, u8, (u32)U8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u32, u8, U32_MAX, true); + TEST_OVERFLOWS_TYPE(u32, s8, S8_MAX, false); + TEST_OVERFLOWS_TYPE(u32, s8, (u32)S8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u32, s8, U32_MAX, true); + TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX, false); + TEST_OVERFLOWS_TYPE(u32, u16, U16_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u32, u16, U32_MAX, true); + TEST_OVERFLOWS_TYPE(u32, s16, S16_MAX, false); + TEST_OVERFLOWS_TYPE(u32, s16, (u32)S16_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u32, s16, U32_MAX, true); + TEST_OVERFLOWS_TYPE(u32, u32, U32_MAX, false); + TEST_OVERFLOWS_TYPE(u32, s32, S32_MAX, false); + TEST_OVERFLOWS_TYPE(u32, s32, U32_MAX, true); + TEST_OVERFLOWS_TYPE(u32, s32, (u32)S32_MAX + 1, true); 
+#if BITS_PER_LONG == 64 + TEST_OVERFLOWS_TYPE(u32, u64, U32_MAX, false); + TEST_OVERFLOWS_TYPE(u32, s64, U32_MAX, false); +#endif + TEST_OVERFLOWS_TYPE(s32, u8, U8_MAX, false); + TEST_OVERFLOWS_TYPE(s32, u8, (s32)U8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true); + TEST_OVERFLOWS_TYPE(s32, u8, -1, true); + TEST_OVERFLOWS_TYPE(s32, u8, S32_MIN, true); + TEST_OVERFLOWS_TYPE(s32, u16, U16_MAX, false); + TEST_OVERFLOWS_TYPE(s32, u16, (s32)U16_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s32, u16, S32_MAX, true); + TEST_OVERFLOWS_TYPE(s32, u16, -1, true); + TEST_OVERFLOWS_TYPE(s32, u16, S32_MIN, true); + TEST_OVERFLOWS_TYPE(s32, u32, S32_MAX, false); + TEST_OVERFLOWS_TYPE(s32, u32, -1, true); + TEST_OVERFLOWS_TYPE(s32, u32, S32_MIN, true); +#if BITS_PER_LONG == 64 + TEST_OVERFLOWS_TYPE(s32, u64, S32_MAX, false); + TEST_OVERFLOWS_TYPE(s32, u64, -1, true); + TEST_OVERFLOWS_TYPE(s32, u64, S32_MIN, true); +#endif + TEST_OVERFLOWS_TYPE(s32, s8, S8_MAX, false); + TEST_OVERFLOWS_TYPE(s32, s8, S8_MIN, false); + TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s32, s8, (s32)S8_MIN - 1, true); + TEST_OVERFLOWS_TYPE(s32, s8, S32_MAX, true); + TEST_OVERFLOWS_TYPE(s32, s8, S32_MIN, true); + TEST_OVERFLOWS_TYPE(s32, s16, S16_MAX, false); + TEST_OVERFLOWS_TYPE(s32, s16, S16_MIN, false); + TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s32, s16, (s32)S16_MIN - 1, true); + TEST_OVERFLOWS_TYPE(s32, s16, S32_MAX, true); + TEST_OVERFLOWS_TYPE(s32, s16, S32_MIN, true); + TEST_OVERFLOWS_TYPE(s32, s32, S32_MAX, false); + TEST_OVERFLOWS_TYPE(s32, s32, S32_MIN, false); +#if BITS_PER_LONG == 64 + TEST_OVERFLOWS_TYPE(s32, s64, S32_MAX, false); + TEST_OVERFLOWS_TYPE(s32, s64, S32_MIN, false); + TEST_OVERFLOWS_TYPE(u64, u8, U64_MAX, true); + TEST_OVERFLOWS_TYPE(u64, u8, U8_MAX, false); + TEST_OVERFLOWS_TYPE(u64, u8, (u64)U8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u64, u16, U64_MAX, true); + TEST_OVERFLOWS_TYPE(u64, u16, U16_MAX, 
false); + TEST_OVERFLOWS_TYPE(u64, u16, (u64)U16_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u64, u32, U64_MAX, true); + TEST_OVERFLOWS_TYPE(u64, u32, U32_MAX, false); + TEST_OVERFLOWS_TYPE(u64, u32, (u64)U32_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u64, u64, U64_MAX, false); + TEST_OVERFLOWS_TYPE(u64, s8, S8_MAX, false); + TEST_OVERFLOWS_TYPE(u64, s8, (u64)S8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u64, s8, U64_MAX, true); + TEST_OVERFLOWS_TYPE(u64, s16, S16_MAX, false); + TEST_OVERFLOWS_TYPE(u64, s16, (u64)S16_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u64, s16, U64_MAX, true); + TEST_OVERFLOWS_TYPE(u64, s32, S32_MAX, false); + TEST_OVERFLOWS_TYPE(u64, s32, (u64)S32_MAX + 1, true); + TEST_OVERFLOWS_TYPE(u64, s32, U64_MAX, true); + TEST_OVERFLOWS_TYPE(u64, s64, S64_MAX, false); + TEST_OVERFLOWS_TYPE(u64, s64, U64_MAX, true); + TEST_OVERFLOWS_TYPE(u64, s64, (u64)S64_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s64, u8, S64_MAX, true); + TEST_OVERFLOWS_TYPE(s64, u8, S64_MIN, true); + TEST_OVERFLOWS_TYPE(s64, u8, -1, true); + TEST_OVERFLOWS_TYPE(s64, u8, U8_MAX, false); + TEST_OVERFLOWS_TYPE(s64, u8, (s64)U8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s64, u16, S64_MAX, true); + TEST_OVERFLOWS_TYPE(s64, u16, S64_MIN, true); + TEST_OVERFLOWS_TYPE(s64, u16, -1, true); + TEST_OVERFLOWS_TYPE(s64, u16, U16_MAX, false); + TEST_OVERFLOWS_TYPE(s64, u16, (s64)U16_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s64, u32, S64_MAX, true); + TEST_OVERFLOWS_TYPE(s64, u32, S64_MIN, true); + TEST_OVERFLOWS_TYPE(s64, u32, -1, true); + TEST_OVERFLOWS_TYPE(s64, u32, U32_MAX, false); + TEST_OVERFLOWS_TYPE(s64, u32, (s64)U32_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s64, u64, S64_MAX, false); + TEST_OVERFLOWS_TYPE(s64, u64, S64_MIN, true); + TEST_OVERFLOWS_TYPE(s64, u64, -1, true); + TEST_OVERFLOWS_TYPE(s64, s8, S8_MAX, false); + TEST_OVERFLOWS_TYPE(s64, s8, S8_MIN, false); + TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s64, s8, (s64)S8_MIN - 1, true); + TEST_OVERFLOWS_TYPE(s64, s8, S64_MAX, true); + 
TEST_OVERFLOWS_TYPE(s64, s16, S16_MAX, false); + TEST_OVERFLOWS_TYPE(s64, s16, S16_MIN, false); + TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s64, s16, (s64)S16_MIN - 1, true); + TEST_OVERFLOWS_TYPE(s64, s16, S64_MAX, true); + TEST_OVERFLOWS_TYPE(s64, s32, S32_MAX, false); + TEST_OVERFLOWS_TYPE(s64, s32, S32_MIN, false); + TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MAX + 1, true); + TEST_OVERFLOWS_TYPE(s64, s32, (s64)S32_MIN - 1, true); + TEST_OVERFLOWS_TYPE(s64, s32, S64_MAX, true); + TEST_OVERFLOWS_TYPE(s64, s64, S64_MAX, false); + TEST_OVERFLOWS_TYPE(s64, s64, S64_MIN, false); +#endif + + /* Check for macro side-effects. */ + var = INT_MAX - 1; + __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false); + __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, false); + __TEST_OVERFLOWS_TYPE(__overflows_type, var++, int, true); + var = INT_MAX - 1; + __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false); + __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, false); + __TEST_OVERFLOWS_TYPE(overflows_type, var++, int, true); + + kunit_info(test, "%d overflows_type() tests finished\n", count); +#undef TEST_OVERFLOWS_TYPE +#undef __TEST_OVERFLOWS_TYPE +} + +static void same_type_test(struct kunit *test) +{ + int count = 0; + int var; + +#define TEST_SAME_TYPE(t1, t2, same) do { \ + typeof(t1) __t1h = type_max(t1); \ + typeof(t1) __t1l = type_min(t1); \ + typeof(t2) __t2h = type_max(t2); \ + typeof(t2) __t2l = type_min(t2); \ + KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1h)); \ + KUNIT_EXPECT_EQ(test, true, __same_type(t1, __t1l)); \ + KUNIT_EXPECT_EQ(test, true, __same_type(__t1h, t1)); \ + KUNIT_EXPECT_EQ(test, true, __same_type(__t1l, t1)); \ + KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2h)); \ + KUNIT_EXPECT_EQ(test, true, __same_type(t2, __t2l)); \ + KUNIT_EXPECT_EQ(test, true, __same_type(__t2h, t2)); \ + KUNIT_EXPECT_EQ(test, true, __same_type(__t2l, t2)); \ + KUNIT_EXPECT_EQ(test, same, __same_type(t1, t2)); \ + 
KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1h)); \ + KUNIT_EXPECT_EQ(test, same, __same_type(t2, __t1l)); \ + KUNIT_EXPECT_EQ(test, same, __same_type(__t1h, t2)); \ + KUNIT_EXPECT_EQ(test, same, __same_type(__t1l, t2)); \ + KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2h)); \ + KUNIT_EXPECT_EQ(test, same, __same_type(t1, __t2l)); \ + KUNIT_EXPECT_EQ(test, same, __same_type(__t2h, t1)); \ + KUNIT_EXPECT_EQ(test, same, __same_type(__t2l, t1)); \ +} while (0) + +#if BITS_PER_LONG == 64 +# define TEST_SAME_TYPE64(base, t, m) TEST_SAME_TYPE(base, t, m) +#else +# define TEST_SAME_TYPE64(base, t, m) do { } while (0) +#endif + +#define TEST_TYPE_SETS(base, mu8, mu16, mu32, ms8, ms16, ms32, mu64, ms64) \ +do { \ + TEST_SAME_TYPE(base, u8, mu8); \ + TEST_SAME_TYPE(base, u16, mu16); \ + TEST_SAME_TYPE(base, u32, mu32); \ + TEST_SAME_TYPE(base, s8, ms8); \ + TEST_SAME_TYPE(base, s16, ms16); \ + TEST_SAME_TYPE(base, s32, ms32); \ + TEST_SAME_TYPE64(base, u64, mu64); \ + TEST_SAME_TYPE64(base, s64, ms64); \ +} while (0) + + TEST_TYPE_SETS(u8, true, false, false, false, false, false, false, false); + TEST_TYPE_SETS(u16, false, true, false, false, false, false, false, false); + TEST_TYPE_SETS(u32, false, false, true, false, false, false, false, false); + TEST_TYPE_SETS(s8, false, false, false, true, false, false, false, false); + TEST_TYPE_SETS(s16, false, false, false, false, true, false, false, false); + TEST_TYPE_SETS(s32, false, false, false, false, false, true, false, false); +#if BITS_PER_LONG == 64 + TEST_TYPE_SETS(u64, false, false, false, false, false, false, true, false); + TEST_TYPE_SETS(s64, false, false, false, false, false, false, false, true); +#endif + + /* Check for macro side-effects. 
*/ + var = 4; + KUNIT_EXPECT_EQ(test, var, 4); + KUNIT_EXPECT_TRUE(test, __same_type(var++, int)); + KUNIT_EXPECT_EQ(test, var, 4); + KUNIT_EXPECT_TRUE(test, __same_type(int, var++)); + KUNIT_EXPECT_EQ(test, var, 4); + KUNIT_EXPECT_TRUE(test, __same_type(var++, var++)); + KUNIT_EXPECT_EQ(test, var, 4); + + kunit_info(test, "%d __same_type() tests finished\n", count); + +#undef TEST_TYPE_SETS +#undef TEST_SAME_TYPE64 +#undef TEST_SAME_TYPE +} + +static void castable_to_type_test(struct kunit *test) +{ + int count = 0; + +#define TEST_CASTABLE_TO_TYPE(arg1, arg2, pass) do { \ + bool __pass = castable_to_type(arg1, arg2); \ + KUNIT_EXPECT_EQ_MSG(test, __pass, pass, \ + "expected castable_to_type(" #arg1 ", " #arg2 ") to%s pass\n",\ + pass ? "" : " not"); \ + count++; \ +} while (0) + + TEST_CASTABLE_TO_TYPE(16, u8, true); + TEST_CASTABLE_TO_TYPE(16, u16, true); + TEST_CASTABLE_TO_TYPE(16, u32, true); + TEST_CASTABLE_TO_TYPE(16, s8, true); + TEST_CASTABLE_TO_TYPE(16, s16, true); + TEST_CASTABLE_TO_TYPE(16, s32, true); + TEST_CASTABLE_TO_TYPE(-16, s8, true); + TEST_CASTABLE_TO_TYPE(-16, s16, true); + TEST_CASTABLE_TO_TYPE(-16, s32, true); +#if BITS_PER_LONG == 64 + TEST_CASTABLE_TO_TYPE(16, u64, true); + TEST_CASTABLE_TO_TYPE(-16, s64, true); +#endif + +#define TEST_CASTABLE_TO_TYPE_VAR(width) do { \ + u ## width u ## width ## var = 0; \ + s ## width s ## width ## var = 0; \ + \ + /* Constant expressions that fit types. 
*/ \ + TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width, true); \ + TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width, true); \ + TEST_CASTABLE_TO_TYPE(type_max(u ## width), u ## width ## var, true); \ + TEST_CASTABLE_TO_TYPE(type_min(u ## width), u ## width ## var, true); \ + TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width, true); \ + TEST_CASTABLE_TO_TYPE(type_min(s ## width), s ## width, true); \ + TEST_CASTABLE_TO_TYPE(type_max(s ## width), s ## width ## var, true); \ + TEST_CASTABLE_TO_TYPE(type_min(u ## width), s ## width ## var, true); \ + /* Constant expressions that do not fit types. */ \ + TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width, false); \ + TEST_CASTABLE_TO_TYPE(type_max(u ## width), s ## width ## var, false); \ + TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width, false); \ + TEST_CASTABLE_TO_TYPE(type_min(s ## width), u ## width ## var, false); \ + /* Non-constant expression with mismatched type. */ \ + TEST_CASTABLE_TO_TYPE(s ## width ## var, u ## width, false); \ + TEST_CASTABLE_TO_TYPE(u ## width ## var, s ## width, false); \ +} while (0) + +#define TEST_CASTABLE_TO_TYPE_RANGE(width) do { \ + unsigned long big = U ## width ## _MAX; \ + signed long small = S ## width ## _MIN; \ + u ## width u ## width ## var = 0; \ + s ## width s ## width ## var = 0; \ + \ + /* Constant expression in range. */ \ + TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width, true); \ + TEST_CASTABLE_TO_TYPE(U ## width ## _MAX, u ## width ## var, true); \ + TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width, true); \ + TEST_CASTABLE_TO_TYPE(S ## width ## _MIN, s ## width ## var, true); \ + /* Constant expression out of range. 
*/ \ + TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width, false); \ + TEST_CASTABLE_TO_TYPE((unsigned long)U ## width ## _MAX + 1, u ## width ## var, false); \ + TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width, false); \ + TEST_CASTABLE_TO_TYPE((signed long)S ## width ## _MIN - 1, s ## width ## var, false); \ + /* Non-constant expression with mismatched type. */ \ + TEST_CASTABLE_TO_TYPE(big, u ## width, false); \ + TEST_CASTABLE_TO_TYPE(big, u ## width ## var, false); \ + TEST_CASTABLE_TO_TYPE(small, s ## width, false); \ + TEST_CASTABLE_TO_TYPE(small, s ## width ## var, false); \ +} while (0) + + TEST_CASTABLE_TO_TYPE_VAR(8); + TEST_CASTABLE_TO_TYPE_VAR(16); + TEST_CASTABLE_TO_TYPE_VAR(32); +#if BITS_PER_LONG == 64 + TEST_CASTABLE_TO_TYPE_VAR(64); +#endif + + TEST_CASTABLE_TO_TYPE_RANGE(8); + TEST_CASTABLE_TO_TYPE_RANGE(16); +#if BITS_PER_LONG == 64 + TEST_CASTABLE_TO_TYPE_RANGE(32); +#endif + kunit_info(test, "%d castable_to_type() tests finished\n", count); + +#undef TEST_CASTABLE_TO_TYPE_RANGE +#undef TEST_CASTABLE_TO_TYPE_VAR +#undef TEST_CASTABLE_TO_TYPE +} + static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(u8_u8__u8_overflow_test), KUNIT_CASE(s8_s8__s8_overflow_test), @@ -755,6 +1133,9 @@ static struct kunit_case overflow_test_cases[] = { KUNIT_CASE(shift_nonsense_test), KUNIT_CASE(overflow_allocation_test), KUNIT_CASE(overflow_size_helpers_test), + KUNIT_CASE(overflows_type_test), + KUNIT_CASE(same_type_test), + KUNIT_CASE(castable_to_type_test), {} }; From 96ddb92d5cf12c3c75495368e06b5cc21b3b5755 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Fri, 14 Apr 2023 16:21:38 -0700 Subject: [PATCH 35/63] ANDROID: gunyah: Sync with latest "virt: gunyah: Add hypercalls to identify Gunyah" Align arch_is_gh_guest to version 13 of Gunyah patches: https://lore.kernel.org/all/20230509204801.2824351-4-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: I7b4ff1974a1175efb94dfdb1f414771d887ecb1f Signed-off-by: 
Elliot Berman --- arch/arm64/gunyah/gunyah_hypercall.c | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/arch/arm64/gunyah/gunyah_hypercall.c b/arch/arm64/gunyah/gunyah_hypercall.c index 0f1cdb706e91..2925932660f1 100644 --- a/arch/arm64/gunyah/gunyah_hypercall.c +++ b/arch/arm64/gunyah/gunyah_hypercall.c @@ -8,18 +8,14 @@ #include #include -static const uuid_t gh_known_uuids[] = { - /* Qualcomm's version of Gunyah {19bd54bd-0b37-571b-946f-609b54539de6} */ - UUID_INIT(0x19bd54bd, 0x0b37, 0x571b, 0x94, 0x6f, 0x60, 0x9b, 0x54, 0x53, 0x9d, 0xe6), - /* Standard version of Gunyah {c1d58fcd-a453-5fdb-9265-ce36673d5f14} */ - UUID_INIT(0xc1d58fcd, 0xa453, 0x5fdb, 0x92, 0x65, 0xce, 0x36, 0x67, 0x3d, 0x5f, 0x14), -}; +/* {c1d58fcd-a453-5fdb-9265-ce36673d5f14} */ +static const uuid_t GUNYAH_UUID = + UUID_INIT(0xc1d58fcd, 0xa453, 0x5fdb, 0x92, 0x65, 0xce, 0x36, 0x67, 0x3d, 0x5f, 0x14); bool arch_is_gh_guest(void) { struct arm_smccc_res res; uuid_t uuid; - int i; arm_smccc_1_1_hvc(ARM_SMCCC_VENDOR_HYP_CALL_UID_FUNC_ID, &res); @@ -28,11 +24,7 @@ bool arch_is_gh_guest(void) ((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2); ((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3); - for (i = 0; i < ARRAY_SIZE(gh_known_uuids); i++) - if (uuid_equal(&uuid, &gh_known_uuids[i])) - return true; - - return false; + return uuid_equal(&uuid, &GUNYAH_UUID); } EXPORT_SYMBOL_GPL(arch_is_gh_guest); From 6889a3fbe42fc8828eef8e11c75be2700e518813 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Fri, 14 Apr 2023 16:23:52 -0700 Subject: [PATCH 36/63] ANDROID: gunyah: Sync with latest "virt: gunyah: Translate gh_rm_hyp_resource into gunyah_resource" Align the Gunyah IRQ domain to the v13 series: https://lore.kernel.org/all/20230509204801.2824351-17-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: I6807139aa917d89b44cb3d77aa3c790433746f3d Signed-off-by: Elliot Berman --- arch/arm64/include/asm/gunyah.h | 3 ++- drivers/virt/gunyah/rsc_mgr.c | 17 ++++++++--------- 2 files 
changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/gunyah.h b/arch/arm64/include/asm/gunyah.h index 64cfb964efee..a8b368f53bab 100644 --- a/arch/arm64/include/asm/gunyah.h +++ b/arch/arm64/include/asm/gunyah.h @@ -10,7 +10,8 @@ static inline int arch_gh_fill_irq_fwspec_params(u32 virq, struct irq_fwspec *fwspec) { - if (virq < 32 || virq > 1019) + /* Assume that Gunyah gave us an SPI; defensively check it */ + if (WARN_ON(virq < 32 || virq > 1019)) return -EINVAL; fwspec->param_count = 3; diff --git a/drivers/virt/gunyah/rsc_mgr.c b/drivers/virt/gunyah/rsc_mgr.c index ae84af21bc3c..c5bed971a699 100644 --- a/drivers/virt/gunyah/rsc_mgr.c +++ b/drivers/virt/gunyah/rsc_mgr.c @@ -230,7 +230,7 @@ static int gh_rm_irq_domain_alloc(struct irq_domain *d, unsigned int virq, unsig u32 gh_virq = spec->gh_virq; int ret; - if (nr_irqs != 1 || gh_virq == U32_MAX) + if (nr_irqs != 1) return -EINVAL; chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL); @@ -263,16 +263,13 @@ err_free_irq_data: static void gh_rm_irq_domain_free_single(struct irq_domain *d, unsigned int virq) { - struct gh_irq_chip_data *chip_data; struct irq_data *irq_data; irq_data = irq_domain_get_irq_data(d, virq); if (!irq_data) return; - chip_data = irq_data->chip_data; - - kfree(chip_data); + kfree(irq_data->chip_data); irq_data->chip_data = NULL; } @@ -292,6 +289,7 @@ static const struct irq_domain_ops gh_rm_irq_domain_ops = { struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_resource *hyp_resource) { struct gh_resource *ghrsc; + int ret; ghrsc = kzalloc(sizeof(*ghrsc), GFP_KERNEL); if (!ghrsc) @@ -301,17 +299,18 @@ struct gh_resource *gh_rm_alloc_resource(struct gh_rm *rm, struct gh_rm_hyp_reso ghrsc->capid = le64_to_cpu(hyp_resource->cap_id); ghrsc->irq = IRQ_NOTCONNECTED; ghrsc->rm_label = le32_to_cpu(hyp_resource->resource_label); - if (hyp_resource->virq && le32_to_cpu(hyp_resource->virq) != U32_MAX) { + if (hyp_resource->virq) { struct 
gh_irq_chip_data irq_data = { .gh_virq = le32_to_cpu(hyp_resource->virq), }; - ghrsc->irq = irq_domain_alloc_irqs(rm->irq_domain, 1, NUMA_NO_NODE, &irq_data); - if (ghrsc->irq < 0) { + ret = irq_domain_alloc_irqs(rm->irq_domain, 1, NUMA_NO_NODE, &irq_data); + if (ret < 0) { dev_err(rm->dev, "Failed to allocate interrupt for resource %d label: %d: %d\n", ghrsc->type, ghrsc->rm_label, ghrsc->irq); - ghrsc->irq = IRQ_NOTCONNECTED; + } else { + ghrsc->irq = ret; } } From fea63fe1f113e3c3000382384ca01d33ddf85261 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Fri, 14 Apr 2023 16:27:46 -0700 Subject: [PATCH 37/63] ANDROID: gunyah: Sync with latest "gunyah: rsc_mgr: Add resource manager RPC core" Align resource manager and rpc to v12 of Gunyah patches posted to kernel.org. - Rename "buff" to "buf" - printk adjustments - Comments - Stylistic tweaks https://lore.kernel.org/all/20230509204801.2824351-7-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: Iff216a9cb3afeb9de75f0b42bf58f139da2ca4bd Signed-off-by: Elliot Berman --- drivers/virt/gunyah/rsc_mgr.c | 39 ++++++++++++++--------------- drivers/virt/gunyah/rsc_mgr.h | 2 +- drivers/virt/gunyah/rsc_mgr_rpc.c | 41 +++++++++++++++++-------------- include/linux/gunyah_rsc_mgr.h | 4 +-- 4 files changed, 44 insertions(+), 42 deletions(-) diff --git a/drivers/virt/gunyah/rsc_mgr.c b/drivers/virt/gunyah/rsc_mgr.c index c5bed971a699..10cc8db37d30 100644 --- a/drivers/virt/gunyah/rsc_mgr.c +++ b/drivers/virt/gunyah/rsc_mgr.c @@ -380,7 +380,7 @@ static void gh_rm_notif_work(struct work_struct *work) blocking_notifier_call_chain(&rm->nh, connection->msg_id, connection->payload); - gh_rm_put(rm); + put_device(rm->dev); kfree(connection->payload); kfree(connection); } @@ -401,14 +401,14 @@ static void gh_rm_process_notif(struct gh_rm *rm, void *msg, size_t msg_size) connection->type = RM_RPC_TYPE_NOTIF; connection->msg_id = hdr->msg_id; - gh_rm_get(rm); + get_device(rm->dev); connection->notification.rm = rm; 
INIT_WORK(&connection->notification.work, gh_rm_notif_work); ret = gh_rm_init_connection_payload(connection, msg, sizeof(*hdr), msg_size); if (ret) { dev_err(rm->dev, "Failed to initialize connection for notification: %d\n", ret); - gh_rm_put(rm); + put_device(rm->dev); kfree(connection); return; } @@ -482,7 +482,7 @@ static void gh_rm_try_complete_connection(struct gh_rm *rm) schedule_work(&connection->notification.work); break; default: - dev_err_ratelimited(rm->dev, "Invalid message type (%d) received\n", + dev_err_ratelimited(rm->dev, "Invalid message type (%u) received\n", connection->type); gh_rm_abort_connection(rm); break; @@ -536,11 +536,11 @@ static void gh_rm_msgq_tx_done(struct mbox_client *cl, void *mssg, int r) } static int gh_rm_send_request(struct gh_rm *rm, u32 message_id, - const void *req_buff, size_t req_buf_size, + const void *req_buf, size_t req_buf_size, struct gh_rm_connection *connection) { size_t buf_size_remaining = req_buf_size; - const void *req_buf_curr = req_buff; + const void *req_buf_curr = req_buf; struct gh_msgq_tx_data *msg; struct gh_rm_rpc_hdr *hdr, hdr_template; u32 cont_fragments = 0; @@ -549,8 +549,8 @@ static int gh_rm_send_request(struct gh_rm *rm, u32 message_id, int ret; if (req_buf_size > GH_RM_MAX_NUM_FRAGMENTS * GH_RM_MAX_MSG_SIZE) { - dev_warn(rm->dev, "Limit exceeded for the number of fragments: %u\n", - cont_fragments); + dev_warn(rm->dev, "Limit (%lu bytes) exceeded for the maximum message size: %lu\n", + GH_RM_MAX_NUM_FRAGMENTS * GH_RM_MAX_MSG_SIZE, req_buf_size); dump_stack(); return -E2BIG; } @@ -560,7 +560,7 @@ static int gh_rm_send_request(struct gh_rm *rm, u32 message_id, hdr_template.api = RM_RPC_API; hdr_template.type = FIELD_PREP(RM_RPC_TYPE_MASK, RM_RPC_TYPE_REQUEST) | - FIELD_PREP(RM_RPC_FRAGMENTS_MASK, cont_fragments); + FIELD_PREP(RM_RPC_FRAGMENTS_MASK, cont_fragments); hdr_template.seq = cpu_to_le16(connection->reply.seq); hdr_template.msg_id = cpu_to_le32(message_id); @@ -568,7 +568,6 @@ static int 
gh_rm_send_request(struct gh_rm *rm, u32 message_id, if (ret) return ret; - /* Consider also the 'request' packet for the loop count */ do { msg = kmem_cache_zalloc(rm->cache, GFP_KERNEL); if (!msg) { @@ -577,11 +576,11 @@ static int gh_rm_send_request(struct gh_rm *rm, u32 message_id, } /* Fill header */ - hdr = (struct gh_rm_rpc_hdr *)msg->data; + hdr = (struct gh_rm_rpc_hdr *)&msg->data[0]; *hdr = hdr_template; /* Copy payload */ - payload = hdr + 1; + payload = &msg->data[0] + sizeof(*hdr); payload_size = min(buf_size_remaining, GH_RM_MAX_MSG_SIZE); memcpy(payload, req_buf_curr, payload_size); req_buf_curr += payload_size; @@ -615,23 +614,23 @@ out: * gh_rm_call: Achieve request-response type communication with RPC * @rm: Pointer to Gunyah resource manager internal data * @message_id: The RM RPC message-id - * @req_buff: Request buffer that contains the payload + * @req_buf: Request buffer that contains the payload * @req_buf_size: Total size of the payload * @resp_buf: Pointer to a response buffer * @resp_buf_size: Size of the response buffer * - * Make a request to the RM-VM and wait for reply back. For a successful + * Make a request to the Resource Manager and wait for reply back. For a successful * response, the function returns the payload. The size of the payload is set in - * resp_buf_size. The resp_buf should be freed by the caller when 0 is returned + * resp_buf_size. The resp_buf must be freed by the caller when 0 is returned * and resp_buf_size != 0. * - * req_buff should be not NULL for req_buf_size >0. If req_buf_size == 0, - * req_buff *can* be NULL and no additional payload is sent. + * req_buf should be not NULL for req_buf_size >0. If req_buf_size == 0, + * req_buf *can* be NULL and no additional payload is sent. * * Context: Process context. Will sleep waiting for reply. * Return: 0 on success. <0 if error. 
*/ -int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buff, size_t req_buf_size, +int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buf, size_t req_buf_size, void **resp_buf, size_t *resp_buf_size) { struct gh_rm_connection *connection; @@ -639,7 +638,7 @@ int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buff, size_t req_buf_ int ret; /* message_id 0 is reserved. req_buf_size implies req_buf is not NULL */ - if (!message_id || (!req_buff && req_buf_size) || !rm) + if (!rm || !message_id || (!req_buf && req_buf_size)) return -EINVAL; @@ -660,7 +659,7 @@ int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buff, size_t req_buf_ connection->reply.seq = lower_16_bits(seq_id); /* Send the request to the Resource Manager */ - ret = gh_rm_send_request(rm, message_id, req_buff, req_buf_size, connection); + ret = gh_rm_send_request(rm, message_id, req_buf, req_buf_size, connection); if (ret < 0) goto out; diff --git a/drivers/virt/gunyah/rsc_mgr.h b/drivers/virt/gunyah/rsc_mgr.h index 6838e736f361..1e0a1cc7e844 100644 --- a/drivers/virt/gunyah/rsc_mgr.h +++ b/drivers/virt/gunyah/rsc_mgr.h @@ -10,7 +10,7 @@ #include struct gh_rm; -int gh_rm_call(struct gh_rm *rsc_mgr, u32 message_id, void *req_buff, size_t req_buf_size, +int gh_rm_call(struct gh_rm *rsc_mgr, u32 message_id, void *req_buf, size_t req_buf_size, void **resp_buf, size_t *resp_buf_size); int gh_rm_platform_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel); diff --git a/drivers/virt/gunyah/rsc_mgr_rpc.c b/drivers/virt/gunyah/rsc_mgr_rpc.c index d6b3ccceee68..4a8f94a34cf2 100644 --- a/drivers/virt/gunyah/rsc_mgr_rpc.c +++ b/drivers/virt/gunyah/rsc_mgr_rpc.c @@ -60,7 +60,7 @@ struct gh_rm_mem_release_req { } __packed; /* Call: MEM_APPEND */ -#define GH_MEM_APPEND_REQ_FLAGS_END BIT(0) +#define GH_MEM_APPEND_REQ_FLAGS_END BIT(0) struct gh_rm_mem_append_req_header { __le32 mem_handle; @@ -76,7 +76,7 @@ struct gh_rm_vm_alloc_vmid_resp { } __packed; /* Call: VM_STOP */ 
-#define GH_RM_VM_STOP_FLAG_FORCE_STOP BIT(0) +#define GH_RM_VM_STOP_FLAG_FORCE_STOP BIT(0) #define GH_RM_VM_STOP_REASON_FORCE_STOP 3 @@ -184,6 +184,7 @@ static int gh_rm_mem_append(struct gh_rm *rm, u32 mem_handle, static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_mem_parcel *p) { size_t msg_size = 0, initial_mem_entries = p->n_mem_entries, resp_size; + size_t acl_section_size, mem_section_size; struct gh_rm_mem_share_req_acl_section *acl_section; struct gh_rm_mem_share_req_mem_section *mem_section; struct gh_rm_mem_share_req_header *req_header; @@ -199,6 +200,8 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_ if (initial_mem_entries > GH_RM_MAX_MEM_ENTRIES) initial_mem_entries = GH_RM_MAX_MEM_ENTRIES; + acl_section_size = struct_size(acl_section, entries, p->n_acl_entries); + mem_section_size = struct_size(mem_section, entries, initial_mem_entries); /* The format of the message goes: * request header * ACL entries (which VMs get what kind of access to this memory parcel) @@ -206,8 +209,8 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_ * Memory attributes (currently unused, we'll hard-code the size to 0) */ msg_size += sizeof(struct gh_rm_mem_share_req_header); - msg_size += struct_size(acl_section, entries, p->n_acl_entries); - msg_size += struct_size(mem_section, entries, initial_mem_entries); + msg_size += acl_section_size; + msg_size += mem_section_size; msg_size += sizeof(u32); /* for memory attributes, currently unused */ msg = kzalloc(msg_size, GFP_KERNEL); @@ -222,8 +225,8 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_ req_header = msg; acl_section = (void *)req_header + sizeof(*req_header); - mem_section = (void *)acl_section + struct_size(acl_section, entries, p->n_acl_entries); - attr_section = (void *)mem_section + struct_size(mem_section, entries, initial_mem_entries); + mem_section = (void *)acl_section + 
acl_section_size; + attr_section = (void *)mem_section + mem_section_size; req_header->mem_type = p->mem_type; if (initial_mem_entries != p->n_mem_entries) @@ -231,11 +234,12 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_ req_header->label = cpu_to_le32(p->label); acl_section->n_entries = cpu_to_le32(p->n_acl_entries); - memcpy(acl_section->entries, p->acl_entries, sizeof(*(p->acl_entries)) * p->n_acl_entries); + memcpy(acl_section->entries, p->acl_entries, + flex_array_size(acl_section, entries, p->n_acl_entries)); mem_section->n_entries = cpu_to_le16(initial_mem_entries); memcpy(mem_section->entries, p->mem_entries, - sizeof(*(p->mem_entries)) * initial_mem_entries); + flex_array_size(mem_section, entries, initial_mem_entries)); /* Set n_entries for memory attribute section to 0 */ *attr_section = 0; @@ -249,6 +253,7 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_ } p->mem_handle = le32_to_cpu(*resp); + kfree(resp); if (initial_mem_entries != p->n_mem_entries) { ret = gh_rm_mem_append(rm, p->mem_handle, @@ -260,14 +265,13 @@ static int gh_rm_mem_lend_common(struct gh_rm *rm, u32 message_id, struct gh_rm_ } } - kfree(resp); return ret; } /** * gh_rm_mem_lend() - Lend memory to other virtual machines. * @rm: Handle to a Gunyah resource manager - * @parcel: Package the memory information of the memory to be lent. + * @parcel: Information about the memory to be lent. * * Lending removes Linux's access to the memory while the memory parcel is lent. */ @@ -280,7 +284,7 @@ int gh_rm_mem_lend(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel) /** * gh_rm_mem_share() - Share memory with other virtual machines. * @rm: Handle to a Gunyah resource manager - * @parcel: Package the memory information of the memory to be shared. + * @parcel: Information about the memory to be shared. * * Sharing keeps Linux's access to the memory while the memory parcel is shared. 
*/ @@ -292,7 +296,7 @@ int gh_rm_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel) /** * gh_rm_mem_reclaim() - Reclaim a memory parcel * @rm: Handle to a Gunyah resource manager - * @parcel: Package the memory information of the memory to be reclaimed. + * @parcel: Information about the memory to be reclaimed. * * RM maps the associated memory back into the stage-2 page tables of the owner VM. */ @@ -366,7 +370,7 @@ int gh_rm_alloc_vmid(struct gh_rm *rm, u16 vmid) } /** - * gh_rm_dealloc_vmid() - Dispose the VMID + * gh_rm_dealloc_vmid() - Dispose of a VMID * @rm: Handle to a Gunyah resource manager * @vmid: VM identifier allocated with gh_rm_alloc_vmid */ @@ -376,11 +380,11 @@ int gh_rm_dealloc_vmid(struct gh_rm *rm, u16 vmid) } /** - * gh_rm_vm_reset() - Reset the VM's resources + * gh_rm_vm_reset() - Reset a VM's resources * @rm: Handle to a Gunyah resource manager * @vmid: VM identifier allocated with gh_rm_alloc_vmid * - * While tearing down the VM, request RM to clean up all the VM resources + * As part of tearing down the VM, request RM to clean up all the VM resources * associated with the VM. Only after this, Linux can clean up all the * references it maintains to resources. */ @@ -390,7 +394,7 @@ int gh_rm_vm_reset(struct gh_rm *rm, u16 vmid) } /** - * gh_rm_vm_start() - Move the VM into "ready to run" state + * gh_rm_vm_start() - Move a VM into "ready to run" state * @rm: Handle to a Gunyah resource manager * @vmid: VM identifier allocated with gh_rm_alloc_vmid * @@ -432,9 +436,7 @@ int gh_rm_vm_stop(struct gh_rm *rm, u16 vmid) * @image_size: Size of the VM image * @dtb_offset: Start address of the devicetree binary with VM configuration, * relative to start of memparcel. - * @dtb_size: Maximum size of devicetree binary. Resource manager applies - * an overlay to the DTB and dtb_size should include room for - * the overlay. + * @dtb_size: Maximum size of devicetree binary. 
*/ int gh_rm_vm_configure(struct gh_rm *rm, u16 vmid, enum gh_rm_vm_auth_mechanism auth_mechanism, u32 mem_handle, u64 image_offset, u64 image_size, u64 dtb_offset, u64 dtb_size) @@ -470,6 +472,7 @@ int gh_rm_vm_init(struct gh_rm *rm, u16 vmid) * @rm: Handle to a Gunyah resource manager * @vmid: VMID of the other VM to get the resources of * @resources: Set by gh_rm_get_hyp_resources and contains the returned hypervisor resources. + * Caller must free the resources pointer if successful. */ int gh_rm_get_hyp_resources(struct gh_rm *rm, u16 vmid, struct gh_rm_hyp_resources **resources) diff --git a/include/linux/gunyah_rsc_mgr.h b/include/linux/gunyah_rsc_mgr.h index 11a9d14b4e98..7e2c9b7d5e20 100644 --- a/include/linux/gunyah_rsc_mgr.h +++ b/include/linux/gunyah_rsc_mgr.h @@ -14,8 +14,8 @@ #define GH_MEM_HANDLE_INVAL U32_MAX struct gh_rm; -int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buff, size_t req_buff_size, - void **resp_buf, size_t *resp_buff_size); +int gh_rm_call(struct gh_rm *rm, u32 message_id, void *req_buf, size_t req_buf_size, + void **resp_buf, size_t *resp_buf_size); int gh_rm_notifier_register(struct gh_rm *rm, struct notifier_block *nb); int gh_rm_notifier_unregister(struct gh_rm *rm, struct notifier_block *nb); struct device *gh_rm_get(struct gh_rm *rm); From 5e0785329a764034326de4690efedc6a9cb5d3bd Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Fri, 14 Apr 2023 16:35:40 -0700 Subject: [PATCH 38/63] ANDROID: gunyah: Sync with latest "gunyah: vm_mgr: Add framework for VM Functions" Align Gunyah VM Functions with Gunyah v13 patches: https://lore.kernel.org/all/20230509204801.2824351-18-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: Id8e043191539d41e4b54cb579ba2a84db76e0f70 Signed-off-by: Elliot Berman --- drivers/virt/gunyah/gunyah_ioeventfd.c | 17 +++- drivers/virt/gunyah/gunyah_irqfd.c | 15 +++- drivers/virt/gunyah/gunyah_vcpu.c | 14 +++- drivers/virt/gunyah/vm_mgr.c | 112 ++++++++++++++----------- 
include/linux/gunyah_vm_mgr.h | 28 +++++-- 5 files changed, 125 insertions(+), 61 deletions(-) diff --git a/drivers/virt/gunyah/gunyah_ioeventfd.c b/drivers/virt/gunyah/gunyah_ioeventfd.c index 517f55706ed9..f61291c17be5 100644 --- a/drivers/virt/gunyah/gunyah_ioeventfd.c +++ b/drivers/virt/gunyah/gunyah_ioeventfd.c @@ -111,7 +111,20 @@ static void gh_ioevent_unbind(struct gh_vm_function_instance *f) kfree(iofd); } -DECLARE_GH_VM_FUNCTION_INIT(ioeventfd, GH_FN_IOEVENTFD, - gh_ioeventfd_bind, gh_ioevent_unbind); +static bool gh_ioevent_compare(const struct gh_vm_function_instance *f, + const void *arg, size_t size) +{ + const struct gh_fn_ioeventfd_arg *instance = f->argp, + *other = arg; + + if (sizeof(*other) != size) + return false; + + return instance->addr == other->addr; +} + +DECLARE_GH_VM_FUNCTION_INIT(ioeventfd, GH_FN_IOEVENTFD, 3, + gh_ioeventfd_bind, gh_ioevent_unbind, + gh_ioevent_compare); MODULE_DESCRIPTION("Gunyah ioeventfds"); MODULE_LICENSE("GPL"); diff --git a/drivers/virt/gunyah/gunyah_irqfd.c b/drivers/virt/gunyah/gunyah_irqfd.c index 38e5fe266b00..7629e5777137 100644 --- a/drivers/virt/gunyah/gunyah_irqfd.c +++ b/drivers/virt/gunyah/gunyah_irqfd.c @@ -159,6 +159,19 @@ static void gh_irqfd_unbind(struct gh_vm_function_instance *f) kfree(irqfd); } -DECLARE_GH_VM_FUNCTION_INIT(irqfd, GH_FN_IRQFD, gh_irqfd_bind, gh_irqfd_unbind); +static bool gh_irqfd_compare(const struct gh_vm_function_instance *f, + const void *arg, size_t size) +{ + const struct gh_fn_irqfd_arg *instance = f->argp, + *other = arg; + + if (sizeof(*other) != size) + return false; + + return instance->label == other->label; +} + +DECLARE_GH_VM_FUNCTION_INIT(irqfd, GH_FN_IRQFD, 2, gh_irqfd_bind, gh_irqfd_unbind, + gh_irqfd_compare); MODULE_DESCRIPTION("Gunyah irqfds"); MODULE_LICENSE("GPL"); diff --git a/drivers/virt/gunyah/gunyah_vcpu.c b/drivers/virt/gunyah/gunyah_vcpu.c index f8925b77851a..b6692afca3de 100644 --- a/drivers/virt/gunyah/gunyah_vcpu.c +++ 
b/drivers/virt/gunyah/gunyah_vcpu.c @@ -457,6 +457,18 @@ static void gh_vcpu_unbind(struct gh_vm_function_instance *f) kref_put(&vcpu->kref, vcpu_release); } -DECLARE_GH_VM_FUNCTION_INIT(vcpu, GH_FN_VCPU, gh_vcpu_bind, gh_vcpu_unbind); +static bool gh_vcpu_compare(const struct gh_vm_function_instance *f, + const void *arg, size_t size) +{ + const struct gh_fn_vcpu_arg *instance = f->argp, + *other = arg; + + if (sizeof(*other) != size) + return false; + + return instance->id == other->id; +} + +DECLARE_GH_VM_FUNCTION_INIT(vcpu, GH_FN_VCPU, 1, gh_vcpu_bind, gh_vcpu_unbind, gh_vcpu_compare); MODULE_DESCRIPTION("Gunyah vCPU Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c index a6f2d5ee68e2..fe546220b7c1 100644 --- a/drivers/virt/gunyah/vm_mgr.c +++ b/drivers/virt/gunyah/vm_mgr.c @@ -19,47 +19,27 @@ #include "vm_mgr.h" -static DEFINE_XARRAY(functions); +static void gh_vm_free(struct work_struct *work); -int gh_vm_function_register(struct gh_vm_function *fn) +static DEFINE_XARRAY(gh_vm_functions); + +static void gh_vm_put_function(struct gh_vm_function *fn) { - if (!fn->bind || !fn->unbind) - return -EINVAL; - - return xa_err(xa_store(&functions, fn->type, fn, GFP_KERNEL)); + module_put(fn->mod); } -EXPORT_SYMBOL_GPL(gh_vm_function_register); - -static void gh_vm_remove_function_instance(struct gh_vm_function_instance *inst) - __must_hold(&inst->ghvm->fn_lock) -{ - inst->fn->unbind(inst); - list_del(&inst->vm_list); - module_put(inst->fn->mod); - kfree(inst->argp); - kfree(inst); -} - -void gh_vm_function_unregister(struct gh_vm_function *fn) -{ - /* Expecting unregister to only come when unloading a module */ - WARN_ON(fn->mod && module_refcount(fn->mod)); - xa_erase(&functions, fn->type); -} -EXPORT_SYMBOL_GPL(gh_vm_function_unregister); static struct gh_vm_function *gh_vm_get_function(u32 type) { struct gh_vm_function *fn; int r; - fn = xa_load(&functions, type); + fn = xa_load(&gh_vm_functions, type); if 
(!fn) { r = request_module("ghfunc:%d", type); if (r) - return ERR_PTR(r); + return ERR_PTR(r > 0 ? -r : r); - fn = xa_load(&functions, type); + fn = xa_load(&gh_vm_functions, type); } if (!fn || !try_module_get(fn->mod)) @@ -68,14 +48,36 @@ static struct gh_vm_function *gh_vm_get_function(u32 type) return fn; } -static long gh_vm_add_function(struct gh_vm *ghvm, struct gh_fn_desc *f) +static void gh_vm_remove_function_instance(struct gh_vm_function_instance *inst) + __must_hold(&inst->ghvm->fn_lock) +{ + inst->fn->unbind(inst); + list_del(&inst->vm_list); + gh_vm_put_function(inst->fn); + kfree(inst->argp); + kfree(inst); +} + +static void gh_vm_remove_functions(struct gh_vm *ghvm) +{ + struct gh_vm_function_instance *inst, *iiter; + + mutex_lock(&ghvm->fn_lock); + list_for_each_entry_safe(inst, iiter, &ghvm->functions, vm_list) { + gh_vm_remove_function_instance(inst); + } + mutex_unlock(&ghvm->fn_lock); +} + +static long gh_vm_add_function_instance(struct gh_vm *ghvm, struct gh_fn_desc *f) { struct gh_vm_function_instance *inst; void __user *argp; long r = 0; if (f->arg_size > GH_FN_MAX_ARG_SIZE) { - dev_err(ghvm->parent, "%s: arg_size > %d\n", __func__, GH_FN_MAX_ARG_SIZE); + dev_err_ratelimited(ghvm->parent, "%s: arg_size > %d\n", + __func__, GH_FN_MAX_ARG_SIZE); return -EINVAL; } @@ -110,7 +112,8 @@ static long gh_vm_add_function(struct gh_vm *ghvm, struct gh_fn_desc *f) mutex_lock(&ghvm->fn_lock); r = inst->fn->bind(inst); if (r < 0) { - module_put(inst->fn->mod); + mutex_unlock(&ghvm->fn_lock); + gh_vm_put_function(inst->fn); goto free_arg; } @@ -125,7 +128,7 @@ free: return r; } -static long gh_vm_rm_function(struct gh_vm *ghvm, struct gh_fn_desc *f) +static long gh_vm_rm_function_instance(struct gh_vm *ghvm, struct gh_fn_desc *f) { struct gh_vm_function_instance *inst, *iter; void __user *user_argp; @@ -150,11 +153,13 @@ static long gh_vm_rm_function(struct gh_vm *ghvm, struct gh_fn_desc *f) goto out; } + r = -ENOENT; list_for_each_entry_safe(inst, iter, 
&ghvm->functions, vm_list) { if (inst->fn->type == f->type && - f->arg_size == inst->arg_size && - !memcmp(argp, inst->argp, f->arg_size)) + inst->fn->compare(inst, argp, f->arg_size)) { gh_vm_remove_function_instance(inst); + r = 0; + } } kfree(argp); @@ -165,6 +170,23 @@ out: return r; } +int gh_vm_function_register(struct gh_vm_function *fn) +{ + if (!fn->bind || !fn->unbind) + return -EINVAL; + + return xa_err(xa_store(&gh_vm_functions, fn->type, fn, GFP_KERNEL)); +} +EXPORT_SYMBOL_GPL(gh_vm_function_register); + +void gh_vm_function_unregister(struct gh_vm_function *fn) +{ + /* Expecting unregister to only come when unloading a module */ + WARN_ON(fn->mod && module_refcount(fn->mod)); + xa_erase(&gh_vm_functions, fn->type); +} +EXPORT_SYMBOL_GPL(gh_vm_function_unregister); + int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket) { struct gh_vm_resource_ticket *iter; @@ -189,7 +211,7 @@ int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket * list_for_each_entry(ghrsc, &ghvm->resources, list) { if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) { - if (!ticket->populate(ticket, ghrsc)) + if (ticket->populate(ticket, ghrsc)) list_move(&ghrsc->list, &ticket->resources); } } @@ -395,7 +417,6 @@ static void gh_vm_stop(struct gh_vm *ghvm) static void gh_vm_free(struct work_struct *work) { struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work); - struct gh_vm_function_instance *inst, *iiter; struct gh_vm_resource_ticket *ticket, *titer; struct gh_resource *ghrsc, *riter; struct gh_vm_mem *mapping, *tmp; @@ -407,11 +428,7 @@ static void gh_vm_free(struct work_struct *work) fallthrough; case GH_RM_VM_STATUS_INIT_FAILED: case GH_RM_VM_STATUS_EXITED: - mutex_lock(&ghvm->fn_lock); - list_for_each_entry_safe(inst, iiter, &ghvm->functions, vm_list) { - gh_vm_remove_function_instance(inst); - } - mutex_unlock(&ghvm->fn_lock); + gh_vm_remove_functions(ghvm); 
mutex_lock(&ghvm->resources_lock); if (!list_empty(&ghvm->resource_tickets)) { @@ -728,21 +745,16 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_from_user(&f, argp, sizeof(f))) return -EFAULT; - r = gh_vm_add_function(ghvm, &f); + r = gh_vm_add_function_instance(ghvm, &f); break; } case GH_VM_REMOVE_FUNCTION: { - struct gh_fn_desc *f; + struct gh_fn_desc f; - f = kzalloc(sizeof(*f), GFP_KERNEL); - if (!f) - return -ENOMEM; - - if (copy_from_user(f, argp, sizeof(*f))) + if (copy_from_user(&f, argp, sizeof(f))) return -EFAULT; - r = gh_vm_rm_function(ghvm, f); - kfree(f); + r = gh_vm_rm_function_instance(ghvm, &f); break; } default: diff --git a/include/linux/gunyah_vm_mgr.h b/include/linux/gunyah_vm_mgr.h index 2dbf5e5f4037..1527861a5c63 100644 --- a/include/linux/gunyah_vm_mgr.h +++ b/include/linux/gunyah_vm_mgr.h @@ -27,6 +27,7 @@ struct gh_vm_function { struct module *mod; long (*bind)(struct gh_vm_function_instance *f); void (*unbind)(struct gh_vm_function_instance *f); + bool (*compare)(const struct gh_vm_function_instance *f, const void *arg, size_t size); }; /** @@ -53,22 +54,35 @@ struct gh_vm_function_instance { int gh_vm_function_register(struct gh_vm_function *f); void gh_vm_function_unregister(struct gh_vm_function *f); -#define DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind) \ - static struct gh_vm_function _name = { \ +/* Since the function identifiers were setup in a uapi header as an + * enum and we do no want to change that, the user must supply the expanded + * constant as well and the compiler checks they are the same. + * See also MODULE_ALIAS_RDMA_NETLINK. 
+ */ +#define MODULE_ALIAS_GH_VM_FUNCTION(_type, _idx) \ + static inline void __maybe_unused __chk##_idx(void) \ + { \ + BUILD_BUG_ON(_type != _idx); \ + } \ + MODULE_ALIAS("ghfunc:" __stringify(_idx)) + +#define DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind, _compare) \ + static struct gh_vm_function _name = { \ .type = _type, \ .name = __stringify(_name), \ .mod = THIS_MODULE, \ .bind = _bind, \ .unbind = _unbind, \ - }; \ - MODULE_ALIAS("ghfunc:"__stringify(_type)) + .compare = _compare, \ + } #define module_gh_vm_function(__gf) \ module_driver(__gf, gh_vm_function_register, gh_vm_function_unregister) -#define DECLARE_GH_VM_FUNCTION_INIT(_name, _type, _bind, _unbind) \ - DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind); \ - module_gh_vm_function(_name) +#define DECLARE_GH_VM_FUNCTION_INIT(_name, _type, _idx, _bind, _unbind, _compare) \ + DECLARE_GH_VM_FUNCTION(_name, _type, _bind, _unbind, _compare); \ + module_gh_vm_function(_name); \ + MODULE_ALIAS_GH_VM_FUNCTION(_type, _idx) struct gh_vm_resource_ticket { struct list_head list; /* for gh_vm's resources list */ From 084d70e264611e2a2189c584de0bfc882014214a Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Fri, 14 Apr 2023 16:37:35 -0700 Subject: [PATCH 39/63] ANDROID: gunyah: Sync with latest "virt: gunyah: Add resource tickets" Align resource tickets with the Gunyah v13 patches posted to kernel.org: https://lore.kernel.org/all/20230509204801.2824351-19-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: Ie08544786045b338c332b3a35c125fcb9a77b697 Signed-off-by: Elliot Berman --- drivers/virt/gunyah/gunyah_irqfd.c | 6 +-- drivers/virt/gunyah/gunyah_vcpu.c | 8 ++-- drivers/virt/gunyah/vm_mgr.c | 60 +++++++++++++++++------------- include/linux/gunyah_vm_mgr.h | 6 +-- 4 files changed, 46 insertions(+), 34 deletions(-) diff --git a/drivers/virt/gunyah/gunyah_irqfd.c b/drivers/virt/gunyah/gunyah_irqfd.c index 7629e5777137..0371fd3da578 100644 --- a/drivers/virt/gunyah/gunyah_irqfd.c +++ 
b/drivers/virt/gunyah/gunyah_irqfd.c @@ -54,7 +54,7 @@ static void irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, p add_wait_queue(wqh, &irq_ctx->wait); } -static int gh_irqfd_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc) +static bool gh_irqfd_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc) { struct gh_irqfd *irqfd = container_of(ticket, struct gh_irqfd, ticket); u64 enable_mask = GH_BELL_NONBLOCK; @@ -64,7 +64,7 @@ static int gh_irqfd_populate(struct gh_vm_resource_ticket *ticket, struct gh_res if (irqfd->ghrsc) { pr_warn("irqfd%d already got a Gunyah resource. Check if multiple resources with same label were configured.\n", irqfd->ticket.label); - return -1; + return false; } irqfd->ghrsc = ghrsc; @@ -75,7 +75,7 @@ static int gh_irqfd_populate(struct gh_vm_resource_ticket *ticket, struct gh_res irqfd->ticket.label); } - return 0; + return true; } static void gh_irqfd_unpopulate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc) diff --git a/drivers/virt/gunyah/gunyah_vcpu.c b/drivers/virt/gunyah/gunyah_vcpu.c index b6692afca3de..c329184e5fb6 100644 --- a/drivers/virt/gunyah/gunyah_vcpu.c +++ b/drivers/virt/gunyah/gunyah_vcpu.c @@ -323,14 +323,16 @@ static const struct file_operations gh_vcpu_fops = { .mmap = gh_vcpu_mmap, }; -static int gh_vcpu_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc) +static bool gh_vcpu_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc) { struct gh_vcpu *vcpu = container_of(ticket, struct gh_vcpu, ticket); int ret; mutex_lock(&vcpu->run_lock); if (vcpu->rsc) { - ret = -1; + pr_warn("vcpu%d already got a Gunyah resource. 
Check if multiple resources with same label were configured.\n", + vcpu->ticket.label); + ret = -EEXIST; goto out; } @@ -344,7 +346,7 @@ static int gh_vcpu_populate(struct gh_vm_resource_ticket *ticket, struct gh_reso out: mutex_unlock(&vcpu->run_lock); - return ret; + return !ret; } static void gh_vcpu_unpopulate(struct gh_vm_resource_ticket *ticket, diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c index fe546220b7c1..a0b067c25b56 100644 --- a/drivers/virt/gunyah/vm_mgr.c +++ b/drivers/virt/gunyah/vm_mgr.c @@ -190,11 +190,11 @@ EXPORT_SYMBOL_GPL(gh_vm_function_unregister); int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket *ticket) { struct gh_vm_resource_ticket *iter; - struct gh_resource *ghrsc; + struct gh_resource *ghrsc, *rsc_iter; int ret = 0; mutex_lock(&ghvm->resources_lock); - list_for_each_entry(iter, &ghvm->resource_tickets, list) { + list_for_each_entry(iter, &ghvm->resource_tickets, vm_list) { if (iter->resource_type == ticket->resource_type && iter->label == ticket->label) { ret = -EEXIST; goto out; @@ -206,10 +206,10 @@ int gh_vm_add_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_ticket * goto out; } - list_add(&ticket->list, &ghvm->resource_tickets); + list_add(&ticket->vm_list, &ghvm->resource_tickets); INIT_LIST_HEAD(&ticket->resources); - list_for_each_entry(ghrsc, &ghvm->resources, list) { + list_for_each_entry_safe(ghrsc, rsc_iter, &ghvm->resources, list) { if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) { if (ticket->populate(ticket, ghrsc)) list_move(&ghrsc->list, &ticket->resources); @@ -232,7 +232,7 @@ void gh_vm_remove_resource_ticket(struct gh_vm *ghvm, struct gh_vm_resource_tick } module_put(ticket->owner); - list_del(&ticket->list); + list_del(&ticket->vm_list); mutex_unlock(&ghvm->resources_lock); } EXPORT_SYMBOL_GPL(gh_vm_remove_resource_ticket); @@ -242,12 +242,17 @@ static void gh_vm_add_resource(struct gh_vm *ghvm, struct gh_resource 
*ghrsc) struct gh_vm_resource_ticket *ticket; mutex_lock(&ghvm->resources_lock); - list_for_each_entry(ticket, &ghvm->resource_tickets, list) { + list_for_each_entry(ticket, &ghvm->resource_tickets, vm_list) { if (ghrsc->type == ticket->resource_type && ghrsc->rm_label == ticket->label) { - if (!ticket->populate(ticket, ghrsc)) { + if (ticket->populate(ticket, ghrsc)) list_add(&ghrsc->list, &ticket->resources); - goto found; - } + else + list_add(&ghrsc->list, &ghvm->resources); + /* unconditional -- we prevent multiple identical + * resource tickets so there will not be some other + * ticket elsewhere in the list if populate() failed. + */ + goto found; } } list_add(&ghrsc->list, &ghvm->resources); @@ -255,6 +260,26 @@ found: mutex_unlock(&ghvm->resources_lock); } +static void gh_vm_clean_resources(struct gh_vm *ghvm) +{ + struct gh_vm_resource_ticket *ticket, *titer; + struct gh_resource *ghrsc, *riter; + + mutex_lock(&ghvm->resources_lock); + if (!list_empty(&ghvm->resource_tickets)) { + dev_warn(ghvm->parent, "Dangling resource tickets:\n"); + list_for_each_entry_safe(ticket, titer, &ghvm->resource_tickets, vm_list) { + dev_warn(ghvm->parent, " %pS\n", ticket->populate); + gh_vm_remove_resource_ticket(ghvm, ticket); + } + } + + list_for_each_entry_safe(ghrsc, riter, &ghvm->resources, list) { + gh_rm_free_resource(ghrsc); + } + mutex_unlock(&ghvm->resources_lock); +} + static int _gh_vm_io_handler_compare(const struct rb_node *node, const struct rb_node *parent) { struct gh_vm_io_handler *n = container_of(node, struct gh_vm_io_handler, node); @@ -417,8 +442,6 @@ static void gh_vm_stop(struct gh_vm *ghvm) static void gh_vm_free(struct work_struct *work) { struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work); - struct gh_vm_resource_ticket *ticket, *titer; - struct gh_resource *ghrsc, *riter; struct gh_vm_mem *mapping, *tmp; int ret; @@ -429,20 +452,7 @@ static void gh_vm_free(struct work_struct *work) case GH_RM_VM_STATUS_INIT_FAILED: case
GH_RM_VM_STATUS_EXITED: gh_vm_remove_functions(ghvm); - - mutex_lock(&ghvm->resources_lock); - if (!list_empty(&ghvm->resource_tickets)) { - dev_warn(ghvm->parent, "Dangling resource tickets:\n"); - list_for_each_entry_safe(ticket, titer, &ghvm->resource_tickets, list) { - dev_warn(ghvm->parent, " %pS\n", ticket->populate); - gh_vm_remove_resource_ticket(ghvm, ticket); - } - } - - list_for_each_entry_safe(ghrsc, riter, &ghvm->resources, list) { - gh_rm_free_resource(ghrsc); - } - mutex_unlock(&ghvm->resources_lock); + gh_vm_clean_resources(ghvm); /* vm_status == LOAD if user creates VM, but then destroys it * without ever trying to start it. In that case, we have only diff --git a/include/linux/gunyah_vm_mgr.h b/include/linux/gunyah_vm_mgr.h index 1527861a5c63..0fa3cf6bcaca 100644 --- a/include/linux/gunyah_vm_mgr.h +++ b/include/linux/gunyah_vm_mgr.h @@ -85,13 +85,13 @@ void gh_vm_function_unregister(struct gh_vm_function *f); MODULE_ALIAS_GH_VM_FUNCTION(_type, _idx) struct gh_vm_resource_ticket { - struct list_head list; /* for gh_vm's resources list */ - struct list_head resources; /* for gh_resources's list */ + struct list_head vm_list; /* for gh_vm's resource tickets list */ + struct list_head resources; /* resources associated with this ticket */ enum gh_resource_type resource_type; u32 label; struct module *owner; - int (*populate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc); + bool (*populate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc); void (*unpopulate)(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc); }; From 28ecb1162adbdf7143042b7ba37dedc671218b67 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Fri, 14 Apr 2023 16:41:21 -0700 Subject: [PATCH 40/63] ANDROID: gunyah: Sync with latest "gunyah: vm_mgr: Add/remove user memory regions" Align Gunyah memory parcel to Gunyah v12 patches posted to kernel.org. 
We deviate from a perfect copy from kernel.org because: - in pages_are_mergeable, zone_device_pages_have_same_pgmap is not present in 6.1. Drop this check. https://lore.kernel.org/all/20230509204801.2824351-11-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: I90ec2ac416b24bcc65635f27cae7665ce879783f Signed-off-by: Elliot Berman --- drivers/virt/gunyah/vm_mgr.c | 20 ++- drivers/virt/gunyah/vm_mgr.h | 5 +- drivers/virt/gunyah/vm_mgr_mm.c | 229 ++++++++++++++++---------------- include/linux/gunyah_rsc_mgr.h | 8 +- 4 files changed, 133 insertions(+), 129 deletions(-) diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c index a0b067c25b56..81c1d5225cde 100644 --- a/drivers/virt/gunyah/vm_mgr.c +++ b/drivers/virt/gunyah/vm_mgr.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include @@ -442,7 +441,6 @@ static void gh_vm_stop(struct gh_vm *ghvm) static void gh_vm_free(struct work_struct *work) { struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work); - struct gh_vm_mem *mapping, *tmp; int ret; switch (ghvm->vm_status) { @@ -466,12 +464,7 @@ static void gh_vm_free(struct work_struct *work) wait_event(ghvm->vm_status_wait, ghvm->vm_status == GH_RM_VM_STATUS_RESET); } - mutex_lock(&ghvm->mm_lock); - list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) { - gh_vm_mem_reclaim(ghvm, mapping); - kfree(mapping); - } - mutex_unlock(&ghvm->mm_lock); + gh_vm_mem_reclaim(ghvm); fallthrough; case GH_RM_VM_STATUS_NO_STATE: ret = gh_rm_dealloc_vmid(ghvm->rm, ghvm->vmid); @@ -544,6 +537,8 @@ static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm) return ERR_PTR(ret); } + mmgrab(current->mm); + ghvm->mm = current->mm; mutex_init(&ghvm->mm_lock); INIT_LIST_HEAD(&ghvm->memory_mappings); init_rwsem(&ghvm->status_lock); @@ -586,8 +581,8 @@ static int gh_vm_start(struct gh_vm *ghvm) if (ret) { dev_warn(ghvm->parent, "Failed to %s parcel %d: %d\n", mapping->share_type == VM_MEM_LEND ? 
"lend" : "share", - mapping->parcel.label, - ret); + mapping->parcel.label, ret); + mutex_unlock(&ghvm->mm_lock); goto err; } } @@ -711,6 +706,10 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case GH_VM_SET_USER_MEM_REGION: { struct gh_userspace_memory_region region; + /* only allow owner task to add memory */ + if (ghvm->mm != current->mm) + return -EPERM; + if (copy_from_user(®ion, argp, sizeof(region))) return -EFAULT; @@ -727,7 +726,6 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_from_user(&dtb_config, argp, sizeof(dtb_config))) return -EFAULT; - dtb_config.size = PAGE_ALIGN(dtb_config.size); if (dtb_config.guest_phys_addr + dtb_config.size < dtb_config.guest_phys_addr) return -EOVERFLOW; diff --git a/drivers/virt/gunyah/vm_mgr.h b/drivers/virt/gunyah/vm_mgr.h index d713c94744fc..9fc4e30129a7 100644 --- a/drivers/virt/gunyah/vm_mgr.h +++ b/drivers/virt/gunyah/vm_mgr.h @@ -50,6 +50,7 @@ struct gh_vm { struct work_struct free_work; struct kref kref; + struct mm_struct *mm; /* userspace tied to this vm */ struct mutex mm_lock; struct list_head memory_mappings; struct mutex fn_lock; @@ -62,9 +63,7 @@ struct gh_vm { }; int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region, bool lend); -void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping); -int gh_vm_mem_free(struct gh_vm *ghvm, u32 label); -struct gh_vm_mem *gh_vm_mem_find_by_label(struct gh_vm *ghvm, u32 label); +void gh_vm_mem_reclaim(struct gh_vm *ghvm); struct gh_vm_mem *gh_vm_mem_find_by_addr(struct gh_vm *ghvm, u64 guest_phys_addr, u32 size); int gh_vm_mmio_write(struct gh_vm *ghvm, u64 addr, u32 len, u64 data); diff --git a/drivers/virt/gunyah/vm_mgr_mm.c b/drivers/virt/gunyah/vm_mgr_mm.c index b8896ca6941b..952cc85e5d4b 100644 --- a/drivers/virt/gunyah/vm_mgr_mm.c +++ b/drivers/virt/gunyah/vm_mgr_mm.c @@ -12,6 +12,21 @@ #include "vm_mgr.h" +static bool pages_are_mergeable(struct 
page *a, struct page *b) +{ + if (page_to_pfn(a) + 1 != page_to_pfn(b)) + return false; + return true; +} + +static bool gh_vm_mem_overlap(struct gh_vm_mem *a, u64 addr, u64 size) +{ + u64 a_end = a->guest_phys_addr + (a->npages << PAGE_SHIFT); + u64 end = addr + size; + + return a->guest_phys_addr < end && addr < a_end; +} + static struct gh_vm_mem *__gh_vm_mem_find_by_label(struct gh_vm *ghvm, u32 label) __must_hold(&ghvm->mm_lock) { @@ -24,10 +39,10 @@ static struct gh_vm_mem *__gh_vm_mem_find_by_label(struct gh_vm *ghvm, u32 label return NULL; } -void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping) +static void gh_vm_mem_reclaim_mapping(struct gh_vm *ghvm, struct gh_vm_mem *mapping) __must_hold(&ghvm->mm_lock) { - int i, ret = 0; + int ret = 0; if (mapping->parcel.mem_handle != GH_MEM_HANDLE_INVAL) { ret = gh_rm_mem_reclaim(ghvm->rm, &mapping->parcel); @@ -36,9 +51,10 @@ void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping) mapping->parcel.label, ret); } - if (!ret) - for (i = 0; i < mapping->npages; i++) - unpin_user_page(mapping->pages[i]); + if (!ret) { + unpin_user_pages(mapping->pages, mapping->npages); + account_locked_vm(ghvm->mm, mapping->npages, false); + } kfree(mapping->pages); kfree(mapping->parcel.acl_entries); @@ -47,21 +63,32 @@ void gh_vm_mem_reclaim(struct gh_vm *ghvm, struct gh_vm_mem *mapping) list_del(&mapping->list); } +void gh_vm_mem_reclaim(struct gh_vm *ghvm) +{ + struct gh_vm_mem *mapping, *tmp; + + mutex_lock(&ghvm->mm_lock); + + list_for_each_entry_safe(mapping, tmp, &ghvm->memory_mappings, list) { + gh_vm_mem_reclaim_mapping(ghvm, mapping); + kfree(mapping); + } + + mutex_unlock(&ghvm->mm_lock); +} + struct gh_vm_mem *gh_vm_mem_find_by_addr(struct gh_vm *ghvm, u64 guest_phys_addr, u32 size) { - struct gh_vm_mem *mapping = NULL; - int ret; + struct gh_vm_mem *mapping; - ret = mutex_lock_interruptible(&ghvm->mm_lock); - if (ret) - return ERR_PTR(ret); + if (overflows_type(guest_phys_addr + size, u64)) + 
return NULL; + + mutex_lock(&ghvm->mm_lock); list_for_each_entry(mapping, &ghvm->memory_mappings, list) { - if (guest_phys_addr >= mapping->guest_phys_addr && - (guest_phys_addr + size <= mapping->guest_phys_addr + - (mapping->npages << PAGE_SHIFT))) { + if (gh_vm_mem_overlap(mapping, guest_phys_addr, size)) goto unlock; - } } mapping = NULL; @@ -70,91 +97,81 @@ unlock: return mapping; } -struct gh_vm_mem *gh_vm_mem_find_by_label(struct gh_vm *ghvm, u32 label) -{ - struct gh_vm_mem *mapping; - int ret; - - ret = mutex_lock_interruptible(&ghvm->mm_lock); - if (ret) - return ERR_PTR(ret); - - mapping = __gh_vm_mem_find_by_label(ghvm, label); - mutex_unlock(&ghvm->mm_lock); - - return mapping ? : ERR_PTR(-ENODEV); -} - int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *region, bool lend) { struct gh_vm_mem *mapping, *tmp_mapping; - struct gh_rm_mem_entry *mem_entries; - phys_addr_t curr_page, prev_page; + struct page *curr_page, *prev_page; struct gh_rm_mem_parcel *parcel; int i, j, pinned, ret = 0; + unsigned int gup_flags; size_t entry_size; u16 vmid; if (!region->memory_size || !PAGE_ALIGNED(region->memory_size) || - !PAGE_ALIGNED(region->userspace_addr) || !PAGE_ALIGNED(region->guest_phys_addr)) + !PAGE_ALIGNED(region->userspace_addr) || + !PAGE_ALIGNED(region->guest_phys_addr)) return -EINVAL; - if (region->guest_phys_addr + region->memory_size < region->guest_phys_addr) + if (overflows_type(region->guest_phys_addr + region->memory_size, u64)) return -EOVERFLOW; ret = mutex_lock_interruptible(&ghvm->mm_lock); if (ret) return ret; + /* Check label is unique */ mapping = __gh_vm_mem_find_by_label(ghvm, region->label); if (mapping) { - mutex_unlock(&ghvm->mm_lock); - return -EEXIST; + ret = -EEXIST; + goto unlock; } - mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); - if (!mapping) { - mutex_unlock(&ghvm->mm_lock); - return -ENOMEM; - } - - mapping->parcel.label = region->label; - mapping->guest_phys_addr = region->guest_phys_addr; - 
mapping->npages = region->memory_size >> PAGE_SHIFT; - parcel = &mapping->parcel; - parcel->mem_handle = GH_MEM_HANDLE_INVAL; /* to be filled later by mem_share/mem_lend */ - parcel->mem_type = GH_RM_MEM_TYPE_NORMAL; - /* Check for overlap */ list_for_each_entry(tmp_mapping, &ghvm->memory_mappings, list) { - if (!((mapping->guest_phys_addr + (mapping->npages << PAGE_SHIFT) <= - tmp_mapping->guest_phys_addr) || - (mapping->guest_phys_addr >= - tmp_mapping->guest_phys_addr + (tmp_mapping->npages << PAGE_SHIFT)))) { + if (gh_vm_mem_overlap(tmp_mapping, region->guest_phys_addr, + region->memory_size)) { ret = -EEXIST; - goto free_mapping; + goto unlock; } } - list_add(&mapping->list, &ghvm->memory_mappings); + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL_ACCOUNT); + if (!mapping) { + ret = -ENOMEM; + goto unlock; + } - mapping->pages = kcalloc(mapping->npages, sizeof(*mapping->pages), GFP_KERNEL); + mapping->guest_phys_addr = region->guest_phys_addr; + mapping->npages = region->memory_size >> PAGE_SHIFT; + parcel = &mapping->parcel; + parcel->label = region->label; + parcel->mem_handle = GH_MEM_HANDLE_INVAL; /* to be filled later by mem_share/mem_lend */ + parcel->mem_type = GH_RM_MEM_TYPE_NORMAL; + + ret = account_locked_vm(ghvm->mm, mapping->npages, true); + if (ret) + goto free_mapping; + + mapping->pages = kcalloc(mapping->npages, sizeof(*mapping->pages), GFP_KERNEL_ACCOUNT); if (!mapping->pages) { ret = -ENOMEM; mapping->npages = 0; /* update npages for reclaim */ - goto reclaim; + goto unlock_pages; } + gup_flags = FOLL_LONGTERM; + if (region->flags & GH_MEM_ALLOW_WRITE) + gup_flags |= FOLL_WRITE; + pinned = pin_user_pages_fast(region->userspace_addr, mapping->npages, - FOLL_WRITE | FOLL_LONGTERM, mapping->pages); + gup_flags, mapping->pages); if (pinned < 0) { ret = pinned; - mapping->npages = 0; /* update npages for reclaim */ - goto reclaim; + goto free_pages; } else if (pinned != mapping->npages) { ret = -EFAULT; mapping->npages = pinned; /* update npages 
for reclaim */ - goto reclaim; + goto unpin_pages; } if (lend) { @@ -164,15 +181,16 @@ int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *regio parcel->n_acl_entries = 2; mapping->share_type = VM_MEM_SHARE; } - parcel->acl_entries = kcalloc(parcel->n_acl_entries, sizeof(*parcel->acl_entries), - GFP_KERNEL); + parcel->acl_entries = kcalloc(parcel->n_acl_entries, + sizeof(*parcel->acl_entries), GFP_KERNEL); if (!parcel->acl_entries) { ret = -ENOMEM; - goto reclaim; + goto unpin_pages; } - parcel->acl_entries[0].vmid = cpu_to_le16(ghvm->vmid); - + /* acl_entries[0].vmid will be this VM's vmid. We'll fill it when the + * VM is starting and we know the VM's vmid. + */ if (region->flags & GH_MEM_ALLOW_READ) parcel->acl_entries[0].perms |= GH_RM_ACL_R; if (region->flags & GH_MEM_ALLOW_WRITE) @@ -180,78 +198,67 @@ int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *regio if (region->flags & GH_MEM_ALLOW_EXEC) parcel->acl_entries[0].perms |= GH_RM_ACL_X; - if (mapping->share_type == VM_MEM_SHARE) { + if (!lend) { ret = gh_rm_get_vmid(ghvm->rm, &vmid); if (ret) - goto reclaim; + goto free_acl; parcel->acl_entries[1].vmid = cpu_to_le16(vmid); /* Host assumed to have all these permissions. 
Gunyah will not - * grant new permissions if host actually had less than RWX - */ - parcel->acl_entries[1].perms |= GH_RM_ACL_R | GH_RM_ACL_W | GH_RM_ACL_X; + * grant new permissions if host actually had less than RWX + */ + parcel->acl_entries[1].perms = GH_RM_ACL_R | GH_RM_ACL_W | GH_RM_ACL_X; } - mem_entries = kcalloc(mapping->npages, sizeof(*mem_entries), GFP_KERNEL); - if (!mem_entries) { + parcel->n_mem_entries = 1; + for (i = 1; i < mapping->npages; i++) { + if (!pages_are_mergeable(mapping->pages[i - 1], mapping->pages[i])) + parcel->n_mem_entries++; + } + + parcel->mem_entries = kcalloc(parcel->n_mem_entries, + sizeof(parcel->mem_entries[0]), + GFP_KERNEL_ACCOUNT); + if (!parcel->mem_entries) { ret = -ENOMEM; - goto reclaim; + goto free_acl; } /* reduce number of entries by combining contiguous pages into single memory entry */ - prev_page = page_to_phys(mapping->pages[0]); - mem_entries[0].ipa_base = cpu_to_le64(prev_page); + prev_page = mapping->pages[0]; + parcel->mem_entries[0].ipa_base = cpu_to_le64(page_to_phys(prev_page)); entry_size = PAGE_SIZE; for (i = 1, j = 0; i < mapping->npages; i++) { - curr_page = page_to_phys(mapping->pages[i]); - if (curr_page - prev_page == PAGE_SIZE) { + curr_page = mapping->pages[i]; + if (pages_are_mergeable(prev_page, curr_page)) { entry_size += PAGE_SIZE; } else { - mem_entries[j].size = cpu_to_le64(entry_size); + parcel->mem_entries[j].size = cpu_to_le64(entry_size); j++; - mem_entries[j].ipa_base = cpu_to_le64(curr_page); + BUG_ON(j >= parcel->n_mem_entries); + parcel->mem_entries[j].ipa_base = + cpu_to_le64(page_to_phys(curr_page)); entry_size = PAGE_SIZE; } prev_page = curr_page; } - mem_entries[j].size = cpu_to_le64(entry_size); - - parcel->n_mem_entries = j + 1; - parcel->mem_entries = kmemdup(mem_entries, sizeof(*mem_entries) * parcel->n_mem_entries, - GFP_KERNEL); - kfree(mem_entries); - if (!parcel->mem_entries) { - ret = -ENOMEM; - goto reclaim; - } + parcel->mem_entries[j].size = cpu_to_le64(entry_size); 
+ list_add(&mapping->list, &ghvm->memory_mappings); mutex_unlock(&ghvm->mm_lock); return 0; -reclaim: - gh_vm_mem_reclaim(ghvm, mapping); +free_acl: + kfree(parcel->acl_entries); +unpin_pages: + unpin_user_pages(mapping->pages, pinned); +free_pages: + kfree(mapping->pages); +unlock_pages: + account_locked_vm(ghvm->mm, mapping->npages, false); free_mapping: kfree(mapping); - mutex_unlock(&ghvm->mm_lock); - return ret; -} - -int gh_vm_mem_free(struct gh_vm *ghvm, u32 label) -{ - struct gh_vm_mem *mapping; - int ret; - - ret = mutex_lock_interruptible(&ghvm->mm_lock); - if (ret) - return ret; - - mapping = __gh_vm_mem_find_by_label(ghvm, label); - if (!mapping) - goto out; - - gh_vm_mem_reclaim(ghvm, mapping); - kfree(mapping); -out: +unlock: mutex_unlock(&ghvm->mm_lock); return ret; } diff --git a/include/linux/gunyah_rsc_mgr.h b/include/linux/gunyah_rsc_mgr.h index 7e2c9b7d5e20..27283c881ecb 100644 --- a/include/linux/gunyah_rsc_mgr.h +++ b/include/linux/gunyah_rsc_mgr.h @@ -81,16 +81,16 @@ enum gh_rm_mem_type { }; /* - * struct gh_rm_mem_parcel - Package info about memory to be lent/shared/donated/reclaimed + * struct gh_rm_mem_parcel - Info about memory to be lent/shared/donated/reclaimed * @mem_type: The type of memory: normal (DDR) or IO * @label: An client-specified identifier which can be used by the other VMs to identify the purpose * of the memory parcel. + * @n_acl_entries: Count of the number of entries in the @acl_entries array. * @acl_entries: An array of access control entries. Each entry specifies a VM and what access * is allowed for the memory parcel. - * @n_acl_entries: Count of the number of entries in the `acl_entries` array. - * @mem_entries: An list of regions to be associated with the memory parcel. Addresses should be + * @n_mem_entries: Count of the number of entries in the @mem_entries array. + * @mem_entries: An array of regions to be associated with the memory parcel. 
Addresses should be * (intermediate) physical addresses from Linux's perspective. - * @n_mem_entries: Count of the number of entries in the `mem_entries` array. * @mem_handle: On success, filled with memory handle that RM allocates for this memory parcel */ struct gh_rm_mem_parcel { From 1b9d0e44a7a0892941afe40e7ffb4c6caaad0c37 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Mon, 17 Apr 2023 11:30:25 -0700 Subject: [PATCH 41/63] ANDROID: gunyah: Sync with latest "gunyah: vm_mgr: Add ioctls to support basic non-proxy VM boot" Align VM lifecycle to Gunyah v13 patches posted to kernel.org. - Move gh_vm_free and kref functions down. - Simplify/clean up gh_vm_free - Defer vmid allocation to when the VM is being started https://lore.kernel.org/all/20230509204801.2824351-12-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: I413865c16a730365edc83385bc37394b99517ab1 Signed-off-by: Elliot Berman --- drivers/virt/gunyah/vm_mgr.c | 180 ++++++++++++++++------------------- 1 file changed, 83 insertions(+), 97 deletions(-) diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c index 81c1d5225cde..cda7d8b26ef4 100644 --- a/drivers/virt/gunyah/vm_mgr.c +++ b/drivers/virt/gunyah/vm_mgr.c @@ -438,117 +438,33 @@ static void gh_vm_stop(struct gh_vm *ghvm) up_write(&ghvm->status_lock); } -static void gh_vm_free(struct work_struct *work) -{ - struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work); - int ret; - - switch (ghvm->vm_status) { - case GH_RM_VM_STATUS_RUNNING: - gh_vm_stop(ghvm); - fallthrough; - case GH_RM_VM_STATUS_INIT_FAILED: - case GH_RM_VM_STATUS_EXITED: - gh_vm_remove_functions(ghvm); - gh_vm_clean_resources(ghvm); - - /* vm_status == LOAD if user creates VM, but then destroys it - * without ever trying to start it. In that case, we have only - * allocated VMID. Clean up functions (above), memory (below), - * and dealloc vmid (below), but no call gh_rm_vm_reset(). 
- */ - if (ghvm->vm_status != GH_RM_VM_STATUS_LOAD) { - ret = gh_rm_vm_reset(ghvm->rm, ghvm->vmid); - if (ret) - dev_err(ghvm->parent, "Failed to reset the vm: %d\n", ret); - wait_event(ghvm->vm_status_wait, ghvm->vm_status == GH_RM_VM_STATUS_RESET); - } - - gh_vm_mem_reclaim(ghvm); - fallthrough; - case GH_RM_VM_STATUS_NO_STATE: - ret = gh_rm_dealloc_vmid(ghvm->rm, ghvm->vmid); - if (ret) - dev_warn(ghvm->parent, "Failed to deallocate vmid: %d\n", ret); - - gh_rm_notifier_unregister(ghvm->rm, &ghvm->nb); - gh_rm_put(ghvm->rm); - kfree(ghvm); - break; - default: - dev_err(ghvm->parent, "VM is unknown state: %d. VM will not be cleaned up.\n", - ghvm->vm_status); - - gh_rm_notifier_unregister(ghvm->rm, &ghvm->nb); - gh_rm_put(ghvm->rm); - kfree(ghvm); - break; - } -} - -static void _gh_vm_put(struct kref *kref) -{ - struct gh_vm *ghvm = container_of(kref, struct gh_vm, kref); - - /* VM will be reset and make RM calls which can interruptible sleep. - * Defer to a work so this thread can receive signal. 
- */ - schedule_work(&ghvm->free_work); -} - -int __must_check gh_vm_get(struct gh_vm *ghvm) -{ - return kref_get_unless_zero(&ghvm->kref); -} -EXPORT_SYMBOL_GPL(gh_vm_get); - -void gh_vm_put(struct gh_vm *ghvm) -{ - kref_put(&ghvm->kref, _gh_vm_put); -} -EXPORT_SYMBOL_GPL(gh_vm_put); - static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm) { struct gh_vm *ghvm; - int vmid, ret; - - vmid = gh_rm_alloc_vmid(rm, 0); - if (vmid < 0) - return ERR_PTR(vmid); ghvm = kzalloc(sizeof(*ghvm), GFP_KERNEL); - if (!ghvm) { - gh_rm_dealloc_vmid(rm, vmid); + if (!ghvm) return ERR_PTR(-ENOMEM); - } ghvm->parent = gh_rm_get(rm); - ghvm->vmid = vmid; + ghvm->vmid = GH_VMID_INVAL; ghvm->rm = rm; - init_waitqueue_head(&ghvm->vm_status_wait); - ghvm->nb.notifier_call = gh_vm_rm_notification; - ret = gh_rm_notifier_register(rm, &ghvm->nb); - if (ret) { - gh_rm_put(rm); - gh_rm_dealloc_vmid(rm, vmid); - kfree(ghvm); - return ERR_PTR(ret); - } - mmgrab(current->mm); ghvm->mm = current->mm; mutex_init(&ghvm->mm_lock); INIT_LIST_HEAD(&ghvm->memory_mappings); init_rwsem(&ghvm->status_lock); + init_waitqueue_head(&ghvm->vm_status_wait); INIT_WORK(&ghvm->free_work, gh_vm_free); kref_init(&ghvm->kref); mutex_init(&ghvm->resources_lock); INIT_LIST_HEAD(&ghvm->resources); INIT_LIST_HEAD(&ghvm->resource_tickets); + init_rwsem(&ghvm->mmio_handler_lock); + ghvm->mmio_handler_root = RB_ROOT; INIT_LIST_HEAD(&ghvm->functions); - ghvm->vm_status = GH_RM_VM_STATUS_LOAD; + ghvm->vm_status = GH_RM_VM_STATUS_NO_STATE; return ghvm; } @@ -563,13 +479,27 @@ static int gh_vm_start(struct gh_vm *ghvm) int ret, i, n; down_write(&ghvm->status_lock); - if (ghvm->vm_status != GH_RM_VM_STATUS_LOAD) { + if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE) { up_write(&ghvm->status_lock); return 0; } + ghvm->nb.notifier_call = gh_vm_rm_notification; + ret = gh_rm_notifier_register(ghvm->rm, &ghvm->nb); + if (ret) + goto err; + + ret = gh_rm_alloc_vmid(ghvm->rm, 0); + if (ret < 0) { + 
gh_rm_notifier_unregister(ghvm->rm, &ghvm->nb); + goto err; + } + ghvm->vmid = ret; + ghvm->vm_status = GH_RM_VM_STATUS_LOAD; + mutex_lock(&ghvm->mm_lock); list_for_each_entry(mapping, &ghvm->memory_mappings, list) { + mapping->parcel.acl_entries[0].vmid = cpu_to_le16(ghvm->vmid); switch (mapping->share_type) { case VM_MEM_LEND: ret = gh_rm_mem_lend(ghvm->rm, &mapping->parcel); @@ -624,11 +554,12 @@ static int gh_vm_start(struct gh_vm *ghvm) } ret = gh_rm_vm_init(ghvm->rm, ghvm->vmid); - ghvm->vm_status = GH_RM_VM_STATUS_RESET; if (ret) { + ghvm->vm_status = GH_RM_VM_STATUS_INIT_FAILED; dev_warn(ghvm->parent, "Failed to initialize VM: %d\n", ret); goto err; } + ghvm->vm_status = GH_RM_VM_STATUS_READY; ret = gh_rm_get_hyp_resources(ghvm->rm, ghvm->vmid, &resources); if (ret) { @@ -656,7 +587,6 @@ static int gh_vm_start(struct gh_vm *ghvm) up_write(&ghvm->status_lock); return ret; err: - ghvm->vm_status = GH_RM_VM_STATUS_INIT_FAILED; /* gh_vm_free will handle releasing resources and reclaiming memory */ up_write(&ghvm->status_lock); return ret; @@ -671,11 +601,11 @@ static int gh_vm_ensure_started(struct gh_vm *ghvm) return ret; /* Unlikely because VM is typically started */ - if (unlikely(ghvm->vm_status == GH_RM_VM_STATUS_LOAD)) { + if (unlikely(ghvm->vm_status == GH_RM_VM_STATUS_NO_STATE)) { up_read(&ghvm->status_lock); ret = gh_vm_start(ghvm); if (ret) - goto out; + return ret; /** gh_vm_start() is guaranteed to bring status out of * GH_RM_VM_STATUS_LOAD, thus inifitely recursive call is not * possible @@ -687,7 +617,6 @@ static int gh_vm_ensure_started(struct gh_vm *ghvm) if (unlikely(ghvm->vm_status != GH_RM_VM_STATUS_RUNNING)) ret = -ENODEV; -out: up_read(&ghvm->status_lock); return ret; } @@ -773,6 +702,63 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return r; } +static void gh_vm_free(struct work_struct *work) +{ + struct gh_vm *ghvm = container_of(work, struct gh_vm, free_work); + int ret; + + if (ghvm->vm_status == 
GH_RM_VM_STATUS_RUNNING) + gh_vm_stop(ghvm); + + gh_vm_remove_functions(ghvm); + gh_vm_clean_resources(ghvm); + + if (ghvm->vm_status != GH_RM_VM_STATUS_NO_STATE && + ghvm->vm_status != GH_RM_VM_STATUS_LOAD && + ghvm->vm_status != GH_RM_VM_STATUS_RESET) { + ret = gh_rm_vm_reset(ghvm->rm, ghvm->vmid); + if (ret) + dev_err(ghvm->parent, "Failed to reset the vm: %d\n", ret); + wait_event(ghvm->vm_status_wait, ghvm->vm_status == GH_RM_VM_STATUS_RESET); + } + + gh_vm_mem_reclaim(ghvm); + + if (ghvm->vm_status > GH_RM_VM_STATUS_NO_STATE) { + gh_rm_notifier_unregister(ghvm->rm, &ghvm->nb); + + ret = gh_rm_dealloc_vmid(ghvm->rm, ghvm->vmid); + if (ret) + dev_warn(ghvm->parent, "Failed to deallocate vmid: %d\n", ret); + } + + gh_rm_put(ghvm->rm); + mmdrop(ghvm->mm); + kfree(ghvm); +} + +int __must_check gh_vm_get(struct gh_vm *ghvm) +{ + return kref_get_unless_zero(&ghvm->kref); +} +EXPORT_SYMBOL_GPL(gh_vm_get); + +static void _gh_vm_put(struct kref *kref) +{ + struct gh_vm *ghvm = container_of(kref, struct gh_vm, kref); + + /* VM will be reset and make RM calls which can interruptible sleep. + * Defer to a work so this thread can receive signal. 
+ */ + schedule_work(&ghvm->free_work); +} + +void gh_vm_put(struct gh_vm *ghvm) +{ + kref_put(&ghvm->kref, _gh_vm_put); +} +EXPORT_SYMBOL_GPL(gh_vm_put); + static int gh_vm_release(struct inode *inode, struct file *filp) { struct gh_vm *ghvm = filp->private_data; @@ -822,7 +808,7 @@ static long gh_dev_ioctl_create_vm(struct gh_rm *rm, unsigned long arg) err_put_fd: put_unused_fd(fd); err_destroy_vm: - gh_vm_free(&ghvm->free_work); + gh_vm_put(ghvm); return err; } From 2220f8190ad5334308bf75973d5083204efb86ae Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Mon, 17 Apr 2023 11:33:33 -0700 Subject: [PATCH 42/63] ANDROID: gunyah: Sync with latest "virt: gunyah: Add ioeventfd" Align ioeventfd handling to Gunyah v13 patches: https://lore.kernel.org/all/20230509204801.2824351-24-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: I8d66d83bee284eacb4bc9d76d3cbfd52785d9661 Signed-off-by: Elliot Berman --- drivers/virt/gunyah/gunyah_ioeventfd.c | 18 +++++++++--------- drivers/virt/gunyah/vm_mgr.c | 14 +++++++++++--- include/uapi/linux/gunyah.h | 2 +- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/virt/gunyah/gunyah_ioeventfd.c b/drivers/virt/gunyah/gunyah_ioeventfd.c index f61291c17be5..5b1b9fd9ac3a 100644 --- a/drivers/virt/gunyah/gunyah_ioeventfd.c +++ b/drivers/virt/gunyah/gunyah_ioeventfd.c @@ -35,13 +35,17 @@ static struct gh_vm_io_handler_ops io_ops = { static long gh_ioeventfd_bind(struct gh_vm_function_instance *f) { const struct gh_fn_ioeventfd_arg *args = f->argp; - struct eventfd_ctx *ctx = NULL; struct gh_ioeventfd *iofd; + struct eventfd_ctx *ctx; int ret; if (f->arg_size != sizeof(*args)) return -EINVAL; + /* All other flag bits are reserved for future use */ + if (args->flags & ~GH_IOEVENTFD_FLAGS_DATAMATCH) + return -EINVAL; + /* must be natural-word sized, or 0 to ignore length */ switch (args->len) { case 0: @@ -55,15 +59,11 @@ static long gh_ioeventfd_bind(struct gh_vm_function_instance *f) } /* check for range overflow */ - if 
(args->addr + args->len < args->addr) + if (overflows_type(args->addr + args->len, u64)) return -EINVAL; /* ioeventfd with no length can't be combined with DATAMATCH */ - if (!args->len && (args->flags & GH_IOEVENTFD_DATAMATCH)) - return -EINVAL; - - /* All other flag bits are reserved for future use */ - if (args->flags & ~GH_IOEVENTFD_DATAMATCH) + if (!args->len && (args->flags & GH_IOEVENTFD_FLAGS_DATAMATCH)) return -EINVAL; ctx = eventfd_ctx_fdget(args->fd); @@ -81,7 +81,7 @@ static long gh_ioeventfd_bind(struct gh_vm_function_instance *f) iofd->ctx = ctx; - if (args->flags & GH_IOEVENTFD_DATAMATCH) { + if (args->flags & GH_IOEVENTFD_FLAGS_DATAMATCH) { iofd->io_handler.datamatch = true; iofd->io_handler.len = args->len; iofd->io_handler.data = args->datamatch; @@ -126,5 +126,5 @@ static bool gh_ioevent_compare(const struct gh_vm_function_instance *f, DECLARE_GH_VM_FUNCTION_INIT(ioeventfd, GH_FN_IOEVENTFD, 3, gh_ioeventfd_bind, gh_ioevent_unbind, gh_ioevent_compare); -MODULE_DESCRIPTION("Gunyah ioeventfds"); +MODULE_DESCRIPTION("Gunyah ioeventfd VM Function"); MODULE_LICENSE("GPL"); diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c index cda7d8b26ef4..1ff96c35af56 100644 --- a/drivers/virt/gunyah/vm_mgr.c +++ b/drivers/virt/gunyah/vm_mgr.c @@ -294,9 +294,16 @@ static int _gh_vm_io_handler_compare(const struct rb_node *node, const struct rb return -1; if (n->len > p->len) return 1; - if (n->datamatch < p->datamatch) + /* one of the io handlers doesn't have datamatch and the other does. + * For purposes of comparison, that makes them identical since the + * one that doesn't have datamatch will cover the same handler that + * does. 
+ */ + if (n->datamatch != p->datamatch) + return 0; + if (n->data < p->data) return -1; - if (n->datamatch > p->datamatch) + if (n->data > p->data) return 1; return 0; } @@ -319,7 +326,8 @@ static struct gh_vm_io_handler *gh_vm_mgr_find_io_hdlr(struct gh_vm *ghvm, u64 a struct gh_vm_io_handler key = { .addr = addr, .len = len, - .datamatch = data, + .datamatch = true, + .data = data, }; struct rb_node *node; diff --git a/include/uapi/linux/gunyah.h b/include/uapi/linux/gunyah.h index b747d8a1ce0c..a0ae5673908c 100644 --- a/include/uapi/linux/gunyah.h +++ b/include/uapi/linux/gunyah.h @@ -133,7 +133,7 @@ struct gh_fn_irqfd_arg { __u32 padding; }; -#define GH_IOEVENTFD_DATAMATCH (1UL << 0) +#define GH_IOEVENTFD_FLAGS_DATAMATCH (1UL << 0) /** * struct gh_fn_ioeventfd_arg - Arguments to create an ioeventfd function From b0426ab62ea5521ce3db2432834920930853e238 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Thu, 12 Jan 2023 10:23:12 -0800 Subject: [PATCH 43/63] BACKPORT: misc: fastrpc: Pass bitfield into qcom_scm_assign_mem The srcvm parameter of qcom_scm_assign_mem is a pointer to a bitfield of VMIDs. The bitfield is updated with which VMIDs have permissions after the qcom_scm_assign_mem call. This makes it simpler for clients to make qcom_scm_assign_mem calls later, they always pass in same srcvm bitfield and do not need to closely track whether memory was originally shared. When restoring permissions to HLOS, fastrpc is incorrectly using the first VMID directly -- neither the BIT nor the other possible VMIDs the memory was already assigned to. We already have a field intended for this purpose: "perms" in the struct fastrpc_channel_ctx, but it was never used. Start using the perms field. 
Cc: Abel Vesa Cc: Vamsi Krishna Gattupalli Cc: Srinivas Kandagatla Fixes: e90d91190619 ("misc: fastrpc: Add support to secure memory map") Fixes: 0871561055e6 ("misc: fastrpc: Add support for audiopd") Fixes: 532ad70c6d44 ("misc: fastrpc: Add mmap request assigning for static PD pool") Tested-by: Srinivas Kandagatla Signed-off-by: Elliot Berman drivers/misc/fastrpc.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) Link: https://lore.kernel.org/r/20230112182313.521467-1-quic_eberman@quicinc.com Signed-off-by: Greg Kroah-Hartman Bug: 279506910 (cherry picked from commit aaca766c77fcf5aabda846d3372a1d40b0d4735d) [eberman: Drop modifications to qcom_scm_assign_mem not in 14-6.1] Change-Id: I9eff564504fa277519245a446eb6fcad41a0ee42 Signed-off-by: Elliot Berman --- drivers/misc/fastrpc.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 02d26160c64e..1cd3e8ceb7b8 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -303,7 +303,7 @@ static void fastrpc_free_map(struct kref *ref) perm.vmid = QCOM_SCM_VMID_HLOS; perm.perm = QCOM_SCM_PERM_RWX; err = qcom_scm_assign_mem(map->phys, map->size, - &(map->fl->cctx->vmperms[0].vmid), &perm, 1); + &map->fl->cctx->perms, &perm, 1); if (err) { dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d", map->phys, map->size, err); @@ -754,10 +754,8 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd, * If subsystem VMIDs are defined in DTSI, then do * hyp_assign from HLOS to those VM(s) */ - unsigned int perms = BIT(QCOM_SCM_VMID_HLOS); - map->attr = attr; - err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms, + err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms, fl->cctx->vmperms, fl->cctx->vmcount); if (err) { dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d", From 15a4929f8e00ecaa2c1488ba8a5e8bf87ba5ee72 Mon Sep 17 00:00:00 
2001 From: Elliot Berman Date: Mon, 13 Feb 2023 10:18:29 -0800 Subject: [PATCH 44/63] BACKPORT: firmware: qcom_scm: Use fixed width src vm bitmap The maximum VMID for assign_mem is 63. Use a u64 to represent this bitmap instead of architecture-dependent "unsigned int" which varies in size on 32-bit and 64-bit platforms. Acked-by: Kalle Valo (ath10k) Tested-by: Gokul krishna Krishnakumar Signed-off-by: Elliot Berman Reviewed-by: Bjorn Andersson Signed-off-by: Bjorn Andersson Link: https://lore.kernel.org/r/20230213181832.3489174-1-quic_eberman@quicinc.com Bug: 279506910 (cherry picked from commit 968a26a07f75377afbd4f7bb18ef587a1443c244) Change-Id: Ie7125d1299e4edda47f3e6e9031dc515cfdd8f0f [eberman: Drop modifications to drivers/remoteproc/qcom_q6v5_pas.c which don't exist in 14-6.1] Signed-off-by: Elliot Berman --- drivers/firmware/qcom_scm.c | 12 +++++++----- drivers/misc/fastrpc.c | 2 +- drivers/net/wireless/ath/ath10k/qmi.c | 4 ++-- drivers/remoteproc/qcom_q6v5_mss.c | 8 ++++---- drivers/soc/qcom/rmtfs_mem.c | 2 +- include/linux/qcom_scm.h | 2 +- 6 files changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index cdbfe54c8146..92763dce6477 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -898,7 +898,7 @@ static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region, * Return negative errno on failure or 0 on success with @srcvm updated. 
*/ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, - unsigned int *srcvm, + u64 *srcvm, const struct qcom_scm_vmperm *newvm, unsigned int dest_cnt) { @@ -915,9 +915,9 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, __le32 *src; void *ptr; int ret, i, b; - unsigned long srcvm_bits = *srcvm; + u64 srcvm_bits = *srcvm; - src_sz = hweight_long(srcvm_bits) * sizeof(*src); + src_sz = hweight64(srcvm_bits) * sizeof(*src); mem_to_map_sz = sizeof(*mem_to_map); dest_sz = dest_cnt * sizeof(*destvm); ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) + @@ -930,8 +930,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, /* Fill source vmid detail */ src = ptr; i = 0; - for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG) - src[i++] = cpu_to_le32(b); + for (b = 0; b < BITS_PER_TYPE(u64); b++) { + if (srcvm_bits & BIT(b)) + src[i++] = cpu_to_le32(b); + } /* Fill details of mem buff to map */ mem_to_map = ptr + ALIGN(src_sz, SZ_64); diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 1cd3e8ceb7b8..67c13a4df589 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -247,7 +247,7 @@ struct fastrpc_channel_ctx { int domain_id; int sesscount; int vmcount; - u32 perms; + u64 perms; struct qcom_scm_vmperm vmperms[FASTRPC_MAX_VMIDS]; struct rpmsg_device *rpdev; struct fastrpc_session_ctx session[FASTRPC_MAX_SESSIONS]; diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 66cb7a1e628a..6d1d87e1cdde 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -28,7 +28,7 @@ static int ath10k_qmi_map_msa_permission(struct ath10k_qmi *qmi, { struct qcom_scm_vmperm dst_perms[3]; struct ath10k *ar = qmi->ar; - unsigned int src_perms; + u64 src_perms; u32 perm_count; int ret; @@ -60,7 +60,7 @@ static int ath10k_qmi_unmap_msa_permission(struct ath10k_qmi *qmi, { struct qcom_scm_vmperm dst_perms; struct ath10k *ar = qmi->ar; - unsigned int 
src_perms; + u64 src_perms; int ret; src_perms = BIT(QCOM_SCM_VMID_MSS_MSA) | BIT(QCOM_SCM_VMID_WLAN); diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index 7dbab5fcbe1e..fb03c706437a 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -230,8 +230,8 @@ struct q6v5 { bool has_qaccept_regs; bool has_ext_cntl_regs; bool has_vq6; - int mpss_perm; - int mba_perm; + u64 mpss_perm; + u64 mba_perm; const char *hexagon_mdt_image; int version; }; @@ -407,7 +407,7 @@ static void q6v5_pds_disable(struct q6v5 *qproc, struct device **pds, } } -static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, +static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, u64 *current_perm, bool local, bool remote, phys_addr_t addr, size_t size) { @@ -939,7 +939,7 @@ static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw, unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS; dma_addr_t phys; void *metadata; - int mdata_perm; + u64 mdata_perm; int xferop_ret; size_t size; void *ptr; diff --git a/drivers/soc/qcom/rmtfs_mem.c b/drivers/soc/qcom/rmtfs_mem.c index 0feaae357821..69991e47aa23 100644 --- a/drivers/soc/qcom/rmtfs_mem.c +++ b/drivers/soc/qcom/rmtfs_mem.c @@ -30,7 +30,7 @@ struct qcom_rmtfs_mem { unsigned int client_id; - unsigned int perms; + u64 perms; }; static ssize_t qcom_rmtfs_mem_show(struct device *dev, diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index f8335644a01a..77f7b5837216 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h @@ -96,7 +96,7 @@ extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size, u32 cp_nonpixel_start, u32 cp_nonpixel_size); extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, - unsigned int *src, + u64 *src, const struct qcom_scm_vmperm *newvm, unsigned int dest_cnt); From b3f59a9b33183e778f8e024d1ee28bde85be6f41 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Mon, 17 Apr 2023 
11:41:51 -0700 Subject: [PATCH 45/63] ANDROID: gunyah: Sync with latest "firmware: qcom_scm: Register Gunyah platform ops" The QCOM platform hooks are not enabled in gki_defconfig, but backport to align to Gunyah v13 patches posted to kernel.org: https://lore.kernel.org/all/20230509204801.2824351-15-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: Idce927cfa89cfea137b96024dd3c5a2bf297da82 Signed-off-by: Elliot Berman --- arch/arm64/configs/gki_defconfig | 1 + drivers/virt/gunyah/Kconfig | 13 +++ drivers/virt/gunyah/Makefile | 1 + drivers/virt/gunyah/gunyah_qcom.c | 147 ++++++++++++++++++++++++++++++ 4 files changed, 162 insertions(+) create mode 100644 drivers/virt/gunyah/gunyah_qcom.c diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index fb4106db4a43..260befab3d4b 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -538,6 +538,7 @@ CONFIG_DMABUF_HEAPS_PAGE_POOL=y CONFIG_UIO=y CONFIG_VIRT_DRIVERS=y CONFIG_GUNYAH=y +# CONFIG_GUNYAH_QCOM_PLATFORM is not set CONFIG_GUNYAH_VCPU=y CONFIG_GUNYAH_IRQFD=y CONFIG_GUNYAH_IOEVENTFD=y diff --git a/drivers/virt/gunyah/Kconfig b/drivers/virt/gunyah/Kconfig index 2eac0b68fcec..02c7a8b60eae 100644 --- a/drivers/virt/gunyah/Kconfig +++ b/drivers/virt/gunyah/Kconfig @@ -6,6 +6,7 @@ config GUNYAH depends on MAILBOX select GUNYAH_PLATFORM_HOOKS select AUXILIARY_BUS + imply GUNYAH_QCOM_PLATFORM if ARCH_QCOM help The Gunyah drivers are the helper interfaces that run in a guest VM such as basic inter-VM IPC and signaling mechanisms, and higher level @@ -17,6 +18,18 @@ config GUNYAH config GUNYAH_PLATFORM_HOOKS tristate +config GUNYAH_QCOM_PLATFORM + tristate "Support for Gunyah on Qualcomm platforms" + depends on GUNYAH + select GUNYAH_PLATFORM_HOOKS + select QCOM_SCM + help + Enable support for interacting with Gunyah on Qualcomm + platforms. Interaction with Qualcomm firmware requires + extra platform-specific support. 
+ + Say Y/M here to use Gunyah on Qualcomm platforms. + config GUNYAH_VCPU tristate "Runnable Gunyah vCPUs" depends on GUNYAH diff --git a/drivers/virt/gunyah/Makefile b/drivers/virt/gunyah/Makefile index c9fb17d3a5b0..efda8f732f8a 100644 --- a/drivers/virt/gunyah/Makefile +++ b/drivers/virt/gunyah/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_GUNYAH_PLATFORM_HOOKS) += gunyah_platform_hooks.o +obj-$(CONFIG_GUNYAH_QCOM_PLATFORM) += gunyah_qcom.o gunyah_rsc_mgr-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o vm_mgr_mm.o obj-$(CONFIG_GUNYAH) += gunyah_rsc_mgr.o diff --git a/drivers/virt/gunyah/gunyah_qcom.c b/drivers/virt/gunyah/gunyah_qcom.c new file mode 100644 index 000000000000..ec62f1e23724 --- /dev/null +++ b/drivers/virt/gunyah/gunyah_qcom.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include + +#define QCOM_SCM_RM_MANAGED_VMID 0x3A +#define QCOM_SCM_MAX_MANAGED_VMID 0x3F + +static int qcom_scm_gh_rm_pre_mem_share(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel) +{ + struct qcom_scm_vmperm *new_perms; + u64 src, src_cpy; + int ret = 0, i, n; + u16 vmid; + + new_perms = kcalloc(mem_parcel->n_acl_entries, sizeof(*new_perms), GFP_KERNEL); + if (!new_perms) + return -ENOMEM; + + for (n = 0; n < mem_parcel->n_acl_entries; n++) { + vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid); + if (vmid <= QCOM_SCM_MAX_MANAGED_VMID) + new_perms[n].vmid = vmid; + else + new_perms[n].vmid = QCOM_SCM_RM_MANAGED_VMID; + if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_X) + new_perms[n].perm |= QCOM_SCM_PERM_EXEC; + if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_W) + new_perms[n].perm |= QCOM_SCM_PERM_WRITE; + if (mem_parcel->acl_entries[n].perms & GH_RM_ACL_R) + new_perms[n].perm |= QCOM_SCM_PERM_READ; + } + + src = (1ull << QCOM_SCM_VMID_HLOS); + + for (i = 0; i < mem_parcel->n_mem_entries; i++) { + 
src_cpy = src; + ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].ipa_base), + le64_to_cpu(mem_parcel->mem_entries[i].size), + &src_cpy, new_perms, mem_parcel->n_acl_entries); + if (ret) { + src = 0; + for (n = 0; n < mem_parcel->n_acl_entries; n++) { + vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid); + if (vmid <= QCOM_SCM_MAX_MANAGED_VMID) + src |= (1ull << vmid); + else + src |= (1ull << QCOM_SCM_RM_MANAGED_VMID); + } + + new_perms[0].vmid = QCOM_SCM_VMID_HLOS; + + for (i--; i >= 0; i--) { + src_cpy = src; + WARN_ON_ONCE(qcom_scm_assign_mem( + le64_to_cpu(mem_parcel->mem_entries[i].ipa_base), + le64_to_cpu(mem_parcel->mem_entries[i].size), + &src_cpy, new_perms, 1)); + } + break; + } + } + + kfree(new_perms); + return ret; +} + +static int qcom_scm_gh_rm_post_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *mem_parcel) +{ + struct qcom_scm_vmperm new_perms; + u64 src = 0, src_cpy; + int ret = 0, i, n; + u16 vmid; + + new_perms.vmid = QCOM_SCM_VMID_HLOS; + new_perms.perm = QCOM_SCM_PERM_EXEC | QCOM_SCM_PERM_WRITE | QCOM_SCM_PERM_READ; + + for (n = 0; n < mem_parcel->n_acl_entries; n++) { + vmid = le16_to_cpu(mem_parcel->acl_entries[n].vmid); + if (vmid <= QCOM_SCM_MAX_MANAGED_VMID) + src |= (1ull << vmid); + else + src |= (1ull << QCOM_SCM_RM_MANAGED_VMID); + } + + for (i = 0; i < mem_parcel->n_mem_entries; i++) { + src_cpy = src; + ret = qcom_scm_assign_mem(le64_to_cpu(mem_parcel->mem_entries[i].ipa_base), + le64_to_cpu(mem_parcel->mem_entries[i].size), + &src_cpy, &new_perms, 1); + WARN_ON_ONCE(ret); + } + + return ret; +} + +static struct gh_rm_platform_ops qcom_scm_gh_rm_platform_ops = { + .pre_mem_share = qcom_scm_gh_rm_pre_mem_share, + .post_mem_reclaim = qcom_scm_gh_rm_post_mem_reclaim, +}; + +/* {19bd54bd-0b37-571b-946f-609b54539de6} */ +static const uuid_t QCOM_EXT_UUID = + UUID_INIT(0x19bd54bd, 0x0b37, 0x571b, 0x94, 0x6f, 0x60, 0x9b, 0x54, 0x53, 0x9d, 0xe6); + +#define GH_QCOM_EXT_CALL_UUID_ID 
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \ + ARM_SMCCC_OWNER_VENDOR_HYP, 0x3f01) + +static bool gh_has_qcom_extensions(void) +{ + struct arm_smccc_res res; + uuid_t uuid; + + arm_smccc_1_1_smc(GH_QCOM_EXT_CALL_UUID_ID, &res); + + ((u32 *)&uuid.b[0])[0] = lower_32_bits(res.a0); + ((u32 *)&uuid.b[0])[1] = lower_32_bits(res.a1); + ((u32 *)&uuid.b[0])[2] = lower_32_bits(res.a2); + ((u32 *)&uuid.b[0])[3] = lower_32_bits(res.a3); + + return uuid_equal(&uuid, &QCOM_EXT_UUID); +} + +static int __init qcom_gh_platform_hooks_register(void) +{ + if (!gh_has_qcom_extensions()) + return -ENODEV; + + return gh_rm_register_platform_ops(&qcom_scm_gh_rm_platform_ops); +} + +static void __exit qcom_gh_platform_hooks_unregister(void) +{ + gh_rm_unregister_platform_ops(&qcom_scm_gh_rm_platform_ops); +} + +module_init(qcom_gh_platform_hooks_register); +module_exit(qcom_gh_platform_hooks_unregister); +MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Platform Hooks for Gunyah"); +MODULE_LICENSE("GPL"); From a30bae5a9acfbb9af6659c1df1566d582924b2e0 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Mon, 17 Apr 2023 11:58:21 -0700 Subject: [PATCH 46/63] ANDROID: gunyah: Sync with latest documentation and UAPI Align docs and UAPI to Gunyah v13 patches: https://lore.kernel.org/all/20230509204801.2824351-1-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: I2719f07f69877374ffa88020fe2a23a70d79bb8b Signed-off-by: Elliot Berman --- Documentation/virt/gunyah/vm-manager.rst | 49 +++---- drivers/virt/gunyah/gunyah_irqfd.c | 4 +- include/uapi/linux/gunyah.h | 174 ++++++++++++++--------- 3 files changed, 127 insertions(+), 100 deletions(-) diff --git a/Documentation/virt/gunyah/vm-manager.rst b/Documentation/virt/gunyah/vm-manager.rst index cd41a705849f..87838c5b5945 100644 --- a/Documentation/virt/gunyah/vm-manager.rst +++ b/Documentation/virt/gunyah/vm-manager.rst @@ -12,7 +12,7 @@ most of the configuration about a Gunyah virtual machine is described in the VM's devicetree. 
The devicetree is generated by userspace. Interacting with the virtual machine is still done via the kernel and VM configuration requires some of the corresponding functionality to be set up in the kernel. For instance, -sharing userspace memory with a VM is done via the GH_VM_SET_USER_MEM_REGION +sharing userspace memory with a VM is done via the `GH_VM_SET_USER_MEM_REGION`_ ioctl. The VM itself is configured to use the memory region via the devicetree. @@ -22,13 +22,13 @@ Gunyah Functions Components of a Gunyah VM's configuration that need kernel configuration are called "functions" and are built on top of a framework. Functions are identified by a string and have some argument(s) to configure them. They are typically -created by the `GH_VM_ADD_FUNCTION` ioctl. +created by the `GH_VM_ADD_FUNCTION`_ ioctl. Functions typically will always do at least one of these operations: 1. Create resource ticket(s). Resource tickets allow a function to register itself as the client for a Gunyah resource (e.g. doorbell or vCPU) and - the function is given the pointer to the `struct gh_resource` when the + the function is given the pointer to the &struct gh_resource when the VM is starting. 2. Register IO handler(s). IO handlers allow a function to handle stage-2 faults @@ -46,7 +46,7 @@ IOCTLs and userspace VMM flows The kernel exposes a char device interface at /dev/gunyah. -To create a VM, use the GH_CREATE_VM ioctl. A successful call will return a +To create a VM, use the `GH_CREATE_VM`_ ioctl. A successful call will return a "Gunyah VM" file descriptor. /dev/gunyah API Descriptions @@ -75,22 +75,13 @@ be configured to accept these at boot-up. The guest physical address is used by Linux kernel to check that the requested user regions do not overlap and to help find the corresponding memory region -for calls like GH_VM_SET_DTB_CONFIG. It must be page aligned. +for calls like `GH_VM_SET_DTB_CONFIG`_. It must be page aligned. 
-memory_size and userspace_addr must be page-aligned. - -The flags field of gh_userspace_memory_region accepts the following bits. All -other bits must be 0 and are reserved for future use. The ioctl will return --EINVAL if an unsupported bit is detected. - - - GH_MEM_ALLOW_READ/GH_MEM_ALLOW_WRITE/GH_MEM_ALLOW_EXEC sets read/write/exec - permissions for the guest, respectively. - -To add a memory region, call GH_VM_SET_USER_MEM_REGION with fields set as +To add a memory region, call `GH_VM_SET_USER_MEM_REGION`_ with fields set as described above. .. kernel-doc:: include/uapi/linux/gunyah.h - :identifiers: gh_userspace_memory_region + :identifiers: gh_userspace_memory_region gh_mem_flags GH_VM_SET_DTB_CONFIG ~~~~~~~~~~~~~~~~~~~~ @@ -111,20 +102,20 @@ GH_VM_ADD_FUNCTION ~~~~~~~~~~~~~~~~~~ This ioctl registers a Gunyah VM function with the VM manager. The VM function -is described with a `type` string and some arguments for that type. Typically, -the function is added before the VM starts, but the function doesn't "operate" -until the VM starts with GH_VM_START: e.g. vCPU ioclts will all return an error -until the VM starts because the vCPUs don't exist until the VM is started. This -allows the VMM to set up all the kernel functionality needed for the VM *before* -the VM starts. +is described with a &struct gh_fn_desc.type and some arguments for that type. +Typically, the function is added before the VM starts, but the function doesn't +"operate" until the VM starts with `GH_VM_START`_. For example, vCPU ioclts will +all return an error until the VM starts because the vCPUs don't exist until the +VM is started. This allows the VMM to set up all the kernel functions needed for +the VM *before* the VM starts. .. kernel-doc:: include/uapi/linux/gunyah.h - :identifiers: gh_fn_desc + :identifiers: gh_fn_desc gh_fn_type -The possible types are documented below: +The argument types are documented below: .. 
kernel-doc:: include/uapi/linux/gunyah.h - :identifiers: GH_FN_VCPU gh_fn_vcpu_arg GH_FN_IRQFD gh_fn_irqfd_arg GH_FN_IOEVENTFD gh_fn_ioeventfd_arg + :identifiers: gh_fn_vcpu_arg gh_fn_irqfd_arg gh_irqfd_flags gh_fn_ioeventfd_arg gh_ioeventfd_flags Gunyah VCPU API Descriptions ---------------------------- @@ -137,15 +128,15 @@ GH_VCPU_RUN This ioctl is used to run a guest virtual cpu. While there are no explicit parameters, there is an implicit parameter block that can be obtained by mmap()ing the vcpu fd at offset 0, with the size given by -GH_VCPU_MMAP_SIZE. The parameter block is formatted as a 'struct +`GH_VCPU_MMAP_SIZE`_. The parameter block is formatted as a 'struct gh_vcpu_run' (see below). GH_VCPU_MMAP_SIZE ~~~~~~~~~~~~~~~~~ -The GH_VCPU_RUN ioctl communicates with userspace via a shared +The `GH_VCPU_RUN`_ ioctl communicates with userspace via a shared memory region. This ioctl returns the size of that region. See the -GH_VCPU_RUN documentation for details. +`GH_VCPU_RUN`_ documentation for details. .. 
kernel-doc:: include/uapi/linux/gunyah.h - :identifiers: gh_vcpu_run gh_vm_exit_info + :identifiers: gh_vcpu_exit gh_vcpu_run gh_vm_status gh_vm_exit_info diff --git a/drivers/virt/gunyah/gunyah_irqfd.c b/drivers/virt/gunyah/gunyah_irqfd.c index 0371fd3da578..25db65fc751e 100644 --- a/drivers/virt/gunyah/gunyah_irqfd.c +++ b/drivers/virt/gunyah/gunyah_irqfd.c @@ -98,7 +98,7 @@ static long gh_irqfd_bind(struct gh_vm_function_instance *f) return -EINVAL; /* All other flag bits are reserved for future use */ - if (args->flags & ~GH_IRQFD_LEVEL) + if (args->flags & ~GH_IRQFD_FLAGS_LEVEL) return -EINVAL; irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL); @@ -120,7 +120,7 @@ static long gh_irqfd_bind(struct gh_vm_function_instance *f) goto err_fdput; } - if (args->flags & GH_IRQFD_LEVEL) + if (args->flags & GH_IRQFD_FLAGS_LEVEL) irqfd->level = true; init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup); diff --git a/include/uapi/linux/gunyah.h b/include/uapi/linux/gunyah.h index a0ae5673908c..a1919c75a489 100644 --- a/include/uapi/linux/gunyah.h +++ b/include/uapi/linux/gunyah.h @@ -3,8 +3,8 @@ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ -#ifndef _UAPI_LINUX_GUNYAH -#define _UAPI_LINUX_GUNYAH +#ifndef _UAPI_LINUX_GUNYAH_H +#define _UAPI_LINUX_GUNYAH_H /* * Userspace interface for /dev/gunyah - gunyah based virtual machine @@ -24,14 +24,22 @@ * ioctls for VM fds */ -#define GH_MEM_ALLOW_READ (1UL << 0) -#define GH_MEM_ALLOW_WRITE (1UL << 1) -#define GH_MEM_ALLOW_EXEC (1UL << 2) +/** + * enum gh_mem_flags - Possible flags on &struct gh_userspace_memory_region + * @GH_MEM_ALLOW_READ: Allow guest to read the memory + * @GH_MEM_ALLOW_WRITE: Allow guest to write to the memory + * @GH_MEM_ALLOW_EXEC: Allow guest to execute instructions in the memory + */ +enum gh_mem_flags { + GH_MEM_ALLOW_READ = 1UL << 0, + GH_MEM_ALLOW_WRITE = 1UL << 1, + GH_MEM_ALLOW_EXEC = 1UL << 2, +}; /** * struct gh_userspace_memory_region - Userspace memory descripion for GH_VM_SET_USER_MEM_REGION - * @label: Unique identifer to the region. - * @flags: Flags for memory parcel behavior + * @label: Identifer to the region which is unique to the VM. + * @flags: Flags for memory parcel behavior. See &enum gh_mem_flags. * @guest_phys_addr: Location of the memory region in guest's memory space (page-aligned) * @memory_size: Size of the region (page-aligned) * @userspace_addr: Location of the memory region in caller (userspace)'s memory @@ -52,7 +60,9 @@ struct gh_userspace_memory_region { /** * struct gh_vm_dtb_config - Set the location of the VM's devicetree blob * @guest_phys_addr: Address of the VM's devicetree in guest memory. - * @size: Maximum size of the devicetree. + * @size: Maximum size of the devicetree including space for overlays. + * Resource manager applies an overlay to the DTB and dtb_size should + * include room for the overlay. A page of memory is typicaly plenty. 
*/ struct gh_vm_dtb_config { __u64 guest_phys_addr; @@ -63,67 +73,61 @@ struct gh_vm_dtb_config { #define GH_VM_START _IO(GH_IOCTL_TYPE, 0x3) /** - * GH_FN_VCPU - create a vCPU instance to control a vCPU + * enum gh_fn_type - Valid types of Gunyah VM functions + * @GH_FN_VCPU: create a vCPU instance to control a vCPU + * &struct gh_fn_desc.arg is a pointer to &struct gh_fn_vcpu_arg + * Return: file descriptor to manipulate the vcpu. + * @GH_FN_IRQFD: register eventfd to assert a Gunyah doorbell + * &struct gh_fn_desc.arg is a pointer to &struct gh_fn_irqfd_arg + * @GH_FN_IOEVENTFD: register ioeventfd to trigger when VM faults on parameter + * &struct gh_fn_desc.arg is a pointer to &struct gh_fn_ioeventfd_arg + */ +enum gh_fn_type { + GH_FN_VCPU = 1, + GH_FN_IRQFD, + GH_FN_IOEVENTFD, +}; + +#define GH_FN_MAX_ARG_SIZE 256 + +/** + * struct gh_fn_vcpu_arg - Arguments to create a vCPU. + * @id: vcpu id * - * gh_fn_desc is filled with &struct gh_fn_vcpu_arg + * Create this function with &GH_VM_ADD_FUNCTION using type &GH_FN_VCPU. * * The vcpu type will register with the VM Manager to expect to control * vCPU number `vcpu_id`. It returns a file descriptor allowing interaction with * the vCPU. See the Gunyah vCPU API description sections for interacting with * the Gunyah vCPU file descriptors. - * - * Return: file descriptor to manipulate the vcpu. See GH_VCPU_* ioctls - */ -#define GH_FN_VCPU 1 - -/** - * GH_FN_IRQFD - register eventfd to assert a Gunyah doorbell - * - * gh_fn_desc is filled with gh_fn_irqfd_arg - * - * Allows setting an eventfd to directly trigger a guest interrupt. - * irqfd.fd specifies the file descriptor to use as the eventfd. - * irqfd.label corresponds to the doorbell label used in the guest VM's devicetree. 
- * - * Return: 0 - */ -#define GH_FN_IRQFD 2 - -/** - * GH_FN_IOEVENTFD - register ioeventfd to trigger when VM faults on parameter - * - * gh_fn_desc is filled with gh_fn_ioeventfd_arg - * - * Attaches an ioeventfd to a legal mmio address within the guest. A guest write - * in the registered address will signal the provided event instead of triggering - * an exit on the GH_VCPU_RUN ioctl. - * - * If GH_IOEVENTFD_DATAMATCH flag is set, the event will be signaled only if the - * written value to the registered address is equal to datamatch in - * struct gh_fn_ioeventfd_arg. - * - * Return: 0 - */ -#define GH_FN_IOEVENTFD 3 - -#define GH_FN_MAX_ARG_SIZE 256 - -/** - * struct gh_fn_vcpu_arg - Arguments to create a vCPU - * @id: vcpu id */ struct gh_fn_vcpu_arg { __u32 id; }; -#define GH_IRQFD_LEVEL (1UL << 0) +/** + * enum gh_irqfd_flags - flags for use in gh_fn_irqfd_arg + * @GH_IRQFD_FLAGS_LEVEL: make the interrupt operate like a level triggered + * interrupt on guest side. Triggering IRQFD before + * guest handles the interrupt causes interrupt to + * stay asserted. + */ +enum gh_irqfd_flags { + GH_IRQFD_FLAGS_LEVEL = 1UL << 0, +}; /** - * struct gh_fn_irqfd_arg - Arguments to create an irqfd function + * struct gh_fn_irqfd_arg - Arguments to create an irqfd function. + * + * Create this function with &GH_VM_ADD_FUNCTION using type &GH_FN_IRQFD. + * + * Allows setting an eventfd to directly trigger a guest interrupt. + * irqfd.fd specifies the file descriptor to use as the eventfd. + * irqfd.label corresponds to the doorbell label used in the guest VM's devicetree. + * * @fd: an eventfd which when written to will raise a doorbell * @label: Label of the doorbell created on the guest VM - * @flags: GH_IRQFD_LEVEL configures the corresponding doorbell to behave - * like a level triggered interrupt. 
+ * @flags: see &enum gh_irqfd_flags * @padding: padding bytes */ struct gh_fn_irqfd_arg { @@ -133,7 +137,15 @@ struct gh_fn_irqfd_arg { __u32 padding; }; -#define GH_IOEVENTFD_FLAGS_DATAMATCH (1UL << 0) +/** + * enum gh_ioeventfd_flags - flags for use in gh_fn_ioeventfd_arg + * @GH_IOEVENTFD_FLAGS_DATAMATCH: the event will be signaled only if the + * written value to the registered address is + * equal to &struct gh_fn_ioeventfd_arg.datamatch + */ +enum gh_ioeventfd_flags { + GH_IOEVENTFD_FLAGS_DATAMATCH = 1UL << 0, +}; /** * struct gh_fn_ioeventfd_arg - Arguments to create an ioeventfd function @@ -141,10 +153,14 @@ struct gh_fn_irqfd_arg { * @addr: Address in guest memory * @len: Length of access * @fd: When ioeventfd is matched, this eventfd is written - * @flags: If GH_IOEVENTFD_DATAMATCH flag is set, the event will be signaled - * only if the written value to the registered address is equal to - * @datamatch + * @flags: See &enum gh_ioeventfd_flags * @padding: padding bytes + * + * Create this function with &GH_VM_ADD_FUNCTION using type &GH_FN_IOEVENTFD. + * + * Attaches an ioeventfd to a legal mmio address within the guest. A guest write + * in the registered address will signal the provided event instead of triggering + * an exit on the GH_VCPU_RUN ioctl. */ struct gh_fn_ioeventfd_arg { __u64 datamatch; @@ -157,9 +173,10 @@ struct gh_fn_ioeventfd_arg { /** * struct gh_fn_desc - Arguments to create a VM function - * @type: Type of the function. See GH_FN_* macro for supported types + * @type: Type of the function. See &enum gh_fn_type. * @arg_size: Size of argument to pass to the function. arg_size <= GH_FN_MAX_ARG_SIZE - * @arg: Value or pointer to argument given to the function + * @arg: Pointer to argument given to the function. See &enum gh_fn_type for expected + * arguments for a function type. 
*/ struct gh_fn_desc { __u32 type; @@ -170,13 +187,21 @@ struct gh_fn_desc { #define GH_VM_ADD_FUNCTION _IOW(GH_IOCTL_TYPE, 0x4, struct gh_fn_desc) #define GH_VM_REMOVE_FUNCTION _IOW(GH_IOCTL_TYPE, 0x7, struct gh_fn_desc) +/* + * ioctls for vCPU fds + */ + +/** + * enum gh_vm_status - Stores status reason why VM is not runnable (exited). + * @GH_VM_STATUS_LOAD_FAILED: VM didn't start because it couldn't be loaded. + * @GH_VM_STATUS_EXITED: VM requested shutdown/reboot. + * Use &struct gh_vm_exit_info.reason for further details. + * @GH_VM_STATUS_CRASHED: VM state is unknown and has crashed. + */ enum gh_vm_status { GH_VM_STATUS_LOAD_FAILED = 1, -#define GH_VM_STATUS_LOAD_FAILED GH_VM_STATUS_LOAD_FAILED GH_VM_STATUS_EXITED = 2, -#define GH_VM_STATUS_EXITED GH_VM_STATUS_EXITED GH_VM_STATUS_CRASHED = 3, -#define GH_VM_STATUS_CRASHED GH_VM_STATUS_CRASHED }; /* @@ -203,9 +228,20 @@ struct gh_vm_exit_info { __u8 reason[GH_VM_MAX_EXIT_REASON_SIZE]; }; -#define GH_VCPU_EXIT_UNKNOWN 0 -#define GH_VCPU_EXIT_MMIO 1 -#define GH_VCPU_EXIT_STATUS 2 +/** + * enum gh_vcpu_exit - Stores reason why &GH_VCPU_RUN ioctl recently exited with status 0 + * @GH_VCPU_EXIT_UNKNOWN: Not used, status != 0 + * @GH_VCPU_EXIT_MMIO: vCPU performed a read or write that could not be handled + * by hypervisor or Linux. Use @struct gh_vcpu_run.mmio for + * details of the read/write. + * @GH_VCPU_EXIT_STATUS: vCPU not able to run because the VM has exited. + * Use @struct gh_vcpu_run.status for why VM has exited. + */ +enum gh_vcpu_exit { + GH_VCPU_EXIT_UNKNOWN, + GH_VCPU_EXIT_MMIO, + GH_VCPU_EXIT_STATUS, +}; /** * struct gh_vcpu_run - Application code obtains a pointer to the gh_vcpu_run @@ -213,19 +249,19 @@ struct gh_vm_exit_info { * @immediate_exit: polled when scheduling the vcpu. If set, immediately returns -EINTR. * @padding: padding bytes * @exit_reason: Set when GH_VCPU_RUN returns successfully and gives reason why - * GH_VCPU_RUN has stopped running the vCPU. 
+ * GH_VCPU_RUN has stopped running the vCPU. See &enum gh_vcpu_exit. * @mmio: Used when exit_reason == GH_VCPU_EXIT_MMIO * The guest has faulted on an memory-mapped I/O instruction that * couldn't be satisfied by gunyah. * @mmio.phys_addr: Address guest tried to access * @mmio.data: the value that was written if `is_write == 1`. Filled by - * user for reads (`is_wite == 0`). + * user for reads (`is_write == 0`). * @mmio.len: Length of write. Only the first `len` bytes of `data` * are considered by Gunyah. * @mmio.is_write: 1 if VM tried to perform a write, 0 for a read * @status: Used when exit_reason == GH_VCPU_EXIT_STATUS. * The guest VM is no longer runnable. This struct informs why. - * @status.status: See `enum gh_vm_status` for possible values + * @status.status: See &enum gh_vm_status for possible values * @status.exit_info: Used when status == GH_VM_STATUS_EXITED */ struct gh_vcpu_run { From 58a642ea086f427cbe7f484c1fb1bd43cec445a8 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Mon, 17 Apr 2023 12:03:40 -0700 Subject: [PATCH 47/63] ANDROID: gunyah: Sync with latest hypercalls Align hypercalls to Gunyah v13 patches: https://lore.kernel.org/all/20230509204801.2824351-1-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: Ie99913e7d9213e4805a98aa04a06c751ece32488 Signed-off-by: Elliot Berman --- arch/arm64/gunyah/gunyah_hypercall.c | 6 +++--- drivers/virt/gunyah/gunyah_irqfd.c | 17 ++++++++++------- drivers/virt/gunyah/gunyah_vcpu.c | 15 +-------------- include/linux/gunyah.h | 25 ++++++++++++++++++------- 4 files changed, 32 insertions(+), 31 deletions(-) diff --git a/arch/arm64/gunyah/gunyah_hypercall.c b/arch/arm64/gunyah/gunyah_hypercall.c index 2925932660f1..3d48c8650851 100644 --- a/arch/arm64/gunyah/gunyah_hypercall.c +++ b/arch/arm64/gunyah/gunyah_hypercall.c @@ -63,7 +63,7 @@ enum gh_error gh_hypercall_bell_send(u64 capid, u64 new_flags, u64 *old_flags) arm_smccc_1_1_hvc(GH_HYPERCALL_BELL_SEND, capid, new_flags, 0, &res); - if (res.a0 == 
GH_ERROR_OK) + if (res.a0 == GH_ERROR_OK && old_flags) *old_flags = res.a1; return res.a0; @@ -80,7 +80,7 @@ enum gh_error gh_hypercall_bell_set_mask(u64 capid, u64 enable_mask, u64 ack_mas } EXPORT_SYMBOL_GPL(gh_hypercall_bell_set_mask); -enum gh_error gh_hypercall_msgq_send(u64 capid, size_t size, void *buff, int tx_flags, bool *ready) +enum gh_error gh_hypercall_msgq_send(u64 capid, size_t size, void *buff, u64 tx_flags, bool *ready) { struct arm_smccc_res res; @@ -126,7 +126,7 @@ enum gh_error gh_hypercall_vcpu_run(u64 capid, u64 *resume_data, arm_smccc_1_2_hvc(&args, &res); if (res.a0 == GH_ERROR_OK) { - resp->state = res.a1; + resp->sized_state = res.a1; resp->state_data[0] = res.a2; resp->state_data[1] = res.a3; resp->state_data[2] = res.a4; diff --git a/drivers/virt/gunyah/gunyah_irqfd.c b/drivers/virt/gunyah/gunyah_irqfd.c index 25db65fc751e..1b7f0c5073da 100644 --- a/drivers/virt/gunyah/gunyah_irqfd.c +++ b/drivers/virt/gunyah/gunyah_irqfd.c @@ -30,13 +30,11 @@ static int irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync, v { struct gh_irqfd *irqfd = container_of(wait, struct gh_irqfd, wait); __poll_t flags = key_to_poll(key); - u64 enable_mask = GH_BELL_NONBLOCK; - u64 old_flags; int ret = 0; if (flags & EPOLLIN) { if (irqfd->ghrsc) { - ret = gh_hypercall_bell_send(irqfd->ghrsc->capid, enable_mask, &old_flags); + ret = gh_hypercall_bell_send(irqfd->ghrsc->capid, 1, NULL); if (ret) pr_err_ratelimited("Failed to inject interrupt %d: %d\n", irqfd->ticket.label, ret); @@ -57,9 +55,7 @@ static void irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh, p static bool gh_irqfd_populate(struct gh_vm_resource_ticket *ticket, struct gh_resource *ghrsc) { struct gh_irqfd *irqfd = container_of(ticket, struct gh_irqfd, ticket); - u64 enable_mask = GH_BELL_NONBLOCK; - u64 ack_mask = ~0; - int ret = 0; + int ret; if (irqfd->ghrsc) { pr_warn("irqfd%d already got a Gunyah resource. 
Check if multiple resources with same label were configured.\n", @@ -69,7 +65,14 @@ static bool gh_irqfd_populate(struct gh_vm_resource_ticket *ticket, struct gh_re irqfd->ghrsc = ghrsc; if (irqfd->level) { - ret = gh_hypercall_bell_set_mask(irqfd->ghrsc->capid, enable_mask, ack_mask); + /* Configure the bell to trigger when bit 0 is asserted (see + * irq_wakeup) and for bell to automatically clear bit 0 once + * received by the VM (ack_mask). need to make sure bit 0 is cleared right away, + * otherwise the line will never be deasserted. Emulating edge + * trigger interrupt does not need to set either mask + * because irq is listed only once per gh_hypercall_bell_send + */ + ret = gh_hypercall_bell_set_mask(irqfd->ghrsc->capid, 1, 1); if (ret) pr_warn("irq %d couldn't be set as level triggered. Might cause IRQ storm if asserted\n", irqfd->ticket.label); diff --git a/drivers/virt/gunyah/gunyah_vcpu.c b/drivers/virt/gunyah/gunyah_vcpu.c index c329184e5fb6..acb565c3a680 100644 --- a/drivers/virt/gunyah/gunyah_vcpu.c +++ b/drivers/virt/gunyah/gunyah_vcpu.c @@ -41,19 +41,6 @@ struct gh_vcpu { struct kref kref; }; -/* VCPU is ready to run */ -#define GH_VCPU_STATE_READY 0 -/* VCPU is sleeping until an interrupt arrives */ -#define GH_VCPU_STATE_EXPECTS_WAKEUP 1 -/* VCPU is powered off */ -#define GH_VCPU_STATE_POWERED_OFF 2 -/* VCPU is blocked in EL2 for unspecified reason */ -#define GH_VCPU_STATE_BLOCKED 3 -/* VCPU has returned for MMIO READ */ -#define GH_VCPU_ADDRSPACE_VMMIO_READ 4 -/* VCPU has returned for MMIO WRITE */ -#define GH_VCPU_ADDRSPACE_VMMIO_WRITE 5 - static void vcpu_release(struct kref *kref) { struct gh_vcpu *vcpu = container_of(kref, struct gh_vcpu, kref); @@ -245,7 +232,7 @@ static int gh_vcpu_run(struct gh_vcpu *vcpu) break; default: pr_warn_ratelimited("Unknown vCPU state: %llx\n", - vcpu_run_resp.state); + vcpu_run_resp.sized_state); schedule(); break; } diff --git a/include/linux/gunyah.h b/include/linux/gunyah.h index e4de51381041..ddea1ea9ce9c 
100644 --- a/include/linux/gunyah.h +++ b/include/linux/gunyah.h @@ -33,11 +33,6 @@ struct gh_resource { u32 rm_label; }; -/** - * Gunyah Doorbells - */ -#define GH_BELL_NONBLOCK BIT(32) - /** * Gunyah Message Queues */ @@ -181,12 +176,28 @@ enum gh_error gh_hypercall_bell_set_mask(u64 capid, u64 enable_mask, u64 ack_mas #define GH_HYPERCALL_MSGQ_TX_FLAGS_PUSH BIT(0) -enum gh_error gh_hypercall_msgq_send(u64 capid, size_t size, void *buff, int tx_flags, bool *ready); +enum gh_error gh_hypercall_msgq_send(u64 capid, size_t size, void *buff, u64 tx_flags, bool *ready); enum gh_error gh_hypercall_msgq_recv(u64 capid, void *buff, size_t size, size_t *recv_size, bool *ready); struct gh_hypercall_vcpu_run_resp { - u64 state; + union { + enum { + /* VCPU is ready to run */ + GH_VCPU_STATE_READY = 0, + /* VCPU is sleeping until an interrupt arrives */ + GH_VCPU_STATE_EXPECTS_WAKEUP = 1, + /* VCPU is powered off */ + GH_VCPU_STATE_POWERED_OFF = 2, + /* VCPU is blocked in EL2 for unspecified reason */ + GH_VCPU_STATE_BLOCKED = 3, + /* VCPU has returned for MMIO READ */ + GH_VCPU_ADDRSPACE_VMMIO_READ = 4, + /* VCPU has returned for MMIO WRITE */ + GH_VCPU_ADDRSPACE_VMMIO_WRITE = 5, + } state; + u64 sized_state; + }; u64 state_data[3]; }; From 016d92266efd0ba265561c03ed140be7a2cf04cf Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Mon, 17 Apr 2023 12:05:59 -0700 Subject: [PATCH 48/63] ANDROID: gunyah: Sync with latest "gunyah: Common types and error codes for Gunyah hypercalls" Rename gh_remap_error to gh_error_remap to align with Gunyah v13 patches: https://lore.kernel.org/all/20230509204801.2824351-3-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: Id3e033108a6a42868dc12a9c20c1a06775418979 Signed-off-by: Elliot Berman --- drivers/mailbox/gunyah-msgq.c | 2 +- drivers/virt/gunyah/gunyah_vcpu.c | 7 +++---- include/linux/gunyah.h | 4 ++-- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/mailbox/gunyah-msgq.c b/drivers/mailbox/gunyah-msgq.c index 
d16c523901ac..2cd05719c827 100644 --- a/drivers/mailbox/gunyah-msgq.c +++ b/drivers/mailbox/gunyah-msgq.c @@ -80,7 +80,7 @@ static int gh_msgq_send_data(struct mbox_chan *chan, void *data) * framework, then no other messages can be sent and nobody will know * to retry this message. */ - msgq->last_ret = gh_remap_error(gh_error); + msgq->last_ret = gh_error_remap(gh_error); /** * This message was successfully sent, but message queue isn't ready to diff --git a/drivers/virt/gunyah/gunyah_vcpu.c b/drivers/virt/gunyah/gunyah_vcpu.c index acb565c3a680..455ed4425121 100644 --- a/drivers/virt/gunyah/gunyah_vcpu.c +++ b/drivers/virt/gunyah/gunyah_vcpu.c @@ -192,7 +192,6 @@ static int gh_vcpu_run(struct gh_vcpu *vcpu) gh_error = gh_hypercall_vcpu_run(vcpu->rsc->capid, state_data, &vcpu_run_resp); if (gh_error == GH_ERROR_OK) { - ret = 0; switch (vcpu_run_resp.state) { case GH_VCPU_STATE_READY: if (need_resched()) @@ -238,9 +237,9 @@ static int gh_vcpu_run(struct gh_vcpu *vcpu) } } else if (gh_error == GH_ERROR_RETRY) { schedule(); - ret = 0; - } else - ret = gh_remap_error(gh_error); + } else { + ret = gh_error_remap(gh_error); + } } out: diff --git a/include/linux/gunyah.h b/include/linux/gunyah.h index ddea1ea9ce9c..2a16219fad18 100644 --- a/include/linux/gunyah.h +++ b/include/linux/gunyah.h @@ -110,10 +110,10 @@ enum gh_error { }; /** - * gh_remap_error() - Remap Gunyah hypervisor errors into a Linux error code + * gh_error_remap() - Remap Gunyah hypervisor errors into a Linux error code * @gh_error: Gunyah hypercall return value */ -static inline int gh_remap_error(enum gh_error gh_error) +static inline int gh_error_remap(enum gh_error gh_error) { switch (gh_error) { case GH_ERROR_OK: From afaf16332908230dd8a7ac2f7289979289d24c04 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Mon, 8 May 2023 10:33:53 -0700 Subject: [PATCH 49/63] ANDROID: gunyah: Sync with latest "mailbox: Add Gunyah message queue mailbox" Align msgq mailbox implementation to version 13 of Gunyah 
patches: https://lore.kernel.org/all/20230509204801.2824351-6-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: I017873310e6c8650afa3e6dae379c7e7048b7197 Signed-off-by: Elliot Berman --- drivers/mailbox/gunyah-msgq.c | 66 +++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 30 deletions(-) diff --git a/drivers/mailbox/gunyah-msgq.c b/drivers/mailbox/gunyah-msgq.c index 2cd05719c827..b7a54f233680 100644 --- a/drivers/mailbox/gunyah-msgq.c +++ b/drivers/mailbox/gunyah-msgq.c @@ -30,7 +30,8 @@ static irqreturn_t gh_msgq_rx_irq_handler(int irq, void *data) dev_warn(msgq->mbox.dev, "Failed to receive data: %d\n", gh_error); break; } - mbox_chan_received_data(gh_msgq_chan(msgq), &rx_data); + if (likely(gh_msgq_chan(msgq)->cl)) + mbox_chan_received_data(gh_msgq_chan(msgq), &rx_data); } return IRQ_HANDLED; @@ -62,6 +63,9 @@ static int gh_msgq_send_data(struct mbox_chan *chan, void *data) enum gh_error gh_error; bool ready; + if (!msgq->tx_ghrsc) + return -EOPNOTSUPP; + if (msgq_data->push) tx_flags |= GH_HYPERCALL_MSGQ_TX_FLAGS_PUSH; @@ -112,7 +116,7 @@ static struct mbox_chan_ops gh_msgq_ops = { /** * gh_msgq_init() - Initialize a Gunyah message queue with an mbox_client - * @parent: optional, device parent used for the mailbox controller + * @parent: device parent used for the mailbox controller * @msgq: Pointer to the gh_msgq to initialize * @cl: A mailbox client to bind to the mailbox channel that the message queue creates * @tx_ghrsc: optional, the transmission side of the message queue @@ -139,66 +143,68 @@ int gh_msgq_init(struct device *parent, struct gh_msgq *msgq, struct mbox_client (rx_ghrsc && rx_ghrsc->type != GH_RESOURCE_TYPE_MSGQ_RX)) return -EINVAL; - msgq->tx_ghrsc = tx_ghrsc; - msgq->rx_ghrsc = rx_ghrsc; - msgq->mbox.dev = parent; msgq->mbox.ops = &gh_msgq_ops; msgq->mbox.num_chans = 1; msgq->mbox.txdone_irq = true; msgq->mbox.chans = &msgq->mbox_chan; - if (msgq->tx_ghrsc) { + ret = mbox_controller_register(&msgq->mbox); + if (ret) + 
return ret; + + ret = mbox_bind_client(gh_msgq_chan(msgq), cl); + if (ret) + goto err_mbox; + + if (tx_ghrsc) { + msgq->tx_ghrsc = tx_ghrsc; + ret = request_irq(msgq->tx_ghrsc->irq, gh_msgq_tx_irq_handler, 0, "gh_msgq_tx", msgq); if (ret) - goto err_chans; + goto err_tx_ghrsc; + + tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet); } - if (msgq->rx_ghrsc) { + if (rx_ghrsc) { + msgq->rx_ghrsc = rx_ghrsc; + ret = request_threaded_irq(msgq->rx_ghrsc->irq, NULL, gh_msgq_rx_irq_handler, IRQF_ONESHOT, "gh_msgq_rx", msgq); if (ret) goto err_tx_irq; } - tasklet_setup(&msgq->txdone_tasklet, gh_msgq_txdone_tasklet); - - ret = mbox_controller_register(&msgq->mbox); - if (ret) - goto err_rx_irq; - - ret = mbox_bind_client(gh_msgq_chan(msgq), cl); - if (ret) - goto err_mbox; - return 0; -err_mbox: - mbox_controller_unregister(&msgq->mbox); -err_rx_irq: - if (msgq->rx_ghrsc) - free_irq(msgq->rx_ghrsc->irq, msgq); err_tx_irq: if (msgq->tx_ghrsc) free_irq(msgq->tx_ghrsc->irq, msgq); -err_chans: - kfree(msgq->mbox.chans); + + msgq->rx_ghrsc = NULL; +err_tx_ghrsc: + msgq->tx_ghrsc = NULL; +err_mbox: + mbox_controller_unregister(&msgq->mbox); return ret; } EXPORT_SYMBOL_GPL(gh_msgq_init); void gh_msgq_remove(struct gh_msgq *msgq) { - tasklet_kill(&msgq->txdone_tasklet); - mbox_controller_unregister(&msgq->mbox); - if (msgq->rx_ghrsc) free_irq(msgq->rx_ghrsc->irq, msgq); - if (msgq->tx_ghrsc) + if (msgq->tx_ghrsc) { + tasklet_kill(&msgq->txdone_tasklet); free_irq(msgq->tx_ghrsc->irq, msgq); + } - kfree(msgq->mbox.chans); + mbox_controller_unregister(&msgq->mbox); + + msgq->rx_ghrsc = NULL; + msgq->tx_ghrsc = NULL; } EXPORT_SYMBOL_GPL(gh_msgq_remove); From 9a9fc8d1b2882c3decf429d5e32c603fe8ab65e9 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Mon, 17 Apr 2023 12:07:52 -0700 Subject: [PATCH 50/63] ANDROID: gunyah: Sync remaining gunyah drivers with latest Apply remaining minor fixups from Gunyah v13 patches: 
https://lore.kernel.org/all/20230509204801.2824351-1-quic_eberman@quicinc.com/ Bug: 279506910 Change-Id: I1a596b9df29d210c51b612845e4a1aafbea00441 Signed-off-by: Elliot Berman --- arch/arm64/include/asm/gunyah.h | 6 +++--- drivers/virt/gunyah/Makefile | 4 ++-- drivers/virt/gunyah/gunyah_irqfd.c | 2 +- drivers/virt/gunyah/gunyah_vcpu.c | 32 ++++++++++++++++++------------ drivers/virt/gunyah/rsc_mgr.c | 7 ++++--- drivers/virt/gunyah/rsc_mgr_rpc.c | 4 ++-- drivers/virt/gunyah/vm_mgr.c | 16 ++++++++++----- drivers/virt/gunyah/vm_mgr.h | 4 ++-- drivers/virt/gunyah/vm_mgr_mm.c | 3 --- include/linux/gunyah.h | 16 +++++++-------- include/linux/gunyah_rsc_mgr.h | 8 +------- samples/gunyah/gunyah_vmm.c | 4 ++-- 12 files changed, 55 insertions(+), 51 deletions(-) diff --git a/arch/arm64/include/asm/gunyah.h b/arch/arm64/include/asm/gunyah.h index a8b368f53bab..c83d983b0f4e 100644 --- a/arch/arm64/include/asm/gunyah.h +++ b/arch/arm64/include/asm/gunyah.h @@ -1,9 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ -#ifndef __ASM_GUNYAH_H_ -#define __ASM_GUNYAH_H_ +#ifndef _ASM_GUNYAH_H +#define _ASM_GUNYAH_H #include #include diff --git a/drivers/virt/gunyah/Makefile b/drivers/virt/gunyah/Makefile index efda8f732f8a..63ca11e74796 100644 --- a/drivers/virt/gunyah/Makefile +++ b/drivers/virt/gunyah/Makefile @@ -3,8 +3,8 @@ obj-$(CONFIG_GUNYAH_PLATFORM_HOOKS) += gunyah_platform_hooks.o obj-$(CONFIG_GUNYAH_QCOM_PLATFORM) += gunyah_qcom.o -gunyah_rsc_mgr-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o vm_mgr_mm.o -obj-$(CONFIG_GUNYAH) += gunyah_rsc_mgr.o +gunyah-y += rsc_mgr.o rsc_mgr_rpc.o vm_mgr.o vm_mgr_mm.o +obj-$(CONFIG_GUNYAH) += gunyah.o obj-$(CONFIG_GUNYAH_VCPU) += gunyah_vcpu.o obj-$(CONFIG_GUNYAH_IRQFD) += gunyah_irqfd.o diff --git a/drivers/virt/gunyah/gunyah_irqfd.c b/drivers/virt/gunyah/gunyah_irqfd.c index 1b7f0c5073da..3e954ebd2029 100644 --- a/drivers/virt/gunyah/gunyah_irqfd.c +++ b/drivers/virt/gunyah/gunyah_irqfd.c @@ -176,5 +176,5 @@ static bool gh_irqfd_compare(const struct gh_vm_function_instance *f, DECLARE_GH_VM_FUNCTION_INIT(irqfd, GH_FN_IRQFD, 2, gh_irqfd_bind, gh_irqfd_unbind, gh_irqfd_compare); -MODULE_DESCRIPTION("Gunyah irqfds"); +MODULE_DESCRIPTION("Gunyah irqfd VM Function"); MODULE_LICENSE("GPL"); diff --git a/drivers/virt/gunyah/gunyah_vcpu.c b/drivers/virt/gunyah/gunyah_vcpu.c index 455ed4425121..4f0bbd58a205 100644 --- a/drivers/virt/gunyah/gunyah_vcpu.c +++ b/drivers/virt/gunyah/gunyah_vcpu.c @@ -68,6 +68,9 @@ static bool gh_handle_mmio(struct gh_vcpu *vcpu, len = vcpu_run_resp->state_data[1], data = vcpu_run_resp->state_data[2]; + if (WARN_ON(len > sizeof(u64))) + len = sizeof(u64); + if (vcpu_run_resp->state == GH_VCPU_ADDRSPACE_VMMIO_READ) { vcpu->vcpu_run->mmio.is_write = 0; /* Record that we need to give vCPU user's supplied value next gh_vcpu_run() */ @@ -175,6 +178,8 @@ static int gh_vcpu_run(struct gh_vcpu *vcpu) vcpu->state = GH_VCPU_READY; break; case GH_VCPU_MMIO_READ: + if (unlikely(vcpu->mmio_read_len > sizeof(state_data[0]))) + 
vcpu->mmio_read_len = sizeof(state_data[0]); memcpy(&state_data[0], vcpu->vcpu_run->mmio.data, vcpu->mmio_read_len); vcpu->state = GH_VCPU_READY; break; @@ -387,15 +392,9 @@ static long gh_vcpu_bind(struct gh_vm_function_instance *f) if (r) goto err_destroy_page; - fd = get_unused_fd_flags(O_CLOEXEC); - if (fd < 0) { - r = fd; - goto err_remove_vcpu; - } - if (!gh_vm_get(f->ghvm)) { r = -ENODEV; - goto err_put_fd; + goto err_remove_resource_ticket; } vcpu->ghvm = f->ghvm; @@ -409,23 +408,30 @@ static long gh_vcpu_bind(struct gh_vm_function_instance *f) goto err_put_gh_vm; kref_get(&vcpu->kref); - snprintf(name, sizeof(name), "gh-vcpu:%d", vcpu->ticket.label); + + fd = get_unused_fd_flags(O_CLOEXEC); + if (fd < 0) { + r = fd; + goto err_notifier; + } + + snprintf(name, sizeof(name), "gh-vcpu:%u", vcpu->ticket.label); file = anon_inode_getfile(name, &gh_vcpu_fops, vcpu, O_RDWR); if (IS_ERR(file)) { r = PTR_ERR(file); - goto err_notifier; + goto err_put_fd; } fd_install(fd, file); return fd; +err_put_fd: + put_unused_fd(fd); err_notifier: gh_rm_notifier_unregister(f->rm, &vcpu->nb); err_put_gh_vm: gh_vm_put(vcpu->ghvm); -err_put_fd: - put_unused_fd(fd); -err_remove_vcpu: +err_remove_resource_ticket: gh_vm_remove_resource_ticket(f->ghvm, &vcpu->ticket); err_destroy_page: free_page((unsigned long)vcpu->vcpu_run); @@ -458,5 +464,5 @@ static bool gh_vcpu_compare(const struct gh_vm_function_instance *f, } DECLARE_GH_VM_FUNCTION_INIT(vcpu, GH_FN_VCPU, 1, gh_vcpu_bind, gh_vcpu_unbind, gh_vcpu_compare); -MODULE_DESCRIPTION("Gunyah vCPU Driver"); +MODULE_DESCRIPTION("Gunyah vCPU Function"); MODULE_LICENSE("GPL"); diff --git a/drivers/virt/gunyah/rsc_mgr.c b/drivers/virt/gunyah/rsc_mgr.c index 10cc8db37d30..5571540311af 100644 --- a/drivers/virt/gunyah/rsc_mgr.c +++ b/drivers/virt/gunyah/rsc_mgr.c @@ -126,7 +126,8 @@ struct gh_rm_connection { * @dev: pointer to device * @tx_ghrsc: message queue resource to TX to RM * @rx_ghrsc: message queue resource to RX from RM - * @msgq: 
mailbox instance of above + * @msgq: mailbox instance of TX/RX resources above + * @msgq_client: mailbox client of above msgq * @active_rx_connection: ongoing gh_rm_connection for which we're receiving fragments * @last_tx_ret: return value of last mailbox tx * @call_xarray: xarray to allocate & lookup sequence IDs for Request/Response flows @@ -160,7 +161,7 @@ struct gh_rm { /** * gh_rm_remap_error() - Remap Gunyah resource manager errors into a Linux error code - * @gh_error: "Standard" return value from Gunyah resource manager + * @rm_error: "Standard" return value from Gunyah resource manager */ static inline int gh_rm_remap_error(enum gh_rm_error rm_error) { @@ -378,7 +379,7 @@ static void gh_rm_notif_work(struct work_struct *work) notification.work); struct gh_rm *rm = connection->notification.rm; - blocking_notifier_call_chain(&rm->nh, connection->msg_id, connection->payload); + blocking_notifier_call_chain(&rm->nh, le32_to_cpu(connection->msg_id), connection->payload); put_device(rm->dev); kfree(connection->payload); diff --git a/drivers/virt/gunyah/rsc_mgr_rpc.c b/drivers/virt/gunyah/rsc_mgr_rpc.c index 4a8f94a34cf2..f48e7df2dbef 100644 --- a/drivers/virt/gunyah/rsc_mgr_rpc.c +++ b/drivers/virt/gunyah/rsc_mgr_rpc.c @@ -308,7 +308,7 @@ int gh_rm_mem_reclaim(struct gh_rm *rm, struct gh_rm_mem_parcel *parcel) int ret; ret = gh_rm_call(rm, GH_RM_RPC_MEM_RECLAIM, &req, sizeof(req), NULL, NULL); - /* Do not call platform mem reclaim hooks: the reclaim didn't happen*/ + /* Only call the platform mem reclaim hooks if we reclaimed the memory */ if (ret) return ret; @@ -348,7 +348,7 @@ EXPORT_SYMBOL_GPL(gh_rm_vm_set_firmware_mem); int gh_rm_alloc_vmid(struct gh_rm *rm, u16 vmid) { struct gh_rm_vm_common_vmid_req req_payload = { - .vmid = vmid, + .vmid = cpu_to_le16(vmid), }; struct gh_rm_vm_alloc_vmid_resp *resp_payload; size_t resp_size; diff --git a/drivers/virt/gunyah/vm_mgr.c b/drivers/virt/gunyah/vm_mgr.c index 1ff96c35af56..4e824758ddf3 100644 --- 
a/drivers/virt/gunyah/vm_mgr.c +++ b/drivers/virt/gunyah/vm_mgr.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -385,7 +386,7 @@ static int gh_vm_rm_notification_status(struct gh_vm *ghvm, void *data) { struct gh_rm_vm_status_payload *payload = data; - if (payload->vmid != ghvm->vmid) + if (le16_to_cpu(payload->vmid) != ghvm->vmid) return NOTIFY_OK; /* All other state transitions are synchronous to a corresponding RM call */ @@ -403,7 +404,7 @@ static int gh_vm_rm_notification_exited(struct gh_vm *ghvm, void *data) { struct gh_rm_vm_exited_payload *payload = data; - if (payload->vmid != ghvm->vmid) + if (le16_to_cpu(payload->vmid) != ghvm->vmid) return NOTIFY_OK; down_write(&ghvm->status_lock); @@ -413,6 +414,7 @@ static int gh_vm_rm_notification_exited(struct gh_vm *ghvm, void *data) memcpy(&ghvm->exit_info.reason, payload->exit_reason, min(GH_VM_MAX_EXIT_REASON_SIZE, ghvm->exit_info.reason_size)); up_write(&ghvm->status_lock); + wake_up(&ghvm->vm_status_wait); return NOTIFY_DONE; } @@ -441,9 +443,9 @@ static void gh_vm_stop(struct gh_vm *ghvm) if (ret) dev_warn(ghvm->parent, "Failed to stop VM: %d\n", ret); } - - ghvm->vm_status = GH_RM_VM_STATUS_EXITED; up_write(&ghvm->status_lock); + + wait_event(ghvm->vm_status_wait, ghvm->vm_status == GH_RM_VM_STATUS_EXITED); } static __must_check struct gh_vm *gh_vm_alloc(struct gh_rm *rm) @@ -663,9 +665,13 @@ static long gh_vm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_from_user(&dtb_config, argp, sizeof(dtb_config))) return -EFAULT; - if (dtb_config.guest_phys_addr + dtb_config.size < dtb_config.guest_phys_addr) + if (overflows_type(dtb_config.guest_phys_addr + dtb_config.size, u64)) return -EOVERFLOW; + /* Gunyah requires that dtb_config is page aligned */ + if (!PAGE_ALIGNED(dtb_config.guest_phys_addr) || !PAGE_ALIGNED(dtb_config.size)) + return -EINVAL; + ghvm->dtb_config = dtb_config; r = 0; diff --git a/drivers/virt/gunyah/vm_mgr.h 
b/drivers/virt/gunyah/vm_mgr.h index 9fc4e30129a7..6b88ba64e07d 100644 --- a/drivers/virt/gunyah/vm_mgr.h +++ b/drivers/virt/gunyah/vm_mgr.h @@ -3,8 +3,8 @@ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ -#ifndef _GH_PRIV_VM_MGR_H -#define _GH_PRIV_VM_MGR_H +#ifndef _GH_VM_MGR_H +#define _GH_VM_MGR_H #include #include diff --git a/drivers/virt/gunyah/vm_mgr_mm.c b/drivers/virt/gunyah/vm_mgr_mm.c index 952cc85e5d4b..3157d5317843 100644 --- a/drivers/virt/gunyah/vm_mgr_mm.c +++ b/drivers/virt/gunyah/vm_mgr_mm.c @@ -119,14 +119,12 @@ int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *regio if (ret) return ret; - /* Check label is unique */ mapping = __gh_vm_mem_find_by_label(ghvm, region->label); if (mapping) { ret = -EEXIST; goto unlock; } - /* Check for overlap */ list_for_each_entry(tmp_mapping, &ghvm->memory_mappings, list) { if (gh_vm_mem_overlap(tmp_mapping, region->guest_phys_addr, region->memory_size)) { @@ -235,7 +233,6 @@ int gh_vm_mem_alloc(struct gh_vm *ghvm, struct gh_userspace_memory_region *regio } else { parcel->mem_entries[j].size = cpu_to_le64(entry_size); j++; - BUG_ON(j >= parcel->n_mem_entries); parcel->mem_entries[j].ipa_base = cpu_to_le64(page_to_phys(curr_page)); entry_size = PAGE_SIZE; diff --git a/include/linux/gunyah.h b/include/linux/gunyah.h index 2a16219fad18..1f1685518bf3 100644 --- a/include/linux/gunyah.h +++ b/include/linux/gunyah.h @@ -14,13 +14,13 @@ #include #include -/* Follows resource manager's resource types for VM_GET_HYP_RESOURCES */ +/* Matches resource manager's resource types for VM_GET_HYP_RESOURCES RPC */ enum gh_resource_type { GH_RESOURCE_TYPE_BELL_TX = 0, GH_RESOURCE_TYPE_BELL_RX = 1, GH_RESOURCE_TYPE_MSGQ_TX = 2, GH_RESOURCE_TYPE_MSGQ_RX = 3, - GH_RESOURCE_TYPE_VCPU = 4, + GH_RESOURCE_TYPE_VCPU = 4, }; struct gh_resource { @@ -28,7 +28,6 @@ struct gh_resource { u64 capid; unsigned int irq; - /* To help allocator in vm manager */ struct list_head list; u32 
rm_label; }; @@ -37,7 +36,7 @@ struct gh_resource { * Gunyah Message Queues */ -#define GH_MSGQ_MAX_MSG_SIZE 240 +#define GH_MSGQ_MAX_MSG_SIZE 240 struct gh_msgq_tx_data { size_t length; @@ -144,16 +143,17 @@ static inline int gh_error_remap(enum gh_error gh_error) } enum gh_api_feature { - GH_FEATURE_DOORBELL = 1, - GH_FEATURE_MSGQUEUE = 2, - GH_FEATURE_VCPU = 5, - GH_FEATURE_MEMEXTENT = 6, + GH_FEATURE_DOORBELL = 1, + GH_FEATURE_MSGQUEUE = 2, + GH_FEATURE_VCPU = 5, + GH_FEATURE_MEMEXTENT = 6, }; bool arch_is_gh_guest(void); #define GH_API_V1 1 +/* Other bits reserved for future use and will be zero */ #define GH_API_INFO_API_VERSION_MASK GENMASK_ULL(13, 0) #define GH_API_INFO_BIG_ENDIAN BIT_ULL(14) #define GH_API_INFO_IS_64BIT BIT_ULL(15) diff --git a/include/linux/gunyah_rsc_mgr.h b/include/linux/gunyah_rsc_mgr.h index 27283c881ecb..f73371bd8f7c 100644 --- a/include/linux/gunyah_rsc_mgr.h +++ b/include/linux/gunyah_rsc_mgr.h @@ -10,7 +10,7 @@ #include #include -#define GH_VMID_INVAL U16_MAX +#define GH_VMID_INVAL U16_MAX #define GH_MEM_HANDLE_INVAL U32_MAX struct gh_rm; @@ -31,12 +31,6 @@ struct gh_rm_vm_exited_payload { #define GH_RM_NOTIFICATION_VM_EXITED 0x56100001 enum gh_rm_vm_status { - /** - * RM doesn't have a state where load partially failed because - * only Linux - */ - GH_RM_VM_STATUS_LOAD_FAILED = -1, - GH_RM_VM_STATUS_NO_STATE = 0, GH_RM_VM_STATUS_INIT = 1, GH_RM_VM_STATUS_READY = 2, diff --git a/samples/gunyah/gunyah_vmm.c b/samples/gunyah/gunyah_vmm.c index d0ba9c20cb13..d0eb49e86372 100644 --- a/samples/gunyah/gunyah_vmm.c +++ b/samples/gunyah/gunyah_vmm.c @@ -56,8 +56,8 @@ static void print_help(char *cmd) "Usage: %s \n" " --help, -h this menu\n" " --image, -i VM image file to load (e.g. a kernel Image) [Required]\n" - " --dtb, -d Devicetree to load [Required]\n" - " --ramdisk, -r Ramdisk to load\n" + " --dtb, -d Devicetree file to load [Required]\n" + " --ramdisk, -r Ramdisk file to load\n" " --base, -B
Set the base address of guest's memory [Default: 0x80000000]\n" " --size, -S The number of bytes large to make the guest's memory [Default: 0x6400000 (100 MB)]\n" " --image_offset, -I Offset into guest memory to load the VM image file [Default: 0x10000]\n" From 4c868837facf5191ea599ad65db1ac28ebeb01e4 Mon Sep 17 00:00:00 2001 From: Elliot Berman Date: Tue, 9 May 2023 10:10:59 -0700 Subject: [PATCH 51/63] ANDROID: abi_gki_aarch64_qcom: Add gh_rm_register_platform_ops From commit 80dfafb2b9b6 ("ANDROID: gunyah: Sync with latest "firmware: qcom_scm: Register Gunyah platform ops""), the QCOM platform extensions now use gh_rm_(un)register_platform_ops instead of the devm_ equivalent because the platform extensions are no longer directly backed by a device. 2 function symbol(s) added 'int gh_rm_register_platform_ops(struct gh_rm_platform_ops*)' 'void gh_rm_unregister_platform_ops(struct gh_rm_platform_ops*)' Bug: 279506910 Change-Id: I7ad36387a9d254691ecf9b769e058d972bd41c42 Signed-off-by: Elliot Berman --- android/abi_gki_aarch64.stg | 36 ++++++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_qcom | 2 ++ 2 files changed, 38 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index fb992fc1426c..b7f287d9c8b0 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -285979,6 +285979,11 @@ function { parameter_id: 0x3f222c68 parameter_id: 0x15b54c6f } +function { + id: 0x1f07d2c7 + return_type_id: 0x48b5725f + parameter_id: 0x3e7e0d52 +} function { id: 0x1f0d7714 return_type_id: 0x48b5725f @@ -294940,6 +294945,11 @@ function { parameter_id: 0xf435685e parameter_id: 0x0efc9002 } +function { + id: 0x921f607b + return_type_id: 0x6720d32f + parameter_id: 0x3e7e0d52 +} function { id: 0x9220b9bd return_type_id: 0x6720d32f @@ -340695,6 +340705,24 @@ elf_symbol { type_id: 0x977a5487 full_name: "gh_rm_notifier_unregister" } +elf_symbol { + id: 0x62906068 + name: "gh_rm_register_platform_ops" + is_defined: true + 
symbol_type: FUNCTION + crc: 0xa577ae43 + type_id: 0x921f607b + full_name: "gh_rm_register_platform_ops" +} +elf_symbol { + id: 0x35ffaad3 + name: "gh_rm_unregister_platform_ops" + is_defined: true + symbol_type: FUNCTION + crc: 0x5759f053 + type_id: 0x1f07d2c7 + full_name: "gh_rm_unregister_platform_ops" +} elf_symbol { id: 0x5a582da8 name: "gic_nonsecure_priorities" @@ -383995,6 +384023,14 @@ symbols { key: "gh_rm_notifier_unregister" value: 0x3049a5e5 } + symbol { + key: "gh_rm_register_platform_ops" + value: 0x62906068 + } + symbol { + key: "gh_rm_unregister_platform_ops" + value: 0x35ffaad3 + } symbol { key: "gic_nonsecure_priorities" value: 0x5a582da8 diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index eda7c6d6cf3c..5c5d9a97b1f2 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -1225,6 +1225,8 @@ gh_rm_call gh_rm_notifier_register gh_rm_notifier_unregister + gh_rm_register_platform_ops + gh_rm_unregister_platform_ops gic_nonsecure_priorities gov_attr_set_init gov_attr_set_put From e6e6e1273db431fe03ae6b0bf48738970f6ccaea Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Tue, 20 Sep 2022 14:42:50 -0700 Subject: [PATCH 52/63] ANDROID: mm: introduce page_pinner For CMA allocation, it's really critical to migrate a page but sometimes it fails. One of the reasons is some driver holds a page refcount for a long time so VM couldn't migrate the page at that time. The concern here is there is no way to find the who hold the refcount of the page effectively. This patch introduces feature to keep tracking page's pinner. All get_page sites are vulnerable to pin a page for a long time but the cost to keep track it would be significat since get_page is the most frequent kernel operation. Furthermore, the page could be not user page but kernel page which is not related to the page migration failure. Thus, this patch keeps tracks of only migration failed pages to reduce runtime cost. 
Once page migration fails in CMA allocation path, those pages are marked as "migration failure" and every put_page operation against those pages, callstack of the put are recorded into page_pinner buffer. Later, admin can see what pages were failed and who released the refcount since the failure. It really helps effectively to find out longtime refcount holder to prevent the page migration. note: page_pinner doesn't guarantee attributing/unattributing are atomic if they happen at the same time. It's just best effort so false-positive could happen. Bug: 183414571 BUg: 240196534 Signed-off-by: Minchan Kim Signed-off-by: Minchan Kim Change-Id: I603d0c0122734c377db6b1eb95848a6f734173a0 (cherry picked from commit 898cfbf094a2fc13c67fab5b5d3c916f0139833a) --- arch/arm64/configs/gki_defconfig | 1 + include/linux/page_ext.h | 4 + include/linux/page_pinner.h | 48 ++++ mm/Kconfig.debug | 16 ++ mm/Makefile | 1 + mm/page_alloc.c | 13 +- mm/page_ext.c | 4 + mm/page_isolation.c | 3 + mm/page_pinner.c | 423 +++++++++++++++++++++++++++++++ 9 files changed, 512 insertions(+), 1 deletion(-) create mode 100644 include/linux/page_pinner.h create mode 100644 mm/page_pinner.c diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index 260befab3d4b..e139d2c9d10f 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -701,6 +701,7 @@ CONFIG_UBSAN_LOCAL_BOUNDS=y # CONFIG_UBSAN_BOOL is not set # CONFIG_UBSAN_ENUM is not set CONFIG_PAGE_OWNER=y +CONFIG_PAGE_PINNER=y CONFIG_DEBUG_STACK_USAGE=y CONFIG_DEBUG_MEMORY_INIT=y CONFIG_KASAN=y diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h index 22be4582faae..6136338b2b17 100644 --- a/include/linux/page_ext.h +++ b/include/linux/page_ext.h @@ -19,6 +19,10 @@ struct page_ext_operations { enum page_ext_flags { PAGE_EXT_OWNER, PAGE_EXT_OWNER_ALLOCATED, +#if defined(CONFIG_PAGE_PINNER) + /* page migration failed */ + PAGE_EXT_PINNER_MIGRATION_FAILED, +#endif #if 
defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) PAGE_EXT_YOUNG, PAGE_EXT_IDLE, diff --git a/include/linux/page_pinner.h b/include/linux/page_pinner.h new file mode 100644 index 000000000000..4d574d1ced59 --- /dev/null +++ b/include/linux/page_pinner.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_PAGE_PINNER_H +#define __LINUX_PAGE_PINNER_H + +#include + +#ifdef CONFIG_PAGE_PINNER +extern struct static_key_false page_pinner_inited; +extern struct static_key_true failure_tracking; +extern struct page_ext_operations page_pinner_ops; + +extern void __free_page_pinner(struct page *page, unsigned int order); +void __page_pinner_failure_detect(struct page *page); +void __page_pinner_put_page(struct page *page); + +static inline void free_page_pinner(struct page *page, unsigned int order) +{ + if (static_branch_unlikely(&page_pinner_inited)) + __free_page_pinner(page, order); +} + +static inline void page_pinner_put_page(struct page *page) +{ + if (!static_branch_unlikely(&failure_tracking)) + return; + + __page_pinner_put_page(page); +} + +static inline void page_pinner_failure_detect(struct page *page) +{ + if (!static_branch_unlikely(&failure_tracking)) + return; + + __page_pinner_failure_detect(page); +} +#else +static inline void free_page_pinner(struct page *page, unsigned int order) +{ +} +static inline void page_pinner_put_page(struct page *page) +{ +} +static inline void page_pinner_failure_detect(struct page *page) +{ +} +#endif /* CONFIG_PAGE_PINNER */ +#endif /* __LINUX_PAGE_PINNER_H */ diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index ce8dded36de9..2eae432e042c 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -119,6 +119,22 @@ config PAGE_TABLE_CHECK_ENFORCED If unsure say "n". 
+config PAGE_PINNER + bool "Track page pinner" + depends on DEBUG_KERNEL && STACKTRACE_SUPPORT + select DEBUG_FS + select STACKTRACE + select STACKDEPOT + select PAGE_EXTENSION + help + This keeps track of what call chain is the pinner of a page, may + help to find page migration failures. Even if you include this + feature in your build, it is disabled by default. You should pass + "page_pinner=on" to boot parameter in order to enable it. Eats + a fair amount of memory if enabled. + + If unsure, say N. + config PAGE_POISONING bool "Poison pages after freeing" help diff --git a/mm/Makefile b/mm/Makefile index 5ef58b2081d4..6878edc7c7d0 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -109,6 +109,7 @@ obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o obj-$(CONFIG_DEBUG_RODATA_TEST) += rodata_test.o obj-$(CONFIG_DEBUG_VM_PGTABLE) += debug_vm_pgtable.o obj-$(CONFIG_PAGE_OWNER) += page_owner.o +obj-$(CONFIG_PAGE_PINNER) += page_pinner.o obj-$(CONFIG_CLEANCACHE) += cleancache.o obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o obj-$(CONFIG_ZPOOL) += zpool.o diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4afcd84a422c..104003f72893 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1438,6 +1438,7 @@ static __always_inline bool free_pages_prepare(struct page *page, if (memcg_kmem_enabled() && PageMemcgKmem(page)) __memcg_kmem_uncharge_page(page, order); reset_page_owner(page, order); + free_page_pinner(page, order); page_table_check_free(page, order); return false; } @@ -1478,6 +1479,7 @@ static __always_inline bool free_pages_prepare(struct page *page, page_cpupid_reset_last(page); page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; reset_page_owner(page, order); + free_page_pinner(page, order); page_table_check_free(page, order); if (!PageHighMem(page)) { @@ -9310,8 +9312,17 @@ int __alloc_contig_migrate_range(struct compact_control *cc, lru_cache_enable(); if (ret < 0) { - if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) + if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) 
{ + struct page *page; + alloc_contig_dump_pages(&cc->migratepages); + list_for_each_entry(page, &cc->migratepages, lru) { + /* The page will be freed by putback_movable_pages soon */ + if (page_count(page) == 1) + continue; + page_pinner_failure_detect(page); + } + } putback_movable_pages(&cc->migratepages); return ret; } diff --git a/mm/page_ext.c b/mm/page_ext.c index 1b3a67b9d5e0..6fcae7c60101 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include @@ -81,6 +82,9 @@ static struct page_ext_operations *page_ext_ops[] __initdata = { #if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT) &page_idle_ops, #endif +#ifdef CONFIG_PAGE_PINNER + &page_pinner_ops, +#endif #ifdef CONFIG_PAGE_TABLE_CHECK &page_table_check_ops, #endif diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 47fbc1696466..90e6a0090085 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include "internal.h" @@ -666,6 +667,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, out: trace_test_pages_isolated(start_pfn, end_pfn, pfn); + if (pfn < end_pfn) + page_pinner_failure_detect(pfn_to_page(pfn)); return ret; } diff --git a/mm/page_pinner.c b/mm/page_pinner.c new file mode 100644 index 000000000000..2a25a3720d11 --- /dev/null +++ b/mm/page_pinner.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" + +#define PAGE_PINNER_STACK_DEPTH 16 +static unsigned long pp_buf_size = 4096; + +struct page_pinner { + depot_stack_handle_t handle; + u64 ts_usec; + atomic_t count; +}; + +enum pp_state { + PP_PUT, + PP_FREE, + PP_FAIL_DETECTED, +}; + +struct captured_pinner { + depot_stack_handle_t handle; + union { + u64 ts_usec; + u64 elapsed; + }; + + /* struct page fields */ + unsigned long 
pfn; + int count; + int mapcount; + struct address_space *mapping; + unsigned long flags; + enum pp_state state; +}; + +struct page_pinner_buffer { + spinlock_t lock; + unsigned long index; + struct captured_pinner *buffer; +}; + +/* alloc_contig failed pinner */ +static struct page_pinner_buffer pp_buffer; + +static bool page_pinner_enabled; +DEFINE_STATIC_KEY_FALSE(page_pinner_inited); + +DEFINE_STATIC_KEY_TRUE(failure_tracking); +EXPORT_SYMBOL_GPL(failure_tracking); + +static depot_stack_handle_t failure_handle; + +static int __init early_page_pinner_param(char *buf) +{ + page_pinner_enabled = true; + return 0; +} +early_param("page_pinner", early_page_pinner_param); + +static bool need_page_pinner(void) +{ + return page_pinner_enabled; +} + +static noinline void register_failure_stack(void) +{ + unsigned long entries[4]; + unsigned int nr_entries; + + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0); + failure_handle = stack_depot_save(entries, nr_entries, GFP_KERNEL); +} + +static void init_page_pinner(void) +{ + if (!page_pinner_enabled) + return; + + register_failure_stack(); + static_branch_enable(&page_pinner_inited); +} + +struct page_ext_operations page_pinner_ops = { + .size = sizeof(struct page_pinner), + .need = need_page_pinner, + .init = init_page_pinner, +}; + +static inline struct page_pinner *get_page_pinner(struct page_ext *page_ext) +{ + return (void *)page_ext + page_pinner_ops.offset; +} + +static noinline depot_stack_handle_t save_stack(gfp_t flags) +{ + unsigned long entries[PAGE_PINNER_STACK_DEPTH]; + depot_stack_handle_t handle; + unsigned int nr_entries; + + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 2); + handle = stack_depot_save(entries, nr_entries, flags); + if (!handle) + handle = failure_handle; + + return handle; +} + +static void capture_page_state(struct page *page, + struct captured_pinner *record) +{ + record->flags = page->flags; + record->mapping = page_mapping(page); + record->pfn = 
page_to_pfn(page); + record->count = page_count(page); + record->mapcount = page_mapcount(page); +} + +static void add_record(struct page_pinner_buffer *pp_buf, + struct captured_pinner *record) +{ + unsigned long flags; + unsigned int idx; + + spin_lock_irqsave(&pp_buf->lock, flags); + idx = pp_buf->index++; + pp_buf->index %= pp_buf_size; + pp_buf->buffer[idx] = *record; + spin_unlock_irqrestore(&pp_buf->lock, flags); +} + +void __free_page_pinner(struct page *page, unsigned int order) +{ + struct page_pinner *page_pinner; + struct page_ext *page_ext; + int i; + + /* free_page could be called before buffer is initialized */ + if (!pp_buffer.buffer) + return; + + page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + return; + + for (i = 0; i < (1 << order); i++) { + struct captured_pinner record; + + if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) + continue; + + page_pinner = get_page_pinner(page_ext); + /* record page free call path */ + page_ext = lookup_page_ext(page); + if (unlikely(!page_ext)) + continue; + + record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN); + record.ts_usec = (u64)ktime_to_us(ktime_get_boottime()); + record.state = PP_FREE; + capture_page_state(page, &record); + + add_record(&pp_buffer, &record); + + atomic_set(&page_pinner->count, 0); + page_pinner->ts_usec = 0; + clear_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags); + page_ext = page_ext_next(page_ext); + } +} + +static ssize_t +print_page_pinner(char __user *buf, size_t count, struct captured_pinner *record) +{ + int ret; + unsigned long *entries; + unsigned int nr_entries; + char *kbuf; + + count = min_t(size_t, count, PAGE_SIZE); + kbuf = kmalloc(count, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + if (record->state == PP_PUT) { + ret = snprintf(kbuf, count, "At least, pinned for %llu us\n", + record->elapsed); + } else { + u64 ts_usec = record->ts_usec; + unsigned long rem_usec = do_div(ts_usec, 1000000); + + ret = snprintf(kbuf, count, + "%s 
[%5lu.%06lu]\n", + record->state == PP_FREE ? "Freed at" : + "Failure detected at", + (unsigned long)ts_usec, rem_usec); + } + + if (ret >= count) + goto err; + + /* Print information relevant to grouping pages by mobility */ + ret += snprintf(kbuf + ret, count - ret, + "PFN 0x%lx Block %lu count %d mapcount %d mapping %pS Flags %#lx(%pGp)\n", + record->pfn, + record->pfn >> pageblock_order, + record->count, record->mapcount, + record->mapping, + record->flags, &record->flags); + + if (ret >= count) + goto err; + + nr_entries = stack_depot_fetch(record->handle, &entries); + ret += stack_trace_snprint(kbuf + ret, count - ret, entries, + nr_entries, 0); + if (ret >= count) + goto err; + + ret += snprintf(kbuf + ret, count - ret, "\n"); + if (ret >= count) + goto err; + + if (copy_to_user(buf, kbuf, ret)) + ret = -EFAULT; + + kfree(kbuf); + return ret; + +err: + kfree(kbuf); + return -ENOMEM; +} + +void __page_pinner_failure_detect(struct page *page) +{ + struct page_ext *page_ext = lookup_page_ext(page); + struct page_pinner *page_pinner; + struct captured_pinner record; + u64 now; + + if (unlikely(!page_ext)) + return; + + if (test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) + return; + + now = (u64)ktime_to_us(ktime_get_boottime()); + page_pinner = get_page_pinner(page_ext); + if (!page_pinner->ts_usec) + page_pinner->ts_usec = now; + set_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags); + record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN); + record.ts_usec = now; + record.state = PP_FAIL_DETECTED; + capture_page_state(page, &record); + + add_record(&pp_buffer, &record); +} +EXPORT_SYMBOL_GPL(__page_pinner_failure_detect); + +void __page_pinner_put_page(struct page *page) +{ + struct page_ext *page_ext = lookup_page_ext(page); + struct page_pinner *page_pinner; + struct captured_pinner record; + u64 now, ts_usec; + + if (unlikely(!page_ext)) + return; + + if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) + return; + + 
page_pinner = get_page_pinner(page_ext); + record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN); + now = (u64)ktime_to_us(ktime_get_boottime()); + ts_usec = page_pinner->ts_usec; + + if (now > ts_usec) + record.elapsed = now - ts_usec; + else + record.elapsed = 0; + record.state = PP_PUT; + capture_page_state(page, &record); + + add_record(&pp_buffer, &record); +} +EXPORT_SYMBOL_GPL(__page_pinner_put_page); + +static ssize_t read_buffer(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + u64 tmp; + loff_t i, idx; + struct captured_pinner record; + unsigned long flags; + + if (!static_branch_unlikely(&failure_tracking)) + return -EINVAL; + + if (*ppos >= pp_buf_size) + return 0; + + i = *ppos; + *ppos = i + 1; + + /* + * reading the records in the reverse order with newest one + * being read first followed by older ones + */ + tmp = pp_buffer.index - 1 - i + pp_buf_size; + idx = do_div(tmp, pp_buf_size); + + spin_lock_irqsave(&pp_buffer.lock, flags); + record = pp_buffer.buffer[idx]; + spin_unlock_irqrestore(&pp_buffer.lock, flags); + if (!record.handle) + return 0; + + return print_page_pinner(buf, count, &record); +} + +static const struct file_operations proc_buffer_operations = { + .read = read_buffer, +}; + +static int failure_tracking_set(void *data, u64 val) +{ + bool on; + + on = (bool)val; + if (on) + static_branch_enable(&failure_tracking); + else + static_branch_disable(&failure_tracking); + return 0; +} + +static int failure_tracking_get(void *data, u64 *val) +{ + *val = static_branch_unlikely(&failure_tracking); + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(failure_tracking_fops, + failure_tracking_get, + failure_tracking_set, "%llu\n"); + +static int buffer_size_set(void *data, u64 val) +{ + unsigned long flags; + struct captured_pinner *new, *old; + + new = kvmalloc_array(val, sizeof(*new), GFP_KERNEL); + if (!new) + return -ENOMEM; + + spin_lock_irqsave(&pp_buffer.lock, flags); + old = pp_buffer.buffer; + pp_buffer.buffer = new; + 
pp_buffer.index = 0; + pp_buf_size = val; + spin_unlock_irqrestore(&pp_buffer.lock, flags); + kvfree(old); + + return 0; +} + +static int buffer_size_get(void *data, u64 *val) +{ + *val = pp_buf_size; + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(buffer_size_fops, + buffer_size_get, + buffer_size_set, "%llu\n"); + +static int __init page_pinner_init(void) +{ + struct dentry *pp_debugfs_root; + + if (!static_branch_unlikely(&page_pinner_inited)) + return 0; + + pp_buffer.buffer = kvmalloc_array(pp_buf_size, sizeof(*pp_buffer.buffer), + GFP_KERNEL); + if (!pp_buffer.buffer) { + pr_info("page_pinner disabled due to failure of buffer allocation\n"); + return 1; + } + + spin_lock_init(&pp_buffer.lock); + pp_buffer.index = 0; + + pr_info("page_pinner enabled\n"); + + pp_debugfs_root = debugfs_create_dir("page_pinner", NULL); + + debugfs_create_file("buffer", 0444, + pp_debugfs_root, NULL, + &proc_buffer_operations); + + debugfs_create_file("failure_tracking", 0644, + pp_debugfs_root, NULL, + &failure_tracking_fops); + + debugfs_create_file("buffer_size", 0644, + pp_debugfs_root, NULL, + &buffer_size_fops); + return 0; +} +late_initcall(page_pinner_init) From 231a4cccec448391d8c1d7610fece41628aafbfc Mon Sep 17 00:00:00 2001 From: Charan Teja Kalla Date: Thu, 18 Aug 2022 19:20:00 +0530 Subject: [PATCH 53/63] ANDROID: mm: fix use-after free of page_ext in page_pinner Apply new page_ext refcounting scheme to page_pinner. 
Bug: 236222283 Bug: 240196534 [surenb: extracted from aosp/2369529] Change-Id: I3b64caf5a7e8ff316507cc3933f5b3696142268d Signed-off-by: Suren Baghdasaryan --- mm/page_pinner.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/mm/page_pinner.c b/mm/page_pinner.c index 2a25a3720d11..380aaec6eec9 100644 --- a/mm/page_pinner.c +++ b/mm/page_pinner.c @@ -150,7 +150,7 @@ void __free_page_pinner(struct page *page, unsigned int order) if (!pp_buffer.buffer) return; - page_ext = lookup_page_ext(page); + page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; @@ -161,10 +161,6 @@ void __free_page_pinner(struct page *page, unsigned int order) continue; page_pinner = get_page_pinner(page_ext); - /* record page free call path */ - page_ext = lookup_page_ext(page); - if (unlikely(!page_ext)) - continue; record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN); record.ts_usec = (u64)ktime_to_us(ktime_get_boottime()); @@ -178,6 +174,7 @@ void __free_page_pinner(struct page *page, unsigned int order) clear_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags); page_ext = page_ext_next(page_ext); } + page_ext_put(page_ext); } static ssize_t @@ -245,7 +242,7 @@ err: void __page_pinner_failure_detect(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); struct page_pinner *page_pinner; struct captured_pinner record; u64 now; @@ -253,8 +250,10 @@ void __page_pinner_failure_detect(struct page *page) if (unlikely(!page_ext)) return; - if (test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) + if (test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) { + page_ext_put(page_ext); return; + } now = (u64)ktime_to_us(ktime_get_boottime()); page_pinner = get_page_pinner(page_ext); @@ -267,12 +266,13 @@ void __page_pinner_failure_detect(struct page *page) capture_page_state(page, &record); add_record(&pp_buffer, &record); + page_ext_put(page_ext); } 
EXPORT_SYMBOL_GPL(__page_pinner_failure_detect); void __page_pinner_put_page(struct page *page) { - struct page_ext *page_ext = lookup_page_ext(page); + struct page_ext *page_ext = page_ext_get(page); struct page_pinner *page_pinner; struct captured_pinner record; u64 now, ts_usec; @@ -280,8 +280,10 @@ void __page_pinner_put_page(struct page *page) if (unlikely(!page_ext)) return; - if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) + if (!test_bit(PAGE_EXT_PINNER_MIGRATION_FAILED, &page_ext->flags)) { + page_ext_put(page_ext); return; + } page_pinner = get_page_pinner(page_ext); record.handle = save_stack(GFP_NOWAIT|__GFP_NOWARN); @@ -296,6 +298,7 @@ void __page_pinner_put_page(struct page *page) capture_page_state(page, &record); add_record(&pp_buffer, &record); + page_ext_put(page_ext); } EXPORT_SYMBOL_GPL(__page_pinner_put_page); From 83b784c3d716ce03ab6b2a9ee178045b1952147d Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Sun, 27 Nov 2022 10:30:33 -0800 Subject: [PATCH 54/63] ANDROID: page_pinner: prevent pp_buffer access before initialization If page_pinner is configured with page_pinner_enabled=false and failure_tracking=true, pp_buffer will be accessed without being initialized. Prevent this by adding page_pinner_inited checks in functions that access it. 
Fixes: 898cfbf094a2 ("ANDROID: mm: introduce page_pinner") Bug: 259024332 Bug: 260179017 Change-Id: I8f612cae3e74d36e8a4eee5edec25281246cbe5e Signed-off-by: Suren Baghdasaryan Signed-off-by: Richard Chang (cherry picked from commit 23fb3111f63e5fe239a769668275c20493a5849c) --- include/linux/page_pinner.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/page_pinner.h b/include/linux/page_pinner.h index 4d574d1ced59..cb29f5c3d68e 100644 --- a/include/linux/page_pinner.h +++ b/include/linux/page_pinner.h @@ -21,6 +21,9 @@ static inline void free_page_pinner(struct page *page, unsigned int order) static inline void page_pinner_put_page(struct page *page) { + if (!static_branch_unlikely(&page_pinner_inited)) + return; + if (!static_branch_unlikely(&failure_tracking)) return; @@ -29,6 +32,9 @@ static inline void page_pinner_put_page(struct page *page) static inline void page_pinner_failure_detect(struct page *page) { + if (!static_branch_unlikely(&page_pinner_inited)) + return; + if (!static_branch_unlikely(&failure_tracking)) return; From d47c9481dafae5152bb1b036dad831317004060d Mon Sep 17 00:00:00 2001 From: Charan Teja Kalla Date: Fri, 16 Dec 2022 12:52:28 +0530 Subject: [PATCH 55/63] ANDROID: page_pinner: prevent pp_buffer uninitialized access There is a race window between page_pinner_inited set and the pp_buffer initialization which cause accessing the pp_buffer->lock. Avoid this by moving the pp_buffer initialization to page_ext_ops->init() which sets the page_pinner_inited only after the pp_buffer is initialized. Race scenario: 1) init_page_pinner is called --> page_pinner_inited is set. 2) __alloc_contig_migrate_range --> __page_pinner_failure_detect() accesses the pp_buffer->lock(yet to be initialized). 3) Then the pp_buffer is allocated and initialized. 
Below is the issue call stack: spin_bug+0x0 _raw_spin_lock_irqsave+0x3c __page_pinner_failure_detect+0x110 __alloc_contig_migrate_range+0x1c4 alloc_contig_range+0x130 cma_alloc+0x170 dma_alloc_contiguous+0xa0 __dma_direct_alloc_pages+0x16c dma_direct_alloc+0x88 Bug: 259024332 Change-Id: I6849ac4d944498b9a431b47cad7adc7903c9bbaa Signed-off-by: Charan Teja Kalla --- mm/page_pinner.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/page_pinner.c b/mm/page_pinner.c index 380aaec6eec9..2567206e7ce8 100644 --- a/mm/page_pinner.c +++ b/mm/page_pinner.c @@ -88,6 +88,16 @@ static void init_page_pinner(void) if (!page_pinner_enabled) return; + pp_buffer.buffer = kvmalloc_array(pp_buf_size, sizeof(*pp_buffer.buffer), + GFP_KERNEL); + if (!pp_buffer.buffer) { + pr_info("page_pinner disabled due to failure of buffer allocation\n"); + return; + } + + spin_lock_init(&pp_buffer.lock); + pp_buffer.index = 0; + register_failure_stack(); static_branch_enable(&page_pinner_inited); } @@ -396,16 +406,6 @@ static int __init page_pinner_init(void) if (!static_branch_unlikely(&page_pinner_inited)) return 0; - pp_buffer.buffer = kvmalloc_array(pp_buf_size, sizeof(*pp_buffer.buffer), - GFP_KERNEL); - if (!pp_buffer.buffer) { - pr_info("page_pinner disabled due to failure of buffer allocation\n"); - return 1; - } - - spin_lock_init(&pp_buffer.lock); - pp_buffer.index = 0; - pr_info("page_pinner enabled\n"); pp_debugfs_root = debugfs_create_dir("page_pinner", NULL); From 2488e2e472e7f66933b1f190251a35f13ea67699 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 23 Mar 2023 13:37:22 -0700 Subject: [PATCH 56/63] ANDROID: page_pinner: add missing page_pinner_put_page aosp/2369528 missed page_pinner_put_page in put_page_testzero path. Fix it. 
Bug: 274967172 Change-Id: Ia2af2ffb752f8405b4289ca88cde09f201548e1f Signed-off-by: Minchan Kim --- include/linux/mm.h | 8 +++++++- include/linux/page_pinner.h | 6 ------ mm/page_pinner.c | 14 +++++++++++--- 3 files changed, 18 insertions(+), 10 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 07ee5deefa91..b82c982b9818 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -28,6 +28,7 @@ #include #include #include +#include #include struct mempolicy; @@ -760,8 +761,13 @@ static inline unsigned int folio_order(struct folio *folio) */ static inline int put_page_testzero(struct page *page) { + int ret; + VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); - return page_ref_dec_and_test(page); + ret = page_ref_dec_and_test(page); + page_pinner_put_page(page); + + return ret; } static inline int folio_put_testzero(struct folio *folio) diff --git a/include/linux/page_pinner.h b/include/linux/page_pinner.h index cb29f5c3d68e..d87706cc3317 100644 --- a/include/linux/page_pinner.h +++ b/include/linux/page_pinner.h @@ -24,9 +24,6 @@ static inline void page_pinner_put_page(struct page *page) if (!static_branch_unlikely(&page_pinner_inited)) return; - if (!static_branch_unlikely(&failure_tracking)) - return; - __page_pinner_put_page(page); } @@ -35,9 +32,6 @@ static inline void page_pinner_failure_detect(struct page *page) if (!static_branch_unlikely(&page_pinner_inited)) return; - if (!static_branch_unlikely(&failure_tracking)) - return; - __page_pinner_failure_detect(page); } #else diff --git a/mm/page_pinner.c b/mm/page_pinner.c index 2567206e7ce8..2b1b1165867e 100644 --- a/mm/page_pinner.c +++ b/mm/page_pinner.c @@ -56,9 +56,9 @@ static struct page_pinner_buffer pp_buffer; static bool page_pinner_enabled; DEFINE_STATIC_KEY_FALSE(page_pinner_inited); +EXPORT_SYMBOL_GPL(page_pinner_inited); DEFINE_STATIC_KEY_TRUE(failure_tracking); -EXPORT_SYMBOL_GPL(failure_tracking); static depot_stack_handle_t failure_handle; @@ -252,11 +252,15 @@ err: void 
__page_pinner_failure_detect(struct page *page) { - struct page_ext *page_ext = page_ext_get(page); + struct page_ext *page_ext; struct page_pinner *page_pinner; struct captured_pinner record; u64 now; + if (!static_branch_unlikely(&failure_tracking)) + return; + + page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; @@ -282,11 +286,15 @@ EXPORT_SYMBOL_GPL(__page_pinner_failure_detect); void __page_pinner_put_page(struct page *page) { - struct page_ext *page_ext = page_ext_get(page); + struct page_ext *page_ext; struct page_pinner *page_pinner; struct captured_pinner record; u64 now, ts_usec; + if (!static_branch_unlikely(&failure_tracking)) + return; + + page_ext = page_ext_get(page); if (unlikely(!page_ext)) return; From 27dfd1c13e3a2647e2a91a8447c191c00114ee9f Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 23 Mar 2023 14:40:27 -0700 Subject: [PATCH 57/63] ANDROID: Update the ABI symbol list 1 function symbol(s) added 'void __page_pinner_put_page(struct page*)' 1 variable symbol(s) added 'struct static_key_false page_pinner_inited' Bug: 274967172 Change-Id: I458edf7089b44696fa270be0ed538441c99ab5e6 Signed-off-by: Minchan Kim --- android/abi_gki_aarch64.stg | 26 ++++++++++++++++++++++++++ android/abi_gki_aarch64_pixel | 2 ++ android/abi_gki_aarch64_virtual_device | 2 ++ 3 files changed, 30 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index b7f287d9c8b0..32ea49767c1d 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -318178,6 +318178,15 @@ elf_symbol { type_id: 0x9c203488 full_name: "__page_mapcount" } +elf_symbol { + id: 0xe595f8f9 + name: "__page_pinner_put_page" + is_defined: true + symbol_type: FUNCTION + crc: 0x9c81e126 + type_id: 0x11388634 + full_name: "__page_pinner_put_page" +} elf_symbol { id: 0xdc9a73c7 name: "__pagevec_release" @@ -351604,6 +351613,15 @@ elf_symbol { type_id: 0x6a8ce717 full_name: "page_mapping" } +elf_symbol { + id: 0x44e50ff8 + name: 
"page_pinner_inited" + is_defined: true + symbol_type: OBJECT + crc: 0xacfe4142 + type_id: 0x8e47c273 + full_name: "page_pinner_inited" +} elf_symbol { id: 0xec5c680b name: "page_pool_alloc_pages" @@ -374015,6 +374033,10 @@ symbols { key: "__page_mapcount" value: 0x8d43f7e0 } + symbol { + key: "__page_pinner_put_page" + value: 0xe595f8f9 + } symbol { key: "__pagevec_release" value: 0xdc9a73c7 @@ -388867,6 +388889,10 @@ symbols { key: "page_mapping" value: 0x4f3e5356 } + symbol { + key: "page_pinner_inited" + value: 0x44e50ff8 + } symbol { key: "page_pool_alloc_pages" value: 0xec5c680b diff --git a/android/abi_gki_aarch64_pixel b/android/abi_gki_aarch64_pixel index 612c8c87cda0..3b51cf27e60e 100644 --- a/android/abi_gki_aarch64_pixel +++ b/android/abi_gki_aarch64_pixel @@ -1324,6 +1324,8 @@ page_frag_alloc_align __page_frag_cache_drain page_frag_free + page_pinner_inited + __page_pinner_put_page panic panic_notifier_list param_array_ops diff --git a/android/abi_gki_aarch64_virtual_device b/android/abi_gki_aarch64_virtual_device index 5fde7b144040..553c3e13046b 100644 --- a/android/abi_gki_aarch64_virtual_device +++ b/android/abi_gki_aarch64_virtual_device @@ -299,6 +299,8 @@ page_frag_alloc_align __page_frag_cache_drain page_frag_free + page_pinner_inited + __page_pinner_put_page param_ops_bool param_ops_charp param_ops_int From bf4922727c63b164c3b66589e69108b868464e0b Mon Sep 17 00:00:00 2001 From: Zhuguangqing Date: Wed, 10 Mar 2021 14:56:02 +0800 Subject: [PATCH 58/63] ANDROID: Add vendor hooks to signal. This hook will allow us to get signal messages so that we can set limitations for certain tasks and restore them when receiving important signals. 
Bug: 184898838 Bug: 281920779 Signed-off-by: Zhuguangqing Change-Id: I83a28b0a6eb413976f4c57f2314d008ad792fa0d (cherry picked from commit 58e3f869fc3fe84fb7062496ccd049db47f3ed7f) --- drivers/android/vendor_hooks.c | 2 ++ include/trace/hooks/signal.h | 16 ++++++++++++++++ kernel/signal.c | 4 +++- 3 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 include/trace/hooks/signal.h diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index d7e325c0883f..75c46edf5eb3 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -61,6 +61,7 @@ #include #include #include +#include /* * Export tracepoints that act as a bare tracehook (ie: have no trace event @@ -73,6 +74,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_priority_skip); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_set_priority); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_restore_priority); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wakeup_ilocked); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_send_sig_info); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rtmutex_wait_start); diff --git a/include/trace/hooks/signal.h b/include/trace/hooks/signal.h new file mode 100644 index 000000000000..c1051ee5d3ac --- /dev/null +++ b/include/trace/hooks/signal.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM signal +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH trace/hooks +#if !defined(_TRACE_HOOK_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HOOK_SIGNAL_H +#include + +struct task_struct; +DECLARE_HOOK(android_vh_do_send_sig_info, + TP_PROTO(int sig, struct task_struct *killer, struct task_struct *dst), + TP_ARGS(sig, killer, dst)); +#endif /* _TRACE_HOOK_SIGNAL_H */ +/* This part must be outside protection */ +#include diff --git a/kernel/signal.c b/kernel/signal.c 
index d140672185a4..2df1129e4938 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -56,6 +56,8 @@ #include #include /* for syscall_get_* */ +#undef CREATE_TRACE_POINTS +#include /* * SLAB caches for signal bits. */ @@ -1291,7 +1293,7 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p { unsigned long flags; int ret = -ESRCH; - + trace_android_vh_do_send_sig_info(sig, current, p); if (lock_task_sighand(p, &flags)) { ret = send_signal_locked(sig, info, p, type); unlock_task_sighand(p, &flags); From 17fff41db863f170ebea8b5ae5c99ccd874ac4cd Mon Sep 17 00:00:00 2001 From: Zhuguangqing Date: Tue, 9 Mar 2021 15:47:43 +0800 Subject: [PATCH 59/63] ANDROID: Add vendor hooks for binder perf tuning Add some hooks in the binder module so that we can do task dependency analysis and statistical work in OEM's module for further optimization. Bug: 235925535 Bug: 281920779 Signed-off-by: Zhuguangqing Change-Id: Id47e59c4e3ccd07b26eef758ada147b98cd1964e Signed-off-by: heshuai1 Signed-off-by: Carlos Llamas [ cmllamas: don't export complete private definition struct binder_alloc in vendor hooks, instead just pass member alloc->free_async_space as implemented by heshuai1 and squashed here ] (cherry picked from commit 254fb1f4034fde523378ee58a501d212a59047b7) --- drivers/android/binder.c | 2 ++ drivers/android/binder_alloc.c | 2 ++ drivers/android/vendor_hooks.c | 3 +++ include/trace/hooks/binder.h | 13 +++++++++++++ 4 files changed, 20 insertions(+) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 15ae88216c51..b3a5b79c4bb1 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -3187,6 +3187,7 @@ static void binder_transaction(struct binder_proc *proc, target_proc = target_thread->proc; target_proc->tmp_ref++; binder_inner_proc_unlock(target_thread->proc); + trace_android_vh_binder_reply(target_proc, proc, thread, tr); } else { if (tr->target.handle) { struct binder_ref *ref; @@ -3249,6 +3250,7 @@ static void 
binder_transaction(struct binder_proc *proc, return_error_line = __LINE__; goto err_invalid_target_handle; } + trace_android_vh_binder_trans(target_proc, proc, thread, tr); if (security_binder_transaction(proc->cred, target_proc->cred) < 0) { binder_txn_error("%d:%d transaction credentials failed\n", diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 4ad42b0f75cd..e1620e184903 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -25,6 +25,7 @@ #include #include "binder_alloc.h" #include "binder_trace.h" +#include struct list_lru binder_alloc_lru; @@ -406,6 +407,7 @@ static struct binder_buffer *binder_alloc_new_buf_locked( alloc->pid, extra_buffers_size); return ERR_PTR(-EINVAL); } + trace_android_vh_binder_alloc_new_buf_locked(size, &alloc->free_async_space, is_async); if (is_async && alloc->free_async_space < size + sizeof(struct binder_buffer)) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index 75c46edf5eb3..17f1487ba574 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -232,3 +232,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_preset); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpuset_fork); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_alloc_new_buf_locked); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_reply); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_trans); diff --git a/include/trace/hooks/binder.h b/include/trace/hooks/binder.h index 5d0ee3a605ff..4ae5ef5b3cce 100644 --- a/include/trace/hooks/binder.h +++ b/include/trace/hooks/binder.h @@ -70,6 +70,19 @@ DECLARE_HOOK(android_vh_binder_select_worklist_ilocked, TP_PROTO(struct list_head **list, struct binder_thread *thread, struct binder_proc *proc, int wait_for_proc_work), TP_ARGS(list, thread, proc, 
wait_for_proc_work)); +DECLARE_HOOK(android_vh_binder_alloc_new_buf_locked, + TP_PROTO(size_t size, size_t *free_async_space, int is_async), + TP_ARGS(size, free_async_space, is_async)); +struct binder_transaction_data; +DECLARE_HOOK(android_vh_binder_reply, + TP_PROTO(struct binder_proc *target_proc, struct binder_proc *proc, + struct binder_thread *thread, struct binder_transaction_data *tr), + TP_ARGS(target_proc, proc, thread, tr)); +DECLARE_HOOK(android_vh_binder_trans, + TP_PROTO(struct binder_proc *target_proc, struct binder_proc *proc, + struct binder_thread *thread, struct binder_transaction_data *tr), + TP_ARGS(target_proc, proc, thread, tr)); + #endif /* _TRACE_HOOK_BINDER_H */ /* This part must be outside protection */ #include From fdd7d6fbac3e0752c2d6adf2aafb31546080fb1c Mon Sep 17 00:00:00 2001 From: wang qiankun Date: Thu, 4 May 2023 17:07:18 +0800 Subject: [PATCH 60/63] ANDROID: GKI: update the ABI symbol list INFO: ABI DIFFERENCES HAVE BEEN DETECTED! INFO: 4 function symbol(s) added 'int __traceiter_android_vh_binder_alloc_new_buf_locked(void*, size_t, size_t*, int)' 'int __traceiter_android_vh_binder_reply(void*, struct binder_proc*, struct binder_proc*, struct binder_thread*, struct binder_transaction_data*)' 'int __traceiter_android_vh_binder_trans(void*, struct binder_proc*, struct binder_proc*, struct binder_thread*, struct binder_transaction_data*)' 'int __traceiter_android_vh_do_send_sig_info(void*, int, struct task_struct*, struct task_struct*)' 4 variable symbol(s) added 'struct tracepoint __tracepoint_android_vh_binder_alloc_new_buf_locked' 'struct tracepoint __tracepoint_android_vh_binder_reply' 'struct tracepoint __tracepoint_android_vh_binder_trans' 'struct tracepoint __tracepoint_android_vh_do_send_sig_info' function symbol 'int snd_usb_power_domain_set(struct snd_usb_audio*, struct snd_usb_power_domain*, unsigned char)' changed CRC changed from 0xf4ff4ee5 to 0xb29e40d5 Bug: 281920779 Change-Id: 
I3e6cd71847ee2eeee26fc8aef985055c00504d14 Signed-off-by: wang qiankun --- android/abi_gki_aarch64.stg | 261 +++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_xiaomi | 14 ++ 2 files changed, 275 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 32ea49767c1d..00b219663b4e 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -7790,6 +7790,11 @@ pointer_reference { kind: POINTER pointee_type_id: 0x6976b87f } +pointer_reference { + id: 0x10de2fab + kind: POINTER + pointee_type_id: 0x69385830 +} pointer_reference { id: 0x10e15e7e kind: POINTER @@ -28460,6 +28465,11 @@ typedef { name: "bh_end_io_t" referred_type_id: 0x17592b81 } +typedef { + id: 0x95ef30d6 + name: "binder_size_t" + referred_type_id: 0xedf277ba +} typedef { id: 0x86d95287 name: "binder_uintptr_t" @@ -51593,6 +51603,11 @@ member { type_id: 0xd359db99 offset: 192 } +member { + id: 0x9ad307bc + name: "buf" + type_id: 0xc8c766a0 +} member { id: 0x9aeff0bf name: "buf" @@ -51795,6 +51810,11 @@ member { name: "buffer" type_id: 0x8e7b8b93 } +member { + id: 0x3358d289 + name: "buffer" + type_id: 0x86d95287 +} member { id: 0x335a61cc name: "buffer" @@ -58866,6 +58886,12 @@ member { type_id: 0xd3c80119 offset: 24256 } +member { + id: 0x5406c379 + name: "code" + type_id: 0xe62ebf07 + offset: 128 +} member { id: 0x5406c75b name: "code" @@ -61532,6 +61558,12 @@ member { type_id: 0x86d95287 offset: 768 } +member { + id: 0x58cc89fb + name: "cookie" + type_id: 0x86d95287 + offset: 64 +} member { id: 0x14bf8247 name: "cookie1" @@ -66156,6 +66188,12 @@ member { type_id: 0x41fadac3 offset: 64 } +member { + id: 0xffd54088 + name: "data" + type_id: 0x4765767f + offset: 384 +} member { id: 0xffd88cf6 name: "data" @@ -66487,6 +66525,12 @@ member { type_id: 0x5d8155a5 offset: 40 } +member { + id: 0x569ccc07 + name: "data_size" + type_id: 0x95ef30d6 + offset: 256 +} member { id: 0x56c02652 name: "data_size" @@ -92895,6 +92939,11 @@ member { type_id: 
0xe62ebf07 offset: 128 } +member { + id: 0xb805bbb2 + name: "handle" + type_id: 0xe62ebf07 +} member { id: 0xb80b9f8f name: "handle" @@ -134069,12 +134118,24 @@ member { type_id: 0x8c43dc29 offset: 320 } +member { + id: 0x35690218 + name: "offsets" + type_id: 0x86d95287 + offset: 64 +} member { id: 0x3572f05b name: "offsets" type_id: 0x9d2c4a95 offset: 832 } +member { + id: 0xaa221c83 + name: "offsets_size" + type_id: 0x95ef30d6 + offset: 320 +} member { id: 0xaa43c86e name: "offsets_size" @@ -149099,6 +149160,11 @@ member { type_id: 0x86d95287 offset: 704 } +member { + id: 0x46761387 + name: "ptr" + type_id: 0x86d95287 +} member { id: 0x46c17f73 name: "ptr" @@ -149119,6 +149185,11 @@ member { name: "ptr" type_id: 0x2f5073a5 } +member { + id: 0x46e47dff + name: "ptr" + type_id: 0x14b72a39 +} member { id: 0x46e877b0 name: "ptr" @@ -165827,6 +165898,18 @@ member { type_id: 0xe90b32b7 offset: 928 } +member { + id: 0x38f30d00 + name: "sender_euid" + type_id: 0xba3f457a + offset: 224 +} +member { + id: 0xd3bc24ef + name: "sender_pid" + type_id: 0x763389c7 + offset: 192 +} member { id: 0xc26d459e name: "sendmsg" @@ -180727,6 +180810,11 @@ member { type_id: 0x6720d32f offset: 67008 } +member { + id: 0xb3da3bc9 + name: "target" + type_id: 0x78ea2ea9 +} member { id: 0x30229734 name: "target_alloc" @@ -201501,6 +201589,15 @@ struct_union { member_id: 0x9152ae19 } } +struct_union { + id: 0x14b72a39 + kind: STRUCT + definition { + bytesize: 16 + member_id: 0x3358d289 + member_id: 0x35690218 + } +} struct_union { id: 0x14e96bc2 kind: STRUCT @@ -204130,6 +204227,15 @@ struct_union { member_id: 0x39141955 } } +struct_union { + id: 0x4765767f + kind: UNION + definition { + bytesize: 16 + member_id: 0x46e47dff + member_id: 0x9ad307bc + } +} struct_union { id: 0x47d8e06a kind: UNION @@ -206809,6 +206915,15 @@ struct_union { member_id: 0x72454096 } } +struct_union { + id: 0x78ea2ea9 + kind: UNION + definition { + bytesize: 8 + member_id: 0xb805bbb2 + member_id: 0x46761387 + } +} 
struct_union { id: 0x791ba47c kind: UNION @@ -209041,6 +209156,23 @@ struct_union { member_id: 0xed700768 } } +struct_union { + id: 0x69385830 + kind: STRUCT + name: "binder_transaction_data" + definition { + bytesize: 64 + member_id: 0xb3da3bc9 + member_id: 0x58cc89fb + member_id: 0x5406c379 + member_id: 0x2d8ea701 + member_id: 0xd3bc24ef + member_id: 0x38f30d00 + member_id: 0x569ccc07 + member_id: 0xaa221c83 + member_id: 0xffd54088 + } +} struct_union { id: 0x5fed90c9 kind: STRUCT @@ -300137,6 +300269,14 @@ function { parameter_id: 0x6720d32f parameter_id: 0x2ec35650 } +function { + id: 0x98731419 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0xf435685e + parameter_id: 0x379d63b0 + parameter_id: 0x6720d32f +} function { id: 0x987349b3 return_type_id: 0x6720d32f @@ -301998,6 +302138,14 @@ function { parameter_id: 0x18bd6530 parameter_id: 0x6720d32f } +function { + id: 0x9a340b23 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x6720d32f + parameter_id: 0x1d19a9d5 + parameter_id: 0x1d19a9d5 +} function { id: 0x9a343225 return_type_id: 0x6720d32f @@ -305580,6 +305728,15 @@ function { parameter_id: 0x18bd6530 parameter_id: 0x386883b9 } +function { + id: 0x9bd6fb19 + return_type_id: 0x6720d32f + parameter_id: 0x18bd6530 + parameter_id: 0x1f8dbf97 + parameter_id: 0x1f8dbf97 + parameter_id: 0x24373219 + parameter_id: 0x10de2fab +} function { id: 0x9bd6ff14 return_type_id: 0x6720d32f @@ -319960,6 +320117,15 @@ elf_symbol { type_id: 0x9b2d3bb4 full_name: "__traceiter_android_vh_audio_usb_offload_connect" } +elf_symbol { + id: 0x530ad17d + name: "__traceiter_android_vh_binder_alloc_new_buf_locked" + is_defined: true + symbol_type: FUNCTION + crc: 0x2a27381c + type_id: 0x98731419 + full_name: "__traceiter_android_vh_binder_alloc_new_buf_locked" +} elf_symbol { id: 0xbebf7d98 name: "__traceiter_android_vh_binder_free_proc" @@ -320032,6 +320198,15 @@ elf_symbol { type_id: 0x9bd88151 full_name: 
"__traceiter_android_vh_binder_read_done" } +elf_symbol { + id: 0x2d244867 + name: "__traceiter_android_vh_binder_reply" + is_defined: true + symbol_type: FUNCTION + crc: 0x0ec641ea + type_id: 0x9bd6fb19 + full_name: "__traceiter_android_vh_binder_reply" +} elf_symbol { id: 0xc6c9353c name: "__traceiter_android_vh_binder_restore_priority" @@ -320077,6 +320252,15 @@ elf_symbol { type_id: 0x9bd88151 full_name: "__traceiter_android_vh_binder_thread_release" } +elf_symbol { + id: 0xf6faffcb + name: "__traceiter_android_vh_binder_trans" + is_defined: true + symbol_type: FUNCTION + crc: 0x1570346e + type_id: 0x9bd6fb19 + full_name: "__traceiter_android_vh_binder_trans" +} elf_symbol { id: 0x5cf60b10 name: "__traceiter_android_vh_binder_transaction_init" @@ -320257,6 +320441,15 @@ elf_symbol { type_id: 0x9a35263f full_name: "__traceiter_android_vh_do_futex" } +elf_symbol { + id: 0x1cc3aec5 + name: "__traceiter_android_vh_do_send_sig_info" + is_defined: true + symbol_type: FUNCTION + crc: 0xd6ea1719 + type_id: 0x9a340b23 + full_name: "__traceiter_android_vh_do_send_sig_info" +} elf_symbol { id: 0x9dbd7b92 name: "__traceiter_android_vh_do_wake_up_sync" @@ -322318,6 +322511,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_audio_usb_offload_connect" } +elf_symbol { + id: 0xc8703937 + name: "__tracepoint_android_vh_binder_alloc_new_buf_locked" + is_defined: true + symbol_type: OBJECT + crc: 0x02c7faf0 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_binder_alloc_new_buf_locked" +} elf_symbol { id: 0xf32898c6 name: "__tracepoint_android_vh_binder_free_proc" @@ -322390,6 +322592,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_binder_read_done" } +elf_symbol { + id: 0x6de9ac69 + name: "__tracepoint_android_vh_binder_reply" + is_defined: true + symbol_type: OBJECT + crc: 0x39132f3d + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_binder_reply" +} elf_symbol { id: 0x57a9a36a name: 
"__tracepoint_android_vh_binder_restore_priority" @@ -322435,6 +322646,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_binder_thread_release" } +elf_symbol { + id: 0xec7035fd + name: "__tracepoint_android_vh_binder_trans" + is_defined: true + symbol_type: OBJECT + crc: 0x2bda2355 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_binder_trans" +} elf_symbol { id: 0xa9d55136 name: "__tracepoint_android_vh_binder_transaction_init" @@ -322615,6 +322835,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_vh_do_futex" } +elf_symbol { + id: 0x82ce823f + name: "__tracepoint_android_vh_do_send_sig_info" + is_defined: true + symbol_type: OBJECT + crc: 0x692a21ea + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_vh_do_send_sig_info" +} elf_symbol { id: 0xe2d7542c name: "__tracepoint_android_vh_do_wake_up_sync" @@ -374825,6 +375054,10 @@ symbols { key: "__traceiter_android_vh_audio_usb_offload_connect" value: 0x528da532 } + symbol { + key: "__traceiter_android_vh_binder_alloc_new_buf_locked" + value: 0x530ad17d + } symbol { key: "__traceiter_android_vh_binder_free_proc" value: 0xbebf7d98 @@ -374857,6 +375090,10 @@ symbols { key: "__traceiter_android_vh_binder_read_done" value: 0x5c1ee0c5 } + symbol { + key: "__traceiter_android_vh_binder_reply" + value: 0x2d244867 + } symbol { key: "__traceiter_android_vh_binder_restore_priority" value: 0xc6c9353c @@ -374877,6 +375114,10 @@ symbols { key: "__traceiter_android_vh_binder_thread_release" value: 0x25f13dbe } + symbol { + key: "__traceiter_android_vh_binder_trans" + value: 0xf6faffcb + } symbol { key: "__traceiter_android_vh_binder_transaction_init" value: 0x5cf60b10 @@ -374957,6 +375198,10 @@ symbols { key: "__traceiter_android_vh_do_futex" value: 0xd593b3ef } + symbol { + key: "__traceiter_android_vh_do_send_sig_info" + value: 0x1cc3aec5 + } symbol { key: "__traceiter_android_vh_do_wake_up_sync" value: 0x9dbd7b92 @@ -375873,6 +376118,10 @@ symbols { key: 
"__tracepoint_android_vh_audio_usb_offload_connect" value: 0xfb7cdd24 } + symbol { + key: "__tracepoint_android_vh_binder_alloc_new_buf_locked" + value: 0xc8703937 + } symbol { key: "__tracepoint_android_vh_binder_free_proc" value: 0xf32898c6 @@ -375905,6 +376154,10 @@ symbols { key: "__tracepoint_android_vh_binder_read_done" value: 0x54aac8cb } + symbol { + key: "__tracepoint_android_vh_binder_reply" + value: 0x6de9ac69 + } symbol { key: "__tracepoint_android_vh_binder_restore_priority" value: 0x57a9a36a @@ -375925,6 +376178,10 @@ symbols { key: "__tracepoint_android_vh_binder_thread_release" value: 0x2fce8f78 } + symbol { + key: "__tracepoint_android_vh_binder_trans" + value: 0xec7035fd + } symbol { key: "__tracepoint_android_vh_binder_transaction_init" value: 0xa9d55136 @@ -376005,6 +376262,10 @@ symbols { key: "__tracepoint_android_vh_do_futex" value: 0x9fe99d05 } + symbol { + key: "__tracepoint_android_vh_do_send_sig_info" + value: 0x82ce823f + } symbol { key: "__tracepoint_android_vh_do_wake_up_sync" value: 0xe2d7542c diff --git a/android/abi_gki_aarch64_xiaomi b/android/abi_gki_aarch64_xiaomi index 5c5d16ecf712..806e50a01b31 100644 --- a/android/abi_gki_aarch64_xiaomi +++ b/android/abi_gki_aarch64_xiaomi @@ -185,3 +185,17 @@ __tracepoint_android_rvh_set_cpus_allowed_comm __tracepoint_android_rvh_dequeue_task cpuset_cpus_allowed + +#required by millet.ko + __traceiter_android_vh_binder_wait_for_work + __traceiter_android_vh_do_send_sig_info + __traceiter_android_vh_binder_preset + __traceiter_android_vh_binder_trans + __traceiter_android_vh_binder_reply + __traceiter_android_vh_binder_alloc_new_buf_locked + __tracepoint_android_vh_binder_wait_for_work + __tracepoint_android_vh_do_send_sig_info + __tracepoint_android_vh_binder_preset + __tracepoint_android_vh_binder_trans + __tracepoint_android_vh_binder_reply + __tracepoint_android_vh_binder_alloc_new_buf_locked From 632ec01905b6312857afa0efe922ab1f5f16adc6 Mon Sep 17 00:00:00 2001 From: Zhuguangqing Date: 
Wed, 10 Mar 2021 14:38:02 +0800 Subject: [PATCH 61/63] ANDROID: freezer: export the freezer_cgrp_subsys for GKI purpose. Exporting the symbol freezer_cgrp_subsys, in that vendor module can add can_attach & cancel_attach member function. It is vendor-specific tuning. Bug: 182496370 Bug: 281920779 Signed-off-by: Zhuguangqing Change-Id: I153682b9d1015eed3f048b45ea6495ebb8f3c261 (cherry picked from commit ee3f4d2821f5b2a794f0a1f5ed423f561a01adae) (cherry picked from commit 8a90e4d4e555dd5484213c6fec5061958016a194) --- kernel/cgroup/legacy_freezer.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c index 936473203a6b..31ae16da6af3 100644 --- a/kernel/cgroup/legacy_freezer.c +++ b/kernel/cgroup/legacy_freezer.c @@ -475,3 +475,4 @@ struct cgroup_subsys freezer_cgrp_subsys = { .fork = freezer_fork, .legacy_cftypes = files, }; +EXPORT_SYMBOL_GPL(freezer_cgrp_subsys); From a9a44851ec76a09ac87600a4f7af72412745fbae Mon Sep 17 00:00:00 2001 From: heshuai1 Date: Fri, 7 May 2021 19:40:52 +0800 Subject: [PATCH 62/63] ANDROID: freezer: Add vendor hook to freezer for GKI purpose. Add the vendor hook to freezer.c so that OEM's logic can be executed when the process is about to be frozen. We need to clear the flag for some tasks and rebind task dependencies for optimization purposes. 
Bug: 187458531 Bug: 281920779 Signed-off-by: heshuai1 Change-Id: Iea42fd9604d6b33ccd6502425416f0dd28eecebb (cherry picked from commit a1580311c36ca28344b2f03b3c8a72d9f8db5bde) --- drivers/android/vendor_hooks.c | 1 + include/trace/hooks/cgroup.h | 4 ++++ kernel/freezer.c | 4 ++++ 3 files changed, 9 insertions(+) diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index 17f1487ba574..f475163c811e 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -67,6 +67,7 @@ * Export tracepoints that act as a bare tracehook (ie: have no trace event * associated with them) to allow external modules to probe them. */ +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_refrigerator); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_arch_set_freq_scale); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_init); diff --git a/include/trace/hooks/cgroup.h b/include/trace/hooks/cgroup.h index 51cc0340912c..15582cc572bd 100644 --- a/include/trace/hooks/cgroup.h +++ b/include/trace/hooks/cgroup.h @@ -12,6 +12,10 @@ DECLARE_HOOK(android_vh_cgroup_set_task, TP_PROTO(int ret, struct task_struct *task), TP_ARGS(ret, task)); +DECLARE_RESTRICTED_HOOK(android_rvh_refrigerator, + TP_PROTO(bool f), + TP_ARGS(f), 1); + struct cgroup_subsys; struct cgroup_taskset; DECLARE_HOOK(android_vh_cgroup_attach, diff --git a/kernel/freezer.c b/kernel/freezer.c index 4fad0e6fca64..b672c614b1e9 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c @@ -12,6 +12,9 @@ #include #include +#undef CREATE_TRACE_POINT +#include + /* total number of freezing conditions in effect */ DEFINE_STATIC_KEY_FALSE(freezer_active); EXPORT_SYMBOL(freezer_active); @@ -75,6 +78,7 @@ bool __refrigerator(bool check_kthr_stop) spin_lock_irq(&freezer_lock); freeze = freezing(current) && !(check_kthr_stop && kthread_should_stop()); + trace_android_rvh_refrigerator(pm_nosig_freezing); spin_unlock_irq(&freezer_lock); if (!freeze) From 
7d346b229c7855559989de2c8081d1632231cf7b Mon Sep 17 00:00:00 2001 From: wang qiankun Date: Wed, 10 May 2023 11:58:17 +0800 Subject: [PATCH 63/63] ANDROID: GKI: update the ABI symbol list INFO: ABI DIFFERENCES HAVE BEEN DETECTED! INFO: 1 function symbol(s) added 'int __traceiter_android_rvh_refrigerator(void*, bool)' 2 variable symbol(s) added 'struct tracepoint __tracepoint_android_rvh_refrigerator' 'struct cgroup_subsys freezer_cgrp_subsys' Bug: 281920779 Change-Id: I58d1bfba887b7f0b6af471e8b18fab368b119b7d Signed-off-by: wang qiankun --- android/abi_gki_aarch64.stg | 39 ++++++++++++++++++++++++++++++++++ android/abi_gki_aarch64_xiaomi | 5 +++++ 2 files changed, 44 insertions(+) diff --git a/android/abi_gki_aarch64.stg b/android/abi_gki_aarch64.stg index 00b219663b4e..0157c7db0ab3 100644 --- a/android/abi_gki_aarch64.stg +++ b/android/abi_gki_aarch64.stg @@ -319604,6 +319604,15 @@ elf_symbol { type_id: 0x9bdbdcc4 full_name: "__traceiter_android_rvh_prepare_prio_fork" } +elf_symbol { + id: 0x13f466b7 + name: "__traceiter_android_rvh_refrigerator" + is_defined: true + symbol_type: FUNCTION + crc: 0x811d8704 + type_id: 0x9a1a471c + full_name: "__traceiter_android_rvh_refrigerator" +} elf_symbol { id: 0xe3e24295 name: "__traceiter_android_rvh_replace_next_task_fair" @@ -321998,6 +322007,15 @@ elf_symbol { type_id: 0x18ccbd2c full_name: "__tracepoint_android_rvh_prepare_prio_fork" } +elf_symbol { + id: 0x3b6248c1 + name: "__tracepoint_android_rvh_refrigerator" + is_defined: true + symbol_type: OBJECT + crc: 0xaa020dd1 + type_id: 0x18ccbd2c + full_name: "__tracepoint_android_rvh_refrigerator" +} elf_symbol { id: 0x18bac297 name: "__tracepoint_android_rvh_replace_next_task_fair" @@ -339608,6 +339626,15 @@ elf_symbol { type_id: 0x8e47c273 full_name: "freezer_active" } +elf_symbol { + id: 0x3918f832 + name: "freezer_cgrp_subsys" + is_defined: true + symbol_type: OBJECT + crc: 0x85c823b4 + type_id: 0x00571446 + full_name: "freezer_cgrp_subsys" +} elf_symbol { id: 0x238d82fe 
name: "freezing_slow_path" @@ -374826,6 +374853,10 @@ symbols { key: "__traceiter_android_rvh_prepare_prio_fork" value: 0xeccbc3c1 } + symbol { + key: "__traceiter_android_rvh_refrigerator" + value: 0x13f466b7 + } symbol { key: "__traceiter_android_rvh_replace_next_task_fair" value: 0xe3e24295 @@ -375890,6 +375921,10 @@ symbols { key: "__tracepoint_android_rvh_prepare_prio_fork" value: 0x50605d97 } + symbol { + key: "__tracepoint_android_rvh_refrigerator" + value: 0x3b6248c1 + } symbol { key: "__tracepoint_android_rvh_replace_next_task_fair" value: 0x18bac297 @@ -383714,6 +383749,10 @@ symbols { key: "freezer_active" value: 0x2ed5588c } + symbol { + key: "freezer_cgrp_subsys" + value: 0x3918f832 + } symbol { key: "freezing_slow_path" value: 0x238d82fe diff --git a/android/abi_gki_aarch64_xiaomi b/android/abi_gki_aarch64_xiaomi index 806e50a01b31..c78e92e07ffe 100644 --- a/android/abi_gki_aarch64_xiaomi +++ b/android/abi_gki_aarch64_xiaomi @@ -167,6 +167,11 @@ __tracepoint_android_rvh_dequeue_task_fair __tracepoint_android_rvh_entity_tick +#required by millet.ko + __traceiter_android_rvh_refrigerator + __tracepoint_android_rvh_refrigerator + freezer_cgrp_subsys + #required by metis.ko module __traceiter_android_vh_rwsem_read_wait_start __traceiter_android_vh_rwsem_write_wait_start