From 7f6c411c9b50cfab41cc798e003eff27608c7016 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Thu, 25 Mar 2021 14:12:34 -0400
Subject: [PATCH 1/4] hostfs: fix memory handling in follow_link()

1) The argument should not be freed in any case - the caller already
   has it as ->s_fs_info (and uses it a lot afterwards).

2) Allocate the readlink buffer with kmalloc() - the caller has no way
   to tell whether it got that buffer (on an absolute symlink) or a
   result of kasprintf().  Sure, for SLAB and SLUB kfree() works on
   results of kmem_cache_alloc(), but that is not documented anywhere,
   might change in the future *and* is already not true for SLOB.

Fixes: 52b209f7b848 ("get rid of hostfs_read_inode()")
Signed-off-by: Al Viro
---
 fs/hostfs/hostfs_kern.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 29e407762626..743a005a5c64 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -144,7 +144,7 @@ static char *follow_link(char *link)
     char *name, *resolved, *end;
     int n;

-    name = __getname();
+    name = kmalloc(PATH_MAX, GFP_KERNEL);
     if (!name) {
         n = -ENOMEM;
         goto out_free;
@@ -173,12 +173,11 @@ static char *follow_link(char *link)
         goto out_free;
     }

-    __putname(name);
-    kfree(link);
+    kfree(name);
     return resolved;

 out_free:
-    __putname(name);
+    kfree(name);
     return ERR_PTR(n);
 }

From 0687c66b5f666b5ad433f4e94251590d9bc9d10e Mon Sep 17 00:00:00 2001
From: Zqiang
Date: Thu, 18 Feb 2021 11:16:49 +0800
Subject: [PATCH 2/4] workqueue: Move the position of debug_work_activate() in
 __queue_work()

debug_work_activate() should be called only once it is known that the
work can actually be inserted: if the workqueue is draining
(__WQ_DRAINING), inserting the work may fail.  Move the call next to
insert_work(), so the debug state is only updated for works that are
really queued.

Fixes: e41e704bc4f4 ("workqueue: improve destroy_workqueue() debuggability")
Signed-off-by: Zqiang
Reviewed-by: Lai Jiangshan
Signed-off-by: Tejun Heo
---
 kernel/workqueue.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0d150da252e8..21fb00b52def 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1412,7 +1412,6 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
      */
     lockdep_assert_irqs_disabled();

-    debug_work_activate(work);

     /* if draining, only works from the same workqueue are allowed */
     if (unlikely(wq->flags & __WQ_DRAINING) &&
@@ -1494,6 +1493,7 @@ retry:
         worklist = &pwq->delayed_works;
     }

+    debug_work_activate(work);
     insert_work(pwq, work, worklist, work_flags);

 out:
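For context on patch 1: a condensed sketch of follow_link() as it looks
with the fix applied.  It is distilled from the hunks above plus their
surrounding context, so anything not visible in the diff (the
hostfs_do_readlink() call, the absolute/relative handling) is
reconstructed for illustration, not quoted verbatim.  The point it
shows: the caller frees whichever buffer it receives with kfree(), so
both return paths must hand back kmalloc()-compatible memory.

    static char *follow_link(char *link)
    {
        char *name, *resolved, *end;
        int n;

        name = kmalloc(PATH_MAX, GFP_KERNEL);   /* was __getname() */
        if (!name) {
            n = -ENOMEM;
            goto out_free;
        }

        n = hostfs_do_readlink(link, name, PATH_MAX);
        if (n < 0)
            goto out_free;

        if (*name == '/')
            return name;    /* absolute: caller gets the kmalloc() buffer */

        end = strrchr(link, '/');
        if (end == NULL)
            return ERR_PTR(-ENOENT);
        *(end + 1) = '\0';

        resolved = kasprintf(GFP_KERNEL, "%s%s", link, name);
        if (resolved == NULL) {
            n = -ENOMEM;
            goto out_free;
        }

        kfree(name);        /* link is ->s_fs_info; never freed here */
        return resolved;    /* relative: caller gets the kasprintf() result */

    out_free:
        kfree(name);        /* kfree(NULL) is a no-op, so this path is safe */
        return ERR_PTR(n);
    }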
From 89e28ce60cb65971c73359c66d076aa20a395cd5 Mon Sep 17 00:00:00 2001
From: Wang Qing
Date: Wed, 24 Mar 2021 19:40:29 +0800
Subject: [PATCH 3/4] workqueue/watchdog: Make unbound workqueues aware of
 touch_softlockup_watchdog()

There are two workqueue-specific watchdog timestamps:

  + @wq_watchdog_touched_cpu (per-CPU), updated by
    touch_softlockup_watchdog()

  + @wq_watchdog_touched (global), updated by
    touch_all_softlockup_watchdogs()

watchdog_timer_fn() checks only the global @wq_watchdog_touched for
unbound workqueues.  As a result, unbound workqueues are not aware of
touch_softlockup_watchdog().  The watchdog might report a stall even
when the unbound workqueues are blocked by known slow code.

Solution: touch_softlockup_watchdog() must also touch the global
@wq_watchdog_touched timestamp.

The global timestamp can no longer be used for bound workqueues because
it is now updated from all CPUs.  Instead, bound workqueues have to
check only @wq_watchdog_touched_cpu, and these per-CPU timestamps have
to be updated for all CPUs in touch_all_softlockup_watchdogs().

Beware: the change might cause the opposite problem.  An unbound
workqueue might get blocked on CPU A because of a real softlockup.
The workqueue watchdog would miss it when the timestamp got touched on
CPU B.  That is acceptable because softlockups are detected by the
softlockup watchdog; the workqueue watchdog is there to detect stalls
where a work never finishes, for example because of dependencies
between works queued into the same workqueue.

V3:
- Clarify the commit message according to Petr's suggestion.

Signed-off-by: Wang Qing
Signed-off-by: Tejun Heo
---
 kernel/watchdog.c  |  5 +++--
 kernel/workqueue.c | 17 ++++++-----------
 2 files changed, 9 insertions(+), 13 deletions(-)

diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 71109065bd8e..107bc38b1945 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -278,9 +278,10 @@ void touch_all_softlockup_watchdogs(void)
      * update as well, the only side effect might be a cycle delay for
      * the softlockup check.
      */
-    for_each_cpu(cpu, &watchdog_allowed_mask)
+    for_each_cpu(cpu, &watchdog_allowed_mask) {
         per_cpu(watchdog_touch_ts, cpu) = SOFTLOCKUP_RESET;
-    wq_watchdog_touch(-1);
+        wq_watchdog_touch(cpu);
+    }
 }

 void touch_softlockup_watchdog_sync(void)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 21fb00b52def..79f2319543ce 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -5787,22 +5787,17 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
             continue;

         /* get the latest of pool and touched timestamps */
+        if (pool->cpu >= 0)
+            touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
+        else
+            touched = READ_ONCE(wq_watchdog_touched);
         pool_ts = READ_ONCE(pool->watchdog_ts);
-        touched = READ_ONCE(wq_watchdog_touched);

         if (time_after(pool_ts, touched))
             ts = pool_ts;
         else
             ts = touched;

-        if (pool->cpu >= 0) {
-            unsigned long cpu_touched =
-                READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
-                          pool->cpu));
-            if (time_after(cpu_touched, ts))
-                ts = cpu_touched;
-        }
-
         /* did we stall? */
         if (time_after(jiffies, ts + thresh)) {
             lockup_detected = true;
@@ -5826,8 +5821,8 @@ notrace void wq_watchdog_touch(int cpu)
 {
     if (cpu >= 0)
         per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
-    else
-        wq_watchdog_touched = jiffies;
+
+    wq_watchdog_touched = jiffies;
 }

 static void wq_watchdog_set_thresh(unsigned long thresh)
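The resulting timestamp routing, condensed into one helper for
illustration (pool_touched_ts() is a hypothetical name; the real code
open-codes this logic inside wq_watchdog_timer_fn()):

    /*
     * After this patch:
     *  - wq_watchdog_touch(cpu) always refreshes the global stamp, and
     *    the per-CPU stamp as well when cpu >= 0;
     *  - touch_all_softlockup_watchdogs() calls wq_watchdog_touch(cpu)
     *    for every CPU, so the per-CPU stamps are refreshed there too;
     *  - the timer function therefore consults exactly one stamp per pool.
     */
    static unsigned long pool_touched_ts(struct worker_pool *pool)
    {
        if (pool->cpu >= 0)     /* bound pool: per-CPU stamp only */
            return READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));

        return READ_ONCE(wq_watchdog_touched);  /* unbound: global stamp */
    }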
From 7d01ef7585c07afaf487759a48486228cd065726 Mon Sep 17 00:00:00 2001
From: Al Viro
Date: Tue, 6 Apr 2021 12:33:07 -0400
Subject: [PATCH 4/4] Make sure nd->path.mnt and nd->path.dentry are always
 valid pointers

Initialize them in set_nameidata() and make sure that terminate_walk()
clears them once the pointers become potentially invalid (i.e. we leave
RCU mode or drop them in non-RCU one).  Currently we have "path_init()
always initializes them and nobody accesses them outside of
path_init()/terminate_walk() segments", which is asking for trouble.

With that change we would have nd->path.{mnt,dentry}
    1) always valid - NULL or pointing to currently allocated objects
    2) non-NULL while we are successfully walking
    3) NULL when we are not walking at all
    4) contributing to refcounts whenever non-NULL outside of RCU mode

Fixes: 6c6ec2b0a3e0 ("fs: add support for LOOKUP_CACHED")
Reported-by: syzbot+c88a7030da47945a3cc3@syzkaller.appspotmail.com
Tested-by: Christian Brauner
Signed-off-by: Al Viro
---
 fs/namei.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/fs/namei.c b/fs/namei.c
index 216f16e74351..fc8760d4314e 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -579,6 +579,8 @@ static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
     p->stack = p->internal;
     p->dfd = dfd;
     p->name = name;
+    p->path.mnt = NULL;
+    p->path.dentry = NULL;
     p->total_link_count = old ? old->total_link_count : 0;
     p->saved = old;
     current->nameidata = p;
@@ -652,6 +654,8 @@ static void terminate_walk(struct nameidata *nd)
         rcu_read_unlock();
     }
     nd->depth = 0;
+    nd->path.mnt = NULL;
+    nd->path.dentry = NULL;
 }

 /* path_put is needed afterwards regardless of success or failure */
@@ -2322,8 +2326,6 @@ static const char *path_init(struct nameidata *nd, unsigned flags)
     }

     nd->root.mnt = NULL;
-    nd->path.mnt = NULL;
-    nd->path.dentry = NULL;

     /* Absolute pathname -- fetch the root (LOOKUP_IN_ROOT uses nd->dfd). */
     if (*s == '/' && !(flags & LOOKUP_IN_ROOT)) {
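To make the invariant concrete, a hypothetical caller sequence (the
numbered comments map to the four points in the commit message; this is
an illustrative sketch, not a quote of any kernel code path):

    struct nameidata nd;
    const char *s;
    int err;

    set_nameidata(&nd, dfd, name);  /* path.{mnt,dentry} == NULL: not walking (3) */
    s = path_init(&nd, flags);      /* on success both non-NULL: walking (2),
                                     * contributing to refcounts outside RCU (4) */
    err = link_path_walk(s, &nd);   /* pointers stay valid: NULL or live (1) */
    terminate_walk(&nd);            /* refs dropped, both reset to NULL (3) */
    restore_nameidata();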