From 4a289d123f62f7fdf33e7ce02c4c4c0d3b708a2b Mon Sep 17 00:00:00 2001 From: Fedor Pchelkin Date: Tue, 25 Jul 2023 14:58:58 +0300 Subject: [PATCH 001/123] NFSv4.2: fix error handling in nfs42_proc_getxattr [ Upstream commit 4e3733fd2b0f677faae21cf838a43faf317986d3 ] There is a slight issue with error handling code inside nfs42_proc_getxattr(). If page allocating loop fails then we free the failing page array element which is NULL but __free_page() can't deal with NULL args. Found by Linux Verification Center (linuxtesting.org). Fixes: a1f26739ccdc ("NFSv4.2: improve page handling for GETXATTR") Signed-off-by: Fedor Pchelkin Reviewed-by: Benjamin Coddington Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin --- fs/nfs/nfs42proc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index ecb428512fe1..7c33bba179d2 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -1359,7 +1359,6 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name, for (i = 0; i < np; i++) { pages[i] = alloc_page(GFP_KERNEL); if (!pages[i]) { - np = i + 1; err = -ENOMEM; goto out; } @@ -1383,8 +1382,8 @@ ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name, } while (exception.retry); out: - while (--np >= 0) - __free_page(pages[np]); + while (--i >= 0) + __free_page(pages[i]); kfree(pages); return err; From d9aac9cdd6e2b4cf21dc91ad0803a7df2b177bac Mon Sep 17 00:00:00 2001 From: Fedor Pchelkin Date: Tue, 25 Jul 2023 14:59:30 +0300 Subject: [PATCH 002/123] NFSv4: fix out path in __nfs4_get_acl_uncached [ Upstream commit f4e89f1a6dab4c063fc1e823cc9dddc408ff40cf ] Another highly rare error case when a page allocating loop (inside __nfs4_get_acl_uncached, this time) is not properly unwound on error. Since pages array is allocated being uninitialized, need to free only lower array indices. NULL checks were useful before commit 62a1573fcf84 ("NFSv4 fix acl retrieval over krb5i/krb5p mounts") when the array had been initialized to zero on stack. Found by Linux Verification Center (linuxtesting.org). Fixes: 62a1573fcf84 ("NFSv4 fix acl retrieval over krb5i/krb5p mounts") Signed-off-by: Fedor Pchelkin Reviewed-by: Benjamin Coddington Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin --- fs/nfs/nfs4proc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 177cb7b089b9..d67383665e9b 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -5995,9 +5995,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, out_ok: ret = res.acl_len; out_free: - for (i = 0; i < npages; i++) - if (pages[i]) - __free_page(pages[i]); + while (--i >= 0) + __free_page(pages[i]); if (res.acl_scratch) __free_page(res.acl_scratch); kfree(pages); From 26ea8668b8aae5f57068c2f442a28a8e420a759b Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Mon, 3 Jul 2023 14:18:29 -0400 Subject: [PATCH 003/123] xprtrdma: Remap Receive buffers after a reconnect [ Upstream commit 895cedc1791916e8a98864f12b656702fad0bb67 ] On server-initiated disconnect, rpcrdma_xprt_disconnect() was DMA- unmapping the Receive buffers, but rpcrdma_post_recvs() neglected to remap them after a new connection had been established. The result was immediate failure of the new connection with the Receives flushing with LOCAL_PROT_ERR. 
Fixes: 671c450b6fe0 ("xprtrdma: Fix oops in Receive handler after device removal") Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust Signed-off-by: Sasha Levin --- net/sunrpc/xprtrdma/verbs.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index b098fde373ab..28c0771c4e8c 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c @@ -935,9 +935,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, if (!rep->rr_rdmabuf) goto out_free; - if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) - goto out_free_regbuf; - rep->rr_cid.ci_completion_id = atomic_inc_return(&r_xprt->rx_ep->re_completion_ids); @@ -956,8 +953,6 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, spin_unlock(&buf->rb_lock); return rep; -out_free_regbuf: - rpcrdma_regbuf_free(rep->rr_rdmabuf); out_free: kfree(rep); out: @@ -1363,6 +1358,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed, bool temp) rep = rpcrdma_rep_create(r_xprt, temp); if (!rep) break; + if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) { + rpcrdma_rep_put(buf, rep); + break; + } rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id; trace_xprtrdma_post_recv(rep); From cd1f889c99eee5a6fae671962a63bc89e68d7837 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Jan 2023 16:41:02 +0100 Subject: [PATCH 004/123] drm/ast: Use drm_aperture_remove_conflicting_pci_framebuffers [ Upstream commit c1ebead36099deb85384f6fb262fe619a04cee73 ] It's just open coded and matches. Note that Thomas said that his version apparently failed for some reason, but hey maybe we should try again. Signed-off-by: Daniel Vetter Cc: Dave Airlie Cc: Thomas Zimmermann Cc: Javier Martinez Canillas Cc: Helge Deller Cc: linux-fbdev@vger.kernel.org Tested-by: Thomas Zimmmermann Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20230111154112.90575-1-daniel.vetter@ffwll.ch Stable-dep-of: 5ae3716cfdcd ("video/aperture: Only remove sysfb on the default vga pci device") Signed-off-by: Sasha Levin --- drivers/gpu/drm/ast/ast_drv.c | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index b9392f31e629..800471f2a203 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -89,27 +89,13 @@ static const struct pci_device_id ast_pciidlist[] = { MODULE_DEVICE_TABLE(pci, ast_pciidlist); -static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev) -{ - bool primary = false; - resource_size_t base, size; - - base = pci_resource_start(pdev, 0); - size = pci_resource_len(pdev, 0); -#ifdef CONFIG_X86 - primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; -#endif - - return drm_aperture_remove_conflicting_framebuffers(base, size, primary, &ast_driver); -} - static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct ast_private *ast; struct drm_device *dev; int ret; - ret = ast_remove_conflicting_framebuffers(pdev); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &ast_driver); if (ret) return ret; From 6db53af15444e7022640d7b8d5e7531d94e27a43 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Wed, 11 Jan 2023 16:41:08 +0100 Subject: [PATCH 005/123] fbdev/radeon: use pci aperture helpers [ Upstream commit 9b539c4d1b921bc9c8c578d4d50f0a7e7874d384 ] It's not exactly the same since the 
open coded version doesn't set primary correctly. But that's a bugfix, so shouldn't hurt really. Signed-off-by: Daniel Vetter Cc: Benjamin Herrenschmidt Cc: linux-fbdev@vger.kernel.org Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20230111154112.90575-7-daniel.vetter@ffwll.ch Stable-dep-of: 5ae3716cfdcd ("video/aperture: Only remove sysfb on the default vga pci device") Signed-off-by: Sasha Levin --- drivers/video/fbdev/aty/radeon_base.c | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c index 8b28c9bddd97..50c384ce2883 100644 --- a/drivers/video/fbdev/aty/radeon_base.c +++ b/drivers/video/fbdev/aty/radeon_base.c @@ -2238,14 +2238,6 @@ static const struct bin_attribute edid2_attr = { .read = radeon_show_edid2, }; -static int radeon_kick_out_firmware_fb(struct pci_dev *pdev) -{ - resource_size_t base = pci_resource_start(pdev, 0); - resource_size_t size = pci_resource_len(pdev, 0); - - return aperture_remove_conflicting_devices(base, size, false, KBUILD_MODNAME); -} - static int radeonfb_pci_register(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2296,7 +2288,7 @@ static int radeonfb_pci_register(struct pci_dev *pdev, rinfo->fb_base_phys = pci_resource_start (pdev, 0); rinfo->mmio_base_phys = pci_resource_start (pdev, 2); - ret = radeon_kick_out_firmware_fb(pdev); + ret = aperture_remove_conflicting_pci_devices(pdev, KBUILD_MODNAME); if (ret) goto err_release_fb; From cccfcbb9e51af026529618e3bdc09b6beacac919 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 6 Apr 2023 15:21:01 +0200 Subject: [PATCH 006/123] drm/gma500: Use drm_aperture_remove_conflicting_pci_framebuffers [ Upstream commit 80e993988b97fe794f3ec2be6db05fe30f9353c3 ] This one nukes all framebuffers, which is a bit much. In reality gma500 is igpu and never shipped with anything discrete, so there should not be any difference. v2: Unfortunately the framebuffer sits outside of the pci bars for gma500, and so only using the pci helpers won't be enough. Otoh if we only use non-pci helper, then we don't get the vga handling, and subsequent refactoring to untangle these special cases won't work. It's not pretty, but the simplest fix (since gma500 really is the only quirky pci driver like this we have) is to just have both calls. v4: - fix Daniel's S-o-b address v5: - add back an S-o-b tag with Daniel's Intel address Signed-off-by: Daniel Vetter Signed-off-by: Daniel Vetter Signed-off-by: Thomas Zimmermann Cc: Patrik Jakobsson Cc: Thomas Zimmermann Cc: Javier Martinez Canillas Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20230406132109.32050-2-tzimmermann@suse.de Stable-dep-of: 5ae3716cfdcd ("video/aperture: Only remove sysfb on the default vga pci device") Signed-off-by: Sasha Levin --- drivers/gpu/drm/gma500/psb_drv.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index cd9c73f5a64a..000e6704e3c7 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -424,12 +424,17 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* * We cannot yet easily find the framebuffer's location in memory. So - * remove all framebuffers here. + * remove all framebuffers here. Note that we still want the pci special + * handling to kick out vgacon. 
* * TODO: Refactor psb_driver_load() to map vdc_reg earlier. Then we * might be able to read the framebuffer range from the device. */ - ret = drm_aperture_remove_framebuffers(true, &driver); + ret = drm_aperture_remove_framebuffers(false, &driver); + if (ret) + return ret; + + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); if (ret) return ret; From 437e99f2a1e933348c4cedb2c7ce6f0ad81b935e Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 6 Apr 2023 15:21:03 +0200 Subject: [PATCH 007/123] drm/aperture: Remove primary argument [ Upstream commit 62aeaeaa1b267c5149abee6b45967a5df3feed58 ] Only really pci devices have a business setting this - it's for figuring out whether the legacy vga stuff should be nuked too. And with the preceding two patches those are all using the pci version of this. Which means for all other callers primary == false and we can remove it now. v2: - Reorder to avoid compile fail (Thomas) - Include gma500, which retained it's called to the non-pci version. v4: - fix Daniel's S-o-b address v5: - add back an S-o-b tag with Daniel's Intel address Signed-off-by: Daniel Vetter Signed-off-by: Daniel Vetter Signed-off-by: Thomas Zimmermann Cc: Thomas Zimmermann Cc: Javier Martinez Canillas Cc: Maarten Lankhorst Cc: Maxime Ripard Cc: Deepak Rawat Cc: Neil Armstrong Cc: Kevin Hilman Cc: Jerome Brunet Cc: Martin Blumenstingl Cc: Thierry Reding Cc: Jonathan Hunter Cc: Emma Anholt Cc: Helge Deller Cc: David Airlie Cc: Daniel Vetter Cc: linux-hyperv@vger.kernel.org Cc: linux-amlogic@lists.infradead.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-tegra@vger.kernel.org Cc: linux-fbdev@vger.kernel.org Acked-by: Martin Blumenstingl Acked-by: Thierry Reding Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20230406132109.32050-4-tzimmermann@suse.de Stable-dep-of: 5ae3716cfdcd ("video/aperture: Only remove sysfb on the default vga pci device") Signed-off-by: Sasha Levin --- drivers/gpu/drm/arm/hdlcd_drv.c | 2 +- drivers/gpu/drm/armada/armada_drv.c | 2 +- drivers/gpu/drm/drm_aperture.c | 11 +++-------- drivers/gpu/drm/gma500/psb_drv.c | 2 +- drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 1 - drivers/gpu/drm/meson/meson_drv.c | 2 +- drivers/gpu/drm/msm/msm_fbdev.c | 2 +- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 2 +- drivers/gpu/drm/stm/drv.c | 2 +- drivers/gpu/drm/sun4i/sun4i_drv.c | 2 +- drivers/gpu/drm/tegra/drm.c | 2 +- drivers/gpu/drm/vc4/vc4_drv.c | 2 +- include/drm/drm_aperture.h | 7 +++---- 13 files changed, 16 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c index a032003c340c..d6ea47873627 100644 --- a/drivers/gpu/drm/arm/hdlcd_drv.c +++ b/drivers/gpu/drm/arm/hdlcd_drv.c @@ -290,7 +290,7 @@ static int hdlcd_drm_bind(struct device *dev) */ if (hdlcd_read(hdlcd, HDLCD_REG_COMMAND)) { hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); - drm_aperture_remove_framebuffers(false, &hdlcd_driver); + drm_aperture_remove_framebuffers(&hdlcd_driver); } drm_mode_config_reset(drm); diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 142668cd6d7c..688ba358f531 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -95,7 +95,7 @@ static int armada_drm_bind(struct device *dev) } /* Remove early framebuffers */ - ret = drm_aperture_remove_framebuffers(false, &armada_drm_driver); + ret = drm_aperture_remove_framebuffers(&armada_drm_driver); if (ret) { dev_err(dev, "[" DRM_NAME ":%s] can't kick 
out simple-fb: %d\n", __func__, ret); diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c index 3b8fdeeafd53..697cffbfd603 100644 --- a/drivers/gpu/drm/drm_aperture.c +++ b/drivers/gpu/drm/drm_aperture.c @@ -32,17 +32,13 @@ * * static int remove_conflicting_framebuffers(struct pci_dev *pdev) * { - * bool primary = false; * resource_size_t base, size; * int ret; * * base = pci_resource_start(pdev, 0); * size = pci_resource_len(pdev, 0); - * #ifdef CONFIG_X86 - * primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; - * #endif * - * return drm_aperture_remove_conflicting_framebuffers(base, size, primary, + * return drm_aperture_remove_conflicting_framebuffers(base, size, * &example_driver); * } * @@ -161,7 +157,6 @@ EXPORT_SYMBOL(devm_aperture_acquire_from_firmware); * drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range * @base: the aperture's base address in physical memory * @size: aperture size in bytes - * @primary: also kick vga16fb if present * @req_driver: requesting DRM driver * * This function removes graphics device drivers which use the memory range described by @@ -171,9 +166,9 @@ EXPORT_SYMBOL(devm_aperture_acquire_from_firmware); * 0 on success, or a negative errno code otherwise */ int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, - bool primary, const struct drm_driver *req_driver) + const struct drm_driver *req_driver) { - return aperture_remove_conflicting_devices(base, size, primary, req_driver->name); + return aperture_remove_conflicting_devices(base, size, false, req_driver->name); } EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers); diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 000e6704e3c7..738eb558a97e 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -430,7 +430,7 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) * TODO: Refactor psb_driver_load() to map vdc_reg earlier. Then we * might be able to read the framebuffer range from the device. */ - ret = drm_aperture_remove_framebuffers(false, &driver); + ret = drm_aperture_remove_framebuffers(&driver); if (ret) return ret; diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index ca127ff797f7..29ee0814bccc 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -74,7 +74,6 @@ static int hyperv_setup_vram(struct hyperv_drm_device *hv, drm_aperture_remove_conflicting_framebuffers(screen_info.lfb_base, screen_info.lfb_size, - false, &hyperv_driver); hv->fb_size = (unsigned long)hv->mmio_megabytes * 1024 * 1024; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index eea433ade79d..119544d88b58 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -285,7 +285,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components) * Remove early framebuffers (ie. simplefb). 
The framebuffer can be * located anywhere in RAM */ - ret = drm_aperture_remove_framebuffers(false, &meson_driver); + ret = drm_aperture_remove_framebuffers(&meson_driver); if (ret) goto free_drm; diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 46168eccfac4..d4a9b501e1bc 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -157,7 +157,7 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev) } /* the fw fb could be anywhere in memory */ - ret = drm_aperture_remove_framebuffers(false, dev->driver); + ret = drm_aperture_remove_framebuffers(dev->driver); if (ret) goto fini; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 813f9f8c8698..8e12053a220b 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -140,7 +140,7 @@ static int rockchip_drm_bind(struct device *dev) int ret; /* Remove existing drivers that may own the framebuffer memory. */ - ret = drm_aperture_remove_framebuffers(false, &rockchip_drm_driver); + ret = drm_aperture_remove_framebuffers(&rockchip_drm_driver); if (ret) { DRM_DEV_ERROR(dev, "Failed to remove existing framebuffers - %d.\n", diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index d7914f5122df..0a09a85ac9d6 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -185,7 +185,7 @@ static int stm_drm_platform_probe(struct platform_device *pdev) DRM_DEBUG("%s\n", __func__); - ret = drm_aperture_remove_framebuffers(false, &drv_driver); + ret = drm_aperture_remove_framebuffers(&drv_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 7910c5853f0a..5c483bbccbbb 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -98,7 +98,7 @@ static int sun4i_drv_bind(struct device *dev) goto unbind_all; /* Remove early framebuffers (ie. 
simplefb) */ - ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver); + ret = drm_aperture_remove_framebuffers(&sun4i_drv_driver); if (ret) goto unbind_all; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index a1f909dac89a..5fc55b9777cb 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1252,7 +1252,7 @@ static int host1x_drm_probe(struct host1x_device *dev) drm_mode_config_reset(drm); - err = drm_aperture_remove_framebuffers(false, &tegra_drm_driver); + err = drm_aperture_remove_framebuffers(&tegra_drm_driver); if (err < 0) goto hub; diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 8c329c071c62..b6384a5dfdbc 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -351,7 +351,7 @@ static int vc4_drm_bind(struct device *dev) return -EPROBE_DEFER; } - ret = drm_aperture_remove_framebuffers(false, driver); + ret = drm_aperture_remove_framebuffers(driver); if (ret) return ret; diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h index 7096703c3949..cbe33b49fd5d 100644 --- a/include/drm/drm_aperture.h +++ b/include/drm/drm_aperture.h @@ -13,14 +13,13 @@ int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t resource_size_t size); int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size, - bool primary, const struct drm_driver *req_driver); + const struct drm_driver *req_driver); int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const struct drm_driver *req_driver); /** * drm_aperture_remove_framebuffers - remove all existing framebuffers - * @primary: also kick vga16fb if present * @req_driver: requesting DRM driver * * This function removes all graphics device drivers. Use this function on systems @@ -30,9 +29,9 @@ int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, * 0 on success, or a negative errno code otherwise */ static inline int -drm_aperture_remove_framebuffers(bool primary, const struct drm_driver *req_driver) +drm_aperture_remove_framebuffers(const struct drm_driver *req_driver) { - return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary, + return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, req_driver); } From 4aad3b82b9de7c4ffccb02f87d0c6903569333c9 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 6 Apr 2023 15:21:04 +0200 Subject: [PATCH 008/123] video/aperture: Only kick vgacon when the pdev is decoding vga [ Upstream commit 7450cd235b45d43ee6f3c235f89e92623458175d ] Otherwise it's a bit silly, and we might throw out the driver for the screen the user is actually looking at. I haven't found a bug report for this case yet, but we did get bug reports for the analog case where we're throwing out the efifb driver. 
v2: Flip the check around to make it clear it's a special case for kicking out the vgacon driver only (Thomas) v4: - fixes to commit message - fix Daniel's S-o-b address v5: - add back an S-o-b tag with Daniel's Intel address Link: https://bugzilla.kernel.org/show_bug.cgi?id=216303 Signed-off-by: Daniel Vetter Signed-off-by: Daniel Vetter Signed-off-by: Thomas Zimmermann Cc: Thomas Zimmermann Cc: Javier Martinez Canillas Cc: Helge Deller Cc: linux-fbdev@vger.kernel.org Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20230406132109.32050-5-tzimmermann@suse.de Stable-dep-of: 5ae3716cfdcd ("video/aperture: Only remove sysfb on the default vga pci device") Signed-off-by: Sasha Levin --- drivers/video/aperture.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c index 5c94abdb1ad6..7ea18086e659 100644 --- a/drivers/video/aperture.c +++ b/drivers/video/aperture.c @@ -344,13 +344,15 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *na aperture_detach_devices(base, size); } - /* - * WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. - */ - ret = vga_remove_vgacon(pdev); - if (ret) - return ret; + if (primary) { + /* + * WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. + */ + ret = vga_remove_vgacon(pdev); + if (ret) + return ret; + } return 0; From 28916927b7626ed3c44d902eafbf9d00691266e1 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Thu, 6 Apr 2023 15:21:05 +0200 Subject: [PATCH 009/123] video/aperture: Move vga handling to pci function [ Upstream commit f1d599d315fb7b7343cddaf365e671aaa8453aca ] A few reasons for this: - It's really the only one where this matters. I tried looking around, and I didn't find any non-pci vga-compatible controllers for x86 (since that's the only platform where we had this until a few patches ago), where a driver participating in the aperture claim dance would interfere. - I also don't expect that any future bus anytime soon will not just look like pci towards the OS, that's been the case for like 25+ years by now for practically everything (even non non-x86). - Also it's a bit funny if we have one part of the vga removal in the pci function, and the other in the generic one. v2: Rebase. v4: - fix Daniel's S-o-b address v5: - add back an S-o-b tag with Daniel's Intel address Signed-off-by: Daniel Vetter Signed-off-by: Daniel Vetter Signed-off-by: Thomas Zimmermann Cc: Thomas Zimmermann Cc: Javier Martinez Canillas Cc: Helge Deller Cc: linux-fbdev@vger.kernel.org Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20230406132109.32050-6-tzimmermann@suse.de Stable-dep-of: 5ae3716cfdcd ("video/aperture: Only remove sysfb on the default vga pci device") Signed-off-by: Sasha Levin --- drivers/video/aperture.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c index 7ea18086e659..3e4a1f55f51b 100644 --- a/drivers/video/aperture.c +++ b/drivers/video/aperture.c @@ -298,14 +298,6 @@ int aperture_remove_conflicting_devices(resource_size_t base, resource_size_t si aperture_detach_devices(base, size); - /* - * If this is the primary adapter, there could be a VGA device - * that consumes the VGA framebuffer I/O range. Remove this device - * as well. 
- */ - if (primary) - aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE); - return 0; } EXPORT_SYMBOL(aperture_remove_conflicting_devices); @@ -345,6 +337,13 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *na } if (primary) { + /* + * If this is the primary adapter, there could be a VGA device + * that consumes the VGA framebuffer I/O range. Remove this + * device as well. + */ + aperture_detach_devices(VGA_FB_PHYS_BASE, VGA_FB_PHYS_SIZE); + /* * WARNING: Apparently we must kick fbdev drivers before vgacon, * otherwise the vga fbdev driver falls over. From 3e4d038da33e75765584fa54d6cc1a327535af65 Mon Sep 17 00:00:00 2001 From: Igor Mammedov Date: Mon, 24 Apr 2023 21:15:57 +0200 Subject: [PATCH 010/123] PCI: acpiphp: Reassign resources on bridge if necessary [ Upstream commit 40613da52b13fb21c5566f10b287e0ca8c12c4e9 ] When using ACPI PCI hotplug, hotplugging a device with large BARs may fail if bridge windows programmed by firmware are not large enough. Reproducer: $ qemu-kvm -monitor stdio -M q35 -m 4G \ -global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=on \ -device id=rp1,pcie-root-port,bus=pcie.0,chassis=4 \ disk_image wait till linux guest boots, then hotplug device: (qemu) device_add qxl,bus=rp1 hotplug on guest side fails with: pci 0000:01:00.0: [1b36:0100] type 00 class 0x038000 pci 0000:01:00.0: reg 0x10: [mem 0x00000000-0x03ffffff] pci 0000:01:00.0: reg 0x14: [mem 0x00000000-0x03ffffff] pci 0000:01:00.0: reg 0x18: [mem 0x00000000-0x00001fff] pci 0000:01:00.0: reg 0x1c: [io 0x0000-0x001f] pci 0000:01:00.0: BAR 0: no space for [mem size 0x04000000] pci 0000:01:00.0: BAR 0: failed to assign [mem size 0x04000000] pci 0000:01:00.0: BAR 1: no space for [mem size 0x04000000] pci 0000:01:00.0: BAR 1: failed to assign [mem size 0x04000000] pci 0000:01:00.0: BAR 2: assigned [mem 0xfe800000-0xfe801fff] pci 0000:01:00.0: BAR 3: assigned [io 0x1000-0x101f] qxl 0000:01:00.0: enabling device (0000 -> 0003) Unable to create vram_mapping qxl: probe of 0000:01:00.0 failed with error -12 However when using native PCIe hotplug '-global ICH9-LPC.acpi-pci-hotplug-with-bridge-support=off' it works fine, since kernel attempts to reassign unused resources. Use the same machinery as native PCIe hotplug to (re)assign resources. Link: https://lore.kernel.org/r/20230424191557.2464760-1-imammedo@redhat.com Signed-off-by: Igor Mammedov Signed-off-by: Bjorn Helgaas Acked-by: Michael S. Tsirkin Acked-by: Rafael J. 
Wysocki Cc: stable@vger.kernel.org Signed-off-by: Sasha Levin --- drivers/pci/hotplug/acpiphp_glue.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 6efa3d8db9a5..393f341d9d76 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -490,7 +490,6 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) acpiphp_native_scan_bridge(dev); } } else { - LIST_HEAD(add_list); int max, pass; acpiphp_rescan_slot(slot); @@ -504,12 +503,10 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); pcibios_resource_survey_bus(dev->subordinate); - __pci_bus_size_bridges(dev->subordinate, - &add_list); } } } - __pci_bus_assign_resources(bus, &add_list, NULL); + pci_assign_unassigned_bridge_resources(bus->self); } acpiphp_sanitize_bus(bus); From 92c568c82ee74cb8343620d14190d9c1169e9f71 Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Tue, 4 Apr 2023 10:33:44 +0100 Subject: [PATCH 011/123] MIPS: cpu-features: Enable octeon_cache by cpu_type [ Upstream commit f641519409a73403ee6612b8648b95a688ab85c2 ] cpu_has_octeon_cache was tied to 0 for generic cpu-features, whith this generic kernel built for octeon CPU won't boot. Just enable this flag by cpu_type. It won't hurt orther platforms because compiler will eliminate the code path on other processors. Signed-off-by: Jiaxun Yang Signed-off-by: Thomas Bogendoerfer Stable-dep-of: 5487a7b60695 ("MIPS: cpu-features: Use boot_cpu_type for CPU type based features") Signed-off-by: Sasha Levin --- arch/mips/include/asm/cpu-features.h | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index c0983130a44c..53c8551ec89b 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -121,7 +121,24 @@ #define cpu_has_4k_cache __isa_ge_or_opt(1, MIPS_CPU_4K_CACHE) #endif #ifndef cpu_has_octeon_cache -#define cpu_has_octeon_cache 0 +#define cpu_has_octeon_cache \ +({ \ + int __res; \ + \ + switch (current_cpu_type()) { \ + case CPU_CAVIUM_OCTEON: \ + case CPU_CAVIUM_OCTEON_PLUS: \ + case CPU_CAVIUM_OCTEON2: \ + case CPU_CAVIUM_OCTEON3: \ + __res = 1; \ + break; \ + \ + default: \ + __res = 0; \ + } \ + \ + __res; \ +}) #endif /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */ #ifndef cpu_has_fpu From 1fa68a78109840ccb3d1b69ea0ba3cd9b30ed17f Mon Sep 17 00:00:00 2001 From: Jiaxun Yang Date: Wed, 7 Jun 2023 13:51:22 +0800 Subject: [PATCH 012/123] MIPS: cpu-features: Use boot_cpu_type for CPU type based features [ Upstream commit 5487a7b60695a92cf998350e4beac17144c91fcd ] Some CPU feature macros were using current_cpu_type to mark feature availability. However current_cpu_type will use smp_processor_id, which is prohibited under preemptable context. Since those features are all uniform on all CPUs in a SMP system, use boot_cpu_type instead of current_cpu_type to fix preemptable kernel. 
Cc: stable@vger.kernel.org Signed-off-by: Jiaxun Yang Signed-off-by: Thomas Bogendoerfer Signed-off-by: Sasha Levin --- arch/mips/include/asm/cpu-features.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 53c8551ec89b..e0a4da4cfd8b 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -125,7 +125,7 @@ ({ \ int __res; \ \ - switch (current_cpu_type()) { \ + switch (boot_cpu_type()) { \ case CPU_CAVIUM_OCTEON: \ case CPU_CAVIUM_OCTEON_PLUS: \ case CPU_CAVIUM_OCTEON2: \ @@ -368,7 +368,7 @@ ({ \ int __res; \ \ - switch (current_cpu_type()) { \ + switch (boot_cpu_type()) { \ case CPU_M14KC: \ case CPU_74K: \ case CPU_1074K: \ From 8168c96c24ecfe0265c295aa4d969b4b650f00d3 Mon Sep 17 00:00:00 2001 From: Zhang Yi Date: Tue, 6 Jun 2023 21:59:24 +0800 Subject: [PATCH 013/123] jbd2: remove t_checkpoint_io_list [ Upstream commit be22255360f80d3af789daad00025171a65424a5 ] Since t_checkpoint_io_list was stop using in jbd2_log_do_checkpoint() now, it's time to remove the whole t_checkpoint_io_list logic. Signed-off-by: Zhang Yi Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20230606135928.434610-3-yi.zhang@huaweicloud.com Signed-off-by: Theodore Ts'o Stable-dep-of: 46f881b5b175 ("jbd2: fix a race when checking checkpoint buffer busy") Signed-off-by: Sasha Levin --- fs/jbd2/checkpoint.c | 42 ++---------------------------------------- fs/jbd2/commit.c | 3 +-- include/linux/jbd2.h | 6 ------ 3 files changed, 3 insertions(+), 48 deletions(-) diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index c4e0da6db719..723b4eb11282 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -27,7 +27,7 @@ * * Called with j_list_lock held. */ -static inline void __buffer_unlink_first(struct journal_head *jh) +static inline void __buffer_unlink(struct journal_head *jh) { transaction_t *transaction = jh->b_cp_transaction; @@ -40,23 +40,6 @@ static inline void __buffer_unlink_first(struct journal_head *jh) } } -/* - * Unlink a buffer from a transaction checkpoint(io) list. - * - * Called with j_list_lock held. - */ -static inline void __buffer_unlink(struct journal_head *jh) -{ - transaction_t *transaction = jh->b_cp_transaction; - - __buffer_unlink_first(jh); - if (transaction->t_checkpoint_io_list == jh) { - transaction->t_checkpoint_io_list = jh->b_cpnext; - if (transaction->t_checkpoint_io_list == jh) - transaction->t_checkpoint_io_list = NULL; - } -} - /* * Check a checkpoint buffer could be release or not. * @@ -505,15 +488,6 @@ again: break; if (need_resched() || spin_needbreak(&journal->j_list_lock)) break; - if (released) - continue; - - nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_io_list, - nr_to_scan, &released); - if (*nr_to_scan == 0) - break; - if (need_resched() || spin_needbreak(&journal->j_list_lock)) - break; } while (transaction != last_transaction); if (transaction != last_transaction) { @@ -568,17 +542,6 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) */ if (need_resched()) return; - if (ret) - continue; - /* - * It is essential that we are as careful as in the case of - * t_checkpoint_list with removing the buffer from the list as - * we can possibly see not yet submitted buffers on io_list - */ - ret = journal_clean_one_cp_list(transaction-> - t_checkpoint_io_list, destroy); - if (need_resched()) - return; /* * Stop scanning if we couldn't free the transaction. 
This * avoids pointless scanning of transactions which still @@ -663,7 +626,7 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) jbd2_journal_put_journal_head(jh); /* Is this transaction empty? */ - if (transaction->t_checkpoint_list || transaction->t_checkpoint_io_list) + if (transaction->t_checkpoint_list) return 0; /* @@ -755,7 +718,6 @@ void __jbd2_journal_drop_transaction(journal_t *journal, transaction_t *transact J_ASSERT(transaction->t_forget == NULL); J_ASSERT(transaction->t_shadow_list == NULL); J_ASSERT(transaction->t_checkpoint_list == NULL); - J_ASSERT(transaction->t_checkpoint_io_list == NULL); J_ASSERT(atomic_read(&transaction->t_updates) == 0); J_ASSERT(journal->j_committing_transaction != transaction); J_ASSERT(journal->j_running_transaction != transaction); diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 885a7a6cc53e..f1d9db6686e3 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -1171,8 +1171,7 @@ restart_loop: spin_lock(&journal->j_list_lock); commit_transaction->t_state = T_FINISHED; /* Check if the transaction can be dropped now that we are finished */ - if (commit_transaction->t_checkpoint_list == NULL && - commit_transaction->t_checkpoint_io_list == NULL) { + if (commit_transaction->t_checkpoint_list == NULL) { __jbd2_journal_drop_transaction(journal, commit_transaction); jbd2_journal_free_transaction(commit_transaction); } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 0b7242370b56..67912fe08fbb 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -622,12 +622,6 @@ struct transaction_s */ struct journal_head *t_checkpoint_list; - /* - * Doubly-linked circular list of all buffers submitted for IO while - * checkpointing. [j_list_lock] - */ - struct journal_head *t_checkpoint_io_list; - /* * Doubly-linked circular list of metadata buffers being * shadowed by log IO. The IO buffers on the iobuf list and From 5fda50e262e65bd553ff777c4b280afd1495a18b Mon Sep 17 00:00:00 2001 From: Zhang Yi Date: Tue, 6 Jun 2023 21:59:25 +0800 Subject: [PATCH 014/123] jbd2: remove journal_clean_one_cp_list() [ Upstream commit b98dba273a0e47dbfade89c9af73c5b012a4eabb ] journal_clean_one_cp_list() and journal_shrink_one_cp_list() are almost the same, so merge them into journal_shrink_one_cp_list(), remove the nr_to_scan parameter, always scan and try to free the whole checkpoint list. Signed-off-by: Zhang Yi Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20230606135928.434610-4-yi.zhang@huaweicloud.com Signed-off-by: Theodore Ts'o Stable-dep-of: 46f881b5b175 ("jbd2: fix a race when checking checkpoint buffer busy") Signed-off-by: Sasha Levin --- fs/jbd2/checkpoint.c | 85 ++++++++++--------------------------- include/trace/events/jbd2.h | 12 ++---- 2 files changed, 26 insertions(+), 71 deletions(-) diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 723b4eb11282..42b34cab64fb 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -350,19 +350,24 @@ int jbd2_cleanup_journal_tail(journal_t *journal) /* Checkpoint list management */ /* - * journal_clean_one_cp_list + * journal_shrink_one_cp_list * - * Find all the written-back checkpoint buffers in the given list and - * release them. If 'destroy' is set, clean all buffers unconditionally. + * Find all the written-back checkpoint buffers in the given list + * and try to release them. If the whole transaction is released, set + * the 'released' parameter. Return the number of released checkpointed + * buffers. * * Called with j_list_lock held. 
- * Returns 1 if we freed the transaction, 0 otherwise. */ -static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy) +static unsigned long journal_shrink_one_cp_list(struct journal_head *jh, + bool destroy, bool *released) { struct journal_head *last_jh; struct journal_head *next_jh = jh; + unsigned long nr_freed = 0; + int ret; + *released = false; if (!jh) return 0; @@ -372,52 +377,6 @@ static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy) next_jh = jh->b_cpnext; if (!destroy && __cp_buffer_busy(jh)) - return 0; - - if (__jbd2_journal_remove_checkpoint(jh)) - return 1; - /* - * This function only frees up some memory - * if possible so we dont have an obligation - * to finish processing. Bail out if preemption - * requested: - */ - if (need_resched()) - return 0; - } while (jh != last_jh); - - return 0; -} - -/* - * journal_shrink_one_cp_list - * - * Find 'nr_to_scan' written-back checkpoint buffers in the given list - * and try to release them. If the whole transaction is released, set - * the 'released' parameter. Return the number of released checkpointed - * buffers. - * - * Called with j_list_lock held. - */ -static unsigned long journal_shrink_one_cp_list(struct journal_head *jh, - unsigned long *nr_to_scan, - bool *released) -{ - struct journal_head *last_jh; - struct journal_head *next_jh = jh; - unsigned long nr_freed = 0; - int ret; - - if (!jh || *nr_to_scan == 0) - return 0; - - last_jh = jh->b_cpprev; - do { - jh = next_jh; - next_jh = jh->b_cpnext; - - (*nr_to_scan)--; - if (__cp_buffer_busy(jh)) continue; nr_freed++; @@ -429,7 +388,7 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh, if (need_resched()) break; - } while (jh != last_jh && *nr_to_scan); + } while (jh != last_jh); return nr_freed; } @@ -447,11 +406,11 @@ unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan) { transaction_t *transaction, *last_transaction, *next_transaction; - bool released; + bool __maybe_unused released; tid_t first_tid = 0, last_tid = 0, next_tid = 0; tid_t tid = 0; unsigned long nr_freed = 0; - unsigned long nr_scanned = *nr_to_scan; + unsigned long freed; again: spin_lock(&journal->j_list_lock); @@ -480,10 +439,11 @@ again: transaction = next_transaction; next_transaction = transaction->t_cpnext; tid = transaction->t_tid; - released = false; - nr_freed += journal_shrink_one_cp_list(transaction->t_checkpoint_list, - nr_to_scan, &released); + freed = journal_shrink_one_cp_list(transaction->t_checkpoint_list, + false, &released); + nr_freed += freed; + (*nr_to_scan) -= min(*nr_to_scan, freed); if (*nr_to_scan == 0) break; if (need_resched() || spin_needbreak(&journal->j_list_lock)) @@ -504,9 +464,8 @@ again: if (*nr_to_scan && next_tid) goto again; out: - nr_scanned -= *nr_to_scan; trace_jbd2_shrink_checkpoint_list(journal, first_tid, tid, last_tid, - nr_freed, nr_scanned, next_tid); + nr_freed, next_tid); return nr_freed; } @@ -522,7 +481,7 @@ out: void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) { transaction_t *transaction, *last_transaction, *next_transaction; - int ret; + bool released; transaction = journal->j_checkpoint_transactions; if (!transaction) @@ -533,8 +492,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) do { transaction = next_transaction; next_transaction = transaction->t_cpnext; - ret = journal_clean_one_cp_list(transaction->t_checkpoint_list, - destroy); + 
journal_shrink_one_cp_list(transaction->t_checkpoint_list, + destroy, &released); /* * This function only frees up some memory if possible so we * dont have an obligation to finish processing. Bail out if @@ -547,7 +506,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) * avoids pointless scanning of transactions which still * weren't checkpointed. */ - if (!ret) + if (!released) return; } while (transaction != last_transaction); } diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 8f5ee380d309..5646ae15a957 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -462,11 +462,9 @@ TRACE_EVENT(jbd2_shrink_scan_exit, TRACE_EVENT(jbd2_shrink_checkpoint_list, TP_PROTO(journal_t *journal, tid_t first_tid, tid_t tid, tid_t last_tid, - unsigned long nr_freed, unsigned long nr_scanned, - tid_t next_tid), + unsigned long nr_freed, tid_t next_tid), - TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, - nr_scanned, next_tid), + TP_ARGS(journal, first_tid, tid, last_tid, nr_freed, next_tid), TP_STRUCT__entry( __field(dev_t, dev) @@ -474,7 +472,6 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list, __field(tid_t, tid) __field(tid_t, last_tid) __field(unsigned long, nr_freed) - __field(unsigned long, nr_scanned) __field(tid_t, next_tid) ), @@ -484,15 +481,14 @@ TRACE_EVENT(jbd2_shrink_checkpoint_list, __entry->tid = tid; __entry->last_tid = last_tid; __entry->nr_freed = nr_freed; - __entry->nr_scanned = nr_scanned; __entry->next_tid = next_tid; ), TP_printk("dev %d,%d shrink transaction %u-%u(%u) freed %lu " - "scanned %lu next transaction %u", + "next transaction %u", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->first_tid, __entry->tid, __entry->last_tid, - __entry->nr_freed, __entry->nr_scanned, __entry->next_tid) + __entry->nr_freed, __entry->next_tid) ); #endif /* _TRACE_JBD2_H */ From e5c768d809a85e9efd0274b2efe69d4970cc0014 Mon Sep 17 00:00:00 2001 From: Zhang Yi Date: Tue, 6 Jun 2023 21:59:27 +0800 Subject: [PATCH 015/123] jbd2: fix a race when checking checkpoint buffer busy [ Upstream commit 46f881b5b1758dc4a35fba4a643c10717d0cf427 ] Before removing checkpoint buffer from the t_checkpoint_list, we have to check both BH_Dirty and BH_Lock bits together to distinguish buffers have not been or were being written back. But __cp_buffer_busy() checks them separately, it first check lock state and then check dirty, the window between these two checks could be raced by writing back procedure, which locks buffer and clears buffer dirty before I/O completes. So it cannot guarantee checkpointing buffers been written back to disk if some error happens later. Finally, it may clean checkpoint transactions and lead to inconsistent filesystem. jbd2_journal_forget() and __journal_try_to_free_buffer() also have the same problem (journal_unmap_buffer() escape from this issue since it's running under the buffer lock), so fix them through introducing a new helper to try holding the buffer lock and remove really clean buffer. 
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217490 Cc: stable@vger.kernel.org Suggested-by: Jan Kara Signed-off-by: Zhang Yi Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20230606135928.434610-6-yi.zhang@huaweicloud.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/jbd2/checkpoint.c | 38 +++++++++++++++++++++++++++++++++++--- fs/jbd2/transaction.c | 17 +++++------------ include/linux/jbd2.h | 1 + 3 files changed, 41 insertions(+), 15 deletions(-) diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 42b34cab64fb..9ec91017a7f3 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -376,11 +376,15 @@ static unsigned long journal_shrink_one_cp_list(struct journal_head *jh, jh = next_jh; next_jh = jh->b_cpnext; - if (!destroy && __cp_buffer_busy(jh)) - continue; + if (destroy) { + ret = __jbd2_journal_remove_checkpoint(jh); + } else { + ret = jbd2_journal_try_remove_checkpoint(jh); + if (ret < 0) + continue; + } nr_freed++; - ret = __jbd2_journal_remove_checkpoint(jh); if (ret) { *released = true; break; @@ -616,6 +620,34 @@ int __jbd2_journal_remove_checkpoint(struct journal_head *jh) return 1; } +/* + * Check the checkpoint buffer and try to remove it from the checkpoint + * list if it's clean. Returns -EBUSY if it is not clean, returns 1 if + * it frees the transaction, 0 otherwise. + * + * This function is called with j_list_lock held. + */ +int jbd2_journal_try_remove_checkpoint(struct journal_head *jh) +{ + struct buffer_head *bh = jh2bh(jh); + + if (!trylock_buffer(bh)) + return -EBUSY; + if (buffer_dirty(bh)) { + unlock_buffer(bh); + return -EBUSY; + } + unlock_buffer(bh); + + /* + * Buffer is clean and the IO has finished (we held the buffer + * lock) so the checkpoint is done. We can safely remove the + * buffer from this transaction. + */ + JBUFFER_TRACE(jh, "remove from checkpoint list"); + return __jbd2_journal_remove_checkpoint(jh); +} + /* * journal_insert_checkpoint: put a committed buffer onto a checkpoint * list so that we know when it is safe to clean the transaction out of diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 18611241f451..6ef5022949c4 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -1784,8 +1784,7 @@ int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh) * Otherwise, if the buffer has been written to disk, * it is safe to remove the checkpoint and drop it. 
*/ - if (!buffer_dirty(bh)) { - __jbd2_journal_remove_checkpoint(jh); + if (jbd2_journal_try_remove_checkpoint(jh) >= 0) { spin_unlock(&journal->j_list_lock); goto drop; } @@ -2112,20 +2111,14 @@ __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) jh = bh2jh(bh); - if (buffer_locked(bh) || buffer_dirty(bh)) - goto out; - if (jh->b_next_transaction != NULL || jh->b_transaction != NULL) - goto out; + return; spin_lock(&journal->j_list_lock); - if (jh->b_cp_transaction != NULL) { - /* written-back checkpointed metadata buffer */ - JBUFFER_TRACE(jh, "remove from checkpoint list"); - __jbd2_journal_remove_checkpoint(jh); - } + /* Remove written-back checkpointed metadata buffer */ + if (jh->b_cp_transaction != NULL) + jbd2_journal_try_remove_checkpoint(jh); spin_unlock(&journal->j_list_lock); -out: return; } diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 67912fe08fbb..ebb1608d9dcd 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1435,6 +1435,7 @@ extern void jbd2_journal_commit_transaction(journal_t *); void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); unsigned long jbd2_journal_shrink_checkpoint_list(journal_t *journal, unsigned long *nr_to_scan); int __jbd2_journal_remove_checkpoint(struct journal_head *); +int jbd2_journal_try_remove_checkpoint(struct journal_head *jh); void jbd2_journal_destroy_checkpoint(journal_t *journal); void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); From 335987e21237fff0902262ca02b29463e84e3272 Mon Sep 17 00:00:00 2001 From: Ziyang Xuan Date: Tue, 11 Jul 2023 09:17:37 +0800 Subject: [PATCH 016/123] can: raw: fix receiver memory leak [ Upstream commit ee8b94c8510ce64afe0b87ef548d23e00915fb10 ] Got kmemleak errors with the following ltp can_filter testcase: for ((i=1; i<=100; i++)) do ./can_filter & sleep 0.1 done ============================================================== [<00000000db4a4943>] can_rx_register+0x147/0x360 [can] [<00000000a289549d>] raw_setsockopt+0x5ef/0x853 [can_raw] [<000000006d3d9ebd>] __sys_setsockopt+0x173/0x2c0 [<00000000407dbfec>] __x64_sys_setsockopt+0x61/0x70 [<00000000fd468496>] do_syscall_64+0x33/0x40 [<00000000b7e47d51>] entry_SYSCALL_64_after_hwframe+0x61/0xc6 It's a bug in the concurrent scenario of unregister_netdevice_many() and raw_release() as following: cpu0 cpu1 unregister_netdevice_many(can_dev) unlist_netdevice(can_dev) // dev_get_by_index() return NULL after this net_set_todo(can_dev) raw_release(can_socket) dev = dev_get_by_index(, ro->ifindex); // dev == NULL if (dev) { // receivers in dev_rcv_lists not free because dev is NULL raw_disable_allfilters(, dev, ); dev_put(dev); } ... ro->bound = 0; ... call_netdevice_notifiers(NETDEV_UNREGISTER, ) raw_notify(, NETDEV_UNREGISTER, ) if (ro->bound) // invalid because ro->bound has been set 0 raw_disable_allfilters(, dev, ); // receivers in dev_rcv_lists will never be freed Add a net_device pointer member in struct raw_sock to record bound can_dev, and use rtnl_lock to serialize raw_socket members between raw_bind(), raw_release(), raw_setsockopt() and raw_notify(). Use ro->dev to decide whether to free receivers in dev_rcv_lists. 
Fixes: 8d0caedb7596 ("can: bcm/raw/isotp: use per module netdevice notifier") Reviewed-by: Oliver Hartkopp Acked-by: Oliver Hartkopp Signed-off-by: Ziyang Xuan Link: https://lore.kernel.org/all/20230711011737.1969582-1-william.xuanziyang@huawei.com Cc: stable@vger.kernel.org Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- net/can/raw.c | 57 ++++++++++++++++++++++----------------------------- 1 file changed, 24 insertions(+), 33 deletions(-) diff --git a/net/can/raw.c b/net/can/raw.c index 4abab2c3011a..1cd2c8748c26 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -84,6 +84,7 @@ struct raw_sock { struct sock sk; int bound; int ifindex; + struct net_device *dev; struct list_head notifier; int loopback; int recv_own_msgs; @@ -277,7 +278,7 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg, if (!net_eq(dev_net(dev), sock_net(sk))) return; - if (ro->ifindex != dev->ifindex) + if (ro->dev != dev) return; switch (msg) { @@ -292,6 +293,7 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg, ro->ifindex = 0; ro->bound = 0; + ro->dev = NULL; ro->count = 0; release_sock(sk); @@ -337,6 +339,7 @@ static int raw_init(struct sock *sk) ro->bound = 0; ro->ifindex = 0; + ro->dev = NULL; /* set default filter to single entry dfilter */ ro->dfilter.can_id = 0; @@ -385,19 +388,13 @@ static int raw_release(struct socket *sock) lock_sock(sk); + rtnl_lock(); /* remove current filters & unregister */ if (ro->bound) { - if (ro->ifindex) { - struct net_device *dev; - - dev = dev_get_by_index(sock_net(sk), ro->ifindex); - if (dev) { - raw_disable_allfilters(dev_net(dev), dev, sk); - dev_put(dev); - } - } else { + if (ro->dev) + raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); + else raw_disable_allfilters(sock_net(sk), NULL, sk); - } } if (ro->count > 1) @@ -405,8 +402,10 @@ static int raw_release(struct socket *sock) ro->ifindex = 0; ro->bound = 0; + ro->dev = NULL; ro->count = 0; free_percpu(ro->uniq); + rtnl_unlock(); sock_orphan(sk); sock->sk = NULL; @@ -422,6 +421,7 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct raw_sock *ro = raw_sk(sk); + struct net_device *dev = NULL; int ifindex; int err = 0; int notify_enetdown = 0; @@ -431,14 +431,13 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) if (addr->can_family != AF_CAN) return -EINVAL; + rtnl_lock(); lock_sock(sk); if (ro->bound && addr->can_ifindex == ro->ifindex) goto out; if (addr->can_ifindex) { - struct net_device *dev; - dev = dev_get_by_index(sock_net(sk), addr->can_ifindex); if (!dev) { err = -ENODEV; @@ -467,26 +466,20 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) if (!err) { if (ro->bound) { /* unregister old filters */ - if (ro->ifindex) { - struct net_device *dev; - - dev = dev_get_by_index(sock_net(sk), - ro->ifindex); - if (dev) { - raw_disable_allfilters(dev_net(dev), - dev, sk); - dev_put(dev); - } - } else { + if (ro->dev) + raw_disable_allfilters(dev_net(ro->dev), + ro->dev, sk); + else raw_disable_allfilters(sock_net(sk), NULL, sk); - } } ro->ifindex = ifindex; ro->bound = 1; + ro->dev = dev; } out: release_sock(sk); + rtnl_unlock(); if (notify_enetdown) { sk->sk_err = ENETDOWN; @@ -552,9 +545,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, rtnl_lock(); lock_sock(sk); - if (ro->bound && ro->ifindex) { - dev = dev_get_by_index(sock_net(sk), ro->ifindex); - if (!dev) { + dev = ro->dev; + if 
(ro->bound && dev) { + if (dev->reg_state != NETREG_REGISTERED) { if (count > 1) kfree(filter); err = -ENODEV; @@ -595,7 +588,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, ro->count = count; out_fil: - dev_put(dev); release_sock(sk); rtnl_unlock(); @@ -613,9 +605,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, rtnl_lock(); lock_sock(sk); - if (ro->bound && ro->ifindex) { - dev = dev_get_by_index(sock_net(sk), ro->ifindex); - if (!dev) { + dev = ro->dev; + if (ro->bound && dev) { + if (dev->reg_state != NETREG_REGISTERED) { err = -ENODEV; goto out_err; } @@ -639,7 +631,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, ro->err_mask = err_mask; out_err: - dev_put(dev); release_sock(sk); rtnl_unlock(); From 40dafcab9da92179d610ea5a5f0c32f5ff2fc365 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 20 Jul 2023 11:44:38 +0000 Subject: [PATCH 017/123] can: raw: fix lockdep issue in raw_release() [ Upstream commit 11c9027c983e9e4b408ee5613b6504d24ebd85be ] syzbot complained about a lockdep issue [1] Since raw_bind() and raw_setsockopt() first get RTNL before locking the socket, we must adopt the same order in raw_release() [1] WARNING: possible circular locking dependency detected 6.5.0-rc1-syzkaller-00192-g78adb4bcf99e #0 Not tainted ------------------------------------------------------ syz-executor.0/14110 is trying to acquire lock: ffff88804e4b6130 (sk_lock-AF_CAN){+.+.}-{0:0}, at: lock_sock include/net/sock.h:1708 [inline] ffff88804e4b6130 (sk_lock-AF_CAN){+.+.}-{0:0}, at: raw_bind+0xb1/0xab0 net/can/raw.c:435 but task is already holding lock: ffffffff8e3df368 (rtnl_mutex){+.+.}-{3:3}, at: raw_bind+0xa7/0xab0 net/can/raw.c:434 which lock already depends on the new lock. 
the existing dependency chain (in reverse order) is: -> #1 (rtnl_mutex){+.+.}-{3:3}: __mutex_lock_common kernel/locking/mutex.c:603 [inline] __mutex_lock+0x181/0x1340 kernel/locking/mutex.c:747 raw_release+0x1c6/0x9b0 net/can/raw.c:391 __sock_release+0xcd/0x290 net/socket.c:654 sock_close+0x1c/0x20 net/socket.c:1386 __fput+0x3fd/0xac0 fs/file_table.c:384 task_work_run+0x14d/0x240 kernel/task_work.c:179 resume_user_mode_work include/linux/resume_user_mode.h:49 [inline] exit_to_user_mode_loop kernel/entry/common.c:171 [inline] exit_to_user_mode_prepare+0x210/0x240 kernel/entry/common.c:204 __syscall_exit_to_user_mode_work kernel/entry/common.c:286 [inline] syscall_exit_to_user_mode+0x1d/0x50 kernel/entry/common.c:297 do_syscall_64+0x44/0xb0 arch/x86/entry/common.c:86 entry_SYSCALL_64_after_hwframe+0x63/0xcd -> #0 (sk_lock-AF_CAN){+.+.}-{0:0}: check_prev_add kernel/locking/lockdep.c:3142 [inline] check_prevs_add kernel/locking/lockdep.c:3261 [inline] validate_chain kernel/locking/lockdep.c:3876 [inline] __lock_acquire+0x2e3d/0x5de0 kernel/locking/lockdep.c:5144 lock_acquire kernel/locking/lockdep.c:5761 [inline] lock_acquire+0x1ae/0x510 kernel/locking/lockdep.c:5726 lock_sock_nested+0x3a/0xf0 net/core/sock.c:3492 lock_sock include/net/sock.h:1708 [inline] raw_bind+0xb1/0xab0 net/can/raw.c:435 __sys_bind+0x1ec/0x220 net/socket.c:1792 __do_sys_bind net/socket.c:1803 [inline] __se_sys_bind net/socket.c:1801 [inline] __x64_sys_bind+0x72/0xb0 net/socket.c:1801 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(rtnl_mutex); lock(sk_lock-AF_CAN); lock(rtnl_mutex); lock(sk_lock-AF_CAN); *** DEADLOCK *** 1 lock held by syz-executor.0/14110: stack backtrace: CPU: 0 PID: 14110 Comm: syz-executor.0 Not tainted 6.5.0-rc1-syzkaller-00192-g78adb4bcf99e #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/03/2023 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xd9/0x1b0 lib/dump_stack.c:106 check_noncircular+0x311/0x3f0 kernel/locking/lockdep.c:2195 check_prev_add kernel/locking/lockdep.c:3142 [inline] check_prevs_add kernel/locking/lockdep.c:3261 [inline] validate_chain kernel/locking/lockdep.c:3876 [inline] __lock_acquire+0x2e3d/0x5de0 kernel/locking/lockdep.c:5144 lock_acquire kernel/locking/lockdep.c:5761 [inline] lock_acquire+0x1ae/0x510 kernel/locking/lockdep.c:5726 lock_sock_nested+0x3a/0xf0 net/core/sock.c:3492 lock_sock include/net/sock.h:1708 [inline] raw_bind+0xb1/0xab0 net/can/raw.c:435 __sys_bind+0x1ec/0x220 net/socket.c:1792 __do_sys_bind net/socket.c:1803 [inline] __se_sys_bind net/socket.c:1801 [inline] __x64_sys_bind+0x72/0xb0 net/socket.c:1801 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd RIP: 0033:0x7fd89007cb29 Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 e1 20 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007fd890d2a0c8 EFLAGS: 00000246 ORIG_RAX: 0000000000000031 RAX: ffffffffffffffda RBX: 00007fd89019bf80 RCX: 00007fd89007cb29 RDX: 0000000000000010 RSI: 0000000020000040 RDI: 0000000000000003 RBP: 00007fd8900c847a R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 
000000000000000b R14: 00007fd89019bf80 R15: 00007ffebf8124f8 Fixes: ee8b94c8510c ("can: raw: fix receiver memory leak") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Ziyang Xuan Cc: Oliver Hartkopp Cc: stable@vger.kernel.org Cc: Marc Kleine-Budde Link: https://lore.kernel.org/all/20230720114438.172434-1-edumazet@google.com Signed-off-by: Marc Kleine-Budde Signed-off-by: Sasha Levin --- net/can/raw.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/can/raw.c b/net/can/raw.c index 1cd2c8748c26..0dd3259357a3 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -386,9 +386,9 @@ static int raw_release(struct socket *sock) list_del(&ro->notifier); spin_unlock(&raw_notifier_lock); + rtnl_lock(); lock_sock(sk); - rtnl_lock(); /* remove current filters & unregister */ if (ro->bound) { if (ro->dev) @@ -405,12 +405,13 @@ static int raw_release(struct socket *sock) ro->dev = NULL; ro->count = 0; free_percpu(ro->uniq); - rtnl_unlock(); sock_orphan(sk); sock->sk = NULL; release_sock(sk); + rtnl_unlock(); + sock_put(sk); return 0; From 246d763b79a597fcc84ff10d96b620974f47bb56 Mon Sep 17 00:00:00 2001 From: Yu Zhe Date: Fri, 3 Mar 2023 13:21:55 +0800 Subject: [PATCH 018/123] s390/zcrypt: remove unnecessary (void *) conversions [ Upstream commit 72c2112ce9d72e6c40dd893f32187a3d34453113 ] Pointer variables of void * type do not require type cast. Signed-off-by: Yu Zhe Reviewed-by: Muhammad Usama Anjum Link: https://lore.kernel.org/r/20230303052155.21072-1-yuzhe@nfschina.com Signed-off-by: Heiko Carstens Signed-off-by: Vasily Gorbik Stable-dep-of: 4cfca532ddc3 ("s390/zcrypt: fix reply buffer calculations for CCA replies") Signed-off-by: Sasha Levin --- drivers/s390/crypto/zcrypt_msgtype6.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index f99a9ef42116..37c01aaa21a2 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -926,8 +926,7 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = - (struct response_type *)msg->private; + struct response_type *resp_type = msg->private; struct type86x_reply *t86r; int len; @@ -982,8 +981,7 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq, .type = TYPE82_RSP_CODE, .reply_code = REP82_ERROR_MACHINE_FAILURE, }; - struct response_type *resp_type = - (struct response_type *)msg->private; + struct response_type *resp_type = msg->private; struct type86_ep11_reply *t86r; int len; @@ -1157,7 +1155,7 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, struct ap_message *ap_msg) { int rc; - struct response_type *rtype = (struct response_type *)(ap_msg->private); + struct response_type *rtype = ap_msg->private; struct { struct type6_hdr hdr; struct CPRBX cprbx; @@ -1243,7 +1241,7 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue * { int rc; unsigned int lfmt; - struct response_type *rtype = (struct response_type *)(ap_msg->private); + struct response_type *rtype = ap_msg->private; struct { struct type6_hdr hdr; struct ep11_cprb cprbx; @@ -1365,7 +1363,7 @@ static long zcrypt_msgtype6_rng(struct zcrypt_queue *zq, short int verb_length; short int key_length; } __packed * msg = ap_msg->msg; - struct response_type *rtype = (struct response_type *)(ap_msg->private); + struct response_type *rtype = ap_msg->private; int 
rc; msg->cprbx.domain = AP_QID_QUEUE(zq->queue->qid); From d4f5dcf68c0531b8a7191b736e121c6537e1b6cf Mon Sep 17 00:00:00 2001 From: Harald Freudenberger Date: Mon, 17 Jul 2023 16:55:29 +0200 Subject: [PATCH 019/123] s390/zcrypt: fix reply buffer calculations for CCA replies [ Upstream commit 4cfca532ddc3474b3fc42592d0e4237544344b1a ] The length information for available buffer space for CCA replies is covered with two fields in the T6 header prepended on each CCA reply: fromcardlen1 and fromcardlen2. The sum of these both values must not exceed the AP bus limit for this card (24KB for CEX8, 12KB CEX7 and older) minus the always present headers. The current code adjusted the fromcardlen2 value in case of exceeding the AP bus limit when there was a non-zero value given from userspace. Some tests now showed that this was the wrong assumption. Instead the userspace value given for this field should always be trusted and if the sum of the two fields exceeds the AP bus limit for this card the first field fromcardlen1 should be adjusted instead. So now the calculation is done with this new insight in mind. Also some additional checks for overflow have been introduced and some comments to provide some documentation for future maintainers of this complicated calculation code. Furthermore the 128 bytes of fix overhead which is used in the current code is not correct. Investigations showed that for a reply always the same two header structs are prepended before a possible payload. So this is also fixed with this patch. Signed-off-by: Harald Freudenberger Reviewed-by: Holger Dengler Cc: stable@vger.kernel.org Signed-off-by: Heiko Carstens Signed-off-by: Sasha Levin --- drivers/s390/crypto/zcrypt_msgtype6.c | 33 +++++++++++++++++++-------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 37c01aaa21a2..84e3ad290f6b 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -1154,23 +1154,36 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq, struct ica_xcRB *xcrb, struct ap_message *ap_msg) { - int rc; struct response_type *rtype = ap_msg->private; struct { struct type6_hdr hdr; struct CPRBX cprbx; /* ... more data blocks ... */ } __packed * msg = ap_msg->msg; + unsigned int max_payload_size; + int rc, delta; - /* - * Set the queue's reply buffer length minus 128 byte padding - * as reply limit for the card firmware. 
- */ - msg->hdr.fromcardlen1 = min_t(unsigned int, msg->hdr.fromcardlen1, - zq->reply.bufsize - 128); - if (msg->hdr.fromcardlen2) - msg->hdr.fromcardlen2 = - zq->reply.bufsize - msg->hdr.fromcardlen1 - 128; + /* calculate maximum payload for this card and msg type */ + max_payload_size = zq->reply.bufsize - sizeof(struct type86_fmt2_msg); + + /* limit each of the two from fields to the maximum payload size */ + msg->hdr.fromcardlen1 = min(msg->hdr.fromcardlen1, max_payload_size); + msg->hdr.fromcardlen2 = min(msg->hdr.fromcardlen2, max_payload_size); + + /* calculate delta if the sum of both exceeds max payload size */ + delta = msg->hdr.fromcardlen1 + msg->hdr.fromcardlen2 + - max_payload_size; + if (delta > 0) { + /* + * Sum exceeds maximum payload size, prune fromcardlen1 + * (always trust fromcardlen2) + */ + if (delta > msg->hdr.fromcardlen1) { + rc = -EINVAL; + goto out; + } + msg->hdr.fromcardlen1 -= delta; + } init_completion(&rtype->work); rc = ap_queue_message(zq->queue, ap_msg); From c23126f2c76a17b97520d306542cee32bb26fad8 Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Tue, 25 Jul 2023 02:19:45 +0200 Subject: [PATCH 020/123] drm/i915: Add the gen12_needs_ccs_aux_inv helper [ Upstream commit b2f59e9026038a5bbcbc0019fa58f963138211ee ] We always assumed that a device might either have AUX or FLAT CCS, but this is an approximation that is not always true, e.g. PVC represents an exception. Set the basis for future finer selection by implementing a boolean gen12_needs_ccs_aux_inv() function that tells whether aux invalidation is needed or not. Currently PVC is the only exception to the above mentioned rule. Requires: 059ae7ae2a1c ("drm/i915/gt: Cleanup aux invalidation registers") Signed-off-by: Andi Shyti Cc: Matt Roper Cc: Jonathan Cavitt Cc: # v5.8+ Reviewed-by: Matt Roper Reviewed-by: Andrzej Hajda Reviewed-by: Nirmoy Das Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-3-andi.shyti@linux.intel.com (cherry picked from commit c827655b87ad201ebe36f2e28d16b5491c8f7801) Signed-off-by: Tvrtko Ursulin Signed-off-by: Sasha Levin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index b2838732ac93..8e286733a436 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -165,6 +165,18 @@ static u32 preparser_disable(bool state) return MI_ARB_CHECK | 1 << 8 | state; } +static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine) +{ + if (IS_PONTEVECCHIO(engine->i915)) + return false; + + /* + * so far platforms supported by i915 having + * flat ccs do not require AUX invalidation + */ + return !HAS_FLAT_CCS(engine->i915); +} + u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg) { u32 gsi_offset = gt->uncore->gsi_offset; @@ -236,7 +248,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) else if (engine->class == COMPUTE_CLASS) flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; - if (!HAS_FLAT_CCS(rq->engine->i915)) + if (gen12_needs_ccs_aux_inv(rq->engine)) count = 8 + 4; else count = 8; @@ -254,7 +266,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - if (!HAS_FLAT_CCS(rq->engine->i915)) { + if (gen12_needs_ccs_aux_inv(rq->engine)) { /* hsdes: 1809175790 */ cs = gen12_emit_aux_table_inv(rq->engine->gt, cs, GEN12_CCS_AUX_INV); @@ 
-276,7 +288,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) if (mode & EMIT_INVALIDATE) { cmd += 2; - if (!HAS_FLAT_CCS(rq->engine->i915) && + if (gen12_needs_ccs_aux_inv(rq->engine) && (rq->engine->class == VIDEO_DECODE_CLASS || rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) { aux_inv = rq->engine->mask & From 017d4404312ab94a61be218c0221cd0048a37896 Mon Sep 17 00:00:00 2001 From: Jonathan Cavitt Date: Tue, 25 Jul 2023 02:19:46 +0200 Subject: [PATCH 021/123] drm/i915/gt: Ensure memory quiesced before invalidation [ Upstream commit 78a6ccd65fa3a7cc697810db079cc4b84dff03d5 ] All memory traffic must be quiesced before requesting an aux invalidation on platforms that use Aux CCS. Fixes: 972282c4cf24 ("drm/i915/gen12: Add aux table invalidate for all engines") Requires: a2a4aa0eef3b ("drm/i915: Add the gen12_needs_ccs_aux_inv helper") Signed-off-by: Jonathan Cavitt Signed-off-by: Andi Shyti Cc: # v5.8+ Reviewed-by: Nirmoy Das Reviewed-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-4-andi.shyti@linux.intel.com (cherry picked from commit ad8ebf12217e451cd19804b1c3e97ad56491c74a) Signed-off-by: Tvrtko Ursulin Signed-off-by: Sasha Levin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 8e286733a436..6a8c2fab4ca8 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -193,7 +193,11 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) { struct intel_engine_cs *engine = rq->engine; - if (mode & EMIT_FLUSH) { + /* + * On Aux CCS platforms the invalidation of the Aux + * table requires quiescing memory traffic beforehand + */ + if (mode & EMIT_FLUSH || gen12_needs_ccs_aux_inv(engine)) { u32 flags = 0; u32 *cs; From 8e3f138b96f64fde58d74f886acbfd4baca907fc Mon Sep 17 00:00:00 2001 From: Jonathan Cavitt Date: Tue, 25 Jul 2023 02:19:49 +0200 Subject: [PATCH 022/123] drm/i915/gt: Poll aux invalidation register bit on invalidation [ Upstream commit 0fde2f23516a00fd90dfb980b66b4665fcbfa659 ] For platforms that use Aux CCS, wait for aux invalidation to complete by checking the aux invalidation register bit is cleared. 
Fixes: 972282c4cf24 ("drm/i915/gen12: Add aux table invalidate for all engines") Signed-off-by: Jonathan Cavitt Signed-off-by: Andi Shyti Cc: # v5.8+ Reviewed-by: Nirmoy Das Reviewed-by: Andrzej Hajda Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-7-andi.shyti@linux.intel.com (cherry picked from commit d459c86f00aa98028d155a012c65dc42f7c37e76) Signed-off-by: Tvrtko Ursulin Signed-off-by: Sasha Levin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 17 ++++++++++++----- drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 1 + 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 6a8c2fab4ca8..975e31d876b1 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -184,7 +184,15 @@ u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; *cs++ = AUX_INV; - *cs++ = MI_NOOP; + + *cs++ = MI_SEMAPHORE_WAIT_TOKEN | + MI_SEMAPHORE_REGISTER_POLL | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; + *cs++ = 0; + *cs++ = 0; return cs; } @@ -252,10 +260,9 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) else if (engine->class == COMPUTE_CLASS) flags &= ~PIPE_CONTROL_3D_ENGINE_FLAGS; + count = 8; if (gen12_needs_ccs_aux_inv(rq->engine)) - count = 8 + 4; - else - count = 8; + count += 8; cs = intel_ring_begin(rq, count); if (IS_ERR(cs)) @@ -298,7 +305,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) aux_inv = rq->engine->mask & ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0); if (aux_inv) - cmd += 4; + cmd += 8; } } diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index d4e9702d3c8e..25ea5f8a464a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -120,6 +120,7 @@ #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ #define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */ +#define MI_SEMAPHORE_REGISTER_POLL (1 << 16) #define MI_SEMAPHORE_POLL (1 << 15) #define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) #define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) From 7e862cce34916458bf6af954d198cce103c1e13f Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Tue, 25 Jul 2023 02:19:50 +0200 Subject: [PATCH 023/123] drm/i915/gt: Support aux invalidation on all engines [ Upstream commit 6a35f22d222528e1b157c6978c9424d2f8cbe0a1 ] Perform some refactoring with the purpose of keeping in one single place all the operations around the aux table invalidation. With this refactoring add more engines where the invalidation should be performed. 
Fixes: 972282c4cf24 ("drm/i915/gen12: Add aux table invalidate for all engines") Signed-off-by: Andi Shyti Cc: Jonathan Cavitt Cc: Matt Roper Cc: # v5.8+ Reviewed-by: Andrzej Hajda Link: https://patchwork.freedesktop.org/patch/msgid/20230725001950.1014671-8-andi.shyti@linux.intel.com (cherry picked from commit 76ff7789d6e63d1a10b3b58f5c70b2e640c7a880) Signed-off-by: Tvrtko Ursulin Signed-off-by: Sasha Levin --- drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 66 +++++++++++++----------- drivers/gpu/drm/i915/gt/gen8_engine_cs.h | 3 +- drivers/gpu/drm/i915/gt/intel_lrc.c | 17 +----- 3 files changed, 41 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c index 975e31d876b1..cc8468536871 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -165,21 +165,47 @@ static u32 preparser_disable(bool state) return MI_ARB_CHECK | 1 << 8 | state; } +static i915_reg_t gen12_get_aux_inv_reg(struct intel_engine_cs *engine) +{ + switch (engine->id) { + case RCS0: + return GEN12_CCS_AUX_INV; + case BCS0: + return GEN12_BCS0_AUX_INV; + case VCS0: + return GEN12_VD0_AUX_INV; + case VCS2: + return GEN12_VD2_AUX_INV; + case VECS0: + return GEN12_VE0_AUX_INV; + case CCS0: + return GEN12_CCS0_AUX_INV; + default: + return INVALID_MMIO_REG; + } +} + static bool gen12_needs_ccs_aux_inv(struct intel_engine_cs *engine) { + i915_reg_t reg = gen12_get_aux_inv_reg(engine); + if (IS_PONTEVECCHIO(engine->i915)) return false; /* - * so far platforms supported by i915 having - * flat ccs do not require AUX invalidation + * So far platforms supported by i915 having flat ccs do not require + * AUX invalidation. Check also whether the engine requires it. */ - return !HAS_FLAT_CCS(engine->i915); + return i915_mmio_reg_valid(reg) && !HAS_FLAT_CCS(engine->i915); } -u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg) +u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs) { - u32 gsi_offset = gt->uncore->gsi_offset; + i915_reg_t inv_reg = gen12_get_aux_inv_reg(engine); + u32 gsi_offset = engine->gt->uncore->gsi_offset; + + if (!gen12_needs_ccs_aux_inv(engine)) + return cs; *cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; *cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset; @@ -277,11 +303,7 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - if (gen12_needs_ccs_aux_inv(rq->engine)) { - /* hsdes: 1809175790 */ - cs = gen12_emit_aux_table_inv(rq->engine->gt, cs, - GEN12_CCS_AUX_INV); - } + cs = gen12_emit_aux_table_inv(engine, cs); *cs++ = preparser_disable(false); intel_ring_advance(rq, cs); @@ -292,21 +314,14 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) { - intel_engine_mask_t aux_inv = 0; - u32 cmd, *cs; + u32 cmd = 4; + u32 *cs; - cmd = 4; if (mode & EMIT_INVALIDATE) { cmd += 2; - if (gen12_needs_ccs_aux_inv(rq->engine) && - (rq->engine->class == VIDEO_DECODE_CLASS || - rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) { - aux_inv = rq->engine->mask & - ~GENMASK(_BCS(I915_MAX_BCS - 1), BCS0); - if (aux_inv) - cmd += 8; - } + if (gen12_needs_ccs_aux_inv(rq->engine)) + cmd += 8; } cs = intel_ring_begin(rq, cmd); @@ -337,14 +352,7 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) *cs++ = 0; /* upper addr */ *cs++ = 0; /* value */ - if (aux_inv) { /* hsdes: 1809175790 */ - if (rq->engine->class == 
VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VD0_AUX_INV); - else - cs = gen12_emit_aux_table_inv(rq->engine->gt, - cs, GEN12_VE0_AUX_INV); - } + cs = gen12_emit_aux_table_inv(rq->engine, cs); if (mode & EMIT_INVALIDATE) *cs++ = preparser_disable(false); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h index e4d24c811dd6..651eb786e930 100644 --- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h @@ -13,6 +13,7 @@ #include "intel_gt_regs.h" #include "intel_gpu_commands.h" +struct intel_engine_cs; struct intel_gt; struct i915_request; @@ -46,7 +47,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); -u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg); +u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs); static inline u32 * __gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 137e41e37ea5..7eb01ff17d89 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1296,10 +1296,7 @@ gen12_emit_indirect_ctx_rcs(const struct intel_context *ce, u32 *cs) IS_DG2_G11(ce->engine->i915)) cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); - /* hsdes: 1809175790 */ - if (!HAS_FLAT_CCS(ce->engine->i915)) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_CCS_AUX_INV); + cs = gen12_emit_aux_table_inv(ce->engine, cs); /* Wa_16014892111 */ if (IS_DG2(ce->engine->i915)) @@ -1322,17 +1319,7 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs) PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE, 0); - /* hsdes: 1809175790 */ - if (!HAS_FLAT_CCS(ce->engine->i915)) { - if (ce->engine->class == VIDEO_DECODE_CLASS) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VD0_AUX_INV); - else if (ce->engine->class == VIDEO_ENHANCEMENT_CLASS) - cs = gen12_emit_aux_table_inv(ce->engine->gt, - cs, GEN12_VE0_AUX_INV); - } - - return cs; + return gen12_emit_aux_table_inv(ce->engine, cs); } static void From 7d0c2b0de2dbf086373049ac9bf45b9ae7f421bb Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Sat, 5 Aug 2023 11:38:15 +0800 Subject: [PATCH 024/123] tracing: Fix cpu buffers unavailable due to 'record_disabled' missed [ Upstream commit b71645d6af10196c46cbe3732de2ea7d36b3ff6d ] Trace ring buffer can no longer record anything after executing following commands at the shell prompt: # cd /sys/kernel/tracing # cat tracing_cpumask fff # echo 0 > tracing_cpumask # echo 1 > snapshot # echo fff > tracing_cpumask # echo 1 > tracing_on # echo "hello world" > trace_marker -bash: echo: write error: Bad file descriptor The root cause is that: 1. After `echo 0 > tracing_cpumask`, 'record_disabled' of cpu buffers in 'tr->array_buffer.buffer' became 1 (see tracing_set_cpumask()); 2. After `echo 1 > snapshot`, 'tr->array_buffer.buffer' is swapped with 'tr->max_buffer.buffer', then the 'record_disabled' became 0 (see update_max_tr()); 3. After `echo fff > tracing_cpumask`, the 'record_disabled' become -1; Then array_buffer and max_buffer are both unavailable due to value of 'record_disabled' is not 0. To fix it, enable or disable both array_buffer and max_buffer at the same time in tracing_set_cpumask(). 
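[ Editor's note: the following is an illustrative, stand-alone sketch (not part of the patch) of how the per-cpu record_disabled counters described above end up non-zero on both buffers when only the array buffer is toggled; the variable names are invented for the illustration. ]

#include <stdio.h>

int main(void)
{
	int array_buf = 0, max_buf = 0, tmp;

	array_buf += 1;		/* echo 0 > tracing_cpumask: disable the array buffer */

	tmp = array_buf;	/* echo 1 > snapshot: the two buffers are swapped */
	array_buf = max_buf;
	max_buf = tmp;

	array_buf -= 1;		/* echo fff > tracing_cpumask: re-enable, array buffer only */

	/* array_buf == -1 and max_buf == 1: both buffers now refuse to record */
	printf("array_buffer=%d max_buffer=%d\n", array_buf, max_buf);
	return 0;
}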
Link: https://lkml.kernel.org/r/20230805033816.3284594-2-zhengyejian1@huawei.com Cc: Cc: Cc: Fixes: 71babb2705e2 ("tracing: change CPU ring buffer state from tracing_cpumask") Signed-off-by: Zheng Yejian Signed-off-by: Steven Rostedt (Google) Signed-off-by: Sasha Levin --- kernel/trace/trace.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index af33c5a4166d..d56dcd78452c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5189,11 +5189,17 @@ int tracing_set_cpumask(struct trace_array *tr, !cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_inc(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_disable_cpu(tr->array_buffer.buffer, cpu); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_record_disable_cpu(tr->max_buffer.buffer, cpu); +#endif } if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && cpumask_test_cpu(cpu, tracing_cpumask_new)) { atomic_dec(&per_cpu_ptr(tr->array_buffer.data, cpu)->disabled); ring_buffer_record_enable_cpu(tr->array_buffer.buffer, cpu); +#ifdef CONFIG_TRACER_MAX_TRACE + ring_buffer_record_enable_cpu(tr->max_buffer.buffer, cpu); +#endif } } arch_spin_unlock(&tr->max_lock); From 2cb0c037c927db4ec928cc927488e52aa359786e Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Thu, 17 Aug 2023 20:55:39 +0800 Subject: [PATCH 025/123] tracing: Fix memleak due to race between current_tracer and trace [ Upstream commit eecb91b9f98d6427d4af5fdb8f108f52572a39e7 ] Kmemleak reports a leak in graph_trace_open(): unreferenced object 0xffff0040b95f4a00 (size 128): comm "cat", pid 204981, jiffies 4301155872 (age 99771.964s) hex dump (first 32 bytes): e0 05 e7 b4 ab 7d 00 00 0b 00 01 00 00 00 00 00 .....}.......... f4 00 01 10 00 a0 ff ff 00 00 00 00 65 00 10 00 ............e... backtrace: [<000000005db27c8b>] kmem_cache_alloc_trace+0x348/0x5f0 [<000000007df90faa>] graph_trace_open+0xb0/0x344 [<00000000737524cd>] __tracing_open+0x450/0xb10 [<0000000098043327>] tracing_open+0x1a0/0x2a0 [<00000000291c3876>] do_dentry_open+0x3c0/0xdc0 [<000000004015bcd6>] vfs_open+0x98/0xd0 [<000000002b5f60c9>] do_open+0x520/0x8d0 [<00000000376c7820>] path_openat+0x1c0/0x3e0 [<00000000336a54b5>] do_filp_open+0x14c/0x324 [<000000002802df13>] do_sys_openat2+0x2c4/0x530 [<0000000094eea458>] __arm64_sys_openat+0x130/0x1c4 [<00000000a71d7881>] el0_svc_common.constprop.0+0xfc/0x394 [<00000000313647bf>] do_el0_svc+0xac/0xec [<000000002ef1c651>] el0_svc+0x20/0x30 [<000000002fd4692a>] el0_sync_handler+0xb0/0xb4 [<000000000c309c35>] el0_sync+0x160/0x180 The root cause is described as follows: __tracing_open() { // 1. File 'trace' is being opened; ... *iter->trace = *tr->current_trace; // 2. Tracer 'function_graph' is // currently set; ... iter->trace->open(iter); // 3. Call graph_trace_open() here, // and memory is allocated in it; ... } s_start() { // 4. The opened file is being read; ... *iter->trace = *tr->current_trace; // 5. If the tracer is switched to // 'nop' or others, then the memory // allocated in step 3 is leaked!!! ... } To fix it, in s_start(), close the tracer before switching, then reopen the new tracer after switching. And some tracers like 'wakeup' may not update 'iter->private' in some cases when reopened, so it should be cleared to avoid being mistakenly closed again.
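[ Editor's note: a minimal, runnable C sketch of the close-before-copy/reopen-after pattern the fix applies in s_start(); the struct and hooks below are invented stand-ins, not the kernel's struct tracer API. ]

#include <stdio.h>
#include <stdlib.h>

struct tracer {
	void *priv;				/* state allocated by open() */
	void (*open)(struct tracer *t);
	void (*close)(struct tracer *t);
};

static void graph_open(struct tracer *t)  { t->priv = malloc(128); }
static void graph_close(struct tracer *t) { free(t->priv); t->priv = NULL; }

/* Fixed pattern: release the old tracer's state, copy the new tracer over,
 * then let the new one set itself up again. Copying without the close()
 * would overwrite priv and leak the allocation. */
static void switch_tracer(struct tracer *iter, const struct tracer *cur)
{
	if (iter->close)
		iter->close(iter);
	*iter = *cur;
	if (iter->open)
		iter->open(iter);
}

int main(void)
{
	struct tracer graph = { .open = graph_open, .close = graph_close };
	struct tracer nop = { 0 };
	struct tracer iter = graph;

	iter.open(&iter);		/* like __tracing_open(): tracer state allocated */
	switch_tracer(&iter, &nop);	/* like s_start() after current_trace changed */
	printf("priv after switch: %p (nothing leaked)\n", iter.priv);
	return 0;
}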
Link: https://lore.kernel.org/linux-trace-kernel/20230817125539.1646321-1-zhengyejian1@huawei.com Fixes: d7350c3f4569 ("tracing/core: make the read callbacks reentrants") Signed-off-by: Zheng Yejian Signed-off-by: Steven Rostedt (Google) Signed-off-by: Sasha Levin --- kernel/trace/trace.c | 9 ++++++++- kernel/trace/trace_irqsoff.c | 3 ++- kernel/trace/trace_sched_wakeup.c | 2 ++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d56dcd78452c..1a87cb70f1eb 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -4128,8 +4128,15 @@ static void *s_start(struct seq_file *m, loff_t *pos) * will point to the same string as current_trace->name. */ mutex_lock(&trace_types_lock); - if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) + if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name)) { + /* Close iter->trace before switching to the new current tracer */ + if (iter->trace->close) + iter->trace->close(iter); *iter->trace = *tr->current_trace; + /* Reopen the new current tracer */ + if (iter->trace->open) + iter->trace->open(iter); + } mutex_unlock(&trace_types_lock); #ifdef CONFIG_TRACER_MAX_TRACE diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 590b3d51afae..ba37f768e2f2 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -231,7 +231,8 @@ static void irqsoff_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); - + else + iter->private = NULL; } static void irqsoff_trace_close(struct trace_iterator *iter) diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 330aee1c1a49..0469a04a355f 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -168,6 +168,8 @@ static void wakeup_trace_open(struct trace_iterator *iter) { if (is_graph(iter->tr)) graph_trace_open(iter); + else + iter->private = NULL; } static void wakeup_trace_close(struct trace_iterator *iter) From eaeef5c865ab9dc5c153b552b52ed0d90eea614e Mon Sep 17 00:00:00 2001 From: Hariprasad Kelam Date: Thu, 17 Aug 2023 12:00:06 +0530 Subject: [PATCH 026/123] octeontx2-af: SDP: fix receive link config [ Upstream commit 05f3d5bc23524bed6f043dfe6b44da687584f9fb ] On SDP interfaces, frame oversize and undersize errors are observed as driver is not considering packet sizes of all subscribers of the link before updating the link config. This patch fixes the same. 
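[ Editor's note: an illustrative sketch, not driver code, of the idea behind the fix: the link's frame-size limits have to be derived from every function subscribed to the link before the hardware register is programmed. All names below are invented. ]

#include <stdio.h>
#include <stdint.h>

struct subscriber { uint16_t maxlen, minlen; };

static void link_frame_limits(const struct subscriber *subs, int n,
			      uint16_t *maxlen, uint16_t *minlen)
{
	int i;

	*maxlen = 0;
	*minlen = UINT16_MAX;
	for (i = 0; i < n; i++) {	/* widest and narrowest over all users of the link */
		if (subs[i].maxlen > *maxlen)
			*maxlen = subs[i].maxlen;
		if (subs[i].minlen < *minlen)
			*minlen = subs[i].minlen;
	}
}

int main(void)
{
	struct subscriber subs[] = { { 1518, 64 }, { 9212, 60 }, { 256, 64 } };
	uint16_t maxlen, minlen;

	link_frame_limits(subs, 3, &maxlen, &minlen);
	printf("link maxlen=%u minlen=%u\n", maxlen, minlen);
	return 0;
}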
Fixes: 9b7dd87ac071 ("octeontx2-af: Support to modify min/max allowed packet lengths") Signed-off-by: Hariprasad Kelam Signed-off-by: Sunil Goutham Reviewed-by: Leon Romanovsky Link: https://lore.kernel.org/r/20230817063006.10366-1-hkelam@marvell.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 705325431dec..5541e284cd3f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -4005,9 +4005,10 @@ rx_frscfg: if (link < 0) return NIX_AF_ERR_RX_LINK_INVALID; - nix_find_link_frs(rvu, req, pcifunc); linkcfg: + nix_find_link_frs(rvu, req, pcifunc); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); if (req->update_minlen) From 1375d2061204785d592c05f5dec47eb487ec3515 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Wed, 4 Jan 2023 20:05:17 -0800 Subject: [PATCH 027/123] devlink: move code to a dedicated directory [ Upstream commit f05bd8ebeb69c803efd6d8a76d96b7fcd7011094 ] The devlink code is hard to navigate with 13kLoC in one file. I really like the way Michal split the ethtool into per-command files and core. It'd probably be too much to split it all up, but we can at least separate the core parts out of the per-cmd implementations and put it in a directory so that new commands can be separate files. Move the code, subsequent commit will do a partial split. Reviewed-by: Jacob Keller Reviewed-by: Jiri Pirko Signed-off-by: Jakub Kicinski Stable-dep-of: 2ebbc9752d06 ("devlink: add missing unregister linecard notification") Signed-off-by: Sasha Levin --- MAINTAINERS | 2 +- net/Makefile | 1 + net/core/Makefile | 1 - net/devlink/Makefile | 3 +++ net/{core/devlink.c => devlink/leftover.c} | 0 5 files changed, 5 insertions(+), 2 deletions(-) create mode 100644 net/devlink/Makefile rename net/{core/devlink.c => devlink/leftover.c} (100%) diff --git a/MAINTAINERS b/MAINTAINERS index 379387e20a96..07a9c274c0e2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6027,7 +6027,7 @@ S: Supported F: Documentation/networking/devlink F: include/net/devlink.h F: include/uapi/linux/devlink.h -F: net/core/devlink.c +F: net/devlink/ DH ELECTRONICS IMX6 DHCOM BOARD SUPPORT M: Christoph Niedermaier diff --git a/net/Makefile b/net/Makefile index 6a62e5b27378..0914bea9c335 100644 --- a/net/Makefile +++ b/net/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_BPFILTER) += bpfilter/ obj-$(CONFIG_PACKET) += packet/ obj-$(CONFIG_NET_KEY) += key/ obj-$(CONFIG_BRIDGE) += bridge/ +obj-$(CONFIG_NET_DEVLINK) += devlink/ obj-$(CONFIG_NET_DSA) += dsa/ obj-$(CONFIG_ATALK) += appletalk/ obj-$(CONFIG_X25) += x25/ diff --git a/net/core/Makefile b/net/core/Makefile index 5857cec87b83..10edd66a8a37 100644 --- a/net/core/Makefile +++ b/net/core/Makefile @@ -33,7 +33,6 @@ obj-$(CONFIG_LWTUNNEL) += lwtunnel.o obj-$(CONFIG_LWTUNNEL_BPF) += lwt_bpf.o obj-$(CONFIG_DST_CACHE) += dst_cache.o obj-$(CONFIG_HWBM) += hwbm.o -obj-$(CONFIG_NET_DEVLINK) += devlink.o obj-$(CONFIG_GRO_CELLS) += gro_cells.o obj-$(CONFIG_FAILOVER) += failover.o obj-$(CONFIG_NET_SOCK_MSG) += skmsg.o diff --git a/net/devlink/Makefile b/net/devlink/Makefile new file mode 100644 index 000000000000..3a60959f71ee --- /dev/null +++ b/net/devlink/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + 
+obj-y := leftover.o diff --git a/net/core/devlink.c b/net/devlink/leftover.c similarity index 100% rename from net/core/devlink.c rename to net/devlink/leftover.c From b701b8d191daae3442942a1c68bb6e0582b566b2 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Thu, 17 Aug 2023 14:52:40 +0200 Subject: [PATCH 028/123] devlink: add missing unregister linecard notification [ Upstream commit 2ebbc9752d06bb1d01201fe632cb6da033b0248d ] Cited fixes commit introduced linecard notifications for register, however it didn't add them for unregister. Fix that by adding them. Fixes: c246f9b5fd61 ("devlink: add support to create line card and expose to user") Signed-off-by: Jiri Pirko Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230817125240.2144794-1-jiri@resnulli.us Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/devlink/leftover.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/devlink/leftover.c b/net/devlink/leftover.c index 5a4a4b34ac15..63188d6a50fe 100644 --- a/net/devlink/leftover.c +++ b/net/devlink/leftover.c @@ -9727,6 +9727,7 @@ static void devlink_notify_unregister(struct devlink *devlink) struct devlink_param_item *param_item; struct devlink_trap_item *trap_item; struct devlink_port *devlink_port; + struct devlink_linecard *linecard; struct devlink_rate *rate_node; struct devlink_region *region; @@ -9753,6 +9754,8 @@ static void devlink_notify_unregister(struct devlink *devlink) list_for_each_entry_reverse(devlink_port, &devlink->port_list, list) devlink_port_notify(devlink_port, DEVLINK_CMD_PORT_DEL); + list_for_each_entry_reverse(linecard, &devlink->linecard_list, list) + devlink_linecard_notify(linecard, DEVLINK_CMD_LINECARD_DEL); devlink_notify(devlink, DEVLINK_CMD_DEL); } From cfee17993d1065d7d6553e57d276cb36c2a768d4 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Thu, 17 Aug 2023 15:01:11 +0300 Subject: [PATCH 029/123] net: dsa: felix: fix oversize frame dropping for always closed tc-taprio gates [ Upstream commit d44036cad31170da0cb9c728e80743f84267da6e ] The blamed commit resolved a bug where frames would still get stuck at egress, even though they're smaller than the maxSDU[tc], because the driver did not take into account the extra 33 ns that the queue system needs for scheduling the frame. It now takes that into account, but the arithmetic that we perform in vsc9959_tas_remaining_gate_len_ps() is buggy, because we operate on 64-bit unsigned integers, so gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS may become a very large integer if gate_len_ns < 33 ns. In practice, this means that we've introduced a regression where all traffic class gates which are permanently closed will not get detected by the driver, and we won't enable oversize frame dropping for them. 
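[ Editor's note: a small stand-alone C example, not part of the patch, of the unsigned underflow described above; 33 mirrors VSC9959_TAS_MIN_GATE_LEN_NS. For a permanently closed gate (gate_len_ns == 0) the old arithmetic wraps around to a huge value, so the gate never looks too short. ]

#include <stdio.h>
#include <stdint.h>

#define MIN_GATE_LEN_NS	33ULL
#define PSEC_PER_NSEC	1000ULL

int main(void)
{
	uint64_t gate_len_ns = 0;	/* a gate that never opens */
	uint64_t buggy, fixed;

	buggy = (gate_len_ns - MIN_GATE_LEN_NS) * PSEC_PER_NSEC;	/* wraps to ~1.8e19 */
	fixed = gate_len_ns < MIN_GATE_LEN_NS ?
		0 : (gate_len_ns - MIN_GATE_LEN_NS) * PSEC_PER_NSEC;

	printf("buggy=%llu fixed=%llu\n",
	       (unsigned long long)buggy, (unsigned long long)fixed);
	return 0;
}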
Before: mscc_felix 0000:00:00.5: port 0: max frame size 1526 needs 12400000 ps, 1152000 ps for mPackets at speed 1000 mscc_felix 0000:00:00.5: port 0 tc 0 min gate len 1000000, sending all frames mscc_felix 0000:00:00.5: port 0 tc 1 min gate len 0, sending all frames mscc_felix 0000:00:00.5: port 0 tc 2 min gate len 0, sending all frames mscc_felix 0000:00:00.5: port 0 tc 3 min gate len 0, sending all frames mscc_felix 0000:00:00.5: port 0 tc 4 min gate len 0, sending all frames mscc_felix 0000:00:00.5: port 0 tc 5 min gate len 0, sending all frames mscc_felix 0000:00:00.5: port 0 tc 6 min gate len 0, sending all frames mscc_felix 0000:00:00.5: port 0 tc 7 min gate length 5120 ns not enough for max frame size 1526 at 1000 Mbps, dropping frames over 615 octets including FCS After: mscc_felix 0000:00:00.5: port 0: max frame size 1526 needs 12400000 ps, 1152000 ps for mPackets at speed 1000 mscc_felix 0000:00:00.5: port 0 tc 0 min gate len 1000000, sending all frames mscc_felix 0000:00:00.5: port 0 tc 1 min gate length 0 ns not enough for max frame size 1526 at 1000 Mbps, dropping frames over 1 octets including FCS mscc_felix 0000:00:00.5: port 0 tc 2 min gate length 0 ns not enough for max frame size 1526 at 1000 Mbps, dropping frames over 1 octets including FCS mscc_felix 0000:00:00.5: port 0 tc 3 min gate length 0 ns not enough for max frame size 1526 at 1000 Mbps, dropping frames over 1 octets including FCS mscc_felix 0000:00:00.5: port 0 tc 4 min gate length 0 ns not enough for max frame size 1526 at 1000 Mbps, dropping frames over 1 octets including FCS mscc_felix 0000:00:00.5: port 0 tc 5 min gate length 0 ns not enough for max frame size 1526 at 1000 Mbps, dropping frames over 1 octets including FCS mscc_felix 0000:00:00.5: port 0 tc 6 min gate length 0 ns not enough for max frame size 1526 at 1000 Mbps, dropping frames over 1 octets including FCS mscc_felix 0000:00:00.5: port 0 tc 7 min gate length 5120 ns not enough for max frame size 1526 at 1000 Mbps, dropping frames over 615 octets including FCS Fixes: 11afdc6526de ("net: dsa: felix: tc-taprio intervals smaller than MTU should send at least one packet") Signed-off-by: Vladimir Oltean Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230817120111.3522827-1-vladimir.oltean@nxp.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/dsa/ocelot/felix_vsc9959.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 5f6af0870dfd..0186482194d2 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -1071,6 +1071,9 @@ static u64 vsc9959_tas_remaining_gate_len_ps(u64 gate_len_ns) if (gate_len_ns == U64_MAX) return U64_MAX; + if (gate_len_ns < VSC9959_TAS_MIN_GATE_LEN_NS) + return 0; + return (gate_len_ns - VSC9959_TAS_MIN_GATE_LEN_NS) * PSEC_PER_NSEC; } From b516a24f4c07426028baaa55595f50730df512c6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 Aug 2023 01:51:32 +0000 Subject: [PATCH 030/123] sock: annotate data-races around prot->memory_pressure [ Upstream commit 76f33296d2e09f63118db78125c95ef56df438e9 ] *prot->memory_pressure is read/writen locklessly, we need to add proper annotations. A recent commit added a new race, it is time to audit all accesses. 
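[ Editor's note: an illustrative sketch of the annotation pattern the patch applies; the simplified READ_ONCE()/WRITE_ONCE() macros are stand-ins for the kernel's, shown only to make the idea concrete: force single, untorn loads/stores so the lockless readers and the writer are well defined for tools like KCSAN. ]

#include <stdio.h>

#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

static unsigned long memory_pressure;

static void enter_memory_pressure(void)
{
	WRITE_ONCE(memory_pressure, 1);		/* lockless writer */
}

static int under_memory_pressure(void)
{
	return !!READ_ONCE(memory_pressure);	/* lockless readers */
}

int main(void)
{
	enter_memory_pressure();
	printf("under pressure: %d\n", under_memory_pressure());
	return 0;
}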
Fixes: 2d0c88e84e48 ("sock: Fix misuse of sk_under_memory_pressure()") Fixes: 4d93df0abd50 ("[SCTP]: Rewrite of sctp buffer management code") Signed-off-by: Eric Dumazet Cc: Abel Wu Reviewed-by: Shakeel Butt Link: https://lore.kernel.org/r/20230818015132.2699348-1-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- include/net/sock.h | 7 ++++--- net/sctp/socket.c | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/net/sock.h b/include/net/sock.h index 699408944952..d1f936ed9755 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1320,6 +1320,7 @@ struct proto { /* * Pressure flag: try to collapse. * Technical note: it is used by multiple contexts non atomically. + * Make sure to use READ_ONCE()/WRITE_ONCE() for all reads/writes. * All the __sk_mem_schedule() is of this nature: accounting * is strict, actions are advisory and have some latency. */ @@ -1448,7 +1449,7 @@ static inline bool sk_has_memory_pressure(const struct sock *sk) static inline bool sk_under_global_memory_pressure(const struct sock *sk) { return sk->sk_prot->memory_pressure && - !!*sk->sk_prot->memory_pressure; + !!READ_ONCE(*sk->sk_prot->memory_pressure); } static inline bool sk_under_memory_pressure(const struct sock *sk) @@ -1460,7 +1461,7 @@ static inline bool sk_under_memory_pressure(const struct sock *sk) mem_cgroup_under_socket_pressure(sk->sk_memcg)) return true; - return !!*sk->sk_prot->memory_pressure; + return !!READ_ONCE(*sk->sk_prot->memory_pressure); } static inline long @@ -1537,7 +1538,7 @@ proto_memory_pressure(struct proto *prot) { if (!prot->memory_pressure) return false; - return !!*prot->memory_pressure; + return !!READ_ONCE(*prot->memory_pressure); } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c806d272107a..83656fe03a0e 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -98,7 +98,7 @@ struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) { - sctp_memory_pressure = 1; + WRITE_ONCE(sctp_memory_pressure, 1); } From 265ed382e0f4c7b28742e542d4b14799e131a57e Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 18 Aug 2023 01:58:20 +0000 Subject: [PATCH 031/123] dccp: annotate data-races in dccp_poll() [ Upstream commit cba3f1786916063261e3e5ccbb803abc325b24ef ] We changed tcp_poll() over time, bug never updated dccp. Note that we also could remove dccp instead of maintaining it. Fixes: 7c657876b63c ("[DCCP]: Initial implementation") Signed-off-by: Eric Dumazet Link: https://lore.kernel.org/r/20230818015820.2701595-1-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/dccp/proto.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/net/dccp/proto.c b/net/dccp/proto.c index abc02d25edc1..c522c76a9f89 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -312,11 +312,15 @@ EXPORT_SYMBOL_GPL(dccp_disconnect); __poll_t dccp_poll(struct file *file, struct socket *sock, poll_table *wait) { - __poll_t mask; struct sock *sk = sock->sk; + __poll_t mask; + u8 shutdown; + int state; sock_poll_wait(file, sock, wait); - if (sk->sk_state == DCCP_LISTEN) + + state = inet_sk_state_load(sk); + if (state == DCCP_LISTEN) return inet_csk_listen_poll(sk); /* Socket is not locked. 
We are protected from async events @@ -325,20 +329,21 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, */ mask = 0; - if (sk->sk_err) + if (READ_ONCE(sk->sk_err)) mask = EPOLLERR; + shutdown = READ_ONCE(sk->sk_shutdown); - if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) + if (shutdown == SHUTDOWN_MASK || state == DCCP_CLOSED) mask |= EPOLLHUP; - if (sk->sk_shutdown & RCV_SHUTDOWN) + if (shutdown & RCV_SHUTDOWN) mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; /* Connected? */ - if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { + if ((1 << state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { if (atomic_read(&sk->sk_rmem_alloc) > 0) mask |= EPOLLIN | EPOLLRDNORM; - if (!(sk->sk_shutdown & SEND_SHUTDOWN)) { + if (!(shutdown & SEND_SHUTDOWN)) { if (sk_stream_is_writeable(sk)) { mask |= EPOLLOUT | EPOLLWRNORM; } else { /* send SIGIO later */ @@ -356,7 +361,6 @@ __poll_t dccp_poll(struct file *file, struct socket *sock, } return mask; } - EXPORT_SYMBOL_GPL(dccp_poll); int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg) From 4496f6ccf599054b9a8d2a9d41e6410221269aa7 Mon Sep 17 00:00:00 2001 From: Lu Wei Date: Thu, 17 Aug 2023 22:54:49 +0800 Subject: [PATCH 032/123] ipvlan: Fix a reference count leak warning in ipvlan_ns_exit() [ Upstream commit 043d5f68d0ccdda91029b4b6dce7eeffdcfad281 ] There are two network devices (veth1 and veth3) in ns1, and ipvlan1 with L3S mode and ipvlan2 with L2 mode are created based on them as in figure (1). In this case, ipvlan_register_nf_hook() will be called to register the nf hook which is needed by ipvlans in L3S mode in ns1 and the value of ipvl_nf_hook_refcnt is set to 1. (1) ns1 ns2 ------------ ------------ veth1--ipvlan1 (L3S) veth3--ipvlan2 (L2) (2) ns1 ns2 ------------ ------------ veth1--ipvlan1 (L3S) ipvlan2 (L2) veth3 | | |------->-------->--------->-------- migrate When veth3 migrates from ns1 to ns2 as in figure (2), veth3 will register in ns2 and call call_netdevice_notifiers with the NETDEV_REGISTER event: dev_change_net_namespace call_netdevice_notifiers ipvlan_device_event ipvlan_migrate_l3s_hook ipvlan_register_nf_hook(newnet) (I) ipvlan_unregister_nf_hook(oldnet) (II) In function ipvlan_migrate_l3s_hook(), ipvl_nf_hook_refcnt in ns1 is not 0 since veth1 with ipvlan1 is still in ns1, so (I) and (II) will be called to register the nf_hook in ns2 and unregister the nf_hook in ns1. As a result, ipvl_nf_hook_refcnt in ns1 is decreased incorrectly and the one in ns2 is increased incorrectly. When the second net namespace is removed, a reference count leak warning in ipvlan_ns_exit() will be triggered. This patch adds a check before ipvlan_migrate_l3s_hook() is called.
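[ Editor's note: a stand-alone sketch with invented names, not driver code, of the per-namespace hook refcount bookkeeping, showing why migrating an L2 port used to move a reference that belongs to the L3S port left behind in the old namespace. ]

#include <stdio.h>

enum mode { L2, L3S };

static int hook_refcnt_ns1, hook_refcnt_ns2;

static void migrate_port(enum mode port_mode, int apply_fix)
{
	if (apply_fix && port_mode != L3S)
		return;			/* the fix: only L3S ports move the nf hook */
	hook_refcnt_ns2++;		/* ipvlan_register_nf_hook(newnet) */
	hook_refcnt_ns1--;		/* ipvlan_unregister_nf_hook(oldnet) */
}

int main(void)
{
	hook_refcnt_ns1 = 1;		/* ipvlan1 (L3S) holds the hook in ns1 */
	migrate_port(L2, 0);		/* move veth3/ipvlan2 (L2) without the fix */
	printf("buggy: ns1=%d ns2=%d\n", hook_refcnt_ns1, hook_refcnt_ns2);

	hook_refcnt_ns1 = 1;
	hook_refcnt_ns2 = 0;
	migrate_port(L2, 1);		/* with the fix the counts stay untouched */
	printf("fixed: ns1=%d ns2=%d\n", hook_refcnt_ns1, hook_refcnt_ns2);
	return 0;
}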
The warning can be triggered as follows: $ ip netns add ns1 $ ip netns add ns2 $ ip netns exec ns1 ip link add veth1 type veth peer name veth2 $ ip netns exec ns1 ip link add veth3 type veth peer name veth4 $ ip netns exec ns1 ip link add ipv1 link veth1 type ipvlan mode l3s $ ip netns exec ns1 ip link add ipv2 link veth3 type ipvlan mode l2 $ ip netns exec ns1 ip link set veth3 netns ns2 $ ip net del ns2 Fixes: 3133822f5ac1 ("ipvlan: use pernet operations and restrict l3s hooks to master netns") Signed-off-by: Lu Wei Reviewed-by: Florian Westphal Link: https://lore.kernel.org/r/20230817145449.141827-1-luwei32@huawei.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ipvlan/ipvlan_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 796a38f9d7b2..cd16bc8bf154 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -748,7 +748,8 @@ static int ipvlan_device_event(struct notifier_block *unused, write_pnet(&port->pnet, newnet); - ipvlan_migrate_l3s_hook(oldnet, newnet); + if (port->mode == IPVLAN_MODE_L3S) + ipvlan_migrate_l3s_hook(oldnet, newnet); break; } case NETDEV_UNREGISTER: From 22f9b5468df567ec3b3f493e16c65ff5df5fdff0 Mon Sep 17 00:00:00 2001 From: Danielle Ratson Date: Thu, 17 Aug 2023 15:58:22 +0200 Subject: [PATCH 033/123] mlxsw: pci: Set time stamp fields also when its type is MIRROR_UTC [ Upstream commit bc2de151ab6ad0762a04563527ec42e54dde572a ] Currently, in Spectrum-2 and above, time stamps are extracted from the CQE into the time stamp fields in 'struct mlxsw_skb_cb', only when the CQE time stamp type is UTC. The time stamps are read directly from the CQE and software can get the time stamp in UTC format using CQEv2. From Spectrum-4, the time stamps that are read from the CQE are allowed to be also from MIRROR_UTC type. Therefore, we get a warning [1] from the driver that the time stamp fields were not set, when LLDP control packet is sent. Allow the time stamp type to be MIRROR_UTC and set the time stamp in this case as well. [1] WARNING: CPU: 11 PID: 0 at drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c:1409 mlxsw_sp2_ptp_hwtstamp_fill+0x1f/0x70 [mlxsw_spectrum] [...] 
Call Trace: mlxsw_sp2_ptp_receive+0x3c/0x80 [mlxsw_spectrum] mlxsw_core_skb_receive+0x119/0x190 [mlxsw_core] mlxsw_pci_cq_tasklet+0x3c9/0x780 [mlxsw_pci] tasklet_action_common.constprop.0+0x9f/0x110 __do_softirq+0xbb/0x296 irq_exit_rcu+0x79/0xa0 common_interrupt+0x86/0xa0 Fixes: 4735402173e6 ("mlxsw: spectrum: Extend to support Spectrum-4 ASIC") Signed-off-by: Danielle Ratson Reviewed-by: Ido Schimmel Signed-off-by: Petr Machata Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/bcef4d044ef608a4e258d33a7ec0ecd91f480db5.1692268427.git.petrm@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlxsw/pci.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c968309657dd..51eea1f0529c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -517,11 +517,15 @@ static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci, struct sk_buff *skb, enum mlxsw_pci_cqe_v cqe_v, char *cqe) { + u8 ts_type; + if (cqe_v != MLXSW_PCI_CQE_V2) return; - if (mlxsw_pci_cqe2_time_stamp_type_get(cqe) != - MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC) + ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe); + + if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC && + ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC) return; mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe); From 7134565a8207fc6fafe188c95bdd5f09744ccfec Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 17 Aug 2023 15:58:23 +0200 Subject: [PATCH 034/123] mlxsw: reg: Fix SSPR register layout [ Upstream commit 0dc63b9cfd4c5666ced52c829fdd65dcaeb9f0f1 ] The two most significant bits of the "local_port" field in the SSPR register are always cleared since they are overwritten by the deprecated and overlapping "sub_port" field. On systems with more than 255 local ports (e.g., Spectrum-4), this results in the firmware maintaining invalid mappings between system port and local port. Specifically, two different systems ports (0x1 and 0x101) point to the same local port (0x1), which eventually leads to firmware errors. Fix by removing the deprecated "sub_port" field. Fixes: fd24b29a1b74 ("mlxsw: reg: Align existing registers to use extended local_port field") Signed-off-by: Ido Schimmel Signed-off-by: Petr Machata Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/9b909a3033c8d3d6f67f237306bef4411c5e6ae4.1692268427.git.petrm@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlxsw/reg.h | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 0777bed5bb1a..a34ff19c58bd 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -97,14 +97,6 @@ MLXSW_ITEM32(reg, sspr, m, 0x00, 31, 1); */ MLXSW_ITEM32_LP(reg, sspr, 0x00, 16, 0x00, 12); -/* reg_sspr_sub_port - * Virtual port within the physical port. - * Should be set to 0 when virtual ports are not enabled on the port. - * - * Access: RW - */ -MLXSW_ITEM32(reg, sspr, sub_port, 0x00, 8, 8); - /* reg_sspr_system_port * Unique identifier within the stacking domain that represents all the ports * that are available in the system (external ports). 
@@ -120,7 +112,6 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u16 local_port) MLXSW_REG_ZERO(sspr, payload); mlxsw_reg_sspr_m_set(payload, 1); mlxsw_reg_sspr_local_port_set(payload, local_port); - mlxsw_reg_sspr_sub_port_set(payload, 0); mlxsw_reg_sspr_system_port_set(payload, local_port); } From 1288f9907514b8e0cc1e17645252429ee0917182 Mon Sep 17 00:00:00 2001 From: Amit Cohen Date: Thu, 17 Aug 2023 15:58:24 +0200 Subject: [PATCH 035/123] mlxsw: Fix the size of 'VIRT_ROUTER_MSB' [ Upstream commit 348c976be0a599918b88729def198a843701c9fe ] The field 'virtual router' was extended to 12 bits in Spectrum-4. Therefore, the element 'MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB' needs 3 bits for Spectrum < 4 and 4 bits for Spectrum >= 4. The elements are stored in an internal storage scratchpad. Currently, the MSB is defined there as 3 bits. It means that for Spectrum-4, only 2K VRFs can be used for multicast routing, as the highest bit is not really used by the driver. Fix the definition of 'VIRT_ROUTER_MSB' to use 4 bits. Adjust the definitions of 'virtual router' field in the blocks accordingly - use '_avoid_size_check' for Spectrum-2 instead of for Spectrum-4. Fix the mask in parse function to use 4 bits. Fixes: 6d5d8ebb881c ("mlxsw: Rename virtual router flex key element") Signed-off-by: Amit Cohen Reviewed-by: Ido Schimmel Signed-off-by: Petr Machata Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/79bed2b70f6b9ed58d4df02e9798a23da648015b.1692268427.git.petrm@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 4 ++-- drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c | 2 +- drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index bd1a51a0a540..f208a237d0b5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -32,8 +32,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 3), - MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 20, 8), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_MSB, 0x18, 17, 4), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_LSB, 0x18, 21, 8), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c index e4f4cded2b6f..b1178b7a7f51 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -193,7 +193,7 @@ mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule, key->vrid, GENMASK(7, 0)); mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB, - key->vrid >> 8, GENMASK(2, 0)); + key->vrid >> 8, GENMASK(3, 0)); switch (key->proto) { case MLXSW_SP_L3_PROTO_IPV4: return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index 00c32320f891..173808c096ba 
100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -169,7 +169,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 24, 8), - MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x00, 0, 3), + MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x00, 0, 3, 0, true), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { @@ -319,7 +319,7 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_5b[] = { static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4b[] = { MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_LSB, 0x04, 13, 8), - MLXSW_AFK_ELEMENT_INST_EXT_U32(VIRT_ROUTER_MSB, 0x04, 21, 4, 0, true), + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_MSB, 0x04, 21, 4), }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_2b[] = { From c663607202f58e0fe17d2db1f9967d641c0195f5 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Thu, 17 Aug 2023 15:58:25 +0200 Subject: [PATCH 036/123] selftests: mlxsw: Fix test failure on Spectrum-4 [ Upstream commit f520489e99a35b0a5257667274fbe9afd2d8c50b ] Remove assumptions about shared buffer cell size and instead query the cell size from devlink. Adjust the test to send small packets that fit inside a single cell. Tested on Spectrum-{1,2,3,4}. Fixes: 4735402173e6 ("mlxsw: spectrum: Extend to support Spectrum-4 ASIC") Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Signed-off-by: Petr Machata Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/f7dfbf3c4d1cb23838d9eb99bab09afaa320c4ca.1692268427.git.petrm@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- .../selftests/drivers/net/mlxsw/sharedbuffer.sh | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh index 7d9e73a43a49..0c47faff9274 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/sharedbuffer.sh @@ -98,12 +98,12 @@ sb_occ_etc_check() port_pool_test() { - local exp_max_occ=288 + local exp_max_occ=$(devlink_cell_size_get) local max_occ devlink sb occupancy clearmax $DEVLINK_DEV - $MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ + $MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ -t ip -q devlink sb occupancy snapshot $DEVLINK_DEV @@ -126,12 +126,12 @@ port_pool_test() port_tc_ip_test() { - local exp_max_occ=288 + local exp_max_occ=$(devlink_cell_size_get) local max_occ devlink sb occupancy clearmax $DEVLINK_DEV - $MZ $h1 -c 1 -p 160 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ + $MZ $h1 -c 1 -p 10 -a $h1mac -b $h2mac -A 192.0.1.1 -B 192.0.1.2 \ -t ip -q devlink sb occupancy snapshot $DEVLINK_DEV @@ -154,16 +154,12 @@ port_tc_ip_test() port_tc_arp_test() { - local exp_max_occ=96 + local exp_max_occ=$(devlink_cell_size_get) local max_occ - if [[ $MLXSW_CHIP != "mlxsw_spectrum" ]]; then - exp_max_occ=144 - fi - devlink sb occupancy clearmax $DEVLINK_DEV - $MZ $h1 -c 1 -p 160 -a $h1mac -A 192.0.1.1 -t arp -q + $MZ $h1 -c 1 -p 10 -a $h1mac -A 192.0.1.1 -t arp -q devlink sb occupancy snapshot $DEVLINK_DEV From ac259251487a174ece97dd745727e39712f18ebf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ar=C4=B1n=C3=A7=20=C3=9CNAL?= Date: Sun, 13 Aug 2023 
13:59:17 +0300 Subject: [PATCH 037/123] net: dsa: mt7530: fix handling of 802.1X PAE frames MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e94b590abfff2cdbf0bdaa7d9904364c8d480af5 ] 802.1X PAE frames are link-local frames, therefore they must be trapped to the CPU port. Currently, the MT753X switches treat 802.1X PAE frames as regular multicast frames, therefore flooding them to user ports. To fix this, set 802.1X PAE frames to be trapped to the CPU port(s). Fixes: b8f126a8d543 ("net-next: dsa: add dsa support for Mediatek MT7530 switch") Signed-off-by: Arınç ÜNAL Reviewed-by: Vladimir Oltean Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/dsa/mt7530.c | 4 ++++ drivers/net/dsa/mt7530.h | 2 ++ 2 files changed, 6 insertions(+) diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 51d2ef0dc835..b988c8a40d53 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1005,6 +1005,10 @@ mt753x_trap_frames(struct mt7530_priv *priv) mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK, MT753X_BPDU_CPU_ONLY); + /* Trap 802.1X PAE frames to the CPU port(s) */ + mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK, + MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY)); + /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */ mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK, MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY)); diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 9a45663d8b4e..6202b0f8c3f3 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -64,6 +64,8 @@ enum mt753x_id { /* Registers for BPDU and PAE frame control*/ #define MT753X_BPC 0x24 #define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0) +#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16) +#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x) /* Register for :03 and :0E MAC DA frame control */ #define MT753X_RGAC2 0x2c From 029e491b8c11859525a1d6a307622bbc3a4ae559 Mon Sep 17 00:00:00 2001 From: Ruan Jinjie Date: Fri, 18 Aug 2023 13:12:20 +0800 Subject: [PATCH 038/123] net: bgmac: Fix return value check for fixed_phy_register() [ Upstream commit 23a14488ea5882dea5851b65c9fce2127ee8fcad ] The fixed_phy_register() function returns error pointers and never returns NULL. Update the checks accordingly. Fixes: c25b23b8a387 ("bgmac: register fixed PHY for ARM BCM470X / BCM5301X chipsets") Signed-off-by: Ruan Jinjie Reviewed-by: Andrew Lunn Reviewed-by: Leon Romanovsky Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/bgmac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 10c7c232cc4e..52ee3751187a 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac) int err; phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phy_dev || IS_ERR(phy_dev)) { + if (IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return -ENODEV; } From afc9d3d217939842ba4e2795356b8dd53f7a2ed6 Mon Sep 17 00:00:00 2001 From: Ruan Jinjie Date: Fri, 18 Aug 2023 13:12:21 +0800 Subject: [PATCH 039/123] net: bcmgenet: Fix return value check for fixed_phy_register() [ Upstream commit 32bbe64a1386065ab2aef8ce8cae7c689d0add6e ] The fixed_phy_register() function returns error pointers and never returns NULL. 
Update the checks accordingly. Fixes: b0ba512e25d7 ("net: bcmgenet: enable driver to work without a device tree") Signed-off-by: Ruan Jinjie Reviewed-by: Leon Romanovsky Acked-by: Doug Berger Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/broadcom/genet/bcmmii.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index 1fe8038587ac..1779ee524dac 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -608,7 +608,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv) }; phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); - if (!phydev || IS_ERR(phydev)) { + if (IS_ERR(phydev)) { dev_err(kdev, "failed to register fixed PHY device\n"); return -ENODEV; } From 4af1fe642f3724f1567e7c2017eeb857b08399d6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 18 Aug 2023 18:26:02 -0700 Subject: [PATCH 040/123] net: validate veth and vxcan peer ifindexes [ Upstream commit f534f6581ec084fe94d6759f7672bd009794b07e ] veth and vxcan need to make sure the ifindexes of the peer are not negative, core does not validate this. Using iproute2 with user-space-level checking removed: Before: # ./ip link add index 10 type veth peer index -1 # ip link show 1: lo: mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000 link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 2: enp1s0: mtu 1500 qdisc fq_codel state UP mode DEFAULT group default qlen 1000 link/ether 52:54:00:74:b2:03 brd ff:ff:ff:ff:ff:ff 10: veth1@veth0: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000 link/ether 8a:90:ff:57:6d:5d brd ff:ff:ff:ff:ff:ff -1: veth0@veth1: mtu 1500 qdisc noop state DOWN mode DEFAULT group default qlen 1000 link/ether ae:ed:18:e6:fa:7f brd ff:ff:ff:ff:ff:ff Now: $ ./ip link add index 10 type veth peer index -1 Error: ifindex can't be negative. This problem surfaced in net-next because an explicit WARN() was added, the root cause is older. Fixes: e6f8f1a739b6 ("veth: Allow to create peer link with given ifindex") Fixes: a8f820a380a2 ("can: add Virtual CAN Tunnel driver (vxcan)") Reported-by: syzbot+5ba06978f34abb058571@syzkaller.appspotmail.com Signed-off-by: Jakub Kicinski Reviewed-by: Eric Dumazet Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/can/vxcan.c | 7 +------ drivers/net/veth.c | 5 +---- include/net/rtnetlink.h | 4 ++-- net/core/rtnetlink.c | 22 ++++++++++++++++++---- 4 files changed, 22 insertions(+), 16 deletions(-) diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index 26a472d2ea58..6d549dbdb467 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -192,12 +192,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev, nla_peer = data[VXCAN_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + - sizeof(struct ifinfomsg), - nla_len(nla_peer) - - sizeof(struct ifinfomsg), - NULL); + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index a71786b3e7ba..727b9278b9fe 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1716,10 +1716,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, nla_peer = data[VETH_INFO_PEER]; ifmp = nla_data(nla_peer); - err = rtnl_nla_parse_ifla(peer_tb, - nla_data(nla_peer) + sizeof(struct ifinfomsg), - nla_len(nla_peer) - sizeof(struct ifinfomsg), - NULL); + err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index bf8bb3357825..9f881b74f32e 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h @@ -189,8 +189,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname, int rtnl_delete_link(struct net_device *dev); int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm); -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr); +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr); struct net *rtnl_get_net_ns_capable(struct sock *sk, int netnsid); #define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2758b3f7c021..48e300a144ad 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2220,13 +2220,27 @@ out_err: return err; } -int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, - struct netlink_ext_ack *exterr) +int rtnl_nla_parse_ifinfomsg(struct nlattr **tb, const struct nlattr *nla_peer, + struct netlink_ext_ack *exterr) { - return nla_parse_deprecated(tb, IFLA_MAX, head, len, ifla_policy, + const struct ifinfomsg *ifmp; + const struct nlattr *attrs; + size_t len; + + ifmp = nla_data(nla_peer); + attrs = nla_data(nla_peer) + sizeof(struct ifinfomsg); + len = nla_len(nla_peer) - sizeof(struct ifinfomsg); + + if (ifmp->ifi_index < 0) { + NL_SET_ERR_MSG_ATTR(exterr, nla_peer, + "ifindex can't be negative"); + return -EINVAL; + } + + return nla_parse_deprecated(tb, IFLA_MAX, attrs, len, ifla_policy, exterr); } -EXPORT_SYMBOL(rtnl_nla_parse_ifla); +EXPORT_SYMBOL(rtnl_nla_parse_ifinfomsg); struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]) { From 417e7ec0d61e2bcb4e264375badbd2e3c879467c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sat, 19 Aug 2023 03:17:07 +0000 Subject: [PATCH 041/123] ipv4: fix data-races around inet->inet_id [ Upstream commit f866fbc842de5976e41ba874b76ce31710b634b5 ] UDP sendmsg() is lockless, so ip_select_ident_segs() can very well be run from multiple cpus [1] Convert inet->inet_id to an atomic_t, but implement a dedicated path for 
TCP, avoiding cost of a locked instruction (atomic_add_return()) Note that this patch will cause a trivial merge conflict because we added inet->flags in net-next tree. v2: added missing change in drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c (David Ahern) [1] BUG: KCSAN: data-race in __ip_make_skb / __ip_make_skb read-write to 0xffff888145af952a of 2 bytes by task 7803 on cpu 1: ip_select_ident_segs include/net/ip.h:542 [inline] ip_select_ident include/net/ip.h:556 [inline] __ip_make_skb+0x844/0xc70 net/ipv4/ip_output.c:1446 ip_make_skb+0x233/0x2c0 net/ipv4/ip_output.c:1560 udp_sendmsg+0x1199/0x1250 net/ipv4/udp.c:1260 inet_sendmsg+0x63/0x80 net/ipv4/af_inet.c:830 sock_sendmsg_nosec net/socket.c:725 [inline] sock_sendmsg net/socket.c:748 [inline] ____sys_sendmsg+0x37c/0x4d0 net/socket.c:2494 ___sys_sendmsg net/socket.c:2548 [inline] __sys_sendmmsg+0x269/0x500 net/socket.c:2634 __do_sys_sendmmsg net/socket.c:2663 [inline] __se_sys_sendmmsg net/socket.c:2660 [inline] __x64_sys_sendmmsg+0x57/0x60 net/socket.c:2660 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd read to 0xffff888145af952a of 2 bytes by task 7804 on cpu 0: ip_select_ident_segs include/net/ip.h:541 [inline] ip_select_ident include/net/ip.h:556 [inline] __ip_make_skb+0x817/0xc70 net/ipv4/ip_output.c:1446 ip_make_skb+0x233/0x2c0 net/ipv4/ip_output.c:1560 udp_sendmsg+0x1199/0x1250 net/ipv4/udp.c:1260 inet_sendmsg+0x63/0x80 net/ipv4/af_inet.c:830 sock_sendmsg_nosec net/socket.c:725 [inline] sock_sendmsg net/socket.c:748 [inline] ____sys_sendmsg+0x37c/0x4d0 net/socket.c:2494 ___sys_sendmsg net/socket.c:2548 [inline] __sys_sendmmsg+0x269/0x500 net/socket.c:2634 __do_sys_sendmmsg net/socket.c:2663 [inline] __se_sys_sendmmsg net/socket.c:2660 [inline] __x64_sys_sendmmsg+0x57/0x60 net/socket.c:2660 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd value changed: 0x184d -> 0x184e Reported by Kernel Concurrency Sanitizer on: CPU: 0 PID: 7804 Comm: syz-executor.1 Not tainted 6.5.0-rc6-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/26/2023 ================================================================== Fixes: 23f57406b82d ("ipv4: avoid using shared IP generator for connected sockets") Reported-by: syzbot Signed-off-by: Eric Dumazet Reviewed-by: David Ahern Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- .../chelsio/inline_crypto/chtls/chtls_cm.c | 2 +- include/net/inet_sock.h | 2 +- include/net/ip.h | 15 +++++++++++++-- net/dccp/ipv4.c | 4 ++-- net/ipv4/af_inet.c | 2 +- net/ipv4/datagram.c | 2 +- net/ipv4/tcp_ipv4.c | 4 ++-- net/sctp/socket.c | 2 +- 8 files changed, 22 insertions(+), 11 deletions(-) diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index c2e7037c7ba1..7750702900fa 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1466,7 +1466,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt) tp->write_seq = snd_isn; tp->snd_nxt = snd_isn; tp->snd_una = snd_isn; - inet_sk(sk)->inet_id = get_random_u16(); + atomic_set(&inet_sk(sk)->inet_id, get_random_u16()); assign_rxopt(sk, opt); if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10)) diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index c8ef3b881f03..c2432c2addc8 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -222,8 +222,8 @@ struct inet_sock { __s16 uc_ttl; __u16 cmsg_flags; struct ip_options_rcu __rcu *inet_opt; + atomic_t inet_id; __be16 inet_sport; - __u16 inet_id; __u8 tos; __u8 min_ttl; diff --git a/include/net/ip.h b/include/net/ip.h index 530e7257e438..1872f570abed 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -532,8 +532,19 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb, * generator as much as we can. */ if (sk && inet_sk(sk)->inet_daddr) { - iph->id = htons(inet_sk(sk)->inet_id); - inet_sk(sk)->inet_id += segs; + int val; + + /* avoid atomic operations for TCP, + * as we hold socket lock at this point. 
+ */ + if (sk_is_tcp(sk)) { + sock_owned_by_me(sk); + val = atomic_read(&inet_sk(sk)->inet_id); + atomic_set(&inet_sk(sk)->inet_id, val + segs); + } else { + val = atomic_add_return(segs, &inet_sk(sk)->inet_id); + } + iph->id = htons(val); return; } if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) { diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index b780827f5e0a..bfececa9e244 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -130,7 +130,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_daddr, inet->inet_sport, inet->inet_dport); - inet->inet_id = get_random_u16(); + atomic_set(&inet->inet_id, get_random_u16()); err = dccp_connect(sk); rt = NULL; @@ -430,7 +430,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; - newinet->inet_id = get_random_u16(); + atomic_set(&newinet->inet_id, get_random_u16()); if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) goto put_and_exit; diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index ebb737ac9e89..04853c83c85c 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -340,7 +340,7 @@ lookup_protocol: else inet->pmtudisc = IP_PMTUDISC_WANT; - inet->inet_id = 0; + atomic_set(&inet->inet_id, 0); sock_init_data(sock, sk); diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 4d1af0cd7d99..cb5dbee9e018 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c @@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len reuseport_has_conns_set(sk); sk->sk_state = TCP_ESTABLISHED; sk_set_txhash(sk); - inet->inet_id = get_random_u16(); + atomic_set(&inet->inet_id, get_random_u16()); sk_dst_set(sk, &rt->dst); err = 0; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 08921b96f972..f9b8a4a1d2ed 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -312,7 +312,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) inet->inet_daddr)); } - inet->inet_id = get_random_u16(); + atomic_set(&inet->inet_id, get_random_u16()); if (tcp_fastopen_defer_connect(sk, &err)) return err; @@ -1539,7 +1539,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, inet_csk(newsk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; - newinet->inet_id = get_random_u16(); + atomic_set(&newinet->inet_id, get_random_u16()); /* Set ToS of the new socket based upon the value of incoming SYN. * ECT bits are set later in tcp_init_transfer(). 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 83656fe03a0e..a11b0d903514 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -9472,7 +9472,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, newinet->inet_rcv_saddr = inet->inet_rcv_saddr; newinet->inet_dport = htons(asoc->peer.port); newinet->pmtudisc = inet->pmtudisc; - newinet->inet_id = get_random_u16(); + atomic_set(&newinet->inet_id, get_random_u16()); newinet->uc_ttl = inet->uc_ttl; newinet->mc_loop = 1; From 1188e9dd7af97adb7af7ea2e9e63c771eaaf278c Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Thu, 10 Aug 2023 16:51:10 -0700 Subject: [PATCH 042/123] ice: fix receive buffer size miscalculation [ Upstream commit 10083aef784031fa9f06c19a1b182e6fad5338d9 ] The driver is misconfiguring the hardware for some values of MTU such that it could use multiple descriptors to receive a packet when it could have simply used one. Change the driver to use a round-up instead of the result of a shift, as the shift can truncate the lower bits of the size, and result in the problem noted above. It also aligns this driver with similar code in i40e. The insidiousness of this problem is that everything works with the wrong size, it's just not working as well as it could, as some MTU sizes end up using two or more descriptors, and there is no way to tell that is happening without looking at ice_trace or a bus analyzer. Fixes: efc2214b6047 ("ice: Add support for XDP") Reviewed-by: Przemek Kitszel Signed-off-by: Jesse Brandeburg Reviewed-by: Leon Romanovsky Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice_base.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index e864634d66bc..818eca6aa4a4 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -396,7 +396,8 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring) /* Receive Packet Data Buffer Size. * The Packet Data Buffer Size is defined in 128 byte units. */ - rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S; + rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len, + BIT_ULL(ICE_RLAN_CTX_DBUF_S)); /* use 32 byte descriptors */ rlan_ctx.dsize = 1; From 7cddaed2a3f639c06ed033c92cfb552346c3ff83 Mon Sep 17 00:00:00 2001 From: Petr Oros Date: Fri, 11 Aug 2023 10:07:01 +0200 Subject: [PATCH 043/123] Revert "ice: Fix ice VF reset during iavf initialization" [ Upstream commit 0ecff05e6c59dd82dbcb9706db911f7fd9f40fb8 ] This reverts commit 7255355a0636b4eff08d5e8139c77d98f151c4fc. After this commit we are not able to attach VF to VM: virsh attach-interface v0 hostdev --managed 0000:41:01.0 --mac 52:52:52:52:52:52 error: Failed to attach interface error: Cannot set interface MAC to 52:52:52:52:52:52 for ifname enp65s0f0np0 vf 0: Resource temporarily unavailable ice_check_vf_ready_for_cfg() already contain waiting for reset. New condition in ice_check_vf_ready_for_reset() causing only problems. 
Fixes: 7255355a0636 ("ice: Fix ice VF reset during iavf initialization") Signed-off-by: Petr Oros Reviewed-by: Simon Horman Reviewed-by: Przemek Kitszel Reviewed-by: Jacob Keller Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice_sriov.c | 8 ++++---- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 19 ------------------- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 1 - drivers/net/ethernet/intel/ice/ice_virtchnl.c | 1 - 4 files changed, 4 insertions(+), 25 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index b8c31bf721ad..b719e9a771e3 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1240,7 +1240,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena) if (!vf) return -EINVAL; - ret = ice_check_vf_ready_for_reset(vf); + ret = ice_check_vf_ready_for_cfg(vf); if (ret) goto out_put_vf; @@ -1355,7 +1355,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto out_put_vf; } - ret = ice_check_vf_ready_for_reset(vf); + ret = ice_check_vf_ready_for_cfg(vf); if (ret) goto out_put_vf; @@ -1409,7 +1409,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted) return -EOPNOTSUPP; } - ret = ice_check_vf_ready_for_reset(vf); + ret = ice_check_vf_ready_for_cfg(vf); if (ret) goto out_put_vf; @@ -1722,7 +1722,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos, if (!vf) return -EINVAL; - ret = ice_check_vf_ready_for_reset(vf); + ret = ice_check_vf_ready_for_cfg(vf); if (ret) goto out_put_vf; diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 71047fc34139..86abbcb480d9 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -185,25 +185,6 @@ int ice_check_vf_ready_for_cfg(struct ice_vf *vf) return 0; } -/** - * ice_check_vf_ready_for_reset - check if VF is ready to be reset - * @vf: VF to check if it's ready to be reset - * - * The purpose of this function is to ensure that the VF is not in reset, - * disabled, and is both initialized and active, thus enabling us to safely - * initialize another reset. 
- */ -int ice_check_vf_ready_for_reset(struct ice_vf *vf) -{ - int ret; - - ret = ice_check_vf_ready_for_cfg(vf); - if (!ret && !test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) - ret = -EAGAIN; - - return ret; -} - /** * ice_trigger_vf_reset - Reset a VF on HW * @vf: pointer to the VF structure diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index e5bed8572462..9f7fcd8e5714 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -214,7 +214,6 @@ u16 ice_get_num_vfs(struct ice_pf *pf); struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf); bool ice_is_vf_disabled(struct ice_vf *vf); int ice_check_vf_ready_for_cfg(struct ice_vf *vf); -int ice_check_vf_ready_for_reset(struct ice_vf *vf); void ice_set_vf_state_dis(struct ice_vf *vf); bool ice_is_any_vf_in_unicast_promisc(struct ice_pf *pf); void diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index ef3c709d6a75..2b4c791b6cba 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -3722,7 +3722,6 @@ error_handler: ice_vc_notify_vf_link_state(vf); break; case VIRTCHNL_OP_RESET_VF: - clear_bit(ICE_VF_STATE_ACTIVE, vf->vf_states); ops->reset_vf(vf); break; case VIRTCHNL_OP_ADD_ETH_ADDR: From 850e2322ae59149475500bf1347cf60a0f7f051c Mon Sep 17 00:00:00 2001 From: Petr Oros Date: Fri, 11 Aug 2023 10:07:02 +0200 Subject: [PATCH 044/123] ice: Fix NULL pointer deref during VF reset [ Upstream commit 67f6317dfa609846a227a706532439a22828c24b ] During stress test with attaching and detaching VF from KVM and simultaneously changing VFs spoofcheck and trust there was a NULL pointer dereference in ice_reset_vf that VF's VSI is null. More than one instance of ice_reset_vf() can be running at a given time. When we rebuild the VSI in ice_reset_vf, another reset can be triaged from ice_service_task. In this case we can access the currently uninitialized VSI and cause panic. The window for this racing condition has been around for a long time but it's much worse after commit 227bf4500aaa ("ice: move VSI delete outside deconfig") because the reset runs faster. ice_reset_vf() using vf->cfg_lock and when we move this lock before accessing to the VF VSI, we can fix BUG for all cases. Panic occurs sometimes in ice_vsi_is_rx_queue_active() and sometimes in ice_vsi_stop_all_rx_rings() With our reproducer, we can hit BUG: ~8h before commit 227bf4500aaa ("ice: move VSI delete outside deconfig"). ~20m after commit 227bf4500aaa ("ice: move VSI delete outside deconfig"). After this fix we are not able to reproduce it after ~48h There was commit cf90b74341ee ("ice: Fix call trace with null VSI during VF reset") which also tried to fix this issue, but it was only partially resolved and the bug still exists. [ 6420.658415] BUG: kernel NULL pointer dereference, address: 0000000000000000 [ 6420.665382] #PF: supervisor read access in kernel mode [ 6420.670521] #PF: error_code(0x0000) - not-present page [ 6420.675659] PGD 0 [ 6420.677679] Oops: 0000 [#1] PREEMPT SMP NOPTI [ 6420.682038] CPU: 53 PID: 326472 Comm: kworker/53:0 Kdump: loaded Not tainted 5.14.0-317.el9.x86_64 #1 [ 6420.691250] Hardware name: Dell Inc. 
PowerEdge R750/04V528, BIOS 1.6.5 04/15/2022 [ 6420.698729] Workqueue: ice ice_service_task [ice] [ 6420.703462] RIP: 0010:ice_vsi_is_rx_queue_active+0x2d/0x60 [ice] [ 6420.705860] ice 0000:ca:00.0: VF 0 is now untrusted [ 6420.709494] Code: 00 00 66 83 bf 76 04 00 00 00 48 8b 77 10 74 3e 31 c0 eb 0f 0f b7 97 76 04 00 00 48 83 c0 01 39 c2 7e 2b 48 8b 97 68 04 00 00 <0f> b7 0c 42 48 8b 96 20 13 00 00 48 8d 94 8a 00 00 12 00 8b 12 83 [ 6420.714426] ice 0000:ca:00.0 ens7f0: Setting MAC 22:22:22:22:22:00 on VF 0. VF driver will be reinitialized [ 6420.733120] RSP: 0018:ff778d2ff383fdd8 EFLAGS: 00010246 [ 6420.733123] RAX: 0000000000000000 RBX: ff2acf1916294000 RCX: 0000000000000000 [ 6420.733125] RDX: 0000000000000000 RSI: ff2acf1f2c6401a0 RDI: ff2acf1a27301828 [ 6420.762346] RBP: ff2acf1a27301828 R08: 0000000000000010 R09: 0000000000001000 [ 6420.769476] R10: ff2acf1916286000 R11: 00000000019eba3f R12: ff2acf19066460d0 [ 6420.776611] R13: ff2acf1f2c6401a0 R14: ff2acf1f2c6401a0 R15: 00000000ffffffff [ 6420.783742] FS: 0000000000000000(0000) GS:ff2acf28ffa80000(0000) knlGS:0000000000000000 [ 6420.791829] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 6420.797575] CR2: 0000000000000000 CR3: 00000016ad410003 CR4: 0000000000773ee0 [ 6420.804708] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 6420.811034] vfio-pci 0000:ca:01.0: enabling device (0000 -> 0002) [ 6420.811840] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 6420.811841] PKRU: 55555554 [ 6420.811842] Call Trace: [ 6420.811843] [ 6420.811844] ice_reset_vf+0x9a/0x450 [ice] [ 6420.811876] ice_process_vflr_event+0x8f/0xc0 [ice] [ 6420.841343] ice_service_task+0x23b/0x600 [ice] [ 6420.845884] ? __schedule+0x212/0x550 [ 6420.849550] process_one_work+0x1e2/0x3b0 [ 6420.853563] ? rescuer_thread+0x390/0x390 [ 6420.857577] worker_thread+0x50/0x3a0 [ 6420.861242] ? rescuer_thread+0x390/0x390 [ 6420.865253] kthread+0xdd/0x100 [ 6420.868400] ? 
kthread_complete_and_exit+0x20/0x20 [ 6420.873194] ret_from_fork+0x1f/0x30 [ 6420.876774] [ 6420.878967] Modules linked in: vfio_pci vfio_pci_core vfio_iommu_type1 vfio iavf vhost_net vhost vhost_iotlb tap tun xt_CHECKSUM xt_MASQUERADE xt_conntrack ipt_REJECT nf_reject_ipv4 nft_compat nft_chain_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 nft_counter nf_tables bridge stp llc sctp ip6_udp_tunnel udp_tunnel nfp tls nfnetlink bluetooth mlx4_en mlx4_core rpcsec_gss_krb5 auth_rpcgss nfsv4 dns_resolver nfs lockd grace fscache netfs rfkill sunrpc intel_rapl_msr intel_rapl_common i10nm_edac nfit libnvdimm ipmi_ssif x86_pkg_temp_thermal intel_powerclamp coretemp irdma kvm_intel i40e kvm iTCO_wdt dcdbas ib_uverbs irqbypass iTCO_vendor_support mgag200 mei_me ib_core dell_smbios isst_if_mmio isst_if_mbox_pci rapl i2c_algo_bit drm_shmem_helper intel_cstate drm_kms_helper syscopyarea sysfillrect isst_if_common sysimgblt intel_uncore fb_sys_fops dell_wmi_descriptor wmi_bmof intel_vsec mei i2c_i801 acpi_ipmi ipmi_si i2c_smbus ipmi_devintf intel_pch_thermal acpi_power_meter pcspk r Fixes: efe41860008e ("ice: Fix memory corruption in VF driver") Fixes: f23df5220d2b ("ice: Fix spurious interrupt during removal of trusted VF") Signed-off-by: Petr Oros Reviewed-by: Simon Horman Reviewed-by: Przemek Kitszel Reviewed-by: Jacob Keller Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 86abbcb480d9..9dbe6e9bb1f7 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -569,11 +569,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) return 0; } + if (flags & ICE_VF_RESET_LOCK) + mutex_lock(&vf->cfg_lock); + else + lockdep_assert_held(&vf->cfg_lock); + if (ice_is_vf_disabled(vf)) { vsi = ice_get_vf_vsi(vf); if (!vsi) { dev_dbg(dev, "VF is already removed\n"); - return -EINVAL; + err = -EINVAL; + goto out_unlock; } ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); @@ -582,14 +588,9 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", vf->vf_id); - return 0; + goto out_unlock; } - if (flags & ICE_VF_RESET_LOCK) - mutex_lock(&vf->cfg_lock); - else - lockdep_assert_held(&vf->cfg_lock); - /* Set VF disable bit state here, before triggering reset */ set_bit(ICE_VF_STATE_DIS, vf->vf_states); ice_trigger_vf_reset(vf, flags & ICE_VF_RESET_VFLR, false); From f41781b9d8a4af2b1ede0e1045b62359cf6b66b4 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Thu, 17 Aug 2023 16:24:59 +0800 Subject: [PATCH 045/123] selftests: bonding: do not set port down before adding to bond [ Upstream commit be809424659c2844a2d7ab653aacca4898538023 ] Before adding a port to bond, it need to be set down first. In the lacpdu test the author set the port down specifically. But commit a4abfa627c38 ("net: rtnetlink: Enslave device before bringing it up") changed the operation order, the kernel will set the port down _after_ adding to bond. So all the ports will be down at last and the test failed. In fact, the veth interfaces are already inactive when added. This means there's no need to set them down again before adding to the bond. Let's just remove the link down operation. 
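The effect of the ordering change is easy to see with plain iproute2 (hypothetical device names, not taken from the selftest): since a4abfa627c38 the kernel enslaves the device first and applies the requested admin state afterwards, so combining "down" and "master" in one command leaves the port down after it has joined the bond.

  ip link add v0 type veth peer name v1
  ip link add bond0 type bond mode 802.3ad
  ip link set v0 down master bond0
  ip -br link show v0        # reports DOWN: the "down" was applied after the enslave

Because a freshly created veth is already administratively down, simply omitting "down", as this patch does, gives the intended behaviour with both the old and the new ordering.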
Fixes: a4abfa627c38 ("net: rtnetlink: Enslave device before bringing it up") Reported-by: Zhengchao Shao Closes: https://lore.kernel.org/netdev/a0ef07c7-91b0-94bd-240d-944a330fcabd@huawei.com/ Signed-off-by: Hangbin Liu Link: https://lore.kernel.org/r/20230817082459.1685972-1-liuhangbin@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- .../selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh index 47ab90596acb..6358df5752f9 100755 --- a/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh +++ b/tools/testing/selftests/drivers/net/bonding/bond-break-lacpdu-tx.sh @@ -57,8 +57,8 @@ ip link add name veth2-bond type veth peer name veth2-end # add ports ip link set fbond master fab-br0 -ip link set veth1-bond down master fbond -ip link set veth2-bond down master fbond +ip link set veth1-bond master fbond +ip link set veth2-bond master fbond # bring up ip link set veth1-end up From 39d43b9cdfe8b02d9dc28994ddd578b9e5f34a7e Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Mon, 21 Aug 2023 16:45:46 +0200 Subject: [PATCH 046/123] can: isotp: fix support for transmission of SF without flow control [ Upstream commit 0bfe71159230bab79ee230225ae12ffecbb69f3e ] The original implementation had a very simple handling for single frame transmissions as it just sent the single frame without a timeout handling. With the new echo frame handling the echo frame was also introduced for single frames but the former exception ('simple without timers') has been maintained by accident. This leads to a 1 second timeout when closing the socket and to an -ECOMM error when CAN_ISOTP_WAIT_TX_DONE is selected. As the echo handling is always active (also for single frames) remove the wrong extra condition for single frames. 
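A minimal user-space sketch of the case being fixed (illustrative only, not part of the patch; it assumes the standard linux/can/isotp.h UAPI and a CAN interface named vcan0). With CAN_ISOTP_WAIT_TX_DONE set, a single-frame write() used to hit the 1 second timeout and fail with ECOMM; with echo handling applied to single frames as well, it returns as soon as the frame has been confirmed by its echo skb:

  #include <linux/can.h>
  #include <linux/can/isotp.h>
  #include <net/if.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/socket.h>
  #include <unistd.h>

  /* Error handling omitted for brevity. */
  int send_one_single_frame(void)
  {
          struct sockaddr_can addr = { .can_family = AF_CAN };
          struct can_isotp_options opts = { .flags = CAN_ISOTP_WAIT_TX_DONE };
          struct ifreq ifr;
          unsigned char data[4] = { 0xde, 0xad, 0xbe, 0xef };  /* fits into one SF */
          int s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);

          setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts));

          strcpy(ifr.ifr_name, "vcan0");
          ioctl(s, SIOCGIFINDEX, &ifr);
          addr.can_ifindex = ifr.ifr_ifindex;
          addr.can_addr.tp.tx_id = 0x700;
          addr.can_addr.tp.rx_id = 0x708;
          bind(s, (struct sockaddr *)&addr, sizeof(addr));

          /* Returns 4 once the SF has been sent; no more bogus ECOMM. */
          return write(s, data, sizeof(data));
  }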
Fixes: 9f39d36530e5 ("can: isotp: add support for transmission without flow control") Signed-off-by: Oliver Hartkopp Link: https://lore.kernel.org/r/20230821144547.6658-2-socketcan@hartkopp.net Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/can/isotp.c | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/net/can/isotp.c b/net/can/isotp.c index b3c2a49b189c..8c97f4061ffd 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -175,12 +175,6 @@ static bool isotp_register_rxid(struct isotp_sock *so) return (isotp_bc_flags(so) == 0); } -static bool isotp_register_txecho(struct isotp_sock *so) -{ - /* all modes but SF_BROADCAST register for tx echo skbs */ - return (isotp_bc_flags(so) != CAN_ISOTP_SF_BROADCAST); -} - static enum hrtimer_restart isotp_rx_timer_handler(struct hrtimer *hrtimer) { struct isotp_sock *so = container_of(hrtimer, struct isotp_sock, @@ -1176,7 +1170,7 @@ static int isotp_release(struct socket *sock) lock_sock(sk); /* remove current filters & unregister */ - if (so->bound && isotp_register_txecho(so)) { + if (so->bound) { if (so->ifindex) { struct net_device *dev; @@ -1293,14 +1287,12 @@ static int isotp_bind(struct socket *sock, struct sockaddr *uaddr, int len) can_rx_register(net, dev, rx_id, SINGLE_MASK(rx_id), isotp_rcv, sk, "isotp", sk); - if (isotp_register_txecho(so)) { - /* no consecutive frame echo skb in flight */ - so->cfecho = 0; + /* no consecutive frame echo skb in flight */ + so->cfecho = 0; - /* register for echo skb's */ - can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id), - isotp_rcv_echo, sk, "isotpe", sk); - } + /* register for echo skb's */ + can_rx_register(net, dev, tx_id, SINGLE_MASK(tx_id), + isotp_rcv_echo, sk, "isotpe", sk); dev_put(dev); @@ -1521,7 +1513,7 @@ static void isotp_notify(struct isotp_sock *so, unsigned long msg, case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ - if (so->bound && isotp_register_txecho(so)) { + if (so->bound) { if (isotp_register_rxid(so)) can_rx_unregister(dev_net(dev), dev, so->rxid, SINGLE_MASK(so->rxid), From 9b7fd6beec371a9fceb6bca0068272a4a31ba825 Mon Sep 17 00:00:00 2001 From: Alessio Igor Bogani Date: Mon, 21 Aug 2023 10:19:27 -0700 Subject: [PATCH 047/123] igb: Avoid starting unnecessary workqueues [ Upstream commit b888c510f7b3d64ca75fc0f43b4a4bd1a611312f ] If ptp_clock_register() fails or CONFIG_PTP isn't enabled, avoid starting PTP related workqueues. In this way we can fix this: BUG: unable to handle page fault for address: ffffc9000440b6f8 #PF: supervisor read access in kernel mode #PF: error_code(0x0000) - not-present page PGD 100000067 P4D 100000067 PUD 1001e0067 PMD 107dc5067 PTE 0 Oops: 0000 [#1] PREEMPT SMP [...] Workqueue: events igb_ptp_overflow_check RIP: 0010:igb_rd32+0x1f/0x60 [...] Call Trace: igb_ptp_read_82580+0x20/0x50 timecounter_read+0x15/0x60 igb_ptp_overflow_check+0x1a/0x50 process_one_work+0x1cb/0x3c0 worker_thread+0x53/0x3f0 ? rescuer_thread+0x370/0x370 kthread+0x142/0x160 ? 
kthread_associate_blkcg+0xc0/0xc0 ret_from_fork+0x1f/0x30 Fixes: 1f6e8178d685 ("igb: Prevent dropped Tx timestamps via work items and interrupts.") Fixes: d339b1331616 ("igb: add PTP Hardware Clock code") Signed-off-by: Alessio Igor Bogani Tested-by: Arpana Arland (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230821171927.2203644-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/igb/igb_ptp.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 15e57460e19e..07171e574e7d 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -1404,18 +1404,6 @@ void igb_ptp_init(struct igb_adapter *adapter) return; } - spin_lock_init(&adapter->tmreg_lock); - INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); - - if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) - INIT_DELAYED_WORK(&adapter->ptp_overflow_work, - igb_ptp_overflow_check); - - adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; - adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; - - igb_ptp_reset(adapter); - adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, &adapter->pdev->dev); if (IS_ERR(adapter->ptp_clock)) { @@ -1425,6 +1413,18 @@ void igb_ptp_init(struct igb_adapter *adapter) dev_info(&adapter->pdev->dev, "added PHC on %s\n", adapter->netdev->name); adapter->ptp_flags |= IGB_PTP_ENABLED; + + spin_lock_init(&adapter->tmreg_lock); + INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work); + + if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK) + INIT_DELAYED_WORK(&adapter->ptp_overflow_work, + igb_ptp_overflow_check); + + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; + + igb_ptp_reset(adapter); } } From f94f30e2abfa17f1b8173ce1ed6fdfd0861f3d55 Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Mon, 21 Aug 2023 10:17:21 -0700 Subject: [PATCH 048/123] igc: Fix the typo in the PTM Control macro [ Upstream commit de43975721b97283d5f17eea4228faddf08f2681 ] The IGC_PTM_CTRL_SHRT_CYC defines the time between two consecutive PTM requests. The bit resolution of this field is six bits. That bit five was missing in the mask. This patch comes to correct the typo in the IGC_PTM_CTRL_SHRT_CYC macro. 
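A small illustrative check (not part of the patch) makes the impact concrete. The short cycle time is a six-bit field; the old mask 0x2f silently drops bit 4 of the requested value, so for example a 24 us cycle was programmed as 8 us, while the driver's 10 us default happened to be unaffected because bit 4 is clear there:

  #include <assert.h>

  #define OLD_SHRT_CYC(usec)  (((usec) & 0x2f) << 2)  /* typo: bit 4 of the value is lost */
  #define NEW_SHRT_CYC(usec)  (((usec) & 0x3f) << 2)  /* full six-bit value is preserved  */

  static_assert(OLD_SHRT_CYC(24) == (8 << 2),  "24 us used to be programmed as 8 us");
  static_assert(NEW_SHRT_CYC(24) == (24 << 2), "with the fix, 24 us stays 24 us");
  static_assert(OLD_SHRT_CYC(10) == NEW_SHRT_CYC(10), "the 10 us default was unaffected");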
Fixes: a90ec8483732 ("igc: Add support for PTP getcrosststamp()") Signed-off-by: Sasha Neftin Tested-by: Naama Meir Signed-off-by: Tony Nguyen Reviewed-by: Simon Horman Reviewed-by: Kalesh AP Link: https://lore.kernel.org/r/20230821171721.2203572-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/igc/igc_defines.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index dbfa4b9dee06..90ca01889cd8 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -536,7 +536,7 @@ #define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ #define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ #define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ -#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x3f) << 2) #define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) #define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ From 581668893e3126176b08349b7e35a3fd6b578d6a Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Tue, 22 Aug 2023 06:12:31 -0400 Subject: [PATCH 049/123] net/sched: fix a qdisc modification with ambiguous command request [ Upstream commit da71714e359b64bd7aab3bd56ec53f307f058133 ] When replacing an existing root qdisc, with one that is of the same kind, the request boils down to essentially a parameterization change i.e not one that requires allocation and grafting of a new qdisc. syzbot was able to create a scenario which resulted in a taprio qdisc replacing an existing taprio qdisc with a combination of NLM_F_CREATE, NLM_F_REPLACE and NLM_F_EXCL leading to create and graft scenario. The fix ensures that only when the qdisc kinds are different that we should allow a create and graft, otherwise it goes into the "change" codepath. While at it, fix the code and comments to improve readability. While syzbot was able to create the issue, it did not zone on the root cause. Analysis from Vladimir Oltean helped narrow it down. v1->V2 changes: - remove "inline" function definition (Vladmir) - remove extrenous braces in branches (Vladmir) - change inline function names (Pedro) - Run tdc tests (Victor) v2->v3 changes: - dont break else/if (Simon) Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Reported-by: syzbot+a3618a167af2021433cd@syzkaller.appspotmail.com Closes: https://lore.kernel.org/netdev/20230816225759.g25x76kmgzya2gei@skbuf/T/ Tested-by: Vladimir Oltean Tested-by: Victor Nogueira Reviewed-by: Pedro Tammela Reviewed-by: Victor Nogueira Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- net/sched/sch_api.c | 53 ++++++++++++++++++++++++++++++++++----------- 1 file changed, 40 insertions(+), 13 deletions(-) diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 01d07e6a6811..e8f988e1c7e6 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -1550,10 +1550,28 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, return 0; } +static bool req_create_or_replace(struct nlmsghdr *n) +{ + return (n->nlmsg_flags & NLM_F_CREATE && + n->nlmsg_flags & NLM_F_REPLACE); +} + +static bool req_create_exclusive(struct nlmsghdr *n) +{ + return (n->nlmsg_flags & NLM_F_CREATE && + n->nlmsg_flags & NLM_F_EXCL); +} + +static bool req_change(struct nlmsghdr *n) +{ + return (!(n->nlmsg_flags & NLM_F_CREATE) && + !(n->nlmsg_flags & NLM_F_REPLACE) && + !(n->nlmsg_flags & NLM_F_EXCL)); +} + /* * Create/change qdisc. */ - static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, struct netlink_ext_ack *extack) { @@ -1647,27 +1665,35 @@ replay: * * We know, that some child q is already * attached to this parent and have choice: - * either to change it or to create/graft new one. + * 1) change it or 2) create/graft new one. + * If the requested qdisc kind is different + * than the existing one, then we choose graft. + * If they are the same then this is "change" + * operation - just let it fallthrough.. * * 1. We are allowed to create/graft only - * if CREATE and REPLACE flags are set. + * if the request is explicitly stating + * "please create if it doesn't exist". * - * 2. If EXCL is set, requestor wanted to say, - * that qdisc tcm_handle is not expected + * 2. If the request is to exclusive create + * then the qdisc tcm_handle is not expected * to exist, so that we choose create/graft too. * * 3. The last case is when no flags are set. + * This will happen when for example tc + * utility issues a "change" command. * Alas, it is sort of hole in API, we * cannot decide what to do unambiguously. - * For now we select create/graft, if - * user gave KIND, which does not match existing. + * For now we select create/graft. */ - if ((n->nlmsg_flags & NLM_F_CREATE) && - (n->nlmsg_flags & NLM_F_REPLACE) && - ((n->nlmsg_flags & NLM_F_EXCL) || - (tca[TCA_KIND] && - nla_strcmp(tca[TCA_KIND], q->ops->id)))) - goto create_n_graft; + if (tca[TCA_KIND] && + nla_strcmp(tca[TCA_KIND], q->ops->id)) { + if (req_create_or_replace(n) || + req_create_exclusive(n)) + goto create_n_graft; + else if (req_change(n)) + goto create_n_graft2; + } } } } else { @@ -1701,6 +1727,7 @@ create_n_graft: NL_SET_ERR_MSG(extack, "Qdisc not found. To create specify NLM_F_CREATE flag"); return -ENOENT; } +create_n_graft2: if (clid == TC_H_INGRESS) { if (dev_ingress_queue(dev)) { q = qdisc_create(dev, dev_ingress_queue(dev), From 136861956ad64429afbe31cfa90234114f7eab2e Mon Sep 17 00:00:00 2001 From: Andrii Staikov Date: Tue, 22 Aug 2023 15:16:53 -0700 Subject: [PATCH 050/123] i40e: fix potential NULL pointer dereferencing of pf->vf i40e_sync_vsi_filters() [ Upstream commit 9525a3c38accd2e186f52443e35e633e296cc7f5 ] Add check for pf->vf not being NULL before dereferencing pf->vf[vsi->vf_id] in updating VSI filter sync. Add a similar check before dereferencing !pf->vf[vsi->vf_id].trusted in the condition for clearing promisc mode bit. Fixes: c87c938f62d8 ("i40e: Add VF VLAN pruning") Signed-off-by: Andrii Staikov Signed-off-by: Aleksandr Loktionov Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/i40e/i40e_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0e01b1927c1c..08ccf0024ce1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2615,7 +2615,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) retval = i40e_correct_mac_vlan_filters (vsi, &tmp_add_list, &tmp_del_list, vlan_filters); - else + else if (pf->vf) retval = i40e_correct_vf_mac_vlan_filters (vsi, &tmp_add_list, &tmp_del_list, vlan_filters, pf->vf[vsi->vf_id].trusted); @@ -2788,7 +2788,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi) } /* if the VF is not trusted do not do promisc */ - if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) { + if (vsi->type == I40E_VSI_SRIOV && pf->vf && + !pf->vf[vsi->vf_id].trusted) { clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state); goto out; } From 41841b585e53babdfb0fa6fdfa54f6d7c28c1206 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Fri, 18 Aug 2023 01:13:31 +0200 Subject: [PATCH 051/123] netfilter: nf_tables: flush pending destroy work before netlink notifier [ Upstream commit 2c9f0293280e258606e54ed2b96fa71498432eae ] Destroy work waits for the RCU grace period then it releases the objects with no mutex held. All releases objects follow this path for transactions, therefore, order is guaranteed and references to top-level objects in the hierarchy remain valid. However, netlink notifier might interfer with pending destroy work. rcu_barrier() is not correct because objects are not release via RCU callback. Flush destroy work before releasing objects from netlink notifier path. Fixes: d4bc8271db21 ("netfilter: nf_tables: netlink notifier might race to release objects") Signed-off-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin --- net/netfilter/nf_tables_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 4c2df7af73f7..3c5cac9bd9b7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -10509,7 +10509,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event, deleted = 0; mutex_lock(&nft_net->commit_mutex); if (!list_empty(&nf_tables_destroy_list)) - rcu_barrier(); + nf_tables_trans_destroy_flush_work(); again: list_for_each_entry(table, &nft_net->tables, list) { if (nft_table_has_owner(table) && From ed3fe5f9020c90125dfb40c1ae808d915ede68d8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 22 Aug 2023 19:49:52 +0200 Subject: [PATCH 052/123] netfilter: nf_tables: fix out of memory error handling [ Upstream commit 5e1be4cdc98c989d5387ce94ff15b5ad06a5b681 ] Several instances of pipapo_resize() don't propagate allocation failures, this causes a crash when fault injection is enabled for gfp_kernel slabs. 
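The problematic pattern, in simplified form (an illustrative fragment, not the exact pipapo code): the rule counter was bumped before the potentially failing reallocation, and some callers ignored the return value entirely, so a failed pipapo_resize() left f->rules claiming more rules than the lookup table actually holds:

  /* before: state is modified even when the resize fails */
  rule = f->rules++;
  ret = pipapo_resize(f, f->rules - 1, f->rules);
  if (ret)
          return ret;     /* f->rules already overstates the table size here */

  /* after: commit the new count only once the allocation has succeeded,
   * and have every caller check and propagate the error
   */
  rule = f->rules;
  ret = pipapo_resize(f, f->rules, f->rules + 1);
  if (ret)
          return ret;
  f->rules++;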
Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges") Signed-off-by: Florian Westphal Reviewed-by: Stefano Brivio Signed-off-by: Sasha Levin --- net/netfilter/nft_set_pipapo.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index 32cfd0a84b0e..8c16681884b7 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -901,12 +901,14 @@ static void pipapo_lt_bits_adjust(struct nft_pipapo_field *f) static int pipapo_insert(struct nft_pipapo_field *f, const uint8_t *k, int mask_bits) { - int rule = f->rules++, group, ret, bit_offset = 0; + int rule = f->rules, group, ret, bit_offset = 0; - ret = pipapo_resize(f, f->rules - 1, f->rules); + ret = pipapo_resize(f, f->rules, f->rules + 1); if (ret) return ret; + f->rules++; + for (group = 0; group < f->groups; group++) { int i, v; u8 mask; @@ -1051,7 +1053,9 @@ static int pipapo_expand(struct nft_pipapo_field *f, step++; if (step >= len) { if (!masks) { - pipapo_insert(f, base, 0); + err = pipapo_insert(f, base, 0); + if (err < 0) + return err; masks = 1; } goto out; @@ -1234,6 +1238,9 @@ static int nft_pipapo_insert(const struct net *net, const struct nft_set *set, else ret = pipapo_expand(f, start, end, f->groups * f->bb); + if (ret < 0) + return ret; + if (f->bsize > bsize_max) bsize_max = f->bsize; From b15dea3de413b80c6e51acb26c0d09354080af65 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Wed, 23 Aug 2023 09:43:48 +0300 Subject: [PATCH 053/123] rtnetlink: Reject negative ifindexes in RTM_NEWLINK [ Upstream commit 30188bd7838c16a98a520db1fe9df01ffc6ed368 ] Negative ifindexes are illegal, but the kernel does not validate the ifindex in the ancillary header of RTM_NEWLINK messages, resulting in the kernel generating a warning [1] when such an ifindex is specified. Fix by rejecting negative ifindexes. [1] WARNING: CPU: 0 PID: 5031 at net/core/dev.c:9593 dev_index_reserve+0x1a2/0x1c0 net/core/dev.c:9593 [...] 
Call Trace: register_netdevice+0x69a/0x1490 net/core/dev.c:10081 br_dev_newlink+0x27/0x110 net/bridge/br_netlink.c:1552 rtnl_newlink_create net/core/rtnetlink.c:3471 [inline] __rtnl_newlink+0x115e/0x18c0 net/core/rtnetlink.c:3688 rtnl_newlink+0x67/0xa0 net/core/rtnetlink.c:3701 rtnetlink_rcv_msg+0x439/0xd30 net/core/rtnetlink.c:6427 netlink_rcv_skb+0x16b/0x440 net/netlink/af_netlink.c:2545 netlink_unicast_kernel net/netlink/af_netlink.c:1342 [inline] netlink_unicast+0x536/0x810 net/netlink/af_netlink.c:1368 netlink_sendmsg+0x93c/0xe40 net/netlink/af_netlink.c:1910 sock_sendmsg_nosec net/socket.c:728 [inline] sock_sendmsg+0xd9/0x180 net/socket.c:751 ____sys_sendmsg+0x6ac/0x940 net/socket.c:2538 ___sys_sendmsg+0x135/0x1d0 net/socket.c:2592 __sys_sendmsg+0x117/0x1e0 net/socket.c:2621 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd Fixes: 38f7b870d4a6 ("[RTNETLINK]: Link creation API") Reported-by: syzbot+5ba06978f34abb058571@syzkaller.appspotmail.com Signed-off-by: Ido Schimmel Reviewed-by: Jiri Pirko Reviewed-by: Jakub Kicinski Link: https://lore.kernel.org/r/20230823064348.2252280-1-idosch@nvidia.com Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- net/core/rtnetlink.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 48e300a144ad..9d4507aa736b 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3465,6 +3465,9 @@ replay: if (ifm->ifi_index > 0) { link_specified = true; dev = __dev_get_by_index(net, ifm->ifi_index); + } else if (ifm->ifi_index < 0) { + NL_SET_ERR_MSG(extack, "ifindex can't be negative"); + return -EINVAL; } else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME]) { link_specified = true; dev = rtnl_dev_get(net, tb); From a0559fd0e14eb144b8151e3104785138786274a8 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 23 Aug 2023 15:19:04 +0800 Subject: [PATCH 054/123] bonding: fix macvlan over alb bond support [ Upstream commit e74216b8def3803e98ae536de78733e9d7f3b109 ] The commit 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode bonds") aims to enable the use of macvlans on top of rlb bond mode. However, the current rlb bond mode only handles ARP packets to update remote neighbor entries. This causes an issue when a macvlan is on top of the bond, and remote devices send packets to the macvlan using the bond's MAC address as the destination. After delivering the packets to the macvlan, the macvlan will rejects them as the MAC address is incorrect. Consequently, this commit makes macvlan over bond non-functional. To address this problem, one potential solution is to check for the presence of a macvlan port on the bond device using netif_is_macvlan_port(bond->dev) and return NULL in the rlb_arp_xmit() function. However, this approach doesn't fully resolve the situation when a VLAN exists between the bond and macvlan. So let's just do a partial revert for commit 14af9963ba1e in rlb_arp_xmit(). As the comment said, Don't modify or load balance ARPs that do not originate locally. 
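For reference, the topology in question can be built with a few iproute2 commands (hypothetical device names); the macvlan sits directly on top of an ALB bond, so its ARP traffic passes through rlb_arp_xmit() but does not carry one of the bond slaves' MAC addresses:

  ip link add bond0 type bond mode balance-alb
  ip link set eth0 master bond0
  ip link set eth1 master bond0
  ip link add link bond0 name mvl0 type macvlan mode bridge
  ip addr add 192.0.2.10/24 dev mvl0

With this change, ARPs originating from mvl0 are no longer rlb-processed (rlb_arp_xmit() returns NULL for them), so peers learn the macvlan's own MAC address and the macvlan no longer drops their packets.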
Fixes: 14af9963ba1e ("bonding: Support macvlans on top of tlb/rlb mode bonds") Reported-by: susan.zheng@veritas.com Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2117816 Signed-off-by: Hangbin Liu Acked-by: Jay Vosburgh Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/bonding/bond_alb.c | 6 +++--- include/net/bonding.h | 11 +---------- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index b9dbad3a8af8..fc5da5d7744d 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -660,10 +660,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond) return NULL; arp = (struct arp_pkt *)skb_network_header(skb); - /* Don't modify or load balance ARPs that do not originate locally - * (e.g.,arrive via a bridge). + /* Don't modify or load balance ARPs that do not originate + * from the bond itself or a VLAN directly above the bond. */ - if (!bond_slave_has_mac_rx(bond, arp->mac_src)) + if (!bond_slave_has_mac_rcu(bond, arp->mac_src)) return NULL; dev = ip_dev_find(dev_net(bond->dev), arp->ip_src); diff --git a/include/net/bonding.h b/include/net/bonding.h index 17329a19f0c6..9a3ac960dfe1 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -727,23 +727,14 @@ static inline struct slave *bond_slave_has_mac(struct bonding *bond, } /* Caller must hold rcu_read_lock() for read */ -static inline bool bond_slave_has_mac_rx(struct bonding *bond, const u8 *mac) +static inline bool bond_slave_has_mac_rcu(struct bonding *bond, const u8 *mac) { struct list_head *iter; struct slave *tmp; - struct netdev_hw_addr *ha; bond_for_each_slave_rcu(bond, tmp, iter) if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr)) return true; - - if (netdev_uc_empty(bond->dev)) - return false; - - netdev_for_each_uc_addr(ha, bond->dev) - if (ether_addr_equal_64bits(mac, ha->addr)) - return true; - return false; } From 2800385fda534f1bfed8389d634c393a07570bba Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 26 Apr 2023 15:03:23 -0700 Subject: [PATCH 055/123] KVM: x86: Preserve TDP MMU roots until they are explicitly invalidated commit edbdb43fc96b11b3bfa531be306a1993d9fe89ec upstream. Preserve TDP MMU roots until they are explicitly invalidated by gifting the TDP MMU itself a reference to a root when it is allocated. Keeping a reference in the TDP MMU fixes a flaw where the TDP MMU exhibits terrible performance, and can potentially even soft-hang a vCPU, if a vCPU frequently unloads its roots, e.g. when KVM is emulating SMI+RSM. When KVM emulates something that invalidates _all_ TLB entries, e.g. SMI and RSM, KVM unloads all of the vCPUs roots (KVM keeps a small per-vCPU cache of previous roots). Unloading roots is a simple way to ensure KVM flushes and synchronizes all roots for the vCPU, as KVM flushes and syncs when allocating a "new" root (from the vCPU's perspective). In the shadow MMU, KVM keeps track of all shadow pages, roots included, in a per-VM hash table. Unloading a shadow MMU root just wipes it from the per-vCPU cache; the root is still tracked in the per-VM hash table. When KVM loads a "new" root for the vCPU, KVM will find the old, unloaded root in the per-VM hash table. Unlike the shadow MMU, the TDP MMU doesn't track "inactive" roots in a per-VM structure, where "active" in this case means a root is either in-use or cached as a previous root by at least one vCPU. When a TDP MMU root becomes inactive, i.e. 
the last vCPU reference to the root is put, KVM immediately frees the root (asterisk on "immediately" as the actual freeing may be done by a worker, but for all intents and purposes the root is gone). The TDP MMU behavior is especially problematic for 1-vCPU setups, as unloading all roots effectively frees all roots. The issue is mitigated to some degree in multi-vCPU setups as a different vCPU usually holds a reference to an unloaded root and thus keeps the root alive, allowing the vCPU to reuse its old root after unloading (with a flush+sync). The TDP MMU flaw has been known for some time, as until very recently, KVM's handling of CR0.WP also triggered unloading of all roots. The CR0.WP toggling scenario was eventually addressed by not unloading roots when _only_ CR0.WP is toggled, but such an approach doesn't Just Work for emulating SMM as KVM must emulate a full TLB flush on entry and exit to/from SMM. Given that the shadow MMU plays nice with unloading roots at will, teaching the TDP MMU to do the same is far less complex than modifying KVM to track which roots need to be flushed before reuse. Note, preserving all possible TDP MMU roots is not a concern with respect to memory consumption. Now that the role for direct MMUs doesn't include information about the guest, e.g. CR0.PG, CR0.WP, CR4.SMEP, etc., there are _at most_ six possible roots (where "guest_mode" here means L2): 1. 4-level !SMM !guest_mode 2. 4-level SMM !guest_mode 3. 5-level !SMM !guest_mode 4. 5-level SMM !guest_mode 5. 4-level !SMM guest_mode 6. 5-level !SMM guest_mode And because each vCPU can track 4 valid roots, a VM can already have all 6 root combinations live at any given time. Not to mention that, in practice, no sane VMM will advertise different guest.MAXPHYADDR values across vCPUs, i.e. KVM won't ever use both 4-level and 5-level roots for a single VM. Furthermore, the vast majority of modern hypervisors will utilize EPT/NPT when available, thus the guest_mode=%true cases are also unlikely to be utilized. Reported-by: Jeremi Piotrowski Link: https://lore.kernel.org/all/959c5bce-beb5-b463-7158-33fc4a4f910c@linux.microsoft.com Link: https://lkml.kernel.org/r/20220209170020.1775368-1-pbonzini%40redhat.com Link: https://lore.kernel.org/all/20230322013731.102955-1-minipli@grsecurity.net Link: https://lore.kernel.org/all/000000000000a0bc2b05f9dd7fab@google.com Link: https://lore.kernel.org/all/000000000000eca0b905fa0f7756@google.com Cc: Ben Gardon Cc: David Matlack Cc: stable@vger.kernel.org Tested-by: Jeremi Piotrowski Link: https://lore.kernel.org/r/20230426220323.3079789-1-seanjc@google.com Signed-off-by: Sean Christopherson Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/mmu/tdp_mmu.c | 121 +++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 65 deletions(-) diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c index 672f0432d777..70945f00ec41 100644 --- a/arch/x86/kvm/mmu/tdp_mmu.c +++ b/arch/x86/kvm/mmu/tdp_mmu.c @@ -51,7 +51,17 @@ void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) if (!kvm->arch.tdp_mmu_enabled) return; - /* Also waits for any queued work items. */ + /* + * Invalidate all roots, which besides the obvious, schedules all roots + * for zapping and thus puts the TDP MMU's reference to each root, i.e. + * ultimately frees all roots. + */ + kvm_tdp_mmu_invalidate_all_roots(kvm); + + /* + * Destroying a workqueue also first flushes the workqueue, i.e. no + * need to invoke kvm_tdp_mmu_zap_invalidated_roots(). 
+ */ destroy_workqueue(kvm->arch.tdp_mmu_zap_wq); WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages)); @@ -127,16 +137,6 @@ static void tdp_mmu_schedule_zap_root(struct kvm *kvm, struct kvm_mmu_page *root queue_work(kvm->arch.tdp_mmu_zap_wq, &root->tdp_mmu_async_work); } -static inline bool kvm_tdp_root_mark_invalid(struct kvm_mmu_page *page) -{ - union kvm_mmu_page_role role = page->role; - role.invalid = true; - - /* No need to use cmpxchg, only the invalid bit can change. */ - role.word = xchg(&page->role.word, role.word); - return role.invalid; -} - void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, bool shared) { @@ -145,45 +145,12 @@ void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, if (!refcount_dec_and_test(&root->tdp_mmu_root_count)) return; - WARN_ON(!root->tdp_mmu_page); - /* - * The root now has refcount=0. It is valid, but readers already - * cannot acquire a reference to it because kvm_tdp_mmu_get_root() - * rejects it. This remains true for the rest of the execution - * of this function, because readers visit valid roots only - * (except for tdp_mmu_zap_root_work(), which however - * does not acquire any reference itself). - * - * Even though there are flows that need to visit all roots for - * correctness, they all take mmu_lock for write, so they cannot yet - * run concurrently. The same is true after kvm_tdp_root_mark_invalid, - * since the root still has refcount=0. - * - * However, tdp_mmu_zap_root can yield, and writers do not expect to - * see refcount=0 (see for example kvm_tdp_mmu_invalidate_all_roots()). - * So the root temporarily gets an extra reference, going to refcount=1 - * while staying invalid. Readers still cannot acquire any reference; - * but writers are now allowed to run if tdp_mmu_zap_root yields and - * they might take an extra reference if they themselves yield. - * Therefore, when the reference is given back by the worker, - * there is no guarantee that the refcount is still 1. If not, whoever - * puts the last reference will free the page, but they will not have to - * zap the root because a root cannot go from invalid to valid. + * The TDP MMU itself holds a reference to each root until the root is + * explicitly invalidated, i.e. the final reference should be never be + * put for a valid root. */ - if (!kvm_tdp_root_mark_invalid(root)) { - refcount_set(&root->tdp_mmu_root_count, 1); - - /* - * Zapping the root in a worker is not just "nice to have"; - * it is required because kvm_tdp_mmu_invalidate_all_roots() - * skips already-invalid roots. If kvm_tdp_mmu_put_root() did - * not add the root to the workqueue, kvm_tdp_mmu_zap_all_fast() - * might return with some roots not zapped yet. - */ - tdp_mmu_schedule_zap_root(kvm, root); - return; - } + KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm); spin_lock(&kvm->arch.tdp_mmu_pages_lock); list_del_rcu(&root->link); @@ -329,7 +296,14 @@ hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu) root = tdp_mmu_alloc_sp(vcpu); tdp_mmu_init_sp(root, NULL, 0, role); - refcount_set(&root->tdp_mmu_root_count, 1); + /* + * TDP MMU roots are kept until they are explicitly invalidated, either + * by a memslot update or by the destruction of the VM. Initialize the + * refcount to two; one reference for the vCPU, and one reference for + * the TDP MMU itself, which is held until the root is invalidated and + * is ultimately put by tdp_mmu_zap_root_work(). 
+ */ + refcount_set(&root->tdp_mmu_root_count, 2); spin_lock(&kvm->arch.tdp_mmu_pages_lock); list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots); @@ -1027,32 +1001,49 @@ void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm) /* * Mark each TDP MMU root as invalid to prevent vCPUs from reusing a root that * is about to be zapped, e.g. in response to a memslots update. The actual - * zapping is performed asynchronously, so a reference is taken on all roots. - * Using a separate workqueue makes it easy to ensure that the destruction is - * performed before the "fast zap" completes, without keeping a separate list - * of invalidated roots; the list is effectively the list of work items in - * the workqueue. + * zapping is performed asynchronously. Using a separate workqueue makes it + * easy to ensure that the destruction is performed before the "fast zap" + * completes, without keeping a separate list of invalidated roots; the list is + * effectively the list of work items in the workqueue. * - * Get a reference even if the root is already invalid, the asynchronous worker - * assumes it was gifted a reference to the root it processes. Because mmu_lock - * is held for write, it should be impossible to observe a root with zero refcount, - * i.e. the list of roots cannot be stale. - * - * This has essentially the same effect for the TDP MMU - * as updating mmu_valid_gen does for the shadow MMU. + * Note, the asynchronous worker is gifted the TDP MMU's reference. + * See kvm_tdp_mmu_get_vcpu_root_hpa(). */ void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm) { struct kvm_mmu_page *root; - lockdep_assert_held_write(&kvm->mmu_lock); - list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { - if (!root->role.invalid && - !WARN_ON_ONCE(!kvm_tdp_mmu_get_root(root))) { + /* + * mmu_lock must be held for write to ensure that a root doesn't become + * invalid while there are active readers (invalidating a root while + * there are active readers may or may not be problematic in practice, + * but it's uncharted territory and not supported). + * + * Waive the assertion if there are no users of @kvm, i.e. the VM is + * being destroyed after all references have been put, or if no vCPUs + * have been created (which means there are no roots), i.e. the VM is + * being destroyed in an error path of KVM_CREATE_VM. + */ + if (IS_ENABLED(CONFIG_PROVE_LOCKING) && + refcount_read(&kvm->users_count) && kvm->created_vcpus) + lockdep_assert_held_write(&kvm->mmu_lock); + + /* + * As above, mmu_lock isn't held when destroying the VM! There can't + * be other references to @kvm, i.e. nothing else can invalidate roots + * or be consuming roots, but walking the list of roots does need to be + * guarded against roots being deleted by the asynchronous zap worker. + */ + rcu_read_lock(); + + list_for_each_entry_rcu(root, &kvm->arch.tdp_mmu_roots, link) { + if (!root->role.invalid) { root->role.invalid = true; tdp_mmu_schedule_zap_root(kvm, root); } } + + rcu_read_unlock(); } /* From 82d811ff566594de3676f35808e8a9e19c5c864c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Wed, 23 Aug 2023 18:01:04 -0700 Subject: [PATCH 056/123] KVM: x86/mmu: Fix an sign-extension bug with mmu_seq that hangs vCPUs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Upstream commit ba6e3fe25543 ("KVM: x86/mmu: Grab mmu_invalidate_seq in kvm_faultin_pfn()") unknowingly fixed the bug in v6.3 when refactoring how KVM tracks the sequence counter snapshot. 
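For illustration only (an editorial aside, not part of the upstream patch): on an LP64 system the cast chain described below can be reproduced with a few lines of user-space C, using the same hypothetical counter value quoted further down.

#include <stdio.h>

int main(void)
{
    unsigned long mmu_invalidate_seq = 0x8002dc25UL; /* counter after ~2 billion invalidations */
    int snapshot = mmu_invalidate_seq;               /* the old "int" snapshot truncates to a negative value */

    /*
     * The comparison sign-extends the int back to unsigned long, so the
     * two sides can never be equal once bit 31 of the counter is set.
     */
    if (mmu_invalidate_seq != (unsigned long)snapshot)
        printf("0x%lx != 0x%lx: the fault looks stale forever\n",
               mmu_invalidate_seq, (unsigned long)snapshot);
    return 0;
}
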
Take the vCPU's mmu_seq snapshot as an "unsigned long" instead of an "int" when checking to see if a page fault is stale, as the sequence count is stored as an "unsigned long" everywhere else in KVM. This fixes a bug where KVM will effectively hang vCPUs due to always thinking page faults are stale, which results in KVM refusing to "fix" faults. mmu_invalidate_seq (née mmu_notifier_seq) is a sequence counter used when KVM is handling page faults to detect if userspace mappings relevant to the guest were invalidated between snapshotting the counter and acquiring mmu_lock, i.e. to ensure that the userspace mapping KVM is using to resolve the page fault is fresh. If KVM sees that the counter has changed, KVM simply resumes the guest without fixing the fault. What _should_ happen is that the source of the mmu_notifier invalidations eventually goes away, mmu_invalidate_seq becomes stable, and KVM can once again fix guest page fault(s). But for a long-lived VM and/or a VM that the host just doesn't particularly like, it's possible for a VM to be on the receiving end of 2 billion (with a B) mmu_notifier invalidations. When that happens, bit 31 will be set in mmu_invalidate_seq. This causes the value to be turned into a 32-bit negative value when implicitly cast to an "int" by is_page_fault_stale(), and then sign-extended into a 64-bit unsigned when the signed "int" is implicitly cast back to an "unsigned long" on the call to mmu_invalidate_retry_hva(). As a result of the casting and sign-extension, given a sequence counter of e.g. 0x8002dc25, mmu_invalidate_retry_hva() ends up doing if (0x8002dc25 != 0xffffffff8002dc25) and signals that the page fault is stale and needs to be retried even though the sequence counter is stable, and KVM effectively hangs any vCPU that takes a page fault (EPT violation or #NPF when TDP is enabled). Reported-by: Brian Rak Reported-by: Amaan Cheval Reported-by: Eric Wheeler Closes: https://lore.kernel.org/all/f023d927-52aa-7e08-2ee5-59a2fbc65953@gameservers.com Fixes: a955cad84cda ("KVM: x86/mmu: Retry page fault if root is invalidated by memslot update") Signed-off-by: Sean Christopherson Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/mmu/mmu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index 230108a90cf3..beca03556379 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -4212,7 +4212,8 @@ static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) * root was invalidated by a memslot update or a relevant mmu_notifier fired. */ static bool is_page_fault_stale(struct kvm_vcpu *vcpu, - struct kvm_page_fault *fault, int mmu_seq) + struct kvm_page_fault *fault, + unsigned long mmu_seq) { struct kvm_mmu_page *sp = to_shadow_page(vcpu->arch.mmu->root.hpa); From 0d617fb6d5132dc1ffd12ec0c90af71fd89c63a0 Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 7 Dec 2022 03:53:34 +0000 Subject: [PATCH 057/123] io_uring: get rid of double locking Commit 11373026f2960390d5e330df4e92735c4265c440 upstream. We don't need to take both uring_locks at once, msg_ring can be split in two parts, first getting a file from the filetable of the first ring and then installing it into the second one. 
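A rough user-space analogy of the reworked flow (editorial sketch with made-up names, not the io_uring code itself): the source table is only read under its own lock while the entry is pinned, and the target lock is taken separately for the install, so the two locks are never held at the same time.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t src_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t dst_lock = PTHREAD_MUTEX_INITIALIZER;
static int src_table[8] = { 40, 41, 42 };
static int dst_table[8];

static int grab_from_src(int idx)
{
    int val;

    pthread_mutex_lock(&src_lock);   /* step 1: pin the entry under the source lock only */
    val = src_table[idx];
    pthread_mutex_unlock(&src_lock);
    return val;
}

static void install_in_dst(int slot, int val)
{
    pthread_mutex_lock(&dst_lock);   /* step 2: install under the target lock only */
    dst_table[slot] = val;
    pthread_mutex_unlock(&dst_lock);
}

int main(void)
{
    install_in_dst(0, grab_from_src(1));
    printf("installed %d\n", dst_table[0]);
    return 0;
}
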
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/a80ecc2bc99c3b3f2cf20015d618b7c51419a797.1670384893.git.asml.silence@gmail.com Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- io_uring/msg_ring.c | 87 ++++++++++++++++++++++++++------------------- io_uring/msg_ring.h | 1 + io_uring/opdef.c | 1 + 3 files changed, 52 insertions(+), 37 deletions(-) diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 3526389ac218..ee8e7ac8c582 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -15,6 +15,7 @@ struct io_msg { struct file *file; + struct file *src_file; u64 user_data; u32 len; u32 cmd; @@ -23,6 +24,17 @@ struct io_msg { u32 flags; }; +void io_msg_ring_cleanup(struct io_kiocb *req) +{ + struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); + + if (WARN_ON_ONCE(!msg->src_file)) + return; + + fput(msg->src_file); + msg->src_file = NULL; +} + static int io_msg_ring_data(struct io_kiocb *req) { struct io_ring_ctx *target_ctx = req->file->private_data; @@ -39,17 +51,13 @@ static int io_msg_ring_data(struct io_kiocb *req) return -EOVERFLOW; } -static void io_double_unlock_ctx(struct io_ring_ctx *ctx, - struct io_ring_ctx *octx, +static void io_double_unlock_ctx(struct io_ring_ctx *octx, unsigned int issue_flags) { - if (issue_flags & IO_URING_F_UNLOCKED) - mutex_unlock(&ctx->uring_lock); mutex_unlock(&octx->uring_lock); } -static int io_double_lock_ctx(struct io_ring_ctx *ctx, - struct io_ring_ctx *octx, +static int io_double_lock_ctx(struct io_ring_ctx *octx, unsigned int issue_flags) { /* @@ -62,26 +70,36 @@ static int io_double_lock_ctx(struct io_ring_ctx *ctx, return -EAGAIN; return 0; } - - /* Always grab smallest value ctx first. We know ctx != octx. */ - if (ctx < octx) { - mutex_lock(&ctx->uring_lock); - mutex_lock(&octx->uring_lock); - } else { - mutex_lock(&octx->uring_lock); - mutex_lock(&ctx->uring_lock); - } - + mutex_lock(&octx->uring_lock); return 0; } +static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); + struct io_ring_ctx *ctx = req->ctx; + struct file *file = NULL; + unsigned long file_ptr; + int idx = msg->src_fd; + + io_ring_submit_lock(ctx, issue_flags); + if (likely(idx < ctx->nr_user_files)) { + idx = array_index_nospec(idx, ctx->nr_user_files); + file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr; + file = (struct file *) (file_ptr & FFS_MASK); + if (file) + get_file(file); + } + io_ring_submit_unlock(ctx, issue_flags); + return file; +} + static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *target_ctx = req->file->private_data; struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); struct io_ring_ctx *ctx = req->ctx; - unsigned long file_ptr; - struct file *src_file; + struct file *src_file = msg->src_file; int ret; if (msg->len) @@ -90,28 +108,22 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) return -EINVAL; if (target_ctx->flags & IORING_SETUP_R_DISABLED) return -EBADFD; + if (!src_file) { + src_file = io_msg_grab_file(req, issue_flags); + if (!src_file) + return -EBADF; + msg->src_file = src_file; + req->flags |= REQ_F_NEED_CLEANUP; + } - ret = io_double_lock_ctx(ctx, target_ctx, issue_flags); - if (unlikely(ret)) - return ret; - - ret = -EBADF; - if (unlikely(msg->src_fd >= ctx->nr_user_files)) - goto out_unlock; - - msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files); - file_ptr = io_fixed_file_slot(&ctx->file_table, 
msg->src_fd)->file_ptr; - if (!file_ptr) - goto out_unlock; - - src_file = (struct file *) (file_ptr & FFS_MASK); - get_file(src_file); + if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) + return -EAGAIN; ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd); - if (ret < 0) { - fput(src_file); + if (ret < 0) goto out_unlock; - } + msg->src_file = NULL; + req->flags &= ~REQ_F_NEED_CLEANUP; if (msg->flags & IORING_MSG_RING_CQE_SKIP) goto out_unlock; @@ -125,7 +137,7 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0, true)) ret = -EOVERFLOW; out_unlock: - io_double_unlock_ctx(ctx, target_ctx, issue_flags); + io_double_unlock_ctx(target_ctx, issue_flags); return ret; } @@ -136,6 +148,7 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (unlikely(sqe->buf_index || sqe->personality)) return -EINVAL; + msg->src_file = NULL; msg->user_data = READ_ONCE(sqe->off); msg->len = READ_ONCE(sqe->len); msg->cmd = READ_ONCE(sqe->addr); diff --git a/io_uring/msg_ring.h b/io_uring/msg_ring.h index fb9601f202d0..3987ee6c0e5f 100644 --- a/io_uring/msg_ring.h +++ b/io_uring/msg_ring.h @@ -2,3 +2,4 @@ int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe); int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags); +void io_msg_ring_cleanup(struct io_kiocb *req); diff --git a/io_uring/opdef.c b/io_uring/opdef.c index 04dd2c983fce..3aa0d65c50e3 100644 --- a/io_uring/opdef.c +++ b/io_uring/opdef.c @@ -445,6 +445,7 @@ const struct io_op_def io_op_defs[] = { .name = "MSG_RING", .prep = io_msg_ring_prep, .issue = io_msg_ring, + .cleanup = io_msg_ring_cleanup, }, [IORING_OP_FSETXATTR] = { .needs_file = 1, From 4f59375285188baa5a22100af24f0fb3e2bc0e3d Mon Sep 17 00:00:00 2001 From: Pavel Begunkov Date: Wed, 7 Dec 2022 03:53:35 +0000 Subject: [PATCH 058/123] io_uring: extract a io_msg_install_complete helper Commit 172113101641cf1f9628c528ec790cb809f2b704 upstream. Extract a helper called io_msg_install_complete() from io_msg_send_fd(), will be used later. 
Signed-off-by: Pavel Begunkov Link: https://lore.kernel.org/r/1500ca1054cc4286a3ee1c60aacead57fcdfa02a.1670384893.git.asml.silence@gmail.com Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- io_uring/msg_ring.c | 38 +++++++++++++++++++++----------------- 1 file changed, 21 insertions(+), 17 deletions(-) diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index ee8e7ac8c582..fd96a0cd85f0 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -94,40 +94,25 @@ static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_fl return file; } -static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) +static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *target_ctx = req->file->private_data; struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); - struct io_ring_ctx *ctx = req->ctx; struct file *src_file = msg->src_file; int ret; - if (msg->len) - return -EINVAL; - if (target_ctx == ctx) - return -EINVAL; - if (target_ctx->flags & IORING_SETUP_R_DISABLED) - return -EBADFD; - if (!src_file) { - src_file = io_msg_grab_file(req, issue_flags); - if (!src_file) - return -EBADF; - msg->src_file = src_file; - req->flags |= REQ_F_NEED_CLEANUP; - } - if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) return -EAGAIN; ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd); if (ret < 0) goto out_unlock; + msg->src_file = NULL; req->flags &= ~REQ_F_NEED_CLEANUP; if (msg->flags & IORING_MSG_RING_CQE_SKIP) goto out_unlock; - /* * If this fails, the target still received the file descriptor but * wasn't notified of the fact. This means that if this request @@ -141,6 +126,25 @@ out_unlock: return ret; } +static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) +{ + struct io_ring_ctx *target_ctx = req->file->private_data; + struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); + struct io_ring_ctx *ctx = req->ctx; + struct file *src_file = msg->src_file; + + if (target_ctx == ctx) + return -EINVAL; + if (!src_file) { + src_file = io_msg_grab_file(req, issue_flags); + if (!src_file) + return -EBADF; + msg->src_file = src_file; + req->flags |= REQ_F_NEED_CLEANUP; + } + return io_msg_install_complete(req, issue_flags); +} + int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); From 816c7cecf6a0cf04b5b543690e38a1b15bdf8e88 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 19 Jan 2023 09:01:27 -0700 Subject: [PATCH 059/123] io_uring/msg_ring: move double lock/unlock helpers higher up Commit 423d5081d0451faa59a707e57373801da5b40141 upstream. In preparation for needing them somewhere else, move them and get rid of the unused 'issue_flags' for the unlock side. No functional changes in this patch. Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- io_uring/msg_ring.c | 47 ++++++++++++++++++++++----------------------- 1 file changed, 23 insertions(+), 24 deletions(-) diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index fd96a0cd85f0..825d8c579fd3 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -24,6 +24,28 @@ struct io_msg { u32 flags; }; +static void io_double_unlock_ctx(struct io_ring_ctx *octx) +{ + mutex_unlock(&octx->uring_lock); +} + +static int io_double_lock_ctx(struct io_ring_ctx *octx, + unsigned int issue_flags) +{ + /* + * To ensure proper ordering between the two ctxs, we can only + * attempt a trylock on the target. 
If that fails and we already have + * the source ctx lock, punt to io-wq. + */ + if (!(issue_flags & IO_URING_F_UNLOCKED)) { + if (!mutex_trylock(&octx->uring_lock)) + return -EAGAIN; + return 0; + } + mutex_lock(&octx->uring_lock); + return 0; +} + void io_msg_ring_cleanup(struct io_kiocb *req) { struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); @@ -51,29 +73,6 @@ static int io_msg_ring_data(struct io_kiocb *req) return -EOVERFLOW; } -static void io_double_unlock_ctx(struct io_ring_ctx *octx, - unsigned int issue_flags) -{ - mutex_unlock(&octx->uring_lock); -} - -static int io_double_lock_ctx(struct io_ring_ctx *octx, - unsigned int issue_flags) -{ - /* - * To ensure proper ordering between the two ctxs, we can only - * attempt a trylock on the target. If that fails and we already have - * the source ctx lock, punt to io-wq. - */ - if (!(issue_flags & IO_URING_F_UNLOCKED)) { - if (!mutex_trylock(&octx->uring_lock)) - return -EAGAIN; - return 0; - } - mutex_lock(&octx->uring_lock); - return 0; -} - static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) { struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); @@ -122,7 +121,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0, true)) ret = -EOVERFLOW; out_unlock: - io_double_unlock_ctx(target_ctx, issue_flags); + io_double_unlock_ctx(target_ctx); return ret; } From 22a406b3629a10979916ea7cace47858410117b5 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 22 Aug 2023 18:00:02 -0600 Subject: [PATCH 060/123] io_uring/msg_ring: fix missing lock on overflow for IOPOLL Commit e12d7a46f65ae4b7d58a5e0c1cbfa825cf8d830d upstream. If the target ring is configured with IOPOLL, then we always need to hold the target ring uring_lock before posting CQEs. We could just grab it unconditionally, but since we don't expect many target rings to be of this type, make grabbing the uring_lock conditional on the ring type. 
Link: https://lore.kernel.org/io-uring/Y8krlYa52%2F0YGqkg@ip-172-31-85-199.ec2.internal/ Reported-by: Xingyuan Mo Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- io_uring/msg_ring.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 825d8c579fd3..cd922d2bef5f 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -57,20 +57,30 @@ void io_msg_ring_cleanup(struct io_kiocb *req) msg->src_file = NULL; } -static int io_msg_ring_data(struct io_kiocb *req) +static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags) { struct io_ring_ctx *target_ctx = req->file->private_data; struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg); + int ret; if (msg->src_fd || msg->dst_fd || msg->flags) return -EINVAL; if (target_ctx->flags & IORING_SETUP_R_DISABLED) return -EBADFD; - if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true)) - return 0; + ret = -EOVERFLOW; + if (target_ctx->flags & IORING_SETUP_IOPOLL) { + if (unlikely(io_double_lock_ctx(target_ctx, issue_flags))) + return -EAGAIN; + if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true)) + ret = 0; + io_double_unlock_ctx(target_ctx); + } else { + if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true)) + ret = 0; + } - return -EOVERFLOW; + return ret; } static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags) @@ -175,7 +185,7 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags) switch (msg->cmd) { case IORING_MSG_DATA: - ret = io_msg_ring_data(req); + ret = io_msg_ring_data(req, issue_flags); break; case IORING_MSG_SEND_FD: ret = io_msg_send_fd(req, issue_flags); From 014fec5540108047a92ce9527e0ec285b0d59691 Mon Sep 17 00:00:00 2001 From: BrenoRCBrito Date: Fri, 18 Aug 2023 18:14:16 -0300 Subject: [PATCH 061/123] ASoC: amd: yc: Add VivoBook Pro 15 to quirks list for acp6x commit 3b1f08833c45d0167741e4097b0150e7cf086102 upstream. VivoBook Pro 15 Ryzen Edition uses Ryzen 6800H processor, and adding to quirks list for acp6x will enable internal mic. Signed-off-by: BrenoRCBrito Link: https://lore.kernel.org/r/20230818211417.32167-1-brenorcbrito@gmail.com Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/amd/yc/acp6x-mach.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index c1ca3ceac5f2..26101299af4d 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -248,6 +248,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "M6500RC"), + } + }, { .driver_data = &acp6x_card, .matches = { From 85607ef399d9763c9a5d122eb6bce91d27d85cb9 Mon Sep 17 00:00:00 2001 From: Charles Keepax Date: Wed, 23 Aug 2023 09:53:08 +0100 Subject: [PATCH 062/123] ASoC: cs35l41: Correct amp_gain_tlv values commit 1613781d7e8a93618ff3a6b37f81f06769b53717 upstream. The current analog gain TLV seems to have completely incorrect values in it. The gain starts at 0.5dB, proceeds in 1dB steps, and has no mute value, correct the control to match. 
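As an editorial aside (not part of the patch): DECLARE_TLV_DB_SCALE() takes (minimum, step, mute) in 0.01 dB units, so the corrected arguments encode exactly the behaviour described above. A tiny user-space sketch of that mapping:

#include <stdio.h>

int main(void)
{
    int min = 50, step = 100, mute = 0;  /* 0.01 dB units: 0.5 dB minimum, 1 dB steps, no mute */

    for (int item = 0; item < 4; item++)
        printf("control step %d -> %.2f dB%s\n", item,
               (min + item * step) / 100.0,
               (mute && item == 0) ? " (mute)" : "");
    return 0;
}
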
Signed-off-by: Charles Keepax Link: https://lore.kernel.org/r/20230823085308.753572-1-ckeepax@opensource.cirrus.com Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/cs35l41.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/codecs/cs35l41.c b/sound/soc/codecs/cs35l41.c index f2b5032daa6a..2f4b0ee93ace 100644 --- a/sound/soc/codecs/cs35l41.c +++ b/sound/soc/codecs/cs35l41.c @@ -167,7 +167,7 @@ static int cs35l41_get_fs_mon_config_index(int freq) static const DECLARE_TLV_DB_RANGE(dig_vol_tlv, 0, 0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1), 1, 913, TLV_DB_MINMAX_ITEM(-10200, 1200)); -static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 0, 1, 1); +static DECLARE_TLV_DB_SCALE(amp_gain_tlv, 50, 100, 0); static const struct snd_kcontrol_new dre_ctrl = SOC_DAPM_SINGLE("Switch", CS35L41_PWR_CTRL3, 20, 1, 0); From b8b7243aafecc1046d90b6d636355e2a1fe99835 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 23 Aug 2023 14:51:39 +1000 Subject: [PATCH 063/123] ibmveth: Use dcbf rather than dcbfl commit bfedba3b2c7793ce127680bc8f70711e05ec7a17 upstream. When building for power4, newer binutils don't recognise the "dcbfl" extended mnemonic. dcbfl RA, RB is equivalent to dcbf RA, RB, 1. Switch to "dcbf" to avoid the build error. Signed-off-by: Michael Ellerman Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmveth.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 5b96cd94dcd2..0b4ec6e41eb4 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -203,7 +203,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length) unsigned long offset; for (offset = 0; offset < length; offset += SMP_CACHE_BYTES) - asm("dcbfl %0,%1" :: "b" (addr), "r" (offset)); + asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset)); } /* replenish the buffers for a pool. note that we don't need to From e6a60eccd0c8c015a2493ceea832d2c0eaf83dba Mon Sep 17 00:00:00 2001 From: Ping-Ke Shih Date: Fri, 18 Aug 2023 09:40:04 +0800 Subject: [PATCH 064/123] wifi: mac80211: limit reorder_buf_filtered to avoid UBSAN warning commit b98c16107cc1647242abbd11f234c05a3a5864f6 upstream. The commit 06470f7468c8 ("mac80211: add API to allow filtering frames in BA sessions") added reorder_buf_filtered to mark frames filtered by firmware, and it can only work correctly if hw.max_rx_aggregation_subframes <= 64 since it stores the bitmap in a u64 variable. However, new HE or EHT devices can support BlockAck number up to 256 or 1024, and then using a higher subframe index leads UBSAN warning: UBSAN: shift-out-of-bounds in net/mac80211/rx.c:1129:39 shift exponent 215 is too large for 64-bit type 'long long unsigned int' Call Trace: dump_stack_lvl+0x48/0x70 dump_stack+0x10/0x20 __ubsan_handle_shift_out_of_bounds+0x1ac/0x360 ieee80211_release_reorder_frame.constprop.0.cold+0x64/0x69 [mac80211] ieee80211_sta_reorder_release+0x9c/0x400 [mac80211] ieee80211_prepare_and_rx_handle+0x1234/0x1420 [mac80211] ieee80211_rx_list+0xaef/0xf60 [mac80211] ieee80211_rx_napi+0x53/0xd0 [mac80211] Since only old hardware that supports <=64 BlockAck uses ieee80211_mark_rx_ba_filtered_frames(), limit the use as it is, so add a WARN_ONCE() and comment to note to avoid using this function if hardware capability is not suitable. 
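Editorial illustration (not part of the patch): the 64-frame limit exists because reorder_buf_filtered is a single u64 bitmap, so any subframe index of 64 or more turns BIT_ULL(index) into an over-wide shift, which is undefined behaviour in C and is exactly what UBSAN reports above. A standalone sketch of the guard:

#include <stdio.h>

int main(void)
{
    unsigned long long filtered = 0;
    unsigned int index = 215;            /* e.g. an HE/EHT BlockAck subframe index */

    if (index >= 64) {                   /* the limit a u64 bitmap implicitly imposes */
        printf("index %u does not fit in a 64-bit bitmap\n", index);
        return 1;
    }
    filtered |= 1ULL << index;           /* only defined for index < 64 */
    printf("filtered = %#llx\n", filtered);
    return 0;
}
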
Signed-off-by: Ping-Ke Shih Link: https://lore.kernel.org/r/20230818014004.16177-1-pkshih@realtek.com [edit commit message] Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- include/net/mac80211.h | 1 + net/mac80211/rx.c | 12 ++++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 72b739dc6d53..8a338c33118f 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -6444,6 +6444,7 @@ void ieee80211_stop_rx_ba_session(struct ieee80211_vif *vif, u16 ba_rx_bitmap, * marks frames marked in the bitmap as having been filtered. Afterwards, it * checks if any frames in the window starting from @ssn can now be released * (in case they were only waiting for frames that were filtered.) + * (Only work correctly if @max_rx_aggregation_subframes <= 64 frames) */ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, u16 ssn, u64 filtered, diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 0f81492da0b4..55dc0610e863 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -1102,7 +1102,8 @@ static inline bool ieee80211_rx_reorder_ready(struct tid_ampdu_rx *tid_agg_rx, struct sk_buff *tail = skb_peek_tail(frames); struct ieee80211_rx_status *status; - if (tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) + if (tid_agg_rx->reorder_buf_filtered && + tid_agg_rx->reorder_buf_filtered & BIT_ULL(index)) return true; if (!tail) @@ -1143,7 +1144,8 @@ static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata, } no_frame: - tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); + if (tid_agg_rx->reorder_buf_filtered) + tid_agg_rx->reorder_buf_filtered &= ~BIT_ULL(index); tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num); } @@ -4162,6 +4164,7 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, u16 ssn, u64 filtered, u16 received_mpdus) { + struct ieee80211_local *local; struct sta_info *sta; struct tid_ampdu_rx *tid_agg_rx; struct sk_buff_head frames; @@ -4179,6 +4182,11 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid, sta = container_of(pubsta, struct sta_info, sta); + local = sta->sdata->local; + WARN_ONCE(local->hw.max_rx_aggregation_subframes > 64, + "RX BA marker can't support max_rx_aggregation_subframes %u > 64\n", + local->hw.max_rx_aggregation_subframes); + if (!ieee80211_rx_data_set_sta(&rx, sta, -1)) return; From ac467d7405fe69b07b5a3fe233fd62fad7b2faec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Apitzsch?= Date: Sat, 19 Aug 2023 09:12:15 +0200 Subject: [PATCH 065/123] platform/x86: ideapad-laptop: Add support for new hotkeys found on ThinkBook 14s Yoga ITL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a260f7d726fde52c0278bd3fa085a758639bcee2 upstream. The Lenovo Thinkbook 14s Yoga ITL has 4 new symbols/shortcuts on their F9-F11 and PrtSc keys: F9: Has a symbol of a head with a headset, the manual says "Service key" F10: Has a symbol of a telephone horn which has been picked up from the receiver, the manual says: "Answer incoming calls" F11: Has a symbol of a telephone horn which is resting on the receiver, the manual says: "Reject incoming calls" PrtSc: Has a symbol of a siccor and a dashed ellipse, the manual says: "Open the Windows 'Snipping' Tool app" This commit adds support for these 4 new hkey events. 
Signed-off-by: André Apitzsch Link: https://lore.kernel.org/r/20230819-lenovo_keys-v1-1-9d34eac88e0a@apitzsch.eu Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede Signed-off-by: Greg Kroah-Hartman --- drivers/platform/x86/ideapad-laptop.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index bd38c7dcae34..de03b8889e9d 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1176,6 +1176,11 @@ static const struct key_entry ideapad_keymap[] = { { KE_IGNORE, 0x03 | IDEAPAD_WMI_KEY }, /* Customizable Lenovo Hotkey ("star" with 'S' inside) */ { KE_KEY, 0x01 | IDEAPAD_WMI_KEY, { KEY_FAVORITES } }, + { KE_KEY, 0x04 | IDEAPAD_WMI_KEY, { KEY_SELECTIVE_SCREENSHOT } }, + /* Lenovo Support */ + { KE_KEY, 0x07 | IDEAPAD_WMI_KEY, { KEY_HELP } }, + { KE_KEY, 0x0e | IDEAPAD_WMI_KEY, { KEY_PICKUP_PHONE } }, + { KE_KEY, 0x0f | IDEAPAD_WMI_KEY, { KEY_HANGUP_PHONE } }, /* Dark mode toggle */ { KE_KEY, 0x13 | IDEAPAD_WMI_KEY, { KEY_PROG1 } }, /* Sound profile switch */ From 14904f4d8bf8c7e12794739ee4f5ff3a346d7bb2 Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Fri, 30 Jun 2023 09:18:13 -0400 Subject: [PATCH 066/123] NFSv4: Fix dropped lock for racing OPEN and delegation return commit 1cbc11aaa01f80577b67ae02c73ee781112125fd upstream. Commmit f5ea16137a3f ("NFSv4: Retry LOCK on OLD_STATEID during delegation return") attempted to solve this problem by using nfs4's generic async error handling, but introduced a regression where v4.0 lock recovery would hang. The additional complexity introduced by overloading that error handling is not necessary for this case. This patch expects that commit to be reverted. The problem as originally explained in the above commit is: There's a small window where a LOCK sent during a delegation return can race with another OPEN on client, but the open stateid has not yet been updated. In this case, the client doesn't handle the OLD_STATEID error from the server and will lose this lock, emitting: "NFS: nfs4_handle_delegation_recall_error: unhandled error -10024". Fix this by using the old_stateid refresh helpers if the server replies with OLD_STATEID. Suggested-by: Trond Myklebust Signed-off-by: Benjamin Coddington Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/nfs4proc.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index d67383665e9b..1044305e7799 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7170,8 +7170,15 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata) } else if (!nfs4_update_lock_stateid(lsp, &data->res.stateid)) goto out_restart; break; - case -NFS4ERR_BAD_STATEID: case -NFS4ERR_OLD_STATEID: + if (data->arg.new_lock_owner != 0 && + nfs4_refresh_open_old_stateid(&data->arg.open_stateid, + lsp->ls_state)) + goto out_restart; + if (nfs4_refresh_lock_old_stateid(&data->arg.lock_stateid, lsp)) + goto out_restart; + fallthrough; + case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_EXPIRED: if (data->arg.new_lock_owner != 0) { From a7d172252bfad47d06a5dcf6fbb6d4ea85f4cedd Mon Sep 17 00:00:00 2001 From: Andrey Skvortsov Date: Sat, 5 Aug 2023 11:48:47 +0300 Subject: [PATCH 067/123] clk: Fix slab-out-of-bounds error in devm_clk_release() commit 66fbfb35da47f391bdadf9fa7ceb88af4faa9022 upstream. 
Problem can be reproduced by unloading snd_soc_simple_card, because in devm_get_clk_from_child() devres data is allocated as `struct clk`, but devm_clk_release() expects devres data to be `struct devm_clk_state`. KASAN report: ================================================================== BUG: KASAN: slab-out-of-bounds in devm_clk_release+0x20/0x54 Read of size 8 at addr ffffff800ee09688 by task (udev-worker)/287 Call trace: dump_backtrace+0xe8/0x11c show_stack+0x1c/0x30 dump_stack_lvl+0x60/0x78 print_report+0x150/0x450 kasan_report+0xa8/0xf0 __asan_load8+0x78/0xa0 devm_clk_release+0x20/0x54 release_nodes+0x84/0x120 devres_release_all+0x144/0x210 device_unbind_cleanup+0x1c/0xac really_probe+0x2f0/0x5b0 __driver_probe_device+0xc0/0x1f0 driver_probe_device+0x68/0x120 __driver_attach+0x140/0x294 bus_for_each_dev+0xec/0x160 driver_attach+0x38/0x44 bus_add_driver+0x24c/0x300 driver_register+0xf0/0x210 __platform_driver_register+0x48/0x54 asoc_simple_card_init+0x24/0x1000 [snd_soc_simple_card] do_one_initcall+0xac/0x340 do_init_module+0xd0/0x300 load_module+0x2ba4/0x3100 __do_sys_init_module+0x2c8/0x300 __arm64_sys_init_module+0x48/0x5c invoke_syscall+0x64/0x190 el0_svc_common.constprop.0+0x124/0x154 do_el0_svc+0x44/0xdc el0_svc+0x14/0x50 el0t_64_sync_handler+0xec/0x11c el0t_64_sync+0x14c/0x150 Allocated by task 287: kasan_save_stack+0x38/0x60 kasan_set_track+0x28/0x40 kasan_save_alloc_info+0x20/0x30 __kasan_kmalloc+0xac/0xb0 __kmalloc_node_track_caller+0x6c/0x1c4 __devres_alloc_node+0x44/0xb4 devm_get_clk_from_child+0x44/0xa0 asoc_simple_parse_clk+0x1b8/0x1dc [snd_soc_simple_card_utils] simple_parse_node.isra.0+0x1ec/0x230 [snd_soc_simple_card] simple_dai_link_of+0x1bc/0x334 [snd_soc_simple_card] __simple_for_each_link+0x2ec/0x320 [snd_soc_simple_card] asoc_simple_probe+0x468/0x4dc [snd_soc_simple_card] platform_probe+0x90/0xf0 really_probe+0x118/0x5b0 __driver_probe_device+0xc0/0x1f0 driver_probe_device+0x68/0x120 __driver_attach+0x140/0x294 bus_for_each_dev+0xec/0x160 driver_attach+0x38/0x44 bus_add_driver+0x24c/0x300 driver_register+0xf0/0x210 __platform_driver_register+0x48/0x54 asoc_simple_card_init+0x24/0x1000 [snd_soc_simple_card] do_one_initcall+0xac/0x340 do_init_module+0xd0/0x300 load_module+0x2ba4/0x3100 __do_sys_init_module+0x2c8/0x300 __arm64_sys_init_module+0x48/0x5c invoke_syscall+0x64/0x190 el0_svc_common.constprop.0+0x124/0x154 do_el0_svc+0x44/0xdc el0_svc+0x14/0x50 el0t_64_sync_handler+0xec/0x11c el0t_64_sync+0x14c/0x150 The buggy address belongs to the object at ffffff800ee09600 which belongs to the cache kmalloc-256 of size 256 The buggy address is located 136 bytes inside of 256-byte region [ffffff800ee09600, ffffff800ee09700) The buggy address belongs to the physical page: page:000000002d97303b refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x4ee08 head:000000002d97303b order:1 compound_mapcount:0 compound_pincount:0 flags: 0x10200(slab|head|zone=0) raw: 0000000000010200 0000000000000000 dead000000000122 ffffff8002c02480 raw: 0000000000000000 0000000080100010 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffffff800ee09580: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffffff800ee09600: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 >ffffff800ee09680: 00 fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ^ ffffff800ee09700: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc ffffff800ee09780: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc 
================================================================== Fixes: abae8e57e49a ("clk: generalize devm_clk_get() a bit") Signed-off-by: Andrey Skvortsov Link: https://lore.kernel.org/r/20230805084847.3110586-1-andrej.skvortzov@gmail.com Signed-off-by: Stephen Boyd Signed-off-by: Greg Kroah-Hartman --- drivers/clk/clk-devres.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/clk/clk-devres.c b/drivers/clk/clk-devres.c index 4fb4fd4b06bd..737aa70e2cb3 100644 --- a/drivers/clk/clk-devres.c +++ b/drivers/clk/clk-devres.c @@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put); struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id) { - struct clk **ptr, *clk; + struct devm_clk_state *state; + struct clk *clk; - ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) + state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL); + if (!state) return ERR_PTR(-ENOMEM); clk = of_clk_get_by_name(np, con_id); if (!IS_ERR(clk)) { - *ptr = clk; - devres_add(dev, ptr); + state->clk = clk; + devres_add(dev, state); } else { - devres_free(ptr); + devres_free(state); } return clk; From 091591f6e7c35fc2f0a74c9d044b0638a37b0f3f Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Thu, 17 Aug 2023 13:57:59 -0400 Subject: [PATCH 068/123] mm,ima,kexec,of: use memblock_free_late from ima_free_kexec_buffer commit f0362a253606e2031f8d61c74195d4d6556e12a4 upstream. The code calling ima_free_kexec_buffer runs long after the memblock allocator has already been torn down, potentially resulting in a use after free in memblock_isolate_range. With KASAN or KFENCE, this use after free will result in a BUG from the idle task, and a subsequent kernel panic. Switch ima_free_kexec_buffer over to memblock_free_late to avoid that issue. Fixes: fee3ff99bc67 ("powerpc: Move arch independent ima kexec functions to drivers/of/kexec.c") Cc: stable@kernel.org Signed-off-by: Rik van Riel Suggested-by: Mike Rappoport Link: https://lore.kernel.org/r/20230817135759.0888e5ef@imladris.surriel.com Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- drivers/of/kexec.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/of/kexec.c b/drivers/of/kexec.c index f26d2ba8a371..68278340cecf 100644 --- a/drivers/of/kexec.c +++ b/drivers/of/kexec.c @@ -184,7 +184,8 @@ int __init ima_free_kexec_buffer(void) if (ret) return ret; - return memblock_phys_free(addr, size); + memblock_free_late(addr, size); + return 0; } #endif From d13f3a63d236d311fceece5f34133636fd334bde Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Tue, 22 Aug 2023 22:14:47 -0700 Subject: [PATCH 069/123] shmem: fix smaps BUG sleeping while atomic commit e5548f85b4527c4c803b7eae7887c10bf8f90c97 upstream. smaps_pte_hole_lookup() is calling shmem_partial_swap_usage() with page table lock held: but shmem_partial_swap_usage() does cond_resched_rcu() if need_resched(): "BUG: sleeping function called from invalid context". Since shmem_partial_swap_usage() is designed to count across a range, but smaps_pte_hole_lookup() only calls it for a single page slot, just break out of the loop on the last or only page, before checking need_resched(). 
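A minimal sketch of the fixed loop shape (editorial, user-space stand-in for the XArray walk): the last slot is handled before the resched check, so a single-slot walk never reaches the sleeping path.

#include <stdio.h>

int main(void)
{
    unsigned long start = 7, max = 7;    /* single page slot, as in smaps_pte_hole_lookup() */
    unsigned long swapped = 0;

    for (unsigned long index = start; index <= max; index++) {
        swapped++;                       /* count the slot */
        if (index == max)
            break;                       /* bail out before any resched check */
        /* the cond_resched_rcu() path would only be reachable here */
    }
    printf("swapped = %lu, never slept\n", swapped);
    return 0;
}
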
Link: https://lkml.kernel.org/r/6fe3b3ec-abdf-332f-5c23-6a3b3a3b11a9@google.com Fixes: 230100321518 ("mm/smaps: simplify shmem handling of pte holes") Signed-off-by: Hugh Dickins Acked-by: Peter Xu Cc: [5.16+] Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/shmem.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index aba041a3df73..10365ced5b1f 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -800,14 +800,16 @@ unsigned long shmem_partial_swap_usage(struct address_space *mapping, XA_STATE(xas, &mapping->i_pages, start); struct page *page; unsigned long swapped = 0; + unsigned long max = end - 1; rcu_read_lock(); - xas_for_each(&xas, page, end - 1) { + xas_for_each(&xas, page, max) { if (xas_retry(&xas, page)) continue; if (xa_is_value(page)) swapped++; - + if (xas.xa_index == max) + break; if (need_resched()) { xas_pause(&xas); cond_resched_rcu(); From d4e11b85a2690c31b3d45b1dfa7e61d637a8f2fa Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 23 Aug 2023 18:16:25 +0200 Subject: [PATCH 070/123] ALSA: ymfpci: Fix the missing snd_card_free() call at probe error commit 1d0eb6143c1e85d3f9a3f5a616ee7e5dc351d33b upstream. Like a few other drivers, YMFPCI driver needs to clean up with snd_card_free() call at an error path of the probe; otherwise the other devres resources are released before the card and it results in the UAF. This patch uses the helper for handling the probe error gracefully. Fixes: f33fc1576757 ("ALSA: ymfpci: Create card with device-managed snd_devm_card_new()") Cc: Reported-and-tested-by: Takashi Yano Closes: https://lore.kernel.org/r/20230823135846.1812-1-takashi.yano@nifty.ne.jp Link: https://lore.kernel.org/r/20230823161625.5807-1-tiwai@suse.de Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/ymfpci/ymfpci.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c index 82d4e0fda91b..d62a0e2ddf60 100644 --- a/sound/pci/ymfpci/ymfpci.c +++ b/sound/pci/ymfpci/ymfpci.c @@ -150,8 +150,8 @@ static inline int snd_ymfpci_create_gameport(struct snd_ymfpci *chip, int dev, i void snd_ymfpci_free_gameport(struct snd_ymfpci *chip) { } #endif /* SUPPORT_JOYSTICK */ -static int snd_card_ymfpci_probe(struct pci_dev *pci, - const struct pci_device_id *pci_id) +static int __snd_card_ymfpci_probe(struct pci_dev *pci, + const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; @@ -333,6 +333,12 @@ static int snd_card_ymfpci_probe(struct pci_dev *pci, return 0; } +static int snd_card_ymfpci_probe(struct pci_dev *pci, + const struct pci_device_id *pci_id) +{ + return snd_card_free_on_error(&pci->dev, __snd_card_ymfpci_probe(pci, pci_id)); +} + static struct pci_driver ymfpci_driver = { .name = KBUILD_MODNAME, .id_table = snd_ymfpci_ids, From a8a60bc8027e099b8dd9e13b49b393db2d940004 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Sat, 5 Aug 2023 12:12:56 +0200 Subject: [PATCH 071/123] mm/gup: handle cont-PTE hugetlb pages correctly in gup_must_unshare() via GUP-fast commit 5805192c7b7257d290474cb1a3897d0567281bbc upstream. In contrast to most other GUP code, GUP-fast common page table walking code like gup_pte_range() also handles hugetlb pages. 
But in contrast to other hugetlb page table walking code, it does not look at the hugetlb PTE abstraction whereby we have only a single logical hugetlb PTE per hugetlb page, even when using multiple cont-PTEs underneath -- which is for example what huge_ptep_get() abstracts. So when we have a hugetlb page that is mapped via cont-PTEs, GUP-fast might stumble over a PTE that does not map the head page of a hugetlb page -- not the first "head" PTE of such a cont mapping. Logically, the whole hugetlb page is mapped (entire_mapcount == 1), but we might end up calling gup_must_unshare() with a tail page of a hugetlb page. We only maintain a single PageAnonExclusive flag per hugetlb page (as hugetlb pages cannot get partially COW-shared), stored for the head page. That flag is clear for all tail pages. So when gup_must_unshare() ends up calling PageAnonExclusive() with a tail page of a hugetlb page: 1) With CONFIG_DEBUG_VM_PGFLAGS Stumbles over the: VM_BUG_ON_PGFLAGS(PageHuge(page) && !PageHead(page), page); For example, when executing the COW selftests with 64k hugetlb pages on arm64: [ 61.082187] page:00000000829819ff refcount:3 mapcount:1 mapping:0000000000000000 index:0x1 pfn:0x11ee11 [ 61.082842] head:0000000080f79bf7 order:4 entire_mapcount:1 nr_pages_mapped:0 pincount:2 [ 61.083384] anon flags: 0x17ffff80003000e(referenced|uptodate|dirty|head|mappedtodisk|node=0|zone=2|lastcpupid=0xfffff) [ 61.084101] page_type: 0xffffffff() [ 61.084332] raw: 017ffff800000000 fffffc00037b8401 0000000000000402 0000000200000000 [ 61.084840] raw: 0000000000000010 0000000000000000 00000000ffffffff 0000000000000000 [ 61.085359] head: 017ffff80003000e ffffd9e95b09b788 ffffd9e95b09b788 ffff0007ff63cf71 [ 61.085885] head: 0000000000000000 0000000000000002 00000003ffffffff 0000000000000000 [ 61.086415] page dumped because: VM_BUG_ON_PAGE(PageHuge(page) && !PageHead(page)) [ 61.086914] ------------[ cut here ]------------ [ 61.087220] kernel BUG at include/linux/page-flags.h:990! [ 61.087591] Internal error: Oops - BUG: 00000000f2000800 [#1] SMP [ 61.087999] Modules linked in: ... 
[ 61.089404] CPU: 0 PID: 4612 Comm: cow Kdump: loaded Not tainted 6.5.0-rc4+ #3 [ 61.089917] Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 [ 61.090409] pstate: 604000c5 (nZCv daIF +PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 61.090897] pc : gup_must_unshare.part.0+0x64/0x98 [ 61.091242] lr : gup_must_unshare.part.0+0x64/0x98 [ 61.091592] sp : ffff8000825eb940 [ 61.091826] x29: ffff8000825eb940 x28: 0000000000000000 x27: fffffc00037b8440 [ 61.092329] x26: 0400000000000001 x25: 0000000000080101 x24: 0000000000080000 [ 61.092835] x23: 0000000000080100 x22: ffff0000cffb9588 x21: ffff0000c8ec6b58 [ 61.093341] x20: 0000ffffad6b1000 x19: fffffc00037b8440 x18: ffffffffffffffff [ 61.093850] x17: 2864616548656761 x16: 5021202626202965 x15: 6761702865677548 [ 61.094358] x14: 6567615028454741 x13: 2929656761702864 x12: 6165486567615021 [ 61.094858] x11: 00000000ffff7fff x10: 00000000ffff7fff x9 : ffffd9e958b7a1c0 [ 61.095359] x8 : 00000000000bffe8 x7 : c0000000ffff7fff x6 : 00000000002bffa8 [ 61.095873] x5 : ffff0008bb19e708 x4 : 0000000000000000 x3 : 0000000000000000 [ 61.096380] x2 : 0000000000000000 x1 : ffff0000cf6636c0 x0 : 0000000000000046 [ 61.096894] Call trace: [ 61.097080] gup_must_unshare.part.0+0x64/0x98 [ 61.097392] gup_pte_range+0x3a8/0x3f0 [ 61.097662] gup_pgd_range+0x1ec/0x280 [ 61.097942] lockless_pages_from_mm+0x64/0x1a0 [ 61.098258] internal_get_user_pages_fast+0xe4/0x1d0 [ 61.098612] pin_user_pages_fast+0x58/0x78 [ 61.098917] pin_longterm_test_start+0xf4/0x2b8 [ 61.099243] gup_test_ioctl+0x170/0x3b0 [ 61.099528] __arm64_sys_ioctl+0xa8/0xf0 [ 61.099822] invoke_syscall.constprop.0+0x7c/0xd0 [ 61.100160] el0_svc_common.constprop.0+0xe8/0x100 [ 61.100500] do_el0_svc+0x38/0xa0 [ 61.100736] el0_svc+0x3c/0x198 [ 61.100971] el0t_64_sync_handler+0x134/0x150 [ 61.101280] el0t_64_sync+0x17c/0x180 [ 61.101543] Code: aa1303e0 f00074c1 912b0021 97fffeb2 (d4210000) 2) Without CONFIG_DEBUG_VM_PGFLAGS Always detects "not exclusive" for passed tail pages and refuses to PIN the tail pages R/O, as gup_must_unshare() == true. GUP-fast will fallback to ordinary GUP. As ordinary GUP properly considers the logical hugetlb PTE abstraction in hugetlb_follow_page_mask(), pinning the page will succeed when looking at the PageAnonExclusive on the head page only. So the only real effect of this is that with cont-PTE hugetlb pages, we'll always fallback from GUP-fast to ordinary GUP when not working on the head page, which ends up checking the head page and do the right thing. Consequently, the cow selftests pass with cont-PTE hugetlb pages as well without CONFIG_DEBUG_VM_PGFLAGS. Note that this only applies to anon hugetlb pages that are mapped using cont-PTEs: for example 64k hugetlb pages on a 4k arm64 kernel. ... and only when R/O-pinning (FOLL_PIN) such pages that are mapped into the page table R/O using GUP-fast. On production kernels (and even most debug kernels, that don't set CONFIG_DEBUG_VM_PGFLAGS) this patch should theoretically not be required to be backported. But of course, it does not hurt. 
Link: https://lkml.kernel.org/r/20230805101256.87306-1-david@redhat.com Fixes: a7f226604170 ("mm/gup: trigger FAULT_FLAG_UNSHARE when R/O-pinning a possibly shared anonymous page") Signed-off-by: David Hildenbrand Reported-by: Ryan Roberts Reviewed-by: Ryan Roberts Tested-by: Ryan Roberts Cc: Vlastimil Babka Cc: John Hubbard Cc: Jason Gunthorpe Cc: Peter Xu Cc: Mike Kravetz Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- include/linux/mm.h | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/include/linux/mm.h b/include/linux/mm.h index b8ed44f401b5..f83a1c9ec8e4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3091,6 +3091,16 @@ static inline bool gup_must_unshare(unsigned int flags, struct page *page) if (IS_ENABLED(CONFIG_HAVE_FAST_GUP)) smp_rmb(); + /* + * During GUP-fast we might not get called on the head page for a + * hugetlb page that is mapped using cont-PTE, because GUP-fast does + * not work with the abstracted hugetlb PTEs that always point at the + * head page. For hugetlb, PageAnonExclusive only applies on the head + * page (as it cannot be partially COW-shared), so lookup the head page. + */ + if (unlikely(!PageHead(page) && PageHuge(page))) + page = compound_head(page); + /* * Note that PageKsm() pages cannot be exclusive, and consequently, * cannot get pinned. From 07fad410aa6e90131cd1ba8d12bd1f9488f85af5 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Wed, 9 Aug 2023 18:46:33 +0200 Subject: [PATCH 072/123] mm: add a call to flush_cache_vmap() in vmap_pfn() commit a50420c79731fc5cf27ad43719c1091e842a2606 upstream. flush_cache_vmap() must be called after new vmalloc mappings are installed in the page table in order to allow architectures to make sure the new mapping is visible. It could lead to a panic since on some architectures (like powerpc), the page table walker could see the wrong pte value and trigger a spurious page fault that can not be resolved (see commit f1cb8f9beba8 ("powerpc/64s/radix: avoid ptesync after set_pte and ptep_set_access_flags")). But actually the patch is aiming at riscv: the riscv specification allows the caching of invalid entries in the TLB, and since we recently removed the vmalloc page fault handling, we now need to emit a tlb shootdown whenever a new vmalloc mapping is emitted (https://lore.kernel.org/linux-riscv/20230725132246.817726-1-alexghiti@rivosinc.com/). 
That's a temporary solution, there are ways to avoid that :) Link: https://lkml.kernel.org/r/20230809164633.1556126-1-alexghiti@rivosinc.com Fixes: 3e9a9e256b1e ("mm: add a vmap_pfn function") Reported-by: Dylan Jhong Closes: https://lore.kernel.org/linux-riscv/ZMytNY2J8iyjbPPy@atctrx.andestech.com/ Signed-off-by: Alexandre Ghiti Reviewed-by: Christoph Hellwig Reviewed-by: Palmer Dabbelt Acked-by: Palmer Dabbelt Reviewed-by: Dylan Jhong Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/vmalloc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d5dc361dc104..80bd104a4d42 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2909,6 +2909,10 @@ void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot) free_vm_area(area); return NULL; } + + flush_cache_vmap((unsigned long)area->addr, + (unsigned long)area->addr + count * PAGE_SIZE); + return area->addr; } EXPORT_SYMBOL_GPL(vmap_pfn); From bdc544a87d43a4102aa08114fc939dbddf175f98 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 27 Jun 2023 19:28:08 +0800 Subject: [PATCH 073/123] mm: memory-failure: fix unexpected return value in soft_offline_page() commit e2c1ab070fdc81010ec44634838d24fce9ff9e53 upstream. When page_handle_poison() fails to handle the hugepage or free page in retry path, soft_offline_page() will return 0 while -EBUSY is expected in this case. Consequently the user will think soft_offline_page succeeds while it in fact failed. So the user will not try again later in this case. Link: https://lkml.kernel.org/r/20230627112808.1275241-1-linmiaohe@huawei.com Fixes: b94e02822deb ("mm,hwpoison: try to narrow window race for free pages") Signed-off-by: Miaohe Lin Acked-by: Naoya Horiguchi Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- mm/memory-failure.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 4457f9423e2c..99de0328d1be 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -2591,10 +2591,13 @@ retry: if (ret > 0) { ret = soft_offline_in_use_page(page); } else if (ret == 0) { - if (!page_handle_poison(page, true, false) && try_again) { - try_again = false; - flags &= ~MF_COUNT_INCREASED; - goto retry; + if (!page_handle_poison(page, true, false)) { + if (try_again) { + try_again = false; + flags &= ~MF_COUNT_INCREASED; + goto retry; + } + ret = -EBUSY; } } From 96fb46ef8281c749abe114ed9385cec39bae00e4 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 8 Aug 2023 21:17:11 -0400 Subject: [PATCH 074/123] NFS: Fix a use after free in nfs_direct_join_group() commit be2fd1560eb57b7298aa3c258ddcca0d53ecdea3 upstream. Be more careful when tearing down the subrequests of an O_DIRECT write as part of a retransmission. 
Reported-by: Chris Mason Fixes: ed5d588fe47f ("NFS: Try to join page groups before an O_DIRECT retransmission") Cc: stable@vger.kernel.org Signed-off-by: Trond Myklebust Signed-off-by: Greg Kroah-Hartman --- fs/nfs/direct.c | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 1707f46b1335..cf34d0c30945 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -474,20 +474,26 @@ out: return result; } -static void -nfs_direct_join_group(struct list_head *list, struct inode *inode) +static void nfs_direct_join_group(struct list_head *list, struct inode *inode) { - struct nfs_page *req, *next; + struct nfs_page *req, *subreq; list_for_each_entry(req, list, wb_list) { - if (req->wb_head != req || req->wb_this_page == req) + if (req->wb_head != req) continue; - for (next = req->wb_this_page; - next != req->wb_head; - next = next->wb_this_page) { - nfs_list_remove_request(next); - nfs_release_request(next); - } + subreq = req->wb_this_page; + if (subreq == req) + continue; + do { + /* + * Remove subrequests from this list before freeing + * them in the call to nfs_join_page_group(). + */ + if (!list_empty(&subreq->wb_list)) { + nfs_list_remove_request(subreq); + nfs_release_request(subreq); + } + } while ((subreq = subreq->wb_this_page) != req); nfs_join_page_group(req, inode); } } From 36c5aecc789d4f881d18e6a8f4539636e11ab85e Mon Sep 17 00:00:00 2001 From: Benjamin Coddington Date: Fri, 4 Aug 2023 10:52:20 -0400 Subject: [PATCH 075/123] nfsd: Fix race to FREE_STATEID and cl_revoked commit 3b816601e279756e781e6c4d9b3f3bd21a72ac67 upstream. We have some reports of linux NFS clients that cannot satisfy a linux knfsd server that always sets SEQ4_STATUS_RECALLABLE_STATE_REVOKED even though those clients repeatedly walk all their known state using TEST_STATEID and receive NFS4_OK for all. Its possible for revoke_delegation() to set NFS4_REVOKED_DELEG_STID, then nfsd4_free_stateid() finds the delegation and returns NFS4_OK to FREE_STATEID. Afterward, revoke_delegation() moves the same delegation to cl_revoked. This would produce the observed client/server effect. Fix this by ensuring that the setting of sc_type to NFS4_REVOKED_DELEG_STID and move to cl_revoked happens within the same cl_lock. This will allow nfsd4_free_stateid() to properly remove the delegation from cl_revoked. 
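A user-space analogy of the invariant the fix restores (editorial sketch using pthreads, build with -pthread; not kernel code): once the "revoked" marking and the list insertion are published under the same lock, any observer holding that lock sees either both or neither, which closes the window described above.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cl_lock = PTHREAD_MUTEX_INITIALIZER;
static bool revoked;            /* stands in for NFS4_REVOKED_DELEG_STID */
static bool on_revoked_list;    /* stands in for membership of cl_revoked */

static void *revoke(void *arg)
{
    pthread_mutex_lock(&cl_lock);
    revoked = true;
    on_revoked_list = true;     /* same critical section as the marking */
    pthread_mutex_unlock(&cl_lock);
    return NULL;
}

static void *free_stateid(void *arg)
{
    pthread_mutex_lock(&cl_lock);
    if (revoked && !on_revoked_list)
        printf("window hit: revoked but not findable\n");  /* can no longer happen */
    pthread_mutex_unlock(&cl_lock);
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, revoke, NULL);
    pthread_create(&b, NULL, free_stateid, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    puts("done");
    return 0;
}
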
Link: https://bugzilla.redhat.com/show_bug.cgi?id=2217103 Link: https://bugzilla.redhat.com/show_bug.cgi?id=2176575 Signed-off-by: Benjamin Coddington Cc: stable@vger.kernel.org # v4.17+ Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c5dc0cd6f703..96714e105d7b 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1368,9 +1368,9 @@ static void revoke_delegation(struct nfs4_delegation *dp) WARN_ON(!list_empty(&dp->dl_recall_lru)); if (clp->cl_minorversion) { + spin_lock(&clp->cl_lock); dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; refcount_inc(&dp->dl_stid.sc_count); - spin_lock(&clp->cl_lock); list_add(&dp->dl_recall_lru, &clp->cl_revoked); spin_unlock(&clp->cl_lock); } From d6b64d710e9bcaa22fa99fdc1cb32e19a0eb5b38 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20G=C3=B6ttsche?= Date: Fri, 18 Aug 2023 17:33:58 +0200 Subject: [PATCH 076/123] selinux: set next pointer before attaching to list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 70d91dc9b2ac91327d0eefd86163abc3548effa6 upstream. Set the next pointer in filename_trans_read_helper() before attaching the new node under construction to the list, otherwise garbage would be dereferenced on subsequent failure during cleanup in the out goto label. Cc: Fixes: 430059024389 ("selinux: implement new format of filename transitions") Signed-off-by: Christian Göttsche Signed-off-by: Paul Moore Signed-off-by: Greg Kroah-Hartman --- security/selinux/ss/policydb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index adcfb63b3550..6f9ff4643dcb 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -2005,6 +2005,7 @@ static int filename_trans_read_helper(struct policydb *p, void *fp) if (!datum) goto out; + datum->next = NULL; *dst = datum; /* ebitmap_read() will at least init the bitmap */ @@ -2017,7 +2018,6 @@ static int filename_trans_read_helper(struct policydb *p, void *fp) goto out; datum->otype = le32_to_cpu(buf[0]); - datum->next = NULL; dst = &datum->next; } From efef746c5a387a5a9553d405735884f7ce7852b6 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 19 Jul 2023 09:29:29 +0200 Subject: [PATCH 077/123] batman-adv: Trigger events for auto adjusted MTU commit c6a953cce8d0438391e6da48c8d0793d3fbfcfa6 upstream. If an interface changes the MTU, it is expected that an NETDEV_PRECHANGEMTU and NETDEV_CHANGEMTU notification events is triggered. This worked fine for .ndo_change_mtu based changes because core networking code took care of it. But for auto-adjustments after hard-interfaces changes, these events were simply missing. Due to this problem, non-batman-adv components weren't aware of MTU changes and thus couldn't perform their own tasks correctly. 
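As an editorial aside (not part of the patch): the visible effect of going through dev_set_mtu() is that in-kernel netdevice notifier listeners see the auto-adjustment again. A minimal, hypothetical listener module would look roughly like this:

#include <linux/module.h>
#include <linux/netdevice.h>

static int mtu_watch(struct notifier_block *nb, unsigned long event, void *ptr)
{
    struct net_device *dev = netdev_notifier_info_to_dev(ptr);

    if (event == NETDEV_CHANGEMTU)
        pr_info("%s: MTU changed to %u\n", dev->name, dev->mtu);
    return NOTIFY_DONE;
}

static struct notifier_block mtu_nb = { .notifier_call = mtu_watch };

static int __init mtu_watch_init(void)
{
    return register_netdevice_notifier(&mtu_nb);
}

static void __exit mtu_watch_exit(void)
{
    unregister_netdevice_notifier(&mtu_nb);
}

module_init(mtu_watch_init);
module_exit(mtu_watch_exit);
MODULE_LICENSE("GPL");
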
Fixes: c6c8fea29769 ("net: Add batman-adv meshing protocol") Cc: stable@vger.kernel.org Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Greg Kroah-Hartman --- net/batman-adv/hard-interface.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 41c1ad33d009..ae5762af0146 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -630,7 +630,7 @@ out: */ void batadv_update_min_mtu(struct net_device *soft_iface) { - soft_iface->mtu = batadv_hardif_min_mtu(soft_iface); + dev_set_mtu(soft_iface, batadv_hardif_min_mtu(soft_iface)); /* Check if the local translate table should be cleaned up to match a * new (and smaller) MTU. From ed1eb19806ae645c20890b70b091c751dfd08f53 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Wed, 19 Jul 2023 10:01:15 +0200 Subject: [PATCH 078/123] batman-adv: Don't increase MTU when set by user commit d8e42a2b0addf238be8b3b37dcd9795a5c1be459 upstream. If the user set an MTU value, it usually means that there are special requirements for the MTU. But if an interface got activated, the MTU was always recalculated and then the user-set value was overwritten. The only reason why this user-set value has to be overwritten is when the MTU has to be decreased because batman-adv is not able to transfer packets with the user-specified size. Fixes: c6c8fea29769 ("net: Add batman-adv meshing protocol") Cc: stable@vger.kernel.org Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Greg Kroah-Hartman --- net/batman-adv/hard-interface.c | 14 +++++++++++++- net/batman-adv/soft-interface.c | 3 +++ net/batman-adv/types.h | 6 ++++++ 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index ae5762af0146..24c9c0c3f316 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c @@ -630,7 +630,19 @@ out: */ void batadv_update_min_mtu(struct net_device *soft_iface) { - dev_set_mtu(soft_iface, batadv_hardif_min_mtu(soft_iface)); + struct batadv_priv *bat_priv = netdev_priv(soft_iface); + int limit_mtu; + int mtu; + + mtu = batadv_hardif_min_mtu(soft_iface); + + if (bat_priv->mtu_set_by_user) + limit_mtu = bat_priv->mtu_set_by_user; + else + limit_mtu = ETH_DATA_LEN; + + mtu = min(mtu, limit_mtu); + dev_set_mtu(soft_iface, mtu); /* Check if the local translate table should be cleaned up to match a * new (and smaller) MTU.
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 0f5c0679b55a..38d411a52f33 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -154,11 +154,14 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu) { + struct batadv_priv *bat_priv = netdev_priv(dev); + /* check ranges */ if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev)) return -EINVAL; dev->mtu = new_mtu; + bat_priv->mtu_set_by_user = new_mtu; return 0; } diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h index 758cd797a063..76791815b26b 100644 --- a/net/batman-adv/types.h +++ b/net/batman-adv/types.h @@ -1546,6 +1546,12 @@ struct batadv_priv { /** @soft_iface: net device which holds this struct as private data */ struct net_device *soft_iface; + /** + * @mtu_set_by_user: MTU was set once by user + * protected by rtnl_lock + */ + int mtu_set_by_user; + /** * @bat_counters: mesh internal traffic statistic counters (see * batadv_counters) From fc9b87d8b741a955d462fc14da4fcd788d00748a Mon Sep 17 00:00:00 2001 From: Remi Pommarel Date: Fri, 28 Jul 2023 15:38:50 +0200 Subject: [PATCH 079/123] batman-adv: Do not get eth header before batadv_check_management_packet commit eac27a41ab641de074655d2932fc7f8cdb446881 upstream. If the received skb in batadv_v_elp_packet_recv or batadv_v_ogm_packet_recv is either cloned or non-linearized then its data buffer will be reallocated by batadv_check_management_packet when skb_cow or skb_linearize get called. Thus getting the ethernet header address inside the skb data buffer before batadv_check_management_packet had any chance to reallocate it could lead to the following kernel panic: Unable to handle kernel paging request at virtual address ffffff8020ab069a Mem abort info: ESR = 0x96000007 EC = 0x25: DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 FSC = 0x07: level 3 translation fault Data abort info: ISV = 0, ISS = 0x00000007 CM = 0, WnR = 0 swapper pgtable: 4k pages, 39-bit VAs, pgdp=0000000040f45000 [ffffff8020ab069a] pgd=180000007fffa003, p4d=180000007fffa003, pud=180000007fffa003, pmd=180000007fefe003, pte=0068000020ab0706 Internal error: Oops: 96000007 [#1] SMP Modules linked in: ahci_mvebu libahci_platform libahci dvb_usb_af9035 dvb_usb_dib0700 dib0070 dib7000m dibx000_common ath11k_pci ath10k_pci ath10k_core mwl8k_new nf_nat_sip nf_conntrack_sip xhci_plat_hcd xhci_hcd nf_nat_pptp nf_conntrack_pptp at24 sbsa_gwdt CPU: 1 PID: 16 Comm: ksoftirqd/1 Not tainted 5.15.42-00066-g3242268d425c-dirty #550 Hardware name: A8k (DT) pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : batadv_is_my_mac+0x60/0xc0 lr : batadv_v_ogm_packet_recv+0x98/0x5d0 sp : ffffff8000183820 x29: ffffff8000183820 x28: 0000000000000001 x27: ffffff8014f9af00 x26: 0000000000000000 x25: 0000000000000543 x24: 0000000000000003 x23: ffffff8020ab0580 x22: 0000000000000110 x21: ffffff80168ae880 x20: 0000000000000000 x19: ffffff800b561000 x18: 0000000000000000 x17: 0000000000000000 x16: 0000000000000000 x15: 00dc098924ae0032 x14: 0f0405433e0054b0 x13: ffffffff00000080 x12: 0000004000000001 x11: 0000000000000000 x10: 0000000000000000 x9 : 0000000000000000 x8 : 0000000000000000 x7 : ffffffc076dae000 x6 : ffffff8000183700 x5 : ffffffc00955e698 x4 : ffffff80168ae000 x3 : ffffff80059cf000 x2 : ffffff800b561000 x1 : ffffff8020ab0696 x0 : ffffff80168ae880 Call trace: batadv_is_my_mac+0x60/0xc0 batadv_v_ogm_packet_recv+0x98/0x5d0
batadv_batman_skb_recv+0x1b8/0x244 __netif_receive_skb_core.isra.0+0x440/0xc74 __netif_receive_skb_one_core+0x14/0x20 netif_receive_skb+0x68/0x140 br_pass_frame_up+0x70/0x80 br_handle_frame_finish+0x108/0x284 br_handle_frame+0x190/0x250 __netif_receive_skb_core.isra.0+0x240/0xc74 __netif_receive_skb_list_core+0x6c/0x90 netif_receive_skb_list_internal+0x1f4/0x310 napi_complete_done+0x64/0x1d0 gro_cell_poll+0x7c/0xa0 __napi_poll+0x34/0x174 net_rx_action+0xf8/0x2a0 _stext+0x12c/0x2ac run_ksoftirqd+0x4c/0x7c smpboot_thread_fn+0x120/0x210 kthread+0x140/0x150 ret_from_fork+0x10/0x20 Code: f9403844 eb03009f 54fffee1 f94 Thus ethernet header address should only be fetched after batadv_check_management_packet has been called. Fixes: 0da0035942d4 ("batman-adv: OGMv2 - add basic infrastructure") Cc: stable@vger.kernel.org Signed-off-by: Remi Pommarel Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Greg Kroah-Hartman --- net/batman-adv/bat_v_elp.c | 3 ++- net/batman-adv/bat_v_ogm.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index f1741fbfb617..98a624f32b94 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c @@ -506,7 +506,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb, struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_elp_packet *elp_packet; struct batadv_hard_iface *primary_if; - struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb); + struct ethhdr *ethhdr; bool res; int ret = NET_RX_DROP; @@ -514,6 +514,7 @@ int batadv_v_elp_packet_recv(struct sk_buff *skb, if (!res) goto free_skb; + ethhdr = eth_hdr(skb); if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 033639df96d8..38b4239625fe 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -986,7 +986,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, { struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct batadv_ogm2_packet *ogm_packet; - struct ethhdr *ethhdr = eth_hdr(skb); + struct ethhdr *ethhdr; int ogm_offset; u8 *packet_pos; int ret = NET_RX_DROP; @@ -1000,6 +1000,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, if (!batadv_check_management_packet(skb, if_incoming, BATADV_OGM2_HLEN)) goto free_skb; + ethhdr = eth_hdr(skb); if (batadv_is_my_mac(bat_priv, ethhdr->h_source)) goto free_skb; From f1bead97f0ad91bb3c1b90449400654fd5814bf6 Mon Sep 17 00:00:00 2001 From: Remi Pommarel Date: Fri, 4 Aug 2023 11:39:36 +0200 Subject: [PATCH 080/123] batman-adv: Fix TT global entry leak when client roamed back commit d25ddb7e788d34cf27ff1738d11a87cb4b67d446 upstream. When a client roamed back to a node before it got time to destroy the pending local entry (i.e. within the same originator interval) the old global one is directly removed from hash table and left as such. But because this entry had an extra reference taken at lookup (i.e using batadv_tt_global_hash_find) there is no way its memory will be reclaimed at any time causing the following memory leak: unreferenced object 0xffff0000073c8000 (size 18560): comm "softirq", pid 0, jiffies 4294907738 (age 228.644s) hex dump (first 32 bytes): 06 31 ac 12 c7 7a 05 00 01 00 00 00 00 00 00 00 .1...z.......... 2c ad be 08 00 80 ff ff 6c b6 be 08 00 80 ff ff ,.......l....... 
backtrace: [<00000000ee6e0ffa>] kmem_cache_alloc+0x1b4/0x300 [<000000000ff2fdbc>] batadv_tt_global_add+0x700/0xe20 [<00000000443897c7>] _batadv_tt_update_changes+0x21c/0x790 [<000000005dd90463>] batadv_tt_update_changes+0x3c/0x110 [<00000000a2d7fc57>] batadv_tt_tvlv_unicast_handler_v1+0xafc/0xe10 [<0000000011793f2a>] batadv_tvlv_containers_process+0x168/0x2b0 [<00000000b7cbe2ef>] batadv_recv_unicast_tvlv+0xec/0x1f4 [<0000000042aef1d8>] batadv_batman_skb_recv+0x25c/0x3a0 [<00000000bbd8b0a2>] __netif_receive_skb_core.isra.0+0x7a8/0xe90 [<000000004033d428>] __netif_receive_skb_one_core+0x64/0x74 [<000000000f39a009>] __netif_receive_skb+0x48/0xe0 [<00000000f2cd8888>] process_backlog+0x174/0x344 [<00000000507d6564>] __napi_poll+0x58/0x1f4 [<00000000b64ef9eb>] net_rx_action+0x504/0x590 [<00000000056fa5e4>] _stext+0x1b8/0x418 [<00000000878879d6>] run_ksoftirqd+0x74/0xa4 unreferenced object 0xffff00000bae1a80 (size 56): comm "softirq", pid 0, jiffies 4294910888 (age 216.092s) hex dump (first 32 bytes): 00 78 b1 0b 00 00 ff ff 0d 50 00 00 00 00 00 00 .x.......P...... 00 00 00 00 00 00 00 00 50 c8 3c 07 00 00 ff ff ........P.<..... backtrace: [<00000000ee6e0ffa>] kmem_cache_alloc+0x1b4/0x300 [<00000000d9aaa49e>] batadv_tt_global_add+0x53c/0xe20 [<00000000443897c7>] _batadv_tt_update_changes+0x21c/0x790 [<000000005dd90463>] batadv_tt_update_changes+0x3c/0x110 [<00000000a2d7fc57>] batadv_tt_tvlv_unicast_handler_v1+0xafc/0xe10 [<0000000011793f2a>] batadv_tvlv_containers_process+0x168/0x2b0 [<00000000b7cbe2ef>] batadv_recv_unicast_tvlv+0xec/0x1f4 [<0000000042aef1d8>] batadv_batman_skb_recv+0x25c/0x3a0 [<00000000bbd8b0a2>] __netif_receive_skb_core.isra.0+0x7a8/0xe90 [<000000004033d428>] __netif_receive_skb_one_core+0x64/0x74 [<000000000f39a009>] __netif_receive_skb+0x48/0xe0 [<00000000f2cd8888>] process_backlog+0x174/0x344 [<00000000507d6564>] __napi_poll+0x58/0x1f4 [<00000000b64ef9eb>] net_rx_action+0x504/0x590 [<00000000056fa5e4>] _stext+0x1b8/0x418 [<00000000878879d6>] run_ksoftirqd+0x74/0xa4 Releasing the extra reference from batadv_tt_global_hash_find even at roam back when batadv_tt_global_free is called fixes this memory leak. Cc: stable@vger.kernel.org Fixes: 068ee6e204e1 ("batman-adv: roaming handling mechanism redesign") Signed-off-by: Remi Pommarel Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Greg Kroah-Hartman --- net/batman-adv/translation-table.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 01d30c1e412c..5d8cee74772f 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -774,7 +774,6 @@ check_roaming: if (roamed_back) { batadv_tt_global_free(bat_priv, tt_global, "Roaming canceled"); - tt_global = NULL; } else { /* The global entry has to be marked as ROAMING and * has to be kept for consistency purpose From cb1f73e691bb6c46eda7ed7e60c6ce6672094169 Mon Sep 17 00:00:00 2001 From: Remi Pommarel Date: Wed, 9 Aug 2023 17:29:13 +0200 Subject: [PATCH 081/123] batman-adv: Fix batadv_v_ogm_aggr_send memory leak commit 421d467dc2d483175bad4fb76a31b9e5a3d744cf upstream. When batadv_v_ogm_aggr_send is called for an inactive interface, the skb is silently dropped by batadv_v_ogm_send_to_if() but never freed, causing the following memory leak: unreferenced object 0xffff00000c164800 (size 512): comm "kworker/u8:1", pid 2648, jiffies 4295122303 (age 97.656s) hex dump (first 32 bytes): 00 80 af 09 00 00 ff ff e1 09 00 00 75 01 60 83 ............u.`.
1f 00 00 00 b8 00 00 00 15 00 05 00 da e3 d3 64 ...............d backtrace: [<0000000007ad20f6>] __kmalloc_track_caller+0x1a8/0x310 [<00000000d1029e55>] kmalloc_reserve.constprop.0+0x70/0x13c [<000000008b9d4183>] __alloc_skb+0xec/0x1fc [<00000000c7af5051>] __netdev_alloc_skb+0x48/0x23c [<00000000642ee5f5>] batadv_v_ogm_aggr_send+0x50/0x36c [<0000000088660bd7>] batadv_v_ogm_aggr_work+0x24/0x40 [<0000000042fc2606>] process_one_work+0x3b0/0x610 [<000000002f2a0b1c>] worker_thread+0xa0/0x690 [<0000000059fae5d4>] kthread+0x1fc/0x210 [<000000000c587d3a>] ret_from_fork+0x10/0x20 Free the skb in that case to fix this leak. Cc: stable@vger.kernel.org Fixes: 0da0035942d4 ("batman-adv: OGMv2 - add basic infrastructure") Signed-off-by: Remi Pommarel Signed-off-by: Sven Eckelmann Signed-off-by: Simon Wunderlich Signed-off-by: Greg Kroah-Hartman --- net/batman-adv/bat_v_ogm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c index 38b4239625fe..9f4815f4c8e8 100644 --- a/net/batman-adv/bat_v_ogm.c +++ b/net/batman-adv/bat_v_ogm.c @@ -124,8 +124,10 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb, { struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface); - if (hard_iface->if_status != BATADV_IF_ACTIVE) + if (hard_iface->if_status != BATADV_IF_ACTIVE) { + kfree_skb(skb); return; + } batadv_inc_counter(bat_priv, BATADV_CNT_MGMT_TX); batadv_add_counter(bat_priv, BATADV_CNT_MGMT_TX_BYTES, From 82bb5f8aba00cb38759957346119c6fc37f09518 Mon Sep 17 00:00:00 2001 From: Sven Eckelmann Date: Mon, 21 Aug 2023 21:48:48 +0200 Subject: [PATCH 082/123] batman-adv: Hold rtnl lock during MTU update via netlink commit 987aae75fc1041072941ffb622b45ce2359a99b9 upstream. The automatic recalculation of the maximum allowed MTU is usually triggered by code sections which are already rtnl lock protected by callers outside of batman-adv. But when the fragmentation setting is changed via batman-adv's own batadv genl family, then the rtnl lock is not yet taken. But dev_set_mtu requires that the caller holds the rtnl lock because it uses netdevice notifiers. And this code will then fail the check for this lock: RTNL: assertion failed at net/core/dev.c (1953) Cc: stable@vger.kernel.org Reported-by: syzbot+f8812454d9b3ac00d282@syzkaller.appspotmail.com Fixes: c6a953cce8d0 ("batman-adv: Trigger events for auto adjusted MTU") Signed-off-by: Sven Eckelmann Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230821-batadv-missing-mtu-rtnl-lock-v1-1-1c5a7bfe861e@narfation.org Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/batman-adv/netlink.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c index a5e4a4e976cf..86e0664e0511 100644 --- a/net/batman-adv/netlink.c +++ b/net/batman-adv/netlink.c @@ -495,7 +495,10 @@ static int batadv_netlink_set_mesh(struct sk_buff *skb, struct genl_info *info) attr = info->attrs[BATADV_ATTR_FRAGMENTATION_ENABLED]; atomic_set(&bat_priv->fragmentation, !!nla_get_u8(attr)); + + rtnl_lock(); batadv_update_min_mtu(bat_priv->soft_iface); + rtnl_unlock(); } if (info->attrs[BATADV_ATTR_GW_BANDWIDTH_DOWN]) { From 30ffd5890a0349187b820cc09b182f41f267d7b9 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Fri, 25 Aug 2023 21:50:33 +0200 Subject: [PATCH 083/123] lib/clz_ctz.c: Fix __clzdi2() and __ctzdi2() for 32-bit kernels commit 382d4cd1847517ffcb1800fd462b625db7b2ebea upstream. 
The gcc compiler translates on some architectures the 64-bit __builtin_clzll() function to a call to the libgcc function __clzdi2(), which should take a 64-bit parameter on 32- and 64-bit platforms. But in the current kernel code, the built-in __clzdi2() function is defined to operate (wrongly) on 32-bit parameters if BITS_PER_LONG == 32, thus the return values on 32-bit kernels are in the range from [0..31] instead of the expected [0..63] range. This patch fixes the in-kernel functions __clzdi2() and __ctzdi2() to take a 64-bit parameter on 32-bit kernels as well, thus it makes the functions identical for 32- and 64-bit kernels. This bug went unnoticed since kernel 3.11 for over 10 years, and here are some possible reasons for that: a) Some architectures have assembly instructions to count the bits and which are used instead of calling __clzdi2(), e.g. on x86 the bsr instruction and on ppc cntlz is used. On such architectures the wrong __clzdi2() implementation isn't used and as such the bug has no effect and won't be noticed. b) Some architectures link to libgcc.a, and the in-kernel weak functions get replaced by the correct 64-bit variants from libgcc.a. c) __builtin_clzll() and __clzdi2() doesn't seem to be used in many places in the kernel, and most likely only in uncritical functions, e.g. when printing hex values via seq_put_hex_ll(). The wrong return value will still print the correct number, but just in a wrong formatting (e.g. with too many leading zeroes). d) 32-bit kernels aren't used that much any longer, so they are less tested. A trivial testcase to verify if the currently running 32-bit kernel is affected by the bug is to look at the output of /proc/self/maps: Here the kernel uses a correct implementation of __clzdi2(): root@debian:~# cat /proc/self/maps 00010000-00019000 r-xp 00000000 08:05 787324 /usr/bin/cat 00019000-0001a000 rwxp 00009000 08:05 787324 /usr/bin/cat 0001a000-0003b000 rwxp 00000000 00:00 0 [heap] f7551000-f770d000 r-xp 00000000 08:05 794765 /usr/lib/hppa-linux-gnu/libc.so.6 ... and this kernel uses the broken implementation of __clzdi2(): root@debian:~# cat /proc/self/maps 0000000010000-0000000019000 r-xp 00000000 000000008:000000005 787324 /usr/bin/cat 0000000019000-000000001a000 rwxp 000000009000 000000008:000000005 787324 /usr/bin/cat 000000001a000-000000003b000 rwxp 00000000 00:00 0 [heap] 00000000f73d1000-00000000f758d000 r-xp 00000000 000000008:000000005 794765 /usr/lib/hppa-linux-gnu/libc.so.6 ... 
Signed-off-by: Helge Deller Fixes: 4df87bb7b6a22 ("lib: add weak clz/ctz functions") Cc: Chanho Min Cc: Geert Uytterhoeven Cc: stable@vger.kernel.org # v3.11+ Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- lib/clz_ctz.c | 32 ++++++-------------------------- 1 file changed, 6 insertions(+), 26 deletions(-) diff --git a/lib/clz_ctz.c b/lib/clz_ctz.c index 0d3a686b5ba2..fb8c0c5c2bd2 100644 --- a/lib/clz_ctz.c +++ b/lib/clz_ctz.c @@ -28,36 +28,16 @@ int __weak __clzsi2(int val) } EXPORT_SYMBOL(__clzsi2); -int __weak __clzdi2(long val); -int __weak __ctzdi2(long val); -#if BITS_PER_LONG == 32 - -int __weak __clzdi2(long val) +int __weak __clzdi2(u64 val); +int __weak __clzdi2(u64 val) { - return 32 - fls((int)val); + return 64 - fls64(val); } EXPORT_SYMBOL(__clzdi2); -int __weak __ctzdi2(long val) +int __weak __ctzdi2(u64 val); +int __weak __ctzdi2(u64 val) { - return __ffs((u32)val); + return __ffs64(val); } EXPORT_SYMBOL(__ctzdi2); - -#elif BITS_PER_LONG == 64 - -int __weak __clzdi2(long val) -{ - return 64 - fls64((u64)val); -} -EXPORT_SYMBOL(__clzdi2); - -int __weak __ctzdi2(long val) -{ - return __ffs64((u64)val); -} -EXPORT_SYMBOL(__ctzdi2); - -#else -#error BITS_PER_LONG not 32 or 64 -#endif From 33835975740e65c42ad147799bcd6aa430a7eef0 Mon Sep 17 00:00:00 2001 From: Mingzheng Xing Date: Thu, 10 Aug 2023 00:56:48 +0800 Subject: [PATCH 084/123] riscv: Handle zicsr/zifencei issue between gcc and binutils commit ca09f772cccaeec4cd05a21528c37a260aa2dd2c upstream. Binutils-2.38 and GCC-12.1.0 bumped[0][1] the default ISA spec to the newer 20191213 version which moves some instructions from the I extension to the Zicsr and Zifencei extensions. So if either binutils or GCC exceeds that version, we should explicitly specify Zicsr and Zifencei via -march to cope with the new changes. But this only occurs when binutils >= 2.36 and GCC >= 11.1.0. It's a different story when binutils < 2.36. binutils-2.36 supports the Zifencei extension[2] and splits Zifencei and Zicsr from I[3]. GCC-11.1.0 is particular[4] because it adds support for the Zicsr and Zifencei extensions in -march. binutils-2.35 does not support the Zifencei extension, and does not need to specify Zicsr and Zifencei when working with GCC >= 12.1.0. To make our lives easier, let's relax the check to binutils >= 2.36 in CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. For the other two cases, where clang < 17 or GCC < 11.1.0, we will deal with them in CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC.
For more information, please refer to: commit 6df2a016c0c8 ("riscv: fix build with binutils 2.38") commit e89c2e815e76 ("riscv: Handle zicsr/zifencei issues between clang and binutils") Link: https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc [0] Link: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=98416dbb0a62579d4a7a4a76bab51b5b52fec2cd [1] Link: https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=5a1b31e1e1cee6e9f1c92abff59cdcfff0dddf30 [2] Link: https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=729a53530e86972d1143553a415db34e6e01d5d2 [3] Link: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=b03be74bad08c382da47e048007a78fa3fb4ef49 [4] Link: https://lore.kernel.org/all/20230308220842.1231003-1-conor@kernel.org Link: https://lore.kernel.org/all/20230223220546.52879-1-conor@kernel.org Reviewed-by: Conor Dooley Acked-by: Guo Ren Cc: Signed-off-by: Mingzheng Xing Link: https://lore.kernel.org/r/20230809165648.21071-1-xingmingzheng@iscas.ac.cn Signed-off-by: Palmer Dabbelt Signed-off-by: Greg Kroah-Hartman --- arch/riscv/Kconfig | 28 ++++++++++++++++---------- arch/riscv/kernel/compat_vdso/Makefile | 8 +++++++- 2 files changed, 24 insertions(+), 12 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 6bf8dc0b8f93..b665ee412aed 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -447,24 +447,30 @@ config TOOLCHAIN_HAS_ZIHINTPAUSE config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI def_bool y # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc - depends on AS_IS_GNU && AS_VERSION >= 23800 + # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=98416dbb0a62579d4a7a4a76bab51b5b52fec2cd + depends on AS_IS_GNU && AS_VERSION >= 23600 help - Newer binutils versions default to ISA spec version 20191213 which - moves some instructions from the I extension to the Zicsr and Zifencei - extensions. + Binutils-2.38 and GCC-12.1.0 bumped the default ISA spec to the newer + 20191213 version, which moves some instructions from the I extension to + the Zicsr and Zifencei extensions. This requires explicitly specifying + Zicsr and Zifencei when binutils >= 2.38 or GCC >= 12.1.0. Zicsr + and Zifencei are supported in binutils from version 2.36 onwards. + To make life easier, and avoid forcing toolchains that default to a + newer ISA spec to version 2.2, relax the check to binutils >= 2.36. + For clang < 17 or GCC < 11.1.0, for which this is not possible, this is + dealt with in CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC. config TOOLCHAIN_NEEDS_OLD_ISA_SPEC def_bool y depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16 - depends on CC_IS_CLANG && CLANG_VERSION < 170000 + # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=b03be74bad08c382da47e048007a78fa3fb4ef49 + depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110100) help - Certain versions of clang do not support zicsr and zifencei via -march - but newer versions of binutils require it for the reasons noted in the - help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This - option causes an older ISA spec compatible with these older versions - of clang to be passed to GAS, which has the same result as passing zicsr - and zifencei to -march. + Certain versions of clang and GCC do not support zicsr and zifencei via + -march. 
This option causes an older ISA spec compatible with these older + versions of clang and GCC to be passed to GAS, which has the same result + as passing zicsr and zifencei to -march. config FPU bool "FPU support" diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile index 7f34f3c7c882..737c0857b14c 100644 --- a/arch/riscv/kernel/compat_vdso/Makefile +++ b/arch/riscv/kernel/compat_vdso/Makefile @@ -11,7 +11,13 @@ compat_vdso-syms += flush_icache COMPAT_CC := $(CC) COMPAT_LD := $(LD) -COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32 +# binutils 2.35 does not support the zifencei extension, but in the ISA +# spec 20191213, G stands for IMAFD_ZICSR_ZIFENCEI. +ifdef CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI + COMPAT_CC_FLAGS := -march=rv32g -mabi=ilp32 +else + COMPAT_CC_FLAGS := -march=rv32imafd -mabi=ilp32 +endif COMPAT_LD_FLAGS := -melf32lriscv # Disable attributes, as they're useless and break the build. From aa096bc3c8c09fbd836130292ff08cfe0df40d79 Mon Sep 17 00:00:00 2001 From: Mingzheng Xing Date: Fri, 25 Aug 2023 03:08:52 +0800 Subject: [PATCH 085/123] riscv: Fix build errors using binutils2.37 toolchains commit ef21fa7c198e04f3d3053b1c5b5f2b4b225c3350 upstream. When building the kernel with binutils 2.37 and GCC-11.1.0/GCC-11.2.0, the following error occurs: Assembler messages: Error: cannot find default versions of the ISA extension `zicsr' Error: cannot find default versions of the ISA extension `zifencei' The above error originated from this commit of binutils[0], which has been resolved and backported by GCC-12.1.0[1] and GCC-11.3.0[2]. So fix this by changing the GCC version in CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC to GCC-11.3.0. Link: https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f0bae2552db1dd4f1995608fbf6648fcee4e9e0c [0] Link: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=ca2bbb88f999f4d3cc40e89bc1aba712505dd598 [1] Link: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=d29f5d6ab513c52fd872f532c492e35ae9fd6671 [2] Fixes: ca09f772ccca ("riscv: Handle zicsr/zifencei issue between gcc and binutils") Reported-by: Conor Dooley Cc: Signed-off-by: Mingzheng Xing Link: https://lore.kernel.org/r/20230824190852.45470-1-xingmingzheng@iscas.ac.cn Closes: https://lore.kernel.org/all/20230823-captive-abdomen-befd942a4a73@wendy/ Reviewed-by: Conor Dooley Tested-by: Conor Dooley Signed-off-by: Palmer Dabbelt Signed-off-by: Greg Kroah-Hartman --- arch/riscv/Kconfig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index b665ee412aed..d702359f8ab5 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -457,15 +457,15 @@ config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI and Zifencei are supported in binutils from version 2.36 onwards. To make life easier, and avoid forcing toolchains that default to a newer ISA spec to version 2.2, relax the check to binutils >= 2.36. - For clang < 17 or GCC < 11.1.0, for which this is not possible, this is - dealt with in CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC. + For clang < 17 or GCC < 11.3.0, for which this is not possible or need + special treatment, this is dealt with in TOOLCHAIN_NEEDS_OLD_ISA_SPEC.
config TOOLCHAIN_NEEDS_OLD_ISA_SPEC def_bool y depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16 - # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=b03be74bad08c382da47e048007a78fa3fb4ef49 - depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110100) + # https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=d29f5d6ab513c52fd872f532c492e35ae9fd6671 + depends on (CC_IS_CLANG && CLANG_VERSION < 170000) || (CC_IS_GCC && GCC_VERSION < 110300) help Certain versions of clang and GCC do not support zicsr and zifencei via -march. This option causes an older ISA spec compatible with these older From e75de82b378617afd20805551e2e3596fbb447a1 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 11 Aug 2023 15:10:13 +0200 Subject: [PATCH 086/123] radix tree: remove unused variable commit d59070d1076ec5114edb67c87658aeb1d691d381 upstream. Recent versions of clang warn about an unused variable, though older versions saw the 'slot++' as a use and did not warn: radix-tree.c:1136:50: error: parameter 'slot' set but not used [-Werror,-Wunused-but-set-parameter] It's clearly not needed any more, so just remove it. Link: https://lkml.kernel.org/r/20230811131023.2226509-1-arnd@kernel.org Fixes: 3a08cd52c37c7 ("radix tree: Remove multiorder support") Signed-off-by: Arnd Bergmann Cc: Matthew Wilcox Cc: Nathan Chancellor Cc: Nick Desaulniers Cc: Peng Zhang Cc: Rong Tao Cc: Tom Rix Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- lib/radix-tree.c | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 3c78e1e8b2ad..2ec38f08e4f0 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1134,7 +1134,6 @@ static void set_iter_tags(struct radix_tree_iter *iter, void __rcu **radix_tree_iter_resume(void __rcu **slot, struct radix_tree_iter *iter) { - slot++; iter->index = __radix_tree_iter_add(iter, 1); iter->next_index = iter->index; iter->tags = 0; From 2d00ca90b81e2ea5f2b604947546be08bbe14094 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 18 Aug 2023 15:40:56 -0500 Subject: [PATCH 087/123] of: unittest: Fix EXPECT for parse_phandle_with_args_map() test commit 0aeae3788e28f64ccb95405d4dc8cd80637ffaea upstream. Commit 12e17243d8a1 ("of: base: improve error msg in of_phandle_iterator_next()") added printing of the phandle value on error, but failed to update the unittest. 
Fixes: 12e17243d8a1 ("of: base: improve error msg in of_phandle_iterator_next()") Cc: stable@vger.kernel.org Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20230801-dt-changeset-fixes-v3-1-5f0410e007dd@kernel.org Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- drivers/of/unittest.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index b89ab5d9fea5..9be6ed47a1ce 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -657,12 +657,12 @@ static void __init of_unittest_parse_phandle_with_args_map(void) memset(&args, 0, sizeof(args)); EXPECT_BEGIN(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle"); + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); rc = of_parse_phandle_with_args_map(np, "phandle-list-bad-phandle", "phandle", 0, &args); EXPECT_END(KERN_INFO, - "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle"); + "OF: /testcase-data/phandle-tests/consumer-b: could not find phandle 12345678"); unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); From c6b7d8902025a96e61c731dcb75dd8fb91c813e5 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 18 Aug 2023 15:40:57 -0500 Subject: [PATCH 088/123] of: dynamic: Refactor action prints to not use "%pOF" inside devtree_lock commit 914d9d831e6126a6e7a92e27fcfaa250671be42c upstream. While originally it was fine to format strings using "%pOF" while holding devtree_lock, this now causes a deadlock. Lockdep reports: of_get_parent from of_fwnode_get_parent+0x18/0x24 ^^^^^^^^^^^^^ of_fwnode_get_parent from fwnode_count_parents+0xc/0x28 fwnode_count_parents from fwnode_full_name_string+0x18/0xac fwnode_full_name_string from device_node_string+0x1a0/0x404 device_node_string from pointer+0x3c0/0x534 pointer from vsnprintf+0x248/0x36c vsnprintf from vprintk_store+0x130/0x3b4 Fix this by moving the printing in __of_changeset_entry_apply() outside the lock. As the only difference in the multiple prints is the action name, use the existing "action_names" to refactor the prints into a single print. 
Fixes: a92eb7621b9fb2c2 ("lib/vsprintf: Make use of fwnode API to obtain node names and separators") Cc: stable@vger.kernel.org Reported-by: Geert Uytterhoeven Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20230801-dt-changeset-fixes-v3-2-5f0410e007dd@kernel.org Signed-off-by: Rob Herring Signed-off-by: Greg Kroah-Hartman --- drivers/of/dynamic.c | 31 +++++++++---------------------- 1 file changed, 9 insertions(+), 22 deletions(-) diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c index cd3821a6444f..4e436f2d13ae 100644 --- a/drivers/of/dynamic.c +++ b/drivers/of/dynamic.c @@ -63,15 +63,14 @@ int of_reconfig_notifier_unregister(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister); -#ifdef DEBUG -const char *action_names[] = { +static const char *action_names[] = { + [0] = "INVALID", [OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE", [OF_RECONFIG_DETACH_NODE] = "DETACH_NODE", [OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY", [OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY", [OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY", }; -#endif int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p) { @@ -594,21 +593,9 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) } ret = __of_add_property(ce->np, ce->prop); - if (ret) { - pr_err("changeset: add_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; case OF_RECONFIG_REMOVE_PROPERTY: ret = __of_remove_property(ce->np, ce->prop); - if (ret) { - pr_err("changeset: remove_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; case OF_RECONFIG_UPDATE_PROPERTY: @@ -622,20 +609,17 @@ static int __of_changeset_entry_apply(struct of_changeset_entry *ce) } ret = __of_update_property(ce->np, ce->prop, &old_prop); - if (ret) { - pr_err("changeset: update_property failed @%pOF/%s\n", - ce->np, - ce->prop->name); - break; - } break; default: ret = -EINVAL; } raw_spin_unlock_irqrestore(&devtree_lock, flags); - if (ret) + if (ret) { + pr_err("changeset: apply failed: %-15s %pOF:%s\n", + action_names[ce->action], ce->np, ce->prop->name); return ret; + } switch (ce->action) { case OF_RECONFIG_ATTACH_NODE: @@ -921,6 +905,9 @@ int of_changeset_action(struct of_changeset *ocs, unsigned long action, if (!ce) return -ENOMEM; + if (WARN_ON(action >= ARRAY_SIZE(action_names))) + return -EINVAL; + /* get a reference to the node */ ce->action = action; ce->np = of_node_get(np); From 4919043ab93bb662961ffe34c55557b0aafd3bcd Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 18 Aug 2023 09:48:50 -0500 Subject: [PATCH 089/123] pinctrl: amd: Mask wake bits on probe again commit 6bc3462a0f5ecaa376a0b3d76dafc55796799e17 upstream. Shubhra reports that their laptop is heating up over s2idle. Even though it's getting into the deepest state, it appears to be having spurious wakeup events. While debugging a tangential issue with the RTC, Carsten reports that recent 6.1.y based kernels face a similar problem. Looking at acpidump and GPIO register comparisons, these spurious wakeup events are from the GPIO associated with the I2C touchpad on both laptops and occur even when the touchpad is not marked as a wake source by the kernel. This means that the boot firmware has programmed these bits and, because Linux didn't touch them, they led to spurious wakeup events from that GPIO. To fix this issue, restore most of the code that previously would clear all the bits associated with wakeup sources.
This will allow the kernel to only program the wake up sources that are necessary. This is similar to what was done previously; but only the wake bits are cleared by default instead of interrupts and wake bits. If any other problems are reported then it may make sense to clear interrupts again too. Cc: Sachi King Cc: stable@vger.kernel.org Cc: Thorsten Leemhuis Fixes: 65f6c7c91cb2 ("pinctrl: amd: Revert "pinctrl: amd: disable and mask interrupts on probe"") Reported-by: Shubhra Prakash Nandi Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217754 Reported-by: Carsten Hatger Link: https://bugzilla.kernel.org/show_bug.cgi?id=217626#c28 Signed-off-by: Mario Limonciello Link: https://lore.kernel.org/r/20230818144850.1439-1-mario.limonciello@amd.com Signed-off-by: Linus Walleij Signed-off-by: Greg Kroah-Hartman --- drivers/pinctrl/pinctrl-amd.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index a8df77e80549..be6838c252f0 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -862,6 +862,33 @@ static const struct pinconf_ops amd_pinconf_ops = { .pin_config_group_set = amd_pinconf_group_set, }; +static void amd_gpio_irq_init(struct amd_gpio *gpio_dev) +{ + struct pinctrl_desc *desc = gpio_dev->pctrl->desc; + unsigned long flags; + u32 pin_reg, mask; + int i; + + mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) | + BIT(WAKE_CNTRL_OFF_S4); + + for (i = 0; i < desc->npins; i++) { + int pin = desc->pins[i].number; + const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin); + + if (!pd) + continue; + + raw_spin_lock_irqsave(&gpio_dev->lock, flags); + + pin_reg = readl(gpio_dev->base + pin * 4); + pin_reg &= ~mask; + writel(pin_reg, gpio_dev->base + pin * 4); + + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); + } +} + #ifdef CONFIG_PM_SLEEP static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin) { @@ -1099,6 +1126,9 @@ static int amd_gpio_probe(struct platform_device *pdev) return PTR_ERR(gpio_dev->pctrl); } + /* Disable and mask interrupts */ + amd_gpio_irq_init(gpio_dev); + girq = &gpio_dev->gc.irq; gpio_irq_chip_set_chip(girq, &amd_gpio_irqchip); /* This will let us handle the parent IRQ in the driver */ From fe04122b932118e968ea4ba88bdc62aa806b88d1 Mon Sep 17 00:00:00 2001 From: Wei Chen Date: Thu, 10 Aug 2023 08:23:33 +0000 Subject: [PATCH 090/123] media: vcodec: Fix potential array out-of-bounds in encoder queue_setup commit e7f2e65699e2290fd547ec12a17008764e5d9620 upstream. variable *nplanes is provided by user via system call argument. The possible value of q_data->fmt->num_planes is 1-3, while the value of *nplanes can be 1-8. The array access by index i can cause array out-of-bounds. Fix this bug by checking *nplanes against the array size. 
Fixes: 4e855a6efa54 ("[media] vcodec: mediatek: Add Mediatek V4L2 Video Encoder Driver") Signed-off-by: Wei Chen Cc: stable@vger.kernel.org Reviewed-by: Chen-Yu Tsai Signed-off-by: Hans Verkuil Signed-off-by: Greg Kroah-Hartman --- drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c index d810a78dde51..31e3c3766218 100644 --- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c +++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c @@ -821,6 +821,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq, return -EINVAL; if (*nplanes) { + if (*nplanes != q_data->fmt->num_planes) + return -EINVAL; for (i = 0; i < *nplanes; i++) if (sizes[i] < q_data->sizeimage[i]) return -EINVAL; From 1900e193b5ddd32f55fef7369152531a1886879e Mon Sep 17 00:00:00 2001 From: Igor Mammedov Date: Wed, 26 Jul 2023 14:35:18 +0200 Subject: [PATCH 091/123] PCI: acpiphp: Use pci_assign_unassigned_bridge_resources() only for non-root bus MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit cc22522fd55e257c86d340ae9aedc122e705a435 upstream. 40613da52b13 ("PCI: acpiphp: Reassign resources on bridge if necessary") changed acpiphp hotplug to use pci_assign_unassigned_bridge_resources() which depends on bridge being available, however enable_slot() can be called without bridge associated: 1. Legitimate case of hotplug on root bus (widely used in virt world) 2. A (misbehaving) firmware, that sends ACPI Bus Check notifications to non existing root ports (Dell Inspiron 7352/0W6WV0), which end up at enable_slot(..., bridge = 0) where bus has no bridge assigned to it. acpihp doesn't know that it's a bridge, and bus specific 'PCI subsystem' can't augment ACPI context with bridge information since the PCI device to get this data from is/was not available. Issue is easy to reproduce with QEMU's 'pc' machine, which supports PCI hotplug on hostbridge slots. To reproduce, boot kernel at commit 40613da52b13 in VM started with following CLI (assuming guest root fs is installed on sda1 partition): # qemu-system-x86_64 -M pc -m 1G -enable-kvm -cpu host \ -monitor stdio -serial file:serial.log \ -kernel arch/x86/boot/bzImage \ -append "root=/dev/sda1 console=ttyS0" \ guest_disk.img Once guest OS is fully booted at qemu prompt: (qemu) device_add e1000 (check serial.log) it will cause NULL pointer dereference at: void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge) { struct pci_bus *parent = bridge->subordinate; BUG: kernel NULL pointer dereference, address: 0000000000000018 ? pci_assign_unassigned_bridge_resources+0x1f/0x260 enable_slot+0x21f/0x3e0 acpiphp_hotplug_notify+0x13d/0x260 acpi_device_hotplug+0xbc/0x540 acpi_hotplug_work_fn+0x15/0x20 process_one_work+0x1f7/0x370 worker_thread+0x45/0x3b0 The issue was discovered on Dell Inspiron 7352/0W6WV0 laptop with following sequence: 1. Suspend to RAM 2. Wake up with the same backtrace being observed: 3. 2nd suspend to RAM attempt makes laptop freeze Fix it by using __pci_bus_assign_resources() instead of pci_assign_unassigned_bridge_resources() as we used to do, but only in case when bus doesn't have a bridge associated (to cover for the case of ACPI event on hostbridge or non existing root port). 
That lets us keep hotplug on root bus working like it used to and at the same time keeps resource reassignment usable on root ports (and other 1st level bridges) that was fixed by 40613da52b13. Fixes: 40613da52b13 ("PCI: acpiphp: Reassign resources on bridge if necessary") Link: https://lore.kernel.org/r/20230726123518.2361181-2-imammedo@redhat.com Reported-by: Woody Suwalski Tested-by: Woody Suwalski Tested-by: Michal Koutný Link: https://lore.kernel.org/r/11fc981c-af49-ce64-6b43-3e282728bd1a@gmail.com Signed-off-by: Igor Mammedov Signed-off-by: Bjorn Helgaas Acked-by: Rafael J. Wysocki Acked-by: Michael S. Tsirkin Signed-off-by: Greg Kroah-Hartman --- drivers/pci/hotplug/acpiphp_glue.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index 393f341d9d76..ea0195337bab 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -490,6 +490,7 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) acpiphp_native_scan_bridge(dev); } } else { + LIST_HEAD(add_list); int max, pass; acpiphp_rescan_slot(slot); @@ -503,10 +504,15 @@ static void enable_slot(struct acpiphp_slot *slot, bool bridge) if (pass && dev->subordinate) { check_hotplug_bridge(slot, dev); pcibios_resource_survey_bus(dev->subordinate); + if (pci_is_root_bus(bus)) + __pci_bus_size_bridges(dev->subordinate, &add_list); } } } - pci_assign_unassigned_bridge_resources(bus->self); + if (pci_is_root_bus(bus)) + __pci_bus_assign_resources(bus, &add_list, NULL); + else + pci_assign_unassigned_bridge_resources(bus->self); } acpiphp_sanitize_bus(bus); From 115f2ccd3a998fe7247f59f8fb5feffc878bcbb7 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Fri, 16 Jun 2023 15:09:34 -0400 Subject: [PATCH 092/123] drm/vmwgfx: Fix shader stage validation commit 14abdfae508228a7307f7491b5c4215ae70c6542 upstream. For multiple commands the driver was not correctly validating the shader stages resulting in possible kernel oopses. The validation code was only, if ever, checking the upper bound on the shader stages but never a lower bound (valid shader stages start at 1 not 0). Fixes kernel oopses ending up in vmw_binding_add, e.g.: Oops: 0000 [#1] PREEMPT SMP PTI CPU: 1 PID: 2443 Comm: testcase Not tainted 6.3.0-rc4-vmwgfx #1 Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 11/12/2020 RIP: 0010:vmw_binding_add+0x4c/0x140 [vmwgfx] Code: 7e 30 49 83 ff 0e 0f 87 ea 00 00 00 4b 8d 04 7f 89 d2 89 cb 48 c1 e0 03 4c 8b b0 40 3d 93 c0 48 8b 80 48 3d 93 c0 49 0f af de <48> 03 1c d0 4c 01 e3 49 8> RSP: 0018:ffffb8014416b968 EFLAGS: 00010206 RAX: ffffffffc0933ec0 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 00000000ffffffff RSI: ffffb8014416b9c0 RDI: ffffb8014316f000 RBP: ffffb8014416b998 R08: 0000000000000003 R09: 746f6c735f726564 R10: ffffffffaaf2bda0 R11: 732e676e69646e69 R12: ffffb8014316f000 R13: ffffb8014416b9c0 R14: 0000000000000040 R15: 0000000000000006 FS: 00007fba8c0af740(0000) GS:ffff8a1277c80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000007c0933eb8 CR3: 0000000118244001 CR4: 00000000003706e0 Call Trace: vmw_view_bindings_add+0xf5/0x1b0 [vmwgfx] ? ___drm_dbg+0x8a/0xb0 [drm] vmw_cmd_dx_set_shader_res+0x8f/0xc0 [vmwgfx] vmw_execbuf_process+0x590/0x1360 [vmwgfx] vmw_execbuf_ioctl+0x173/0x370 [vmwgfx] ? __drm_dev_dbg+0xb4/0xe0 [drm] ?
__pfx_vmw_execbuf_ioctl+0x10/0x10 [vmwgfx] drm_ioctl_kernel+0xbc/0x160 [drm] drm_ioctl+0x2d2/0x580 [drm] ? __pfx_vmw_execbuf_ioctl+0x10/0x10 [vmwgfx] ? do_fault+0x1a6/0x420 vmw_generic_ioctl+0xbd/0x180 [vmwgfx] vmw_unlocked_ioctl+0x19/0x20 [vmwgfx] __x64_sys_ioctl+0x96/0xd0 do_syscall_64+0x5d/0x90 ? handle_mm_fault+0xe4/0x2f0 ? debug_smp_processor_id+0x1b/0x30 ? fpregs_assert_state_consistent+0x2e/0x50 ? exit_to_user_mode_prepare+0x40/0x180 ? irqentry_exit_to_user_mode+0xd/0x20 ? irqentry_exit+0x3f/0x50 ? exc_page_fault+0x8b/0x180 entry_SYSCALL_64_after_hwframe+0x72/0xdc Signed-off-by: Zack Rusin Cc: security@openanolis.org Reported-by: Ziming Zhang Testcase-found-by: Niels De Graef Fixes: d80efd5cb3de ("drm/vmwgfx: Initial DX support") Cc: # v4.3+ Reviewed-by: Maaz Mombasawala Reviewed-by: Martin Krastev Link: https://patchwork.freedesktop.org/patch/msgid/20230616190934.54828-1-zack@kde.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 12 ++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 29 ++++++++++--------------- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 1ec9c53a7bf4..8459fab9d979 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1683,4 +1683,16 @@ static inline bool vmw_has_fences(struct vmw_private *vmw) return (vmw_fifo_caps(vmw) & SVGA_FIFO_CAP_FENCE) != 0; } +static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model, + u32 shader_type) +{ + SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX; + + if (shader_model >= VMW_SM_5) + max_allowed = SVGA3D_SHADERTYPE_MAX; + else if (shader_model >= VMW_SM_4) + max_allowed = SVGA3D_SHADERTYPE_DX10_MAX; + return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed; +} + #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 1c88b74d68cf..58ca9adf0987 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1985,7 +1985,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { + if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; @@ -2108,8 +2108,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer); - SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ? 
- SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10; struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); @@ -2126,6 +2124,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, if (unlikely(ret != 0)) return ret; + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) || + cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { + VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", + (unsigned int) cmd->body.type, + (unsigned int) cmd->body.slot); + return -EINVAL; + } + binding.bi.ctx = ctx_node->ctx; binding.bi.res = res; binding.bi.bt = vmw_ctx_binding_cb; @@ -2134,14 +2140,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, binding.size = cmd->body.sizeInBytes; binding.slot = cmd->body.slot; - if (binding.shader_slot >= max_shader_num || - binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { - VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n", - (unsigned int) cmd->body.type, - (unsigned int) binding.slot); - return -EINVAL; - } - vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, binding.slot); @@ -2200,15 +2198,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) = container_of(header, typeof(*cmd), header); - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? - SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dShaderResourceViewId); if ((u64) cmd->body.startView + (u64) num_sr_view > (u64) SVGA3D_DX_MAX_SRVIEWS || - cmd->body.type >= max_allowed) { + !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Invalid shader binding.\n"); return -EINVAL; } @@ -2232,8 +2228,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, SVGA3dCmdHeader *header) { VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader); - SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ? - SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX; struct vmw_resource *res = NULL; struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context); struct vmw_ctx_bindinfo_shader binding; @@ -2244,8 +2238,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, cmd = container_of(header, typeof(*cmd), header); - if (cmd->body.type >= max_allowed || - cmd->body.type < SVGA3D_SHADERTYPE_MIN) { + if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) { VMW_DEBUG_USER("Illegal shader type %u.\n", (unsigned int) cmd->body.type); return -EINVAL; From 3abffee6091c5a2716963c229e192a36a9590a88 Mon Sep 17 00:00:00 2001 From: Anshuman Gupta Date: Wed, 16 Aug 2023 18:22:16 +0530 Subject: [PATCH 093/123] drm/i915/dgfx: Enable d3cold at s2idle commit 2872144aec04baa7e43ecd2a60f7f0be3aa843fd upstream. System wide suspend already has support for lmem save/restore during suspend, therefore enable d3cold for s2idle and keep it disabled for runtime PM. (Refer to the commit below for the d3cold runtime PM disable justification) 'commit 66eb93e71a7a ("drm/i915/dgfx: Keep PCI autosuspend control 'on' by default on all dGPU")' It will reduce the DG2 Card power consumption to ~0 Watt for s2idle power KPI. v2: - Added "Cc: stable@vger.kernel.org".
Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/8755 Cc: stable@vger.kernel.org Cc: Rodrigo Vivi Signed-off-by: Anshuman Gupta Reviewed-by: Rodrigo Vivi Tested-by: Aaron Ma Tested-by: Jianshui Yu Link: https://patchwork.freedesktop.org/patch/msgid/20230816125216.1722002-1-anshuman.gupta@intel.com (cherry picked from commit 2643e6d1f2a5e51877be24042d53cf956589be10) Signed-off-by: Rodrigo Vivi Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/i915/i915_driver.c | 33 ++++++++++++++++-------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 35bc2a3fa811..75a93951fe42 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -574,7 +574,6 @@ static int i915_pcode_init(struct drm_i915_private *i915) static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - struct pci_dev *root_pdev; int ret; if (i915_inject_probe_failure(dev_priv)) @@ -686,15 +685,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) intel_bw_init_hw(dev_priv); - /* - * FIXME: Temporary hammer to avoid freezing the machine on our DGFX - * This should be totally removed when we handle the pci states properly - * on runtime PM and on s2idle cases. - */ - root_pdev = pcie_find_root_port(pdev); - if (root_pdev) - pci_d3cold_disable(root_pdev); - return 0; err_msi: @@ -718,16 +708,11 @@ err_perf: static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) { struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - struct pci_dev *root_pdev; i915_perf_fini(dev_priv); if (pdev->msi_enabled) pci_disable_msi(pdev); - - root_pdev = pcie_find_root_port(pdev); - if (root_pdev) - pci_d3cold_enable(root_pdev); } /** @@ -1625,6 +1610,8 @@ static int intel_runtime_suspend(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *root_pdev; struct intel_gt *gt; int ret, i; @@ -1674,6 +1661,15 @@ static int intel_runtime_suspend(struct device *kdev) drm_err(&dev_priv->drm, "Unclaimed access detected prior to suspending\n"); + /* + * FIXME: Temporary hammer to avoid freezing the machine on our DGFX + * This should be totally removed when we handle the pci states properly + * on runtime PM. + */ + root_pdev = pcie_find_root_port(pdev); + if (root_pdev) + pci_d3cold_disable(root_pdev); + rpm->suspended = true; /* @@ -1712,6 +1708,8 @@ static int intel_runtime_resume(struct device *kdev) { struct drm_i915_private *dev_priv = kdev_to_i915(kdev); struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); + struct pci_dev *root_pdev; struct intel_gt *gt; int ret, i; @@ -1725,6 +1723,11 @@ static int intel_runtime_resume(struct device *kdev) intel_opregion_notify_adapter(dev_priv, PCI_D0); rpm->suspended = false; + + root_pdev = pcie_find_root_port(pdev); + if (root_pdev) + pci_d3cold_enable(root_pdev); + if (intel_uncore_unclaimed_mmio(&dev_priv->uncore)) drm_dbg(&dev_priv->drm, "Unclaimed access during suspend, bios?\n"); From 3bc9b0364a8c64d1bb1757b620ea3b9104e8054b Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Fri, 18 Aug 2023 10:14:36 +0530 Subject: [PATCH 094/123] drm/display/dp: Fix the DP DSC Receiver cap size commit 5ad1ab30ac0809d2963ddcf39ac34317a24a2f17 upstream. 
DP DSC Receiver Capabilities are exposed via DPCD 60h-6Fh. Fix the DSC RECEIVER CAP SIZE accordingly. Fixes: ffddc4363c28 ("drm/dp: Add DP DSC DPCD receiver capability size define and missing SHIFT") Cc: Anusha Srivatsa Cc: Manasi Navare Cc: # v5.0+ Signed-off-by: Ankit Nautiyal Reviewed-by: Stanislav Lisovskiy Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20230818044436.177806-1-ankit.k.nautiyal@intel.com Signed-off-by: Greg Kroah-Hartman --- include/drm/display/drm_dp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h index 05f2cc03d03d..b235d6833e27 100644 --- a/include/drm/display/drm_dp.h +++ b/include/drm/display/drm_dp.h @@ -1525,7 +1525,7 @@ enum drm_dp_phy { #define DP_BRANCH_OUI_HEADER_SIZE 0xc #define DP_RECEIVER_CAP_SIZE 0xf -#define DP_DSC_RECEIVER_CAP_SIZE 0xf +#define DP_DSC_RECEIVER_CAP_SIZE 0x10 /* DSC Capabilities 0x60 through 0x6F */ #define EDP_PSR_RECEIVER_CAP_SIZE 2 #define EDP_DISPLAY_CTL_CAP_SIZE 3 #define DP_LTTPR_COMMON_CAP_SIZE 8 From 6bcb9c7d043578a71a2409a7c4bdb2febe471cce Mon Sep 17 00:00:00 2001 From: Rick Edgecombe Date: Fri, 18 Aug 2023 10:03:05 -0700 Subject: [PATCH 095/123] x86/fpu: Invalidate FPU state correctly on exec() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 1f69383b203e28cf8a4ca9570e572da1699f76cd upstream. The thread flag TIF_NEED_FPU_LOAD indicates that the FPU saved state is valid and should be reloaded when returning to userspace. However, the kernel will skip doing this if the FPU registers are already valid as determined by fpregs_state_valid(). The logic embedded there considers the state valid if two cases are both true: 1: fpu_fpregs_owner_ctx points to the current tasks FPU state 2: the last CPU the registers were live in was the current CPU. This is usually correct logic. A CPU’s fpu_fpregs_owner_ctx is set to the current FPU during the fpregs_restore_userregs() operation, so it indicates that the registers have been restored on this CPU. But this alone doesn’t preclude that the task hasn’t been rescheduled to a different CPU, where the registers were modified, and then back to the current CPU. To verify that this was not the case the logic relies on the second condition. So the assumption is that if the registers have been restored, AND they haven’t had the chance to be modified (by being loaded on another CPU), then they MUST be valid on the current CPU. Besides the lazy FPU optimizations, the other cases where the FPU registers might not be valid are when the kernel modifies the FPU register state or the FPU saved buffer. In this case the operation modifying the FPU state needs to let the kernel know the correspondence has been broken. The comment in “arch/x86/kernel/fpu/context.h” has: /* ... * If the FPU register state is valid, the kernel can skip restoring the * FPU state from memory. * * Any code that clobbers the FPU registers or updates the in-memory * FPU state for a task MUST let the rest of the kernel know that the * FPU registers are no longer valid for this task. * * Either one of these invalidation functions is enough. Invalidate * a resource you control: CPU if using the CPU for something else * (with preemption disabled), FPU for the current task, or a task that * is prevented from running by the current task. */ However, this is not completely true. 
When the kernel modifies the registers or saved FPU state, it can only rely on __fpu_invalidate_fpregs_state(), which wipes the FPU’s last_cpu tracking. The exec path instead relies on fpregs_deactivate(), which sets the CPU’s FPU context to NULL. This was observed to fail to restore the reset FPU state to the registers when returning to userspace in the following scenario: 1. A task is executing in userspace on CPU0 - CPU0’s FPU context points to tasks - fpu->last_cpu=CPU0 2. The task exec()’s 3. While in the kernel the task is preempted - CPU0 gets a thread executing in the kernel (such that no other FPU context is activated) - Scheduler sets task’s fpu->last_cpu=CPU0 when scheduling out 4. Task is migrated to CPU1 5. Continuing the exec(), the task gets to fpu_flush_thread()->fpu_reset_fpregs() - Sets CPU1’s fpu context to NULL - Copies the init state to the task’s FPU buffer - Sets TIF_NEED_FPU_LOAD on the task 6. The task reschedules back to CPU0 before completing the exec() and returning to userspace - During the reschedule, scheduler finds TIF_NEED_FPU_LOAD is set - Skips saving the registers and updating task’s fpu->last_cpu, because TIF_NEED_FPU_LOAD is the canonical source. 7. Now CPU0’s FPU context is still pointing to the task’s, and fpu->last_cpu is still CPU0. So fpregs_state_valid() returns true even though the reset FPU state has not been restored. So the root cause is that exec() is doing the wrong kind of invalidate. It should reset fpu->last_cpu via __fpu_invalidate_fpregs_state(). Further, fpu__drop() doesn’t really seem appropriate as the task (and FPU) are not going away, they are just getting reset as part of an exec. So switch to __fpu_invalidate_fpregs_state(). Also, delete the misleading comment that says that either kind of invalidate will be enough, because it’s not always the case. Fixes: 33344368cb08 ("x86/fpu: Clean up the fpu__clear() variants") Reported-by: Lei Wang Signed-off-by: Rick Edgecombe Signed-off-by: Thomas Gleixner Tested-by: Lijun Pan Reviewed-by: Sohil Mehta Acked-by: Lijun Pan Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230818170305.502891-1-rick.p.edgecombe@intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/fpu/context.h | 3 +-- arch/x86/kernel/fpu/core.c | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/fpu/context.h b/arch/x86/kernel/fpu/context.h index 9fcfa5c4dad7..71b5059e092a 100644 --- a/arch/x86/kernel/fpu/context.h +++ b/arch/x86/kernel/fpu/context.h @@ -19,8 +19,7 @@ * FPU state for a task MUST let the rest of the kernel know that the * FPU registers are no longer valid for this task. * - * Either one of these invalidation functions is enough. Invalidate - * a resource you control: CPU if using the CPU for something else + * Invalidate a resource you control: CPU if using the CPU for something else * (with preemption disabled), FPU for the current task, or a task that * is prevented from running by the current task. */ diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index caf33486dc5e..a083f9ac9e4f 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -679,7 +679,7 @@ static void fpu_reset_fpregs(void) struct fpu *fpu = &current->thread.fpu; fpregs_lock(); - fpu__drop(fpu); + __fpu_invalidate_fpregs_state(fpu); /* * This does not change the actual hardware registers.
It just * resets the memory image and sets TIF_NEED_FPU_LOAD so a From d8f9a9cfdcd31290cb8b720746458cb110301c68 Mon Sep 17 00:00:00 2001 From: Feng Tang Date: Wed, 23 Aug 2023 14:57:47 +0800 Subject: [PATCH 096/123] x86/fpu: Set X86_FEATURE_OSXSAVE feature after enabling OSXSAVE in CR4 commit 2c66ca3949dc701da7f4c9407f2140ae425683a5 upstream. 0-Day found a 34.6% regression in stress-ng's 'af-alg' test case, and bisected it to commit b81fac906a8f ("x86/fpu: Move FPU initialization into arch_cpu_finalize_init()"), which optimizes the FPU init order, and moves the CR4_OSXSAVE enabling into a later place: arch_cpu_finalize_init identify_boot_cpu identify_cpu generic_identify get_cpu_cap --> setup cpu capability ... fpu__init_cpu fpu__init_cpu_xstate cr4_set_bits(X86_CR4_OSXSAVE); As the FPU is not yet initialized the CPU capability setup fails to set X86_FEATURE_OSXSAVE. Many security module like 'camellia_aesni_avx_x86_64' depend on this feature and therefore fail to load, causing the regression. Cure this by setting X86_FEATURE_OSXSAVE feature right after OSXSAVE enabling. [ tglx: Moved it into the actual BSP FPU initialization code and added a comment ] Fixes: b81fac906a8f ("x86/fpu: Move FPU initialization into arch_cpu_finalize_init()") Reported-by: kernel test robot Signed-off-by: Feng Tang Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Link: https://lore.kernel.org/lkml/202307192135.203ac24e-oliver.sang@intel.com Link: https://lore.kernel.org/lkml/20230823065747.92257-1-feng.tang@intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/fpu/xstate.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 0bab497c9436..1afbc4866b10 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -882,6 +882,13 @@ void __init fpu__init_system_xstate(unsigned int legacy_size) goto out_disable; } + /* + * CPU capabilities initialization runs before FPU init. So + * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely + * functional, set the feature bit so depending code works. + */ + setup_force_cpu_cap(X86_FEATURE_OSXSAVE); + print_xstate_offset_size(); pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n", fpu_kernel_cfg.max_features, From f1fa6e6f85cb5cbf3f1e78c42c25f3b974584543 Mon Sep 17 00:00:00 2001 From: Aleksa Savic Date: Mon, 7 Aug 2023 19:20:03 +0200 Subject: [PATCH 097/123] hwmon: (aquacomputer_d5next) Add selective 200ms delay after sending ctrl report commit 56b930dcd88c2adc261410501c402c790980bdb5 upstream. Add a 200ms delay after sending a ctrl report to Quadro, Octo, D5 Next and Aquaero to give them enough time to process the request and save the data to memory. Otherwise, under heavier userspace loads where multiple sysfs entries are usually set in quick succession, a new ctrl report could be requested from the device while it's still processing the previous one and fail with -EPIPE. The delay is only applied if two ctrl report operations are near each other in time. Reported by a user on Github [1] and tested by both of us. 
[1] https://github.com/aleksamagicka/aquacomputer_d5next-hwmon/issues/82 Fixes: 752b927951ea ("hwmon: (aquacomputer_d5next) Add support for Aquacomputer Octo") Signed-off-by: Aleksa Savic Link: https://lore.kernel.org/r/20230807172004.456968-1-savicaleksa83@gmail.com Signed-off-by: Guenter Roeck [ removed Aquaero support as it's not in 6.1 ] Signed-off-by: Aleksa Savic Signed-off-by: Greg Kroah-Hartman --- drivers/hwmon/aquacomputer_d5next.c | 36 ++++++++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/drivers/hwmon/aquacomputer_d5next.c b/drivers/hwmon/aquacomputer_d5next.c index c51a2678f0eb..8c7796d3fdd2 100644 --- a/drivers/hwmon/aquacomputer_d5next.c +++ b/drivers/hwmon/aquacomputer_d5next.c @@ -12,9 +12,11 @@ #include #include +#include #include #include #include +#include #include #include #include @@ -49,6 +51,8 @@ static const char *const aqc_device_names[] = { #define CTRL_REPORT_ID 0x03 +#define CTRL_REPORT_DELAY 200 /* ms */ + /* The HID report that the official software always sends * after writing values, currently same for all devices */ @@ -269,6 +273,9 @@ struct aqc_data { enum kinds kind; const char *name; + ktime_t last_ctrl_report_op; + int ctrl_report_delay; /* Delay between two ctrl report operations, in ms */ + int buffer_size; u8 *buffer; int checksum_start; @@ -325,17 +332,35 @@ static int aqc_pwm_to_percent(long val) return DIV_ROUND_CLOSEST(val * 100 * 100, 255); } +static void aqc_delay_ctrl_report(struct aqc_data *priv) +{ + /* + * If previous read or write is too close to this one, delay the current operation + * to give the device enough time to process the previous one. + */ + if (priv->ctrl_report_delay) { + s64 delta = ktime_ms_delta(ktime_get(), priv->last_ctrl_report_op); + + if (delta < priv->ctrl_report_delay) + msleep(priv->ctrl_report_delay - delta); + } +} + /* Expects the mutex to be locked */ static int aqc_get_ctrl_data(struct aqc_data *priv) { int ret; + aqc_delay_ctrl_report(priv); + memset(priv->buffer, 0x00, priv->buffer_size); ret = hid_hw_raw_request(priv->hdev, CTRL_REPORT_ID, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_GET_REPORT); if (ret < 0) ret = -ENODATA; + priv->last_ctrl_report_op = ktime_get(); + return ret; } @@ -345,6 +370,8 @@ static int aqc_send_ctrl_data(struct aqc_data *priv) int ret; u16 checksum; + aqc_delay_ctrl_report(priv); + /* Init and xorout value for CRC-16/USB is 0xffff */ checksum = crc16(0xffff, priv->buffer + priv->checksum_start, priv->checksum_length); checksum ^= 0xffff; @@ -356,12 +383,16 @@ static int aqc_send_ctrl_data(struct aqc_data *priv) ret = hid_hw_raw_request(priv->hdev, CTRL_REPORT_ID, priv->buffer, priv->buffer_size, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); if (ret < 0) - return ret; + goto record_access_and_ret; /* The official software sends this report after every change, so do it here as well */ ret = hid_hw_raw_request(priv->hdev, SECONDARY_CTRL_REPORT_ID, secondary_ctrl_report, SECONDARY_CTRL_REPORT_SIZE, HID_FEATURE_REPORT, HID_REQ_SET_REPORT); + +record_access_and_ret: + priv->last_ctrl_report_op = ktime_get(); + return ret; } @@ -853,6 +884,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) priv->virtual_temp_sensor_start_offset = D5NEXT_VIRTUAL_SENSORS_START; priv->power_cycle_count_offset = D5NEXT_POWER_CYCLES; priv->buffer_size = D5NEXT_CTRL_REPORT_SIZE; + priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->temp_label = label_d5next_temp; priv->virtual_temp_label = label_virtual_temp_sensors; @@ -893,6 
+925,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) priv->virtual_temp_sensor_start_offset = OCTO_VIRTUAL_SENSORS_START; priv->power_cycle_count_offset = OCTO_POWER_CYCLES; priv->buffer_size = OCTO_CTRL_REPORT_SIZE; + priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->temp_label = label_temp_sensors; priv->virtual_temp_label = label_virtual_temp_sensors; @@ -913,6 +946,7 @@ static int aqc_probe(struct hid_device *hdev, const struct hid_device_id *id) priv->virtual_temp_sensor_start_offset = QUADRO_VIRTUAL_SENSORS_START; priv->power_cycle_count_offset = QUADRO_POWER_CYCLES; priv->buffer_size = QUADRO_CTRL_REPORT_SIZE; + priv->ctrl_report_delay = CTRL_REPORT_DELAY; priv->flow_sensor_offset = QUADRO_FLOW_SENSOR_OFFSET; priv->temp_label = label_temp_sensors; From a0ec52f36ce98796aa3076885b6b96dc7e093384 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Wed, 18 Jan 2023 10:09:27 +0800 Subject: [PATCH 098/123] selftests/net: mv bpf/nat6to4.c to net folder MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 3c107f36db061603bee7564fbd6388b1f1879fd3 upstream. There are some issues with the bpf/nat6to4.c building. 1. It use TEST_CUSTOM_PROGS, which will add the nat6to4.o to kselftest-list file and run by common run_tests. 2. When building the test via `make -C tools/testing/selftests/ TARGETS="net"`, the nat6to4.o will be build in selftests/net/bpf/ folder. But in test udpgro_frglist.sh it refers to ../bpf/nat6to4.o. The correct path should be ./bpf/nat6to4.o. 3. If building the test via `make -C tools/testing/selftests/ TARGETS="net" install`. The nat6to4.o will be installed to kselftest_install/net/ folder. Then the udpgro_frglist.sh should refer to ./nat6to4.o. To fix the confusing test path, let's just move the nat6to4.c to net folder and build it as TEST_GEN_FILES. 
Fixes: edae34a3ed92 ("selftests net: add UDP GRO fraglist + bpf self-tests") Tested-by: Björn Töpel Signed-off-by: Hangbin Liu Link: https://lore.kernel.org/r/20230118020927.3971864-1-liuhangbin@gmail.com Signed-off-by: Paolo Abeni Signed-off-by: Hardik Garg Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/net/Makefile | 50 ++++++++++++++++++- tools/testing/selftests/net/bpf/Makefile | 14 ------ .../testing/selftests/net/{bpf => }/nat6to4.c | 0 tools/testing/selftests/net/udpgro_frglist.sh | 8 +-- 4 files changed, 52 insertions(+), 20 deletions(-) delete mode 100644 tools/testing/selftests/net/bpf/Makefile rename tools/testing/selftests/net/{bpf => }/nat6to4.c (100%) diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index 69c58362c0ed..48d1a68be1d5 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -71,14 +71,60 @@ TEST_GEN_FILES += bind_bhash TEST_GEN_PROGS += sk_bind_sendto_listen TEST_GEN_PROGS += sk_connect_zero_addr TEST_PROGS += test_ingress_egress_chaining.sh +TEST_GEN_FILES += nat6to4.o TEST_FILES := settings include ../lib.mk -include bpf/Makefile - $(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma $(OUTPUT)/tcp_mmap: LDLIBS += -lpthread $(OUTPUT)/tcp_inq: LDLIBS += -lpthread $(OUTPUT)/bind_bhash: LDLIBS += -lpthread + +# Rules to generate bpf obj nat6to4.o +CLANG ?= clang +SCRATCH_DIR := $(OUTPUT)/tools +BUILD_DIR := $(SCRATCH_DIR)/build +BPFDIR := $(abspath ../../../lib/bpf) +APIDIR := $(abspath ../../../include/uapi) + +CCINCLUDE += -I../bpf +CCINCLUDE += -I../../../../usr/include/ +CCINCLUDE += -I$(SCRATCH_DIR)/include + +BPFOBJ := $(BUILD_DIR)/libbpf/libbpf.a + +MAKE_DIRS := $(BUILD_DIR)/libbpf +$(MAKE_DIRS): + mkdir -p $@ + +# Get Clang's default includes on this system, as opposed to those seen by +# '-target bpf'. This fixes "missing" files on some architectures/distros, +# such as asm/byteorder.h, asm/socket.h, asm/sockios.h, sys/cdefs.h etc. +# +# Use '-idirafter': Don't interfere with include mechanics except where the +# build would have failed anyways. +define get_sys_includes +$(shell $(1) $(2) -v -E - &1 \ + | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') \ +$(shell $(1) $(2) -dM -E - Date: Tue, 14 Mar 2023 12:51:10 +0100 Subject: [PATCH 099/123] nfs: use vfs setgid helper commit 4f704d9a8352f5c0a8fcdb6213b934630342bd44 upstream. We've aligned setgid behavior over multiple kernel releases. The details can be found in the following two merge messages: cf619f891971 ("Merge tag 'fs.ovl.setgid.v6.2') 426b4ca2d6a5 ("Merge tag 'fs.setgid.v6.0') Consistent setgid stripping behavior is now encapsulated in the setattr_should_drop_sgid() helper which is used by all filesystems that strip setgid bits outside of vfs proper. Switch nfs to rely on this helper as well. Without this patch the setgid stripping tests in xfstests will fail. Signed-off-by: Christian Brauner (Microsoft) Reviewed-by: Christoph Hellwig Message-Id: <20230313-fs-nfs-setgid-v2-1-9a59f436cfc0@kernel.org> Signed-off-by: Christian Brauner [ Harshit: backport to 6.1.y: fs/internal.h -- minor conflict due to code change differences. 
include/linux/fs.h -- Used struct user_namespace *mnt_userns instead of struct mnt_idmap *idmap fs/nfs/inode.c -- Used init_user_ns instead of nop_mnt_idmap ] Signed-off-by: Harshit Mogalapalli Signed-off-by: Greg Kroah-Hartman --- fs/attr.c | 1 + fs/internal.h | 2 -- fs/nfs/inode.c | 4 +--- include/linux/fs.h | 2 ++ 4 files changed, 4 insertions(+), 5 deletions(-) diff --git a/fs/attr.c b/fs/attr.c index b45f30e516fa..9b9a70e0cc54 100644 --- a/fs/attr.c +++ b/fs/attr.c @@ -47,6 +47,7 @@ int setattr_should_drop_sgid(struct user_namespace *mnt_userns, return ATTR_KILL_SGID; return 0; } +EXPORT_SYMBOL(setattr_should_drop_sgid); /** * setattr_should_drop_suidgid - determine whether the set{g,u}id bit needs to diff --git a/fs/internal.h b/fs/internal.h index 46caa33373a4..42df013f7fe7 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -242,5 +242,3 @@ ssize_t __kernel_write_iter(struct file *file, struct iov_iter *from, loff_t *po /* * fs/attr.c */ -int setattr_should_drop_sgid(struct user_namespace *mnt_userns, - const struct inode *inode); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 6b2cfa59a1a2..e0c1fb98f907 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -717,9 +717,7 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr, if ((attr->ia_valid & ATTR_KILL_SUID) != 0 && inode->i_mode & S_ISUID) inode->i_mode &= ~S_ISUID; - if ((attr->ia_valid & ATTR_KILL_SGID) != 0 && - (inode->i_mode & (S_ISGID | S_IXGRP)) == - (S_ISGID | S_IXGRP)) + if (setattr_should_drop_sgid(&init_user_ns, inode)) inode->i_mode &= ~S_ISGID; if ((attr->ia_valid & ATTR_MODE) != 0) { int mode = attr->ia_mode & S_IALLUGO; diff --git a/include/linux/fs.h b/include/linux/fs.h index a2b5592c6828..26ea1a0a59a1 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -3120,6 +3120,8 @@ extern struct inode *new_inode(struct super_block *sb); extern void free_inode_nonrcu(struct inode *inode); extern int setattr_should_drop_suidgid(struct user_namespace *, struct inode *); extern int file_remove_privs(struct file *); +int setattr_should_drop_sgid(struct user_namespace *mnt_userns, + const struct inode *inode); /* * This must be used for allocating filesystems specific inodes to set From ce59b7c1b027bb2da6a69a81bcca63783561ae9b Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Tue, 2 May 2023 15:36:02 +0200 Subject: [PATCH 100/123] nfsd: use vfs setgid helper commit 2d8ae8c417db284f598dffb178cc01e7db0f1821 upstream. We've aligned setgid behavior over multiple kernel releases. The details can be found in commit cf619f891971 ("Merge tag 'fs.ovl.setgid.v6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/idmapping") and commit 426b4ca2d6a5 ("Merge tag 'fs.setgid.v6.0' of git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux"). Consistent setgid stripping behavior is now encapsulated in the setattr_should_drop_sgid() helper which is used by all filesystems that strip setgid bits outside of vfs proper. Usually ATTR_KILL_SGID is raised in e.g., chown_common() and is subject to the setattr_should_drop_sgid() check to determine whether the setgid bit can be retained. Since nfsd is raising ATTR_KILL_SGID unconditionally it will cause notify_change() to strip it even if the caller had the necessary privileges to retain it. Ensure that nfsd only raises ATTR_KILL_SGID if the caller lacks the necessary privileges to retain the setgid bit.
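As a rough client-side illustration of the behaviour (a sketch only: the NFS mount point and file name are assumptions, it must run as root, and the export has to allow root access), a privileged chown on a setgid, non-group-executable file is expected to keep the setgid bit, whereas the unconditional ATTR_KILL_SGID caused nfsd to strip it:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/mnt/nfs/testfile2";  /* hypothetical path on an NFS export */
        struct stat st;
        int fd;

        fd = open(path, O_CREAT | O_WRONLY, 0700);
        if (fd < 0)
            return 1;
        close(fd);
        if (chmod(path, 02700))   /* setgid set, not group-executable */
            return 1;
        if (chown(path, 0, 0))    /* privileged chown, should retain S_ISGID */
            return 1;
        if (stat(path, &st))
            return 1;
        /* expect 2700 with this change; 700 when the bit was wrongly stripped */
        printf("mode: %o\n", st.st_mode & 07777);
        return 0;
    }
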
Without this patch the setgid stripping tests in LTP will fail: > As you can see, the problem is S_ISGID (0002000) was dropped on a > non-group-executable file while chown was invoked by super-user, while [...] > fchown02.c:66: TFAIL: testfile2: wrong mode permissions 0100700, expected 0102700 [...] > chown02.c:57: TFAIL: testfile2: wrong mode permissions 0100700, expected 0102700 With this patch all tests pass. Reported-by: Sherry Yang Signed-off-by: Christian Brauner Reviewed-by: Jeff Layton Cc: Signed-off-by: Chuck Lever [Harshit: backport to 6.1.y: Use init_user_ns instead of nop_mnt_idmap as we don't have commit abf08576afe3 ("fs: port vfs_*() helpers to struct mnt_idmap")] Signed-off-by: Harshit Mogalapalli Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/vfs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 155b34c4683c..4c11046800ab 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -321,7 +321,9 @@ nfsd_sanitize_attrs(struct inode *inode, struct iattr *iap) iap->ia_mode &= ~S_ISGID; } else { /* set ATTR_KILL_* bits and let VFS handle it */ - iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID); + iap->ia_valid |= ATTR_KILL_SUID; + iap->ia_valid |= + setattr_should_drop_sgid(&init_user_ns, inode); } } } From 7030fbf75f260924b2ab98195e6f9fe7b8c80884 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Sun, 20 Aug 2023 16:24:12 +0100 Subject: [PATCH 101/123] cgroup/cpuset: Rename functions dealing with DEADLINE accounting commit ad3a557daf6915296a43ef97a3e9c48e076c9dd8 upstream. rebuild_root_domains() and update_tasks_root_domain() have neutral names, but actually deal with DEADLINE bandwidth accounting. Rename them to use 'dl_' prefix so that intent is more clear. No functional change. Suggested-by: Qais Yousef (Google) Signed-off-by: Juri Lelli Reviewed-by: Waiman Long Signed-off-by: Tejun Heo Signed-off-by: Qais Yousef (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/cgroup/cpuset.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index e276db722845..888602c54209 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1066,7 +1066,7 @@ done: return ndoms; } -static void update_tasks_root_domain(struct cpuset *cs) +static void dl_update_tasks_root_domain(struct cpuset *cs) { struct css_task_iter it; struct task_struct *task; @@ -1079,7 +1079,7 @@ static void update_tasks_root_domain(struct cpuset *cs) css_task_iter_end(&it); } -static void rebuild_root_domains(void) +static void dl_rebuild_rd_accounting(void) { struct cpuset *cs = NULL; struct cgroup_subsys_state *pos_css; @@ -1107,7 +1107,7 @@ static void rebuild_root_domains(void) rcu_read_unlock(); - update_tasks_root_domain(cs); + dl_update_tasks_root_domain(cs); rcu_read_lock(); css_put(&cs->css); @@ -1121,7 +1121,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], { mutex_lock(&sched_domains_mutex); partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); - rebuild_root_domains(); + dl_rebuild_rd_accounting(); mutex_unlock(&sched_domains_mutex); } From 9bcfe1527882deac71d367ea1ff59d38c0fe9486 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Sun, 20 Aug 2023 16:24:13 +0100 Subject: [PATCH 102/123] sched/cpuset: Bring back cpuset_mutex commit 111cd11bbc54850f24191c52ff217da88a5e639b upstream. 
Turns out percpu_cpuset_rwsem - commit 1243dc518c9d ("cgroup/cpuset: Convert cpuset_mutex to percpu_rwsem") - wasn't such a brilliant idea, as it has been reported to cause slowdowns in workloads that need to change cpuset configuration frequently and it is also not implementing priority inheritance (which causes troubles with realtime workloads). Convert percpu_cpuset_rwsem back to regular cpuset_mutex. Also grab it only for SCHED_DEADLINE tasks (other policies don't care about stable cpusets anyway). Signed-off-by: Juri Lelli Reviewed-by: Waiman Long Signed-off-by: Tejun Heo [ Conflict in kernel/cgroup/cpuset.c due to pulling new code/comments. Reject all new code. Remove BUG_ON() about rwsem that doesn't exist on mainline. ] Signed-off-by: Qais Yousef (Google) Signed-off-by: Greg Kroah-Hartman --- include/linux/cpuset.h | 8 +- kernel/cgroup/cpuset.c | 161 ++++++++++++++++++++--------------------- kernel/sched/core.c | 22 ++++-- 3 files changed, 99 insertions(+), 92 deletions(-) diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index d58e0476ee8e..355f796c5f07 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -71,8 +71,8 @@ extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); extern void cpuset_wait_for_hotplug(void); -extern void cpuset_read_lock(void); -extern void cpuset_read_unlock(void); +extern void cpuset_lock(void); +extern void cpuset_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); extern bool cpuset_cpus_allowed_fallback(struct task_struct *p); extern nodemask_t cpuset_mems_allowed(struct task_struct *p); @@ -196,8 +196,8 @@ static inline void cpuset_update_active_cpus(void) static inline void cpuset_wait_for_hotplug(void) { } -static inline void cpuset_read_lock(void) { } -static inline void cpuset_read_unlock(void) { } +static inline void cpuset_lock(void) { } +static inline void cpuset_unlock(void) { } static inline void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 888602c54209..a8a060fea074 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -366,22 +366,23 @@ static struct cpuset top_cpuset = { if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) /* - * There are two global locks guarding cpuset structures - cpuset_rwsem and + * There are two global locks guarding cpuset structures - cpuset_mutex and * callback_lock. We also require taking task_lock() when dereferencing a * task's cpuset pointer. See "The task_lock() exception", at the end of this - * comment. The cpuset code uses only cpuset_rwsem write lock. Other - * kernel subsystems can use cpuset_read_lock()/cpuset_read_unlock() to - * prevent change to cpuset structures. + * comment. The cpuset code uses only cpuset_mutex. Other kernel subsystems + * can use cpuset_lock()/cpuset_unlock() to prevent change to cpuset + * structures. Note that cpuset_mutex needs to be a mutex as it is used in + * paths that rely on priority inheritance (e.g. scheduler - on RT) for + * correctness. * * A task must hold both locks to modify cpusets. If a task holds - * cpuset_rwsem, it blocks others wanting that rwsem, ensuring that it - * is the only task able to also acquire callback_lock and be able to - * modify cpusets. It can perform various checks on the cpuset structure - * first, knowing nothing will change. It can also allocate memory while - * just holding cpuset_rwsem. 
While it is performing these checks, various - * callback routines can briefly acquire callback_lock to query cpusets. - * Once it is ready to make the changes, it takes callback_lock, blocking - * everyone else. + * cpuset_mutex, it blocks others, ensuring that it is the only task able to + * also acquire callback_lock and be able to modify cpusets. It can perform + * various checks on the cpuset structure first, knowing nothing will change. + * It can also allocate memory while just holding cpuset_mutex. While it is + * performing these checks, various callback routines can briefly acquire + * callback_lock to query cpusets. Once it is ready to make the changes, it + * takes callback_lock, blocking everyone else. * * Calls to the kernel memory allocator can not be made while holding * callback_lock, as that would risk double tripping on callback_lock @@ -403,16 +404,16 @@ static struct cpuset top_cpuset = { * guidelines for accessing subsystem state in kernel/cgroup.c */ -DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem); +static DEFINE_MUTEX(cpuset_mutex); -void cpuset_read_lock(void) +void cpuset_lock(void) { - percpu_down_read(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); } -void cpuset_read_unlock(void) +void cpuset_unlock(void) { - percpu_up_read(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } static DEFINE_SPINLOCK(callback_lock); @@ -496,7 +497,7 @@ static inline bool partition_is_populated(struct cpuset *cs, * One way or another, we guarantee to return some non-empty subset * of cpu_online_mask. * - * Call with callback_lock or cpuset_rwsem held. + * Call with callback_lock or cpuset_mutex held. */ static void guarantee_online_cpus(struct task_struct *tsk, struct cpumask *pmask) @@ -538,7 +539,7 @@ out_unlock: * One way or another, we guarantee to return some non-empty subset * of node_states[N_MEMORY]. * - * Call with callback_lock or cpuset_rwsem held. + * Call with callback_lock or cpuset_mutex held. */ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) { @@ -550,7 +551,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) /* * update task's spread flag if cpuset's page/slab spread flag is set * - * Call with callback_lock or cpuset_rwsem held. The check can be skipped + * Call with callback_lock or cpuset_mutex held. The check can be skipped * if on default hierarchy. */ static void cpuset_update_task_spread_flags(struct cpuset *cs, @@ -575,7 +576,7 @@ static void cpuset_update_task_spread_flags(struct cpuset *cs, * * One cpuset is a subset of another if all its allowed CPUs and * Memory Nodes are a subset of the other, and its exclusive flags - * are only set if the other's are set. Call holding cpuset_rwsem. + * are only set if the other's are set. Call holding cpuset_mutex. */ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) @@ -713,7 +714,7 @@ out: * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes - * cpuset_rwsem held. + * cpuset_mutex held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the @@ -829,7 +830,7 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr, rcu_read_unlock(); } -/* Must be called with cpuset_rwsem held. */ +/* Must be called with cpuset_mutex held. 
*/ static inline int nr_cpusets(void) { /* jump label reference count + the top-level cpuset */ @@ -855,7 +856,7 @@ static inline int nr_cpusets(void) * domains when operating in the severe memory shortage situations * that could cause allocation failures below. * - * Must be called with cpuset_rwsem held. + * Must be called with cpuset_mutex held. * * The three key local variables below are: * cp - cpuset pointer, used (together with pos_css) to perform a @@ -1084,7 +1085,7 @@ static void dl_rebuild_rd_accounting(void) struct cpuset *cs = NULL; struct cgroup_subsys_state *pos_css; - percpu_rwsem_assert_held(&cpuset_rwsem); + lockdep_assert_held(&cpuset_mutex); lockdep_assert_cpus_held(); lockdep_assert_held(&sched_domains_mutex); @@ -1134,7 +1135,7 @@ partition_and_rebuild_sched_domains(int ndoms_new, cpumask_var_t doms_new[], * 'cpus' is removed, then call this routine to rebuild the * scheduler's dynamic sched domains. * - * Call with cpuset_rwsem held. Takes cpus_read_lock(). + * Call with cpuset_mutex held. Takes cpus_read_lock(). */ static void rebuild_sched_domains_locked(void) { @@ -1145,7 +1146,7 @@ static void rebuild_sched_domains_locked(void) int ndoms; lockdep_assert_cpus_held(); - percpu_rwsem_assert_held(&cpuset_rwsem); + lockdep_assert_held(&cpuset_mutex); /* * If we have raced with CPU hotplug, return early to avoid @@ -1196,9 +1197,9 @@ static void rebuild_sched_domains_locked(void) void rebuild_sched_domains(void) { cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); rebuild_sched_domains_locked(); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); } @@ -1208,7 +1209,7 @@ void rebuild_sched_domains(void) * @new_cpus: the temp variable for the new effective_cpus mask * * Iterate through each task of @cs updating its cpus_allowed to the - * effective cpuset's. As this function is called with cpuset_rwsem held, + * effective cpuset's. As this function is called with cpuset_mutex held, * cpuset membership stays stable. */ static void update_tasks_cpumask(struct cpuset *cs, struct cpumask *new_cpus) @@ -1317,7 +1318,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd, int old_prs, new_prs; int part_error = PERR_NONE; /* Partition error? */ - percpu_rwsem_assert_held(&cpuset_rwsem); + lockdep_assert_held(&cpuset_mutex); /* * The parent must be a partition root. @@ -1540,7 +1541,7 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd, * * On legacy hierarchy, effective_cpus will be the same with cpu_allowed. * - * Called with cpuset_rwsem held + * Called with cpuset_mutex held */ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, bool force) @@ -1700,7 +1701,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, struct cpuset *sibling; struct cgroup_subsys_state *pos_css; - percpu_rwsem_assert_held(&cpuset_rwsem); + lockdep_assert_held(&cpuset_mutex); /* * Check all its siblings and call update_cpumasks_hier() @@ -1950,12 +1951,12 @@ static void *cpuset_being_rebound; * @cs: the cpuset in which each task's mems_allowed mask needs to be changed * * Iterate through each task of @cs updating its mems_allowed to the - * effective cpuset's. As this function is called with cpuset_rwsem held, + * effective cpuset's. As this function is called with cpuset_mutex held, * cpuset membership stays stable. 
*/ static void update_tasks_nodemask(struct cpuset *cs) { - static nodemask_t newmems; /* protected by cpuset_rwsem */ + static nodemask_t newmems; /* protected by cpuset_mutex */ struct css_task_iter it; struct task_struct *task; @@ -1968,7 +1969,7 @@ static void update_tasks_nodemask(struct cpuset *cs) * take while holding tasklist_lock. Forks can happen - the * mpol_dup() cpuset_being_rebound check will catch such forks, * and rebind their vma mempolicies too. Because we still hold - * the global cpuset_rwsem, we know that no other rebind effort + * the global cpuset_mutex, we know that no other rebind effort * will be contending for the global variable cpuset_being_rebound. * It's ok if we rebind the same mm twice; mpol_rebind_mm() * is idempotent. Also migrate pages in each mm to new nodes. @@ -2014,7 +2015,7 @@ static void update_tasks_nodemask(struct cpuset *cs) * * On legacy hierarchy, effective_mems will be the same with mems_allowed. * - * Called with cpuset_rwsem held + * Called with cpuset_mutex held */ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) { @@ -2067,7 +2068,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) * mempolicies and if the cpuset is marked 'memory_migrate', * migrate the tasks pages to the new memory. * - * Call with cpuset_rwsem held. May take callback_lock during call. + * Call with cpuset_mutex held. May take callback_lock during call. * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, * lock each such tasks mm->mmap_lock, scan its vma's and rebind * their mempolicies to the cpusets new mems_allowed. @@ -2159,7 +2160,7 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val) * @cs: the cpuset in which each task's spread flags needs to be changed * * Iterate through each task of @cs updating its spread flags. As this - * function is called with cpuset_rwsem held, cpuset membership stays + * function is called with cpuset_mutex held, cpuset membership stays * stable. */ static void update_tasks_flags(struct cpuset *cs) @@ -2179,7 +2180,7 @@ static void update_tasks_flags(struct cpuset *cs) * cs: the cpuset to update * turning_on: whether the flag is being set or cleared * - * Call with cpuset_rwsem held. + * Call with cpuset_mutex held. */ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, @@ -2229,7 +2230,7 @@ out: * @new_prs: new partition root state * Return: 0 if successful, != 0 if error * - * Call with cpuset_rwsem held. + * Call with cpuset_mutex held. 
*/ static int update_prstate(struct cpuset *cs, int new_prs) { @@ -2467,7 +2468,7 @@ static int cpuset_can_attach_check(struct cpuset *cs) return 0; } -/* Called by cgroups to determine if a cpuset is usable; cpuset_rwsem held */ +/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ static int cpuset_can_attach(struct cgroup_taskset *tset) { struct cgroup_subsys_state *css; @@ -2479,7 +2480,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); cs = css_cs(css); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); /* Check to see if task is allowed in the cpuset */ ret = cpuset_can_attach_check(cs); @@ -2501,7 +2502,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) */ cs->attach_in_progress++; out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); return ret; } @@ -2513,15 +2514,15 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset) cgroup_taskset_first(tset, &css); cs = css_cs(css); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); cs->attach_in_progress--; if (!cs->attach_in_progress) wake_up(&cpuset_attach_wq); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } /* - * Protected by cpuset_rwsem. cpus_attach is used only by cpuset_attach_task() + * Protected by cpuset_mutex. cpus_attach is used only by cpuset_attach_task() * but we can't allocate it dynamically there. Define it global and * allocate from cpuset_init(). */ @@ -2530,7 +2531,7 @@ static nodemask_t cpuset_attach_nodemask_to; static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) { - percpu_rwsem_assert_held(&cpuset_rwsem); + lockdep_assert_held(&cpuset_mutex); if (cs != &top_cpuset) guarantee_online_cpus(task, cpus_attach); @@ -2558,7 +2559,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) cs = css_cs(css); lockdep_assert_cpus_held(); /* see cgroup_attach_lock() */ - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); guarantee_online_mems(cs, &cpuset_attach_nodemask_to); @@ -2598,7 +2599,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) if (!cs->attach_in_progress) wake_up(&cpuset_attach_wq); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } /* The various types of files and directories in a cpuset file system */ @@ -2630,7 +2631,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, int retval = 0; cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) { retval = -ENODEV; goto out_unlock; @@ -2666,7 +2667,7 @@ static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft, break; } out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); return retval; } @@ -2679,7 +2680,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, int retval = -ENODEV; cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; @@ -2692,7 +2693,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft, break; } out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); return retval; } @@ -2725,7 +2726,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, * operation like this one can lead to a deadlock through kernfs * active_ref protection. Let's break the protection. 
Losing the * protection is okay as we check whether @cs is online after - * grabbing cpuset_rwsem anyway. This only happens on the legacy + * grabbing cpuset_mutex anyway. This only happens on the legacy * hierarchies. */ css_get(&cs->css); @@ -2733,7 +2734,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, flush_work(&cpuset_hotplug_work); cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; @@ -2757,7 +2758,7 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of, free_cpuset(trialcs); out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); kernfs_unbreak_active_protection(of->kn); css_put(&cs->css); @@ -2905,13 +2906,13 @@ static ssize_t sched_partition_write(struct kernfs_open_file *of, char *buf, css_get(&cs->css); cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (!is_cpuset_online(cs)) goto out_unlock; retval = update_prstate(cs, val); out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); css_put(&cs->css); return retval ?: nbytes; @@ -3124,7 +3125,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) return 0; cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); set_bit(CS_ONLINE, &cs->flags); if (is_spread_page(parent)) @@ -3175,7 +3176,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) cpumask_copy(cs->effective_cpus, parent->cpus_allowed); spin_unlock_irq(&callback_lock); out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); return 0; } @@ -3196,7 +3197,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) struct cpuset *cs = css_cs(css); cpus_read_lock(); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); if (is_partition_valid(cs)) update_prstate(cs, 0); @@ -3215,7 +3216,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) cpuset_dec(); clear_bit(CS_ONLINE, &cs->flags); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); cpus_read_unlock(); } @@ -3228,7 +3229,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) static void cpuset_bind(struct cgroup_subsys_state *root_css) { - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); spin_lock_irq(&callback_lock); if (is_in_v2_mode()) { @@ -3241,7 +3242,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) } spin_unlock_irq(&callback_lock); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } /* @@ -3262,7 +3263,7 @@ static int cpuset_can_fork(struct task_struct *task, struct css_set *cset) return 0; lockdep_assert_held(&cgroup_mutex); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); /* Check to see if task is allowed in the cpuset */ ret = cpuset_can_attach_check(cs); @@ -3283,7 +3284,7 @@ static int cpuset_can_fork(struct task_struct *task, struct css_set *cset) */ cs->attach_in_progress++; out_unlock: - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); return ret; } @@ -3299,11 +3300,11 @@ static void cpuset_cancel_fork(struct task_struct *task, struct css_set *cset) if (same_cs) return; - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); cs->attach_in_progress--; if (!cs->attach_in_progress) wake_up(&cpuset_attach_wq); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } /* @@ -3331,7 +3332,7 @@ static void cpuset_fork(struct task_struct *task) } 
/* CLONE_INTO_CGROUP */ - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); guarantee_online_mems(cs, &cpuset_attach_nodemask_to); cpuset_attach_task(cs, task); @@ -3339,7 +3340,7 @@ static void cpuset_fork(struct task_struct *task) if (!cs->attach_in_progress) wake_up(&cpuset_attach_wq); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } struct cgroup_subsys cpuset_cgrp_subsys = { @@ -3369,8 +3370,6 @@ struct cgroup_subsys cpuset_cgrp_subsys = { int __init cpuset_init(void) { - BUG_ON(percpu_init_rwsem(&cpuset_rwsem)); - BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL)); BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL)); BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL)); @@ -3442,7 +3441,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs, is_empty = cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); /* * Move tasks to the nearest ancestor with execution resources, @@ -3452,7 +3451,7 @@ hotplug_update_tasks_legacy(struct cpuset *cs, if (is_empty) remove_tasks_in_empty_cpuset(cs); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); } static void @@ -3503,14 +3502,14 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs, struct tmpmasks *tmp) retry: wait_event(cpuset_attach_wq, cs->attach_in_progress == 0); - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); /* * We have raced with task attaching. We wait until attaching * is finished, so we won't attach a task to an empty cpuset. */ if (cs->attach_in_progress) { - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); goto retry; } @@ -3604,7 +3603,7 @@ update_tasks: hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems, cpus_updated, mems_updated); - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); } /** @@ -3634,7 +3633,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) if (on_dfl && !alloc_cpumasks(NULL, &tmp)) ptmp = &tmp; - percpu_down_write(&cpuset_rwsem); + mutex_lock(&cpuset_mutex); /* fetch the available cpus/mems and find out which changed how */ cpumask_copy(&new_cpus, cpu_active_mask); @@ -3691,7 +3690,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work) update_tasks_nodemask(&top_cpuset); } - percpu_up_write(&cpuset_rwsem); + mutex_unlock(&cpuset_mutex); /* if cpus or mems changed, we need to propagate to descendants */ if (cpus_updated || mems_updated) { @@ -4101,7 +4100,7 @@ void __cpuset_memory_pressure_bump(void) * - Used for /proc//cpuset. * - No need to task_lock(tsk) on this tsk->cpuset reference, as it * doesn't really matter if tsk->cpuset changes after we read it, - * and we take cpuset_rwsem, keeping cpuset_attach() from changing it + * and we take cpuset_mutex, keeping cpuset_attach() from changing it * anyway. */ int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b23dcbeacdf3..8a8dbb2dac03 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7475,6 +7475,7 @@ static int __sched_setscheduler(struct task_struct *p, int reset_on_fork; int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; struct rq *rq; + bool cpuset_locked = false; /* The pi code expects interrupts enabled */ BUG_ON(pi && in_interrupt()); @@ -7524,8 +7525,14 @@ recheck: return retval; } - if (pi) - cpuset_read_lock(); + /* + * SCHED_DEADLINE bandwidth accounting relies on stable cpusets + * information. 
+ */ + if (dl_policy(policy) || dl_policy(p->policy)) { + cpuset_locked = true; + cpuset_lock(); + } /* * Make sure no PI-waiters arrive (or leave) while we are @@ -7601,8 +7608,8 @@ change: if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { policy = oldpolicy = -1; task_rq_unlock(rq, p, &rf); - if (pi) - cpuset_read_unlock(); + if (cpuset_locked) + cpuset_unlock(); goto recheck; } @@ -7669,7 +7676,8 @@ change: task_rq_unlock(rq, p, &rf); if (pi) { - cpuset_read_unlock(); + if (cpuset_locked) + cpuset_unlock(); rt_mutex_adjust_pi(p); } @@ -7681,8 +7689,8 @@ change: unlock: task_rq_unlock(rq, p, &rf); - if (pi) - cpuset_read_unlock(); + if (cpuset_locked) + cpuset_unlock(); return retval; } From d1b4262b78cc7638642833252ae92fc586854ffb Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Sun, 20 Aug 2023 16:24:14 +0100 Subject: [PATCH 103/123] sched/cpuset: Keep track of SCHED_DEADLINE task in cpusets commit 6c24849f5515e4966d94fa5279bdff4acf2e9489 upstream. Qais reported that iterating over all tasks when rebuilding root domains for finding out which ones are DEADLINE and need their bandwidth correctly restored on such root domains can be a costly operation (10+ ms delays on suspend-resume). To fix the problem keep track of the number of DEADLINE tasks belonging to each cpuset and then use this information (followup patch) to only perform the above iteration if DEADLINE tasks are actually present in the cpuset for which a corresponding root domain is being rebuilt. Reported-by: Qais Yousef (Google) Link: https://lore.kernel.org/lkml/20230206221428.2125324-1-qyousef@layalina.io/ Signed-off-by: Juri Lelli Reviewed-by: Waiman Long Signed-off-by: Tejun Heo Signed-off-by: Qais Yousef (Google) Signed-off-by: Greg Kroah-Hartman --- include/linux/cpuset.h | 4 ++++ kernel/cgroup/cgroup.c | 4 ++++ kernel/cgroup/cpuset.c | 25 +++++++++++++++++++++++++ kernel/sched/deadline.c | 14 ++++++++++++++ 4 files changed, 47 insertions(+) diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 355f796c5f07..0348dba5680e 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -71,6 +71,8 @@ extern void cpuset_init_smp(void); extern void cpuset_force_rebuild(void); extern void cpuset_update_active_cpus(void); extern void cpuset_wait_for_hotplug(void); +extern void inc_dl_tasks_cs(struct task_struct *task); +extern void dec_dl_tasks_cs(struct task_struct *task); extern void cpuset_lock(void); extern void cpuset_unlock(void); extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask); @@ -196,6 +198,8 @@ static inline void cpuset_update_active_cpus(void) static inline void cpuset_wait_for_hotplug(void) { } +static inline void inc_dl_tasks_cs(struct task_struct *task) { } +static inline void dec_dl_tasks_cs(struct task_struct *task) { } static inline void cpuset_lock(void) { } static inline void cpuset_unlock(void) { } diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 73f11e4db3a4..97ecca43386d 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include @@ -6681,6 +6682,9 @@ void cgroup_exit(struct task_struct *tsk) list_add_tail(&tsk->cg_list, &cset->dying_tasks); cset->nr_tasks--; + if (dl_task(tsk)) + dec_dl_tasks_cs(tsk); + WARN_ON_ONCE(cgroup_task_frozen(tsk)); if (unlikely(!(tsk->flags & PF_KTHREAD) && test_bit(CGRP_FREEZE, &task_dfl_cgroup(tsk)->flags))) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index a8a060fea074..5f57829bb986 100644 --- 
a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -193,6 +193,12 @@ struct cpuset { int use_parent_ecpus; int child_ecpus_count; + /* + * number of SCHED_DEADLINE tasks attached to this cpuset, so that we + * know when to rebuild associated root domain bandwidth information. + */ + int nr_deadline_tasks; + /* Invalid partition error code, not lock protected */ enum prs_errcode prs_err; @@ -245,6 +251,20 @@ static inline struct cpuset *parent_cs(struct cpuset *cs) return css_cs(cs->css.parent); } +void inc_dl_tasks_cs(struct task_struct *p) +{ + struct cpuset *cs = task_cs(p); + + cs->nr_deadline_tasks++; +} + +void dec_dl_tasks_cs(struct task_struct *p) +{ + struct cpuset *cs = task_cs(p); + + cs->nr_deadline_tasks--; +} + /* bits in struct cpuset flags field */ typedef enum { CS_ONLINE, @@ -2494,6 +2514,11 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) ret = security_task_setscheduler(task); if (ret) goto out_unlock; + + if (dl_task(task)) { + cs->nr_deadline_tasks++; + cpuset_attach_old_cs->nr_deadline_tasks--; + } } /* diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index f7d381b6c313..98154a93e05d 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -16,6 +16,8 @@ * Fabio Checconi */ +#include + /* * Default limits for DL period; on the top end we guard against small util * tasks still getting ridiculously long effective runtimes, on the bottom end we @@ -2597,6 +2599,12 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) if (task_on_rq_queued(p) && p->dl.dl_runtime) task_non_contending(p); + /* + * In case a task is setscheduled out from SCHED_DEADLINE we need to + * keep track of that on its cpuset (for correct bandwidth tracking). + */ + dec_dl_tasks_cs(p); + if (!task_on_rq_queued(p)) { /* * Inactive timer is armed. However, p is leaving DEADLINE and @@ -2637,6 +2645,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p) if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) put_task_struct(p); + /* + * In case a task is setscheduled to SCHED_DEADLINE we need to keep + * track of that on its cpuset (for correct bandwidth tracking). + */ + inc_dl_tasks_cs(p); + /* If p is not queued we will update its parameters at next wakeup. */ if (!task_on_rq_queued(p)) { add_rq_bw(&p->dl, &rq->dl); From 064b960dbe942114a397788a57474c40cea04185 Mon Sep 17 00:00:00 2001 From: Juri Lelli Date: Sun, 20 Aug 2023 16:24:15 +0100 Subject: [PATCH 104/123] cgroup/cpuset: Iterate only if DEADLINE tasks are present commit c0f78fd5edcf29b2822ac165f9248a6c165e8554 upstream. update_tasks_root_domain currently iterates over all tasks even if no DEADLINE task is present on the cpuset/root domain for which bandwidth accounting is being rebuilt. This has been reported to introduce 10+ ms delays on suspend-resume operations. Skip the costly iteration for cpusets that don't contain DEADLINE tasks. 
Reported-by: Qais Yousef (Google) Link: https://lore.kernel.org/lkml/20230206221428.2125324-1-qyousef@layalina.io/ Signed-off-by: Juri Lelli Reviewed-by: Waiman Long Signed-off-by: Tejun Heo Signed-off-by: Qais Yousef (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/cgroup/cpuset.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 5f57829bb986..eca07ff71656 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1092,6 +1092,9 @@ static void dl_update_tasks_root_domain(struct cpuset *cs) struct css_task_iter it; struct task_struct *task; + if (cs->nr_deadline_tasks == 0) + return; + css_task_iter_start(&cs->css, 0, &it); while ((task = css_task_iter_next(&it))) From f0135131bb0e5b02313898da809822f4edca7e4f Mon Sep 17 00:00:00 2001 From: Dietmar Eggemann Date: Sun, 20 Aug 2023 16:24:16 +0100 Subject: [PATCH 105/123] sched/deadline: Create DL BW alloc, free & check overflow interface commit 85989106feb734437e2d598b639991b9185a43a6 upstream. While moving a set of tasks between exclusive cpusets, cpuset_can_attach() -> task_can_attach() calls dl_cpu_busy(..., p) for DL BW overflow checking and per-task DL BW allocation on the destination root_domain for the DL tasks in this set. This approach has the issue of not freeing already allocated DL BW in the following error cases: (1) The set of tasks includes multiple DL tasks and DL BW overflow checking fails for one of the subsequent DL tasks. (2) Another controller next to the cpuset controller which is attached to the same cgroup fails in its can_attach(). To address this problem rework dl_cpu_busy(): (1) Split it into dl_bw_check_overflow() & dl_bw_alloc() and add a dedicated dl_bw_free(). (2) dl_bw_alloc() & dl_bw_free() take a `u64 dl_bw` parameter instead of a `struct task_struct *p` used in dl_cpu_busy(). This allows to allocate DL BW for a set of tasks too rather than only for a single task. 
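For orientation, a toy user-space model of the accounting semantics the split interface is meant to provide (illustrative only: the names are made up and the kernel tracks dl_bw as a per-root-domain fixed-point sum, not a double). An alloc reserves bandwidth for the whole set of migrating tasks only if the new total stays within capacity, and a free hands the reservation back, e.g. when a later can_attach() step fails:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for a root domain's DL bandwidth accounting. */
    struct toy_dl_bw {
        double total_bw;  /* currently reserved bandwidth */
        double bw_cap;    /* capacity of the root domain */
    };

    static bool toy_dl_bw_alloc(struct toy_dl_bw *b, double dl_bw)
    {
        if (b->total_bw + dl_bw > b->bw_cap)
            return false;        /* would overflow: reject the whole set */
        b->total_bw += dl_bw;    /* reserve for the set of tasks at once */
        return true;
    }

    static void toy_dl_bw_free(struct toy_dl_bw *b, double dl_bw)
    {
        b->total_bw -= dl_bw;    /* return the reservation */
    }

    int main(void)
    {
        struct toy_dl_bw rd = { .total_bw = 0.0, .bw_cap = 1.0 };
        /* two DL tasks migrating together: runtime/period of 0.3 and 0.4 */
        double set_bw = 0.3 + 0.4;

        if (!toy_dl_bw_alloc(&rd, set_bw))
            return 1;
        /* a later can_attach() callback fails, so undo the reservation */
        toy_dl_bw_free(&rd, set_bw);
        printf("reserved after rollback: %.1f\n", rd.total_bw);
        return 0;
    }
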
Signed-off-by: Dietmar Eggemann Signed-off-by: Juri Lelli Signed-off-by: Tejun Heo Signed-off-by: Qais Yousef (Google) Signed-off-by: Greg Kroah-Hartman --- include/linux/sched.h | 2 ++ kernel/sched/core.c | 4 ++-- kernel/sched/deadline.c | 53 +++++++++++++++++++++++++++++++---------- kernel/sched/sched.h | 2 +- 4 files changed, 45 insertions(+), 16 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index ffb6eb55cd13..b2e30fbbeef0 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1847,6 +1847,8 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags) extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus); +extern int dl_bw_alloc(int cpu, u64 dl_bw); +extern void dl_bw_free(int cpu, u64 dl_bw); #ifdef CONFIG_SMP extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask); extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8a8dbb2dac03..6963fc4ef897 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9108,7 +9108,7 @@ int task_can_attach(struct task_struct *p, if (unlikely(cpu >= nr_cpu_ids)) return -EINVAL; - ret = dl_cpu_busy(cpu, p); + ret = dl_bw_alloc(cpu, p->dl.dl_bw); } out: @@ -9393,7 +9393,7 @@ static void cpuset_cpu_active(void) static int cpuset_cpu_inactive(unsigned int cpu) { if (!cpuhp_tasks_frozen) { - int ret = dl_cpu_busy(cpu, NULL); + int ret = dl_bw_check_overflow(cpu); if (ret) return ret; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 98154a93e05d..9ce9810861ba 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -3037,26 +3037,38 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, return ret; } -int dl_cpu_busy(int cpu, struct task_struct *p) +enum dl_bw_request { + dl_bw_req_check_overflow = 0, + dl_bw_req_alloc, + dl_bw_req_free +}; + +static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw) { - unsigned long flags, cap; + unsigned long flags; struct dl_bw *dl_b; - bool overflow; + bool overflow = 0; rcu_read_lock_sched(); dl_b = dl_bw_of(cpu); raw_spin_lock_irqsave(&dl_b->lock, flags); - cap = dl_bw_capacity(cpu); - overflow = __dl_overflow(dl_b, cap, 0, p ? p->dl.dl_bw : 0); - if (!overflow && p) { - /* - * We reserve space for this task in the destination - * root_domain, as we can't fail after this point. - * We will free resources in the source root_domain - * later on (see set_cpus_allowed_dl()). - */ - __dl_add(dl_b, p->dl.dl_bw, dl_bw_cpus(cpu)); + if (req == dl_bw_req_free) { + __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu)); + } else { + unsigned long cap = dl_bw_capacity(cpu); + + overflow = __dl_overflow(dl_b, cap, 0, dl_bw); + + if (req == dl_bw_req_alloc && !overflow) { + /* + * We reserve space in the destination + * root_domain, as we can't fail after this point. + * We will free resources in the source root_domain + * later on (see set_cpus_allowed_dl()). + */ + __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu)); + } } raw_spin_unlock_irqrestore(&dl_b->lock, flags); @@ -3064,6 +3076,21 @@ int dl_cpu_busy(int cpu, struct task_struct *p) return overflow ? 
-EBUSY : 0; } + +int dl_bw_check_overflow(int cpu) +{ + return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0); +} + +int dl_bw_alloc(int cpu, u64 dl_bw) +{ + return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw); +} + +void dl_bw_free(int cpu, u64 dl_bw) +{ + dl_bw_manage(dl_bw_req_free, cpu, dl_bw); +} #endif #ifdef CONFIG_SCHED_DEBUG diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d6d488e8eb55..b62d53d7c264 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -330,7 +330,7 @@ extern void __getparam_dl(struct task_struct *p, struct sched_attr *attr); extern bool __checkparam_dl(const struct sched_attr *attr); extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); -extern int dl_cpu_busy(int cpu, struct task_struct *p); +extern int dl_bw_check_overflow(int cpu); #ifdef CONFIG_CGROUP_SCHED From d3ff67076bed6b09be43faec7339cc51dfba451d Mon Sep 17 00:00:00 2001 From: Dietmar Eggemann Date: Sun, 20 Aug 2023 16:24:17 +0100 Subject: [PATCH 106/123] cgroup/cpuset: Free DL BW in case can_attach() fails commit 2ef269ef1ac006acf974793d975539244d77b28f upstream. cpuset_can_attach() can fail. Postpone DL BW allocation until all tasks have been checked. DL BW is not allocated per-task but as a sum over all DL tasks migrating. If multiple controllers are attached to the cgroup next to the cpuset controller a non-cpuset can_attach() can fail. In this case free DL BW in cpuset_cancel_attach(). Finally, update cpuset DL task count (nr_deadline_tasks) only in cpuset_attach(). Suggested-by: Waiman Long Signed-off-by: Dietmar Eggemann Signed-off-by: Juri Lelli Reviewed-by: Waiman Long Signed-off-by: Tejun Heo Signed-off-by: Qais Yousef (Google) Signed-off-by: Greg Kroah-Hartman --- include/linux/sched.h | 2 +- kernel/cgroup/cpuset.c | 53 ++++++++++++++++++++++++++++++++++++++---- kernel/sched/core.c | 17 ++------------ 3 files changed, 51 insertions(+), 21 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index b2e30fbbeef0..0cac69902ec5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1846,7 +1846,7 @@ current_restore_flags(unsigned long orig_flags, unsigned long flags) } extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); -extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_effective_cpus); +extern int task_can_attach(struct task_struct *p); extern int dl_bw_alloc(int cpu, u64 dl_bw); extern void dl_bw_free(int cpu, u64 dl_bw); #ifdef CONFIG_SMP diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index eca07ff71656..db3e05b6b4dd 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -198,6 +198,8 @@ struct cpuset { * know when to rebuild associated root domain bandwidth information. 
*/ int nr_deadline_tasks; + int nr_migrate_dl_tasks; + u64 sum_migrate_dl_bw; /* Invalid partition error code, not lock protected */ enum prs_errcode prs_err; @@ -2491,16 +2493,23 @@ static int cpuset_can_attach_check(struct cpuset *cs) return 0; } +static void reset_migrate_dl_data(struct cpuset *cs) +{ + cs->nr_migrate_dl_tasks = 0; + cs->sum_migrate_dl_bw = 0; +} + /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */ static int cpuset_can_attach(struct cgroup_taskset *tset) { struct cgroup_subsys_state *css; - struct cpuset *cs; + struct cpuset *cs, *oldcs; struct task_struct *task; int ret; /* used later by cpuset_attach() */ cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css)); + oldcs = cpuset_attach_old_cs; cs = css_cs(css); mutex_lock(&cpuset_mutex); @@ -2511,7 +2520,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) goto out_unlock; cgroup_taskset_for_each(task, css, tset) { - ret = task_can_attach(task, cs->effective_cpus); + ret = task_can_attach(task); if (ret) goto out_unlock; ret = security_task_setscheduler(task); @@ -2519,11 +2528,31 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) goto out_unlock; if (dl_task(task)) { - cs->nr_deadline_tasks++; - cpuset_attach_old_cs->nr_deadline_tasks--; + cs->nr_migrate_dl_tasks++; + cs->sum_migrate_dl_bw += task->dl.dl_bw; } } + if (!cs->nr_migrate_dl_tasks) + goto out_success; + + if (!cpumask_intersects(oldcs->effective_cpus, cs->effective_cpus)) { + int cpu = cpumask_any_and(cpu_active_mask, cs->effective_cpus); + + if (unlikely(cpu >= nr_cpu_ids)) { + reset_migrate_dl_data(cs); + ret = -EINVAL; + goto out_unlock; + } + + ret = dl_bw_alloc(cpu, cs->sum_migrate_dl_bw); + if (ret) { + reset_migrate_dl_data(cs); + goto out_unlock; + } + } + +out_success: /* * Mark attach is in progress. This makes validate_change() fail * changes which zero cpus/mems_allowed. @@ -2546,6 +2575,14 @@ static void cpuset_cancel_attach(struct cgroup_taskset *tset) cs->attach_in_progress--; if (!cs->attach_in_progress) wake_up(&cpuset_attach_wq); + + if (cs->nr_migrate_dl_tasks) { + int cpu = cpumask_any(cs->effective_cpus); + + dl_bw_free(cpu, cs->sum_migrate_dl_bw); + reset_migrate_dl_data(cs); + } + mutex_unlock(&cpuset_mutex); } @@ -2623,6 +2660,12 @@ static void cpuset_attach(struct cgroup_taskset *tset) cs->old_mems_allowed = cpuset_attach_nodemask_to; + if (cs->nr_migrate_dl_tasks) { + cs->nr_deadline_tasks += cs->nr_migrate_dl_tasks; + oldcs->nr_deadline_tasks -= cs->nr_migrate_dl_tasks; + reset_migrate_dl_data(cs); + } + cs->attach_in_progress--; if (!cs->attach_in_progress) wake_up(&cpuset_attach_wq); @@ -3298,7 +3341,7 @@ static int cpuset_can_fork(struct task_struct *task, struct css_set *cset) if (ret) goto out_unlock; - ret = task_can_attach(task, cs->effective_cpus); + ret = task_can_attach(task); if (ret) goto out_unlock; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6963fc4ef897..0f6a92737c91 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -9083,8 +9083,7 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur, return ret; } -int task_can_attach(struct task_struct *p, - const struct cpumask *cs_effective_cpus) +int task_can_attach(struct task_struct *p) { int ret = 0; @@ -9097,21 +9096,9 @@ int task_can_attach(struct task_struct *p, * success of set_cpus_allowed_ptr() on all attached tasks * before cpus_mask may be changed. 
*/ - if (p->flags & PF_NO_SETAFFINITY) { + if (p->flags & PF_NO_SETAFFINITY) ret = -EINVAL; - goto out; - } - if (dl_task(p) && !cpumask_intersects(task_rq(p)->rd->span, - cs_effective_cpus)) { - int cpu = cpumask_any_and(cpu_active_mask, cs_effective_cpus); - - if (unlikely(cpu >= nr_cpu_ids)) - return -EINVAL; - ret = dl_bw_alloc(cpu, p->dl.dl_bw); - } - -out: return ret; } From f016326d31d010433b2a1a08a4856c214ae829eb Mon Sep 17 00:00:00 2001 From: Sanjay R Mehta Date: Wed, 2 Aug 2023 06:11:49 -0500 Subject: [PATCH 107/123] thunderbolt: Fix Thunderbolt 3 display flickering issue on 2nd hot plug onwards commit 583893a66d731f5da010a3fa38a0460e05f0149b upstream. Previously, on unplug events, the TMU mode was disabled first followed by the Time Synchronization Handshake, irrespective of whether the tb_switch_tmu_rate_write() API was successful or not. However, this caused a problem with Thunderbolt 3 (TBT3) devices, as the TSPacketInterval bits were always enabled by default, leading the host router to assume that the device router's TMU was already enabled and preventing it from initiating the Time Synchronization Handshake. As a result, TBT3 monitors experienced display flickering from the second hot plug onwards. To address this issue, we have modified the code to only disable the Time Synchronization Handshake during TMU disable if the tb_switch_tmu_rate_write() function is successful. This ensures that the TBT3 devices function correctly and eliminates the display flickering issue. Co-developed-by: Sanath S Signed-off-by: Sanath S Signed-off-by: Sanjay R Mehta Cc: stable@vger.kernel.org Signed-off-by: Mika Westerberg [ USB4v2 introduced support for uni-directional TMU mode as part of d49b4f043d63 ("thunderbolt: Add support for enhanced uni-directional TMU mode") This is not a stable candidate commit, so adjust the code for backport. ] Signed-off-by: Mario Limonciello Signed-off-by: Greg Kroah-Hartman --- drivers/thunderbolt/tmu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c index 626aca3124b1..d9544600b386 100644 --- a/drivers/thunderbolt/tmu.c +++ b/drivers/thunderbolt/tmu.c @@ -415,7 +415,8 @@ int tb_switch_tmu_disable(struct tb_switch *sw) * uni-directional mode and we don't want to change it's TMU * mode. */ - tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF); + ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF); + return ret; tb_port_tmu_time_sync_disable(up); ret = tb_port_tmu_time_sync_disable(down); From b7803afc77bee77e7df6662e1959df0038fbaac3 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Mon, 20 Feb 2023 12:14:13 +0800 Subject: [PATCH 108/123] ublk: remove check IO_URING_F_SQE128 in ublk_ch_uring_cmd commit 9c7c4bc986932218fd0df9d2a100509772028fb1 upstream. sizeof(struct ublksrv_io_cmd) is 16bytes, which can be held in 64byte SQE, so not necessary to check IO_URING_F_SQE128. With this change, we get chance to save half SQ ring memory. 
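If one wanted to encode that size assumption explicitly, a build-time assert along these lines would capture it (sketch only; 16 bytes is the inline command area left in a standard 64-byte SQE):

	/* the ublk command payload must fit the 16-byte inline cmd area
	 * of a plain 64-byte SQE */
	BUILD_BUG_ON(sizeof(struct ublksrv_io_cmd) > 16);
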
Fixed: 71f28f3136af ("ublk_drv: add io_uring based userspace block driver") Signed-off-by: Ming Lei Link: https://lore.kernel.org/r/20230220041413.1524335-1-ming.lei@redhat.com Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/block/ublk_drv.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 4459cfbdbcb1..c2f0f74193f0 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -1223,9 +1223,6 @@ static int __ublk_ch_uring_cmd(struct io_uring_cmd *cmd, __func__, cmd->cmd_op, ub_cmd->q_id, tag, ub_cmd->result); - if (!(issue_flags & IO_URING_F_SQE128)) - goto out; - if (ub_cmd->q_id >= ub->dev_info.nr_hw_queues) goto out; From f67e3a725b4975e3be3fda61ea6e4978213dcc2f Mon Sep 17 00:00:00 2001 From: Oliver Hartkopp Date: Mon, 21 Aug 2023 16:45:47 +0200 Subject: [PATCH 109/123] can: raw: add missing refcount for memory leak fix commit c275a176e4b69868576e543409927ae75e3a3288 upstream. Commit ee8b94c8510c ("can: raw: fix receiver memory leak") introduced a new reference to the CAN netdevice that has assigned CAN filters. But this new ro->dev reference did not maintain its own refcount which lead to another KASAN use-after-free splat found by Eric Dumazet. This patch ensures a proper refcount for the CAN nedevice. Fixes: ee8b94c8510c ("can: raw: fix receiver memory leak") Reported-by: Eric Dumazet Cc: Ziyang Xuan Signed-off-by: Oliver Hartkopp Link: https://lore.kernel.org/r/20230821144547.6658-3-socketcan@hartkopp.net Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/can/raw.c | 35 ++++++++++++++++++++++++++--------- 1 file changed, 26 insertions(+), 9 deletions(-) diff --git a/net/can/raw.c b/net/can/raw.c index 0dd3259357a3..8c104339d538 100644 --- a/net/can/raw.c +++ b/net/can/raw.c @@ -85,6 +85,7 @@ struct raw_sock { int bound; int ifindex; struct net_device *dev; + netdevice_tracker dev_tracker; struct list_head notifier; int loopback; int recv_own_msgs; @@ -285,8 +286,10 @@ static void raw_notify(struct raw_sock *ro, unsigned long msg, case NETDEV_UNREGISTER: lock_sock(sk); /* remove current filters & unregister */ - if (ro->bound) + if (ro->bound) { raw_disable_allfilters(dev_net(dev), dev, sk); + netdev_put(dev, &ro->dev_tracker); + } if (ro->count > 1) kfree(ro->filter); @@ -391,10 +394,12 @@ static int raw_release(struct socket *sock) /* remove current filters & unregister */ if (ro->bound) { - if (ro->dev) + if (ro->dev) { raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); - else + netdev_put(ro->dev, &ro->dev_tracker); + } else { raw_disable_allfilters(sock_net(sk), NULL, sk); + } } if (ro->count > 1) @@ -445,10 +450,10 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) goto out; } if (dev->type != ARPHRD_CAN) { - dev_put(dev); err = -ENODEV; - goto out; + goto out_put_dev; } + if (!(dev->flags & IFF_UP)) notify_enetdown = 1; @@ -456,7 +461,9 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) /* filters set by default/setsockopt */ err = raw_enable_allfilters(sock_net(sk), dev, sk); - dev_put(dev); + if (err) + goto out_put_dev; + } else { ifindex = 0; @@ -467,18 +474,28 @@ static int raw_bind(struct socket *sock, struct sockaddr *uaddr, int len) if (!err) { if (ro->bound) { /* unregister old filters */ - if (ro->dev) + if (ro->dev) { raw_disable_allfilters(dev_net(ro->dev), ro->dev, sk); - else + /* drop reference to old ro->dev */ + netdev_put(ro->dev, &ro->dev_tracker); + } else { 
raw_disable_allfilters(sock_net(sk), NULL, sk); + } } ro->ifindex = ifindex; ro->bound = 1; + /* bind() ok -> hold a reference for new ro->dev */ ro->dev = dev; + if (ro->dev) + netdev_hold(ro->dev, &ro->dev_tracker, GFP_KERNEL); } - out: +out_put_dev: + /* remove potential reference from dev_get_by_index() */ + if (dev) + dev_put(dev); +out: release_sock(sk); rtnl_unlock(); From bd20e20c4d64e131498ec91f1b066cd4708088b3 Mon Sep 17 00:00:00 2001 From: Yin Fengwei Date: Tue, 8 Aug 2023 10:09:17 +0800 Subject: [PATCH 110/123] madvise:madvise_free_pte_range(): don't use mapcount() against large folio for sharing check commit 0e0e9bd5f7b9d40fd03b70092367247d52da1db0 upstream. Commit 98b211d6415f ("madvise: convert madvise_free_pte_range() to use a folio") replaced the page_mapcount() with folio_mapcount() to check whether the folio is shared by other mapping. It's not correct for large folios. folio_mapcount() returns the total mapcount of large folio which is not suitable to detect whether the folio is shared. Use folio_estimated_sharers() which returns a estimated number of shares. That means it's not 100% correct. It should be OK for madvise case here. User-visible effects is that the THP is skipped when user call madvise. But the correct behavior is THP should be split and processed then. NOTE: this change is a temporary fix to reduce the user-visible effects before the long term fix from David is ready. Link: https://lkml.kernel.org/r/20230808020917.2230692-4-fengwei.yin@intel.com Fixes: 98b211d6415f ("madvise: convert madvise_free_pte_range() to use a folio") Signed-off-by: Yin Fengwei Reviewed-by: Yu Zhao Reviewed-by: Ryan Roberts Cc: David Hildenbrand Cc: Kefeng Wang Cc: Matthew Wilcox Cc: Minchan Kim Cc: Vishal Moola (Oracle) Cc: Yang Shi Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- include/linux/mm.h | 19 +++++++++++++++++++ mm/madvise.c | 4 ++-- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index f83a1c9ec8e4..104ec00823da 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1727,6 +1727,25 @@ static inline size_t folio_size(struct folio *folio) return PAGE_SIZE << folio_order(folio); } +/** + * folio_estimated_sharers - Estimate the number of sharers of a folio. + * @folio: The folio. + * + * folio_estimated_sharers() aims to serve as a function to efficiently + * estimate the number of processes sharing a folio. This is done by + * looking at the precise mapcount of the first subpage in the folio, and + * assuming the other subpages are the same. This may not be true for large + * folios. If you want exact mapcounts for exact calculations, look at + * page_mapcount() or folio_total_mapcount(). + * + * Return: The estimated number of processes sharing a folio. + */ +static inline int folio_estimated_sharers(struct folio *folio) +{ + return page_mapcount(folio_page(folio, 0)); +} + + #ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE static inline int arch_make_page_accessible(struct page *page) { diff --git a/mm/madvise.c b/mm/madvise.c index d03e149ffe6e..5973399b2f9b 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -654,8 +654,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, * deactivate all pages. 
*/ if (folio_test_large(folio)) { - if (folio_mapcount(folio) != 1) - goto out; + if (folio_estimated_sharers(folio) != 1) + break; folio_get(folio); if (!folio_trylock(folio)) { folio_put(folio); From 774cb3de7ac93b1e3cdbcf29cc0e908b12796473 Mon Sep 17 00:00:00 2001 From: Zhu Wang Date: Sat, 19 Aug 2023 08:39:41 +0000 Subject: [PATCH 111/123] scsi: snic: Fix double free in snic_tgt_create() commit 1bd3a76880b2bce017987cf53780b372cf59528e upstream. Commit 41320b18a0e0 ("scsi: snic: Fix possible memory leak if device_add() fails") fixed the memory leak caused by dev_set_name() when device_add() failed. However, it did not consider that 'tgt' has already been released when put_device(&tgt->dev) is called. Remove kfree(tgt) in the error path to avoid double free of 'tgt' and move put_device(&tgt->dev) after the removed kfree(tgt) to avoid a use-after-free. Fixes: 41320b18a0e0 ("scsi: snic: Fix possible memory leak if device_add() fails") Signed-off-by: Zhu Wang Link: https://lore.kernel.org/r/20230819083941.164365-1-wangzhu9@huawei.com Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/snic/snic_disc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index cd27562ec922..6c529b37f3b4 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -303,12 +303,11 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) "Snic Tgt: device_add, with err = %d\n", ret); - put_device(&tgt->dev); put_device(&snic->shost->shost_gendev); spin_lock_irqsave(snic->shost->host_lock, flags); list_del(&tgt->list); spin_unlock_irqrestore(snic->shost->host_lock, flags); - kfree(tgt); + put_device(&tgt->dev); tgt = NULL; return tgt; From 70461151d0eb765976faf79424385dfdb3e1e468 Mon Sep 17 00:00:00 2001 From: Zhu Wang Date: Tue, 22 Aug 2023 01:52:54 +0000 Subject: [PATCH 112/123] scsi: core: raid_class: Remove raid_component_add() commit 60c5fd2e8f3c42a5abc565ba9876ead1da5ad2b7 upstream. The raid_component_add() function was added to the kernel tree via patch "[SCSI] embryonic RAID class" (2005). Remove this function since it never has had any callers in the Linux kernel. And also raid_component_release() is only used in raid_component_add(), so it is also removed. Signed-off-by: Zhu Wang Link: https://lore.kernel.org/r/20230822015254.184270-1-wangzhu9@huawei.com Reviewed-by: Bart Van Assche Fixes: 04b5b5cb0136 ("scsi: core: Fix possible memory leak if device_add() fails") Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/raid_class.c | 48 -------------------------------------- include/linux/raid_class.h | 4 ---- 2 files changed, 52 deletions(-) diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index 711252e52d8e..95a86e0dfd77 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -209,54 +209,6 @@ raid_attr_ro_state(level); raid_attr_ro_fn(resync); raid_attr_ro_state_fn(state); -static void raid_component_release(struct device *dev) -{ - struct raid_component *rc = - container_of(dev, struct raid_component, dev); - dev_printk(KERN_ERR, rc->dev.parent, "COMPONENT RELEASE\n"); - put_device(rc->dev.parent); - kfree(rc); -} - -int raid_component_add(struct raid_template *r,struct device *raid_dev, - struct device *component_dev) -{ - struct device *cdev = - attribute_container_find_class_device(&r->raid_attrs.ac, - raid_dev); - struct raid_component *rc; - struct raid_data *rd = dev_get_drvdata(cdev); - int err; - - rc = kzalloc(sizeof(*rc), GFP_KERNEL); - if (!rc) - return -ENOMEM; - - INIT_LIST_HEAD(&rc->node); - device_initialize(&rc->dev); - rc->dev.release = raid_component_release; - rc->dev.parent = get_device(component_dev); - rc->num = rd->component_count++; - - dev_set_name(&rc->dev, "component-%d", rc->num); - list_add_tail(&rc->node, &rd->component_list); - rc->dev.class = &raid_class.class; - err = device_add(&rc->dev); - if (err) - goto err_out; - - return 0; - -err_out: - put_device(&rc->dev); - list_del(&rc->node); - rd->component_count--; - put_device(component_dev); - kfree(rc); - return err; -} -EXPORT_SYMBOL(raid_component_add); - struct raid_template * raid_class_attach(struct raid_function_template *ft) { diff --git a/include/linux/raid_class.h b/include/linux/raid_class.h index 5cdfcb873a8f..772d45b2a60a 100644 --- a/include/linux/raid_class.h +++ b/include/linux/raid_class.h @@ -77,7 +77,3 @@ DEFINE_RAID_ATTRIBUTE(enum raid_state, state) struct raid_template *raid_class_attach(struct raid_function_template *); void raid_class_release(struct raid_template *); - -int __must_check raid_component_add(struct raid_template *, struct device *, - struct device *); - From 0ba9a242a6b37be06e8fc5be4ecbbc254df37ff0 Mon Sep 17 00:00:00 2001 From: Biju Das Date: Tue, 25 Jul 2023 18:51:40 +0100 Subject: [PATCH 113/123] clk: Fix undefined reference to `clk_rate_exclusive_{get,put}' [ Upstream commit 2746f13f6f1df7999001d6595b16f789ecc28ad1 ] The COMMON_CLK config is not enabled in some of the architectures. This causes below build issues: pwm-rz-mtu3.c:(.text+0x114): undefined reference to `clk_rate_exclusive_put' pwm-rz-mtu3.c:(.text+0x32c): undefined reference to `clk_rate_exclusive_get' Fix these issues by moving clk_rate_exclusive_{get,put} inside COMMON_CLK code block, as clk.c is enabled by COMMON_CLK. 
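After the move, the header ends up shaped roughly like this (condensed sketch, kernel-doc omitted):

	#ifdef CONFIG_COMMON_CLK
	int clk_rate_exclusive_get(struct clk *clk);
	void clk_rate_exclusive_put(struct clk *clk);
	#else
	static inline int clk_rate_exclusive_get(struct clk *clk)
	{
		return 0;
	}

	static inline void clk_rate_exclusive_put(struct clk *clk) {}
	#endif

so callers such as pwm-rz-mtu3.c always see a definition, with or without COMMON_CLK.
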
Fixes: 55e9b8b7b806 ("clk: add clk_rate_exclusive api") Reported-by: kernel test robot Closes: https://lore.kernel.org/all/202307251752.vLfmmhYm-lkp@intel.com/ Signed-off-by: Biju Das Link: https://lore.kernel.org/r/20230725175140.361479-1-biju.das.jz@bp.renesas.com Signed-off-by: Stephen Boyd Signed-off-by: Sasha Levin --- include/linux/clk.h | 80 ++++++++++++++++++++++----------------------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/include/linux/clk.h b/include/linux/clk.h index 1ef013324237..06f1b292f8a0 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -183,6 +183,39 @@ int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale); */ bool clk_is_match(const struct clk *p, const struct clk *q); +/** + * clk_rate_exclusive_get - get exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to get exclusive control over the rate of a + * provider. It prevents any other consumer to execute, even indirectly, + * opereation which could alter the rate of the provider or cause glitches + * + * If exlusivity is claimed more than once on clock, even by the same driver, + * the rate effectively gets locked as exclusivity can't be preempted. + * + * Must not be called from within atomic context. + * + * Returns success (0) or negative errno. + */ +int clk_rate_exclusive_get(struct clk *clk); + +/** + * clk_rate_exclusive_put - release exclusivity over the rate control of a + * producer + * @clk: clock source + * + * This function allows drivers to release the exclusivity it previously got + * from clk_rate_exclusive_get() + * + * The caller must balance the number of clk_rate_exclusive_get() and + * clk_rate_exclusive_put() calls. + * + * Must not be called from within atomic context. + */ +void clk_rate_exclusive_put(struct clk *clk); + #else static inline int clk_notifier_register(struct clk *clk, @@ -236,6 +269,13 @@ static inline bool clk_is_match(const struct clk *p, const struct clk *q) return p == q; } +static inline int clk_rate_exclusive_get(struct clk *clk) +{ + return 0; +} + +static inline void clk_rate_exclusive_put(struct clk *clk) {} + #endif #ifdef CONFIG_HAVE_CLK_PREPARE @@ -583,38 +623,6 @@ struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); */ struct clk *devm_get_clk_from_child(struct device *dev, struct device_node *np, const char *con_id); -/** - * clk_rate_exclusive_get - get exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to get exclusive control over the rate of a - * provider. It prevents any other consumer to execute, even indirectly, - * opereation which could alter the rate of the provider or cause glitches - * - * If exlusivity is claimed more than once on clock, even by the same driver, - * the rate effectively gets locked as exclusivity can't be preempted. - * - * Must not be called from within atomic context. - * - * Returns success (0) or negative errno. - */ -int clk_rate_exclusive_get(struct clk *clk); - -/** - * clk_rate_exclusive_put - release exclusivity over the rate control of a - * producer - * @clk: clock source - * - * This function allows drivers to release the exclusivity it previously got - * from clk_rate_exclusive_get() - * - * The caller must balance the number of clk_rate_exclusive_get() and - * clk_rate_exclusive_put() calls. - * - * Must not be called from within atomic context. 
- */ -void clk_rate_exclusive_put(struct clk *clk); /** * clk_enable - inform the system when the clock source should be running. @@ -974,14 +982,6 @@ static inline void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks) {} static inline void devm_clk_put(struct device *dev, struct clk *clk) {} - -static inline int clk_rate_exclusive_get(struct clk *clk) -{ - return 0; -} - -static inline void clk_rate_exclusive_put(struct clk *clk) {} - static inline int clk_enable(struct clk *clk) { return 0; From 4a75bf3f6f4f276a58c4647433ec8676af59ac7c Mon Sep 17 00:00:00 2001 From: Biju Das Date: Tue, 15 Aug 2023 14:15:56 +0100 Subject: [PATCH 114/123] pinctrl: renesas: rzg2l: Fix NULL pointer dereference in rzg2l_dt_subnode_to_map() [ Upstream commit 661efa2284bbc2338da0424e219603f034072c74 ] Fix the below random NULL pointer crash during boot by serializing pinctrl group and function creation/remove calls in rzg2l_dt_subnode_to_map() with mutex lock. Crash log: pc : __pi_strcmp+0x20/0x140 lr : pinmux_func_name_to_selector+0x68/0xa4 Call trace: __pi_strcmp+0x20/0x140 pinmux_generic_add_function+0x34/0xcc rzg2l_dt_subnode_to_map+0x314/0x44c rzg2l_dt_node_to_map+0x164/0x194 pinctrl_dt_to_map+0x218/0x37c create_pinctrl+0x70/0x3d8 While at it, add comments for bitmap_lock and lock. Fixes: c4c4637eb57f ("pinctrl: renesas: Add RZ/G2L pin and gpio controller driver") Tested-by: Chris Paterson Signed-off-by: Biju Das Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20230815131558.33787-2-biju.das.jz@bp.renesas.com Signed-off-by: Geert Uytterhoeven Signed-off-by: Sasha Levin --- drivers/pinctrl/renesas/pinctrl-rzg2l.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c index fd11d28e5a1e..2a617832a7e6 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c +++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -146,10 +147,11 @@ struct rzg2l_pinctrl { struct gpio_chip gpio_chip; struct pinctrl_gpio_range gpio_range; DECLARE_BITMAP(tint_slot, RZG2L_TINT_MAX_INTERRUPT); - spinlock_t bitmap_lock; + spinlock_t bitmap_lock; /* protect tint_slot bitmap */ unsigned int hwirq[RZG2L_TINT_MAX_INTERRUPT]; - spinlock_t lock; + spinlock_t lock; /* lock read/write registers */ + struct mutex mutex; /* serialize adding groups and functions */ }; static const unsigned int iolh_groupa_mA[] = { 2, 4, 8, 12 }; @@ -359,11 +361,13 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev, name = np->name; } + mutex_lock(&pctrl->mutex); + /* Register a single pin group listing all the pins we read from DT */ gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL); if (gsel < 0) { ret = gsel; - goto done; + goto unlock; } /* @@ -377,6 +381,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev, goto remove_group; } + mutex_unlock(&pctrl->mutex); + maps[idx].type = PIN_MAP_TYPE_MUX_GROUP; maps[idx].data.mux.group = name; maps[idx].data.mux.function = name; @@ -388,6 +394,8 @@ static int rzg2l_dt_subnode_to_map(struct pinctrl_dev *pctldev, remove_group: pinctrl_generic_remove_group(pctldev, gsel); +unlock: + mutex_unlock(&pctrl->mutex); done: *index = idx; kfree(configs); @@ -1501,6 +1509,7 @@ static int rzg2l_pinctrl_probe(struct platform_device *pdev) spin_lock_init(&pctrl->lock); spin_lock_init(&pctrl->bitmap_lock); + mutex_init(&pctrl->mutex); platform_set_drvdata(pdev, pctrl); From 
3fb1b959af17a723491fb8776562274d229c9bfb Mon Sep 17 00:00:00 2001 From: Biju Das Date: Tue, 15 Aug 2023 14:15:57 +0100 Subject: [PATCH 115/123] pinctrl: renesas: rzv2m: Fix NULL pointer dereference in rzv2m_dt_subnode_to_map() [ Upstream commit f982b9d57e7f834138fc908804fe66f646f2b108 ] Fix the below random NULL pointer crash during boot by serializing pinctrl group and function creation/remove calls in rzv2m_dt_subnode_to_map() with mutex lock. Crash logs: pc : __pi_strcmp+0x20/0x140 lr : pinmux_func_name_to_selector+0x68/0xa4 Call trace: __pi_strcmp+0x20/0x140 pinmux_generic_add_function+0x34/0xcc rzv2m_dt_subnode_to_map+0x2e4/0x418 rzv2m_dt_node_to_map+0x15c/0x18c pinctrl_dt_to_map+0x218/0x37c create_pinctrl+0x70/0x3d8 While at it, add a comment for lock. Fixes: 92a9b8252576 ("pinctrl: renesas: Add RZ/V2M pin and gpio controller driver") Signed-off-by: Biju Das Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20230815131558.33787-3-biju.das.jz@bp.renesas.com Signed-off-by: Geert Uytterhoeven Signed-off-by: Sasha Levin --- drivers/pinctrl/renesas/pinctrl-rzv2m.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/pinctrl/renesas/pinctrl-rzv2m.c b/drivers/pinctrl/renesas/pinctrl-rzv2m.c index 35f382b055e8..2858800288bb 100644 --- a/drivers/pinctrl/renesas/pinctrl-rzv2m.c +++ b/drivers/pinctrl/renesas/pinctrl-rzv2m.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -121,7 +122,8 @@ struct rzv2m_pinctrl { struct gpio_chip gpio_chip; struct pinctrl_gpio_range gpio_range; - spinlock_t lock; + spinlock_t lock; /* lock read/write registers */ + struct mutex mutex; /* serialize adding groups and functions */ }; static const unsigned int drv_1_8V_group2_uA[] = { 1800, 3800, 7800, 11000 }; @@ -320,11 +322,13 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev, name = np->name; } + mutex_lock(&pctrl->mutex); + /* Register a single pin group listing all the pins we read from DT */ gsel = pinctrl_generic_add_group(pctldev, name, pins, num_pinmux, NULL); if (gsel < 0) { ret = gsel; - goto done; + goto unlock; } /* @@ -338,6 +342,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev, goto remove_group; } + mutex_unlock(&pctrl->mutex); + maps[idx].type = PIN_MAP_TYPE_MUX_GROUP; maps[idx].data.mux.group = name; maps[idx].data.mux.function = name; @@ -349,6 +355,8 @@ static int rzv2m_dt_subnode_to_map(struct pinctrl_dev *pctldev, remove_group: pinctrl_generic_remove_group(pctldev, gsel); +unlock: + mutex_unlock(&pctrl->mutex); done: *index = idx; kfree(configs); @@ -1070,6 +1078,7 @@ static int rzv2m_pinctrl_probe(struct platform_device *pdev) } spin_lock_init(&pctrl->lock); + mutex_init(&pctrl->mutex); platform_set_drvdata(pdev, pctrl); From 6ed06b94f68363f4bc53608623059170d3689f4e Mon Sep 17 00:00:00 2001 From: Biju Das Date: Tue, 15 Aug 2023 14:15:58 +0100 Subject: [PATCH 116/123] pinctrl: renesas: rza2: Add lock around pinctrl_generic{{add,remove}_group,{add,remove}_function} [ Upstream commit 8fcc1c40b747069644db6102c1d84c942c9d4d86 ] The pinctrl group and function creation/remove calls expect caller to take care of locking. Add lock around these functions. 
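The locking pattern applied to the map-creation path, condensed (sketch; error unwinding trimmed and the pinmux_generic_add_function() arguments elided):

	mutex_lock(&priv->mutex);

	gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL);
	if (gsel < 0) {
		mutex_unlock(&priv->mutex);
		return gsel;
	}

	fsel = pinmux_generic_add_function(pctldev, np->name, ...);

	mutex_unlock(&priv->mutex);
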
Fixes: b59d0e782706 ("pinctrl: Add RZ/A2 pin and gpio controller") Signed-off-by: Biju Das Reviewed-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20230815131558.33787-4-biju.das.jz@bp.renesas.com Signed-off-by: Geert Uytterhoeven Signed-off-by: Sasha Levin --- drivers/pinctrl/renesas/pinctrl-rza2.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c index c0a04f1ee994..12126e30dc20 100644 --- a/drivers/pinctrl/renesas/pinctrl-rza2.c +++ b/drivers/pinctrl/renesas/pinctrl-rza2.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -46,6 +47,7 @@ struct rza2_pinctrl_priv { struct pinctrl_dev *pctl; struct pinctrl_gpio_range gpio_range; int npins; + struct mutex mutex; /* serialize adding groups and functions */ }; #define RZA2_PDR(port) (0x0000 + (port) * 2) /* Direction 16-bit */ @@ -358,10 +360,14 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev, psel_val[i] = MUX_FUNC(value); } + mutex_lock(&priv->mutex); + /* Register a single pin group listing all the pins we read from DT */ gsel = pinctrl_generic_add_group(pctldev, np->name, pins, npins, NULL); - if (gsel < 0) - return gsel; + if (gsel < 0) { + ret = gsel; + goto unlock; + } /* * Register a single group function where the 'data' is an array PSEL @@ -390,6 +396,8 @@ static int rza2_dt_node_to_map(struct pinctrl_dev *pctldev, (*map)->data.mux.function = np->name; *num_maps = 1; + mutex_unlock(&priv->mutex); + return 0; remove_function: @@ -398,6 +406,9 @@ remove_function: remove_group: pinctrl_generic_remove_group(pctldev, gsel); +unlock: + mutex_unlock(&priv->mutex); + dev_err(priv->dev, "Unable to parse DT node %s\n", np->name); return ret; @@ -473,6 +484,8 @@ static int rza2_pinctrl_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + mutex_init(&priv->mutex); + platform_set_drvdata(pdev, priv); priv->npins = (int)(uintptr_t)of_device_get_match_data(&pdev->dev) * From 3282e79a85c1096b1f59925d7a04a4303d605759 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Fri, 18 Aug 2023 07:59:38 -0700 Subject: [PATCH 117/123] dma-buf/sw_sync: Avoid recursive lock during fence signal MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e531fdb5cd5ee2564b7fe10c8a9219e2b2fac61e ] If a signal callback releases the sw_sync fence, that will trigger a deadlock as the timeline_fence_release recurses onto the fence->lock (used both for signaling and the the timeline tree). To avoid that, temporarily hold an extra reference to the signalled fences until after we drop the lock. (This is an alternative implementation of https://patchwork.kernel.org/patch/11664717/ which avoids some potential UAF issues with the original patch.) 
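Spelled out, the recursion that the extra reference avoids (simplified; the sw_sync fence shares the timeline's spinlock):

	sync_timeline_signal()
	  spin_lock_irq(&obj->lock)
	    dma_fence_signal_locked(&pt->base)
	      -> a fence callback drops the last reference
	        -> timeline_fence_release()
	          spin_lock_irqsave(fence->lock, ...)	/* same lock: deadlock */

Holding a temporary reference and dropping it only after obj->lock is released moves the final put, and with it timeline_fence_release(), outside the locked section.
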
v2: Remove now obsolete comment, use list_move_tail() and list_del_init() Reported-by: Bas Nieuwenhuizen Fixes: d3c6dd1fb30d ("dma-buf/sw_sync: Synchronize signal vs syncpt free") Signed-off-by: Rob Clark Link: https://patchwork.freedesktop.org/patch/msgid/20230818145939.39697-1-robdclark@gmail.com Reviewed-by: Christian König Signed-off-by: Christian König Signed-off-by: Sasha Levin --- drivers/dma-buf/sw_sync.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index 348b3a9170fa..7f5ed1aa7a9f 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = { */ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) { + LIST_HEAD(signalled); struct sync_pt *pt, *next; trace_sync_timeline(obj); @@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc) if (!timeline_fence_signaled(&pt->base)) break; - list_del_init(&pt->link); + dma_fence_get(&pt->base); + + list_move_tail(&pt->link, &signalled); rb_erase(&pt->node, &obj->pt_tree); - /* - * A signal callback may release the last reference to this - * fence, causing it to be freed. That operation has to be - * last to avoid a use after free inside this loop, and must - * be after we remove the fence from the timeline in order to - * prevent deadlocking on timeline->lock inside - * timeline_fence_release(). - */ dma_fence_signal_locked(&pt->base); } spin_unlock_irq(&obj->lock); + + list_for_each_entry_safe(pt, next, &signalled, link) { + list_del_init(&pt->link); + dma_fence_put(&pt->base); + } } /** From 3c839f8332dfca0e7480b6c03785cc85da2c5415 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 22 Aug 2023 21:29:42 +0200 Subject: [PATCH 118/123] gpio: sim: dispose of irq mappings before destroying the irq_sim domain [ Upstream commit ab4109f91b328ff5cb5e1279f64d443241add2d1 ] If a GPIO simulator device is unbound with interrupts still requested, we will hit a use-after-free issue in __irq_domain_deactivate_irq(). The owner of the irq domain must dispose of all mappings before destroying the domain object. 
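In devm terms the fix relies on registration order (probe-side sketch; devm actions run in reverse order at unbind, so the dispose action runs before the simulated domain itself is released):

	chip->irq_sim = devm_irq_domain_create_sim(dev, NULL, num_lines);
	if (IS_ERR(chip->irq_sim))
		return PTR_ERR(chip->irq_sim);

	/* runs before the domain's own devm release on unbind */
	ret = devm_add_action_or_reset(dev, gpio_sim_dispose_mappings, chip);
	if (ret)
		return ret;
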
Fixes: cb8c474e79be ("gpio: sim: new testing module") Signed-off-by: Bartosz Golaszewski Reviewed-by: Andy Shevchenko Signed-off-by: Sasha Levin --- drivers/gpio/gpio-sim.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index fef12e57b1f1..3fa123bb72ee 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -290,6 +290,15 @@ static void gpio_sim_mutex_destroy(void *data) mutex_destroy(lock); } +static void gpio_sim_dispose_mappings(void *data) +{ + struct gpio_sim_chip *chip = data; + unsigned int i; + + for (i = 0; i < chip->gc.ngpio; i++) + irq_dispose_mapping(irq_find_mapping(chip->irq_sim, i)); +} + static void gpio_sim_sysfs_remove(void *data) { struct gpio_sim_chip *chip = data; @@ -402,6 +411,10 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev) if (IS_ERR(chip->irq_sim)) return PTR_ERR(chip->irq_sim); + ret = devm_add_action_or_reset(dev, gpio_sim_dispose_mappings, chip); + if (ret) + return ret; + mutex_init(&chip->lock); ret = devm_add_action_or_reset(dev, gpio_sim_mutex_destroy, &chip->lock); From d10ab996bd5cb63e854924dd3d6ffb036d4d432a Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Tue, 22 Aug 2023 21:29:43 +0200 Subject: [PATCH 119/123] gpio: sim: pass the GPIO device's software node to irq domain [ Upstream commit 6e39c1ac688161b4db3617aabbca589b395242bc ] Associate the swnode of the GPIO device's (which is the interrupt controller here) with the irq domain. Otherwise the interrupt-controller device attribute is a no-op. Fixes: cb8c474e79be ("gpio: sim: new testing module") Signed-off-by: Bartosz Golaszewski Reviewed-by: Andy Shevchenko Signed-off-by: Sasha Levin --- drivers/gpio/gpio-sim.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c index 3fa123bb72ee..b352775e5e0b 100644 --- a/drivers/gpio/gpio-sim.c +++ b/drivers/gpio/gpio-sim.c @@ -407,7 +407,7 @@ static int gpio_sim_add_bank(struct fwnode_handle *swnode, struct device *dev) if (!chip->pull_map) return -ENOMEM; - chip->irq_sim = devm_irq_domain_create_sim(dev, NULL, num_lines); + chip->irq_sim = devm_irq_domain_create_sim(dev, swnode, num_lines); if (IS_ERR(chip->irq_sim)) return PTR_ERR(chip->irq_sim); From 936cf79649e0b9cbe6b638e6cc12712a9c6b26de Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Wed, 23 Aug 2023 20:11:49 -0500 Subject: [PATCH 120/123] ASoC: amd: yc: Fix a non-functional mic on Lenovo 82SJ [ Upstream commit c008323fe361bd62a43d9fb29737dacd5c067fb7 ] Lenovo 82SJ doesn't have DMIC connected like 82V2 does. Narrow the match down to only cover 82V2. Reported-by: prosenfeld@Yuhsbstudents.org Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217063 Fixes: 2232b2dd8cd4 ("ASoC: amd: yc: Add Lenovo Yoga Slim 7 Pro X to quirks table") Signed-off-by: Mario Limonciello --- sound/soc/amd/yc/acp6x-mach.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index 26101299af4d..9a9571c3f08c 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -217,7 +217,7 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { .driver_data = &acp6x_card, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_NAME, "82"), + DMI_MATCH(DMI_PRODUCT_NAME, "82V2"), } }, { From 9d5a3b4aee11301acce8c1752cb272478d082357 Mon Sep 17 00:00:00 2001 From: "Liam R. 
Howlett" Date: Fri, 18 Aug 2023 20:43:55 -0400 Subject: [PATCH 121/123] maple_tree: disable mas_wr_append() when other readers are possible [ Upstream commit cfeb6ae8bcb96ccf674724f223661bbcef7b0d0b ] The current implementation of append may cause duplicate data and/or incorrect ranges to be returned to a reader during an update. Although this has not been reported or seen, disable the append write operation while the tree is in rcu mode out of an abundance of caution. During the analysis of the mas_next_slot() the following was artificially created by separating the writer and reader code: Writer: reader: mas_wr_append set end pivot updates end metata Detects write to last slot last slot write is to start of slot store current contents in slot overwrite old end pivot mas_next_slot(): read end metadata read old end pivot return with incorrect range store new value Alternatively: Writer: reader: mas_wr_append set end pivot updates end metata Detects write to last slot last lost write to end of slot store value mas_next_slot(): read end metadata read old end pivot read new end pivot return with incorrect range set old end pivot There may be other accesses that are not safe since we are now updating both metadata and pointers, so disabling append if there could be rcu readers is the safest action. Link: https://lkml.kernel.org/r/20230819004356.1454718-2-Liam.Howlett@oracle.com Fixes: 54a611b60590 ("Maple Tree: add new data structure") Signed-off-by: Liam R. Howlett Cc: Signed-off-by: Andrew Morton Signed-off-by: Sasha Levin --- lib/maple_tree.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 47d0c95b9a01..250b4c67fac8 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4333,6 +4333,9 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas) struct ma_state *mas = wr_mas->mas; unsigned char node_pivots = mt_pivots[wr_mas->type]; + if (mt_in_rcu(mas->tree)) + return false; + if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) { if (new_end < node_pivots) wr_mas->pivots[new_end] = wr_mas->pivots[end]; From 19641b979b24b31c6da4b5ec29f6c57057499657 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 5 Jun 2023 10:58:29 +0200 Subject: [PATCH 122/123] ASoC: amd: vangogh: select CONFIG_SND_AMD_ACP_CONFIG commit fd0a7ec379dbf21b7bfd81914381ae5281706ef5 upstream. 
The vangogh driver just gained a link time dependency that now causes randconfig builds to fail: x86_64-linux-ld: sound/soc/amd/vangogh/pci-acp5x.o: in function `snd_acp5x_probe': pci-acp5x.c:(.text+0xbb): undefined reference to `snd_amd_acp_find_config' Fixes: e89f45edb747e ("ASoC: amd: vangogh: Add check for acp config flags in vangogh platform") Signed-off-by: Arnd Bergmann Link: https://lore.kernel.org/r/20230605085839.2157268-1-arnd@kernel.org Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/amd/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig index 3968c478c938..44d4e6e51a35 100644 --- a/sound/soc/amd/Kconfig +++ b/sound/soc/amd/Kconfig @@ -71,6 +71,7 @@ config SND_SOC_AMD_RENOIR_MACH config SND_SOC_AMD_ACP5x tristate "AMD Audio Coprocessor-v5.x I2S support" depends on X86 && PCI + select SND_AMD_ACP_CONFIG help This option enables ACP v5.x support on AMD platform From a2943d2d9a00ae7c5c1fde2b2e7e9cdb47e7db05 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 30 Aug 2023 16:11:13 +0200 Subject: [PATCH 123/123] Linux 6.1.50 Link: https://lore.kernel.org/r/20230828101156.480754469@linuxfoundation.org Tested-by: Conor Dooley Tested-by: Linux Kernel Functional Testing Tested-by: Salvatore Bonaccorso Tested-by: Joel Fernandes (Google) Tested-by: SeongJae Park Tested-by: Bagas Sanjaya Tested-by: Sudip Mukherjee Tested-by: Takeshi Ogasawara Tested-by: Shuah Khan Tested-by: Florian Fainelli Tested-by: Guenter Roeck Tested-by: Jon Hunter Tested-by: Pavel Machek (CIP) Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 61ebd54aba89..e5e1fdeef8bf 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 PATCHLEVEL = 1 -SUBLEVEL = 49 +SUBLEVEL = 50 EXTRAVERSION = NAME = Curry Ramen