bnxt_en: Fix page pool logic for page size >= 64K
[ Upstream commit f6974b4c2d8e1062b5a52228ee47293c15b4ee1e ]
The RXBD length field on all bnxt chips is 16-bit and so we cannot
support a full page when the native page size is 64K or greater.
The non-XDP (non page pool) code path has logic to handle this, but
the XDP page pool code path does not. Add the missing
logic to use page_pool_dev_alloc_frag() to allocate 32K chunks if
the page size is 64K or greater.
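For background, a 16-bit length field can describe at most 65535 bytes, one byte
short of a full 64K page, so the driver caps its RX buffer at BNXT_RX_PAGE_SIZE
(32K on 64K-page systems) and packs multiple buffers per page. A minimal sketch
of the allocation pattern the patch adopts is shown below; the helper name
netdev_alloc_rx_page and the rx_page_size parameter are illustrative placeholders
rather than driver code, and only the two page_pool calls are the real API:

	#include <net/page_pool.h>

	/* Sketch only: hand out an rx_page_size buffer from a page pool.
	 * If the system page is bigger than the buffer (e.g. 64K page,
	 * 32K buffer), take a fragment and report its offset within the
	 * page; otherwise take a whole page at offset 0.
	 */
	static struct page *netdev_alloc_rx_page(struct page_pool *pool,
						 unsigned int rx_page_size,
						 unsigned int *offset)
	{
		if (PAGE_SIZE > rx_page_size)
			return page_pool_dev_alloc_frag(pool, offset,
							rx_page_size);
		*offset = 0;
		return page_pool_dev_alloc_pages(pool);
	}

The caller then DMA-maps only rx_page_size bytes starting at *offset, which is
what the dma_map_page_attrs() change in the first hunk below does.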
Fixes: 9f4b28301c ("bnxt: XDP multibuffer enablement")
Link: https://lore.kernel.org/netdev/20230728231829.235716-2-michael.chan@broadcom.com/
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Link: https://lore.kernel.org/r/20230731142043.58855-2-michael.chan@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
commit e9f11bfc03 (parent 64763dd851)
2 changed files with 29 additions and 19 deletions
drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -721,17 +721,24 @@ next_tx_int:
 
 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
 					 struct bnxt_rx_ring_info *rxr,
+					 unsigned int *offset,
 					 gfp_t gfp)
 {
 	struct device *dev = &bp->pdev->dev;
 	struct page *page;
 
-	page = page_pool_dev_alloc_pages(rxr->page_pool);
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+		page = page_pool_dev_alloc_frag(rxr->page_pool, offset,
+						BNXT_RX_PAGE_SIZE);
+	} else {
+		page = page_pool_dev_alloc_pages(rxr->page_pool);
+		*offset = 0;
+	}
 	if (!page)
 		return NULL;
 
-	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
-				      DMA_ATTR_WEAK_ORDERING);
+	*mapping = dma_map_page_attrs(dev, page, *offset, BNXT_RX_PAGE_SIZE,
+				      bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 	if (dma_mapping_error(dev, *mapping)) {
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
@@ -771,15 +778,16 @@ int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 	dma_addr_t mapping;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
+		unsigned int offset;
 		struct page *page =
-			__bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+			__bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
 		if (!page)
 			return -ENOMEM;
 
 		mapping += bp->rx_dma_offset;
 		rx_buf->data = page;
-		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+		rx_buf->data_ptr = page_address(page) + offset + bp->rx_offset;
 	} else {
 		u8 *data = __bnxt_alloc_rx_frag(bp, &mapping, gfp);
 
@@ -839,7 +847,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
 	unsigned int offset = 0;
 
 	if (BNXT_RX_PAGE_MODE(bp)) {
-		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, gfp);
+		page = __bnxt_alloc_rx_page(bp, &mapping, rxr, &offset, gfp);
 
 		if (!page)
 			return -ENOMEM;
@@ -986,15 +994,15 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
-	skb = build_skb(page_address(page), PAGE_SIZE);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
+	skb = build_skb(data_ptr - bp->rx_offset, BNXT_RX_PAGE_SIZE);
 	if (!skb) {
 		page_pool_recycle_direct(rxr->page_pool, page);
 		return NULL;
 	}
 	skb_mark_for_recycle(skb);
-	skb_reserve(skb, bp->rx_dma_offset);
+	skb_reserve(skb, bp->rx_offset);
 	__skb_put(skb, len);
 
 	return skb;
@@ -1020,8 +1028,8 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 		return NULL;
 	}
 	dma_addr -= bp->rx_dma_offset;
-	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
-			     DMA_ATTR_WEAK_ORDERING);
+	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, BNXT_RX_PAGE_SIZE,
+			     bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
 
 	if (unlikely(!payload))
 		payload = eth_get_headlen(bp->dev, data_ptr, len);
@@ -1034,7 +1042,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 
 	skb_mark_for_recycle(skb);
 	off = (void *)data_ptr - page_address(page);
-	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+	skb_add_rx_frag(skb, 0, page, off, len, BNXT_RX_PAGE_SIZE);
 	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
 	       payload + NET_IP_ALIGN);
 
@@ -1169,7 +1177,7 @@ static struct sk_buff *bnxt_rx_agg_pages_skb(struct bnxt *bp,
 
 	skb->data_len += total_frag_len;
 	skb->len += total_frag_len;
-	skb->truesize += PAGE_SIZE * agg_bufs;
+	skb->truesize += BNXT_RX_PAGE_SIZE * agg_bufs;
 	return skb;
 }
 
@@ -2972,8 +2980,8 @@ skip_rx_tpa_free:
 		rx_buf->data = NULL;
 		if (BNXT_RX_PAGE_MODE(bp)) {
 			mapping -= bp->rx_dma_offset;
-			dma_unmap_page_attrs(&pdev->dev, mapping, PAGE_SIZE,
-					     bp->rx_dir,
+			dma_unmap_page_attrs(&pdev->dev, mapping,
-					     BNXT_RX_PAGE_SIZE, bp->rx_dir,
 					     DMA_ATTR_WEAK_ORDERING);
 			page_pool_recycle_direct(rxr->page_pool, data);
 		} else {
@@ -3241,6 +3249,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
 	pp.nid = dev_to_node(&bp->pdev->dev);
 	pp.dev = &bp->pdev->dev;
 	pp.dma_dir = DMA_BIDIRECTIONAL;
+	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE)
+		pp.flags |= PP_FLAG_PAGE_FRAG;
 
 	rxr->page_pool = page_pool_create(&pp);
 	if (IS_ERR(rxr->page_pool)) {
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -180,8 +180,8 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 			u16 cons, u8 *data_ptr, unsigned int len,
 			struct xdp_buff *xdp)
 {
+	u32 buflen = BNXT_RX_PAGE_SIZE;
 	struct bnxt_sw_rx_bd *rx_buf;
-	u32 buflen = PAGE_SIZE;
 	struct pci_dev *pdev;
 	dma_addr_t mapping;
 	u32 offset;
@@ -297,7 +297,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	rx_buf = &rxr->rx_buf_ring[cons];
 	mapping = rx_buf->mapping - bp->rx_dma_offset;
 	dma_unmap_page_attrs(&pdev->dev, mapping,
-			     PAGE_SIZE, bp->rx_dir,
+			     BNXT_RX_PAGE_SIZE, bp->rx_dir,
 			     DMA_ATTR_WEAK_ORDERING);
 
 	/* if we are unable to allocate a new buffer, abort and reuse */
@@ -478,7 +478,7 @@ bnxt_xdp_build_skb(struct bnxt *bp, struct sk_buff *skb, u8 num_frags,
 	}
 	xdp_update_skb_shared_info(skb, num_frags,
 				   sinfo->xdp_frags_size,
-				   PAGE_SIZE * sinfo->nr_frags,
+				   BNXT_RX_PAGE_SIZE * sinfo->nr_frags,
 				   xdp_buff_is_frag_pfmemalloc(xdp));
 	return skb;
 }
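One closing note: in kernels of this vintage, page_pool_dev_alloc_frag() expects
the pool to have been created with PP_FLAG_PAGE_FRAG, which is why the
bnxt_alloc_rx_page_pool() hunk above sets that flag when PAGE_SIZE is larger than
BNXT_RX_PAGE_SIZE. A hypothetical, stripped-down pool setup with that flag might
look like the fragment below (ring_size, rx_buf_size and dma_dev are placeholders
for values the driver already has on hand, not bnxt identifiers):

	struct page_pool_params pp = { 0 };
	struct page_pool *pool;

	pp.pool_size = ring_size;		/* placeholder: RX ring length */
	pp.nid = dev_to_node(dma_dev);
	pp.dev = dma_dev;			/* placeholder: device doing the DMA */
	pp.dma_dir = DMA_BIDIRECTIONAL;
	if (PAGE_SIZE > rx_buf_size)		/* RX buffer smaller than a page */
		pp.flags |= PP_FLAG_PAGE_FRAG;	/* allow page_pool_dev_alloc_frag() */

	pool = page_pool_create(&pp);
	if (IS_ERR(pool))
		return PTR_ERR(pool);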