mirror of
git://git.openwrt.org/openwrt/openwrt.git
synced 2025-12-10 14:42:11 -05:00
Changelog: https://cdn.kernel.org/pub/linux/kernel/v6.x/ChangeLog-6.12.46 Removed upstreamed: backport-6.12/600-01-v6.14-net-dsa-add-hook-to-determine-whether-EEE-is-support.patch[1] generic-backport/600-02-v6.14-net-dsa-provide-implementation-of-.support_eee.patch[2] generic/backport-6.12/610-02-v6.14-net-dsa-b53-bcm_sf2-implement-.support_eee-method.patch[3] generic/backport-6.12/610-05-v6.16-net-dsa-b53-do-not-enable-EEE-on-bcm63xx.patch[4] generic/backport-6.12/621-proc-fix-missing-pde_set_flags.patch[5] generic/pending-6.12/742-net-ethernet-mtk_eth_soc-fix-tx-vlan-tag-for-llc-pac.patch[6] Manually rebased: bcm27xx/patches-6.12/950-0347-net-macb-Also-set-DMA-coherent-mask.patch All other patches automatically rebased. 1. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.12.46&id=f7976772b16a7da725f9156c5ab6472ba22e3bc0 2. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.12.46&id=cda6c5c095e1997e63ed805ed3191f3d2af806a0 3. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.12.46&id=b765b9ee4e5a82e9d0e5d0649bf031e8a8b90b3d 4. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.12.46&id=3fbe3f4c57fda09f32e13fa05f53a0cc6f500619 5. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.12.46&id=3eebe856d09b6bdd8df99eb67203c831f23e21d7 6. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.12.46&id=f8b4b6f7c2bbfa33e50b8cc946c161172cdefbd5 Build system: x86/64 Build-tested: flogic/xiaomi_redmi-router-ax6000-ubootmod, ramips/tplink_archer-a6-v3, x86/64-glibc Run-tested: flogic/xiaomi_redmi-router-ax6000-ubootmod, ramips/tplink_archer-a6-v3, x86/64-glibc Signed-off-by: John Audia <therealgraysky@proton.me> Link: https://github.com/openwrt/openwrt/pull/20003 Signed-off-by: Robert Marko <robimarko@gmail.com>
487 lines
16 KiB
Diff
487 lines
16 KiB
Diff
From: Felix Fietkau <nbd@nbd.name>
|
|
Date: Tue, 15 Oct 2024 12:52:56 +0200
|
|
Subject: [PATCH] net: ethernet: mtk_eth_soc: optimize dma ring address/index
|
|
calculation
|
|
|
|
Since DMA descriptor sizes are all power of 2, we can avoid costly integer
|
|
division in favor of simple shifts.
|
|
|
|
Signed-off-by: Felix Fietkau <nbd@nbd.name>
|
|
---
|
|
|
|
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
|
|
@@ -43,6 +43,11 @@ MODULE_PARM_DESC(msg_level, "Message lev
|
|
offsetof(struct mtk_hw_stats, xdp_stats.x) / \
|
|
sizeof(u64) }
|
|
|
|
+#define RX_DESC_OFS(eth, i) \
|
|
+ ((i) << (eth)->soc->rx.desc_shift)
|
|
+#define TX_DESC_OFS(eth, i) \
|
|
+ ((i) << (eth)->soc->tx.desc_shift)
|
|
+
|
|
static const struct mtk_reg_map mtk_reg_map = {
|
|
.tx_irq_mask = 0x1a1c,
|
|
.tx_irq_status = 0x1a18,
|
|
@@ -1160,14 +1165,14 @@ static int mtk_init_fq_dma(struct mtk_et
|
|
eth->scratch_ring = eth->sram_base;
|
|
else
|
|
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
|
|
- cnt * soc->tx.desc_size,
|
|
+ TX_DESC_OFS(eth, cnt),
|
|
ð->phy_scratch_ring,
|
|
GFP_KERNEL);
|
|
|
|
if (unlikely(!eth->scratch_ring))
|
|
return -ENOMEM;
|
|
|
|
- phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
|
|
+ phy_ring_tail = eth->phy_scratch_ring + TX_DESC_OFS(eth, cnt - 1);
|
|
|
|
for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
|
|
len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
|
|
@@ -1186,11 +1191,11 @@ static int mtk_init_fq_dma(struct mtk_et
|
|
for (i = 0; i < len; i++) {
|
|
struct mtk_tx_dma_v2 *txd;
|
|
|
|
- txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
|
|
+ txd = eth->scratch_ring + TX_DESC_OFS(eth, j * MTK_FQ_DMA_LENGTH + i);
|
|
txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
|
|
if (j * MTK_FQ_DMA_LENGTH + i < cnt)
|
|
txd->txd2 = eth->phy_scratch_ring +
|
|
- (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
|
|
+ TX_DESC_OFS(eth, j * MTK_FQ_DMA_LENGTH + i + 1);
|
|
|
|
txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
|
|
if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
|
|
@@ -1220,9 +1225,9 @@ static void *mtk_qdma_phys_to_virt(struc
|
|
}
|
|
|
|
static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
|
|
- void *txd, u32 txd_size)
|
|
+ void *txd, u32 txd_shift)
|
|
{
|
|
- int idx = (txd - ring->dma) / txd_size;
|
|
+ int idx = (txd - ring->dma) >> txd_shift;
|
|
|
|
return &ring->buf[idx];
|
|
}
|
|
@@ -1233,9 +1238,9 @@ static struct mtk_tx_dma *qdma_to_pdma(s
|
|
return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
|
|
}
|
|
|
|
-static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
|
|
+static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_shift)
|
|
{
|
|
- return (dma - ring->dma) / txd_size;
|
|
+ return (dma - ring->dma) >> txd_shift;
|
|
}
|
|
|
|
static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
|
|
@@ -1443,7 +1448,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
|
if (itxd == ring->last_free)
|
|
return -ENOMEM;
|
|
|
|
- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
|
|
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_shift);
|
|
memset(itx_buf, 0, sizeof(*itx_buf));
|
|
|
|
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
|
|
@@ -1497,7 +1502,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
|
mtk_tx_set_dma_desc(dev, txd, &txd_info);
|
|
|
|
tx_buf = mtk_desc_to_tx_buf(ring, txd,
|
|
- soc->tx.desc_size);
|
|
+ soc->tx.desc_shift);
|
|
if (new_desc)
|
|
memset(tx_buf, 0, sizeof(*tx_buf));
|
|
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
|
|
@@ -1540,7 +1545,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
|
} else {
|
|
int next_idx;
|
|
|
|
- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
|
|
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_shift),
|
|
ring->dma_size);
|
|
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
|
|
}
|
|
@@ -1549,7 +1554,7 @@ static int mtk_tx_map(struct sk_buff *sk
|
|
|
|
err_dma:
|
|
do {
|
|
- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
|
|
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_shift);
|
|
|
|
/* unmap dma */
|
|
mtk_tx_unmap(eth, tx_buf, NULL, false);
|
|
@@ -1723,7 +1728,7 @@ static struct mtk_rx_ring *mtk_get_rx_ri
|
|
|
|
ring = ð->rx_ring[i];
|
|
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
|
|
- rxd = ring->dma + idx * eth->soc->rx.desc_size;
|
|
+ rxd = ring->dma + RX_DESC_OFS(eth, idx);
|
|
if (rxd->rxd2 & RX_DMA_DONE) {
|
|
ring->calc_idx_update = true;
|
|
return ring;
|
|
@@ -1891,7 +1896,7 @@ static int mtk_xdp_submit_frame(struct m
|
|
}
|
|
htxd = txd;
|
|
|
|
- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
|
|
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_shift);
|
|
memset(tx_buf, 0, sizeof(*tx_buf));
|
|
htx_buf = tx_buf;
|
|
|
|
@@ -1910,7 +1915,7 @@ static int mtk_xdp_submit_frame(struct m
|
|
goto unmap;
|
|
|
|
tx_buf = mtk_desc_to_tx_buf(ring, txd,
|
|
- soc->tx.desc_size);
|
|
+ soc->tx.desc_shift);
|
|
memset(tx_buf, 0, sizeof(*tx_buf));
|
|
n_desc++;
|
|
}
|
|
@@ -1948,7 +1953,7 @@ static int mtk_xdp_submit_frame(struct m
|
|
} else {
|
|
int idx;
|
|
|
|
- idx = txd_to_idx(ring, txd, soc->tx.desc_size);
|
|
+ idx = txd_to_idx(ring, txd, soc->tx.desc_shift);
|
|
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
|
|
MT7628_TX_CTX_IDX0);
|
|
}
|
|
@@ -1959,7 +1964,7 @@ static int mtk_xdp_submit_frame(struct m
|
|
|
|
unmap:
|
|
while (htxd != txd) {
|
|
- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
|
|
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_shift);
|
|
mtk_tx_unmap(eth, tx_buf, NULL, false);
|
|
|
|
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
|
|
@@ -2091,7 +2096,7 @@ static int mtk_poll_rx(struct napi_struc
|
|
goto rx_done;
|
|
|
|
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
|
|
- rxd = ring->dma + idx * eth->soc->rx.desc_size;
|
|
+ rxd = ring->dma + RX_DESC_OFS(eth, idx);
|
|
data = ring->data[idx];
|
|
|
|
if (!mtk_rx_get_desc(eth, &trxd, rxd))
|
|
@@ -2355,7 +2360,7 @@ static int mtk_poll_tx_qdma(struct mtk_e
|
|
break;
|
|
|
|
tx_buf = mtk_desc_to_tx_buf(ring, desc,
|
|
- eth->soc->tx.desc_size);
|
|
+ eth->soc->tx.desc_shift);
|
|
if (!tx_buf->data)
|
|
break;
|
|
|
|
@@ -2406,7 +2411,7 @@ static int mtk_poll_tx_pdma(struct mtk_e
|
|
}
|
|
mtk_tx_unmap(eth, tx_buf, &bq, true);
|
|
|
|
- desc = ring->dma + cpu * eth->soc->tx.desc_size;
|
|
+ desc = ring->dma + TX_DESC_OFS(eth, cpu);
|
|
ring->last_free = desc;
|
|
atomic_inc(&ring->free_count);
|
|
|
|
@@ -2524,7 +2529,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
|
{
|
|
const struct mtk_soc_data *soc = eth->soc;
|
|
struct mtk_tx_ring *ring = ð->tx_ring;
|
|
- int i, sz = soc->tx.desc_size;
|
|
+ int i, sz = TX_DESC_OFS(eth, 1);
|
|
struct mtk_tx_dma_v2 *txd;
|
|
int ring_size;
|
|
u32 ofs, val;
|
|
@@ -2571,7 +2576,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
|
* descriptors in ring->dma_pdma.
|
|
*/
|
|
if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
|
|
- ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
|
|
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, TX_DESC_OFS(eth, ring_size),
|
|
&ring->phys_pdma, GFP_KERNEL);
|
|
if (!ring->dma_pdma)
|
|
goto no_tx_mem;
|
|
@@ -2586,7 +2591,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
|
atomic_set(&ring->free_count, ring_size - 2);
|
|
ring->next_free = ring->dma;
|
|
ring->last_free = (void *)txd;
|
|
- ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
|
|
+ ring->last_free_ptr = (u32)(ring->phys + TX_DESC_OFS(eth, ring_size - 1));
|
|
ring->thresh = MAX_SKB_FRAGS;
|
|
|
|
/* make sure that all changes to the dma ring are flushed before we
|
|
@@ -2598,7 +2603,7 @@ static int mtk_tx_alloc(struct mtk_eth *
|
|
mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
|
|
mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
|
|
mtk_w32(eth,
|
|
- ring->phys + ((ring_size - 1) * sz),
|
|
+ ring->phys + TX_DESC_OFS(eth, ring_size - 1),
|
|
soc->reg_map->qdma.crx_ptr);
|
|
mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
|
|
|
|
@@ -2647,14 +2652,14 @@ static void mtk_tx_clean(struct mtk_eth
|
|
}
|
|
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
|
|
dma_free_coherent(eth->dma_dev,
|
|
- ring->dma_size * soc->tx.desc_size,
|
|
+ TX_DESC_OFS(eth, ring->dma_size),
|
|
ring->dma, ring->phys);
|
|
ring->dma = NULL;
|
|
}
|
|
|
|
if (ring->dma_pdma) {
|
|
dma_free_coherent(eth->dma_dev,
|
|
- ring->dma_size * soc->tx.desc_size,
|
|
+ TX_DESC_OFS(eth, ring->dma_size),
|
|
ring->dma_pdma, ring->phys_pdma);
|
|
ring->dma_pdma = NULL;
|
|
}
|
|
@@ -2710,15 +2715,13 @@ static int mtk_rx_alloc(struct mtk_eth *
|
|
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
|
|
rx_flag != MTK_RX_FLAGS_NORMAL) {
|
|
ring->dma = dma_alloc_coherent(eth->dma_dev,
|
|
- rx_dma_size * eth->soc->rx.desc_size,
|
|
+ RX_DESC_OFS(eth, rx_dma_size),
|
|
&ring->phys, GFP_KERNEL);
|
|
} else {
|
|
struct mtk_tx_ring *tx_ring = ð->tx_ring;
|
|
|
|
- ring->dma = tx_ring->dma + tx_ring_size *
|
|
- eth->soc->tx.desc_size * (ring_no + 1);
|
|
- ring->phys = tx_ring->phys + tx_ring_size *
|
|
- eth->soc->tx.desc_size * (ring_no + 1);
|
|
+ ring->dma = tx_ring->dma + TX_DESC_OFS(eth, tx_ring_size * (ring_no + 1));
|
|
+ ring->phys = tx_ring->phys + TX_DESC_OFS(eth, tx_ring_size * (ring_no + 1));
|
|
}
|
|
|
|
if (!ring->dma)
|
|
@@ -2729,7 +2732,7 @@ static int mtk_rx_alloc(struct mtk_eth *
|
|
dma_addr_t dma_addr;
|
|
void *data;
|
|
|
|
- rxd = ring->dma + i * eth->soc->rx.desc_size;
|
|
+ rxd = ring->dma + RX_DESC_OFS(eth, i);
|
|
if (ring->page_pool) {
|
|
data = mtk_page_pool_get_buff(ring->page_pool,
|
|
&dma_addr, GFP_KERNEL);
|
|
@@ -2820,7 +2823,7 @@ static void mtk_rx_clean(struct mtk_eth
|
|
if (!ring->data[i])
|
|
continue;
|
|
|
|
- rxd = ring->dma + i * eth->soc->rx.desc_size;
|
|
+ rxd = ring->dma + RX_DESC_OFS(eth, i);
|
|
if (!rxd->rxd1)
|
|
continue;
|
|
|
|
@@ -2837,7 +2840,7 @@ static void mtk_rx_clean(struct mtk_eth
|
|
|
|
if (!in_sram && ring->dma) {
|
|
dma_free_coherent(eth->dma_dev,
|
|
- ring->dma_size * eth->soc->rx.desc_size,
|
|
+ RX_DESC_OFS(eth, ring->dma_size),
|
|
ring->dma, ring->phys);
|
|
ring->dma = NULL;
|
|
}
|
|
@@ -3208,7 +3211,7 @@ static void mtk_dma_free(struct mtk_eth
|
|
|
|
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
|
|
dma_free_coherent(eth->dma_dev,
|
|
- MTK_QDMA_RING_SIZE * soc->tx.desc_size,
|
|
+ TX_DESC_OFS(eth, MTK_QDMA_RING_SIZE),
|
|
eth->scratch_ring, eth->phy_scratch_ring);
|
|
eth->scratch_ring = NULL;
|
|
eth->phy_scratch_ring = 0;
|
|
@@ -5236,6 +5239,9 @@ static void mtk_remove(struct platform_d
|
|
mtk_mdio_cleanup(eth);
|
|
}
|
|
|
|
+#define DESC_SIZE(struct_name) \
|
|
+ .desc_shift = const_ilog2(sizeof(struct_name))
|
|
+
|
|
static const struct mtk_soc_data mt2701_data = {
|
|
.reg_map = &mtk_reg_map,
|
|
.caps = MT7623_CAPS | MTK_HWLRO,
|
|
@@ -5244,14 +5250,14 @@ static const struct mtk_soc_data mt2701_
|
|
.required_pctl = true,
|
|
.version = 1,
|
|
.tx = {
|
|
- .desc_size = sizeof(struct mtk_tx_dma),
|
|
+ DESC_SIZE(struct mtk_tx_dma),
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
|
.dma_len_offset = 16,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
|
},
|
|
.rx = {
|
|
- .desc_size = sizeof(struct mtk_rx_dma),
|
|
+ DESC_SIZE(struct mtk_rx_dma),
|
|
.irq_done_mask = MTK_RX_DONE_INT,
|
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
@@ -5272,14 +5278,14 @@ static const struct mtk_soc_data mt7621_
|
|
.hash_offset = 2,
|
|
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
|
|
.tx = {
|
|
- .desc_size = sizeof(struct mtk_tx_dma),
|
|
+ DESC_SIZE(struct mtk_tx_dma),
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
|
.dma_len_offset = 16,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
|
},
|
|
.rx = {
|
|
- .desc_size = sizeof(struct mtk_rx_dma),
|
|
+ DESC_SIZE(struct mtk_rx_dma),
|
|
.irq_done_mask = MTK_RX_DONE_INT,
|
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
@@ -5302,14 +5308,14 @@ static const struct mtk_soc_data mt7622_
|
|
.has_accounting = true,
|
|
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
|
|
.tx = {
|
|
- .desc_size = sizeof(struct mtk_tx_dma),
|
|
+ DESC_SIZE(struct mtk_tx_dma),
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
|
.dma_len_offset = 16,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
|
},
|
|
.rx = {
|
|
- .desc_size = sizeof(struct mtk_rx_dma),
|
|
+ DESC_SIZE(struct mtk_rx_dma),
|
|
.irq_done_mask = MTK_RX_DONE_INT,
|
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
@@ -5331,14 +5337,14 @@ static const struct mtk_soc_data mt7623_
|
|
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
|
|
.disable_pll_modes = true,
|
|
.tx = {
|
|
- .desc_size = sizeof(struct mtk_tx_dma),
|
|
+ DESC_SIZE(struct mtk_tx_dma),
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
|
.dma_len_offset = 16,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
|
},
|
|
.rx = {
|
|
- .desc_size = sizeof(struct mtk_rx_dma),
|
|
+ DESC_SIZE(struct mtk_rx_dma),
|
|
.irq_done_mask = MTK_RX_DONE_INT,
|
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
@@ -5357,14 +5363,14 @@ static const struct mtk_soc_data mt7629_
|
|
.has_accounting = true,
|
|
.version = 1,
|
|
.tx = {
|
|
- .desc_size = sizeof(struct mtk_tx_dma),
|
|
+ DESC_SIZE(struct mtk_tx_dma),
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
|
.dma_len_offset = 16,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
|
},
|
|
.rx = {
|
|
- .desc_size = sizeof(struct mtk_rx_dma),
|
|
+ DESC_SIZE(struct mtk_rx_dma),
|
|
.irq_done_mask = MTK_RX_DONE_INT,
|
|
.dma_l4_valid = RX_DMA_L4_VALID,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
@@ -5387,14 +5393,14 @@ static const struct mtk_soc_data mt7981_
|
|
.has_accounting = true,
|
|
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
|
|
.tx = {
|
|
- .desc_size = sizeof(struct mtk_tx_dma_v2),
|
|
+ DESC_SIZE(struct mtk_tx_dma_v2),
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
|
.dma_len_offset = 8,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
|
},
|
|
.rx = {
|
|
- .desc_size = sizeof(struct mtk_rx_dma),
|
|
+ DESC_SIZE(struct mtk_rx_dma),
|
|
.irq_done_mask = MTK_RX_DONE_INT,
|
|
.dma_l4_valid = RX_DMA_L4_VALID_V2,
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
|
@@ -5417,14 +5423,14 @@ static const struct mtk_soc_data mt7986_
|
|
.has_accounting = true,
|
|
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
|
|
.tx = {
|
|
- .desc_size = sizeof(struct mtk_tx_dma_v2),
|
|
+ DESC_SIZE(struct mtk_tx_dma_v2),
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
|
.dma_len_offset = 8,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
.fq_dma_size = MTK_DMA_SIZE(2K),
|
|
},
|
|
.rx = {
|
|
- .desc_size = sizeof(struct mtk_rx_dma),
|
|
+ DESC_SIZE(struct mtk_rx_dma),
|
|
.irq_done_mask = MTK_RX_DONE_INT,
|
|
.dma_l4_valid = RX_DMA_L4_VALID_V2,
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
|
@@ -5447,14 +5453,14 @@ static const struct mtk_soc_data mt7988_
|
|
.has_accounting = true,
|
|
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
|
|
.tx = {
|
|
- .desc_size = sizeof(struct mtk_tx_dma_v2),
|
|
+ DESC_SIZE(struct mtk_tx_dma_v2),
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
|
.dma_len_offset = 8,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
.fq_dma_size = MTK_DMA_SIZE(4K),
|
|
},
|
|
.rx = {
|
|
- .desc_size = sizeof(struct mtk_rx_dma_v2),
|
|
+ DESC_SIZE(struct mtk_rx_dma_v2),
|
|
.irq_done_mask = MTK_RX_DONE_INT_V2,
|
|
.dma_l4_valid = RX_DMA_L4_VALID_V2,
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
|
|
@@ -5471,13 +5477,13 @@ static const struct mtk_soc_data rt5350_
|
|
.required_pctl = false,
|
|
.version = 1,
|
|
.tx = {
|
|
- .desc_size = sizeof(struct mtk_tx_dma),
|
|
+ DESC_SIZE(struct mtk_tx_dma),
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
|
.dma_len_offset = 16,
|
|
.dma_size = MTK_DMA_SIZE(2K),
|
|
},
|
|
.rx = {
|
|
- .desc_size = sizeof(struct mtk_rx_dma),
|
|
+ DESC_SIZE(struct mtk_rx_dma),
|
|
.irq_done_mask = MTK_RX_DONE_INT,
|
|
.dma_l4_valid = RX_DMA_L4_VALID_PDMA,
|
|
.dma_max_len = MTK_TX_DMA_BUF_LEN,
|
|
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
|
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
|
|
@@ -1160,7 +1160,7 @@ struct mtk_reg_map {
|
|
* @foe_entry_size Foe table entry size.
|
|
* @has_accounting Bool indicating support for accounting of
|
|
* offloaded flows.
|
|
- * @desc_size Tx/Rx DMA descriptor size.
|
|
- * @desc_shift	Tx/Rx DMA descriptor shift (log2 of the descriptor size).
|
|
* @irq_done_mask Rx irq done register mask.
|
|
* @dma_l4_valid Rx DMA valid register mask.
|
|
* @dma_max_len Max DMA tx/rx buffer length.
|
|
@@ -1181,14 +1181,14 @@ struct mtk_soc_data {
|
|
bool has_accounting;
|
|
bool disable_pll_modes;
|
|
struct {
|
|
- u32 desc_size;
|
|
+ u32 desc_shift;
|
|
u32 dma_max_len;
|
|
u32 dma_len_offset;
|
|
u32 dma_size;
|
|
u32 fq_dma_size;
|
|
} tx;
|
|
struct {
|
|
- u32 desc_size;
|
|
+ u32 desc_shift;
|
|
u32 irq_done_mask;
|
|
u32 dma_l4_valid;
|
|
u32 dma_max_len;
|