1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
7 #include "otx2_ethdev.h"
/*
 * Flow-control guard used at the top of each burst function.  When the
 * cached credit count cannot cover `pkts`, it is recomputed from the
 * hardware-updated counter at *fc_mem: free SQBs (nb_sqb_bufs_adj minus
 * consumed) scaled to packet units via the sqes_per_sqb_log2 shift, then
 * re-tested for room.
 * NOTE(review): this chunk is truncated — the macro's early-return path
 * on the second failed test and the closing `} } while (0)` are not
 * visible here; confirm against the full file.
 */
9 #define NIX_XMIT_FC_OR_RETURN(txq, pkts) do { \
10 /* Cached value is low, Update the fc_cache_pkts */ \
11 if (unlikely((txq)->fc_cache_pkts < (pkts))) { \
12 /* Multiply with sqe_per_sqb to express in pkts */ \
13 (txq)->fc_cache_pkts = \
14 ((txq)->nb_sqb_bufs_adj - *(txq)->fc_mem) << \
15 (txq)->sqes_per_sqb_log2; \
16 /* Check it again for the room */ \
17 if (unlikely((txq)->fc_cache_pkts < (pkts))) \
/*
 * Single-segment scalar transmit path.  `flags` is a compile-time
 * constant in every instantiation (see the T() generator below), so the
 * offload branches inside the helpers specialize away.  Each mbuf is
 * turned into a fixed 4-dword descriptor and issued one at a time.
 * NOTE(review): truncated view — the opening brace, the memory barrier
 * that follows the "commit any changes" comment, the loop's closing
 * brace and the final `return pkts;` are not visible in this chunk.
 */
23 static __rte_always_inline uint16_t
24 nix_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
25 uint16_t pkts, uint64_t *cmd, const uint16_t flags)
27 struct otx2_eth_txq *txq = tx_queue; uint16_t i;
28 const rte_iova_t io_addr = txq->io_addr;
29 void *lmt_addr = txq->lmt_addr;
/* Bail out early when flow-control credits cannot cover this burst */
31 NIX_XMIT_FC_OR_RETURN(txq, pkts);
/* Copy the per-queue command template into the on-stack work area */
33 otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags));
35 /* Lets commit any changes in the packet */
38 for (i = 0; i < pkts; i++) {
/* Fill the send descriptor for this mbuf per the offload flags */
39 otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
40 /* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
41 otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
42 tx_pkts[i]->ol_flags, 4, flags);
/* Submit the descriptor through the LMT region to the SQ doorbell */
43 otx2_nix_xmit_one(cmd, lmt_addr, io_addr, flags);
46 /* Reduce the cached count */
47 txq->fc_cache_pkts -= pkts;
/*
 * Multi-segment (scatter-gather) scalar transmit path.  Mirrors
 * nix_xmit_pkts() but builds a variable-size descriptor per mbuf chain,
 * with the dword count (`segdw`) computed per packet instead of the
 * fixed 4 used by the single-segment path.
 * NOTE(review): truncated view — the opening brace, the declaration of
 * `segdw`, the trailing `flags` argument of the tstamp call, the loop
 * close and the final `return pkts;` are not visible in this chunk.
 */
52 static __rte_always_inline uint16_t
53 nix_xmit_pkts_mseg(void *tx_queue, struct rte_mbuf **tx_pkts,
54 uint16_t pkts, uint64_t *cmd, const uint16_t flags)
56 struct otx2_eth_txq *txq = tx_queue; uint64_t i;
57 const rte_iova_t io_addr = txq->io_addr;
58 void *lmt_addr = txq->lmt_addr;
/* Bail out early when flow-control credits cannot cover this burst */
61 NIX_XMIT_FC_OR_RETURN(txq, pkts);
/* Copy the per-queue command template into the on-stack work area */
63 otx2_lmt_mov(cmd, &txq->cmd[0], otx2_nix_tx_ext_subs(flags));
65 /* Lets commit any changes in the packet */
68 for (i = 0; i < pkts; i++) {
69 otx2_nix_xmit_prepare(tx_pkts[i], cmd, flags);
/* segdw: descriptor size in dwords for this segment chain */
70 segdw = otx2_nix_prepare_mseg(tx_pkts[i], cmd, flags);
71 otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
72 tx_pkts[i]->ol_flags, segdw,
/* Submit the variable-size descriptor through the LMT region */
74 otx2_nix_xmit_mseg_one(cmd, lmt_addr, io_addr, segdw);
77 /* Reduce the cached count */
78 txq->fc_cache_pkts -= pkts;
/*
 * Generator macro: stamps out one non-inlined single-segment burst
 * function per offload-flag combination.  Each expansion passes a
 * compile-time `flags` constant into the always-inline nix_xmit_pkts(),
 * so dead offload branches are eliminated per variant.
 * NOTE(review): truncated — the on-stack `cmd[]` declaration sized by
 * `sz`, the closing brace, and this macro's NIX_TX_FASTPATH_MODES
 * expansion / `#undef T` are not visible in this chunk.
 */
83 #define T(name, f4, f3, f2, f1, f0, sz, flags) \
84 static uint16_t __rte_noinline __hot \
85 otx2_nix_xmit_pkts_ ## name(void *tx_queue, \
86 struct rte_mbuf **tx_pkts, uint16_t pkts) \
90 return nix_xmit_pkts(tx_queue, tx_pkts, pkts, cmd, flags); \
/*
 * Generator macro for the multi-segment variants: the `cmd[]` scratch
 * area gains NIX_TX_MSEG_SG_DWORDS - 2 extra dwords for scatter-gather
 * entries, and NIX_TX_MULTI_SEG_F is OR-ed into the compile-time flags
 * before calling nix_xmit_pkts_mseg().
 * NOTE(review): truncated — the closing brace of the generated function
 * and the `#undef T` after the expansion are not visible in this chunk.
 */
96 #define T(name, f4, f3, f2, f1, f0, sz, flags) \
97 static uint16_t __rte_noinline __hot \
98 otx2_nix_xmit_pkts_mseg_ ## name(void *tx_queue, \
99 struct rte_mbuf **tx_pkts, uint16_t pkts) \
101 uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \
103 return nix_xmit_pkts_mseg(tx_queue, tx_pkts, pkts, cmd, \
104 (flags) | NIX_TX_MULTI_SEG_F); \
/* Expand T once per supported offload-flag combination */
107 NIX_TX_FASTPATH_MODES
/*
 * Install the burst function matching the queue's enabled TX offloads:
 * the 5-D table is indexed by one boolean per offload-flag bit, in the
 * order documented on the inline comment below.
 * NOTE(review): truncated — the storage-class/return-type line above
 * the function name and the closing brace are not visible in this
 * chunk.
 */
111 pick_tx_func(struct rte_eth_dev *eth_dev,
112 const eth_tx_burst_t tx_burst[2][2][2][2][2])
114 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
116 /* [TSTMP] [NOFF] [VLAN] [OL3_OL4_CSUM] [IL3_IL4_CSUM] */
117 eth_dev->tx_pkt_burst = tx_burst
118 [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_TSTAMP_F)]
119 [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_MBUF_NOFF_F)]
120 [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_VLAN_QINQ_F)]
121 [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
122 [!!(dev->tx_offload_flags & NIX_TX_OFFLOAD_L3_L4_CSUM_F)];
/*
 * Build lookup tables of every generated burst-function variant and
 * install the one matching the device's offload flags; when the
 * application requested DEV_TX_OFFLOAD_MULTI_SEGS, the multi-segment
 * table is picked afterwards and overrides the single-segment choice.
 * Presumably invoked during device configuration — TODO confirm caller.
 * NOTE(review): truncated — the return-type line, function braces and
 * the `#undef T` after each table's expansion are not visible in this
 * chunk.
 */
126 otx2_eth_set_tx_function(struct rte_eth_dev *eth_dev)
128 struct otx2_eth_dev *dev = otx2_eth_pmd_priv(eth_dev);
/* Table of single-segment variants, indexed by offload-flag booleans */
130 const eth_tx_burst_t nix_eth_tx_burst[2][2][2][2][2] = {
131 #define T(name, f4, f3, f2, f1, f0, sz, flags) \
132 [f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_ ## name,
134 NIX_TX_FASTPATH_MODES
/* Table of multi-segment variants, same indexing scheme */
138 const eth_tx_burst_t nix_eth_tx_burst_mseg[2][2][2][2][2] = {
139 #define T(name, f4, f3, f2, f1, f0, sz, flags) \
140 [f4][f3][f2][f1][f0] = otx2_nix_xmit_pkts_mseg_ ## name,
142 NIX_TX_FASTPATH_MODES
146 pick_tx_func(eth_dev, nix_eth_tx_burst);
/* Multi-seg offload requires the scatter-gather capable variants */
148 if (dev->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
149 pick_tx_func(eth_dev, nix_eth_tx_burst_mseg);