/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"
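
/*
 * TX ring handling
 */

/*
 * bnxt_free_tx_rings() - release all TX ring state for a device.
 *
 * For each configured TX queue, frees the descriptor ring, its software
 * bookkeeping, and the paired completion ring, then frees the queue itself
 * and clears the queue pointer so a later teardown pass cannot free it twice.
 */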
void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		if (!txq)
			continue;

		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring);

		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring);

		rte_free(txq);
		bp->tx_queues[i] = NULL;
	}
}
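
/*
 * bnxt_init_one_tx_ring() - per-queue runtime init.
 *
 * Sets the wake threshold to half the ring and marks the ring as not yet
 * bound to a hardware ring; the firmware ring ID is assigned later when
 * the ring is registered with the firmware.
 */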
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;

	txq->tx_wake_thresh = ring->ring_size / 2;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}
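
/*
 * bnxt_init_tx_ring_struct() - allocate the software state for one TX
 * queue: the TX ring info and its bnxt_ring descriptor, plus the paired
 * completion ring. The ring size is rounded up to a power of two so that
 * index arithmetic can use a simple mask.
 */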
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	txr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_tx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txr == NULL)
		return -ENOMEM;
	txq->tx_ring = txr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	txr->tx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
	ring->vmem = (void **)&txr->tx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	txq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	return 0;
}
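
/*
 * bnxt_tx_avail() - number of free TX descriptors.
 *
 * Occupancy is (tx_prod - tx_cons) masked to the ring size; one slot is
 * kept unused so that prod == cons always means "empty" rather than
 * "full". E.g. with ring_size 256, prod 10, cons 250: occupancy is
 * (10 - 250) & 255 = 16, leaving 256 - 16 - 1 = 239 usable slots.
 */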
static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	rte_compiler_barrier();

	return txr->tx_ring_struct->ring_size -
	       ((txr->tx_prod - txr->tx_cons) &
		txr->tx_ring_struct->ring_mask) - 1;
}
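
/*
 * bnxt_start_xmit() - enqueue one packet.
 *
 * Packets needing offloads (TSO, checksum, VLAN insertion) consume a
 * "long" buffer descriptor pair (tx_bd_long + tx_bd_long_hi) so the
 * offload metadata fits; plain packets use a single short BD. Each
 * additional mbuf segment takes one more short BD. Returns nonzero if
 * the ring does not have room for all BDs of this packet.
 */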
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
				struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1;
	uint32_t vlan_tag_flags, cfa_action;
	bool long_bd = false;
	uint16_t last_prod = 0;
	struct rte_mbuf *m_seg;
	struct bnxt_sw_tx_bd *tx_buf;
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};

	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM))
		long_bd = true;
	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
	tx_buf->mbuf = tx_pkt;
	tx_buf->nr_bds = long_bd + tx_pkt->nb_segs;
	last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) &
				txr->tx_ring_struct->ring_mask;

	if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds))
		return -ENOMEM;

	txbd = &txr->tx_desc_ring[txr->tx_prod];
	txbd->opaque = txr->tx_prod;
	txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->len = tx_pkt->data_len;
	/* The GTE2K size hint applies from 2048 bytes; below that, index
	 * the per-512-byte lhint table.
	 */
	if (txbd->len >= 2048)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[txbd->len >> 9];
	/* BD addresses are 64-bit; a 32-bit conversion would truncate
	 * IOVAs above 4 GB.
	 */
	txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));

	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;
		cfa_action = 0;
		if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci;
			/* Currently supports 802.1Q and 802.1ad VLAN
			 * offloads; the QINQ1, QINQ2, QINQ3 VLAN header
			 * formats are deprecated.
			 */
			/* DPDK only supports 802.1Q VLAN packets */
			vlan_tag_flags |=
					TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

		txbd1 = (struct tx_bd_long_hi *)
					&txr->tx_desc_ring[txr->tx_prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		txbd1->cfa_action = cfa_action;

		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
			/* TSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO;
			txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len + tx_pkt->outer_l2_len +
					tx_pkt->outer_l3_len;
			txbd1->mss = tx_pkt->tso_segsz;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_IIP_TCP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_TCP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
			   PKT_TX_OIP_IIP_CKSUM) {
			/* Outer IP, Inner IP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
			   PKT_TX_TCP_UDP_CKSUM) {
			/* (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) {
			/* (Inner) IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
			txbd1->mss = 0;
		} else if (tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) {
			/* Outer IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
			txbd1->mss = 0;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}
	/* Chain any remaining mbuf segments as short BDs. */
	m_seg = tx_pkt->next;
	while (txr->tx_prod != last_prod) {
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];

		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;

		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;

	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

	return 0;
}
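
/*
 * bnxt_tx_cmp() - reclaim nr_pkts completed packets.
 *
 * Walks the software ring from tx_cons, skipping the extra BDs each
 * packet consumed, and frees the associated mbufs.
 */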
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t cons = txr->tx_cons;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct rte_mbuf *mbuf;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = RING_NEXT(txr->tx_ring_struct, cons);
		mbuf = tx_buf->mbuf;
		tx_buf->mbuf = NULL;

		/* EW - no need to unmap DMA memory? */

		for (j = 1; j < tx_buf->nr_bds; j++)
			cons = RING_NEXT(txr->tx_ring_struct, cons);
		rte_pktmbuf_free(mbuf);
	}
	txr->tx_cons = cons;
}
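
/*
 * bnxt_handle_tx_cp() - drain the TX completion ring.
 *
 * Only runs once the number of in-flight descriptors exceeds
 * tx_free_thresh. For each valid TX_CMPL_TYPE_TX_L2 completion, the
 * opaque field is accumulated as the count of packets to reclaim.
 * After processing, the new consumer index is written to the
 * completion-ring doorbell.
 */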
static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t nb_tx_pkts = 0;
	uint32_t cons;
	struct tx_cmpl *txcmp;

	if ((txq->tx_ring->tx_ring_struct->ring_size -
			(bnxt_tx_avail(txq->tx_ring))) >
			txq->tx_free_thresh) {
		while (1) {
			cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
			txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];

			if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct))
				break;
			cpr->valid = FLIP_VALID(cons,
						cpr->cp_ring_struct->ring_mask,
						cpr->valid);

			if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
				nb_tx_pkts += txcmp->opaque;
			else
				RTE_LOG_DP(DEBUG, PMD,
						"Unhandled CMP type %02x\n",
						CMP_TYPE(txcmp));
			raw_cons = NEXT_RAW_CMP(raw_cons);
		}
		if (nb_tx_pkts)
			bnxt_tx_cmp(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
	}
	return nb_tx_pkts;
}
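
/*
 * bnxt_xmit_pkts() - burst transmit entry point.
 *
 * Reclaims completions first, then enqueues up to nb_pkts packets,
 * ringing the TX doorbell periodically (every quarter of the ring) so
 * the NIC can start fetching before the whole burst is queued, and once
 * more at the end for whatever remains.
 */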
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	uint16_t nb_tx_pkts = 0;
	uint16_t db_mask = txq->tx_ring->tx_ring_struct->ring_size >> 2;
	uint16_t last_db_mask = 0;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	/* Tx queue was stopped; wait for it to be restarted */
	if (txq->tx_deferred_start) {
		PMD_DRV_LOG(DEBUG, "Tx queue stopped; returning\n");
		return 0;
	}

	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
		if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) {
			break;
		} else if ((nb_tx_pkts & db_mask) != last_db_mask) {
			B_TX_DB(txq->tx_ring->tx_doorbell,
					txq->tx_ring->tx_prod);
			last_db_mask = nb_tx_pkts & db_mask;
		}
	}
	if (nb_tx_pkts)
		B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod);

	return nb_tx_pkts;
}
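
/*
 * bnxt_tx_queue_start() - ethdev queue-start hook; re-enables a queue
 * that was created with deferred start or stopped at runtime.
 */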
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	txq->tx_deferred_start = false;
	PMD_DRV_LOG(DEBUG, "Tx queue started\n");

	return 0;
}
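
/*
 * bnxt_tx_queue_stop() - ethdev queue-stop hook; drains outstanding
 * completions before marking the queue deferred so bnxt_xmit_pkts()
 * rejects further bursts.
 */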
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	txq->tx_deferred_start = true;
	PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");

	return 0;
}