/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"
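
/*
 * TX ring handling.
 *
 * Each transmit queue pairs a BD ring (tx_ring) that software fills with
 * tx_bd_long/tx_bd_short descriptors against a completion ring (cp_ring)
 * that hardware writes back to; devices with a notification queue
 * (BNXT_HAS_NQ) allocate a third ring. The functions below cover ring
 * setup and teardown, the burst transmit path, and completion handling.
 */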
void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		if (!txq)
			continue;

		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring);

		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring);

		rte_free(txq);
		bp->tx_queues[i] = NULL;
	}
}
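
/*
 * Note: bnxt_init_one_tx_ring() below only resets per-queue software
 * state; the firmware-side ring is created elsewhere. tx_wake_thresh is
 * set to half the ring size, and fw_ring_id is parked at
 * INVALID_HW_RING_ID until ring allocation assigns a real one.
 */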
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;

	txq->tx_wake_thresh = ring->ring_size / 2;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_cp_ring_info *nqr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	txr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_tx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txr == NULL)
		return -ENOMEM;
	txq->tx_ring = txr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	txr->tx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
	ring->vmem = (void **)&txr->tx_buf_ring;

	cpr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	txq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;

	if (BNXT_HAS_NQ(txq->bp)) {
		nqr = rte_zmalloc_socket("bnxt_tx_ring_nq",
					 sizeof(struct bnxt_cp_ring_info),
					 RTE_CACHE_LINE_SIZE, socket_id);
		if (nqr == NULL)
			return -ENOMEM;
		txq->nq_ring = nqr;

		ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
					  sizeof(struct bnxt_ring),
					  RTE_CACHE_LINE_SIZE, socket_id);
		if (ring == NULL)
			return -ENOMEM;
		nqr->cp_ring_struct = ring;
		ring->ring_size = txr->tx_ring_struct->ring_size;
		ring->ring_mask = ring->ring_size - 1;
		ring->bd = (void *)nqr->cp_desc_ring;
		ring->bd_dma = nqr->cp_desc_mapping;
		ring->vmem_size = 0;
		ring->vmem = NULL;
	}

	return 0;
}
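
/*
 * Illustration (not driver code): descriptor counts are rounded up to a
 * power of two so that ring indexes can wrap with a mask instead of a
 * modulo. A queue created with nb_tx_desc = 1000 gets
 * ring_size = rte_align32pow2(1000) = 1024 and ring_mask = 0x3ff, so
 * advancing the producer reduces to:
 *
 *	prod = (prod + 1) & ring->ring_mask;
 */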
static int bnxt_start_xmit(struct rte_mbuf *tx_pkt,
			   struct bnxt_tx_queue *txq,
			   uint16_t *coal_pkts,
			   struct tx_bd_long **last_txbd)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1 = NULL;
	uint32_t vlan_tag_flags, cfa_action;
	bool long_bd = false;
	unsigned short nr_bds = 0;
	struct rte_mbuf *m_seg;
	struct bnxt_sw_tx_bd *tx_buf;
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};
	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
				PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
				PKT_TX_TUNNEL_GENEVE))
		long_bd = true;

	nr_bds = long_bd + tx_pkt->nb_segs;
	if (unlikely(bnxt_tx_avail(txq) < nr_bds))
		return -ENOMEM;

	/* Check if number of Tx descriptors is above HW limit */
	if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) {
		PMD_DRV_LOG(ERR,
			    "Num descriptors %d exceeds HW limit\n", nr_bds);
		return -ENOSPC;
	}

	/* If packet length is less than minimum packet size, pad it */
	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < BNXT_MIN_PKT_SIZE)) {
		uint8_t pad = BNXT_MIN_PKT_SIZE - rte_pktmbuf_pkt_len(tx_pkt);
		char *seg = rte_pktmbuf_append(tx_pkt, pad);

		if (!seg) {
			PMD_DRV_LOG(ERR,
				    "Failed to pad mbuf by %d bytes\n",
				    pad);
			return -ENOMEM;
		}

		/* Note: data_len and pkt_len are updated by
		 * rte_pktmbuf_append().
		 */
		memset(seg, 0, pad);
	}

	/* Check for non-zero data_len */
	RTE_VERIFY(tx_pkt->data_len);
	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
	tx_buf->mbuf = tx_pkt;
	tx_buf->nr_bds = nr_bds;

	txbd = &txr->tx_desc_ring[txr->tx_prod];
	txbd->opaque = *coal_pkts;
	txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
	txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
	txbd->len = tx_pkt->data_len;
	/* Length hint: lhint_arr buckets lengths below 2KB in 512B steps */
	if (tx_pkt->pkt_len >= 2014)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
	txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));
	*last_txbd = txbd;

	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;
		cfa_action = 0;
		if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci;
			/* Currently supports 802.1Q, 802.1ad VLAN offloads;
			 * QINQ1, QINQ2, QINQ3 VLAN headers are deprecated.
			 */
			/* DPDK only supports 802.1Q VLAN packets */
			vlan_tag_flags |=
					TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}

		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

		txbd1 = (struct tx_bd_long_hi *)
					&txr->tx_desc_ring[txr->tx_prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		txbd1->cfa_action = cfa_action;
		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
			uint16_t hdr_size;

			/* TSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO |
					 TX_BD_LONG_LFLAGS_T_IPID;
			hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len;
			hdr_size += (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK) ?
				    tx_pkt->outer_l2_len +
				    tx_pkt->outer_l3_len : 0;
			/* hdr_size is counted in 16-bit units, not bytes;
			 * hence divide by 2.
			 */
			txbd1->hdr_size = hdr_size >> 1;
			txbd1->mss = tx_pkt->tso_segsz;
			RTE_VERIFY(txbd1->mss);

		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_IIP_TCP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
			   PKT_TX_IIP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
			   PKT_TX_IIP_TCP_CKSUM) {
			/* (Inner) IP, (Inner) TCP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_TCP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
			   PKT_TX_OIP_UDP_CKSUM) {
			/* Outer IP, (Inner) UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
			   PKT_TX_OIP_TCP_CKSUM) {
			/* Outer IP, (Inner) TCP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
			   PKT_TX_OIP_IIP_CKSUM) {
			/* Outer IP, Inner IP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
			   PKT_TX_TCP_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
			   PKT_TX_TCP_CKSUM) {
			/* TCP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
			   PKT_TX_UDP_CKSUM) {
			/* UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
			   PKT_TX_IP_CKSUM) {
			/* IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
			txbd1->mss = 0;
		} else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
			   PKT_TX_OUTER_IP_CKSUM) {
			/* Outer IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
			txbd1->mss = 0;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}
	m_seg = tx_pkt->next;
	while (m_seg) {
		/* Check for non-zero data_len */
		RTE_VERIFY(m_seg->data_len);
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];
		tx_buf->mbuf = m_seg;

		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;

		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;

	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

	return 0;
}
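
/*
 * Illustration (not driver code): nr_bds = long_bd + nb_segs, so a TSO
 * packet stored in three mbuf segments consumes four descriptors: the
 * long BD for the first segment, the tx_bd_long_hi BD carrying the
 * offload metadata, and one short BD for each remaining segment.
 */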
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct rte_mempool *pool = NULL;
	struct rte_mbuf **free = txq->free;
	uint16_t cons = txr->tx_cons;
	unsigned int blk = 0;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct rte_mbuf *mbuf;
		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[cons];
		unsigned short nr_bds = tx_buf->nr_bds;

		for (j = 0; j < nr_bds; j++) {
			mbuf = tx_buf->mbuf;
			tx_buf->mbuf = NULL;
			cons = RING_NEXT(txr->tx_ring_struct, cons);
			tx_buf = &txr->tx_buf_ring[cons];
			if (!mbuf)	/* long_bd's extra slot has no mbuf */
				continue;

			mbuf = rte_pktmbuf_prefree_seg(mbuf);
			if (unlikely(!mbuf))
				continue;

			/* EW - no need to unmap DMA memory? */

			if (likely(mbuf->pool == pool)) {
				/* Add mbuf to the bulk free array */
				free[blk++] = mbuf;
			} else {
				/* Found an mbuf from a different pool. Free
				 * mbufs accumulated so far to the previous
				 * pool.
				 */
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk);

				/* Start accumulating mbufs in a new pool */
				pool = mbuf->pool;
				free[0] = mbuf;
				blk = 1;
			}
		}
	}
	if (blk)
		rte_mempool_put_bulk(pool, (void *)free, blk);

	txr->tx_cons = cons;
}
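
/*
 * Illustration (not driver code): txq->free is a staging array that lets
 * completed mbufs from one mempool be returned in a single call. A run of
 * 32 same-pool mbufs costs one rte_mempool_put_bulk(pool, free, 32)
 * instead of 32 individual frees; only a pool change flushes the batch
 * early.
 */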
static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
	uint32_t opaque = 0;

	if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh)
		return 0;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
		rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
							ring_mask]);

		if (!CMPL_VALID(txcmp, cpr->valid))
			break;
		opaque = rte_cpu_to_le_32(txcmp->opaque);
		NEXT_CMPL(cpr, cons, cpr->valid, 1);
		rte_prefetch0(&cp_desc_ring[cons]);

		if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
			nb_tx_pkts += opaque;
		else
			RTE_LOG_DP(ERR, PMD,
				   "Unhandled CMP type %02x\n",
				   CMP_TYPE(txcmp));
		raw_cons = cons;
	} while (nb_tx_pkts < ring_mask);

	if (nb_tx_pkts) {
		bnxt_tx_cmp(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}

	return 0;
}
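
/*
 * Illustration (not driver code): completions are coalesced. Each BD's
 * opaque field carries the running packet count at queue time, and
 * TX_BD_LONG_FLAGS_NO_CMPL suppresses per-packet completions, so if a
 * burst queues three packets with opaque values 1, 2 and 3, only the
 * last one completes and nb_tx_pkts advances by 3, freeing all three at
 * once.
 */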
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	int rc = 0;
	uint16_t nb_tx_pkts = 0;
	uint16_t coal_pkts = 0;
	struct bnxt_tx_queue *txq = tx_queue;
	struct tx_bd_long *last_txbd = NULL;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	/* Tx queue was stopped; wait for it to be restarted */
	if (txq->tx_deferred_start) {
		PMD_DRV_LOG(DEBUG, "Tx q stopped; return\n");
		return 0;
	}

	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
		coal_pkts++;
		rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
				     &coal_pkts, &last_txbd);

		if (unlikely(rc))
			break;
	}

	if (likely(nb_tx_pkts)) {
		/* Request a completion on the last packet */
		last_txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
		bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_prod);
	}

	return nb_tx_pkts;
}
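
/*
 * Usage sketch (application side; port_id/queue_id are assumed to be
 * already configured): this function is installed as the device's
 * tx_pkt_burst handler, so applications reach it through
 * rte_eth_tx_burst():
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *
 * 'sent' may be less than 'n' when the ring fills; callers retry or
 * free the unsent mbufs.
 */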
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	txq->tx_deferred_start = false;
	PMD_DRV_LOG(DEBUG, "Tx queue started\n");

	return 0;
}
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	txq->tx_deferred_start = true;
	PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");

	return 0;
}