/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_malloc.h>

#include "bnxt.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "hsi_struct_def_dpdk.h"

/*
 * TX Ring handling
 */
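/*
 * Tear down all TX queues: free each queue's BD ring and completion
 * ring (and the bnxt_ring bookkeeping structs wrapping them), then the
 * queue itself, leaving bp->tx_queues[] NULLed out.
 */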
void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;

	for (i = 0; i < (int)bp->tx_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];

		if (!txq)
			continue;

		bnxt_free_ring(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring->tx_ring_struct);
		rte_free(txq->tx_ring);

		bnxt_free_ring(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring->cp_ring_struct);
		rte_free(txq->cp_ring);

		rte_free(txq);
		bp->tx_queues[i] = NULL;
	}
}
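/*
 * Lightweight per-queue init: tx_wake_thresh is set to half the ring
 * size, and the firmware ring ID stays invalid until the ring is
 * created in hardware.
 */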
int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct bnxt_ring *ring = txr->tx_ring_struct;

	txq->tx_wake_thresh = ring->ring_size / 2;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}
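/*
 * Allocate the software-side state for one TX queue: a TX BD ring plus
 * a companion completion ring, both sized to the same power of two.
 * Only the TX ring gets vmem for per-BD mbuf bookkeeping; the
 * completion ring carries none.
 */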
int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id)
{
	struct bnxt_cp_ring_info *cpr;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_ring *ring;

	txr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_tx_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txr == NULL)
		return -ENOMEM;
	txq->tx_ring = txr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	txr->tx_ring_struct = ring;
	ring->ring_size = rte_align32pow2(txq->nb_tx_desc);
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)txr->tx_desc_ring;
	ring->bd_dma = txr->tx_desc_mapping;
	ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd);
	ring->vmem = (void **)&txr->tx_buf_ring;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	cpr = rte_zmalloc_socket("bnxt_tx_ring",
				 sizeof(struct bnxt_cp_ring_info),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (cpr == NULL)
		return -ENOMEM;
	txq->cp_ring = cpr;

	ring = rte_zmalloc_socket("bnxt_tx_ring_struct",
				  sizeof(struct bnxt_ring),
				  RTE_CACHE_LINE_SIZE, socket_id);
	if (ring == NULL)
		return -ENOMEM;
	cpr->cp_ring_struct = ring;
	ring->ring_size = txr->tx_ring_struct->ring_size;
	ring->ring_mask = ring->ring_size - 1;
	ring->bd = (void *)cpr->cp_desc_ring;
	ring->bd_dma = cpr->cp_desc_mapping;
	ring->vmem_size = 0;
	ring->vmem = NULL;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	return 0;
}
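/*
 * Enqueue one packet. Plain L2 frames use a single "short" BD; any
 * offload (checksum, TSO, VLAN/QinQ, tunnel, PTP timestamp) or a
 * TruFlow CFA action forces the two-slot "long" BD format, whose
 * second half (tx_bd_long_hi) carries the offload flags. Completions
 * are coalesced across a burst: the first BD is marked NO_CMPL with
 * opaque = *coal_pkts, and bnxt_xmit_pkts() clears NO_CMPL only on the
 * BD returned through *last_txbd.
 */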
static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt,
				struct bnxt_tx_queue *txq,
				uint16_t *coal_pkts,
				struct tx_bd_long **last_txbd)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint32_t outer_tpid_bd = 0;
	struct tx_bd_long *txbd;
	struct tx_bd_long_hi *txbd1 = NULL;
	uint32_t vlan_tag_flags;
	bool long_bd = false;
	unsigned short nr_bds = 0;
	struct rte_mbuf *m_seg;
	struct bnxt_sw_tx_bd *tx_buf;
	static const uint32_t lhint_arr[4] = {
		TX_BD_LONG_FLAGS_LHINT_LT512,
		TX_BD_LONG_FLAGS_LHINT_LT1K,
		TX_BD_LONG_FLAGS_LHINT_LT2K,
		TX_BD_LONG_FLAGS_LHINT_LT2K
	};

	if (unlikely(is_bnxt_in_error(txq->bp)))
		return -EIO;

	if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM |
				PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM |
				PKT_TX_VLAN_PKT | PKT_TX_OUTER_IP_CKSUM |
				PKT_TX_TUNNEL_GRE | PKT_TX_TUNNEL_VXLAN |
				PKT_TX_TUNNEL_GENEVE | PKT_TX_IEEE1588_TMST |
				PKT_TX_QINQ_PKT) ||
	     (BNXT_TRUFLOW_EN(txq->bp) &&
	      (txq->bp->tx_cfa_action || txq->vfr_tx_cfa_action)))
		long_bd = true;

	nr_bds = long_bd + tx_pkt->nb_segs;
	if (unlikely(bnxt_tx_avail(txq) < nr_bds))
		return -ENOMEM;

	/* Check if number of Tx descriptors is above HW limit */
	if (unlikely(nr_bds > BNXT_MAX_TSO_SEGS)) {
		PMD_DRV_LOG(ERR,
			    "Num descriptors %d exceeds HW limit\n", nr_bds);
		return -ENOSPC;
	}

	/* If packet length is less than minimum packet size, pad it */
	if (unlikely(rte_pktmbuf_pkt_len(tx_pkt) < BNXT_MIN_PKT_SIZE)) {
		uint8_t pad = BNXT_MIN_PKT_SIZE - rte_pktmbuf_pkt_len(tx_pkt);
		char *seg = rte_pktmbuf_append(tx_pkt, pad);

		if (!seg) {
			PMD_DRV_LOG(ERR,
				    "Failed to pad mbuf by %d bytes\n",
				    pad);
			return -ENOMEM;
		}

		/* Note: data_len, pkt len are updated in rte_pktmbuf_append */
		memset(seg, 0, pad);
	}

	/* Check non-zero data_len */
	RTE_VERIFY(tx_pkt->data_len);

	tx_buf = &txr->tx_buf_ring[txr->tx_prod];
	tx_buf->mbuf = tx_pkt;
	tx_buf->nr_bds = nr_bds;

	txbd = &txr->tx_desc_ring[txr->tx_prod];
	txbd->opaque = *coal_pkts;
	txbd->flags_type = nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT;
	txbd->flags_type |= TX_BD_SHORT_FLAGS_COAL_NOW;
	txbd->flags_type |= TX_BD_LONG_FLAGS_NO_CMPL;
	txbd->len = tx_pkt->data_len;
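	/*
	 * Give the hardware a packet-length hint in 512-byte buckets:
	 * pkt_len >> 9 indexes lhint_arr above (LT512/LT1K/LT2K), and
	 * anything larger is flagged GTE2K. The 2014-byte cutoff is
	 * kept as found in the original source.
	 */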
	if (tx_pkt->pkt_len >= 2014)
		txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K;
	else
		txbd->flags_type |= lhint_arr[tx_pkt->pkt_len >> 9];
	txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(tx_buf->mbuf));
	*last_txbd = txbd;

	if (long_bd) {
		txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG;
		vlan_tag_flags = 0;
		/* HW can accelerate only outer vlan in QinQ mode */
		if (tx_buf->mbuf->ol_flags & PKT_TX_QINQ_PKT) {
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci_outer;
			outer_tpid_bd = txq->bp->outer_tpid_bd &
				BNXT_OUTER_TPID_BD_MASK;
			vlan_tag_flags |= outer_tpid_bd;
		} else if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) {
			/* shurd: Should this mask at
			 * TX_BD_LONG_CFA_META_VLAN_VID_MASK?
			 */
			vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG |
				tx_buf->mbuf->vlan_tci;
			/* Currently supports 802.1Q, 802.1ad VLAN offloads;
			 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated.
			 */
			/* DPDK only supports 802.1Q VLAN packets */
			vlan_tag_flags |=
					TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100;
		}
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

		txbd1 = (struct tx_bd_long_hi *)
					&txr->tx_desc_ring[txr->tx_prod];
		txbd1->lflags = 0;
		txbd1->cfa_meta = vlan_tag_flags;
		/* Legacy tx_bd_long_hi->mss =
		 * tx_bd_long_hi->kid_or_ts_high_mss
		 */
		txbd1->kid_or_ts_high_mss = 0;

		if (txq->vfr_tx_cfa_action)
			txbd1->cfa_action = txq->vfr_tx_cfa_action;
		else
			txbd1->cfa_action = txq->bp->tx_cfa_action;
		if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) {
			uint16_t hdr_size;

			/* TSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_LSO |
					 TX_BD_LONG_LFLAGS_T_IPID;
			hdr_size = tx_pkt->l2_len + tx_pkt->l3_len +
					tx_pkt->l4_len;
			hdr_size += (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK) ?
				    tx_pkt->outer_l2_len +
				    tx_pkt->outer_l3_len : 0;
			/* The hdr_size is a multiple of 16-bit units, not
			 * 8-bit. Hence divide by 2.
			 * Also legacy hdr_size = kid_or_ts_low_hdr_size.
			 */
			txbd1->kid_or_ts_low_hdr_size = hdr_size >> 1;
			txbd1->kid_or_ts_high_mss = tx_pkt->tso_segsz;
			RTE_VERIFY(txbd1->kid_or_ts_high_mss);

		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_TCP_CKSUM) ==
			   PKT_TX_OIP_IIP_TCP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_UDP_CKSUM) ==
			   PKT_TX_OIP_IIP_UDP_CKSUM) {
			/* Outer IP, Inner IP, Inner TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_UDP_CKSUM) ==
			   PKT_TX_IIP_TCP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_UDP_CKSUM) ==
			   PKT_TX_IIP_UDP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_IIP_TCP_CKSUM) ==
			   PKT_TX_IIP_TCP_CKSUM) {
			/* (Inner) IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_IP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_UDP_CKSUM) ==
			   PKT_TX_OIP_TCP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_UDP_CKSUM) ==
			   PKT_TX_OIP_UDP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_TCP_CKSUM) ==
			   PKT_TX_OIP_TCP_CKSUM) {
			/* Outer IP, (Inner) TCP/UDP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OIP_IIP_CKSUM) ==
			   PKT_TX_OIP_IIP_CKSUM) {
			/* Outer IP, Inner IP CSO */
			txbd1->lflags |= TX_BD_FLG_TIP_IP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_UDP_CKSUM) ==
			   PKT_TX_TCP_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_TCP_CKSUM) ==
			   PKT_TX_TCP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_UDP_CKSUM) ==
			   PKT_TX_UDP_CKSUM) {
			/* TCP/UDP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_IP_CKSUM) ==
			   PKT_TX_IP_CKSUM) {
			/* IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_IP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_OUTER_IP_CKSUM) ==
			   PKT_TX_OUTER_IP_CKSUM) {
			/* Outer IP CSO */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_T_IP_CHKSUM;
		} else if ((tx_pkt->ol_flags & PKT_TX_IEEE1588_TMST) ==
			   PKT_TX_IEEE1588_TMST) {
			/* PTP */
			txbd1->lflags |= TX_BD_LONG_LFLAGS_STAMP;
		}
	} else {
		txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT;
	}

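	/*
	 * Each remaining mbuf segment gets one short BD of its own;
	 * only the packet's first BD carries nr_bds and offload state.
	 */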
	m_seg = tx_pkt->next;
	while (m_seg) {
		/* Check non-zero data_len */
		RTE_VERIFY(m_seg->data_len);
		txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);
		tx_buf = &txr->tx_buf_ring[txr->tx_prod];
		tx_buf->mbuf = m_seg;

		txbd = &txr->tx_desc_ring[txr->tx_prod];
		txbd->address = rte_cpu_to_le_64(rte_mbuf_data_iova(m_seg));
		txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT;
		txbd->len = m_seg->data_len;

		m_seg = m_seg->next;
	}

	txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END;

	txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod);

	return 0;
}
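/*
 * The completion handlers below rely on the opaque value written in
 * bnxt_start_xmit(): each TX completion reports how many packets were
 * coalesced into it, so a single completion can retire a whole burst.
 */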
/*
 * Transmit completion function for use when DEV_TX_OFFLOAD_MBUF_FAST_FREE
 * is enabled.
 */
static void bnxt_tx_cmp_fast(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct rte_mbuf **free = txq->free;
	uint16_t cons = txr->tx_cons;
	unsigned int blk = 0;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		unsigned short nr_bds;

		tx_buf = &txr->tx_buf_ring[cons];
		nr_bds = tx_buf->nr_bds;
		for (j = 0; j < nr_bds; j++) {
			if (tx_buf->mbuf) {
				/* Add mbuf to the bulk free array */
				free[blk++] = tx_buf->mbuf;
				tx_buf->mbuf = NULL;
			}
			cons = RING_NEXT(txr->tx_ring_struct, cons);
			tx_buf = &txr->tx_buf_ring[cons];
		}
	}
	if (blk)
		rte_mempool_put_bulk(free[0]->pool, (void *)free, blk);

	txr->tx_cons = cons;
}
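/*
 * General transmit completion path. Unlike the fast-free variant
 * above, each segment goes through rte_pktmbuf_prefree_seg(), and
 * mbufs are batched per mempool: the batch is flushed whenever a
 * segment from a different pool shows up.
 */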
static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct rte_mempool *pool = NULL;
	struct rte_mbuf **free = txq->free;
	uint16_t cons = txr->tx_cons;
	unsigned int blk = 0;
	int i, j;

	for (i = 0; i < nr_pkts; i++) {
		struct rte_mbuf *mbuf;
		struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[cons];
		unsigned short nr_bds = tx_buf->nr_bds;

		for (j = 0; j < nr_bds; j++) {
			mbuf = tx_buf->mbuf;
			tx_buf->mbuf = NULL;
			cons = RING_NEXT(txr->tx_ring_struct, cons);
			tx_buf = &txr->tx_buf_ring[cons];
			if (!mbuf)	/* long_bd's tx_buf ? */
				continue;

			mbuf = rte_pktmbuf_prefree_seg(mbuf);
			if (unlikely(!mbuf))
				continue;

			/* EW - no need to unmap DMA memory? */

			if (likely(mbuf->pool == pool)) {
				/* Add mbuf to the bulk free array */
				free[blk++] = mbuf;
			} else {
				/* Found an mbuf from a different pool. Free
				 * mbufs accumulated so far to the previous
				 * pool
				 */
				if (likely(pool != NULL))
					rte_mempool_put_bulk(pool,
							     (void *)free,
							     blk);
				/* Start accumulating mbufs in a new pool */
				pool = mbuf->pool;
				free[0] = mbuf;
				blk = 1;
			}
		}
	}
	if (blk)
		rte_mempool_put_bulk(pool, (void *)free, blk);

	txr->tx_cons = cons;
}
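/*
 * Drain the TX completion ring and free the retired mbufs. Skips all
 * work while fewer BDs are in flight than tx_free_thresh; returns the
 * number of packets whose completions were processed.
 */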
static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
	uint32_t opaque = 0;

	if (bnxt_tx_bds_in_hw(txq) < txq->tx_free_thresh)
		return 0;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons];
		rte_prefetch_non_temporal(&cp_desc_ring[(cons + 2) &
							ring_mask]);

		if (!CMPL_VALID(txcmp, cpr->valid))
			break;
		opaque = rte_cpu_to_le_32(txcmp->opaque);
		NEXT_CMPL(cpr, cons, cpr->valid, 1);
		rte_prefetch0(&cp_desc_ring[cons]);

		if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2)
			nb_tx_pkts += opaque;
		else
			RTE_LOG_DP(ERR, PMD,
				   "Unhandled CMP type %02x\n",
				   CMP_TYPE(txcmp));
		raw_cons = cons;
	} while (nb_tx_pkts < ring_mask);

	if (nb_tx_pkts) {
		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			bnxt_tx_cmp_fast(txq, nb_tx_pkts);
		else
			bnxt_tx_cmp(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}

	return nb_tx_pkts;
}
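/*
 * Burst transmit: reap outstanding completions first, then enqueue up
 * to nb_pkts packets, ringing the doorbell once for the whole burst
 * with a completion requested only on the last BD. Applications reach
 * this through rte_eth_tx_burst(); a minimal sketch (variable names
 * are illustrative only):
 *
 *	struct rte_mbuf *pkts[BURST_SZ];
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 *	// sent < n means the ring was full or a packet was rejected
 */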
uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	int rc;
	uint16_t nb_tx_pkts = 0;
	uint16_t coal_pkts = 0;
	struct bnxt_tx_queue *txq = tx_queue;
	struct tx_bd_long *last_txbd = NULL;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	/* Tx queue was stopped; wait for it to be restarted */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx queue stopped; return\n");
		return 0;
	}

	/* Handle TX burst request */
	for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) {
		coal_pkts++;
		rc = bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq,
				     &coal_pkts, &last_txbd);

		if (unlikely(rc))
			break;
	}

	if (likely(nb_tx_pkts)) {
		/* Request a completion on the last packet */
		last_txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
		bnxt_db_write(&txq->tx_ring->tx_db, txq->tx_ring->tx_prod);
	}

	return nb_tx_pkts;
}
/*
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 */
uint16_t
bnxt_dummy_xmit_pkts(void *tx_queue __rte_unused,
		     struct rte_mbuf **tx_pkts __rte_unused,
		     uint16_t nb_pkts __rte_unused)
{
	return 0;
}
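/*
 * Queue start/stop only toggle txq->tx_started, which bnxt_xmit_pkts()
 * checks before enqueueing; stopping also drains pending completions.
 */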
int bnxt_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
	txq->tx_started = true;
	PMD_DRV_LOG(DEBUG, "Tx queue started\n");

	return 0;
}
int bnxt_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct bnxt *bp = dev->data->dev_private;
	struct bnxt_tx_queue *txq = bp->tx_queues[tx_queue_id];
	int rc = 0;

	rc = is_bnxt_in_error(bp);
	if (rc)
		return rc;

	/* Handle TX completions */
	bnxt_handle_tx_cp(txq);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
	txq->tx_started = false;
	PMD_DRV_LOG(DEBUG, "Tx queue stopped\n");

	return 0;
}