// SPDX-License-Identifier: BSD-3-Clause
/* Copyright(c) 2019 Broadcom All rights reserved. */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#if defined(RTE_ARCH_X86)
#include <tmmintrin.h>
#else
#error "bnxt vector pmd: unsupported target."
#endif

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"
#include "bnxt_rxq.h"
#include "hsi_struct_def_dpdk.h"

#include "bnxt_txq.h"
#include "bnxt_txr.h"

/*
 * RX Ring handling
 */

#define RTE_BNXT_MAX_RX_BURST		32
#define RTE_BNXT_MAX_TX_BURST		32
#define RTE_BNXT_RXQ_REARM_THRESH	32
#define RTE_BNXT_DESCS_PER_LOOP		4
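
/*
 * Refill the RX ring with fresh mbufs starting at rxq->rxrearm_start.
 * RTE_BNXT_RXQ_REARM_THRESH buffers are allocated with one mempool bulk
 * get, and their addresses are written into the hardware descriptors two
 * at a time with 128-bit SSE loads/stores; the non-address descriptor
 * fields are preserved.
 */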
static inline void
bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
{
	struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
	struct bnxt_sw_rx_bd *rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	int i;

	const __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, 0);
	const __m128i addrmask = _mm_set_epi64x(UINT64_MAX, 0);

	/* Pull RTE_BNXT_RXQ_REARM_THRESH more mbufs into the software ring */
	if (rte_mempool_get_bulk(rxq->mb_pool,
				 (void *)rx_bufs,
				 RTE_BNXT_RXQ_REARM_THRESH) < 0) {
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			RTE_BNXT_RXQ_REARM_THRESH;

		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < RTE_BNXT_RXQ_REARM_THRESH; i += 2, rx_bufs += 2) {
		__m128i buf_addr0, buf_addr1;
		__m128i rxbd0, rxbd1;

		mb0 = rx_bufs[0].mbuf;
		mb1 = rx_bufs[1].mbuf;

		/* Load address fields from both mbufs */
		buf_addr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
		buf_addr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);

		/* Load both rx descriptors (preserving some existing fields) */
		rxbd0 = _mm_loadu_si128((__m128i *)(rxbds + 0));
		rxbd1 = _mm_loadu_si128((__m128i *)(rxbds + 1));

		/* Add default offset to buffer address. */
		buf_addr0 = _mm_add_epi64(buf_addr0, hdr_room);
		buf_addr1 = _mm_add_epi64(buf_addr1, hdr_room);

		/* Clear all fields except address. */
		buf_addr0 = _mm_and_si128(buf_addr0, addrmask);
		buf_addr1 = _mm_and_si128(buf_addr1, addrmask);

		/* Clear address field in descriptor. */
		rxbd0 = _mm_andnot_si128(addrmask, rxbd0);
		rxbd1 = _mm_andnot_si128(addrmask, rxbd1);

		/* Set address field in descriptor. */
		rxbd0 = _mm_add_epi64(rxbd0, buf_addr0);
		rxbd1 = _mm_add_epi64(rxbd1, buf_addr1);

		/* Store descriptors to memory. */
		_mm_store_si128((__m128i *)(rxbds++), rxbd0);
		_mm_store_si128((__m128i *)(rxbds++), rxbd1);
	}

	rxq->rxrearm_start += RTE_BNXT_RXQ_REARM_THRESH;
	bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= RTE_BNXT_RXQ_REARM_THRESH;
}
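
/*
 * Derive the rte_mbuf packet_type from the RX completion: FLAGS2 selects
 * VLAN vs. plain Ethernet and IPv4 vs. IPv6, T_IP_CS_CALC indicates a
 * tunnel (inner) header, and the ITYPE field selects ICMP/TCP/UDP/IP.
 */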
static uint32_t
bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t l3, pkt_type = 0;
	uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
	uint32_t flags_type;

	vlan = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
	pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;

	t_ipcs = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
	ip6 = !!(rxcmp1->flags2 &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));

	flags_type = rxcmp->flags_type &
		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);

	if (!t_ipcs && !ip6)
		l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
	else if (!t_ipcs && ip6)
		l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
	else if (t_ipcs && !ip6)
		l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
	else
		l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;

	switch (flags_type) {
	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_TCP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
		if (!t_ipcs)
			pkt_type |= l3 | RTE_PTYPE_L4_UDP;
		else
			pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
		break;

	case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
		pkt_type |= l3;
		break;
	}

	return pkt_type;
}
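
/*
 * Translate the completion checksum status bits into PKT_RX_*_CKSUM_*
 * mbuf offload flags, handling tunnel and non-tunnel packets separately.
 */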
static void
bnxt_parse_csum(struct rte_mbuf *mbuf, struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t flags;

	flags = flags2_0xf(rxcmp1);
	/* IP Checksum */
	if (likely(IS_IP_NONTUNNEL_PKT(flags))) {
		if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (IS_IP_TUNNEL_PKT(flags)) {
		if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
			     RX_CMP_IP_CS_ERROR(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
	}

	/* L4 Checksum */
	if (likely(IS_L4_NONTUNNEL_PKT(flags))) {
		if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	} else if (IS_L4_TUNNEL_PKT(flags)) {
		if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
		} else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
				    (flags))) {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
		} else {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
		}
	} else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	}
}
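
/*
 * Vector RX burst handler.  Consumes completion ring entries, rebuilds the
 * per-packet mbuf fields with a single SSE shuffle (see shuf_msk), and
 * defers buffer allocation to bnxt_rxq_rearm() once rxrearm_nb crosses
 * RTE_BNXT_RXQ_REARM_THRESH.
 */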
uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	bool evt = false;
	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
	const __m128i shuf_msk =
		_mm_set_epi8(15, 14, 13, 12,          /* rss */
			     0xFF, 0xFF,              /* vlan_tci (zeroes) */
			     3, 2,                    /* data_len */
			     0xFF, 0xFF, 3, 2,        /* pkt_len */
			     0xFF, 0xFF, 0xFF, 0xFF); /* pkt_type (zeroes) */

	/* If Rx Q was stopped return */
	if (unlikely(!rxq->rx_started))
		return 0;

	if (rxq->rxrearm_nb >= RTE_BNXT_RXQ_REARM_THRESH)
		bnxt_rxq_rearm(rxq, rxr);

	/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);

	/* Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);

	/* Handle RX burst request */
	while (1) {
		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);

		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];

		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
			break;

		if (likely(CMP_TYPE(rxcmp) == RX_PKT_CMPL_TYPE_RX_L2)) {
			struct rx_pkt_cmpl_hi *rxcmp1;
			uint32_t tmp_raw_cons;
			uint32_t cp_cons;
			struct rte_mbuf *mbuf;
			__m128i mm_rxcmp, pkt_mb;

			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
			rxcmp1 = (struct rx_pkt_cmpl_hi *)
						&cpr->cp_desc_ring[cp_cons];

			if (!CMP_VALID(rxcmp1, tmp_raw_cons,
				       cpr->cp_ring_struct))
				break;

			raw_cons = tmp_raw_cons;
			cons = rxcmp->opaque;

			mbuf = rxr->rx_buf_ring[cons].mbuf;
			rxr->rx_buf_ring[cons].mbuf = NULL;

			/* Set constant fields from mbuf initializer. */
			_mm_store_si128((__m128i *)&mbuf->rearm_data,
					mbuf_init);

			/* Set mbuf pkt_len, data_len, and rss_hash fields. */
			mm_rxcmp = _mm_load_si128((__m128i *)rxcmp);
			pkt_mb = _mm_shuffle_epi8(mm_rxcmp, shuf_msk);
			_mm_storeu_si128((void *)&mbuf->rx_descriptor_fields1,
					 pkt_mb);

			rte_compiler_barrier();

			if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID)
				mbuf->ol_flags |= PKT_RX_RSS_HASH;

			if (rxcmp1->flags2 &
			    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
				mbuf->vlan_tci = rxcmp1->metadata &
					(RX_PKT_CMPL_METADATA_VID_MASK |
					RX_PKT_CMPL_METADATA_DE |
					RX_PKT_CMPL_METADATA_PRI_MASK);
				mbuf->ol_flags |= PKT_RX_VLAN;
			}

			bnxt_parse_csum(mbuf, rxcmp1);
			mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);

			rx_pkts[nb_rx_pkts++] = mbuf;
		} else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
			evt =
			bnxt_event_hwrm_resp_handler(rxq->bp,
						     (struct cmpl_base *)rxcmp);
		}

		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (nb_rx_pkts == nb_pkts || evt)
			break;
	}
	rxr->rx_prod = RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);

	rxq->rxrearm_nb += nb_rx_pkts;
	cpr->cp_raw_cons = raw_cons;
	cpr->valid = !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
	if (nb_rx_pkts || evt)
		bnxt_db_cq(cpr);

	return nb_rx_pkts;
}
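
/*
 * Free transmitted mbufs for nr_pkts completed descriptors.  Buffers are
 * returned to their mempool in bulk; the pending batch is flushed whenever
 * the pool changes between consecutive mbufs.
 */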
static void
bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct rte_mbuf **free = txq->free;
	uint16_t cons = txr->tx_cons;
	unsigned int blk = 0;

	while (nr_pkts--) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct rte_mbuf *mbuf;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = RING_NEXT(txr->tx_ring_struct, cons);
		mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);

		if (blk && mbuf->pool != free[0]->pool) {
			rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
			blk = 0;
		}
		free[blk++] = mbuf;
	}
	if (blk)
		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);

	txr->tx_cons = cons;
}
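
/*
 * Drain the TX completion ring and hand the accumulated packet count to
 * bnxt_tx_cmp_vec() for mbuf recycling.
 */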
static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

		if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
			break;

		if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
			nb_tx_pkts += txcmp->opaque;
		else
			RTE_LOG_DP(ERR, PMD,
				   "Unhandled CMP type %02x\n",
				   CMP_TYPE(txcmp));
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_tx_pkts < ring_mask);

	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
	if (nb_tx_pkts) {
		bnxt_tx_cmp_vec(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}
}
#define TX_BD_FLAGS_CMPL ((1 << TX_BD_LONG_FLAGS_BD_CNT_SFT) | \
			  TX_BD_SHORT_FLAGS_COAL_NOW | \
			  TX_BD_SHORT_TYPE_TX_BD_SHORT | \
			  TX_BD_LONG_FLAGS_PACKET_END)

#define TX_BD_FLAGS_NOCMPL (TX_BD_FLAGS_CMPL | TX_BD_LONG_FLAGS_NO_CMPL)
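
/*
 * Fold the frame length into the descriptor LHINT field.  Lengths are
 * bucketed in 512-byte steps (<512, <1K, <2K, >=2K), which is what the
 * len >> 9 switch below encodes.
 */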
static inline uint32_t
bnxt_xmit_flags_len(uint16_t len, uint16_t flags)
{
	switch (len >> 9) {
	case 0:
		return flags | TX_BD_LONG_FLAGS_LHINT_LT512;
	case 1:
		return flags | TX_BD_LONG_FLAGS_LHINT_LT1K;
	case 2:
		return flags | TX_BD_LONG_FLAGS_LHINT_LT2K;
	case 3:
		return flags | TX_BD_LONG_FLAGS_LHINT_LT2K;
	default:
		return flags | TX_BD_LONG_FLAGS_LHINT_GTE2K;
	}
}
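
/*
 * Enqueue a burst of single-segment mbufs.  Every descriptor is marked
 * NO_CMPL except the last one, which requests a completion carrying the
 * burst size in its opaque field (consumed by bnxt_handle_tx_cp_vec()).
 */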
static uint16_t
bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t prod = txr->tx_prod;
	struct rte_mbuf *tx_mbuf;
	struct tx_bd_long *txbd = NULL;
	struct bnxt_sw_tx_bd *tx_buf;
	uint16_t to_send;

	nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

	if (unlikely(nb_pkts == 0))
		return 0;

	/* Handle TX burst request */
	to_send = nb_pkts;
	while (to_send) {
		tx_mbuf = *tx_pkts++;
		rte_prefetch0(tx_mbuf);

		tx_buf = &txr->tx_buf_ring[prod];
		tx_buf->mbuf = tx_mbuf;

		txbd = &txr->tx_desc_ring[prod];
		txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
		txbd->len = tx_mbuf->data_len;
		txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
						       TX_BD_FLAGS_NOCMPL);
		prod = RING_NEXT(txr->tx_ring_struct, prod);
		to_send--;
	}

	/* Request a completion for last packet in burst */
	if (txbd) {
		txbd->opaque = nb_pkts;
		txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
	}

	rte_compiler_barrier();
	bnxt_db_write(&txr->tx_db, prod);

	txr->tx_prod = prod;

	return nb_pkts;
}
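
/*
 * Vector TX burst entry point: reclaim completed descriptors once the
 * free threshold is reached, then transmit in chunks of at most
 * RTE_BNXT_MAX_TX_BURST packets.
 */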
uint16_t
bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t nb_pkts)
{
	int nb_sent = 0;
	struct bnxt_tx_queue *txq = tx_queue;

	/* Tx queue was stopped; wait for it to be restarted */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
		return 0;
	}

	/* Handle TX completions */
	if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
		bnxt_handle_tx_cp_vec(txq);

	while (nb_pkts) {
		uint16_t ret, num;

		num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);
		ret = bnxt_xmit_fixed_burst_vec(tx_queue,
						&tx_pkts[nb_sent],
						num);
		nb_sent += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_sent;
}
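
/*
 * Build the 64-bit mbuf_initializer used by the RX path: a template mbuf's
 * rearm_data region (data_off, refcnt, nb_segs, port) is captured once so
 * bnxt_recv_pkts_vec() can reset those fields with a single vector store.
 */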
int __attribute__((cold))
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
	uintptr_t p;
	struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */

	mb_def.nb_segs = 1;
	mb_def.data_off = RTE_PKTMBUF_HEADROOM;
	mb_def.port = rxq->port_id;
	rte_mbuf_refcnt_set(&mb_def, 1);

	/* prevent compiler reordering: rearm_data covers previous fields */
	rte_compiler_barrier();
	p = (uintptr_t)&mb_def.rearm_data;
	rxq->mbuf_initializer = *(uint64_t *)p;
	rxq->rxrearm_nb = 0;
	rxq->rxrearm_start = 0;
	return 0;
}