/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2021 Broadcom All rights reserved. */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"

#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_rxtx_vec_common.h"
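
/*
 * RX Ring handling
 */

/*
 * GET_OL_FLAGS computes the ol_flags value for the packet at index 'pi'
 * (0-3): the RSS-valid flag is combined with offload flags looked up from
 * the per-ring ol_flags_table and, when the extracted error bits are
 * non-zero, with checksum error flags from ol_flags_err_table.
 */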
#define GET_OL_FLAGS(rss_flags, ol_index, errors, pi, ol_flags) \
{ \
	uint32_t tmp, of; \
 \
	of = _mm_extract_epi32((rss_flags), (pi)) | \
		rxr->ol_flags_table[_mm_extract_epi32((ol_index), (pi))]; \
 \
	tmp = _mm_extract_epi32((errors), (pi)); \
	if (tmp) \
		of |= rxr->ol_flags_err_table[tmp]; \
	(ol_flags) = of; \
}
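
/*
 * GET_DESC_FIELDS builds the 16-byte rx_descriptor_fields1 value for the
 * packet at index 'pi': pkt_len, data_len and rss_hash are shuffled out of
 * the first completion half, the looked-up packet type is blended into the
 * low bytes, and vlan_tci is blended in from the second completion half.
 */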
#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pi, ret) \
{ \
	uint32_t ptype; \
	__m128i r; \
 \
	/* Set mbuf pkt_len, data_len, and rss_hash fields. */ \
	r = _mm_shuffle_epi8((rxcmp), (shuf_msk)); \
 \
	/* Set packet type. */ \
	ptype = bnxt_ptype_table[_mm_extract_epi32((ptype_idx), (pi))]; \
	r = _mm_blend_epi16(r, _mm_set_epi32(0, 0, 0, ptype), 0x3); \
 \
	/* Set vlan_tci. */ \
	r = _mm_blend_epi16(r, _mm_slli_si128((rxcmp1), 6), 0x20); \
	(ret) = r; \
}
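
/*
 * Convert four completion descriptor pairs (mm_rxcmp/mm_rxcmp1) into four
 * mbufs: compute ol_flags and packet type table indexes with SSE, then
 * write each mbuf's rearm_data and rx_descriptor_fields1 with 16-byte
 * stores.
 */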
static inline void
descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4],
	       __m128i mbuf_init, struct rte_mbuf **mbuf,
	       struct bnxt_rx_ring_info *rxr)
{
	const __m128i shuf_msk =
		_mm_set_epi8(15, 14, 13, 12,          /* rss */
			     0xFF, 0xFF,              /* vlan_tci (zeroes) */
			     3, 2,                    /* data_len */
			     0xFF, 0xFF, 3, 2,        /* pkt_len */
			     0xFF, 0xFF, 0xFF, 0xFF); /* pkt_type (zeroes) */
	const __m128i flags_type_mask =
		_mm_set1_epi32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
	const __m128i flags2_mask1 =
		_mm_set1_epi32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
			       RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC);
	const __m128i flags2_mask2 =
		_mm_set1_epi32(RX_PKT_CMPL_FLAGS2_IP_TYPE);
	const __m128i rss_mask =
		_mm_set1_epi32(RX_PKT_CMPL_FLAGS_RSS_VALID);
	__m128i t0, t1, flags_type, flags2, index, errors, rss_flags;
	__m128i ptype_idx, is_tunnel;
	uint32_t ol_flags;

	/* Compute packet type table indexes for four packets */
	t0 = _mm_unpacklo_epi32(mm_rxcmp[0], mm_rxcmp[1]);
	t1 = _mm_unpacklo_epi32(mm_rxcmp[2], mm_rxcmp[3]);
	flags_type = _mm_unpacklo_epi64(t0, t1);
	ptype_idx =
		_mm_srli_epi32(_mm_and_si128(flags_type, flags_type_mask), 9);

	t0 = _mm_unpacklo_epi32(mm_rxcmp1[0], mm_rxcmp1[1]);
	t1 = _mm_unpacklo_epi32(mm_rxcmp1[2], mm_rxcmp1[3]);
	flags2 = _mm_unpacklo_epi64(t0, t1);

	ptype_idx = _mm_or_si128(ptype_idx,
			_mm_srli_epi32(_mm_and_si128(flags2, flags2_mask1), 2));
	ptype_idx = _mm_or_si128(ptype_idx,
			_mm_srli_epi32(_mm_and_si128(flags2, flags2_mask2), 7));

	/* Extract RSS valid flags for four packets. */
	rss_flags = _mm_srli_epi32(_mm_and_si128(flags_type, rss_mask), 9);

	/* Extract errors_v2 fields for four packets. */
	t0 = _mm_unpackhi_epi32(mm_rxcmp1[0], mm_rxcmp1[1]);
	t1 = _mm_unpackhi_epi32(mm_rxcmp1[2], mm_rxcmp1[3]);

	/* Compute ol_flags and checksum error indexes for four packets. */
	is_tunnel = _mm_and_si128(flags2, _mm_set1_epi32(4));
	is_tunnel = _mm_slli_epi32(is_tunnel, 3);
	flags2 = _mm_and_si128(flags2, _mm_set1_epi32(0x1F));

	errors = _mm_srli_epi32(_mm_unpacklo_epi64(t0, t1), 4);
	errors = _mm_and_si128(errors, _mm_set1_epi32(0xF));
	errors = _mm_and_si128(errors, flags2);

	index = _mm_andnot_si128(errors, flags2);
	errors = _mm_or_si128(errors, _mm_srli_epi32(is_tunnel, 1));
	index = _mm_or_si128(index, is_tunnel);
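
	/*
	 * At this point 'index' selects ol_flags_table entries for checksums
	 * that were computed without error, while 'errors' selects
	 * ol_flags_err_table entries for checksums that failed; the tunnel
	 * indication from flags2 is folded into both indexes so that inner
	 * and outer checksum flags are reported correctly.
	 */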

	/* Update mbuf rearm_data for four packets. */
	GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
	_mm_store_si128((void *)&mbuf[0]->rearm_data,
			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));

	GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags);
	_mm_store_si128((void *)&mbuf[1]->rearm_data,
			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));

	GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags);
	_mm_store_si128((void *)&mbuf[2]->rearm_data,
			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));

	GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags);
	_mm_store_si128((void *)&mbuf[3]->rearm_data,
			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));

	/* Update mbuf rx_descriptor_fields1 for four packets. */
	GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, t0);
	_mm_store_si128((void *)&mbuf[0]->rx_descriptor_fields1, t0);

	GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, t0);
	_mm_store_si128((void *)&mbuf[1]->rx_descriptor_fields1, t0);

	GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, t0);
	_mm_store_si128((void *)&mbuf[2]->rx_descriptor_fields1, t0);

	GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, t0);
	_mm_store_si128((void *)&mbuf[3]->rx_descriptor_fields1, t0);
}
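
/*
 * Vector (SSE) receive burst handler: processes completion descriptors four
 * at a time, stopping early at a descriptor that is not yet valid or at the
 * end of either ring.
 */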
uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
	uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	uint64_t valid, desc_valid_mask = ~0ULL;
	const __m128i info3_v_mask = _mm_set1_epi32(CMPL_BASE_V);
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons, mbcons;
	int nb_rx_pkts = 0;
	const __m128i valid_target =
		_mm_set1_epi32(!!(raw_cons & cp_ring_size));
	int i;
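
	/*
	 * The completion ring valid bit alternates on each pass through the
	 * ring. valid_target is the inverse of the value CMPL_BASE_V should
	 * carry on the current pass, so XORing the isolated valid bits with
	 * it below yields a non-zero lane for each valid descriptor.
	 */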

	/* If Rx Q was stopped return */
	if (unlikely(!rxq->rx_started))
		return 0;

	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
		bnxt_rxq_rearm(rxq, rxr);

	/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);

	cons = raw_cons & (cp_ring_size - 1);
	mbcons = (raw_cons / 2) & (rx_ring_size - 1);

	/* Prefetch first four descriptor pairs. */
	rte_prefetch0(&cp_desc_ring[cons]);
	rte_prefetch0(&cp_desc_ring[cons + 4]);

	/* Ensure that we do not go past the ends of the rings. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
					   (cp_ring_size - cons) / 2));

	/*
	 * If we are at the end of the ring, ensure that descriptors after the
	 * last valid entry are not treated as valid. Otherwise, force the
	 * maximum number of packets to receive to be a multiple of the per-
	 * loop count. Each descriptor contributes 16 bits to the packed valid
	 * mask computed below, hence the shift by 16 per descriptor.
	 */
	if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
		desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);
	else
		nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);

	/* Handle RX burst request */
	for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
				  cons += RTE_BNXT_DESCS_PER_LOOP * 2,
				  mbcons += RTE_BNXT_DESCS_PER_LOOP) {
		__m128i rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
		__m128i rxcmp[RTE_BNXT_DESCS_PER_LOOP];
		__m128i tmp0, tmp1, info3_v;
		uint32_t num_valid;

		/* Copy four mbuf pointers to output array. */
		tmp0 = _mm_loadu_si128((void *)&rxr->rx_buf_ring[mbcons]);
#ifdef RTE_ARCH_X86_64
		tmp1 = _mm_loadu_si128((void *)&rxr->rx_buf_ring[mbcons + 2]);
#endif
		_mm_storeu_si128((void *)&rx_pkts[i], tmp0);
#ifdef RTE_ARCH_X86_64
		_mm_storeu_si128((void *)&rx_pkts[i + 2], tmp1);
#endif

		/* Prefetch four descriptor pairs for next iteration. */
		if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
			rte_prefetch0(&cp_desc_ring[cons + 8]);
			rte_prefetch0(&cp_desc_ring[cons + 12]);
		}

		/*
		 * Load the four current descriptors into SSE registers in
		 * reverse order to ensure consistent state.
		 */
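		/*
		 * For each pair, the second half (rxcmp1), which carries the
		 * completion valid bit, is loaded before the first half with
		 * a compiler barrier between the loads, and the four pairs
		 * are loaded from newest to oldest so that the valid mask
		 * never contains a gap (an older descriptor seen as invalid
		 * while a newer one is seen as valid).
		 */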
		rxcmp1[3] = _mm_load_si128((void *)&cp_desc_ring[cons + 7]);
		rte_compiler_barrier();
		rxcmp[3] = _mm_load_si128((void *)&cp_desc_ring[cons + 6]);

		rxcmp1[2] = _mm_load_si128((void *)&cp_desc_ring[cons + 5]);
		rte_compiler_barrier();
		rxcmp[2] = _mm_load_si128((void *)&cp_desc_ring[cons + 4]);

		tmp1 = _mm_unpackhi_epi32(rxcmp1[2], rxcmp1[3]);

		rxcmp1[1] = _mm_load_si128((void *)&cp_desc_ring[cons + 3]);
		rte_compiler_barrier();
		rxcmp[1] = _mm_load_si128((void *)&cp_desc_ring[cons + 2]);

		rxcmp1[0] = _mm_load_si128((void *)&cp_desc_ring[cons + 1]);
		rte_compiler_barrier();
		rxcmp[0] = _mm_load_si128((void *)&cp_desc_ring[cons + 0]);

		tmp0 = _mm_unpackhi_epi32(rxcmp1[0], rxcmp1[1]);

		/* Isolate descriptor valid flags. */
		info3_v = _mm_and_si128(_mm_unpacklo_epi64(tmp0, tmp1),
					info3_v_mask);
		info3_v = _mm_xor_si128(info3_v, valid_target);

		/*
		 * Pack the 128-bit array of valid descriptor flags into 64
		 * bits and count the number of set bits in order to determine
		 * the number of valid descriptors.
		 */
		valid = _mm_cvtsi128_si64(_mm_packs_epi32(info3_v, info3_v));
		num_valid = __builtin_popcountll(valid & desc_valid_mask);
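
		/*
		 * Clear the mbuf ring entries for the descriptors consumed in
		 * this iteration so that mbufs handed to the application are
		 * not freed again or reused when the queue is rearmed or
		 * released.
		 */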
		switch (num_valid) {
		case 4:
			rxr->rx_buf_ring[mbcons + 3] = NULL;
			/* FALLTHROUGH */
		case 3:
			rxr->rx_buf_ring[mbcons + 2] = NULL;
			/* FALLTHROUGH */
		case 2:
			rxr->rx_buf_ring[mbcons + 1] = NULL;
			/* FALLTHROUGH */
		case 1:
			rxr->rx_buf_ring[mbcons + 0] = NULL;
			break;
		case 0:
			goto out;
		}

		descs_to_mbufs(rxcmp, rxcmp1, mbuf_init, &rx_pkts[nb_rx_pkts],
			       rxr);
		nb_rx_pkts += num_valid;

		if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
			break;
	}

out:
	if (nb_rx_pkts) {
		rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);

		rxq->rxrearm_nb += nb_rx_pkts;
		cpr->cp_raw_cons += 2 * nb_rx_pkts;
		cpr->valid =
			!!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
		bnxt_db_cq(cpr);
	}

	return nb_rx_pkts;
}
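
/*
 * TX completion handling: accumulate the number of completed packets from
 * the TX completion ring, then free the associated mbufs (using the fast
 * path when DEV_TX_OFFLOAD_MBUF_FAST_FREE is enabled).
 */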
static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

		if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
			break;

		if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
			nb_tx_pkts += txcmp->opaque;
		else
			RTE_LOG_DP(ERR, PMD,
				   "Unhandled CMP type %02x\n",
				   CMP_TYPE(txcmp));
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_tx_pkts < ring_mask);

	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
	if (nb_tx_pkts) {
		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
		else
			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}
}
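
/*
 * Write a single transmit buffer descriptor for 'mbuf' with one 16-byte
 * store: the buffer address goes in the high 64 bits, the flags/length word
 * (with completions suppressed via TX_BD_FLAGS_NOCMPL) in the low 64 bits,
 * and the 16-bit data length is blended into the second 16-bit lane.
 */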
static inline void
bnxt_xmit_one(struct rte_mbuf *mbuf, struct tx_bd_long *txbd,
	      struct bnxt_sw_tx_bd *tx_buf)
{
	__m128i desc;

	tx_buf->mbuf = mbuf;
	tx_buf->nr_bds = 1;

	desc = _mm_set_epi64x(mbuf->buf_iova + mbuf->data_off,
			      bnxt_xmit_flags_len(mbuf->data_len,
						  TX_BD_FLAGS_NOCMPL));
	desc = _mm_blend_epi16(desc, _mm_set_epi16(0, 0, 0, 0, 0, 0,
						   mbuf->data_len, 0), 0x02);
	_mm_store_si128((void *)txbd, desc);
}
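
/*
 * Transmit a burst of up to 'nb_pkts' single-segment packets without
 * crossing a ring wrap: fill descriptors four at a time, then request a
 * single completion for the last descriptor of the burst and ring the
 * doorbell once.
 */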
static uint16_t
bnxt_xmit_fixed_burst_vec(struct bnxt_tx_queue *txq, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
	struct tx_bd_long *txbd;
	struct bnxt_sw_tx_bd *tx_buf;
	uint16_t to_send;

	tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
	txbd = &txr->tx_desc_ring[tx_prod];
	tx_buf = &txr->tx_buf_ring[tx_prod];

	/* Prefetch next transmit buffer descriptors. */
	rte_prefetch0(txbd);
	rte_prefetch0(txbd + 3);

	nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

	if (unlikely(nb_pkts == 0))
		return 0;

	/* Handle TX burst request */
	to_send = nb_pkts;
	while (to_send >= RTE_BNXT_DESCS_PER_LOOP) {
		/* Prefetch next transmit buffer descriptors. */
		rte_prefetch0(txbd + 4);
		rte_prefetch0(txbd + 7);

		bnxt_xmit_one(tx_pkts[0], txbd++, tx_buf++);
		bnxt_xmit_one(tx_pkts[1], txbd++, tx_buf++);
		bnxt_xmit_one(tx_pkts[2], txbd++, tx_buf++);
		bnxt_xmit_one(tx_pkts[3], txbd++, tx_buf++);

		to_send -= RTE_BNXT_DESCS_PER_LOOP;
		tx_pkts += RTE_BNXT_DESCS_PER_LOOP;
	}

	/* Transmit remaining packets one descriptor at a time. */
	while (to_send) {
		bnxt_xmit_one(tx_pkts[0], txbd++, tx_buf++);
		to_send--;
		tx_pkts++;
	}

	/* Request a completion for the final packet of burst. */
	rte_compiler_barrier();
	txbd[-1].opaque = nb_pkts;
	txbd[-1].flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;

	tx_raw_prod += nb_pkts;
	bnxt_db_write(&txr->tx_db, tx_raw_prod);

	txr->tx_raw_prod = tx_raw_prod;

	return nb_pkts;
}
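
/*
 * Vector transmit burst handler: reclaims completed descriptors when the
 * free threshold is reached, then hands packets to
 * bnxt_xmit_fixed_burst_vec() in chunks that never exceed
 * RTE_BNXT_MAX_TX_BURST or cross a ring wrap.
 */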
uint16_t
bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t nb_pkts)
{
	int nb_sent = 0;
	struct bnxt_tx_queue *txq = tx_queue;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t ring_size = txr->tx_ring_struct->ring_size;

	/* Tx queue was stopped; wait for it to be restarted */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
		return 0;
	}

	/* Handle TX completions */
	if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
		bnxt_handle_tx_cp_vec(txq);

	while (nb_pkts) {
		uint16_t ret, num;

		/*
		 * Ensure that no more than RTE_BNXT_MAX_TX_BURST packets
		 * are transmitted before the next completion.
		 */
		num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);

		/*
		 * Ensure that a ring wrap does not occur within a call to
		 * bnxt_xmit_fixed_burst_vec().
		 */
		num = RTE_MIN(num, ring_size -
				   (txr->tx_raw_prod & (ring_size - 1)));
		ret = bnxt_xmit_fixed_burst_vec(txq, &tx_pkts[nb_sent], num);
		nb_sent += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_sent;
}
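
/*
 * Per-queue setup for the vector receive path; the common helper initializes
 * the per-queue state (mbuf initializer and rearm counters) shared by the
 * vector implementations.
 */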
int __rte_cold
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
	return bnxt_rxq_vec_setup_common(rxq);
}