/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2021 Broadcom All rights reserved. */

#include <inttypes.h>
#include <stdbool.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"

#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_rxtx_vec_common.h"

/*
 * RX Ring handling
 */
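
/*
 * Build the ol_flags word for packet "pi" (0..3): OR the packet's
 * RSS-valid flag lane with the driver's ol_flags_table[] entry for the
 * computed checksum-status index and, only when the error index is
 * non-zero, with the matching ol_flags_err_table[] entry.
 */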
#define GET_OL_FLAGS(rss_flags, ol_index, errors, pi, ol_flags) \
{ \
	uint32_t tmp, of; \
	of = _mm_extract_epi32((rss_flags), (pi)) | \
		rxr->ol_flags_table[_mm_extract_epi32((ol_index), (pi))]; \
	tmp = _mm_extract_epi32((errors), (pi)); \
	if (tmp) \
		of |= rxr->ol_flags_err_table[tmp]; \
	(ol_flags) = of; \
}
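
/*
 * Assemble the 16-byte mbuf rx_descriptor_fields1 block (packet_type,
 * pkt_len, data_len, vlan_tci, RSS hash) for packet "pi" from the two
 * completion-descriptor halves using one shuffle and two blends.
 */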
#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pi, ret) \
{ \
	uint32_t ptype; \
	__m128i r; \
	/* Set mbuf pkt_len, data_len, and rss_hash fields. */ \
	r = _mm_shuffle_epi8((rxcmp), (shuf_msk)); \
	/* Set packet type. */ \
	ptype = bnxt_ptype_table[_mm_extract_epi32((ptype_idx), (pi))]; \
	r = _mm_blend_epi16(r, _mm_set_epi32(0, 0, 0, ptype), 0x3); \
	/* Set vlan_tci. */ \
	r = _mm_blend_epi16(r, _mm_slli_si128((rxcmp1), 6), 0x20); \
	(ret) = r; \
}

static inline void
descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4],
	       __m128i mbuf_init, struct rte_mbuf **mbuf,
	       struct bnxt_rx_ring_info *rxr)
{
	const __m128i shuf_msk =
		_mm_set_epi8(15, 14, 13, 12,          /* rss */
			     0xFF, 0xFF,              /* vlan_tci (zeroes) */
			     3, 2,                    /* data_len */
			     0xFF, 0xFF, 3, 2,        /* pkt_len */
			     0xFF, 0xFF, 0xFF, 0xFF); /* pkt_type (zeroes) */
	const __m128i flags_type_mask =
		_mm_set1_epi32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
	const __m128i flags2_mask1 =
		_mm_set1_epi32(CMPL_FLAGS2_VLAN_TUN_MSK);
	const __m128i flags2_mask2 =
		_mm_set1_epi32(RX_PKT_CMPL_FLAGS2_IP_TYPE);
	const __m128i rss_mask =
		_mm_set1_epi32(RX_PKT_CMPL_FLAGS_RSS_VALID);
	__m128i t0, t1, flags_type, flags2, index, errors, rss_flags;
	__m128i ptype_idx, is_tunnel;
	uint32_t ol_flags;

	/* Validate ptype table indexing at build time. */
	bnxt_check_ptype_constants();

	/* Compute packet type table indexes for four packets. */
	t0 = _mm_unpacklo_epi32(mm_rxcmp[0], mm_rxcmp[1]);
	t1 = _mm_unpacklo_epi32(mm_rxcmp[2], mm_rxcmp[3]);
	flags_type = _mm_unpacklo_epi64(t0, t1);
	ptype_idx = _mm_srli_epi32(_mm_and_si128(flags_type, flags_type_mask),
		RX_PKT_CMPL_FLAGS_ITYPE_SFT - BNXT_PTYPE_TBL_TYPE_SFT);

	t0 = _mm_unpacklo_epi32(mm_rxcmp1[0], mm_rxcmp1[1]);
	t1 = _mm_unpacklo_epi32(mm_rxcmp1[2], mm_rxcmp1[3]);
	flags2 = _mm_unpacklo_epi64(t0, t1);

	ptype_idx = _mm_or_si128(ptype_idx,
			_mm_srli_epi32(_mm_and_si128(flags2, flags2_mask1),
				       RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT -
				       BNXT_PTYPE_TBL_VLAN_SFT));
	ptype_idx = _mm_or_si128(ptype_idx,
			_mm_srli_epi32(_mm_and_si128(flags2, flags2_mask2),
				       RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT -
				       BNXT_PTYPE_TBL_IP_VER_SFT));
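
	/*
	 * Each 32-bit lane of ptype_idx now packs the item type from
	 * flags_type together with the VLAN/tunnel-metadata and IP-version
	 * bits from flags2, so a single bnxt_ptype_table[] load per packet
	 * produces the final mbuf packet_type below.
	 */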

	/* Extract RSS valid flags for four packets. */
	rss_flags = _mm_srli_epi32(_mm_and_si128(flags_type, rss_mask), 9);
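
	/*
	 * The shift count of 9 relies on RX_PKT_CMPL_FLAGS_RSS_VALID being
	 * bit 10 and the mbuf RSS-hash-valid ol_flags bit being bit 1, so
	 * the shifted lanes can be OR'd directly into ol_flags by
	 * GET_OL_FLAGS().
	 */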

	/* Extract errors_v2 fields for four packets. */
	t0 = _mm_unpackhi_epi32(mm_rxcmp1[0], mm_rxcmp1[1]);
	t1 = _mm_unpackhi_epi32(mm_rxcmp1[2], mm_rxcmp1[3]);

	/* Compute ol_flags and checksum error indexes for four packets. */
	is_tunnel = _mm_and_si128(flags2, _mm_set1_epi32(4));
	is_tunnel = _mm_slli_epi32(is_tunnel, 3);
	flags2 = _mm_and_si128(flags2, _mm_set1_epi32(0x1F));

	errors = _mm_srli_epi32(_mm_unpacklo_epi64(t0, t1), 4);
	errors = _mm_and_si128(errors, _mm_set1_epi32(0xF));
	errors = _mm_and_si128(errors, flags2);

	index = _mm_andnot_si128(errors, flags2);
	errors = _mm_or_si128(errors, _mm_srli_epi32(is_tunnel, 1));
	index = _mm_or_si128(index, is_tunnel);
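
	/*
	 * Example (values chosen for illustration): a lane with
	 * flags2 = 0x3 (IP and L4 checksums calculated) and error nibble
	 * 0x2 (bad L4 checksum) yields errors = 0x2 and index = 0x1, so
	 * ol_flags_table[] is consulted only for checksums that were
	 * calculated and error-free, ol_flags_err_table[] for the bad
	 * ones, and is_tunnel shifts both lookups to the tunnel variants.
	 */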

	/* Update mbuf rearm_data for four packets. */
	GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
	_mm_store_si128((void *)&mbuf[0]->rearm_data,
			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));

	GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags);
	_mm_store_si128((void *)&mbuf[1]->rearm_data,
			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));

	GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags);
	_mm_store_si128((void *)&mbuf[2]->rearm_data,
			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));

	GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags);
	_mm_store_si128((void *)&mbuf[3]->rearm_data,
			_mm_or_si128(mbuf_init, _mm_set_epi64x(ol_flags, 0)));

	/* Update mbuf rx_descriptor_fields1 for four packets. */
	GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, t0);
	_mm_store_si128((void *)&mbuf[0]->rx_descriptor_fields1, t0);

	GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, t0);
	_mm_store_si128((void *)&mbuf[1]->rx_descriptor_fields1, t0);

	GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, t0);
	_mm_store_si128((void *)&mbuf[2]->rx_descriptor_fields1, t0);

	GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, t0);
	_mm_store_si128((void *)&mbuf[3]->rx_descriptor_fields1, t0);
}

static uint16_t
recv_burst_vec_sse(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	const __m128i mbuf_init = _mm_set_epi64x(0, rxq->mbuf_initializer);
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
	uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	uint64_t valid, desc_valid_mask = ~0ULL;
	const __m128i info3_v_mask = _mm_set1_epi32(CMPL_BASE_V);
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons, mbcons;
	int nb_rx_pkts = 0;
	const __m128i valid_target =
		_mm_set1_epi32(!!(raw_cons & cp_ring_size));
	int i;

	/* If the Rx queue was stopped, return. */
	if (unlikely(!rxq->rx_started))
		return 0;

	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
		bnxt_rxq_rearm(rxq, rxr);

	cons = raw_cons & (cp_ring_size - 1);
	mbcons = (raw_cons / 2) & (rx_ring_size - 1);
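
	/*
	 * Each packet occupies a pair of 16-byte completion entries (the
	 * rxcmp/rxcmp1 pair loaded below), so the completion-ring consumer
	 * advances two entries for every mbuf-ring index.
	 */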

	/* Prefetch first four descriptor pairs. */
	rte_prefetch0(&cp_desc_ring[cons]);
	rte_prefetch0(&cp_desc_ring[cons + 4]);

	/* Ensure that we do not go past the ends of the rings. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
					   (cp_ring_size - cons) / 2));
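
	/*
	 * The division by two again reflects the two completion entries
	 * consumed per packet: e.g. with 8 completion entries left before
	 * the ring wraps, at most 4 packets can be received in this call.
	 */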

	/*
	 * If we are at the end of the ring, ensure that descriptors after the
	 * last valid entry are not treated as valid. Otherwise, force the
	 * maximum number of packets to receive to be a multiple of the
	 * per-loop count.
	 */
	if (nb_pkts < BNXT_RX_DESCS_PER_LOOP_VEC128) {
		desc_valid_mask >>=
			16 * (BNXT_RX_DESCS_PER_LOOP_VEC128 - nb_pkts);
	} else {
		nb_pkts =
			RTE_ALIGN_FLOOR(nb_pkts, BNXT_RX_DESCS_PER_LOOP_VEC128);
	}

	/* Handle RX burst request */
	for (i = 0; i < nb_pkts; i += BNXT_RX_DESCS_PER_LOOP_VEC128,
	     cons += BNXT_RX_DESCS_PER_LOOP_VEC128 * 2,
	     mbcons += BNXT_RX_DESCS_PER_LOOP_VEC128) {
		__m128i rxcmp1[BNXT_RX_DESCS_PER_LOOP_VEC128];
		__m128i rxcmp[BNXT_RX_DESCS_PER_LOOP_VEC128];
		__m128i tmp0, tmp1, info3_v;
		uint32_t num_valid;

		/* Copy four mbuf pointers to output array. */
		tmp0 = _mm_loadu_si128((void *)&rxr->rx_buf_ring[mbcons]);
#ifdef RTE_ARCH_X86_64
		tmp1 = _mm_loadu_si128((void *)&rxr->rx_buf_ring[mbcons + 2]);
#endif
		_mm_storeu_si128((void *)&rx_pkts[i], tmp0);
#ifdef RTE_ARCH_X86_64
		_mm_storeu_si128((void *)&rx_pkts[i + 2], tmp1);
#endif

		/* Prefetch four descriptor pairs for next iteration. */
		if (i + BNXT_RX_DESCS_PER_LOOP_VEC128 < nb_pkts) {
			rte_prefetch0(&cp_desc_ring[cons + 8]);
			rte_prefetch0(&cp_desc_ring[cons + 12]);
		}

		/*
		 * Load the four current descriptors into SSE registers in
		 * reverse order, with compiler barriers in between, to
		 * ensure consistent state: if a later descriptor's valid
		 * bit is observed set, the earlier loads cannot have read
		 * stale data.
		 */
		rxcmp1[3] = _mm_load_si128((void *)&cp_desc_ring[cons + 7]);
		rte_compiler_barrier();
		rxcmp[3] = _mm_load_si128((void *)&cp_desc_ring[cons + 6]);

		rxcmp1[2] = _mm_load_si128((void *)&cp_desc_ring[cons + 5]);
		rte_compiler_barrier();
		rxcmp[2] = _mm_load_si128((void *)&cp_desc_ring[cons + 4]);

		tmp1 = _mm_unpackhi_epi32(rxcmp1[2], rxcmp1[3]);

		rxcmp1[1] = _mm_load_si128((void *)&cp_desc_ring[cons + 3]);
		rte_compiler_barrier();
		rxcmp[1] = _mm_load_si128((void *)&cp_desc_ring[cons + 2]);

		rxcmp1[0] = _mm_load_si128((void *)&cp_desc_ring[cons + 1]);
		rte_compiler_barrier();
		rxcmp[0] = _mm_load_si128((void *)&cp_desc_ring[cons + 0]);

		tmp0 = _mm_unpackhi_epi32(rxcmp1[0], rxcmp1[1]);

		/* Isolate descriptor valid flags. */
		info3_v = _mm_and_si128(_mm_unpacklo_epi64(tmp0, tmp1),
					info3_v_mask);
		info3_v = _mm_xor_si128(info3_v, valid_target);

		/*
		 * Pack the 128-bit array of valid descriptor flags into 64
		 * bits and count the number of set bits in order to determine
		 * the number of valid descriptors.
		 */
		valid = _mm_cvtsi128_si64(_mm_packs_epi32(info3_v, info3_v));
		num_valid = __builtin_popcountll(valid & desc_valid_mask);
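
		/*
		 * After the XOR with valid_target, each 32-bit lane of
		 * info3_v is 1 only for a descriptor whose valid bit differs
		 * from the current ring phase, so the saturating pack leaves
		 * one set bit per valid descriptor and the popcount is the
		 * valid count directly.
		 */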

		if (num_valid == 0)
			break;

		descs_to_mbufs(rxcmp, rxcmp1, mbuf_init, &rx_pkts[nb_rx_pkts],
			       rxr);
		nb_rx_pkts += num_valid;

		if (num_valid < BNXT_RX_DESCS_PER_LOOP_VEC128)
			break;
	}

	if (nb_rx_pkts) {
		rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);

		rxq->rxrearm_nb += nb_rx_pkts;
		cpr->cp_raw_cons += 2 * nb_rx_pkts;
		cpr->valid =
			!!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
		bnxt_db_cq(cpr);
	}

	return nb_rx_pkts;
}
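
/*
 * Receive burst entry point: split large requests into chunks of at most
 * RTE_BNXT_MAX_RX_BURST packets so that recv_burst_vec_sse() never exceeds
 * its per-call ring limits, stopping early once a partial burst indicates
 * the completion ring ran dry.
 */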
uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint16_t cnt = 0;

	while (nb_pkts > RTE_BNXT_MAX_RX_BURST) {
		uint16_t burst;

		burst = recv_burst_vec_sse(rx_queue, rx_pkts + cnt,
					   RTE_BNXT_MAX_RX_BURST);

		cnt += burst;
		nb_pkts -= burst;

		if (burst < RTE_BNXT_MAX_RX_BURST)
			return cnt;
	}

	return cnt + recv_burst_vec_sse(rx_queue, rx_pkts + cnt, nb_pkts);
}

static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

		if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
			break;

		if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
			nb_tx_pkts += txcmp->opaque;
		else
			RTE_LOG_DP(ERR, PMD,
				   "Unhandled CMP type %02x\n",
				   CMP_TYPE(txcmp));
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_tx_pkts < ring_mask);
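
	/*
	 * The opaque field of each TX completion carries the number of
	 * packets coalesced into it (written by bnxt_xmit_fixed_burst_vec()
	 * below), so nb_tx_pkts counts packets rather than completions.
	 */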

	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
	if (nb_tx_pkts) {
		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
		else
			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}
}
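
/*
 * Write one long TX buffer descriptor for a single-segment mbuf with a
 * single 128-bit store: the high 64 bits carry the buffer DMA address, the
 * low 64 bits the flags/length words, with the data length blended into
 * the 16-bit len field. Completions are suppressed per descriptor
 * (TX_BD_FLAGS_NOCMPL) and requested once per burst by the caller.
 */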
static inline void
bnxt_xmit_one(struct rte_mbuf *mbuf, struct tx_bd_long *txbd,
	      struct rte_mbuf **tx_buf)
{
	__m128i desc;

	*tx_buf = mbuf;

	desc = _mm_set_epi64x(mbuf->buf_iova + mbuf->data_off,
			      bnxt_xmit_flags_len(mbuf->data_len,
						  TX_BD_FLAGS_NOCMPL));
	desc = _mm_blend_epi16(desc, _mm_set_epi16(0, 0, 0, 0, 0, 0,
						   mbuf->data_len, 0), 0x02);
	_mm_store_si128((void *)txbd, desc);
}

static uint16_t
bnxt_xmit_fixed_burst_vec(struct bnxt_tx_queue *txq, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
	struct tx_bd_long *txbd;
	struct rte_mbuf **tx_buf;
	uint16_t to_send;

	tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
	txbd = &txr->tx_desc_ring[tx_prod];
	tx_buf = &txr->tx_buf_ring[tx_prod];

	/* Prefetch next transmit buffer descriptors. */
	rte_prefetch0(txbd);
	rte_prefetch0(txbd + 3);

	nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));
	if (unlikely(nb_pkts == 0))
		return 0;

	/* Handle TX burst request */
	to_send = nb_pkts;
	while (to_send >= BNXT_TX_DESCS_PER_LOOP) {
		/* Prefetch next transmit buffer descriptors. */
		rte_prefetch0(txbd + 4);
		rte_prefetch0(txbd + 7);

		bnxt_xmit_one(tx_pkts[0], txbd++, tx_buf++);
		bnxt_xmit_one(tx_pkts[1], txbd++, tx_buf++);
		bnxt_xmit_one(tx_pkts[2], txbd++, tx_buf++);
		bnxt_xmit_one(tx_pkts[3], txbd++, tx_buf++);

		to_send -= BNXT_TX_DESCS_PER_LOOP;
		tx_pkts += BNXT_TX_DESCS_PER_LOOP;
	}

	/* Transmit any remaining packets one at a time. */
	while (to_send) {
		bnxt_xmit_one(tx_pkts[0], txbd++, tx_buf++);
		to_send--;
		tx_pkts++;
	}

	/* Request a completion for the final packet of burst. */
	rte_compiler_barrier();
	txbd[-1].opaque = nb_pkts;
	txbd[-1].flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
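
	/*
	 * All descriptors above were written with completions suppressed;
	 * clearing NO_CMPL on the final descriptor requests one completion
	 * for the entire burst, and opaque records how many packets that
	 * completion will acknowledge.
	 */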

	tx_raw_prod += nb_pkts;
	bnxt_db_write(&txr->tx_db, tx_raw_prod);

	txr->tx_raw_prod = tx_raw_prod;

	return nb_pkts;
}

uint16_t
bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t nb_pkts)
{
	int nb_sent = 0;
	struct bnxt_tx_queue *txq = tx_queue;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t ring_size = txr->tx_ring_struct->ring_size;

	/* Tx queue was stopped; wait for it to be restarted. */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx queue stopped; return\n");
		return 0;
	}

	/* Handle TX completions */
	if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
		bnxt_handle_tx_cp_vec(txq);

	while (nb_pkts) {
		uint16_t ret, num;

		/*
		 * Ensure that no more than RTE_BNXT_MAX_TX_BURST packets
		 * are transmitted before the next completion.
		 */
		num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);

		/*
		 * Ensure that a ring wrap does not occur within a call to
		 * bnxt_xmit_fixed_burst_vec().
		 */
		num = RTE_MIN(num, ring_size -
				   (txr->tx_raw_prod & (ring_size - 1)));
		ret = bnxt_xmit_fixed_burst_vec(txq, &tx_pkts[nb_sent], num);
		nb_sent += ret;
		nb_pkts -= ret;
		if (ret == 0)
			break;
	}

	return nb_sent;
}

int __rte_cold
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
	return bnxt_rxq_vec_setup_common(rxq);
}