/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2021 Broadcom All rights reserved. */

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt_ring.h"
#include "bnxt_rxtx_vec_common.h"
static uint16_t
recv_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	const __m256i mbuf_init =
		_mm256_set_epi64x(0, 0, 0, rxq->mbuf_initializer);
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
	uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	uint64_t valid, desc_valid_mask = ~0ULL;
	const __m256i info3_v_mask = _mm256_set1_epi32(CMPL_BASE_V);
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons, mbcons;
	int nb_rx_pkts = 0;
	int i;
	const __m256i valid_target =
		_mm256_set1_epi32(!!(raw_cons & cp_ring_size));
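	/*
	 * The completion-record valid bit toggles phase each time the
	 * completion ring wraps, so the expected phase for this pass is
	 * derived from how many times raw_cons has wrapped the ring.
	 */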
	const __m256i dsc_shuf_msk =
		_mm256_set_epi8(0xff, 0xff, 0xff, 0xff,  /* Zeroes. */
				7, 6,                    /* metadata type */
				9, 8,                    /* flags2 low 16 */
				5, 4,                    /* vlan_tci */
				1, 0,                    /* errors_v2 */
				0xff, 0xff, 0xff, 0xff,  /* Zeroes. */
				0xff, 0xff, 0xff, 0xff,  /* Zeroes. */
				7, 6,                    /* metadata type */
				9, 8,                    /* flags2 low 16 */
				5, 4,                    /* vlan_tci */
				1, 0,                    /* errors_v2 */
				0xff, 0xff, 0xff, 0xff); /* Zeroes. */
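	/*
	 * dsc_shuf_msk is applied to the high (second) completion record of
	 * each pair; it pulls the flags2/vlan/metadata/errors_v2 bytes into
	 * the middle dwords of the compressed per-packet layout described
	 * ahead of the packing step in the main loop below.
	 */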
	const __m256i shuf_msk =
		_mm256_set_epi8(15, 14, 13, 12,          /* rss */
				7, 6,                    /* vlan_tci */
				3, 2,                    /* data_len */
				0xFF, 0xFF, 3, 2,        /* pkt_len */
				0xFF, 0xFF, 0xFF, 0xFF,  /* pkt_type (zeroes) */
				15, 14, 13, 12,          /* rss */
				7, 6,                    /* vlan_tci */
				3, 2,                    /* data_len */
				0xFF, 0xFF, 3, 2,        /* pkt_len */
				0xFF, 0xFF, 0xFF, 0xFF); /* pkt_type (zeroes) */
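	/*
	 * shuf_msk rearranges each compressed descriptor into the mbuf
	 * rx_descriptor_fields1 layout: pkt_type is zeroed here and filled in
	 * later from the ptype table, pkt_len/data_len come from the
	 * completion length field, and vlan_tci and the RSS hash are copied
	 * through unchanged.
	 */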
	const __m256i flags_type_mask =
		_mm256_set1_epi32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
	const __m256i flags2_mask1 =
		_mm256_set1_epi32(CMPL_FLAGS2_VLAN_TUN_MSK);
	const __m256i flags2_mask2 =
		_mm256_set1_epi32(RX_PKT_CMPL_FLAGS2_IP_TYPE);
	const __m256i rss_mask =
		_mm256_set1_epi32(RX_PKT_CMPL_FLAGS_RSS_VALID);
	__m256i t0, t1, flags_type, flags2, index, errors;
	__m256i ptype_idx, ptypes, is_tunnel;
	__m256i mbuf01, mbuf23, mbuf45, mbuf67;
	__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, rearm6, rearm7;
	__m256i ol_flags, ol_flags_hi;
	__m256i rss_flags;
	/* Validate ptype table indexing at build time. */
	bnxt_check_ptype_constants();

	/* If the Rx queue was stopped, return immediately. */
	if (unlikely(!rxq->rx_started))
		return 0;

	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
		bnxt_rxq_rearm(rxq, rxr);
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, BNXT_RX_DESCS_PER_LOOP_VEC256);

	cons = raw_cons & (cp_ring_size - 1);
	mbcons = (raw_cons / 2) & (rx_ring_size - 1);
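	/*
	 * Each received packet consumes two completion ring entries, so the
	 * mbuf ring index advances at half the rate of the completion index.
	 */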
	/* Prefetch first four descriptor pairs. */
	rte_prefetch0(&cp_desc_ring[cons + 0]);
	rte_prefetch0(&cp_desc_ring[cons + 4]);
	rte_prefetch0(&cp_desc_ring[cons + 8]);
	rte_prefetch0(&cp_desc_ring[cons + 12]);
	/* Return immediately if there is not at least one completed packet. */
	if (!bnxt_cpr_cmp_valid(&cp_desc_ring[cons], raw_cons, cp_ring_size))
		return 0;
	/* Ensure that we do not go past the ends of the rings. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
					   (cp_ring_size - cons) / 2));
	/*
	 * If we are at the end of the ring, ensure that descriptors after the
	 * last valid entry are not treated as valid. Otherwise, force the
	 * maximum number of packets to receive to be a multiple of the per-
	 * loop count.
	 */
	if (nb_pkts < BNXT_RX_DESCS_PER_LOOP_VEC256) {
		desc_valid_mask >>=
			CHAR_BIT * (BNXT_RX_DESCS_PER_LOOP_VEC256 - nb_pkts);
	} else {
		nb_pkts =
			RTE_ALIGN_FLOOR(nb_pkts, BNXT_RX_DESCS_PER_LOOP_VEC256);
	}
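	/*
	 * The packed "valid" word built in the loop below reserves an 8-bit
	 * slot per descriptor, so narrowing desc_valid_mask by CHAR_BIT per
	 * missing descriptor keeps slots past the end of the ring from
	 * contributing to the popcount (e.g. nb_pkts == 4 leaves only the
	 * low 32 bits set).
	 */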
	/* Handle RX burst request */
	for (i = 0; i < nb_pkts; i += BNXT_RX_DESCS_PER_LOOP_VEC256,
				 cons += BNXT_RX_DESCS_PER_LOOP_VEC256 * 2,
				 mbcons += BNXT_RX_DESCS_PER_LOOP_VEC256) {
		__m256i desc0, desc1, desc2, desc3, desc4, desc5, desc6, desc7;
		__m256i rxcmp0_1, rxcmp2_3, rxcmp4_5, rxcmp6_7, info3_v;
		__m256i errors_v2;
		uint32_t num_valid;
		/* Copy eight mbuf pointers to output array. */
		t0 = _mm256_loadu_si256((void *)&rxr->rx_buf_ring[mbcons]);
		_mm256_storeu_si256((void *)&rx_pkts[i], t0);
#ifdef RTE_ARCH_X86_64
		t0 = _mm256_loadu_si256((void *)&rxr->rx_buf_ring[mbcons + 4]);
		_mm256_storeu_si256((void *)&rx_pkts[i + 4], t0);
#endif
		/* Prefetch eight descriptor pairs for next iteration. */
		if (i + BNXT_RX_DESCS_PER_LOOP_VEC256 < nb_pkts) {
			rte_prefetch0(&cp_desc_ring[cons + 16]);
			rte_prefetch0(&cp_desc_ring[cons + 20]);
			rte_prefetch0(&cp_desc_ring[cons + 24]);
			rte_prefetch0(&cp_desc_ring[cons + 28]);
		}
		/*
		 * Load eight receive completion descriptors into 256-bit
		 * registers. Loads are issued in reverse order to ensure
		 * consistent state.
		 */
		desc7 = _mm256_load_si256((void *)&cp_desc_ring[cons + 14]);
		rte_compiler_barrier();
		desc6 = _mm256_load_si256((void *)&cp_desc_ring[cons + 12]);
		rte_compiler_barrier();
		desc5 = _mm256_load_si256((void *)&cp_desc_ring[cons + 10]);
		rte_compiler_barrier();
		desc4 = _mm256_load_si256((void *)&cp_desc_ring[cons + 8]);
		rte_compiler_barrier();
		desc3 = _mm256_load_si256((void *)&cp_desc_ring[cons + 6]);
		rte_compiler_barrier();
		desc2 = _mm256_load_si256((void *)&cp_desc_ring[cons + 4]);
		rte_compiler_barrier();
		desc1 = _mm256_load_si256((void *)&cp_desc_ring[cons + 2]);
		rte_compiler_barrier();
		desc0 = _mm256_load_si256((void *)&cp_desc_ring[cons + 0]);
		/*
		 * Pack needed fields from each descriptor into a compressed
		 * 128-bit layout and pair two compressed descriptors into
		 * 256-bit registers. The 128-bit compressed layout is as
		 * follows:
		 *     Bits   0-15: flags_type field from low completion record.
		 *     Bits  16-31: len field from low completion record.
		 *     Bits  32-47: flags2 (low 16 bits) from high completion.
		 *     Bits  48-79: metadata from high completion record.
		 *     Bits  80-95: errors_v2 from high completion record.
		 *     Bits 96-127: rss hash from low completion record.
		 */
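		/*
		 * For each pair, t0 gathers the two low completion records and
		 * t1 the two high records (permute2f128 selectors 0x20/0x31).
		 * After shuffling t1 with dsc_shuf_msk, blend mask 0x66 takes
		 * dwords 1-2 of each 128-bit lane from the high record and the
		 * remaining dwords from the low record.
		 */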
		t0 = _mm256_permute2f128_si256(desc6, desc7, 0x20);
		t1 = _mm256_permute2f128_si256(desc6, desc7, 0x31);
		t1 = _mm256_shuffle_epi8(t1, dsc_shuf_msk);
		rxcmp6_7 = _mm256_blend_epi32(t0, t1, 0x66);

		t0 = _mm256_permute2f128_si256(desc4, desc5, 0x20);
		t1 = _mm256_permute2f128_si256(desc4, desc5, 0x31);
		t1 = _mm256_shuffle_epi8(t1, dsc_shuf_msk);
		rxcmp4_5 = _mm256_blend_epi32(t0, t1, 0x66);

		t0 = _mm256_permute2f128_si256(desc2, desc3, 0x20);
		t1 = _mm256_permute2f128_si256(desc2, desc3, 0x31);
		t1 = _mm256_shuffle_epi8(t1, dsc_shuf_msk);
		rxcmp2_3 = _mm256_blend_epi32(t0, t1, 0x66);

		t0 = _mm256_permute2f128_si256(desc0, desc1, 0x20);
		t1 = _mm256_permute2f128_si256(desc0, desc1, 0x31);
		t1 = _mm256_shuffle_epi8(t1, dsc_shuf_msk);
		rxcmp0_1 = _mm256_blend_epi32(t0, t1, 0x66);
		/* Compute packet type table indices for eight packets. */
		t0 = _mm256_unpacklo_epi32(rxcmp0_1, rxcmp2_3);
		t1 = _mm256_unpacklo_epi32(rxcmp4_5, rxcmp6_7);
		flags_type = _mm256_unpacklo_epi64(t0, t1);
		ptype_idx = _mm256_and_si256(flags_type, flags_type_mask);
		ptype_idx = _mm256_srli_epi32(ptype_idx,
					      RX_PKT_CMPL_FLAGS_ITYPE_SFT -
					      BNXT_PTYPE_TBL_TYPE_SFT);

		t0 = _mm256_unpacklo_epi32(rxcmp0_1, rxcmp2_3);
		t1 = _mm256_unpacklo_epi32(rxcmp4_5, rxcmp6_7);
		flags2 = _mm256_unpackhi_epi64(t0, t1);

		t0 = _mm256_srli_epi32(_mm256_and_si256(flags2, flags2_mask1),
				       RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT -
				       BNXT_PTYPE_TBL_VLAN_SFT);
		ptype_idx = _mm256_or_si256(ptype_idx, t0);

		t0 = _mm256_srli_epi32(_mm256_and_si256(flags2, flags2_mask2),
				       RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT -
				       BNXT_PTYPE_TBL_IP_VER_SFT);
		ptype_idx = _mm256_or_si256(ptype_idx, t0);
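		/*
		 * At this point ptype_idx combines the hardware ITYPE field
		 * with the VLAN/tunnel and IP-version bits, so a single
		 * bnxt_ptype_table lookup per packet yields the RTE_PTYPE.
		 */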
		/*
		 * Load ptypes for eight packets using gather. Gather
		 * operations have extremely high latency (~19 cycles), so
		 * execution and use of the result should be separated as much
		 * as possible.
		 */
		ptypes = _mm256_i32gather_epi32((int *)bnxt_ptype_table,
						ptype_idx, sizeof(uint32_t));
		/*
		 * Compute ol_flags and checksum error table indices for eight
		 * packets.
		 */
		is_tunnel = _mm256_and_si256(flags2, _mm256_set1_epi32(4));
		is_tunnel = _mm256_slli_epi32(is_tunnel, 3);
		flags2 = _mm256_and_si256(flags2, _mm256_set1_epi32(0x1F));
		/* Extract errors_v2 fields for eight packets. */
		t0 = _mm256_unpackhi_epi32(rxcmp0_1, rxcmp2_3);
		t1 = _mm256_unpackhi_epi32(rxcmp4_5, rxcmp6_7);
		errors_v2 = _mm256_unpacklo_epi64(t0, t1);

		errors = _mm256_srli_epi32(errors_v2, 4);
		errors = _mm256_and_si256(errors, _mm256_set1_epi32(0xF));
		errors = _mm256_and_si256(errors, flags2);

		index = _mm256_andnot_si256(errors, flags2);
		errors = _mm256_or_si256(errors,
					 _mm256_srli_epi32(is_tunnel, 1));
		index = _mm256_or_si256(index, is_tunnel);
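		/*
		 * Here "index" keeps the checksum-calculated bits that did not
		 * report an error and selects the ol_flags table entry, while
		 * "errors" indexes the error table; the shifted tunnel bit
		 * steers both lookups to the tunnel entries of the tables.
		 */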
		/*
		 * Load ol_flags for eight packets using gather. Gather
		 * operations have extremely high latency (~19 cycles), so
		 * execution and use of the result should be separated as much
		 * as possible.
		 */
		ol_flags = _mm256_i32gather_epi32((int *)rxr->ol_flags_table,
						  index, sizeof(uint32_t));
		errors = _mm256_i32gather_epi32((int *)rxr->ol_flags_err_table,
						errors, sizeof(uint32_t));
		/*
		 * Pack the 128-bit array of valid descriptor flags into 64
		 * bits and count the number of set bits in order to determine
		 * the number of valid descriptors.
		 */
		const __m256i perm_msk =
				_mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
		info3_v = _mm256_permutevar8x32_epi32(errors_v2, perm_msk);
		info3_v = _mm256_and_si256(info3_v, info3_v_mask);
		info3_v = _mm256_xor_si256(info3_v, valid_target);

		info3_v = _mm256_packs_epi32(info3_v, _mm256_setzero_si256());
		valid = _mm_cvtsi128_si64(_mm256_extracti128_si256(info3_v, 1));
		valid = (valid << CHAR_BIT) |
			_mm_cvtsi128_si64(_mm256_castsi256_si128(info3_v));
		num_valid = __builtin_popcountll(valid & desc_valid_mask);
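		/*
		 * The permute, pack, and shift steps above reduce each
		 * descriptor's CMPL_BASE_V bit to a single bit of the 64-bit
		 * "valid" word, so one popcount (masked by desc_valid_mask)
		 * yields the number of completions carrying the expected
		 * phase.
		 */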
		/* Update mbuf rearm_data for eight packets. */
		mbuf01 = _mm256_shuffle_epi8(rxcmp0_1, shuf_msk);
		mbuf23 = _mm256_shuffle_epi8(rxcmp2_3, shuf_msk);
		mbuf45 = _mm256_shuffle_epi8(rxcmp4_5, shuf_msk);
		mbuf67 = _mm256_shuffle_epi8(rxcmp6_7, shuf_msk);

		/* Blend in ptype field for two mbufs at a time. */
		mbuf01 = _mm256_blend_epi32(mbuf01, ptypes, 0x11);
		mbuf23 = _mm256_blend_epi32(mbuf23,
					    _mm256_srli_si256(ptypes, 4), 0x11);
		mbuf45 = _mm256_blend_epi32(mbuf45,
					    _mm256_srli_si256(ptypes, 8), 0x11);
		mbuf67 = _mm256_blend_epi32(mbuf67,
					    _mm256_srli_si256(ptypes, 12), 0x11);
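		/*
		 * Blend mask 0x11 above selects dword 0 of each 128-bit lane,
		 * and each 4-byte shift of "ptypes" moves the next packet's
		 * ptype into that position, writing one ptype per mbuf half.
		 */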
		/* Unpack rearm data, set fixed fields for first four mbufs. */
		rearm0 = _mm256_permute2f128_si256(mbuf_init, mbuf01, 0x20);
		rearm1 = _mm256_blend_epi32(mbuf_init, mbuf01, 0xF0);
		rearm2 = _mm256_permute2f128_si256(mbuf_init, mbuf23, 0x20);
		rearm3 = _mm256_blend_epi32(mbuf_init, mbuf23, 0xF0);
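		/*
		 * rearm_data and rx_descriptor_fields1 are written with one
		 * 32-byte store per mbuf: the permute (selector 0x20) above
		 * pairs the constant mbuf_init lane with the even packet's
		 * fields, and the blend (mask 0xF0) pairs it with the odd
		 * packet's fields.
		 */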
		/* Compute final ol_flags values for eight packets. */
		rss_flags = _mm256_and_si256(flags_type, rss_mask);
		rss_flags = _mm256_srli_epi32(rss_flags, 9);
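		/*
		 * RX_PKT_CMPL_FLAGS_RSS_VALID is bit 10 of flags_type and
		 * RTE_MBUF_F_RX_RSS_HASH is bit 1, so the shift by 9 above
		 * converts the hardware flag directly into the mbuf flag.
		 */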
		ol_flags = _mm256_or_si256(ol_flags, errors);
		ol_flags = _mm256_or_si256(ol_flags, rss_flags);
		ol_flags_hi = _mm256_permute2f128_si256(ol_flags,
							ol_flags, 0x11);
		/* Set ol_flags fields for first four packets. */
		rearm0 = _mm256_blend_epi32(rearm0,
					    _mm256_slli_si256(ol_flags, 8),
					    0x04);
		rearm1 = _mm256_blend_epi32(rearm1,
					    _mm256_slli_si256(ol_flags_hi, 8),
					    0x04);
		rearm2 = _mm256_blend_epi32(rearm2,
					    _mm256_slli_si256(ol_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(rearm3,
					    _mm256_slli_si256(ol_flags_hi, 4),
					    0x04);
		/* Store all mbuf fields for first four packets. */
		_mm256_storeu_si256((void *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);
		_mm256_storeu_si256((void *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);
		_mm256_storeu_si256((void *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((void *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);
		/* Unpack rearm data, set fixed fields for final four mbufs. */
		rearm4 = _mm256_permute2f128_si256(mbuf_init, mbuf45, 0x20);
		rearm5 = _mm256_blend_epi32(mbuf_init, mbuf45, 0xF0);
		rearm6 = _mm256_permute2f128_si256(mbuf_init, mbuf67, 0x20);
		rearm7 = _mm256_blend_epi32(mbuf_init, mbuf67, 0xF0);
		/* Set ol_flags fields for final four packets. */
		rearm4 = _mm256_blend_epi32(rearm4, ol_flags, 0x04);
		rearm5 = _mm256_blend_epi32(rearm5, ol_flags_hi, 0x04);
		rearm6 = _mm256_blend_epi32(rearm6,
					    _mm256_srli_si256(ol_flags, 4),
					    0x04);
		rearm7 = _mm256_blend_epi32(rearm7,
					    _mm256_srli_si256(ol_flags_hi, 4),
					    0x04);
		/* Store all mbuf fields for final four packets. */
		_mm256_storeu_si256((void *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((void *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((void *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((void *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);
		nb_rx_pkts += num_valid;
		if (num_valid < BNXT_RX_DESCS_PER_LOOP_VEC256)
			break;
	}

	rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);

	rxq->rxrearm_nb += nb_rx_pkts;
	cpr->cp_raw_cons += 2 * nb_rx_pkts;

	return nb_rx_pkts;
}
uint16_t
bnxt_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	uint16_t cnt = 0;

	while (nb_pkts > RTE_BNXT_MAX_RX_BURST) {
		uint16_t burst;

		burst = recv_burst_vec_avx2(rx_queue, rx_pkts + cnt,
					    RTE_BNXT_MAX_RX_BURST);

		cnt += burst;
		nb_pkts -= burst;

		if (burst < RTE_BNXT_MAX_RX_BURST)
			return cnt;
	}
	return cnt + recv_burst_vec_avx2(rx_queue, rx_pkts + cnt, nb_pkts);
}
static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

		if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
			break;
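		/*
		 * The opaque field echoes the burst size written by the
		 * transmit path, so summing it yields the number of packets
		 * completed rather than the number of completion entries.
		 */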
		nb_tx_pkts += txcmp->opaque;
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_tx_pkts < ring_mask);
	if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
	else
		bnxt_tx_cmp_vec(txq, nb_tx_pkts);
	cpr->cp_raw_cons = raw_cons;
}
static inline void
bnxt_xmit_one(struct rte_mbuf *mbuf, struct tx_bd_long *txbd,
	      struct rte_mbuf **tx_buf)
{
	uint64_t dsc_hi, dsc_lo;
	__m128i desc;

	*tx_buf = mbuf;
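	/*
	 * Build the 16-byte long TX BD: the low quadword carries flags_type
	 * (including the length-hint bits from bnxt_xmit_flags_len) in bits
	 * 0-15 and data_len in bits 16-31, and the high quadword is the
	 * buffer's DMA address.
	 */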
	dsc_hi = mbuf->buf_iova + mbuf->data_off;
	dsc_lo = (mbuf->data_len << 16) |
		 bnxt_xmit_flags_len(mbuf->data_len, TX_BD_FLAGS_NOCMPL);

	desc = _mm_set_epi64x(dsc_hi, dsc_lo);
	_mm_store_si128((void *)txbd, desc);
}
static uint16_t
bnxt_xmit_fixed_burst_vec(struct bnxt_tx_queue *txq, struct rte_mbuf **pkts,
			  uint16_t nb_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
	struct tx_bd_long *txbd;
	struct rte_mbuf **tx_buf;
	uint16_t to_send;
	tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
	txbd = &txr->tx_desc_ring[tx_prod];
	tx_buf = &txr->tx_buf_ring[tx_prod];

	/* Prefetch next transmit buffer descriptors. */
	rte_prefetch0(txbd + 3);
	nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

	if (unlikely(nb_pkts == 0))
		return 0;
	/* Handle TX burst request */
	to_send = nb_pkts;

	/*
	 * If the current descriptor is not on a 32-byte boundary, send one
	 * packet to align for 32-byte stores.
	 */
	if (tx_prod & 1) {
		bnxt_xmit_one(pkts[0], txbd++, tx_buf++);
		to_send--;
		pkts++;
	}
	/*
	 * Send four packets per loop, with a single store for each pair
	 * of descriptors.
	 */
	while (to_send >= BNXT_TX_DESCS_PER_LOOP) {
		uint64_t dsc0_hi, dsc0_lo, dsc1_hi, dsc1_lo;
		uint64_t dsc2_hi, dsc2_lo, dsc3_hi, dsc3_lo;
		__m256i dsc01, dsc23;

		/* Prefetch next transmit buffer descriptors. */
		rte_prefetch0(txbd + 4);
		rte_prefetch0(txbd + 7);
		/* Copy four mbuf pointers to tx buf ring. */
#ifdef RTE_ARCH_X86_64
		__m256i tmp = _mm256_loadu_si256((void *)pkts);
		_mm256_storeu_si256((void *)tx_buf, tmp);
#else
		__m128i tmp = _mm_loadu_si128((void *)pkts);
		_mm_storeu_si128((void *)tx_buf, tmp);
#endif
		dsc0_hi = tx_buf[0]->buf_iova + tx_buf[0]->data_off;
		dsc0_lo = (tx_buf[0]->data_len << 16) |
			  bnxt_xmit_flags_len(tx_buf[0]->data_len,
					      TX_BD_FLAGS_NOCMPL);
		dsc1_hi = tx_buf[1]->buf_iova + tx_buf[1]->data_off;
		dsc1_lo = (tx_buf[1]->data_len << 16) |
			  bnxt_xmit_flags_len(tx_buf[1]->data_len,
					      TX_BD_FLAGS_NOCMPL);

		dsc01 = _mm256_set_epi64x(dsc1_hi, dsc1_lo, dsc0_hi, dsc0_lo);
		dsc2_hi = tx_buf[2]->buf_iova + tx_buf[2]->data_off;
		dsc2_lo = (tx_buf[2]->data_len << 16) |
			  bnxt_xmit_flags_len(tx_buf[2]->data_len,
					      TX_BD_FLAGS_NOCMPL);
		dsc3_hi = tx_buf[3]->buf_iova + tx_buf[3]->data_off;
		dsc3_lo = (tx_buf[3]->data_len << 16) |
			  bnxt_xmit_flags_len(tx_buf[3]->data_len,
					      TX_BD_FLAGS_NOCMPL);

		dsc23 = _mm256_set_epi64x(dsc3_hi, dsc3_lo, dsc2_hi, dsc2_lo);
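		/*
		 * Each 32-byte store below writes a pair of 16-byte BDs; the
		 * alignment fix-up ahead of this loop keeps txbd 32-byte
		 * aligned so the aligned stores are safe.
		 */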
		_mm256_store_si256((void *)txbd, dsc01);
		_mm256_store_si256((void *)(txbd + 2), dsc23);
		to_send -= BNXT_TX_DESCS_PER_LOOP;
		pkts += BNXT_TX_DESCS_PER_LOOP;
		txbd += BNXT_TX_DESCS_PER_LOOP;
		tx_buf += BNXT_TX_DESCS_PER_LOOP;
	}
	/* Send any remaining packets, writing each descriptor individually. */
	while (to_send) {
		bnxt_xmit_one(pkts[0], txbd++, tx_buf++);
		to_send--;
		pkts++;
	}
	/* Request a completion for the final packet of the burst. */
	txbd[-1].opaque = nb_pkts;
	txbd[-1].flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
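	/*
	 * All BDs above were built with completions disabled
	 * (TX_BD_FLAGS_NOCMPL); clearing the bit on the last BD requests a
	 * single completion for the whole burst, and its opaque value lets
	 * the completion handler credit nb_pkts packets at once.
	 */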
	tx_raw_prod += nb_pkts;
	bnxt_db_write(&txr->tx_db, tx_raw_prod);

	txr->tx_raw_prod = tx_raw_prod;

	return nb_pkts;
}
uint16_t
bnxt_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	int nb_sent = 0;
	struct bnxt_tx_queue *txq = tx_queue;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t ring_size = txr->tx_ring_struct->ring_size;
	/* Tx queue was stopped; wait for it to be restarted */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
		return 0;
	}
	/* Handle TX completions */
	if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
		bnxt_handle_tx_cp_vec(txq);
	while (nb_pkts) {
		uint16_t ret, num;

		/*
		 * Ensure that no more than RTE_BNXT_MAX_TX_BURST packets
		 * are transmitted before the next completion.
		 */
		num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);
		/*
		 * Ensure that a ring wrap does not occur within a call to
		 * bnxt_xmit_fixed_burst_vec().
		 */
		num = RTE_MIN(num, ring_size -
				   (txr->tx_raw_prod & (ring_size - 1)));
		ret = bnxt_xmit_fixed_burst_vec(txq, &tx_pkts[nb_sent], num);
		nb_sent += ret;
		nb_pkts -= ret;
		if (ret == 0)
			break;
	}

	return nb_sent;
}