/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2021 Broadcom All rights reserved. */

#include <inttypes.h>

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_rxr.h"

#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_rxtx_vec_common.h"

static uint16_t
recv_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	const __m256i mbuf_init =
		_mm256_set_epi64x(0, 0, 0, rxq->mbuf_initializer);
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
	uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	uint64_t valid, desc_valid_mask = ~0ULL;
	const __m256i info3_v_mask = _mm256_set1_epi32(CMPL_BASE_V);
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons, mbcons;
	int nb_rx_pkts = 0;
	int i;
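	/*
	 * The completion record V bit alternates polarity on each pass
	 * through the completion ring; valid_target holds the polarity for
	 * the current pass so that completions left over from the previous
	 * pass are not mistaken for new ones.
	 */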
	const __m256i valid_target =
		_mm256_set1_epi32(!!(raw_cons & cp_ring_size));
	const __m256i dsc_shuf_msk =
		_mm256_set_epi8(0xff, 0xff, 0xff, 0xff, /* Zeroes. */
				7, 6,                   /* metadata type */
				9, 8,                   /* errors_v2 */
				5, 4,                   /* vlan_tci */
				1, 0,                   /* flags2 low 16 */
				0xff, 0xff, 0xff, 0xff, /* Zeroes. */
				0xff, 0xff, 0xff, 0xff, /* Zeroes. */
				7, 6,                   /* metadata type */
				9, 8,                   /* errors_v2 */
				5, 4,                   /* vlan_tci */
				1, 0,                   /* flags2 low 16 */
				0xff, 0xff, 0xff, 0xff); /* Zeroes. */
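	/*
	 * Note: _mm256_set_epi8() lists bytes from most- to least-
	 * significant, and a 0xff selector byte zeroes the corresponding
	 * output byte, so each 16-byte lane above compresses one high
	 * completion record while leaving holes for the fields blended in
	 * from the low completion record.
	 */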
	const __m256i shuf_msk =
		_mm256_set_epi8(15, 14, 13, 12,          /* rss */
				7, 6,                    /* vlan_tci */
				3, 2,                    /* data_len */
				0xFF, 0xFF, 3, 2,        /* pkt_len */
				0xFF, 0xFF, 0xFF, 0xFF,  /* pkt_type (zeroes) */
				15, 14, 13, 12,          /* rss */
				7, 6,                    /* vlan_tci */
				3, 2,                    /* data_len */
				0xFF, 0xFF, 3, 2,        /* pkt_len */
				0xFF, 0xFF, 0xFF, 0xFF); /* pkt_type (zeroes) */
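	/*
	 * shuf_msk rearranges each compressed descriptor into the layout of
	 * the mbuf rx_descriptor_fields1 area (pkt_type, pkt_len, data_len,
	 * vlan_tci, rss hash), so one shuffle plus one blend fills the
	 * descriptor fields of two mbufs at a time.
	 */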
	const __m256i flags_type_mask =
		_mm256_set1_epi32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
	const __m256i flags2_mask1 =
		_mm256_set1_epi32(CMPL_FLAGS2_VLAN_TUN_MSK);
	const __m256i flags2_mask2 =
		_mm256_set1_epi32(RX_PKT_CMPL_FLAGS2_IP_TYPE);
	const __m256i rss_mask =
		_mm256_set1_epi32(RX_PKT_CMPL_FLAGS_RSS_VALID);
	__m256i t0, t1, flags_type, flags2, index, errors;
	__m256i ptype_idx, ptypes, is_tunnel;
	__m256i mbuf01, mbuf23, mbuf45, mbuf67;
	__m256i rearm0, rearm1, rearm2, rearm3, rearm4, rearm5, rearm6, rearm7;
	__m256i ol_flags, ol_flags_hi;
	__m256i rss_flags;

	/* Validate ptype table indexing at build time. */
	bnxt_check_ptype_constants();

	/* If Rx Q was stopped return */
	if (unlikely(!rxq->rx_started))
		return 0;

	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
		bnxt_rxq_rearm(rxq, rxr);

	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, BNXT_RX_DESCS_PER_LOOP_VEC256);

	cons = raw_cons & (cp_ring_size - 1);
	mbcons = (raw_cons / 2) & (rx_ring_size - 1);
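
	/*
	 * Each received packet consumes two completion ring entries (a low
	 * and a high record), so the completion ring consumer index advances
	 * twice per packet while the mbuf ring consumer advances once.
	 */
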
	/* Prefetch first four descriptor pairs. */
	rte_prefetch0(&cp_desc_ring[cons + 0]);
	rte_prefetch0(&cp_desc_ring[cons + 4]);
	rte_prefetch0(&cp_desc_ring[cons + 8]);
	rte_prefetch0(&cp_desc_ring[cons + 12]);

	/* Ensure that we do not go past the ends of the rings. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
					   (cp_ring_size - cons) / 2));
	/*
	 * If we are at the end of the ring, ensure that descriptors after the
	 * last valid entry are not treated as valid. Otherwise, force the
	 * maximum number of packets to receive to be a multiple of the per-
	 * loop count.
	 */
	if (nb_pkts < BNXT_RX_DESCS_PER_LOOP_VEC256) {
		desc_valid_mask >>=
			CHAR_BIT * (BNXT_RX_DESCS_PER_LOOP_VEC256 - nb_pkts);
	} else {
		nb_pkts =
			RTE_ALIGN_FLOOR(nb_pkts, BNXT_RX_DESCS_PER_LOOP_VEC256);
	}

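	/*
	 * The valid flags are later packed one per byte into a 64-bit word,
	 * so desc_valid_mask reserves CHAR_BIT mask bits per descriptor;
	 * shortening it here keeps descriptors beyond the clamped burst out
	 * of the valid-descriptor popcount.
	 */
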
	/* Handle RX burst request */
	for (i = 0; i < nb_pkts; i += BNXT_RX_DESCS_PER_LOOP_VEC256,
	     cons += BNXT_RX_DESCS_PER_LOOP_VEC256 * 2,
	     mbcons += BNXT_RX_DESCS_PER_LOOP_VEC256) {
		__m256i desc0, desc1, desc2, desc3, desc4, desc5, desc6, desc7;
		__m256i rxcmp0_1, rxcmp2_3, rxcmp4_5, rxcmp6_7, info3_v;
		__m256i errors_v2;
		uint32_t num_valid;

		/* Copy eight mbuf pointers to output array. */
		t0 = _mm256_loadu_si256((void *)&rxr->rx_buf_ring[mbcons]);
		_mm256_storeu_si256((void *)&rx_pkts[i], t0);
#ifdef RTE_ARCH_X86_64
		t0 = _mm256_loadu_si256((void *)&rxr->rx_buf_ring[mbcons + 4]);
		_mm256_storeu_si256((void *)&rx_pkts[i + 4], t0);
#endif
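		/*
		 * On 32-bit builds, mbuf pointers are four bytes wide, so the
		 * first 256-bit copy above already moves all eight pointers;
		 * the second copy is needed only for 64-bit pointers.
		 */
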
		/* Prefetch eight descriptor pairs for next iteration. */
		if (i + BNXT_RX_DESCS_PER_LOOP_VEC256 < nb_pkts) {
			rte_prefetch0(&cp_desc_ring[cons + 16]);
			rte_prefetch0(&cp_desc_ring[cons + 20]);
			rte_prefetch0(&cp_desc_ring[cons + 24]);
			rte_prefetch0(&cp_desc_ring[cons + 28]);
		}

		/*
		 * Load eight receive completion descriptors into 256-bit
		 * registers. Loads are issued in reverse order to ensure
		 * consistent state.
		 */
		desc7 = _mm256_load_si256((void *)&cp_desc_ring[cons + 14]);
		rte_compiler_barrier();
		desc6 = _mm256_load_si256((void *)&cp_desc_ring[cons + 12]);
		rte_compiler_barrier();
		desc5 = _mm256_load_si256((void *)&cp_desc_ring[cons + 10]);
		rte_compiler_barrier();
		desc4 = _mm256_load_si256((void *)&cp_desc_ring[cons + 8]);
		rte_compiler_barrier();
		desc3 = _mm256_load_si256((void *)&cp_desc_ring[cons + 6]);
		rte_compiler_barrier();
		desc2 = _mm256_load_si256((void *)&cp_desc_ring[cons + 4]);
		rte_compiler_barrier();
		desc1 = _mm256_load_si256((void *)&cp_desc_ring[cons + 2]);
		rte_compiler_barrier();
		desc0 = _mm256_load_si256((void *)&cp_desc_ring[cons + 0]);
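
		/*
		 * Hardware completes descriptors in ascending order. Reading
		 * them in descending order, with compiler barriers to keep
		 * the loads from being reordered, guarantees that if a given
		 * descriptor is observed valid, every lower-numbered
		 * descriptor (loaded after it) is observed valid as well.
		 */
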
		/*
		 * Pack needed fields from each descriptor into a compressed
		 * 128-bit layout and pair two compressed descriptors into
		 * 256-bit registers. The 128-bit compressed layout is as
		 * follows:
		 * Bits 0-15: flags_type field from low completion record.
		 * Bits 16-31: len field from low completion record.
		 * Bits 32-47: flags2 (low 16 bits) from high completion.
		 * Bits 48-63: metadata (vlan_tci) from high completion.
		 * Bits 64-79: errors_v2 from high completion record.
		 * Bits 80-95: metadata type from high completion record.
		 * Bits 96-127: rss hash from low completion record.
		 */
		t0 = _mm256_permute2f128_si256(desc6, desc7, 0x20);
		t1 = _mm256_permute2f128_si256(desc6, desc7, 0x31);
		t1 = _mm256_shuffle_epi8(t1, dsc_shuf_msk);
		rxcmp6_7 = _mm256_blend_epi32(t0, t1, 0x66);

		t0 = _mm256_permute2f128_si256(desc4, desc5, 0x20);
		t1 = _mm256_permute2f128_si256(desc4, desc5, 0x31);
		t1 = _mm256_shuffle_epi8(t1, dsc_shuf_msk);
		rxcmp4_5 = _mm256_blend_epi32(t0, t1, 0x66);

		t0 = _mm256_permute2f128_si256(desc2, desc3, 0x20);
		t1 = _mm256_permute2f128_si256(desc2, desc3, 0x31);
		t1 = _mm256_shuffle_epi8(t1, dsc_shuf_msk);
		rxcmp2_3 = _mm256_blend_epi32(t0, t1, 0x66);

		t0 = _mm256_permute2f128_si256(desc0, desc1, 0x20);
		t1 = _mm256_permute2f128_si256(desc0, desc1, 0x31);
		t1 = _mm256_shuffle_epi8(t1, dsc_shuf_msk);
		rxcmp0_1 = _mm256_blend_epi32(t0, t1, 0x66);
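
		/*
		 * For each pair: permute 0x20 gathers the two low completion
		 * records, permute 0x31 the two high records; the shuffle
		 * compresses the high-record fields, and blend mask 0x66
		 * (dwords 1 and 2 of each lane) merges them into the gap
		 * between the low record's flags_type/len and rss hash.
		 */
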
		/* Compute packet type table indices for eight packets. */
		t0 = _mm256_unpacklo_epi32(rxcmp0_1, rxcmp2_3);
		t1 = _mm256_unpacklo_epi32(rxcmp4_5, rxcmp6_7);
		flags_type = _mm256_unpacklo_epi64(t0, t1);
		ptype_idx = _mm256_and_si256(flags_type, flags_type_mask);
		ptype_idx = _mm256_srli_epi32(ptype_idx,
					      RX_PKT_CMPL_FLAGS_ITYPE_SFT -
					      BNXT_PTYPE_TBL_TYPE_SFT);

		t0 = _mm256_unpacklo_epi32(rxcmp0_1, rxcmp2_3);
		t1 = _mm256_unpacklo_epi32(rxcmp4_5, rxcmp6_7);
		flags2 = _mm256_unpackhi_epi64(t0, t1);

		t0 = _mm256_srli_epi32(_mm256_and_si256(flags2, flags2_mask1),
				       RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT -
				       BNXT_PTYPE_TBL_VLAN_SFT);
		ptype_idx = _mm256_or_si256(ptype_idx, t0);

		t0 = _mm256_srli_epi32(_mm256_and_si256(flags2, flags2_mask2),
				       RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT -
				       BNXT_PTYPE_TBL_IP_VER_SFT);
		ptype_idx = _mm256_or_si256(ptype_idx, t0);
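
		/*
		 * The ptype table index combines the hardware item type with
		 * the VLAN and IP-version bits: each field is shifted from
		 * its completion-record position straight into its slot in
		 * the table index rather than being decoded per packet.
		 */
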
		/*
		 * Load ptypes for eight packets using gather. Gather
		 * operations have extremely high latency (~19 cycles), so
		 * execution and use of the result should be separated as
		 * much as possible.
		 */
		ptypes = _mm256_i32gather_epi32((int *)bnxt_ptype_table,
						ptype_idx, sizeof(uint32_t));

		/*
		 * Compute ol_flags and checksum error table indices for eight
		 * packets.
		 */
		is_tunnel = _mm256_and_si256(flags2, _mm256_set1_epi32(4));
		is_tunnel = _mm256_slli_epi32(is_tunnel, 3);
		flags2 = _mm256_and_si256(flags2, _mm256_set1_epi32(0x1F));

		/* Extract errors_v2 fields for eight packets. */
		t0 = _mm256_unpackhi_epi32(rxcmp0_1, rxcmp2_3);
		t1 = _mm256_unpackhi_epi32(rxcmp4_5, rxcmp6_7);
		errors_v2 = _mm256_unpacklo_epi64(t0, t1);

		errors = _mm256_srli_epi32(errors_v2, 4);
		errors = _mm256_and_si256(errors, _mm256_set1_epi32(0xF));
		errors = _mm256_and_si256(errors, flags2);

		index = _mm256_andnot_si256(errors, flags2);
		errors = _mm256_or_si256(errors,
					 _mm256_srli_epi32(is_tunnel, 1));
		index = _mm256_or_si256(index, is_tunnel);
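
		/*
		 * flags2 now holds the per-packet checksum-calculated bits
		 * and errors the corresponding checksum-error bits. Masking
		 * errors with flags2 ignores error bits for checksums the
		 * hardware did not compute, and the andnot leaves an index
		 * of good checksums for the ol_flags table lookup.
		 */
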
		/*
		 * Load ol_flags for eight packets using gather. Gather
		 * operations have extremely high latency (~19 cycles), so
		 * execution and use of the result should be separated as
		 * much as possible.
		 */
		ol_flags = _mm256_i32gather_epi32((int *)rxr->ol_flags_table,
						  index, sizeof(uint32_t));
		errors = _mm256_i32gather_epi32((int *)rxr->ol_flags_err_table,
						errors, sizeof(uint32_t));

		/*
		 * Pack the 128-bit array of valid descriptor flags into 64
		 * bits and count the number of set bits in order to determine
		 * the number of valid descriptors.
		 */
		const __m256i perm_msk =
			_mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
		info3_v = _mm256_permutevar8x32_epi32(errors_v2, perm_msk);
		info3_v = _mm256_and_si256(info3_v, info3_v_mask);
		info3_v = _mm256_xor_si256(info3_v, valid_target);

		info3_v = _mm256_packs_epi32(info3_v, _mm256_setzero_si256());
		valid = _mm_cvtsi128_si64(_mm256_extracti128_si256(info3_v, 1));
		valid = (valid << CHAR_BIT) |
			_mm_cvtsi128_si64(_mm256_castsi256_si128(info3_v));
		num_valid = __builtin_popcountll(valid & desc_valid_mask);

		if (num_valid == 0)
			break;

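		/*
		 * The earlier unpacks interleave even- and odd-numbered
		 * descriptors, so the permute first restores packet order.
		 * The AND isolates each completion's V bit and the XOR
		 * compares it against the expected polarity for this pass
		 * through the ring; the popcount of the packed result is the
		 * number of descriptors hardware has actually completed.
		 */
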
		/* Update mbuf rearm_data for eight packets. */
		mbuf01 = _mm256_shuffle_epi8(rxcmp0_1, shuf_msk);
		mbuf23 = _mm256_shuffle_epi8(rxcmp2_3, shuf_msk);
		mbuf45 = _mm256_shuffle_epi8(rxcmp4_5, shuf_msk);
		mbuf67 = _mm256_shuffle_epi8(rxcmp6_7, shuf_msk);

		/* Blend in ptype field for two mbufs at a time. */
		mbuf01 = _mm256_blend_epi32(mbuf01, ptypes, 0x11);
		mbuf23 = _mm256_blend_epi32(mbuf23,
					    _mm256_srli_si256(ptypes, 4), 0x11);
		mbuf45 = _mm256_blend_epi32(mbuf45,
					    _mm256_srli_si256(ptypes, 8), 0x11);
		mbuf67 = _mm256_blend_epi32(mbuf67,
					    _mm256_srli_si256(ptypes, 12), 0x11);

		/* Unpack rearm data, set fixed fields for first four mbufs. */
		rearm0 = _mm256_permute2f128_si256(mbuf_init, mbuf01, 0x20);
		rearm1 = _mm256_blend_epi32(mbuf_init, mbuf01, 0xF0);
		rearm2 = _mm256_permute2f128_si256(mbuf_init, mbuf23, 0x20);
		rearm3 = _mm256_blend_epi32(mbuf_init, mbuf23, 0xF0);
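
		/*
		 * Each rearm register covers one mbuf's rearm_data plus
		 * rx_descriptor_fields1: the low lane comes from mbuf_init
		 * (the fixed rearm fields) and the high lane from the
		 * shuffled descriptor, via a lane permute for the even mbufs
		 * and a dword blend for the odd ones.
		 */
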
		/* Compute final ol_flags values for eight packets. */
		rss_flags = _mm256_and_si256(flags_type, rss_mask);
		rss_flags = _mm256_srli_epi32(rss_flags, 9);
		ol_flags = _mm256_or_si256(ol_flags, errors);
		ol_flags = _mm256_or_si256(ol_flags, rss_flags);
		ol_flags_hi = _mm256_permute2f128_si256(ol_flags,
							ol_flags, 0x11);

		/* Set ol_flags fields for first four packets. */
		rearm0 = _mm256_blend_epi32(rearm0,
					    _mm256_slli_si256(ol_flags, 8),
					    0x04);
		rearm1 = _mm256_blend_epi32(rearm1,
					    _mm256_slli_si256(ol_flags_hi, 8),
					    0x04);
		rearm2 = _mm256_blend_epi32(rearm2,
					    _mm256_slli_si256(ol_flags, 4),
					    0x04);
		rearm3 = _mm256_blend_epi32(rearm3,
					    _mm256_slli_si256(ol_flags_hi, 4),
					    0x04);

		/* Store all mbuf fields for first four packets. */
		_mm256_storeu_si256((void *)&rx_pkts[i + 0]->rearm_data,
				    rearm0);
		_mm256_storeu_si256((void *)&rx_pkts[i + 1]->rearm_data,
				    rearm1);
		_mm256_storeu_si256((void *)&rx_pkts[i + 2]->rearm_data,
				    rearm2);
		_mm256_storeu_si256((void *)&rx_pkts[i + 3]->rearm_data,
				    rearm3);

		/* Unpack rearm data, set fixed fields for final four mbufs. */
		rearm4 = _mm256_permute2f128_si256(mbuf_init, mbuf45, 0x20);
		rearm5 = _mm256_blend_epi32(mbuf_init, mbuf45, 0xF0);
		rearm6 = _mm256_permute2f128_si256(mbuf_init, mbuf67, 0x20);
		rearm7 = _mm256_blend_epi32(mbuf_init, mbuf67, 0xF0);

		/* Set ol_flags fields for final four packets. */
		rearm4 = _mm256_blend_epi32(rearm4, ol_flags, 0x04);
		rearm5 = _mm256_blend_epi32(rearm5, ol_flags_hi, 0x04);
		rearm6 = _mm256_blend_epi32(rearm6,
					    _mm256_srli_si256(ol_flags, 4),
					    0x04);
		rearm7 = _mm256_blend_epi32(rearm7,
					    _mm256_srli_si256(ol_flags_hi, 4),
					    0x04);

		/* Store all mbuf fields for final four packets. */
		_mm256_storeu_si256((void *)&rx_pkts[i + 4]->rearm_data,
				    rearm4);
		_mm256_storeu_si256((void *)&rx_pkts[i + 5]->rearm_data,
				    rearm5);
		_mm256_storeu_si256((void *)&rx_pkts[i + 6]->rearm_data,
				    rearm6);
		_mm256_storeu_si256((void *)&rx_pkts[i + 7]->rearm_data,
				    rearm7);

		nb_rx_pkts += num_valid;
		if (num_valid < BNXT_RX_DESCS_PER_LOOP_VEC256)
			break;
	}

	if (nb_rx_pkts) {
		rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);

		rxq->rxrearm_nb += nb_rx_pkts;
		cpr->cp_raw_cons += 2 * nb_rx_pkts;
		/* Ring the completion queue doorbell. */
		bnxt_db_cq(cpr);
	}

	return nb_rx_pkts;
}

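/*
 * Burst-mode wrapper: recv_burst_vec_avx2() handles at most
 * RTE_BNXT_MAX_RX_BURST packets per call, so larger requests are split into
 * successive bursts. A short burst means the completion ring is drained, so
 * the loop ends early rather than polling again.
 */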
uint16_t
bnxt_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts)
{
	uint16_t cnt = 0;

	while (nb_pkts > RTE_BNXT_MAX_RX_BURST) {
		uint16_t burst;

		burst = recv_burst_vec_avx2(rx_queue, rx_pkts + cnt,
					    RTE_BNXT_MAX_RX_BURST);

		cnt += burst;
		nb_pkts -= burst;

		if (burst < RTE_BNXT_MAX_RX_BURST)
			return cnt;
	}
	return cnt + recv_burst_vec_avx2(rx_queue, rx_pkts + cnt, nb_pkts);
}

static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

		if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
			break;

		nb_tx_pkts += txcmp->opaque;
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_tx_pkts < ring_mask);

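	/*
	 * The opaque field of each transmit completion echoes the burst size
	 * stored in the final descriptor of a burst (see
	 * bnxt_xmit_fixed_burst_vec()), so summing it counts completed
	 * packets directly.
	 */
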
	if (nb_tx_pkts) {
		if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
		else
			bnxt_tx_cmp_vec(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
	}
}

static inline void
bnxt_xmit_one(struct rte_mbuf *mbuf, struct tx_bd_long *txbd,
	      struct rte_mbuf **tx_buf)
{
	uint64_t dsc_hi, dsc_lo;
	__m128i desc;

	*tx_buf = mbuf;

	dsc_hi = mbuf->buf_iova + mbuf->data_off;
	dsc_lo = (mbuf->data_len << 16) |
		 bnxt_xmit_flags_len(mbuf->data_len, TX_BD_FLAGS_NOCMPL);

	desc = _mm_set_epi64x(dsc_hi, dsc_lo);
	_mm_store_si128((void *)txbd, desc);
}

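/*
 * A long transmit BD is 16 bytes: the low quadword holds the flags/type and
 * length, the high quadword the buffer's DMA address, so a single 128-bit
 * store writes the whole descriptor. TX_BD_FLAGS_NOCMPL suppresses a
 * completion for this descriptor; the flag is cleared later on the final
 * descriptor of the burst.
 */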
static uint16_t
bnxt_xmit_fixed_burst_vec(struct bnxt_tx_queue *txq, struct rte_mbuf **pkts,
			  uint16_t nb_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
	struct tx_bd_long *txbd;
	struct rte_mbuf **tx_buf;
	uint16_t to_send;

	tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
	txbd = &txr->tx_desc_ring[tx_prod];
	tx_buf = &txr->tx_buf_ring[tx_prod];

	/* Prefetch next transmit buffer descriptors. */
	rte_prefetch0(txbd);
	rte_prefetch0(txbd + 3);

	nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

	if (unlikely(nb_pkts == 0))
		return 0;

	/* Handle TX burst request */
	to_send = nb_pkts;

	/*
	 * If current descriptor is not on a 32-byte boundary, send one packet
	 * to align for 32-byte stores.
	 */
	if (tx_prod & 1) {
		bnxt_xmit_one(pkts[0], txbd++, tx_buf++);
		to_send--;
		pkts++;
	}
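
	/*
	 * Descriptors are 16 bytes, so an odd producer index means the next
	 * descriptor is only 16-byte aligned; emitting one descriptor
	 * individually realigns txbd for the aligned 32-byte paired stores
	 * in the main loop.
	 */
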
	/*
	 * Send four packets per loop, with a single store for each pair
	 * of descriptors.
	 */
	while (to_send >= BNXT_TX_DESCS_PER_LOOP) {
		uint64_t dsc0_hi, dsc0_lo, dsc1_hi, dsc1_lo;
		uint64_t dsc2_hi, dsc2_lo, dsc3_hi, dsc3_lo;
		__m256i dsc01, dsc23;

		/* Prefetch next transmit buffer descriptors. */
		rte_prefetch0(txbd + 4);
		rte_prefetch0(txbd + 7);

		/* Copy four mbuf pointers to tx buf ring. */
#ifdef RTE_ARCH_X86_64
		__m256i tmp = _mm256_loadu_si256((void *)pkts);
		_mm256_storeu_si256((void *)tx_buf, tmp);
#else
		__m128i tmp = _mm_loadu_si128((void *)pkts);
		_mm_storeu_si128((void *)tx_buf, tmp);
#endif

		dsc0_hi = tx_buf[0]->buf_iova + tx_buf[0]->data_off;
		dsc0_lo = (tx_buf[0]->data_len << 16) |
			  bnxt_xmit_flags_len(tx_buf[0]->data_len,
					      TX_BD_FLAGS_NOCMPL);

		dsc1_hi = tx_buf[1]->buf_iova + tx_buf[1]->data_off;
		dsc1_lo = (tx_buf[1]->data_len << 16) |
			  bnxt_xmit_flags_len(tx_buf[1]->data_len,
					      TX_BD_FLAGS_NOCMPL);

		dsc01 = _mm256_set_epi64x(dsc1_hi, dsc1_lo, dsc0_hi, dsc0_lo);

		dsc2_hi = tx_buf[2]->buf_iova + tx_buf[2]->data_off;
		dsc2_lo = (tx_buf[2]->data_len << 16) |
			  bnxt_xmit_flags_len(tx_buf[2]->data_len,
					      TX_BD_FLAGS_NOCMPL);

		dsc3_hi = tx_buf[3]->buf_iova + tx_buf[3]->data_off;
		dsc3_lo = (tx_buf[3]->data_len << 16) |
			  bnxt_xmit_flags_len(tx_buf[3]->data_len,
					      TX_BD_FLAGS_NOCMPL);

		dsc23 = _mm256_set_epi64x(dsc3_hi, dsc3_lo, dsc2_hi, dsc2_lo);

		_mm256_store_si256((void *)txbd, dsc01);
		_mm256_store_si256((void *)(txbd + 2), dsc23);

		to_send -= BNXT_TX_DESCS_PER_LOOP;
		pkts += BNXT_TX_DESCS_PER_LOOP;
		txbd += BNXT_TX_DESCS_PER_LOOP;
		tx_buf += BNXT_TX_DESCS_PER_LOOP;
	}

	/* Send any remaining packets, writing each descriptor individually. */
	while (to_send) {
		bnxt_xmit_one(pkts[0], txbd++, tx_buf++);
		to_send--;
		pkts++;
	}

	/* Request a completion for the final packet of the burst. */
	txbd[-1].opaque = nb_pkts;
	txbd[-1].flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
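
	/*
	 * Storing the burst size in the final descriptor's opaque field lets
	 * the completion handler credit the whole burst from the single
	 * completion this descriptor generates (see bnxt_handle_tx_cp_vec()).
	 */
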
	tx_raw_prod += nb_pkts;
	bnxt_db_write(&txr->tx_db, tx_raw_prod);

	txr->tx_raw_prod = tx_raw_prod;
	return nb_pkts;
}

uint16_t
bnxt_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts)
{
	int nb_sent = 0;
	struct bnxt_tx_queue *txq = tx_queue;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t ring_size = txr->tx_ring_struct->ring_size;

	/* Tx queue was stopped; wait for it to be restarted */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
		return 0;
	}

	/* Handle TX completions */
	if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
		bnxt_handle_tx_cp_vec(txq);

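	/*
	 * bnxt_xmit_fixed_burst_vec() advances its descriptor pointers
	 * linearly without masking the ring index, so each call below is
	 * capped at both RTE_BNXT_MAX_TX_BURST and the number of descriptors
	 * remaining before the end of the ring.
	 */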
	while (nb_pkts) {
		uint16_t ret, num;

		/*
		 * Ensure that no more than RTE_BNXT_MAX_TX_BURST packets
		 * are transmitted before the next completion.
		 */
		num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);

		/*
		 * Ensure that a ring wrap does not occur within a call to
		 * bnxt_xmit_fixed_burst_vec().
		 */
		num = RTE_MIN(num, ring_size -
				   (txr->tx_raw_prod & (ring_size - 1)));
		ret = bnxt_xmit_fixed_burst_vec(txq, &tx_pkts[nb_sent], num);
		nb_sent += ret;
		nb_pkts -= ret;
		if (ret == 0 || ret < num)
			break;
	}

	return nb_sent;
}