/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2020 Broadcom All rights reserved. */

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>

#include "bnxt_ring.h"
#include "bnxt_rxtx_vec_common.h"
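
/*
 * RX ring handling: the helpers below convert Broadcom Rx completion
 * descriptors into rte_mbuf fields four packets at a time using NEON.
 *
 * GET_OL_FLAGS() (next macro) builds the ol_flags word for packet 'pi' from
 * the precomputed RSS-valid flag vector, the ol_flags table index vector and
 * the checksum-error index vector, using the bnxt_ol_flags_table and
 * bnxt_ol_flags_err_table lookup tables (defined outside this file).
 */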
#define GET_OL_FLAGS(rss_flags, ol_idx, errors, pi, ol_flags) \
{ \
        uint32_t tmp, of; \
        of = vgetq_lane_u32((rss_flags), (pi)) | \
                bnxt_ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \
        tmp = vgetq_lane_u32((errors), (pi)); \
        if (tmp) \
                of |= bnxt_ol_flags_err_table[tmp]; \
        (ol_flags) = of; \
}
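
/*
 * GET_DESC_FIELDS() (next macro) assembles the 16-byte rx_descriptor_fields1
 * region for one packet: pkt_len, data_len and RSS hash are shuffled out of
 * the Rx completion with 'shuf_msk', the packet type is looked up in
 * bnxt_ptype_table, and vlan_tci is taken from the second completion word.
 */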
#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pkt_idx, ret) \
{ \
        uint32_t ptype; \
        uint16_t vlan_tci; \
        uint32x4_t r; \
        /* Set mbuf pkt_len, data_len, and rss_hash fields. */ \
        r = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(rxcmp), \
                                            (shuf_msk))); \
        /* Set packet type. */ \
        ptype = bnxt_ptype_table[vgetq_lane_u32((ptype_idx), (pkt_idx))]; \
        r = vsetq_lane_u32(ptype, r, 0); \
        /* Set vlan_tci. */ \
        vlan_tci = vgetq_lane_u32((rxcmp1), 1); \
        r = vreinterpretq_u32_u16(vsetq_lane_u16(vlan_tci, \
                        vreinterpretq_u16_u32(r), 5)); \
        (ret) = r; \
}
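
/*
 * Convert four Rx completion descriptor pairs (rxcmp/rxcmp1) into four
 * initialized mbufs: rearm_data (including ol_flags) and
 * rx_descriptor_fields1 are written for each packet.
 */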
static void
descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
               uint64x2_t mb_init, struct rte_mbuf **mbuf)
{
        const uint8x16_t shuf_msk = {
                0xFF, 0xFF, 0xFF, 0xFF,    /* pkt_type (zeroes) */
                2, 3, 0xFF, 0xFF,          /* pkt_len */
                2, 3,                      /* data_len */
                0xFF, 0xFF,                /* vlan_tci (zeroes) */
                12, 13, 14, 15             /* rss hash */
        };
        const uint32x4_t flags_type_mask = {
                RX_PKT_CMPL_FLAGS_ITYPE_MASK,
                RX_PKT_CMPL_FLAGS_ITYPE_MASK,
                RX_PKT_CMPL_FLAGS_ITYPE_MASK,
                RX_PKT_CMPL_FLAGS_ITYPE_MASK
        };
        const uint32x4_t flags2_mask1 = {
                RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
                        RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
                RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
                        RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
                RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
                        RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
                RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
                        RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
        };
        const uint32x4_t flags2_mask2 = {
                RX_PKT_CMPL_FLAGS2_IP_TYPE,
                RX_PKT_CMPL_FLAGS2_IP_TYPE,
                RX_PKT_CMPL_FLAGS2_IP_TYPE,
                RX_PKT_CMPL_FLAGS2_IP_TYPE
        };
        const uint32x4_t rss_mask = {
                RX_PKT_CMPL_FLAGS_RSS_VALID,
                RX_PKT_CMPL_FLAGS_RSS_VALID,
                RX_PKT_CMPL_FLAGS_RSS_VALID,
                RX_PKT_CMPL_FLAGS_RSS_VALID
        };
        const uint32x4_t flags2_index_mask = {
                0x1F, 0x1F, 0x1F, 0x1F
        };
        const uint32x4_t flags2_error_mask = {
                0xF, 0xF, 0xF, 0xF
        };
        uint32x4_t flags_type, flags2, index, errors, rss_flags;
        uint32x4_t tmp, ptype_idx;
        uint64x2_t t0, t1;
        uint32_t ol_flags;
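
        /*
         * ptype_idx, flags2 and errors are used below as table indexes;
         * 'index' is 'flags2' with the per-packet checksum error bits
         * cleared, so packets with good checksums select the "checksum OK"
         * ol_flags table entries.
         */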
        /* Compute packet type table indexes for four packets */
        t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[0], mm_rxcmp[1]));
        t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[2], mm_rxcmp[3]));

        flags_type = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                        vget_low_u64(t1)));
        ptype_idx =
                vshrq_n_u32(vandq_u32(flags_type, flags_type_mask), 9);

        t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
        t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));

        flags2 = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                    vget_low_u64(t1)));

        ptype_idx = vorrq_u32(ptype_idx,
                        vshrq_n_u32(vandq_u32(flags2, flags2_mask1), 2));
        ptype_idx = vorrq_u32(ptype_idx,
                        vshrq_n_u32(vandq_u32(flags2, flags2_mask2), 7));

        /* Extract RSS valid flags for four packets. */
        rss_flags = vshrq_n_u32(vandq_u32(flags_type, rss_mask), 9);

        flags2 = vandq_u32(flags2, flags2_index_mask);

        /* Extract errors_v2 fields for four packets. */
        t0 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
        t1 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));

        errors = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                    vget_low_u64(t1)));

        /* Compute ol_flags and checksum error indexes for four packets. */
        errors = vandq_u32(vshrq_n_u32(errors, 4), flags2_error_mask);
        errors = vandq_u32(errors, flags2);

        index = vbicq_u32(flags2, errors);

        /* Update mbuf rearm_data for four packets. */
        GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[0]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
        GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[1]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
        GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[2]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
        GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[3]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));

        /* Update mbuf rx_descriptor_fields1 for four packets. */
        GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, tmp);
        vst1q_u32((uint32_t *)&mbuf[0]->rx_descriptor_fields1, tmp);
        GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, tmp);
        vst1q_u32((uint32_t *)&mbuf[1]->rx_descriptor_fields1, tmp);
        GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, tmp);
        vst1q_u32((uint32_t *)&mbuf[2]->rx_descriptor_fields1, tmp);
        GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, tmp);
        vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp);
}
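
/*
 * Vector (NEON) receive burst handler. Completions are consumed in groups of
 * RTE_BNXT_DESCS_PER_LOOP packets; each packet uses two completion ring
 * entries (rxcmp and rxcmp1). Returns the number of packets received.
 */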
uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts)
{
        struct bnxt_rx_queue *rxq = rx_queue;
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
        uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
        struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
        uint64_t valid, desc_valid_mask = ~0UL;
        const uint32x4_t info3_v_mask = {
                CMPL_BASE_V, CMPL_BASE_V,
                CMPL_BASE_V, CMPL_BASE_V
        };
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons, mbcons;
        int nb_rx_pkts = 0;
        const uint64x2_t mb_init = {rxq->mbuf_initializer, 0};
        const uint32x4_t valid_target = {
                !!(raw_cons & cp_ring_size),
                !!(raw_cons & cp_ring_size),
                !!(raw_cons & cp_ring_size),
                !!(raw_cons & cp_ring_size)
        };
        int i;

        /* If Rx Q was stopped return */
        if (unlikely(!rxq->rx_started))
                return 0;

        if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
                bnxt_rxq_rearm(rxq, rxr);

        /* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
        nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
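
        /*
         * Each packet occupies two completion ring entries, so the raw
         * completion consumer index maps to the mbuf ring at raw_cons / 2.
         */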
        cons = raw_cons & (cp_ring_size - 1);
        mbcons = (raw_cons / 2) & (rx_ring_size - 1);

        /* Prefetch first four descriptor pairs. */
        rte_prefetch0(&cp_desc_ring[cons]);
        rte_prefetch0(&cp_desc_ring[cons + 4]);

        /* Ensure that we do not go past the ends of the rings. */
        nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
                                           (cp_ring_size - cons) / 2));
        /*
         * If we are at the end of the ring, ensure that descriptors after the
         * last valid entry are not treated as valid. Otherwise, force the
         * maximum number of packets to receive to be a multiple of the
         * per-loop count.
         */
        if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
                desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);
        else
                nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);

        /* Handle RX burst request */
        for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
                        cons += RTE_BNXT_DESCS_PER_LOOP * 2,
                        mbcons += RTE_BNXT_DESCS_PER_LOOP) {
                uint32x4_t rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
                uint32x4_t rxcmp[RTE_BNXT_DESCS_PER_LOOP];
                uint32x4_t info3_v;
                uint64x2_t t0, t1;
                uint32_t num_valid;

                /* Copy four mbuf pointers to output array. */
                t0 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons]);
#ifdef RTE_ARCH_ARM64
                t1 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons + 2]);
#endif
                vst1q_u64((void *)&rx_pkts[i], t0);
#ifdef RTE_ARCH_ARM64
                vst1q_u64((void *)&rx_pkts[i + 2], t1);
#endif

                /* Prefetch four descriptor pairs for next iteration. */
                if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
                        rte_prefetch0(&cp_desc_ring[cons + 8]);
                        rte_prefetch0(&cp_desc_ring[cons + 12]);
                }
                /*
                 * Load the four current descriptors into NEON registers in
                 * reverse order to ensure consistent state.
                 */
                rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
                rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);

                rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
                rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);

                t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));

                rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
                rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);

                rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
                rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);

                t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));

                /* Isolate descriptor status flags. */
                info3_v = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                             vget_low_u64(t1)));
                info3_v = vandq_u32(info3_v, info3_v_mask);
                info3_v = veorq_u32(info3_v, valid_target);

                /*
                 * Pack the 128-bit array of valid descriptor flags into 64
                 * bits and count the number of set bits in order to determine
                 * the number of valid descriptors.
                 */
                valid = vget_lane_u64(vreinterpret_u64_u16(vqmovn_u32(info3_v)),
                                      0);
                /*
                 * At this point, 'valid' is a 64-bit value containing four
                 * 16-bit fields, each of which is either 0x0001 or 0x0000.
                 * Compute number of valid descriptors from the index of
                 * the highest non-zero field.
                 */
                num_valid = (sizeof(uint64_t) / sizeof(uint16_t)) -
                                (__builtin_clzl(valid & desc_valid_mask) / 16);

                switch (num_valid) {
                case 4:
                        rxr->rx_buf_ring[mbcons + 3] = NULL;
                        /* FALLTHROUGH */
                case 3:
                        rxr->rx_buf_ring[mbcons + 2] = NULL;
                        /* FALLTHROUGH */
                case 2:
                        rxr->rx_buf_ring[mbcons + 1] = NULL;
                        /* FALLTHROUGH */
                case 1:
                        rxr->rx_buf_ring[mbcons + 0] = NULL;
                        break;
                case 0:
                        goto out;
                }

                descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts]);
                nb_rx_pkts += num_valid;

                if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
                        break;
        }

out:
        if (nb_rx_pkts) {
                rxr->rx_prod =
                        RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);

                rxq->rxrearm_nb += nb_rx_pkts;
                cpr->cp_raw_cons += 2 * nb_rx_pkts;
                cpr->valid =
                        !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
        }

        return nb_rx_pkts;
}
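
/*
 * Scan the Tx completion ring and free transmitted mbufs. Completion counts
 * are accumulated from the 'opaque' field written at send time, then the
 * mbufs are released via the fast-free or regular completion helpers.
 */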
static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
        struct bnxt_cp_ring_info *cpr = txq->cp_ring;
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons;
        uint32_t nb_tx_pkts = 0;
        struct tx_cmpl *txcmp;
        struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
        struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
        uint32_t ring_mask = cp_ring_struct->ring_mask;

        do {
                cons = RING_CMPL(ring_mask, raw_cons);
                txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

                if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
                        break;

                if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
                        nb_tx_pkts += txcmp->opaque;
                else
                        PMD_DRV_LOG(ERR,
                                    "Unhandled CMP type %02x\n",
                                    CMP_TYPE(txcmp));
                raw_cons = NEXT_RAW_CMP(raw_cons);
        } while (nb_tx_pkts < ring_mask);

        cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
        if (nb_tx_pkts) {
                if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
                else
                        bnxt_tx_cmp_vec(txq, nb_tx_pkts);
                cpr->cp_raw_cons = raw_cons;
        }
}
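
/*
 * Fill Tx descriptors for up to 'nb_pkts' single-segment mbufs and ring the
 * doorbell once for the whole burst; only the last descriptor requests a
 * completion, with the burst size carried in its 'opaque' field.
 */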
static uint16_t
bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                          uint16_t nb_pkts)
{
        struct bnxt_tx_queue *txq = tx_queue;
        struct bnxt_tx_ring_info *txr = txq->tx_ring;
        uint16_t prod = txr->tx_prod;
        struct rte_mbuf *tx_mbuf;
        struct tx_bd_long *txbd = NULL;
        struct bnxt_sw_tx_bd *tx_buf;
        uint16_t to_send;

        nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

        if (unlikely(nb_pkts == 0))
                return 0;

        /* Handle TX burst request */
        to_send = nb_pkts;
        while (to_send) {
                tx_mbuf = *tx_pkts++;
                rte_prefetch0(tx_mbuf);

                tx_buf = &txr->tx_buf_ring[prod];
                tx_buf->mbuf = tx_mbuf;

                txbd = &txr->tx_desc_ring[prod];
                txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
                txbd->len = tx_mbuf->data_len;
                txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
                                                       TX_BD_FLAGS_NOCMPL);
                prod = RING_NEXT(txr->tx_ring_struct, prod);
                to_send--;
        }

        /* Request a completion for last packet in burst */
        if (txbd) {
                txbd->opaque = nb_pkts;
                txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
        }

        rte_compiler_barrier();
        bnxt_db_write(&txr->tx_db, prod);

        txr->tx_prod = prod;

        return nb_pkts;
}
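
/*
 * Vector (NEON) transmit burst handler: reclaim completed descriptors when
 * the free threshold is reached, then send the burst in chunks of at most
 * RTE_BNXT_MAX_TX_BURST packets.
 */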
uint16_t
bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                   uint16_t nb_pkts)
{
        int nb_sent = 0;
        struct bnxt_tx_queue *txq = tx_queue;

        /* Tx queue was stopped; wait for it to be restarted */
        if (unlikely(!txq->tx_started)) {
                PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
                return 0;
        }

        /* Handle TX completions */
        if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
                bnxt_handle_tx_cp_vec(txq);

        while (nb_pkts) {
                uint16_t ret, num;

                num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);
                ret = bnxt_xmit_fixed_burst_vec(tx_queue,
                                                &tx_pkts[nb_sent], num);
                nb_sent += ret;
                nb_pkts -= ret;
                if (ret < num)
                        break;
        }

        return nb_sent;
}
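
/* Per-queue setup for the vector Rx path (delegates to the common helper). */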
int
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
        return bnxt_rxq_vec_setup_common(rxq);
}