/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2021 Broadcom All rights reserved. */

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"

#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_rxtx_vec_common.h"
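
/*
 * Vectorized (NEON) receive and transmit paths for the bnxt PMD.  The Rx
 * loop below handles completions in groups of RTE_BNXT_DESCS_PER_LOOP (four)
 * packets, each packet spanning two 16-byte completion entries (rxcmp and
 * rxcmp1).
 */
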
#define GET_OL_FLAGS(rss_flags, ol_idx, errors, pi, ol_flags) \
{ \
        uint32_t tmp, of; \
        of = vgetq_lane_u32((rss_flags), (pi)) | \
                rxr->ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \
        tmp = vgetq_lane_u32((errors), (pi)); \
        if (tmp) \
                of |= rxr->ol_flags_err_table[tmp]; \
        (ol_flags) = of; \
}

#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pkt_idx, ret) \
{ \
        uint32_t ptype, vlan_tci; \
        uint32x4_t r; \
        /* Set mbuf pkt_len, data_len, and rss_hash fields. */ \
        r = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(rxcmp), \
                                            (shuf_msk))); \
        /* Set packet type. */ \
        ptype = bnxt_ptype_table[vgetq_lane_u32((ptype_idx), (pkt_idx))]; \
        r = vsetq_lane_u32(ptype, r, 0); \
        /* Set vlan_tci. */ \
        vlan_tci = vgetq_lane_u32((rxcmp1), 1); \
        r = vreinterpretq_u32_u16(vsetq_lane_u16(vlan_tci, \
                                  vreinterpretq_u16_u32(r), 5)); \
        (ret) = r; \
}

static void
descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
               uint64x2_t mb_init, struct rte_mbuf **mbuf,
               struct bnxt_rx_ring_info *rxr)
{
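        /*
         * Table-lookup shuffle mask for vqtbl1q_u8(): each byte selects a
         * byte of the first 16-byte completion half, and 0xFF is out of
         * range so it yields zero, pre-clearing the pkt_type and vlan_tci
         * fields that GET_DESC_FIELDS() fills in explicitly.
         */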
        const uint8x16_t shuf_msk = {
                0xFF, 0xFF, 0xFF, 0xFF,    /* pkt_type (zeroes) */
                2, 3, 0xFF, 0xFF,          /* pkt_len */
                2, 3,                      /* data_len */
                0xFF, 0xFF,                /* vlan_tci (zeroes) */
                12, 13, 14, 15             /* rss hash */
        };
        const uint32x4_t flags_type_mask =
                vdupq_n_u32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
        const uint32x4_t flags2_mask1 =
                vdupq_n_u32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
                            RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC);
        const uint32x4_t flags2_mask2 =
                vdupq_n_u32(RX_PKT_CMPL_FLAGS2_IP_TYPE);
        const uint32x4_t rss_mask =
                vdupq_n_u32(RX_PKT_CMPL_FLAGS_RSS_VALID);
        const uint32x4_t flags2_index_mask = vdupq_n_u32(0x1F);
        const uint32x4_t flags2_error_mask = vdupq_n_u32(0x0F);
        uint32x4_t flags_type, flags2, index, errors, rss_flags;
        uint32x4_t tmp, ptype_idx, is_tunnel;
        uint64x2_t t0, t1;
        uint32_t ol_flags;
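
        /*
         * The zip/combine sequences below transpose one 32-bit word from
         * each of the four completions into a single vector: vzip1q_u32()
         * interleaves the low words of a descriptor pair, then
         * vget_low_u64()/vcombine_u64() keep one word per packet.
         */
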
        /* Compute packet type table indexes for four packets */
        t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[0], mm_rxcmp[1]));
        t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[2], mm_rxcmp[3]));

        flags_type = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                        vget_low_u64(t1)));
        ptype_idx =
                vshrq_n_u32(vandq_u32(flags_type, flags_type_mask), 9);

        t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
        t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));

        flags2 = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                    vget_low_u64(t1)));

        ptype_idx = vorrq_u32(ptype_idx,
                              vshrq_n_u32(vandq_u32(flags2, flags2_mask1), 2));
        ptype_idx = vorrq_u32(ptype_idx,
                              vshrq_n_u32(vandq_u32(flags2, flags2_mask2), 7));
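
        /*
         * ptype_idx now combines the ITYPE field from flags_type with the
         * VLAN-metadata, tunnel-checksum, and IP-type bits taken from
         * flags2; GET_DESC_FIELDS() uses it below to index
         * bnxt_ptype_table[] and obtain a complete RTE_PTYPE_* value.
         */
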
        /* Extract RSS valid flags for four packets. */
        rss_flags = vshrq_n_u32(vandq_u32(flags_type, rss_mask), 9);

        flags2 = vandq_u32(flags2, flags2_index_mask);

        /* Extract errors_v2 fields for four packets. */
        t0 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
        t1 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));

        errors = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                    vget_low_u64(t1)));

        /* Compute ol_flags and checksum error indexes for four packets. */
        is_tunnel = vandq_u32(flags2, vdupq_n_u32(4));
        is_tunnel = vshlq_n_u32(is_tunnel, 3);
        errors = vandq_u32(vshrq_n_u32(errors, 4), flags2_error_mask);
        errors = vandq_u32(errors, flags2);

        index = vbicq_u32(flags2, errors);
        errors = vorrq_u32(errors, vshrq_n_u32(is_tunnel, 1));
        index = vorrq_u32(index, is_tunnel);
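
        /*
         * 'index' and 'errors' are per-packet indexes into the precomputed
         * rxr->ol_flags_table[] and rxr->ol_flags_err_table[]: checksum bits
         * with detected errors are removed from 'index' and kept in
         * 'errors', and the is_tunnel bit selects the tunnel variants of
         * both tables.
         */
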
        /* Update mbuf rearm_data for four packets. */
        GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[0]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
        GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[1]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
        GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[2]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
        GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags);
        vst1q_u32((uint32_t *)&mbuf[3]->rearm_data,
                  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
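
        /*
         * Each 16-byte store starts at rearm_data: the first two 32-bit
         * lanes come from rxq->mbuf_initializer, lane 2 is replaced with the
         * packet's ol_flags, and lane 3 stays zero because the upper half of
         * mb_init is zero.
         */
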
        /* Update mbuf rx_descriptor_fields1 for four packets. */
        GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, tmp);
        vst1q_u32((uint32_t *)&mbuf[0]->rx_descriptor_fields1, tmp);
        GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, tmp);
        vst1q_u32((uint32_t *)&mbuf[1]->rx_descriptor_fields1, tmp);
        GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, tmp);
        vst1q_u32((uint32_t *)&mbuf[2]->rx_descriptor_fields1, tmp);
        GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, tmp);
        vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp);
}

uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                   uint16_t nb_pkts)
{
        struct bnxt_rx_queue *rxq = rx_queue;
        struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
        struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
        uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
        uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
        struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
        uint64_t valid, desc_valid_mask = ~0UL;
        const uint32x4_t info3_v_mask = vdupq_n_u32(CMPL_BASE_V);
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons, mbcons;
        int nb_rx_pkts = 0;
        const uint64x2_t mb_init = {rxq->mbuf_initializer, 0};
        const uint32x4_t valid_target =
                vdupq_n_u32(!!(raw_cons & cp_ring_size));
        int i;
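
        /*
         * The CMPL_BASE_V bit written by hardware toggles on each pass over
         * the completion ring, so the expected phase depends on how many
         * times raw_cons has wrapped; valid_target holds that phase, and the
         * XOR below is non-zero exactly for freshly written completions.
         */
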
        /* If Rx Q was stopped return */
        if (unlikely(!rxq->rx_started))
                return 0;

        if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
                bnxt_rxq_rearm(rxq, rxr);

        /* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
        nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);

        cons = raw_cons & (cp_ring_size - 1);
        mbcons = (raw_cons / 2) & (rx_ring_size - 1);

        /* Prefetch first four descriptor pairs. */
        rte_prefetch0(&cp_desc_ring[cons]);
        rte_prefetch0(&cp_desc_ring[cons + 4]);

        /* Ensure that we do not go past the ends of the rings. */
        nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
                                           (cp_ring_size - cons) / 2));
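
        /*
         * Each packet consumes two 16-byte completion entries (rxcmp and
         * rxcmp1), so the completion-ring headroom is divided by two and the
         * buffer-ring index advances at half the rate of raw_cons.
         */
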
        /*
         * If we are at the end of the ring, ensure that descriptors after the
         * last valid entry are not treated as valid. Otherwise, force the
         * maximum number of packets to receive to be a multiple of the per-
         * loop count.
         */
        if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
                desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);
        else
                nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
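
        /*
         * For example, with nb_pkts == 2 the mask is shifted right by 32
         * bits, leaving ones only in the two low 16-bit lanes, so at most
         * two descriptors can be counted as valid below.
         */
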
        /* Handle RX burst request */
        for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
                        cons += RTE_BNXT_DESCS_PER_LOOP * 2,
                        mbcons += RTE_BNXT_DESCS_PER_LOOP) {
                uint32x4_t rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
                uint32x4_t rxcmp[RTE_BNXT_DESCS_PER_LOOP];
                uint32x4_t info3_v;
                uint64x2_t t0, t1;
                uint32_t num_valid;

                /* Copy four mbuf pointers to output array. */
                t0 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons]);
#ifdef RTE_ARCH_ARM64
                t1 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons + 2]);
#endif
                vst1q_u64((void *)&rx_pkts[i], t0);
#ifdef RTE_ARCH_ARM64
                vst1q_u64((void *)&rx_pkts[i + 2], t1);
#endif
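
                /*
                 * On AArch64, mbuf pointers are 8 bytes, so two 128-bit
                 * load/store pairs are needed for four pointers; on 32-bit
                 * Arm a single 128-bit pair already covers four 4-byte
                 * pointers, hence the RTE_ARCH_ARM64 guards.
                 */
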
                /* Prefetch four descriptor pairs for next iteration. */
                if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
                        rte_prefetch0(&cp_desc_ring[cons + 8]);
                        rte_prefetch0(&cp_desc_ring[cons + 12]);
                }

                /*
                 * Load the four current descriptors into NEON registers in
                 * reverse order to ensure consistent state.  A read barrier
                 * orders each status half before its paired data half.
                 */
                rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
                rte_io_rmb();
                rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);

                rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
                rte_io_rmb();
                rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);

                t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));

                rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
                rte_io_rmb();
                rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);

                rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
                rte_io_rmb();
                rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);

                t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));
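
                /*
                 * vzip2q_u32() pairs the third 32-bit word (errors_v2, which
                 * carries the CMPL_BASE_V valid bit) of adjacent rxcmp1
                 * halves; t0/t1 are combined below so all four valid bits
                 * can be tested as one vector.
                 */
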
                /* Isolate descriptor status flags. */
                info3_v = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
                                                             vget_low_u64(t1)));
                info3_v = vandq_u32(info3_v, info3_v_mask);
                info3_v = veorq_u32(info3_v, valid_target);

                /*
                 * Pack the 128-bit array of valid descriptor flags into 64
                 * bits and count the number of set bits in order to determine
                 * the number of valid descriptors.
                 */
                valid = vget_lane_u64(vreinterpret_u64_u16(vqmovn_u32(info3_v)),
                                      0);

                /*
                 * At this point, 'valid' is a 64-bit value containing four
                 * 16-bit fields, each of which is either 0x0001 or 0x0000.
                 * Compute number of valid descriptors from the index of
                 * the highest non-zero field.
                 */
                num_valid = (sizeof(uint64_t) / sizeof(uint16_t)) -
                                (__builtin_clzl(valid & desc_valid_mask) / 16);
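
                /*
                 * Worked example (lane 0 in the least significant bits): if
                 * only the first three completions are valid,
                 * valid = 0x0000000100010001, __builtin_clzl() returns 31,
                 * 31 / 16 = 1, and num_valid = 4 - 1 = 3.
                 */
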
                /* Mark consumed buffer ring entries as empty. */
                switch (num_valid) {
                case 4: rxr->rx_buf_ring[mbcons + 3] = NULL; /* FALLTHROUGH */
                case 3: rxr->rx_buf_ring[mbcons + 2] = NULL; /* FALLTHROUGH */
                case 2: rxr->rx_buf_ring[mbcons + 1] = NULL; /* FALLTHROUGH */
                case 1: rxr->rx_buf_ring[mbcons + 0] = NULL; break;
                case 0: goto out;
                }

                descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts],
                               rxr);
                nb_rx_pkts += num_valid;

                if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
                        break;
        }
out:
        rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);
        rxq->rxrearm_nb += nb_rx_pkts;
        cpr->cp_raw_cons += 2 * nb_rx_pkts;
        cpr->valid =
                !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);

        return nb_rx_pkts;
}
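
/*
 * Note on the Rx path above: the completion-ring consumer advances by two
 * entries per packet, and rxrearm_nb accumulates until it reaches
 * rx_free_thresh, at which point the next burst call refills the buffer ring
 * via bnxt_rxq_rearm().
 */
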
static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
        struct bnxt_cp_ring_info *cpr = txq->cp_ring;
        uint32_t raw_cons = cpr->cp_raw_cons;
        uint32_t cons;
        uint32_t nb_tx_pkts = 0;
        struct tx_cmpl *txcmp;
        struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
        struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
        uint32_t ring_mask = cp_ring_struct->ring_mask;

        do {
                cons = RING_CMPL(ring_mask, raw_cons);
                txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

                if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
                        break;

                if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
                        nb_tx_pkts += txcmp->opaque;
                else
                        RTE_LOG_DP(ERR, PMD,
                                   "Unhandled CMP type %02x\n",
                                   CMP_TYPE(txcmp));
                raw_cons = NEXT_RAW_CMP(raw_cons);
        } while (nb_tx_pkts < ring_mask);

        cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
        if (nb_tx_pkts) {
                if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
                else
                        bnxt_tx_cmp_vec(txq, nb_tx_pkts);
                cpr->cp_raw_cons = raw_cons;
        }
}
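
/*
 * bnxt_xmit_fixed_burst_vec() below requests a hardware completion only for
 * the last descriptor of each burst and stores the burst size in that
 * descriptor's opaque field, which is what bnxt_handle_tx_cp_vec() above
 * adds up per completion.
 */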
static uint16_t
bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                          uint16_t nb_pkts)
{
        struct bnxt_tx_queue *txq = tx_queue;
        struct bnxt_tx_ring_info *txr = txq->tx_ring;
        uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
        struct rte_mbuf *tx_mbuf;
        struct tx_bd_long *txbd = NULL;
        struct bnxt_sw_tx_bd *tx_buf;
        uint16_t to_send;

        nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

        if (unlikely(nb_pkts == 0))
                return 0;

        /* Handle TX burst request */
        to_send = nb_pkts;
        while (to_send) {
                tx_mbuf = *tx_pkts++;
                rte_prefetch0(tx_mbuf);

                tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
                tx_buf = &txr->tx_buf_ring[tx_prod];
                tx_buf->mbuf = tx_mbuf;
                tx_buf->nr_bds = 1;

                txbd = &txr->tx_desc_ring[tx_prod];
                txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
                txbd->len = tx_mbuf->data_len;
                txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
                                                       TX_BD_FLAGS_NOCMPL);
                tx_raw_prod = RING_NEXT(tx_raw_prod);
                to_send--;
        }

        /* Request a completion for last packet in burst */
        if (txbd) {
                txbd->opaque = nb_pkts;
                txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
        }

        rte_compiler_barrier();
        bnxt_db_write(&txr->tx_db, tx_raw_prod);

        txr->tx_raw_prod = tx_raw_prod;

        return nb_pkts;
}
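
/*
 * bnxt_xmit_pkts_vec() is the burst entry point: it reclaims completed
 * descriptors once tx_free_thresh worth of BDs are in flight and hands the
 * burst to bnxt_xmit_fixed_burst_vec() in chunks of at most
 * RTE_BNXT_MAX_TX_BURST.
 */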
uint16_t
bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                   uint16_t nb_pkts)
{
        int nb_sent = 0;
        struct bnxt_tx_queue *txq = tx_queue;

        /* Tx queue was stopped; wait for it to be restarted */
        if (unlikely(!txq->tx_started)) {
                PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
                return 0;
        }

        /* Handle TX completions */
        if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
                bnxt_handle_tx_cp_vec(txq);

        while (nb_pkts) {
                uint16_t ret, num;

                num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);
                ret = bnxt_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_sent],
                                                num);
                nb_sent += ret;
                nb_pkts -= ret;
                if (ret < num)
                        break;
        }

        return nb_sent;
}

int
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
        return bnxt_rxq_vec_setup_common(rxq);
}