/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2021 Broadcom All rights reserved. */

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_ring.h"
#include "bnxt_rxtx_vec_common.h"
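
/*
 * Compute ol_flags for the packet in lane 'pi': combine the RSS-valid flag
 * with the ol_flags_table entry selected by the flags2-derived index, and
 * merge in checksum error flags from ol_flags_err_table when the error
 * index is non-zero.
 */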
#define GET_OL_FLAGS(rss_flags, ol_idx, errors, pi, ol_flags)		\
{									\
	uint32_t tmp, of;						\
	of = vgetq_lane_u32((rss_flags), (pi)) |			\
		rxr->ol_flags_table[vgetq_lane_u32((ol_idx), (pi))];	\
	tmp = vgetq_lane_u32((errors), (pi));				\
	if (tmp)							\
		of |= rxr->ol_flags_err_table[tmp];			\
	(ol_flags) = of;						\
}
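
/*
 * Assemble the 16-byte rx_descriptor_fields1 image for lane 'pkt_idx':
 * shuffle pkt_len, data_len and the RSS hash out of the Rx completion,
 * then patch in the looked-up packet type and the VLAN TCI taken from the
 * second completion descriptor.
 */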
#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pkt_idx, ret)     \
{									       \
	uint32_t ptype, vlan_tci;					       \
	uint32x4_t r;							       \
	/* Set mbuf pkt_len, data_len, and rss_hash fields. */		       \
	r = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(rxcmp),      \
					    (shuf_msk)));		       \
	/* Set packet type. */						       \
	ptype = bnxt_ptype_table[vgetq_lane_u32((ptype_idx), (pkt_idx))];     \
	r = vsetq_lane_u32(ptype, r, 0);				       \
	/* Set vlan_tci. */						       \
	vlan_tci = vgetq_lane_u32((rxcmp1), 1);				       \
	r = vreinterpretq_u32_u16(vsetq_lane_u16(vlan_tci,		       \
					vreinterpretq_u16_u32(r), 5));	       \
	(ret) = r;							       \
}
static void
descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
	       uint64x2_t mb_init, struct rte_mbuf **mbuf,
	       struct bnxt_rx_ring_info *rxr)
{
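	/*
	 * Shuffle mask producing the rx_descriptor_fields1 byte layout.
	 * Lanes set to 0xFF yield zeroes; pkt_type and vlan_tci are patched
	 * in afterwards by GET_DESC_FIELDS.
	 */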
	const uint8x16_t shuf_msk = {
		0xFF, 0xFF, 0xFF, 0xFF,    /* pkt_type (zeroes) */
		2, 3, 0xFF, 0xFF,          /* pkt_len */
		2, 3,                      /* data_len */
		0xFF, 0xFF,                /* vlan_tci (zeroes) */
		12, 13, 14, 15             /* rss hash */
	};
	const uint32x4_t flags_type_mask =
		vdupq_n_u32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
	const uint32x4_t flags2_mask1 =
		vdupq_n_u32(CMPL_FLAGS2_VLAN_TUN_MSK);
	const uint32x4_t flags2_mask2 =
		vdupq_n_u32(RX_PKT_CMPL_FLAGS2_IP_TYPE);
	const uint32x4_t rss_mask =
		vdupq_n_u32(RX_PKT_CMPL_FLAGS_RSS_VALID);
	const uint32x4_t flags2_index_mask = vdupq_n_u32(0x1F);
	const uint32x4_t flags2_error_mask = vdupq_n_u32(0x0F);
	uint32x4_t flags_type, flags2, index, errors, rss_flags;
	uint32x4_t tmp, ptype_idx, is_tunnel;
	uint64x2_t t0, t1;
	uint32_t ol_flags;

	/* Validate ptype table indexing at build time. */
	bnxt_check_ptype_constants();

	/* Compute packet type table indexes for four packets */
	t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[0], mm_rxcmp[1]));
	t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[2], mm_rxcmp[3]));

	flags_type = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
							vget_low_u64(t1)));
	ptype_idx = vshrq_n_u32(vandq_u32(flags_type, flags_type_mask),
				RX_PKT_CMPL_FLAGS_ITYPE_SFT -
				BNXT_PTYPE_TBL_TYPE_SFT);

	t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
	t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));

	flags2 = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
						    vget_low_u64(t1)));

	ptype_idx = vorrq_u32(ptype_idx,
			vshrq_n_u32(vandq_u32(flags2, flags2_mask1),
				    RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT -
				    BNXT_PTYPE_TBL_VLAN_SFT));
	ptype_idx = vorrq_u32(ptype_idx,
			vshrq_n_u32(vandq_u32(flags2, flags2_mask2),
				    RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT -
				    BNXT_PTYPE_TBL_IP_VER_SFT));

	/* Extract RSS valid flags for four packets. */
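	/* The valid bit (0x400) lands at the PKT_RX_RSS_HASH position (bit 1). */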
	rss_flags = vshrq_n_u32(vandq_u32(flags_type, rss_mask), 9);

	flags2 = vandq_u32(flags2, flags2_index_mask);

	/* Extract errors_v2 fields for four packets. */
	t0 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
	t1 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));

	errors = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
						    vget_low_u64(t1)));

	/* Compute ol_flags and checksum error indexes for four packets. */
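	/* flags2 bit 2 (tunnel IP checksum calculated) marks tunnel packets. */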
	is_tunnel = vandq_u32(flags2, vdupq_n_u32(4));
	is_tunnel = vshlq_n_u32(is_tunnel, 3);
	errors = vandq_u32(vshrq_n_u32(errors, 4), flags2_error_mask);
	errors = vandq_u32(errors, flags2);
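
	/*
	 * Index the ol_flags table with the good-checksum bits only;
	 * error bits select entries in the error table instead.
	 */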
	index = vbicq_u32(flags2, errors);
	errors = vorrq_u32(errors, vshrq_n_u32(is_tunnel, 1));
	index = vorrq_u32(index, is_tunnel);

	/* Update mbuf rearm_data for four packets. */
	GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
	vst1q_u32((uint32_t *)&mbuf[0]->rearm_data,
		  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
	GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags);
	vst1q_u32((uint32_t *)&mbuf[1]->rearm_data,
		  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
	GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags);
	vst1q_u32((uint32_t *)&mbuf[2]->rearm_data,
		  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
	GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags);
	vst1q_u32((uint32_t *)&mbuf[3]->rearm_data,
		  vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));

	/* Update mbuf rx_descriptor_fields1 for four packets. */
	GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, tmp);
	vst1q_u32((uint32_t *)&mbuf[0]->rx_descriptor_fields1, tmp);
	GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, tmp);
	vst1q_u32((uint32_t *)&mbuf[1]->rx_descriptor_fields1, tmp);
	GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, tmp);
	vst1q_u32((uint32_t *)&mbuf[2]->rx_descriptor_fields1, tmp);
	GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, tmp);
	vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp);
}

uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
	uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	uint64_t valid, desc_valid_mask = ~0UL;
	const uint32x4_t info3_v_mask = vdupq_n_u32(CMPL_BASE_V);
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons, mbcons;
	int nb_rx_pkts = 0;
	const uint64x2_t mb_init = {rxq->mbuf_initializer, 0};
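	/*
	 * Expected state of the completion ring "valid" bit for the current
	 * pass over the ring; it flips each time the ring wraps around.
	 */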
	const uint32x4_t valid_target =
		vdupq_n_u32(!!(raw_cons & cp_ring_size));
	int i;

	/* If Rx Q was stopped return */
	if (unlikely(!rxq->rx_started))
		return 0;
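
	/* Replenish the Rx ring once enough mbufs have been consumed. */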
	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
		bnxt_rxq_rearm(rxq, rxr);

	/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
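
	/*
	 * Each received packet consumes two completion ring entries, so the
	 * mbuf ring index advances at half the completion-ring rate.
	 */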
	cons = raw_cons & (cp_ring_size - 1);
	mbcons = (raw_cons / 2) & (rx_ring_size - 1);

	/* Prefetch first four descriptor pairs. */
	rte_prefetch0(&cp_desc_ring[cons]);
	rte_prefetch0(&cp_desc_ring[cons + 4]);

	/* Ensure that we do not go past the ends of the rings. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
					   (cp_ring_size - cons) / 2));

	/*
	 * If we are at the end of the ring, ensure that descriptors after the
	 * last valid entry are not treated as valid. Otherwise, force the
	 * maximum number of packets to receive to be a multiple of the
	 * per-loop count.
	 */
	if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
		desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);

	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);

	/* Handle RX burst request */
	for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
				 cons += RTE_BNXT_DESCS_PER_LOOP * 2,
				 mbcons += RTE_BNXT_DESCS_PER_LOOP) {
		uint32x4_t rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
		uint32x4_t rxcmp[RTE_BNXT_DESCS_PER_LOOP];
		uint32x4_t info3_v;
		uint64x2_t t0, t1;
		uint32_t num_valid;

		/* Copy four mbuf pointers to output array. */
		t0 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons]);
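		/*
		 * On 64-bit Arm, mbuf pointers are eight bytes, so a second
		 * 128-bit load/store pair is needed to cover all four
		 * packets; on 32-bit Arm the first pair already does.
		 */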
#ifdef RTE_ARCH_ARM64
		t1 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons + 2]);
#endif
		vst1q_u64((void *)&rx_pkts[i], t0);
#ifdef RTE_ARCH_ARM64
		vst1q_u64((void *)&rx_pkts[i + 2], t1);
#endif

		/* Prefetch four descriptor pairs for next iteration. */
		if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
			rte_prefetch0(&cp_desc_ring[cons + 8]);
			rte_prefetch0(&cp_desc_ring[cons + 12]);
		}

		/*
		 * Load the four current descriptors into NEON registers in
		 * reverse order to ensure consistent state. I/O read barriers
		 * keep each completion's data coherent with its valid bit.
		 */
		rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
		rte_io_rmb();
		rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);

		rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
		rte_io_rmb();
		rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);

		t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));

		rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
		rte_io_rmb();
		rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);

		rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
		rte_io_rmb();
		rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);

		t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));

		/* Isolate descriptor status flags. */
		info3_v = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
							     vget_low_u64(t1)));
		info3_v = vandq_u32(info3_v, info3_v_mask);
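		/*
		 * XOR with the expected phase: a lane is non-zero exactly when
		 * hardware has written the completion in the current pass.
		 */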
		info3_v = veorq_u32(info3_v, valid_target);

		/*
		 * Pack the 128-bit array of valid descriptor flags into 64
		 * bits and count the number of set bits in order to determine
		 * the number of valid descriptors.
		 */
		valid = vget_lane_u64(vreinterpret_u64_u16(vqmovn_u32(info3_v)),
				      0);

		/*
		 * At this point, 'valid' is a 64-bit value containing four
		 * 16-bit fields, each of which is either 0x0001 or 0x0000.
		 * Compute number of valid descriptors from the index of
		 * the highest non-zero field.
		 */
		num_valid = (sizeof(uint64_t) / sizeof(uint16_t)) -
			    (__builtin_clzl(valid & desc_valid_mask) / 16);
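
		/*
		 * Completions are written in order, so valid descriptors are
		 * contiguous starting at lane 0; stop when none are valid.
		 */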
		if (num_valid == 0)
			break;
		descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts],
			       rxr);
		nb_rx_pkts += num_valid;
		if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
			break;
	}

	if (nb_rx_pkts) {
		rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);

		rxq->rxrearm_nb += nb_rx_pkts;
		cpr->cp_raw_cons += 2 * nb_rx_pkts;
		cpr->valid =
			!!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
		bnxt_db_cq(cpr);
	}

	return nb_rx_pkts;
}

static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
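
	/*
	 * Walk the completion ring, summing the per-burst packet counts
	 * reported in TX completions (opaque), until an entry that hardware
	 * has not yet written is reached.
	 */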
	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

		if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
			break;

		if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
			nb_tx_pkts += txcmp->opaque;
		else
			PMD_DRV_LOG(ERR,
				    "Unhandled CMP type %02x\n",
				    CMP_TYPE(txcmp));
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_tx_pkts < ring_mask);

	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);

	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
	else
		bnxt_tx_cmp_vec(txq, nb_tx_pkts);
	cpr->cp_raw_cons = raw_cons;
}

static uint16_t
bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
	struct rte_mbuf *tx_mbuf;
	struct tx_bd_long *txbd = NULL;
	struct rte_mbuf **tx_buf;
	uint16_t to_send;

	nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

	if (unlikely(nb_pkts == 0))
		return 0;

	/* Handle TX burst request */
	to_send = nb_pkts;
	while (to_send) {
		tx_mbuf = *tx_pkts++;
		rte_prefetch0(tx_mbuf);
		tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
		tx_buf = &txr->tx_buf_ring[tx_prod];
		*tx_buf = tx_mbuf;

		txbd = &txr->tx_desc_ring[tx_prod];
		txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
		txbd->len = tx_mbuf->data_len;
		txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
						       TX_BD_FLAGS_NOCMPL);
		tx_raw_prod = RING_NEXT(tx_raw_prod);
		to_send--;
	}

	/* Request a completion for last packet in burst */
	if (txbd) {
		txbd->opaque = nb_pkts;
		txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
	}
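
	/* Keep the compiler from reordering BD stores past the doorbell write. */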
	rte_compiler_barrier();
	bnxt_db_write(&txr->tx_db, tx_raw_prod);

	txr->tx_raw_prod = tx_raw_prod;

	return nb_pkts;
}

uint16_t
bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t nb_pkts)
{
	int nb_sent = 0;
	struct bnxt_tx_queue *txq = tx_queue;

	/* Tx queue was stopped; wait for it to be restarted */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
		return 0;
	}

	/* Handle TX completions */
	if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
		bnxt_handle_tx_cp_vec(txq);
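
	/*
	 * Send the burst in chunks of at most RTE_BNXT_MAX_TX_BURST packets,
	 * stopping early if the ring fills up.
	 */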
	while (nb_pkts) {
		uint16_t ret, num;

		num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);
		ret = bnxt_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_sent],
						num);
		nb_sent += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_sent;
}

int
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
	return bnxt_rxq_vec_setup_common(rxq);
}