/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2020 Broadcom All rights reserved. */

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_rxtx_vec_common.h"

#include "bnxt_txq.h"
#include "bnxt_txr.h"
static uint32_t
bnxt_parse_pkt_type(uint32x4_t mm_rxcmp, uint32x4_t mm_rxcmp1)
{
	uint32_t flags_type, flags2;
	uint8_t index;

	flags_type = vgetq_lane_u32(mm_rxcmp, 0);
	flags2 = (uint16_t)vgetq_lane_u32(mm_rxcmp1, 0);

	/*
	 * Index format:
	 *     bit 0: RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
	 *     bit 1: RX_CMPL_FLAGS2_IP_TYPE
	 *     bit 2: RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
	 *     bits 3-6: RX_PKT_CMPL_FLAGS_ITYPE
	 */
	index = ((flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> 9) |
		((flags2 & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
			    RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) >> 2) |
		((flags2 & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> 7);

	return bnxt_ptype_table[index];
}
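
/*
 * Worked example (illustrative only, assuming the usual HSI values
 * RX_PKT_CMPL_FLAGS_ITYPE_TCP = 0x2 << 12, FLAGS2_T_IP_CS_CALC = 0x4,
 * FLAGS2_META_FORMAT_VLAN = 0x10 and FLAGS2_IP_TYPE = 0x100): a
 * VLAN-tagged, tunneled IPv6 TCP completion produces
 *     index = (0x2000 >> 9) | ((0x10 | 0x4) >> 2) | (0x100 >> 7)
 *           = 0x10 | 0x5 | 0x2 = 0x17
 * i.e. ITYPE in bits 3-6, VLAN in bit 2, IPv6 in bit 1, tunnel in bit 0.
 */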

static uint32_t
bnxt_set_ol_flags(uint32x4_t mm_rxcmp, uint32x4_t mm_rxcmp1)
{
	uint16_t flags_type, errors, flags;
	uint32_t ol_flags;

	/* Extract rxcmp1->flags2. */
	flags = vgetq_lane_u32(mm_rxcmp1, 0) & 0x1F;
	/* Extract rxcmp->flags_type. */
	flags_type = vgetq_lane_u32(mm_rxcmp, 0);
	/* Extract rxcmp1->errors_v2. */
	errors = (vgetq_lane_u32(mm_rxcmp1, 2) >> 4) & flags & 0xF;

	ol_flags = bnxt_ol_flags_table[flags & ~errors];

	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID)
		ol_flags |= PKT_RX_RSS_HASH;

	if (errors)
		ol_flags |= bnxt_ol_flags_err_table[errors];

	return ol_flags;
}
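
/*
 * Worked example (illustrative, assuming the table layout generated in
 * bnxt_rxr.c): if the IP and L4 checksums were both calculated
 * (flags = 0x3) but the L4 checksum failed (errors = 0x2), then
 * flags & ~errors = 0x1 selects the IP-checksum-good entry of
 * bnxt_ol_flags_table, while bnxt_ol_flags_err_table[0x2] contributes
 * the L4-checksum-bad flag.
 */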

uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
	const uint8x16_t shuf_msk = {
		0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type (zeroes) */
		2, 3, 0xFF, 0xFF,       /* pkt_len */
		2, 3,                   /* data_len */
		0xFF, 0xFF,             /* vlan_tci (zeroes) */
		12, 13, 14, 15          /* rss hash */
	};
	int i;
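
	/*
	 * Note (illustrative): vqtbl1q_u8() with shuf_msk scatters Rx
	 * completion bytes into the mbuf rx_descriptor_fields1 layout:
	 * descriptor bytes 2-3 (the len field) are copied to both pkt_len
	 * and data_len, bytes 12-15 (the RSS hash) land in hash.rss, and
	 * 0xFF entries zero the pkt_type and vlan_tci slots, which are
	 * patched in separately below.
	 */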

	/* If Rx Q was stopped return */
	if (unlikely(!rxq->rx_started))
		return 0;

	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
		bnxt_rxq_rearm(rxq, rxr);

	/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);

	/* Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP. */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
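
	/*
	 * Worked example (illustrative, assuming RTE_BNXT_MAX_RX_BURST = 32
	 * and RTE_BNXT_DESCS_PER_LOOP = 4 as defined in
	 * bnxt_rxtx_vec_common.h): a request for 37 packets is first capped
	 * to 32, while a request for 30 is floored to 28.
	 */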

	/* Handle RX burst request */
	for (i = 0; i < nb_pkts; i++) {
		uint32x4_t mm_rxcmp, mm_rxcmp1;
		struct rx_pkt_cmpl_hi *rxcmp1;
		uint32x4_t pkt_mb, rearm;
		uint32_t ptype, ol_flags;
		struct rte_mbuf *mbuf;
		uint16_t vlan_tci;
		uint16x8_t tmp16;
		uint8x16_t tmp;

		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);

		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
		rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cons + 1];

		if (!CMP_VALID(rxcmp1, raw_cons + 1, cpr->cp_ring_struct))
			break;
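
		/*
		 * The test above is a phase check (a sketch of the idea,
		 * assuming CMP_VALID's definition in bnxt_cpr.h): the
		 * descriptor's V bit must match the phase implied by
		 * raw_cons & ring_size, and that phase flips on every full
		 * pass through the completion ring, so stale descriptors
		 * left over from the previous pass fail the comparison.
		 */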

		mm_rxcmp = vld1q_u32((uint32_t *)rxcmp);
		mm_rxcmp1 = vld1q_u32((uint32_t *)rxcmp1);

		/* Each packet consumes two completion ring entries. */
		raw_cons += 2;
		cons = rxcmp->opaque;

		mbuf = rxr->rx_buf_ring[cons];
		rte_prefetch0(mbuf);
		rxr->rx_buf_ring[cons] = NULL;

		/* Set fields from mbuf initializer and ol_flags. */
		ol_flags = bnxt_set_ol_flags(mm_rxcmp, mm_rxcmp1);
		rearm = vsetq_lane_u32(ol_flags,
				       vreinterpretq_u32_u64(mbuf_init), 2);
		vst1q_u32((uint32_t *)&mbuf->rearm_data, rearm);
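
		/*
		 * Note (illustrative): the 16-byte rearm store above covers
		 * data_off, refcnt, nb_segs and port (from mbuf_init) plus
		 * ol_flags; lane 2 holds the low 32 bits of ol_flags, and
		 * the upper lane stays zero from mbuf_init's second half.
		 */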

		/* Set mbuf pkt_len, data_len, and rss_hash fields. */
		tmp = vqtbl1q_u8(vreinterpretq_u8_u32(mm_rxcmp), shuf_msk);
		pkt_mb = vreinterpretq_u32_u8(tmp);

		/* Set packet type. */
		ptype = bnxt_parse_pkt_type(mm_rxcmp, mm_rxcmp1);
		pkt_mb = vsetq_lane_u32(ptype, pkt_mb, 0);

		/* Set vlan_tci. */
		vlan_tci = vgetq_lane_u32(mm_rxcmp1, 1);
		tmp16 = vsetq_lane_u16(vlan_tci,
				       vreinterpretq_u16_u32(pkt_mb),
				       5);
		pkt_mb = vreinterpretq_u32_u16(tmp16);

		/* Store descriptor fields. */
		vst1q_u32((uint32_t *)&mbuf->rx_descriptor_fields1, pkt_mb);

		rx_pkts[nb_rx_pkts++] = mbuf;
	}

	if (nb_rx_pkts) {
		RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);

		rxq->rxrearm_nb += nb_rx_pkts;
		cpr->cp_raw_cons = raw_cons;
		cpr->valid =
			!!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
		bnxt_db_cq(cpr);
	}

	return nb_rx_pkts;
}
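
/*
 * Minimal usage sketch (illustrative, not part of the driver; port_id
 * and queue_id are placeholders): the function above is installed as
 * the device's rx_pkt_burst callback, so applications reach it through
 * rte_eth_rx_burst():
 */
#if 0
	struct rte_mbuf *pkts[RTE_BNXT_MAX_RX_BURST];
	uint16_t n, j;

	n = rte_eth_rx_burst(port_id, queue_id, pkts,
			     RTE_BNXT_MAX_RX_BURST);
	for (j = 0; j < n; j++)
		rte_pktmbuf_free(pkts[j]);
#endif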

static void
bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct rte_mbuf **free = txq->free;
	uint16_t cons = txr->tx_cons;
	unsigned int blk = 0;

	while (nr_pkts--) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct rte_mbuf *mbuf;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = RING_NEXT(txr->tx_ring_struct, cons);
		mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
		if (unlikely(mbuf == NULL))
			continue;
		tx_buf->mbuf = NULL;

		/* Flush the batch when the mempool changes. */
		if (blk && mbuf->pool != free[0]->pool) {
			rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
			blk = 0;
		}
		free[blk++] = mbuf;
	}
	if (blk)
		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);

	txr->tx_cons = cons;
}
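
/*
 * Example (illustrative): for completed mbufs drawn from pools A, A, B
 * in that order, the two pool-A buffers are flushed with a single
 * rte_mempool_put_bulk() call when the pool-B buffer is encountered,
 * and the pool-B buffer is flushed after the loop.
 */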

static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;

	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

		if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
			break;

		if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
			nb_tx_pkts += txcmp->opaque;
		else
			RTE_LOG_DP(ERR, PMD,
				   "Unhandled CMP type %02x\n",
				   CMP_TYPE(txcmp));
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_tx_pkts < ring_mask);

	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
	if (nb_tx_pkts) {
		bnxt_tx_cmp_vec(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}
}
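
/*
 * Note (illustrative): txcmp->opaque carries the burst size seeded by
 * bnxt_xmit_fixed_burst_vec() below, so a single TX_CMPL_TYPE_TX_L2
 * completion acknowledges an entire transmitted burst rather than one
 * packet.
 */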

static uint16_t
bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t prod = txr->tx_prod;
	struct rte_mbuf *tx_mbuf;
	struct tx_bd_long *txbd = NULL;
	struct bnxt_sw_tx_bd *tx_buf;
	uint16_t to_send;

	nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

	if (unlikely(nb_pkts == 0))
		return 0;

	/* Handle TX burst request */
	to_send = nb_pkts;
	while (to_send) {
		tx_mbuf = *tx_pkts++;
		rte_prefetch0(tx_mbuf);

		tx_buf = &txr->tx_buf_ring[prod];
		tx_buf->mbuf = tx_mbuf;
		tx_buf->nr_bds = 1;

		txbd = &txr->tx_desc_ring[prod];
		txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
		txbd->len = tx_mbuf->data_len;
		txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
						       TX_BD_FLAGS_NOCMPL);
		prod = RING_NEXT(txr->tx_ring_struct, prod);
		to_send--;
	}

	/* Request a completion for the last packet in the burst. */
	if (txbd) {
		txbd->opaque = nb_pkts;
		txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
	}
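
	/*
	 * Note (illustrative): all but the last BD are marked NO_CMPL, so
	 * hardware raises one completion per burst; opaque tells the
	 * completion handler how many packets that single event covers.
	 */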

	/* Ensure BD writes are visible before ringing the doorbell. */
	rte_compiler_barrier();
	bnxt_db_write(&txr->tx_db, prod);

	txr->tx_prod = prod;

	return nb_pkts;
}

uint16_t
bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t nb_pkts)
{
	int nb_sent = 0;
	struct bnxt_tx_queue *txq = tx_queue;

	/* Tx queue was stopped; wait for it to be restarted. */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx queue stopped; returning\n");
		return 0;
	}

	/* Handle TX completions */
	if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
		bnxt_handle_tx_cp_vec(txq);

	while (nb_pkts) {
		uint16_t ret, num;

		num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);
		ret = bnxt_xmit_fixed_burst_vec(tx_queue,
						&tx_pkts[nb_sent],
						num);
		nb_sent += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_sent;
}

int __rte_cold
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
	return bnxt_rxq_vec_setup_common(rxq);
}