/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2019-2020 Broadcom All rights reserved. */

#include <rte_bitmap.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_vect.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
#include "bnxt_rxtx_vec_common.h"

#include "bnxt_txq.h"
#include "bnxt_txr.h"

static inline void
bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
{
	struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
	struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
	struct rte_mbuf *mb0, *mb1;
	int nb, i;
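
	/*
	 * hdr_room adds RTE_PKTMBUF_HEADROOM only in the upper 64-bit lane,
	 * and addrmask selects that same lane, so the buffer address can be
	 * isolated (AND) or cleared (BIC) in the descriptor below.
	 */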
	const uint64x2_t hdr_room = {0, RTE_PKTMBUF_HEADROOM};
	const uint64x2_t addrmask = {0, UINT64_MAX};

	/*
	 * Number of mbufs to allocate must be a multiple of two. The
	 * allocation must not go past the end of the ring.
	 */
	nb = RTE_MIN(rxq->rxrearm_nb & ~0x1,
		     rxq->nb_rx_desc - rxq->rxrearm_start);

	/* Allocate new mbufs into the software ring */
	if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;

		return;
	}

	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
	for (i = 0; i < nb; i += 2, rx_bufs += 2) {
		uint64x2_t buf_addr0, buf_addr1;
		uint64x2_t rxbd0, rxbd1;

		mb0 = rx_bufs[0];
		mb1 = rx_bufs[1];

		/* Load address fields from both mbufs */
		buf_addr0 = vld1q_u64((uint64_t *)&mb0->buf_addr);
		buf_addr1 = vld1q_u64((uint64_t *)&mb1->buf_addr);

		/* Load both rx descriptors (preserving some existing fields) */
		rxbd0 = vld1q_u64((uint64_t *)(rxbds + 0));
		rxbd1 = vld1q_u64((uint64_t *)(rxbds + 1));

		/* Add default offset to buffer address. */
		buf_addr0 = vaddq_u64(buf_addr0, hdr_room);
		buf_addr1 = vaddq_u64(buf_addr1, hdr_room);

		/* Clear all fields except address. */
		buf_addr0 = vandq_u64(buf_addr0, addrmask);
		buf_addr1 = vandq_u64(buf_addr1, addrmask);

		/* Clear address field in descriptor. */
		rxbd0 = vbicq_u64(rxbd0, addrmask);
		rxbd1 = vbicq_u64(rxbd1, addrmask);

		/* Set address field in descriptor. */
		rxbd0 = vaddq_u64(rxbd0, buf_addr0);
		rxbd1 = vaddq_u64(rxbd1, buf_addr1);

		/* Store descriptors to memory. */
		vst1q_u64((uint64_t *)(rxbds++), rxbd0);
		vst1q_u64((uint64_t *)(rxbds++), rxbd1);
	}
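
	/* Publish the new producer index and wrap rearm_start at the ring end. */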
	rxq->rxrearm_start += nb;
	bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
		rxq->rxrearm_start = 0;

	rxq->rxrearm_nb -= nb;
}

static uint32_t
bnxt_parse_pkt_type(uint32x4_t mm_rxcmp, uint32x4_t mm_rxcmp1)
{
	uint32_t flags_type, flags2;
	uint8_t index;
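
	/* The fields of interest are in the first 32-bit lane of each vector. */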
	flags_type = vgetq_lane_u32(mm_rxcmp, 0);
	flags2 = (uint16_t)vgetq_lane_u32(mm_rxcmp1, 0);

	/*
	 * Index format:
	 *     bit 0: RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
	 *     bit 1: RX_PKT_CMPL_FLAGS2_IP_TYPE
	 *     bit 2: RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
	 *     bits 3-6: RX_PKT_CMPL_FLAGS_ITYPE
	 */
	index = ((flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> 9) |
		((flags2 & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
			    RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) >> 2) |
		((flags2 & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> 7);

	return bnxt_ptype_table[index];
}

static void
bnxt_parse_csum(struct rte_mbuf *mbuf, struct rx_pkt_cmpl_hi *rxcmp1)
{
	uint32_t flags;

	flags = flags2_0xf(rxcmp1);
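	/* IP checksum status */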
	if (likely(IS_IP_NONTUNNEL_PKT(flags))) {
		if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (IS_IP_TUNNEL_PKT(flags)) {
		if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
			     RX_CMP_IP_CS_ERROR(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
	} else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {
		mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
	}
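
	/* L4 checksum status */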
	if (likely(IS_L4_NONTUNNEL_PKT(flags))) {
		if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	} else if (IS_L4_TUNNEL_PKT(flags)) {
		if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
		else
			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
		if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
		} else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
				    (flags))) {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
		} else {
			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
		}
	} else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {
		mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
	}
}

uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
		   uint16_t nb_pkts)
{
	struct bnxt_rx_queue *rxq = rx_queue;
	struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
	struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	int nb_rx_pkts = 0;
	struct rx_pkt_cmpl *rxcmp;
	const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
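	/*
	 * Shuffle mask that moves completion ring fields into the mbuf
	 * rx_descriptor_fields1 layout; pkt_type is filled in separately.
	 */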
	const uint8x16_t shuf_msk = {
		0xFF, 0xFF, 0xFF, 0xFF,    /* pkt_type (zeroes) */
		2, 3, 0xFF, 0xFF,          /* pkt_len */
		2, 3,                      /* data_len */
		0xFF, 0xFF,                /* vlan_tci (zeroes) */
		12, 13, 14, 15             /* rss hash */
	};
	int i;

	/* If Rx Q was stopped return */
	if (unlikely(!rxq->rx_started))
		return 0;

	if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
		bnxt_rxq_rearm(rxq, rxr);

	/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
	nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);

	/* Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP. */
	nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
	if (!nb_pkts)
		return 0;

	/* Handle RX burst request */
	for (i = 0; i < nb_pkts; i++) {
		uint32x4_t mm_rxcmp, mm_rxcmp1;
		struct rx_pkt_cmpl_hi *rxcmp1;
		struct rte_mbuf *mbuf;
		uint32x4_t pkt_mb;
		uint8x16_t tmp;
		uint32_t ptype;

		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);

		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
		rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cons + 1];
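
		/* Stop at the first completion that is not yet valid. */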
		if (!CMP_VALID(rxcmp1, raw_cons + 1, cpr->cp_ring_struct))
			break;

		mm_rxcmp = vld1q_u32((uint32_t *)rxcmp);
		mm_rxcmp1 = vld1q_u32((uint32_t *)rxcmp1);

		raw_cons += 2;
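
		/* The opaque field echoes the Rx ring index the buffer was posted at. */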
		cons = rxcmp->opaque;

		mbuf = rxr->rx_buf_ring[cons];
		rxr->rx_buf_ring[cons] = NULL;

		/* Set constant fields from mbuf initializer. */
		vst1q_u64((uint64_t *)&mbuf->rearm_data, mbuf_init);

		/* Set mbuf pkt_len, data_len, and rss_hash fields. */
		tmp = vqtbl1q_u8(vreinterpretq_u8_u32(mm_rxcmp), shuf_msk);
		pkt_mb = vreinterpretq_u32_u8(tmp);
		ptype = bnxt_parse_pkt_type(mm_rxcmp, mm_rxcmp1);
		pkt_mb = vsetq_lane_u32(ptype, pkt_mb, 0);
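
		/* One 16-byte store covers pkt_type, pkt_len, data_len, vlan_tci and rss. */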
		vst1q_u32((uint32_t *)&mbuf->rx_descriptor_fields1, pkt_mb);

		rte_compiler_barrier();

		if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID)
			mbuf->ol_flags |= PKT_RX_RSS_HASH;

		if (rxcmp1->flags2 &
		    RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
			mbuf->vlan_tci = rxcmp1->metadata &
				(RX_PKT_CMPL_METADATA_VID_MASK |
				 RX_PKT_CMPL_METADATA_DE |
				 RX_PKT_CMPL_METADATA_PRI_MASK);
			mbuf->ol_flags |=
				PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		}

		bnxt_parse_csum(mbuf, rxcmp1);
		rx_pkts[nb_rx_pkts++] = mbuf;
	}

	if (nb_rx_pkts) {
		RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);

		rxq->rxrearm_nb += nb_rx_pkts;
		cpr->cp_raw_cons = raw_cons;
		cpr->valid =
			!!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
		bnxt_db_cq(cpr);
	}

	return nb_rx_pkts;
}

static void
bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	struct rte_mbuf **free = txq->free;
	uint16_t cons = txr->tx_cons;
	unsigned int blk = 0;
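
	/* Completed mbufs are batched per mempool in txq->free and returned in bulk. */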
	while (nr_pkts--) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct rte_mbuf *mbuf;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = RING_NEXT(txr->tx_ring_struct, cons);
		mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
		if (unlikely(mbuf == NULL))
			continue;
		tx_buf->mbuf = NULL;

		if (blk && mbuf->pool != free[0]->pool) {
			rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
			blk = 0;
		}
		free[blk++] = mbuf;
	}
	if (blk)
		rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);

	txr->tx_cons = cons;
}

static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
	struct bnxt_cp_ring_info *cpr = txq->cp_ring;
	uint32_t raw_cons = cpr->cp_raw_cons;
	uint32_t cons;
	uint32_t nb_tx_pkts = 0;
	struct tx_cmpl *txcmp;
	struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
	uint32_t ring_mask = cp_ring_struct->ring_mask;
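
	/* Walk valid TX completions and accumulate how many packets they acknowledge. */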
	do {
		cons = RING_CMPL(ring_mask, raw_cons);
		txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];

		if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
			break;

		if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
			nb_tx_pkts += txcmp->opaque;
		else
			RTE_LOG_DP(ERR, PMD,
				   "Unhandled CMP type %02x\n",
				   CMP_TYPE(txcmp));
		raw_cons = NEXT_RAW_CMP(raw_cons);
	} while (nb_tx_pkts < ring_mask);

	cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
	if (nb_tx_pkts) {
		bnxt_tx_cmp_vec(txq, nb_tx_pkts);
		cpr->cp_raw_cons = raw_cons;
		bnxt_db_cq(cpr);
	}
}

static uint16_t
bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
	struct bnxt_tx_queue *txq = tx_queue;
	struct bnxt_tx_ring_info *txr = txq->tx_ring;
	uint16_t prod = txr->tx_prod;
	struct rte_mbuf *tx_mbuf;
	struct tx_bd_long *txbd = NULL;
	struct bnxt_sw_tx_bd *tx_buf;
	uint16_t to_send;

	nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));

	if (unlikely(nb_pkts == 0))
		return 0;

	/* Handle TX burst request */
	to_send = nb_pkts;
	while (to_send) {
		tx_mbuf = *tx_pkts++;
		rte_prefetch0(tx_mbuf);

		tx_buf = &txr->tx_buf_ring[prod];
		tx_buf->mbuf = tx_mbuf;
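
		/* Fill the TX descriptor: DMA address, length, and single-BD flags. */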
		txbd = &txr->tx_desc_ring[prod];
		txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
		txbd->len = tx_mbuf->data_len;
		txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
						       TX_BD_FLAGS_NOCMPL);
		prod = RING_NEXT(txr->tx_ring_struct, prod);
		to_send--;
	}

	/* Request a completion for last packet in burst */
	if (txbd) {
		txbd->opaque = nb_pkts;
		txbd->flags_type &= ~TX_BD_LONG_FLAGS_NO_CMPL;
	}
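
	/* Keep the descriptor writes ordered ahead of the doorbell write below. */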
	rte_compiler_barrier();
	bnxt_db_write(&txr->tx_db, prod);

	txr->tx_prod = prod;

	return nb_pkts;
}

uint16_t
bnxt_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		   uint16_t nb_pkts)
{
	int nb_sent = 0;
	struct bnxt_tx_queue *txq = tx_queue;

	/* Tx queue was stopped; wait for it to be restarted */
	if (unlikely(!txq->tx_started)) {
		PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
		return 0;
	}

	/* Handle TX completions */
	if (bnxt_tx_bds_in_hw(txq) >= txq->tx_free_thresh)
		bnxt_handle_tx_cp_vec(txq);

	while (nb_pkts) {
		uint16_t ret, num;

		num = RTE_MIN(nb_pkts, RTE_BNXT_MAX_TX_BURST);
		ret = bnxt_xmit_fixed_burst_vec(tx_queue,
						&tx_pkts[nb_sent],
						num);
		nb_sent += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_sent;
}

int
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
	return bnxt_rxq_vec_setup_common(rxq);
}