/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2019-2020 Broadcom All rights reserved. */
+/* Copyright(c) 2019-2021 Broadcom All rights reserved. */
#include <inttypes.h>
#include <stdbool.h>
#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
-#include "bnxt_rxr.h"
-#include "bnxt_rxq.h"
-#include "hsi_struct_def_dpdk.h"
-#include "bnxt_rxtx_vec_common.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
+#include "bnxt_rxtx_vec_common.h"
/*
* RX Ring handling
*/
-static inline void
-bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
-{
- struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
- struct bnxt_sw_rx_bd *rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
- struct rte_mbuf *mb0, *mb1;
- int i;
-
- const uint64x2_t hdr_room = {0, RTE_PKTMBUF_HEADROOM};
- const uint64x2_t addrmask = {0, UINT64_MAX};
-
- /* Pull RTE_BNXT_RXQ_REARM_THRESH more mbufs into the software ring */
- if (rte_mempool_get_bulk(rxq->mb_pool,
- (void *)rx_bufs,
- RTE_BNXT_RXQ_REARM_THRESH) < 0) {
- rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
- RTE_BNXT_RXQ_REARM_THRESH;
-
- return;
- }
-
- /* Initialize the mbufs in vector, process 2 mbufs in one loop */
- for (i = 0; i < RTE_BNXT_RXQ_REARM_THRESH; i += 2, rx_bufs += 2) {
- uint64x2_t buf_addr0, buf_addr1;
- uint64x2_t rxbd0, rxbd1;
-
- mb0 = rx_bufs[0].mbuf;
- mb1 = rx_bufs[1].mbuf;
-
- /* Load address fields from both mbufs */
- buf_addr0 = vld1q_u64((uint64_t *)&mb0->buf_addr);
- buf_addr1 = vld1q_u64((uint64_t *)&mb1->buf_addr);
-
- /* Load both rx descriptors (preserving some existing fields) */
- rxbd0 = vld1q_u64((uint64_t *)(rxbds + 0));
- rxbd1 = vld1q_u64((uint64_t *)(rxbds + 1));
-
- /* Add default offset to buffer address. */
- buf_addr0 = vaddq_u64(buf_addr0, hdr_room);
- buf_addr1 = vaddq_u64(buf_addr1, hdr_room);
-
- /* Clear all fields except address. */
- buf_addr0 = vandq_u64(buf_addr0, addrmask);
- buf_addr1 = vandq_u64(buf_addr1, addrmask);
-
- /* Clear address field in descriptor. */
- rxbd0 = vbicq_u64(rxbd0, addrmask);
- rxbd1 = vbicq_u64(rxbd1, addrmask);
-
- /* Set address field in descriptor. */
- rxbd0 = vaddq_u64(rxbd0, buf_addr0);
- rxbd1 = vaddq_u64(rxbd1, buf_addr1);
-
- /* Store descriptors to memory. */
- vst1q_u64((uint64_t *)(rxbds++), rxbd0);
- vst1q_u64((uint64_t *)(rxbds++), rxbd1);
- }
-
- rxq->rxrearm_start += RTE_BNXT_RXQ_REARM_THRESH;
- bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
- if (rxq->rxrearm_start >= rxq->nb_rx_desc)
- rxq->rxrearm_start = 0;
-
- rxq->rxrearm_nb -= RTE_BNXT_RXQ_REARM_THRESH;
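+/*
+ * Compute ol_flags for the packet in 32-bit lane 'pi': OR the RSS-valid
+ * flag extracted from the completion with an entry from
+ * rxr->ol_flags_table (indexed by VLAN/checksum status bits) and, when any
+ * checksum error bits are set, with an entry from rxr->ol_flags_err_table.
+ * Both tables are assumed to be populated when the receive ring is
+ * configured.
+ */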
+#define GET_OL_FLAGS(rss_flags, ol_idx, errors, pi, ol_flags) \
+{ \
+ uint32_t tmp, of; \
+ \
+ of = vgetq_lane_u32((rss_flags), (pi)) | \
+ rxr->ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \
+ \
+ tmp = vgetq_lane_u32((errors), (pi)); \
+ if (tmp) \
+ of |= rxr->ol_flags_err_table[tmp]; \
+ (ol_flags) = of; \
}
-static uint32_t
-bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
-{
- uint32_t l3, pkt_type = 0;
- uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
- uint32_t flags_type;
-
- vlan = !!(rxcmp1->flags2 &
- rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
- pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;
-
- t_ipcs = !!(rxcmp1->flags2 &
- rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC));
- ip6 = !!(rxcmp1->flags2 &
- rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE));
-
- flags_type = rxcmp->flags_type &
- rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
-
- if (!t_ipcs && !ip6)
- l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
- else if (!t_ipcs && ip6)
- l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
- else if (t_ipcs && !ip6)
- l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
- else
- l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
-
- switch (flags_type) {
- case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP):
- if (!t_ipcs)
- pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
- else
- pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
- break;
-
- case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP):
- if (!t_ipcs)
- pkt_type |= l3 | RTE_PTYPE_L4_TCP;
- else
- pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
- break;
-
- case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP):
- if (!t_ipcs)
- pkt_type |= l3 | RTE_PTYPE_L4_UDP;
- else
- pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
- break;
-
- case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP):
- pkt_type |= l3;
- break;
- }
-
- return pkt_type;
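+/*
+ * Assemble the 16-byte rx_descriptor_fields1 word for one packet:
+ * pkt_len/data_len and the RSS hash are shuffled out of the first
+ * completion word with 'shuf_msk', packet_type is looked up in
+ * bnxt_ptype_table using the precomputed index in lane 'pkt_idx', and
+ * vlan_tci is taken from the metadata word of the second completion.
+ */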
+#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pkt_idx, ret) \
+{ \
+ uint32_t ptype; \
+ uint16_t vlan_tci; \
+ uint32x4_t r; \
+ \
+ /* Set mbuf pkt_len, data_len, and rss_hash fields. */ \
+ r = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(rxcmp), \
+ (shuf_msk))); \
+ \
+ /* Set packet type. */ \
+ ptype = bnxt_ptype_table[vgetq_lane_u32((ptype_idx), (pkt_idx))]; \
+ r = vsetq_lane_u32(ptype, r, 0); \
+ \
+ /* Set vlan_tci. */ \
+ vlan_tci = vgetq_lane_u32((rxcmp1), 1); \
+ r = vreinterpretq_u32_u16(vsetq_lane_u16(vlan_tci, \
+ vreinterpretq_u16_u32(r), 5)); \
+ (ret) = r; \
}
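+/*
+ * Convert four completion descriptor pairs (mm_rxcmp/mm_rxcmp1) into the
+ * four mbufs in 'mbuf[]': compute packet type indexes and ol_flags for all
+ * four packets with NEON, then write each mbuf's rearm_data and
+ * rx_descriptor_fields1 fields.
+ */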
static void
-bnxt_parse_csum(struct rte_mbuf *mbuf, struct rx_pkt_cmpl_hi *rxcmp1)
+descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
+ uint64x2_t mb_init, struct rte_mbuf **mbuf,
+ struct bnxt_rx_ring_info *rxr)
{
- uint32_t flags;
-
- flags = flags2_0xf(rxcmp1);
- /* IP Checksum */
- if (likely(IS_IP_NONTUNNEL_PKT(flags))) {
- if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
- else
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
- } else if (IS_IP_TUNNEL_PKT(flags)) {
- if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
- RX_CMP_IP_CS_ERROR(rxcmp1)))
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
- else
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
- } else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {
- mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
- }
-
- /* L4 Checksum */
- if (likely(IS_L4_NONTUNNEL_PKT(flags))) {
- if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- else
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
- } else if (IS_L4_TUNNEL_PKT(flags)) {
- if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
- else
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
- if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {
- mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
- } else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
- (flags))) {
- mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
- } else {
- mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
- }
- } else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {
- mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
- }
+ const uint8x16_t shuf_msk = {
+ 0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type (zeroes) */
+ 2, 3, 0xFF, 0xFF, /* pkt_len */
+ 2, 3, /* data_len */
+ 0xFF, 0xFF, /* vlan_tci (zeroes) */
+ 12, 13, 14, 15 /* rss hash */
+ };
+ const uint32x4_t flags_type_mask =
+ vdupq_n_u32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
+ const uint32x4_t flags2_mask1 =
+ vdupq_n_u32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
+ RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC);
+ const uint32x4_t flags2_mask2 =
+ vdupq_n_u32(RX_PKT_CMPL_FLAGS2_IP_TYPE);
+ const uint32x4_t rss_mask =
+ vdupq_n_u32(RX_PKT_CMPL_FLAGS_RSS_VALID);
+ const uint32x4_t flags2_index_mask = vdupq_n_u32(0x1F);
+ const uint32x4_t flags2_error_mask = vdupq_n_u32(0x0F);
+ uint32x4_t flags_type, flags2, index, errors, rss_flags;
+ uint32x4_t tmp, ptype_idx, is_tunnel;
+ uint64x2_t t0, t1;
+ uint32_t ol_flags;
+
+ /* Compute packet type table indexes for four packets */
+ t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[0], mm_rxcmp[1]));
+ t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[2], mm_rxcmp[3]));
+
+ flags_type = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
+ vget_low_u64(t1)));
+ ptype_idx =
+ vshrq_n_u32(vandq_u32(flags_type, flags_type_mask), 9);
+
+ t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
+ t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));
+
+ flags2 = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
+ vget_low_u64(t1)));
+
+ ptype_idx = vorrq_u32(ptype_idx,
+ vshrq_n_u32(vandq_u32(flags2, flags2_mask1), 2));
+ ptype_idx = vorrq_u32(ptype_idx,
+ vshrq_n_u32(vandq_u32(flags2, flags2_mask2), 7));
+
+ /* Extract RSS valid flags for four packets. */
+ rss_flags = vshrq_n_u32(vandq_u32(flags_type, rss_mask), 9);
+
+ flags2 = vandq_u32(flags2, flags2_index_mask);
+
+ /* Extract errors_v2 fields for four packets. */
+ t0 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
+ t1 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));
+
+ errors = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
+ vget_low_u64(t1)));
+
+ /* Compute ol_flags and checksum error indexes for four packets. */
+ is_tunnel = vandq_u32(flags2, vdupq_n_u32(4));
+ is_tunnel = vshlq_n_u32(is_tunnel, 3);
+ errors = vandq_u32(vshrq_n_u32(errors, 4), flags2_error_mask);
+ errors = vandq_u32(errors, flags2);
+
+ index = vbicq_u32(flags2, errors);
+ errors = vorrq_u32(errors, vshrq_n_u32(is_tunnel, 1));
+ index = vorrq_u32(index, is_tunnel);
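+ /*
+ * At this point 'index' selects the ol_flags_table entry (checksum error
+ * bits cleared, tunnel flag folded into bit 5) and 'errors' selects the
+ * ol_flags_err_table entry (tunnel flag folded into bit 4), so the lookup
+ * tables can distinguish tunnel from non-tunnel packets.
+ */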
+
+ /* Update mbuf rearm_data for four packets. */
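+ /*
+ * mb_init holds the constant rearm word (data_off/refcnt/nb_segs/port);
+ * lane 2 of the 16-byte store corresponds to the low 32 bits of ol_flags,
+ * which is where the RX offload flags set here fall.
+ */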
+ GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
+ vst1q_u32((uint32_t *)&mbuf[0]->rearm_data,
+ vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
+ GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags);
+ vst1q_u32((uint32_t *)&mbuf[1]->rearm_data,
+ vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
+ GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags);
+ vst1q_u32((uint32_t *)&mbuf[2]->rearm_data,
+ vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
+ GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags);
+ vst1q_u32((uint32_t *)&mbuf[3]->rearm_data,
+ vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2));
+
+ /* Update mbuf rx_descriptor_fields1 for four packets. */
+ GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, tmp);
+ vst1q_u32((uint32_t *)&mbuf[0]->rx_descriptor_fields1, tmp);
+ GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, tmp);
+ vst1q_u32((uint32_t *)&mbuf[1]->rx_descriptor_fields1, tmp);
+ GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, tmp);
+ vst1q_u32((uint32_t *)&mbuf[2]->rx_descriptor_fields1, tmp);
+ GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, tmp);
+ vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp);
}
uint16_t
struct bnxt_rx_queue *rxq = rx_queue;
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
+ uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
+ uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
+ struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
+ uint64_t valid, desc_valid_mask = ~0UL;
+ const uint32x4_t info3_v_mask = vdupq_n_u32(CMPL_BASE_V);
uint32_t raw_cons = cpr->cp_raw_cons;
- uint32_t cons;
+ uint32_t cons, mbcons;
int nb_rx_pkts = 0;
- struct rx_pkt_cmpl *rxcmp;
- bool evt = false;
- const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
- const uint8x16_t shuf_msk = {
- 0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type (zeroes) */
- 2, 3, 0xFF, 0xFF, /* pkt_len */
- 2, 3, /* data_len */
- 0xFF, 0xFF, /* vlan_tci (zeroes) */
- 12, 13, 14, 15 /* rss hash */
- };
+ const uint64x2_t mb_init = {rxq->mbuf_initializer, 0};
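+ /*
+ * Expected phase of the completion ring valid bit for this pass through
+ * the ring: a completion is owned by software when its CMPL_BASE_V bit
+ * differs from this value. The phase toggles each time the raw consumer
+ * index wraps the ring.
+ */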
+ const uint32x4_t valid_target =
+ vdupq_n_u32(!!(raw_cons & cp_ring_size));
+ int i;
/* If Rx Q was stopped return */
if (unlikely(!rxq->rx_started))
return 0;
- if (rxq->rxrearm_nb >= RTE_BNXT_RXQ_REARM_THRESH)
+ if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
bnxt_rxq_rearm(rxq, rxr);
/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
- /* Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP */
- nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
- if (!nb_pkts)
- return 0;
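+ /*
+ * 'cons' indexes completion ring entries, 'mbcons' indexes receive
+ * buffers. Each received packet consumes a pair of completion entries but
+ * only one buffer, so the buffer consumer advances at half the rate of
+ * the raw completion consumer.
+ */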
+ cons = raw_cons & (cp_ring_size - 1);
+ mbcons = (raw_cons / 2) & (rx_ring_size - 1);
+
+ /* Prefetch first four descriptor pairs. */
+ rte_prefetch0(&cp_desc_ring[cons]);
+ rte_prefetch0(&cp_desc_ring[cons + 4]);
+
+ /* Ensure that we do not go past the ends of the rings. */
+ nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons,
+ (cp_ring_size - cons) / 2));
+ /*
+ * If we are at the end of the ring, ensure that descriptors after the
+ * last valid entry are not treated as valid. Each descriptor contributes
+ * one 16-bit field to the packed 'valid' word computed below, so the
+ * shift drops the fields of descriptors that lie beyond the end of the
+ * ring. Otherwise, force the maximum number of packets to receive to be
+ * a multiple of the per-loop count.
+ */
+ if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
+ desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);
+ else
+ nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
/* Handle RX burst request */
- while (1) {
- cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
-
- rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+ for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
+ cons += RTE_BNXT_DESCS_PER_LOOP * 2,
+ mbcons += RTE_BNXT_DESCS_PER_LOOP) {
+ uint32x4_t rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
+ uint32x4_t rxcmp[RTE_BNXT_DESCS_PER_LOOP];
+ uint32x4_t info3_v;
+ uint64x2_t t0, t1;
+ uint32_t num_valid;
+
+ /* Copy four mbuf pointers to output array. */
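+ /*
+ * On 64-bit ARM an mbuf pointer is eight bytes, so two 128-bit copies
+ * are needed for four pointers; on 32-bit ARM all four pointers fit in
+ * a single vector register.
+ */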
+ t0 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons]);
+#ifdef RTE_ARCH_ARM64
+ t1 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons + 2]);
+#endif
+ vst1q_u64((void *)&rx_pkts[i], t0);
+#ifdef RTE_ARCH_ARM64
+ vst1q_u64((void *)&rx_pkts[i + 2], t1);
+#endif
+
+ /* Prefetch four descriptor pairs for next iteration. */
+ if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
+ rte_prefetch0(&cp_desc_ring[cons + 8]);
+ rte_prefetch0(&cp_desc_ring[cons + 12]);
+ }
- if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+ /*
+ * Load the four current descriptor pairs into NEON registers in
+ * reverse order to ensure consistent state.
+ */
+ rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
+ rte_io_rmb();
+ rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);
+
+ rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
+ rte_io_rmb();
+ rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);
+
+ t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));
+
+ rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
+ rte_io_rmb();
+ rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);
+
+ rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
+ rte_io_rmb();
+ rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);
+
+ t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));
+
+ /* Isolate descriptor status flags. */
+ info3_v = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
+ vget_low_u64(t1)));
+ info3_v = vandq_u32(info3_v, info3_v_mask);
+ info3_v = veorq_u32(info3_v, valid_target);
+
+ /*
+ * Pack the 128-bit array of valid descriptor flags into 64
+ * bits, with one 16-bit field per descriptor, in order to
+ * determine the number of valid descriptors.
+ */
+ valid = vget_lane_u64(vreinterpret_u64_u16(vqmovn_u32(info3_v)),
+ 0);
+ /*
+ * At this point, 'valid' is a 64-bit value containing four
+ * 16-bit fields, each of which is either 0x0001 or 0x0000.
+ * Compute number of valid descriptors from the index of
+ * the highest non-zero field.
+ */
+ num_valid = (sizeof(uint64_t) / sizeof(uint16_t)) -
+ (__builtin_clzl(valid & desc_valid_mask) / 16);
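+ /*
+ * Example: with desc_valid_mask all ones and only the two lowest 16-bit
+ * fields set, valid = 0x0000000000010001, __builtin_clzl() returns 47,
+ * and num_valid = 4 - 47 / 16 = 2.
+ */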
+
+ if (num_valid == 0)
break;
- if (likely(CMP_TYPE(rxcmp) == RX_PKT_CMPL_TYPE_RX_L2)) {
- struct rx_pkt_cmpl_hi *rxcmp1;
- uint32_t tmp_raw_cons;
- uint16_t cp_cons;
- struct rte_mbuf *mbuf;
- uint64x2_t mm_rxcmp;
- uint8x16_t pkt_mb;
-
- tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
- cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
- rxcmp1 = (struct rx_pkt_cmpl_hi *)
- &cpr->cp_desc_ring[cp_cons];
-
- if (!CMP_VALID(rxcmp1, tmp_raw_cons,
- cpr->cp_ring_struct))
- break;
-
- raw_cons = tmp_raw_cons;
- cons = rxcmp->opaque;
-
- mbuf = rxr->rx_buf_ring[cons].mbuf;
- rte_prefetch0(mbuf);
- rxr->rx_buf_ring[cons].mbuf = NULL;
-
- /* Set constant fields from mbuf initializer. */
- vst1q_u64((uint64_t *)&mbuf->rearm_data, mbuf_init);
-
- /* Set mbuf pkt_len, data_len, and rss_hash fields. */
- mm_rxcmp = vld1q_u64((uint64_t *)rxcmp);
- pkt_mb = vqtbl1q_u8(vreinterpretq_u8_u64(mm_rxcmp),
- shuf_msk);
- vst1q_u64((uint64_t *)&mbuf->rx_descriptor_fields1,
- vreinterpretq_u64_u8(pkt_mb));
-
- rte_compiler_barrier();
-
- if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID)
- mbuf->ol_flags |= PKT_RX_RSS_HASH;
-
- if (rxcmp1->flags2 &
- RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) {
- mbuf->vlan_tci = rxcmp1->metadata &
- (RX_PKT_CMPL_METADATA_VID_MASK |
- RX_PKT_CMPL_METADATA_DE |
- RX_PKT_CMPL_METADATA_PRI_MASK);
- mbuf->ol_flags |=
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
- }
-
- bnxt_parse_csum(mbuf, rxcmp1);
- mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
-
- rx_pkts[nb_rx_pkts++] = mbuf;
- } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
- evt =
- bnxt_event_hwrm_resp_handler(rxq->bp,
- (struct cmpl_base *)rxcmp);
- }
+ descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts],
+ rxr);
+ nb_rx_pkts += num_valid;
- raw_cons = NEXT_RAW_CMP(raw_cons);
- if (nb_rx_pkts == nb_pkts || evt)
+ if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
break;
}
- rxr->rx_prod = RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);
-
- rxq->rxrearm_nb += nb_rx_pkts;
- cpr->cp_raw_cons = raw_cons;
- cpr->valid = !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
- if (nb_rx_pkts || evt)
- bnxt_db_cq(cpr);
- return nb_rx_pkts;
-}
+ if (nb_rx_pkts) {
+ rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);
-static void
-bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
-{
- struct bnxt_tx_ring_info *txr = txq->tx_ring;
- struct rte_mbuf **free = txq->free;
- uint16_t cons = txr->tx_cons;
- unsigned int blk = 0;
-
- while (nr_pkts--) {
- struct bnxt_sw_tx_bd *tx_buf;
- struct rte_mbuf *mbuf;
-
- tx_buf = &txr->tx_buf_ring[cons];
- cons = RING_NEXT(txr->tx_ring_struct, cons);
- mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
- tx_buf->mbuf = NULL;
-
- if (blk && mbuf->pool != free[0]->pool) {
- rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
- blk = 0;
- }
- free[blk++] = mbuf;
+ rxq->rxrearm_nb += nb_rx_pkts;
+ cpr->cp_raw_cons += 2 * nb_rx_pkts;
+ cpr->valid =
+ !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
+ bnxt_db_cq(cpr);
}
- if (blk)
- rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
- txr->tx_cons = cons;
+ return nb_rx_pkts;
}
static void
cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
if (nb_tx_pkts) {
- bnxt_tx_cmp_vec(txq, nb_tx_pkts);
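+ /*
+ * DEV_TX_OFFLOAD_MBUF_FAST_FREE guarantees that all transmitted mbufs
+ * come from a single mempool and carry no extra references, so the fast
+ * completion path can return them to the pool in bulk without
+ * per-segment prefree checks.
+ */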
+ if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
+ else
+ bnxt_tx_cmp_vec(txq, nb_tx_pkts);
cpr->cp_raw_cons = raw_cons;
bnxt_db_cq(cpr);
}
{
struct bnxt_tx_queue *txq = tx_queue;
struct bnxt_tx_ring_info *txr = txq->tx_ring;
- uint16_t prod = txr->tx_prod;
+ uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
struct rte_mbuf *tx_mbuf;
struct tx_bd_long *txbd = NULL;
- struct bnxt_sw_tx_bd *tx_buf;
+ struct rte_mbuf **tx_buf;
uint16_t to_send;
nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));
tx_mbuf = *tx_pkts++;
rte_prefetch0(tx_mbuf);
- tx_buf = &txr->tx_buf_ring[prod];
- tx_buf->mbuf = tx_mbuf;
- tx_buf->nr_bds = 1;
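+ /*
+ * tx_raw_prod is an unmasked producer index; RING_IDX() is assumed to
+ * mask it to a slot within the ring, letting the raw value advance
+ * monotonically.
+ */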
+ tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
+ tx_buf = &txr->tx_buf_ring[tx_prod];
+ *tx_buf = tx_mbuf;
- txbd = &txr->tx_desc_ring[prod];
+ txbd = &txr->tx_desc_ring[tx_prod];
txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
txbd->len = tx_mbuf->data_len;
txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
TX_BD_FLAGS_NOCMPL);
- prod = RING_NEXT(txr->tx_ring_struct, prod);
+ tx_raw_prod = RING_NEXT(tx_raw_prod);
to_send--;
}
}
rte_compiler_barrier();
- bnxt_db_write(&txr->tx_db, prod);
+ bnxt_db_write(&txr->tx_db, tx_raw_prod);
- txr->tx_prod = prod;
+ txr->tx_raw_prod = tx_raw_prod;
return nb_pkts;
}