/* SPDX-License-Identifier: BSD-3-Clause */
-/* Copyright(c) 2019-2020 Broadcom All rights reserved. */
+/* Copyright(c) 2019-2021 Broadcom All rights reserved. */
#include <inttypes.h>
#include <stdbool.h>
#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_ring.h"
-#include "bnxt_rxtx_vec_common.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
+#include "bnxt_rxtx_vec_common.h"
/*
 * RX Ring handling
 */
#define GET_OL_FLAGS(rss_flags, ol_idx, errors, pi, ol_flags) \
{ \
	uint32_t tmp, of; \
\
of = vgetq_lane_u32((rss_flags), (pi)) | \
- bnxt_ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \
+ rxr->ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \
\
tmp = vgetq_lane_u32((errors), (pi)); \
if (tmp) \
- of |= bnxt_ol_flags_err_table[tmp]; \
+ of |= rxr->ol_flags_err_table[tmp]; \
(ol_flags) = of; \
}
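For reference, a scalar sketch of what GET_OL_FLAGS computes for one lane; the per-ring tables come from the patch itself, but sketch_ol_flags() and its shape are illustrative, not driver code:

/* Hypothetical scalar equivalent of GET_OL_FLAGS for one packet. */
static inline uint32_t
sketch_ol_flags(const struct bnxt_rx_ring_info *rxr, uint32_t rss,
		uint32_t idx, uint32_t errs)
{
	uint32_t of = rss | rxr->ol_flags_table[idx];

	/* The error table is consulted only when an error bit is set. */
	if (errs)
		of |= rxr->ol_flags_err_table[errs];

	return of;
}

Replacing the global bnxt_ol_flags_table[] with the per-ring rxr->ol_flags_table lets each ring precompute entries that can depend on per-queue offload configuration.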
static void
descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
- uint64x2_t mb_init, struct rte_mbuf **mbuf)
+ uint64x2_t mb_init, struct rte_mbuf **mbuf,
+ struct bnxt_rx_ring_info *rxr)
{
const uint8x16_t shuf_msk = {
0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type (zeroes) */
0xFF, 0xFF, /* vlan_tci (zeroes) */
12, 13, 14, 15 /* rss hash */
};
- const uint32x4_t flags_type_mask = {
- RX_PKT_CMPL_FLAGS_ITYPE_MASK,
- RX_PKT_CMPL_FLAGS_ITYPE_MASK,
- RX_PKT_CMPL_FLAGS_ITYPE_MASK,
- RX_PKT_CMPL_FLAGS_ITYPE_MASK
- };
- const uint32x4_t flags2_mask1 = {
- RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
- RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
- RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
- RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
- RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
- RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC,
- RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
- RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
- };
- const uint32x4_t flags2_mask2 = {
- RX_PKT_CMPL_FLAGS2_IP_TYPE,
- RX_PKT_CMPL_FLAGS2_IP_TYPE,
- RX_PKT_CMPL_FLAGS2_IP_TYPE,
- RX_PKT_CMPL_FLAGS2_IP_TYPE
- };
- const uint32x4_t rss_mask = {
- RX_PKT_CMPL_FLAGS_RSS_VALID,
- RX_PKT_CMPL_FLAGS_RSS_VALID,
- RX_PKT_CMPL_FLAGS_RSS_VALID,
- RX_PKT_CMPL_FLAGS_RSS_VALID
- };
- const uint32x4_t flags2_index_mask = {
- 0x1F, 0x1F, 0x1F, 0x1F
- };
- const uint32x4_t flags2_error_mask = {
- 0xF, 0xF, 0xF, 0xF
- };
+ const uint32x4_t flags_type_mask =
+ vdupq_n_u32(RX_PKT_CMPL_FLAGS_ITYPE_MASK);
+ const uint32x4_t flags2_mask1 =
+ vdupq_n_u32(CMPL_FLAGS2_VLAN_TUN_MSK);
+ const uint32x4_t flags2_mask2 =
+ vdupq_n_u32(RX_PKT_CMPL_FLAGS2_IP_TYPE);
+ const uint32x4_t rss_mask =
+ vdupq_n_u32(RX_PKT_CMPL_FLAGS_RSS_VALID);
+ const uint32x4_t flags2_index_mask = vdupq_n_u32(0x1F);
+ const uint32x4_t flags2_error_mask = vdupq_n_u32(0x0F);
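Replacing the braced four-lane initializers with vdupq_n_u32() is behavior-preserving; the intrinsic broadcasts one scalar to all four 32-bit lanes:

/* Identical vectors, before and after the cleanup: */
const uint32x4_t before = {0x1F, 0x1F, 0x1F, 0x1F};
const uint32x4_t after = vdupq_n_u32(0x1F);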
uint32x4_t flags_type, flags2, index, errors, rss_flags;
- uint32x4_t tmp, ptype_idx;
+ uint32x4_t tmp, ptype_idx, is_tunnel;
uint64x2_t t0, t1;
uint32_t ol_flags;
+ /* Validate ptype table indexing at build time. */
+	bnxt_check_ptype_constants();
+
/* Compute packet type table indexes for four packets */
t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[0], mm_rxcmp[1]));
t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[2], mm_rxcmp[3]));
flags_type = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
vget_low_u64(t1)));
- ptype_idx =
- vshrq_n_u32(vandq_u32(flags_type, flags_type_mask), 9);
+ ptype_idx = vshrq_n_u32(vandq_u32(flags_type, flags_type_mask),
+ RX_PKT_CMPL_FLAGS_ITYPE_SFT -
+ BNXT_PTYPE_TBL_TYPE_SFT);
t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));
flags2 = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
					    vget_low_u64(t1)));
ptype_idx = vorrq_u32(ptype_idx,
- vshrq_n_u32(vandq_u32(flags2, flags2_mask1), 2));
+ vshrq_n_u32(vandq_u32(flags2, flags2_mask1),
+ RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT -
+ BNXT_PTYPE_TBL_VLAN_SFT));
ptype_idx = vorrq_u32(ptype_idx,
- vshrq_n_u32(vandq_u32(flags2, flags2_mask2), 7));
+ vshrq_n_u32(vandq_u32(flags2, flags2_mask2),
+ RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT -
+ BNXT_PTYPE_TBL_IP_VER_SFT));
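Deriving the shift counts as FIELD_SFT minus BNXT_PTYPE_TBL_*_SFT (instead of the bare 9/2/7 literals) keeps the table layout defined in one place; bnxt_check_ptype_constants() presumably asserts at build time that the differences are valid immediate shifts for vshrq_n_u32(). A scalar sketch of the index the vector code assembles per packet, using the driver's own constants (sketch_ptype_idx() itself is illustrative):

static inline uint32_t
sketch_ptype_idx(uint32_t flags_type, uint32_t flags2)
{
	uint32_t idx;

	/* L2/L4 packet type from the first completion word. */
	idx  = (flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >>
	       (RX_PKT_CMPL_FLAGS_ITYPE_SFT - BNXT_PTYPE_TBL_TYPE_SFT);
	/* VLAN/tunnel metadata-format bits. */
	idx |= (flags2 & CMPL_FLAGS2_VLAN_TUN_MSK) >>
	       (RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT - BNXT_PTYPE_TBL_VLAN_SFT);
	/* IPv4 vs. IPv6. */
	idx |= (flags2 & RX_PKT_CMPL_FLAGS2_IP_TYPE) >>
	       (RX_PKT_CMPL_FLAGS2_IP_TYPE_SFT - BNXT_PTYPE_TBL_IP_VER_SFT);

	return idx;
}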
/* Extract RSS valid flags for four packets. */
rss_flags = vshrq_n_u32(vandq_u32(flags_type, rss_mask), 9);
/* Extract errors_v2 fields for four packets. */
t0 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[0], mm_rxcmp1[1]));
t1 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[2], mm_rxcmp1[3]));
errors = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
					    vget_low_u64(t1)));
/* Compute ol_flags and checksum error indexes for four packets. */
+ is_tunnel = vandq_u32(flags2, vdupq_n_u32(4));
+ is_tunnel = vshlq_n_u32(is_tunnel, 3);
flags2 = vandq_u32(flags2, flags2_index_mask);
errors = vandq_u32(vshrq_n_u32(errors, 4), flags2_error_mask);
errors = vandq_u32(errors, flags2);
index = vbicq_u32(flags2, errors);
+ errors = vorrq_u32(errors, vshrq_n_u32(is_tunnel, 1));
+ index = vorrq_u32(index, is_tunnel);
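A scalar sketch of the tunnel adjustment above: bit 2 of flags2 (tunnel checksum calculated) is relocated to bit 5 of the ol_flags table index and bit 4 of the error-table index, so tunneled packets select the upper halves of the per-ring tables. The exact table layout is an assumption here; sketch_tbl_indexes() is illustrative:

static inline void
sketch_tbl_indexes(uint32_t flags2, uint32_t errors_v2,
		   uint32_t *of_idx, uint32_t *err_idx)
{
	uint32_t f2 = flags2 & 0x1F;		/* 5-bit table index */
	uint32_t is_tun = (flags2 & 0x4) << 3;	/* bit 2 -> bit 5 */

	*err_idx  = (errors_v2 >> 4) & 0xF & f2;	/* checksum errors */
	*of_idx   = (f2 & ~*err_idx) | is_tun;
	*err_idx |= is_tun >> 1;			/* bit 5 -> bit 4 */
}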
/* Update mbuf rearm_data for four packets. */
GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags);
vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp);
}
-uint16_t
-bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static uint16_t
+recv_burst_vec_neon(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
struct bnxt_rx_queue *rxq = rx_queue;
struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size;
uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size;
struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring;
uint64_t valid, desc_valid_mask = ~0UL;
- const uint32x4_t info3_v_mask = {
- CMPL_BASE_V, CMPL_BASE_V,
- CMPL_BASE_V, CMPL_BASE_V
- };
+ const uint32x4_t info3_v_mask = vdupq_n_u32(CMPL_BASE_V);
uint32_t raw_cons = cpr->cp_raw_cons;
uint32_t cons, mbcons;
int nb_rx_pkts = 0;
const uint64x2_t mb_init = {rxq->mbuf_initializer, 0};
- const uint32x4_t valid_target = {
- !!(raw_cons & cp_ring_size),
- !!(raw_cons & cp_ring_size),
- !!(raw_cons & cp_ring_size),
- !!(raw_cons & cp_ring_size)
- };
+ const uint32x4_t valid_target =
+ vdupq_n_u32(!!(raw_cons & cp_ring_size));
int i;
/* If Rx Q was stopped return */
if (unlikely(!rxq->rx_started))
	return 0;

if (rxq->rxrearm_nb >= rxq->rx_free_thresh)
bnxt_rxq_rearm(rxq, rxr);
- /* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
- nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
-
cons = raw_cons & (cp_ring_size - 1);
mbcons = (raw_cons / 2) & (rx_ring_size - 1);
/*
 * If we are at the end of the ring, ensure that descriptors after the
 * last valid entry are not treated as valid. Otherwise, force the
 * maximum number of packets to receive to be a multiple of the per-
 * loop count.
 */
- if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP)
- desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts);
- else
- nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
+ if (nb_pkts < BNXT_RX_DESCS_PER_LOOP_VEC128) {
+ desc_valid_mask >>=
+ 16 * (BNXT_RX_DESCS_PER_LOOP_VEC128 - nb_pkts);
+ } else {
+ nb_pkts =
+ RTE_ALIGN_FLOOR(nb_pkts, BNXT_RX_DESCS_PER_LOOP_VEC128);
+ }
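With the RTE_BNXT_MAX_RX_BURST clamp gone from this function (it moves to the new wrapper below), short requests are handled by shortening the valid mask instead. A worked example, assuming the 128-bit four-descriptors-per-loop path:

/* nb_pkts == 2: the two high 16-bit lanes are masked off, so stale
 * completions beyond the request can never be counted as valid.
 *
 *   desc_valid_mask = ~0UL >> (16 * (4 - 2)) = 0x00000000FFFFFFFF
 */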
/* Handle RX burst request */
- for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP,
- cons += RTE_BNXT_DESCS_PER_LOOP * 2,
- mbcons += RTE_BNXT_DESCS_PER_LOOP) {
- uint32x4_t rxcmp1[RTE_BNXT_DESCS_PER_LOOP];
- uint32x4_t rxcmp[RTE_BNXT_DESCS_PER_LOOP];
+ for (i = 0; i < nb_pkts; i += BNXT_RX_DESCS_PER_LOOP_VEC128,
+ cons += BNXT_RX_DESCS_PER_LOOP_VEC128 * 2,
+ mbcons += BNXT_RX_DESCS_PER_LOOP_VEC128) {
+ uint32x4_t rxcmp1[BNXT_RX_DESCS_PER_LOOP_VEC128];
+ uint32x4_t rxcmp[BNXT_RX_DESCS_PER_LOOP_VEC128];
uint32x4_t info3_v;
uint64x2_t t0, t1;
uint32_t num_valid;
#endif
/* Prefetch four descriptor pairs for next iteration. */
- if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
+ if (i + BNXT_RX_DESCS_PER_LOOP_VEC128 < nb_pkts) {
rte_prefetch0(&cp_desc_ring[cons + 8]);
rte_prefetch0(&cp_desc_ring[cons + 12]);
}
/*
 * Load the four current descriptors into NEON registers, in
 * reverse order to ensure consistent state.
 */
rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
- rte_cio_rmb();
+ rte_io_rmb();
rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);
rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
- rte_cio_rmb();
+ rte_io_rmb();
rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);
t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));
rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
- rte_cio_rmb();
+ rte_io_rmb();
rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);
rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
- rte_cio_rmb();
+ rte_io_rmb();
rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);
t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));
num_valid = (sizeof(uint64_t) / sizeof(uint16_t)) -
(__builtin_clzl(valid & desc_valid_mask) / 16);
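The V bits of the four completions were narrowed to one 16-bit lane each and XORed against the expected phase (valid_target) before this point. A worked example, with the convention that a nonzero lane marks a valid completion:

/* Descriptors 0 and 1 ready, 2 and 3 not yet written:
 *
 *   valid               = 0x0000000000010001  (lanes {1, 1, 0, 0})
 *   __builtin_clzl(...) = 47 leading zeros -> 47 / 16 = 2 stale lanes
 *   num_valid           = 4 - 2 = 2 packets completed this iteration
 */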
- switch (num_valid) {
- case 4:
- rxr->rx_buf_ring[mbcons + 3] = NULL;
- /* FALLTHROUGH */
- case 3:
- rxr->rx_buf_ring[mbcons + 2] = NULL;
- /* FALLTHROUGH */
- case 2:
- rxr->rx_buf_ring[mbcons + 1] = NULL;
- /* FALLTHROUGH */
- case 1:
- rxr->rx_buf_ring[mbcons + 0] = NULL;
+ if (num_valid == 0)
break;
- case 0:
- goto out;
- }
- descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts]);
+ descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts],
+ rxr);
nb_rx_pkts += num_valid;
- if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
+ if (num_valid < BNXT_RX_DESCS_PER_LOOP_VEC128)
break;
}
-out:
if (nb_rx_pkts) {
- rxr->rx_prod =
- RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts);
+ rxr->rx_raw_prod = RING_ADV(rxr->rx_raw_prod, nb_rx_pkts);
rxq->rxrearm_nb += nb_rx_pkts;
cpr->cp_raw_cons += 2 * nb_rx_pkts;
- cpr->valid =
- !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
bnxt_db_cq(cpr);
}
return nb_rx_pkts;
}
-static void
-bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
+uint16_t
+bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
- struct bnxt_tx_ring_info *txr = txq->tx_ring;
- struct rte_mbuf **free = txq->free;
- uint16_t cons = txr->tx_cons;
- unsigned int blk = 0;
-
- while (nr_pkts--) {
- struct bnxt_sw_tx_bd *tx_buf;
- struct rte_mbuf *mbuf;
-
- tx_buf = &txr->tx_buf_ring[cons];
- cons = RING_NEXT(txr->tx_ring_struct, cons);
- mbuf = rte_pktmbuf_prefree_seg(tx_buf->mbuf);
- if (unlikely(mbuf == NULL))
- continue;
- tx_buf->mbuf = NULL;
-
- if (blk && mbuf->pool != free[0]->pool) {
- rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
- blk = 0;
- }
- free[blk++] = mbuf;
+ uint16_t cnt = 0;
+
+ while (nb_pkts > RTE_BNXT_MAX_RX_BURST) {
+ uint16_t burst;
+
+ burst = recv_burst_vec_neon(rx_queue, rx_pkts + cnt,
+ RTE_BNXT_MAX_RX_BURST);
+
+ cnt += burst;
+ nb_pkts -= burst;
+
+ if (burst < RTE_BNXT_MAX_RX_BURST)
+ return cnt;
}
- if (blk)
- rte_mempool_put_bulk(free[0]->pool, (void **)free, blk);
- txr->tx_cons = cons;
+ return cnt + recv_burst_vec_neon(rx_queue, rx_pkts + cnt, nb_pkts);
}
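This wrapper picks up the burst clamp removed above: recv_burst_vec_neon() still processes at most RTE_BNXT_MAX_RX_BURST packets per call, so larger requests are split, and a short inner burst ends the loop because the completion ring is drained. For example, assuming a 32-packet cap:

/* nb_pkts == 80 -> inner bursts of 32, 32, then 16; if the second
 * burst returns only 20, the function returns 52 immediately.
 */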
static void
bnxt_handle_tx_cp_vec(struct bnxt_tx_queue *txq)
{
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
- if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
+ if (!bnxt_cpr_cmp_valid(txcmp, raw_cons, ring_mask + 1))
break;
		if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
			nb_tx_pkts += txcmp->opaque;
raw_cons = NEXT_RAW_CMP(raw_cons);
} while (nb_tx_pkts < ring_mask);
- cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
if (nb_tx_pkts) {
- bnxt_tx_cmp_vec(txq, nb_tx_pkts);
+ if (txq->offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+ bnxt_tx_cmp_vec_fast(txq, nb_tx_pkts);
+ else
+ bnxt_tx_cmp_vec(txq, nb_tx_pkts);
cpr->cp_raw_cons = raw_cons;
bnxt_db_cq(cpr);
}
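The completion path now dispatches on RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, with both helpers living in bnxt_rxtx_vec_common.h after the include change above. A hedged sketch of the fast-free idea, not the driver's actual helper: the offload guarantees refcnt == 1, no external buffers, and a single mempool per queue, so completed mbufs can be bulk-returned without the per-mbuf rte_pktmbuf_prefree_seg() and pool-grouping work the removed slow path had to do:

/* Illustrative only; assumes the 'nr' entries do not wrap the ring. */
static inline void
sketch_tx_cmp_fast(struct rte_mbuf **buf_ring, uint16_t cons, int nr)
{
	rte_mempool_put_bulk(buf_ring[cons]->pool,
			     (void **)&buf_ring[cons], nr);
}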
static uint16_t
bnxt_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
			  uint16_t nb_pkts)
{
struct bnxt_tx_queue *txq = tx_queue;
struct bnxt_tx_ring_info *txr = txq->tx_ring;
- uint16_t prod = txr->tx_prod;
+ uint16_t tx_prod, tx_raw_prod = txr->tx_raw_prod;
struct rte_mbuf *tx_mbuf;
struct tx_bd_long *txbd = NULL;
- struct bnxt_sw_tx_bd *tx_buf;
+ struct rte_mbuf **tx_buf;
uint16_t to_send;
nb_pkts = RTE_MIN(nb_pkts, bnxt_tx_avail(txq));
tx_mbuf = *tx_pkts++;
rte_prefetch0(tx_mbuf);
- tx_buf = &txr->tx_buf_ring[prod];
- tx_buf->mbuf = tx_mbuf;
- tx_buf->nr_bds = 1;
+ tx_prod = RING_IDX(txr->tx_ring_struct, tx_raw_prod);
+ tx_buf = &txr->tx_buf_ring[tx_prod];
+ *tx_buf = tx_mbuf;
- txbd = &txr->tx_desc_ring[prod];
+ txbd = &txr->tx_desc_ring[tx_prod];
txbd->address = tx_mbuf->buf_iova + tx_mbuf->data_off;
txbd->len = tx_mbuf->data_len;
txbd->flags_type = bnxt_xmit_flags_len(tx_mbuf->data_len,
TX_BD_FLAGS_NOCMPL);
- prod = RING_NEXT(txr->tx_ring_struct, prod);
+ tx_raw_prod = RING_NEXT(tx_raw_prod);
to_send--;
}
}
rte_compiler_barrier();
- bnxt_db_write(&txr->tx_db, prod);
+ bnxt_db_write(&txr->tx_db, tx_raw_prod);
- txr->tx_prod = prod;
+ txr->tx_raw_prod = tx_raw_prod;
return nb_pkts;
}
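The TX path now mirrors RX in carrying a free-running (raw) producer index, masking it down to a ring slot only at the point of use. Assumed semantics of the ring helpers, written out for illustration (the real definitions are in the driver's headers):

/* Illustrative equivalents, not the driver's definitions. */
#define RING_IDX(ring, raw)	((raw) & (ring)->ring_mask)	/* slot */
#define RING_NEXT(raw)		((raw) + 1)			/* advance 1 */
#define RING_ADV(raw, adv)	((raw) + (adv))			/* advance n */
#define RING_CMPL(mask, raw)	((raw) & (mask))		/* cq slot */

Keeping the raw value lets the valid-phase computation (raw_cons & ring_size) and the doorbell write work without tracking wrap state separately.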