return bnxt_ptype_table[index];
}
-uint32_t
-bnxt_ol_flags_table[BNXT_OL_FLAGS_TBL_DIM] __rte_cache_aligned;
-
-uint32_t
-bnxt_ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM] __rte_cache_aligned;
-
static void __rte_cold
-bnxt_init_ol_flags_tables(void)
+bnxt_init_ol_flags_tables(struct bnxt_rx_ring_info *rxr)
{
- static bool initialized;
uint32_t *pt;
int i;
- if (initialized)
- return;
-
/* Initialize ol_flags table. */
- pt = bnxt_ol_flags_table;
+ pt = rxr->ol_flags_table;
for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) {
pt[i] = 0;
if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
/* Initialize checksum error table. */
- pt = bnxt_ol_flags_err_table;
+ pt = rxr->ol_flags_err_table;
for (i = 0; i < BNXT_OL_FLAGS_ERR_TBL_DIM; i++) {
pt[i] = 0;
if (i & (RX_PKT_CMPL_ERRORS_IP_CS_ERROR >> 4))
pt[i] |= PKT_RX_IP_CKSUM_BAD;
if (i & (RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR >> 4))
pt[i] |= PKT_RX_OUTER_L4_CKSUM_BAD;
}
-
- initialized = true;
}
static void
-bnxt_set_ol_flags(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1,
- struct rte_mbuf *mbuf)
+bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp,
+ struct rx_pkt_cmpl_hi *rxcmp1, struct rte_mbuf *mbuf)
{
uint16_t flags_type, errors, flags;
uint64_t ol_flags;
RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR);
errors = (errors >> 4) & flags;
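+ /* Flag bits with errors masked off index the ol_flags table; error bits index the error table. */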
- ol_flags = bnxt_ol_flags_table[flags & ~errors];
+ ol_flags = rxr->ol_flags_table[flags & ~errors];
if (errors)
- ol_flags |= bnxt_ol_flags_err_table[errors];
+ ol_flags |= rxr->ol_flags_err_table[errors];
if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
mbuf->hash.rss = rte_le_to_cpu_32(rxcmp->rss_hash);
mbuf->data_len = mbuf->pkt_len;
mbuf->port = rxq->port_id;
- bnxt_set_ol_flags(rxcmp, rxcmp1, mbuf);
+ bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);
#ifdef RTE_LIBRTE_IEEE1588
if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
/* Initialize packet type table. */
bnxt_init_ptype_table();
- /* Initialize offload flags parsing table. */
- bnxt_init_ol_flags_tables();
-
size = rte_pktmbuf_data_room_size(rxq->mb_pool) - RTE_PKTMBUF_HEADROOM;
size = RTE_MIN(BNXT_MAX_PKT_LEN, size);
ring = rxr->rx_ring_struct;
bnxt_init_rxbds(ring, type, size);
+ /* Initialize offload flags parsing table. */
+ bnxt_init_ol_flags_tables(rxr);
+
raw_prod = rxr->rx_raw_prod;
for (i = 0; i < ring->ring_size; i++) {
if (unlikely(!rxr->rx_buf_ring[i])) {
/* Number of descriptors to process per inner loop in vector mode. */
#define RTE_BNXT_DESCS_PER_LOOP 4U
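+/* Sizes of the per-ring ol_flags lookup tables built by bnxt_init_ol_flags_tables(). */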
+#define BNXT_OL_FLAGS_TBL_DIM 32
+#define BNXT_OL_FLAGS_ERR_TBL_DIM 16
+
struct bnxt_tpa_info {
struct rte_mbuf *mbuf;
uint16_t len;
struct rte_bitmap *ag_bitmap;
struct bnxt_tpa_info *tpa_info;
+
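+ /* Per-ring mappings from Rx completion flag/error bits to mbuf ol_flags. */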
+ uint32_t ol_flags_table[BNXT_OL_FLAGS_TBL_DIM];
+ uint32_t ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM];
};
uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
#define BNXT_PTYPE_TBL_DIM 128
extern uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM];
-
-#define BNXT_OL_FLAGS_TBL_DIM 32
-extern uint32_t bnxt_ol_flags_table[BNXT_OL_FLAGS_TBL_DIM];
-
-#define BNXT_OL_FLAGS_ERR_TBL_DIM 16
-extern uint32_t bnxt_ol_flags_err_table[BNXT_OL_FLAGS_ERR_TBL_DIM];
#endif
uint32_t tmp, of; \
\
of = vgetq_lane_u32((rss_flags), (pi)) | \
- bnxt_ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \
+ rxr->ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \
\
tmp = vgetq_lane_u32((errors), (pi)); \
if (tmp) \
- of |= bnxt_ol_flags_err_table[tmp]; \
+ of |= rxr->ol_flags_err_table[tmp]; \
(ol_flags) = of; \
}
static void
descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4],
- uint64x2_t mb_init, struct rte_mbuf **mbuf)
+ uint64x2_t mb_init, struct rte_mbuf **mbuf,
+ struct bnxt_rx_ring_info *rxr)
{
const uint8x16_t shuf_msk = {
0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type (zeroes) */
goto out;
}
- descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts]);
+ descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts],
+ rxr);
nb_rx_pkts += num_valid;
if (num_valid < RTE_BNXT_DESCS_PER_LOOP)
uint32_t tmp, of; \
\
of = _mm_extract_epi32((rss_flags), (pi)) | \
- bnxt_ol_flags_table[_mm_extract_epi32((ol_index), (pi))]; \
+ rxr->ol_flags_table[_mm_extract_epi32((ol_index), (pi))]; \
\
tmp = _mm_extract_epi32((errors), (pi)); \
if (tmp) \
- of |= bnxt_ol_flags_err_table[tmp]; \
+ of |= rxr->ol_flags_err_table[tmp]; \
(ol_flags) = of; \
}
static inline void
descs_to_mbufs(__m128i mm_rxcmp[4], __m128i mm_rxcmp1[4],
- __m128i mbuf_init, struct rte_mbuf **mbuf)
+ __m128i mbuf_init, struct rte_mbuf **mbuf,
+ struct bnxt_rx_ring_info *rxr)
{
const __m128i shuf_msk =
_mm_set_epi8(15, 14, 13, 12, /* rss */
goto out;
}
- descs_to_mbufs(rxcmp, rxcmp1, mbuf_init, &rx_pkts[nb_rx_pkts]);
+ descs_to_mbufs(rxcmp, rxcmp1, mbuf_init, &rx_pkts[nb_rx_pkts],
+ rxr);
nb_rx_pkts += num_valid;
if (num_valid < RTE_BNXT_DESCS_PER_LOOP)