}
rxq->rxrearm_start += RTE_BNXT_RXQ_REARM_THRESH;
- B_RX_DB(rxr->rx_doorbell, rxq->rxrearm_start - 1);
+ bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
if (rxq->rxrearm_start >= rxq->nb_rx_desc)
rxq->rxrearm_start = 0;
return pkt_type;
}
+/*
+ * Translate the Rx completion's checksum status into mbuf offload flags.
+ *
+ * Reads the checksum/tunnel status bits of the second 16-byte Rx
+ * completion record (rxcmp1) and ORs the matching PKT_RX_IP_CKSUM_*,
+ * PKT_RX_L4_CKSUM_* and PKT_RX_OUTER_L4_CKSUM_* result flags into
+ * mbuf->ol_flags.  No other mbuf field is modified.
+ */
+static void
+bnxt_parse_csum(struct rte_mbuf *mbuf, struct rx_pkt_cmpl_hi *rxcmp1)
+{
+ uint32_t flags;
+
+ /* Checksum-calc/tunnel status bits — presumably the low nibble of
+ * the completion's flags2 word (see flags2_0xf()); TODO confirm.
+ */
+ flags = flags2_0xf(rxcmp1);
+ /* IP Checksum */
+ if (likely(IS_IP_NONTUNNEL_PKT(flags))) {
+ if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ } else if (IS_IP_TUNNEL_PKT(flags)) {
+ /* Tunnel packet: bad if either the outer or the inner IP
+ * checksum failed.
+ */
+ if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
+ RX_CMP_IP_CS_ERROR(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ } else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {
+ /* HW did not compute an IP checksum for this packet. */
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ }
+
+ /* L4 Checksum */
+ if (likely(IS_L4_NONTUNNEL_PKT(flags))) {
+ if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ } else if (IS_L4_TUNNEL_PKT(flags)) {
+ /* Inner L4 result first, then the outer L4 result. */
+ if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1)))
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ else
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) {
+ mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ } else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS
+ (flags))) {
+ /* Only the inner L4 checksum was verified, so the
+ * outer L4 status is unknown.
+ */
+ mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN;
+ } else {
+ mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
+ }
+ } else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) {
+ /* HW did not compute an L4 checksum for this packet. */
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ }
+}
+
uint16_t
bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts)
0xFF, 0xFF, 0xFF, 0xFF); /* pkt_type (zeroes) */
/* If Rx Q was stopped return */
- if (rxq->rx_deferred_start)
+ if (unlikely(!rxq->rx_started))
return 0;
if (rxq->rxrearm_nb >= RTE_BNXT_RXQ_REARM_THRESH)
/* Return no more than RTE_BNXT_MAX_RX_BURST per call. */
nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST);
- /* Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP */
+ /*
+ * Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP.
+ * nb_pkts < RTE_BNXT_DESCS_PER_LOOP, just return no packet
+ */
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP);
+ if (!nb_pkts)
+ return 0;
/* Handle RX burst request */
while (1) {
if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
break;
- cpr->valid = FLIP_VALID(cons,
- cpr->cp_ring_struct->ring_mask,
- cpr->valid);
-
if (likely(CMP_TYPE(rxcmp) == RX_PKT_CMPL_TYPE_RX_L2)) {
struct rx_pkt_cmpl_hi *rxcmp1;
uint32_t tmp_raw_cons;
rte_prefetch0(mbuf);
rxr->rx_buf_ring[cons].mbuf = NULL;
- cpr->valid = FLIP_VALID(cp_cons,
- cpr->cp_ring_struct->ring_mask,
- cpr->valid);
-
/* Set constant fields from mbuf initializer. */
_mm_store_si128((__m128i *)&mbuf->rearm_data,
mbuf_init);
(RX_PKT_CMPL_METADATA_VID_MASK |
RX_PKT_CMPL_METADATA_DE |
RX_PKT_CMPL_METADATA_PRI_MASK);
- mbuf->ol_flags |= PKT_RX_VLAN;
+ mbuf->ol_flags |=
+ PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
}
+ bnxt_parse_csum(mbuf, rxcmp1);
mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
rx_pkts[nb_rx_pkts++] = mbuf;
- } else {
+ } else if (!BNXT_NUM_ASYNC_CPR(rxq->bp)) {
evt =
bnxt_event_hwrm_resp_handler(rxq->bp,
(struct cmpl_base *)rxcmp);
rxq->rxrearm_nb += nb_rx_pkts;
cpr->cp_raw_cons = raw_cons;
+ cpr->valid = !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size);
if (nb_rx_pkts || evt)
- B_CP_DIS_DB(cpr, cpr->cp_raw_cons);
+ bnxt_db_cq(cpr);
return nb_rx_pkts;
}
-static inline void bnxt_next_cmpl(struct bnxt_cp_ring_info *cpr, uint32_t *idx,
- bool *v, uint32_t inc)
-{
- *idx += inc;
- if (unlikely(*idx == cpr->cp_ring_struct->ring_size)) {
- *v = !*v;
- *idx = 0;
- }
-}
-
static void
bnxt_tx_cmp_vec(struct bnxt_tx_queue *txq, int nr_pkts)
{
cons = RING_CMPL(ring_mask, raw_cons);
txcmp = (struct tx_cmpl *)&cp_desc_ring[cons];
- if (!CMPL_VALID(txcmp, cpr->valid))
+ if (!CMP_VALID(txcmp, raw_cons, cp_ring_struct))
break;
- bnxt_next_cmpl(cpr, &cons, &cpr->valid, 1);
- rte_prefetch0(&cp_desc_ring[cons]);
if (likely(CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2))
nb_tx_pkts += txcmp->opaque;
RTE_LOG_DP(ERR, PMD,
"Unhandled CMP type %02x\n",
CMP_TYPE(txcmp));
- raw_cons = cons;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
} while (nb_tx_pkts < ring_mask);
+ cpr->valid = !!(raw_cons & cp_ring_struct->ring_size);
if (nb_tx_pkts) {
bnxt_tx_cmp_vec(txq, nb_tx_pkts);
cpr->cp_raw_cons = raw_cons;
- B_CP_DB(cpr, raw_cons, ring_mask);
+ bnxt_db_cq(cpr);
}
}
}
rte_compiler_barrier();
- B_TX_DB(txr->tx_doorbell, prod);
+ bnxt_db_write(&txr->tx_db, prod);
txr->tx_prod = prod;
struct bnxt_tx_queue *txq = tx_queue;
/* Tx queue was stopped; wait for it to be restarted */
- if (unlikely(txq->tx_deferred_start)) {
+ if (unlikely(!txq->tx_started)) {
PMD_DRV_LOG(DEBUG, "Tx q stopped;return\n");
return 0;
}
return nb_sent;
}
-int __attribute__((cold))
+int __rte_cold
bnxt_rxq_vec_setup(struct bnxt_rx_queue *rxq)
{
uintptr_t p;