From: Kalesh AP
Date: Sun, 20 Dec 2020 05:24:30 +0000 (-0800)
Subject: net/bnxt: add Rx logic for 58818 chips
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=899f06130724;p=dpdk.git

net/bnxt: add Rx logic for 58818 chips

1. On the new 58818 chips, the RX completion is largely the same except
   for a new completion opcode and changes to the stripped VLAN format
   and checksum status. Added bnxt_parse_csum_v2(),
   bnxt_parse_pkt_type_v2() and bnxt_rx_vlan_v2() to support the new RX
   completion logic.
2. Disable vector mode RX/TX for 58818 chips for now.
3. The cfa_code format on 58818 chips is different from that on legacy
   chips, so skip the cfa_code parsing logic on 58818 chips for now.

Signed-off-by: Kalesh AP
Signed-off-by: Ajit Khaparde
---
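As background for point 1: the driver picks a parsing path from the
completion opcode before touching the mbuf. A minimal compilable sketch of
that dispatch, with stand-in opcode values and no-op parsers rather than the
real HSI definitions (only 0x11 is taken from the old code below; the v2
value and the rx_pkt() shape are placeholders):

/* Sketch only: stand-in opcodes and no-op parsers, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define TYPE_RX_L2	0x11	/* matches the old magic number below */
#define TYPE_RX_L2_V2	0x10	/* placeholder for CMPL_BASE_TYPE_RX_L2_V2 */

static void parse_legacy(void)
{
	puts("bnxt_set_ol_flags + bnxt_parse_pkt_type");
}

static void parse_v2(void)
{
	puts("bnxt_parse_csum_v2 + bnxt_parse_pkt_type_v2 + bnxt_rx_vlan_v2");
}

static int rx_pkt(uint8_t cmp_type)
{
	if (cmp_type != TYPE_RX_L2 && cmp_type != TYPE_RX_L2_V2)
		return -1;		/* -EINVAL in the driver */
	if (cmp_type == TYPE_RX_L2_V2)
		parse_v2();		/* 58818: new opcode, new helpers */
	else
		parse_legacy();		/* legacy chips keep the old path */
	return 0;
}

int main(void)
{
	rx_pkt(TYPE_RX_L2);
	rx_pkt(TYPE_RX_L2_V2);
	return 0;
}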
diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c
index e11751cc1a..ef6b611beb 100644
--- a/drivers/net/bnxt/bnxt_ethdev.c
+++ b/drivers/net/bnxt/bnxt_ethdev.c
@@ -1159,6 +1159,12 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 {
 	struct bnxt *bp = eth_dev->data->dev_private;
 
+	/* Disable vector mode RX for Stingray2 for now */
+	if (BNXT_CHIP_SR2(bp)) {
+		bp->flags &= ~BNXT_FLAG_RX_VECTOR_PKT_MODE;
+		return bnxt_recv_pkts;
+	}
+
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
 #ifndef RTE_LIBRTE_IEEE1588
 	/*
@@ -1199,12 +1205,17 @@ bnxt_receive_function(struct rte_eth_dev *eth_dev)
 }
 
 static eth_tx_burst_t
-bnxt_transmit_function(__rte_unused struct rte_eth_dev *eth_dev)
+bnxt_transmit_function(struct rte_eth_dev *eth_dev)
 {
+	struct bnxt *bp = eth_dev->data->dev_private;
+
+	/* Disable vector mode TX for Stingray2 for now */
+	if (BNXT_CHIP_SR2(bp))
+		return bnxt_xmit_pkts;
+
 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
 #ifndef RTE_LIBRTE_IEEE1588
 	uint64_t offloads = eth_dev->data->dev_conf.txmode.offloads;
-	struct bnxt *bp = eth_dev->data->dev_private;
 
 	/*
 	 * Vector mode transmit can be enabled only if not using scatter rx
diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index af3f9b9368..a195bf1187 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -814,7 +814,8 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 			return -EBUSY;
 		*rx_pkt = mbuf;
 		goto next_rx;
-	} else if (cmp_type != 0x11) {
+	} else if ((cmp_type != CMPL_BASE_TYPE_RX_L2) &&
+		   (cmp_type != CMPL_BASE_TYPE_RX_L2_V2)) {
 		rc = -EINVAL;
 		goto next_rx;
 	}
@@ -838,8 +839,6 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	mbuf->data_len = mbuf->pkt_len;
 	mbuf->port = rxq->port_id;
 
-	bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);
-
 #ifdef RTE_LIBRTE_IEEE1588
 	if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
 		      RX_PKT_CMPL_FLAGS_MASK) ==
@@ -849,17 +848,28 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	}
 #endif
 
+	if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
+		bnxt_parse_csum_v2(mbuf, rxcmp1);
+		bnxt_parse_pkt_type_v2(mbuf, rxcmp, rxcmp1);
+		bnxt_rx_vlan_v2(mbuf, rxcmp, rxcmp1);
+		/* TODO Add support for cfa_code parsing */
+		goto reuse_rx_mbuf;
+	}
+
+	bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);
+
+	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
+
 	if (BNXT_TRUFLOW_EN(bp))
 		mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
 						    &vfr_flag);
 	else
 		bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
 
+reuse_rx_mbuf:
 	if (agg_buf)
 		bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);
 
-	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
-
 #ifdef BNXT_DEBUG
 	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
 		/* Re-install the mbuf back to the rx ring */
@@ -972,8 +982,8 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 				cpr->cp_ring_struct->ring_mask,
 				cpr->valid);
 
-		/* TODO: Avoid magic numbers... */
-		if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
+		if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
+		    (CMP_TYPE(rxcmp) <= RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)) {
 			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
 			if (!rc)
 				nb_rx_pkts++;
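A note on the bnxt_recv_pkts() hunk above: the removed magic-number test
((CMP_TYPE(rxcmp) & 0x30) == 0x10) accepted exactly the completion types
0x10 through 0x1f (bit 4 set, bit 5 clear); the named-constant range now
spells out which RX completion types are meant. The old test's arithmetic
can be checked in isolation:

/* Shows what the old magic-number test matched; pure arithmetic,
 * no driver headers needed. */
#include <stdio.h>

int main(void)
{
	unsigned int type;

	for (type = 0; type < 0x40; type++)	/* 6-bit completion type */
		if ((type & 0x30) == 0x10)
			printf("0x%02x\n", type);	/* prints 0x10..0x1f */
	return 0;
}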
diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h
index 78814edbb4..9cc5197a7b 100644
--- a/drivers/net/bnxt/bnxt_rxr.h
+++ b/drivers/net/bnxt/bnxt_rxr.h
@@ -131,5 +131,196 @@ bnxt_cfa_code_dynfield(struct rte_mbuf *mbuf)
 #define BNXT_PTYPE_TBL_DIM	128
 extern uint32_t bnxt_ptype_table[BNXT_PTYPE_TBL_DIM];
 
+/* Stingray2 specific code for RX completion parsing */
+#define RX_CMP_VLAN_VALID(rxcmp)	\
+	(((struct rx_pkt_v2_cmpl *)rxcmp)->metadata1_payload_offset &	\
+	 RX_PKT_V2_CMPL_METADATA1_VALID)
+
+#define RX_CMP_METADATA0_VID(rxcmp1)				\
+	((((struct rx_pkt_v2_cmpl_hi *)rxcmp1)->metadata0) &	\
+	 (RX_PKT_V2_CMPL_HI_METADATA0_VID_MASK |		\
+	  RX_PKT_V2_CMPL_HI_METADATA0_DE |			\
+	  RX_PKT_V2_CMPL_HI_METADATA0_PRI_MASK))
+
+static inline void bnxt_rx_vlan_v2(struct rte_mbuf *mbuf,
+				   struct rx_pkt_cmpl *rxcmp,
+				   struct rx_pkt_cmpl_hi *rxcmp1)
+{
+	if (RX_CMP_VLAN_VALID(rxcmp)) {
+		mbuf->vlan_tci = RX_CMP_METADATA0_VID(rxcmp1);
+		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+	}
+}
+
+#define RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK	(0x1 << 3)
+#define RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK	(0x7 << 10)
+#define RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK	(0x1 << 13)
+#define RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK	(0x1 << 14)
+
+#define RX_CMP_V2_CS_OK_HDR_CNT(flags)				\
+	(((flags) & RX_CMP_FLAGS2_CS_OK_HDR_CNT_MASK) >>	\
+	 RX_PKT_V2_CMPL_HI_FLAGS2_CS_OK_SFT)
+
+#define RX_CMP_V2_CS_ALL_OK_MODE(flags)				\
+	(((flags) & RX_CMP_FLAGS2_CS_ALL_OK_MODE_MASK))
+
+#define RX_CMP_FLAGS2_L3_CS_OK_MASK	(0x7 << 10)
+#define RX_CMP_FLAGS2_L4_CS_OK_MASK	(0x38 << 10)
+#define RX_CMP_FLAGS2_L3_CS_OK_SFT	10
+#define RX_CMP_FLAGS2_L4_CS_OK_SFT	13
+
+#define RX_CMP_V2_L4_CS_OK(flags2)			\
+	(((flags2) & RX_CMP_FLAGS2_L4_CS_OK_MASK) >>	\
+	 RX_CMP_FLAGS2_L4_CS_OK_SFT)
+
+#define RX_CMP_V2_L3_CS_OK(flags2)			\
+	(((flags2) & RX_CMP_FLAGS2_L3_CS_OK_MASK) >>	\
+	 RX_CMP_FLAGS2_L3_CS_OK_SFT)
+
+#define RX_CMP_V2_L4_CS_ERR(err)				\
+	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) ==	\
+	 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_L4_CS_ERROR)
+
+#define RX_CMP_V2_L3_CS_ERR(err)				\
+	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_MASK) ==	\
+	 RX_PKT_V2_CMPL_HI_ERRORS_PKT_ERROR_IP_CS_ERROR)
+
+#define RX_CMP_V2_T_IP_CS_ERR(err)				\
+	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) ==	\
+	 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_IP_CS_ERROR)
+
+#define RX_CMP_V2_T_L4_CS_ERR(err)				\
+	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_MASK) ==	\
+	 RX_PKT_V2_CMPL_HI_ERRORS_T_PKT_ERROR_T_L4_CS_ERROR)
+
+#define RX_CMP_V2_OT_L4_CS_ERR(err)				\
+	(((err) & RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_MASK) ==	\
+	 RX_PKT_V2_CMPL_HI_ERRORS_OT_PKT_ERROR_OT_L4_CS_ERROR)
+
+static inline void bnxt_parse_csum_v2(struct rte_mbuf *mbuf,
+				      struct rx_pkt_cmpl_hi *rxcmp1)
+{
+	struct rx_pkt_v2_cmpl_hi *v2_cmp =
+		(struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
+	uint16_t error_v2 = rte_le_to_cpu_16(v2_cmp->errors_v2);
+	uint32_t flags2 = rte_le_to_cpu_32(v2_cmp->flags2);
+	uint32_t hdr_cnt = 0, t_pkt = 0;
+
+	if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
+		hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
+		if (hdr_cnt > 1)
+			t_pkt = 1;
+
+		if (unlikely(RX_CMP_V2_L4_CS_ERR(error_v2)))
+			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+		else if (flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
+			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+		else
+			mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+
+		if (unlikely(RX_CMP_V2_L3_CS_ERR(error_v2)))
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+		else if (flags2 & RX_CMP_FLAGS2_IP_CSUM_ALL_OK_MASK)
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+		else
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+	} else {
+		hdr_cnt = RX_CMP_V2_L4_CS_OK(flags2);
+		if (hdr_cnt > 1)
+			t_pkt = 1;
+
+		if (RX_CMP_V2_L4_CS_OK(flags2))
+			mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+		else if (RX_CMP_V2_L4_CS_ERR(error_v2))
+			mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+		else
+			mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+
+		if (RX_CMP_V2_L3_CS_OK(flags2))
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+		else if (RX_CMP_V2_L3_CS_ERR(error_v2))
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+		else
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+	}
+
+	if (t_pkt) {
+		if (unlikely(RX_CMP_V2_OT_L4_CS_ERR(error_v2) ||
+			     RX_CMP_V2_T_L4_CS_ERR(error_v2)))
+			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+		else
+			mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
+
+		if (unlikely(RX_CMP_V2_T_IP_CS_ERR(error_v2)))
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+	}
+}
+
+static inline void
+bnxt_parse_pkt_type_v2(struct rte_mbuf *mbuf,
+		       struct rx_pkt_cmpl *rxcmp,
+		       struct rx_pkt_cmpl_hi *rxcmp1)
+{
+	struct rx_pkt_v2_cmpl *v2_cmp =
+		(struct rx_pkt_v2_cmpl *)(rxcmp);
+	struct rx_pkt_v2_cmpl_hi *v2_cmp1 =
+		(struct rx_pkt_v2_cmpl_hi *)(rxcmp1);
+	uint16_t flags_type = v2_cmp->flags_type &
+		rte_cpu_to_le_32(RX_PKT_V2_CMPL_FLAGS_ITYPE_MASK);
+	uint32_t flags2 = rte_le_to_cpu_32(v2_cmp1->flags2);
+	uint32_t l3, pkt_type = 0, vlan = 0;
+	uint32_t ip6 = 0, t_pkt = 0;
+	uint32_t hdr_cnt, csum_count;
+
+	if (RX_CMP_V2_CS_ALL_OK_MODE(flags2)) {
+		hdr_cnt = RX_CMP_V2_CS_OK_HDR_CNT(flags2);
+		if (hdr_cnt > 1)
+			t_pkt = 1;
+	} else {
+		csum_count = RX_CMP_V2_L4_CS_OK(flags2);
+		if (csum_count > 1)
+			t_pkt = 1;
+	}
+
+	vlan = !!RX_CMP_VLAN_VALID(rxcmp);
+	pkt_type |= vlan ? RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER;
+
+	ip6 = !!(flags2 & RX_PKT_V2_CMPL_HI_FLAGS2_IP_TYPE);
+
+	if (!t_pkt && !ip6)
+		l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+	else if (!t_pkt && ip6)
+		l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+	else if (t_pkt && !ip6)
+		l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
+	else
+		l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
+
+	switch (flags_type) {
+	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_ICMP):
+		if (!t_pkt)
+			pkt_type |= l3 | RTE_PTYPE_L4_ICMP;
+		else
+			pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP;
+		break;
+	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_TCP):
+		if (!t_pkt)
+			pkt_type |= l3 | RTE_PTYPE_L4_TCP;
+		else
+			pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP;
+		break;
+	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_UDP):
+		if (!t_pkt)
+			pkt_type |= l3 | RTE_PTYPE_L4_UDP;
+		else
+			pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP;
+		break;
+	case RTE_LE32(RX_PKT_V2_CMPL_FLAGS_ITYPE_IP):
+		pkt_type |= l3;
+		break;
+	}
+
+	mbuf->packet_type = pkt_type;
+}
+
 #endif /* _BNXT_RXR_H_ */
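To illustrate how bnxt_parse_csum_v2() uses the header count: in "all OK"
mode the completion reports how many checksum headers it validated, and a
count above one implies a tunnel (outer plus inner header), which is what
drives the PKT_RX_OUTER_L4_CKSUM_* flags. A standalone sketch of that
decode, reusing the masks from the patch; the shift is assumed to be 10 so
that it lines up with the (0x7 << 10) header-count mask, standing in for
RX_PKT_V2_CMPL_HI_FLAGS2_CS_OK_SFT:

/* Sketch of the "all OK" checksum decode; masks copied from the patch,
 * CS_OK_SFT assumed to be 10 (consistent with the 0x7 << 10 mask). */
#include <stdint.h>
#include <stdio.h>

#define CS_ALL_OK_MODE_MASK	(0x1 << 3)
#define CS_OK_HDR_CNT_MASK	(0x7 << 10)
#define CS_OK_SFT		10	/* assumption, see above */
#define IP_CSUM_ALL_OK_MASK	(0x1 << 13)
#define L4_CSUM_ALL_OK_MASK	(0x1 << 14)

static void decode(uint32_t flags2)
{
	uint32_t hdr_cnt;

	if (!(flags2 & CS_ALL_OK_MODE_MASK)) {
		puts("per-header mode: use the L3/L4 CS_OK fields instead");
		return;
	}
	hdr_cnt = (flags2 & CS_OK_HDR_CNT_MASK) >> CS_OK_SFT;
	printf("all-OK mode: %u header(s)%s, ip %s, l4 %s\n",
	       hdr_cnt, hdr_cnt > 1 ? " (tunnel)" : "",
	       (flags2 & IP_CSUM_ALL_OK_MASK) ? "ok" : "not confirmed",
	       (flags2 & L4_CSUM_ALL_OK_MASK) ? "ok" : "not confirmed");
}

int main(void)
{
	/* tunneled packet, both checksums validated */
	decode(CS_ALL_OK_MODE_MASK | (2u << CS_OK_SFT) |
	       IP_CSUM_ALL_OK_MASK | L4_CSUM_ALL_OK_MASK);
	decode(0);	/* completion not in all-OK mode */
	return 0;
}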