X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_rxr.c;h=498811a732d9892475c4d87b3f3bc6c059e19506;hb=41e026c1b3fd07ee6520e3d5d4ec0787d0dac300;hp=14901f1b99cd2d6f94a2858333638f1d7a7f5b82;hpb=48a580c5df78b3addac6f57969167bb86c7428f5;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 14901f1b99..498811a732 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2014-2018 Broadcom
+ * Copyright(c) 2014-2021 Broadcom
  * All rights reserved.
  */
 
@@ -134,6 +134,53 @@ struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
 	return mbuf;
 }
 
+static void bnxt_tpa_get_metadata(struct bnxt *bp,
+				  struct bnxt_tpa_info *tpa_info,
+				  struct rx_tpa_start_cmpl *tpa_start,
+				  struct rx_tpa_start_cmpl_hi *tpa_start1)
+{
+	tpa_info->cfa_code_valid = 0;
+	tpa_info->vlan_valid = 0;
+	tpa_info->hash_valid = 0;
+	tpa_info->l4_csum_valid = 0;
+
+	if (likely(tpa_start->flags_type &
+		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
+		tpa_info->hash_valid = 1;
+		tpa_info->rss_hash = rte_le_to_cpu_32(tpa_start->rss_hash);
+	}
+
+	if (bp->vnic_cap_flags & BNXT_VNIC_CAP_RX_CMPL_V2) {
+		struct rx_tpa_start_v2_cmpl *v2_tpa_start = (void *)tpa_start;
+		struct rx_tpa_start_v2_cmpl_hi *v2_tpa_start1 =
+			(void *)tpa_start1;
+
+		if (v2_tpa_start->agg_id &
+		    RX_TPA_START_V2_CMPL_METADATA1_VALID) {
+			tpa_info->vlan_valid = 1;
+			tpa_info->vlan =
+				rte_le_to_cpu_16(v2_tpa_start1->metadata0);
+		}
+
+		if (v2_tpa_start1->flags2 & RX_CMP_FLAGS2_L4_CSUM_ALL_OK_MASK)
+			tpa_info->l4_csum_valid = 1;
+
+		return;
+	}
+
+	tpa_info->cfa_code_valid = 1;
+	tpa_info->cfa_code = rte_le_to_cpu_16(tpa_start1->cfa_code);
+	if (tpa_start1->flags2 &
+	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
+		tpa_info->vlan_valid = 1;
+		tpa_info->vlan = rte_le_to_cpu_32(tpa_start1->metadata);
+	}
+
+	if (likely(tpa_start1->flags2 &
+		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+		tpa_info->l4_csum_valid = 1;
+}
+
 static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
 			   struct rx_tpa_start_cmpl *tpa_start,
 			   struct rx_tpa_start_cmpl_hi *tpa_start1)
@@ -164,21 +211,23 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
 	mbuf->data_len = mbuf->pkt_len;
 	mbuf->port = rxq->port_id;
 	mbuf->ol_flags = PKT_RX_LRO;
-	if (likely(tpa_start->flags_type &
-		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS_RSS_VALID))) {
-		mbuf->hash.rss = rte_le_to_cpu_32(tpa_start->rss_hash);
+
+	bnxt_tpa_get_metadata(rxq->bp, tpa_info, tpa_start, tpa_start1);
+
+	if (likely(tpa_info->hash_valid)) {
+		mbuf->hash.rss = tpa_info->rss_hash;
 		mbuf->ol_flags |= PKT_RX_RSS_HASH;
-	} else {
-		mbuf->hash.fdir.id = rte_le_to_cpu_16(tpa_start1->cfa_code);
+	} else if (tpa_info->cfa_code_valid) {
+		mbuf->hash.fdir.id = tpa_info->cfa_code;
 		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
 	}
-	if (tpa_start1->flags2 &
-	    rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_META_FORMAT_VLAN)) {
-		mbuf->vlan_tci = rte_le_to_cpu_32(tpa_start1->metadata);
+
+	if (tpa_info->vlan_valid) {
+		mbuf->vlan_tci = tpa_info->vlan;
 		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 	}
-	if (likely(tpa_start1->flags2 &
-		   rte_cpu_to_le_32(RX_TPA_START_CMPL_FLAGS2_L4_CS_CALC)))
+
+	if (likely(tpa_info->l4_csum_valid))
 		mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 
 	/* recycle next mbuf */
@@ -276,6 +325,7 @@ static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
 		 */
 		rte_bitmap_set(rxr->ag_bitmap, ag_cons);
 	}
+	last->next = NULL;
 	bnxt_prod_ag_mbuf(rxq);
 	return 0;
 }
@@ -353,7 +403,7 @@ bnxt_init_ptype_table(void)
 
 		ip6 = i & (RX_PKT_CMPL_FLAGS2_IP_TYPE >> 7);
 		tun = i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC >> 2);
-		type = (i & 0x38) << 9;
+		type = (i & 0x78) << 9;
 
 		if (!tun && !ip6)
 			l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
@@ -479,7 +529,7 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
 			pt[i] |= PKT_RX_IP_CKSUM_BAD;
 
 		if (i & (RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR >> 4))
-			pt[i] |= PKT_RX_EIP_CKSUM_BAD;
+			pt[i] |= PKT_RX_OUTER_IP_CKSUM_BAD;
 
 		if (i & (RX_PKT_CMPL_ERRORS_L4_CS_ERROR >> 4))
 			pt[i] |= PKT_RX_L4_CKSUM_BAD;
@@ -540,6 +590,12 @@ bnxt_set_ol_flags(struct bnxt_rx_ring_info *rxr, struct rx_pkt_cmpl *rxcmp,
 		ol_flags |= PKT_RX_RSS_HASH;
 	}
 
+#ifdef RTE_LIBRTE_IEEE1588
+	if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
+		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
+		ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+#endif
+
 	mbuf->ol_flags = ol_flags;
 }
 
@@ -751,7 +807,8 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 				cpr->cp_ring_struct->ring_mask,
 				cpr->valid);
 
-	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START) {
+	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START ||
+	    cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2) {
 		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
 			       (struct rx_tpa_start_cmpl_hi *)rxcmp1);
 		rc = -EINVAL; /* Continue w/o new mbuf */
@@ -764,13 +821,13 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 			return -EBUSY;
 		*rx_pkt = mbuf;
 		goto next_rx;
-	} else if (cmp_type != 0x11) {
+	} else if ((cmp_type != CMPL_BASE_TYPE_RX_L2) &&
+		   (cmp_type != CMPL_BASE_TYPE_RX_L2_V2)) {
 		rc = -EINVAL;
 		goto next_rx;
 	}
 
-	agg_buf = (rxcmp->agg_bufs_v1 & RX_PKT_CMPL_AGG_BUFS_MASK)
-			>> RX_PKT_CMPL_AGG_BUFS_SFT;
+	agg_buf = BNXT_RX_L2_AGG_BUFS(rxcmp);
 	if (agg_buf && !bnxt_agg_bufs_valid(cpr, agg_buf, tmp_raw_cons))
 		return -EBUSY;
 
@@ -788,28 +845,35 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	mbuf->data_len = mbuf->pkt_len;
 	mbuf->port = rxq->port_id;
 
-	bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);
-
 #ifdef RTE_LIBRTE_IEEE1588
 	if (unlikely((rte_le_to_cpu_16(rxcmp->flags_type) &
 		      RX_PKT_CMPL_FLAGS_MASK) ==
-		      RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
-		mbuf->ol_flags |= PKT_RX_IEEE1588_PTP | PKT_RX_IEEE1588_TMST;
+		      RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP))
 		bnxt_get_rx_ts_p5(rxq->bp, rxcmp1->reorder);
-	}
 #endif
 
+	if (cmp_type == CMPL_BASE_TYPE_RX_L2_V2) {
+		bnxt_parse_csum_v2(mbuf, rxcmp1);
+		bnxt_parse_pkt_type_v2(mbuf, rxcmp, rxcmp1);
+		bnxt_rx_vlan_v2(mbuf, rxcmp, rxcmp1);
+		/* TODO Add support for cfa_code parsing */
+		goto reuse_rx_mbuf;
+	}
+
+	bnxt_set_ol_flags(rxr, rxcmp, rxcmp1, mbuf);
+
+	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
+
 	if (BNXT_TRUFLOW_EN(bp))
 		mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
 						    &vfr_flag);
 	else
 		bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
 
+reuse_rx_mbuf:
 	if (agg_buf)
 		bnxt_rx_pages(rxq, mbuf, &tmp_raw_cons, agg_buf, NULL);
 
-	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
-
 #ifdef BNXT_DEBUG
 	if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) {
 		/* Re-install the mbuf back to the rx ring */
@@ -922,8 +986,8 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 					cpr->cp_ring_struct->ring_mask,
 					cpr->valid);
 
-		/* TODO: Avoid magic numbers... */
-		if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) {
+		if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
+		    (CMP_TYPE(rxcmp) <= RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)) {
 			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
 			if (!rc)
 				nb_rx_pkts++;
@@ -1051,12 +1115,9 @@ void bnxt_free_rx_rings(struct bnxt *bp)
 
 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 {
-	struct rte_eth_dev *eth_dev = rxq->bp->eth_dev;
-	struct rte_eth_rxmode *rxmode;
 	struct bnxt_cp_ring_info *cpr;
 	struct bnxt_rx_ring_info *rxr;
 	struct bnxt_ring *ring;
-	bool use_agg_ring;
 
 	rxq->rx_buf_size = BNXT_MAX_PKT_LEN + sizeof(struct rte_mbuf);
 
@@ -1099,19 +1160,9 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 		return -ENOMEM;
 	cpr->cp_ring_struct = ring;
 
-	rxmode = &eth_dev->data->dev_conf.rxmode;
-	use_agg_ring = (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
-		       (rxmode->offloads & DEV_RX_OFFLOAD_TCP_LRO) ||
-		       (rxmode->max_rx_pkt_len >
-			(uint32_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
-				   RTE_PKTMBUF_HEADROOM));
-
 	/* Allocate two completion slots per entry in desc ring. */
 	ring->ring_size = rxr->rx_ring_struct->ring_size * 2;
-
-	/* Allocate additional slots if aggregation ring is in use. */
-	if (use_agg_ring)
-		ring->ring_size *= AGG_RING_SIZE_FACTOR;
+	ring->ring_size *= AGG_RING_SIZE_FACTOR;
 
 	ring->ring_size = rte_align32pow2(ring->ring_size);
 	ring->ring_mask = ring->ring_size - 1;
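
For context on the refactor above: TPA-start handling is now split into a decode step (the new bnxt_tpa_get_metadata() fills struct bnxt_tpa_info with explicit hash_valid/vlan_valid/cfa_code_valid/l4_csum_valid bits, with separate paths for the legacy completion format and the V2 format advertised through BNXT_VNIC_CAP_RX_CMPL_V2) and an apply step (bnxt_tpa_start() only translates those bits into mbuf offload flags). A minimal standalone sketch of that decode/apply pattern follows; struct tpa_meta, apply_meta() and the flag values here are simplified stand-ins invented for illustration, not the real DPDK or bnxt HSI definitions:

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the DPDK PKT_RX_* mbuf offload flags. */
#define PKT_RX_RSS_HASH      (1u << 0)
#define PKT_RX_FDIR_ID       (1u << 1)
#define PKT_RX_VLAN          (1u << 2)
#define PKT_RX_L4_CKSUM_GOOD (1u << 3)

/* Decoded completion metadata with explicit validity bits, mirroring
 * the bnxt_tpa_info fields added by the patch above. */
struct tpa_meta {
	uint32_t rss_hash;
	uint16_t vlan;
	uint16_t cfa_code;
	uint8_t  hash_valid:1;
	uint8_t  vlan_valid:1;
	uint8_t  cfa_code_valid:1;
	uint8_t  l4_csum_valid:1;
};

/* Apply step: turn validity bits into offload flags, the way
 * bnxt_tpa_start() does after calling bnxt_tpa_get_metadata(). */
static uint32_t apply_meta(const struct tpa_meta *m)
{
	uint32_t ol_flags = 0;

	if (m->hash_valid)
		ol_flags |= PKT_RX_RSS_HASH;	/* RSS hash present */
	else if (m->cfa_code_valid)
		ol_flags |= PKT_RX_FDIR_ID;	/* fall back to flow ID */

	if (m->vlan_valid)
		ol_flags |= PKT_RX_VLAN;

	if (m->l4_csum_valid)
		ol_flags |= PKT_RX_L4_CKSUM_GOOD;

	return ol_flags;
}

int main(void)
{
	/* A format-specific decode step would fill this in. */
	struct tpa_meta m = {
		.rss_hash = 0xdeadbeef,
		.hash_valid = 1,
		.l4_csum_valid = 1,
	};

	printf("ol_flags = 0x%x\n", apply_meta(&m));
	return 0;
}

The benefit of the split is that only the decode step is completion-format specific; the flag application stays shared, which is also why the if/else-if above keeps the RSS hash and flow-director ID mutually exclusive, matching the ordering in the patched bnxt_tpa_start().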