X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fbnxt%2Fbnxt_rxr.c;h=a40fa501383b014b0595499ef9cd0f20058aefcc;hb=f1f6ebc0eaf68a825c6175f5e6a436f7d91660c3;hp=30d22b7abaeab9dfc97cd89ad0f4fca080dd8877;hpb=53f98141ee9d3a3406fe1281bbc993efd0c06575;p=dpdk.git

diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 30d22b7aba..a40fa50138 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -10,6 +10,7 @@
 #include <rte_byteorder.h>
 #include <rte_malloc.h>
 #include <rte_memory.h>
+#include <rte_alarm.h>
 
 #include "bnxt.h"
 #include "bnxt_reps.h"
@@ -17,9 +18,7 @@
 #include "bnxt_rxr.h"
 #include "bnxt_rxq.h"
 #include "hsi_struct_def_dpdk.h"
-#ifdef RTE_LIBRTE_IEEE1588
 #include "bnxt_hwrm.h"
-#endif
 
 #include <bnxt_tf_common.h>
 #include <ulp_mark_mgr.h>
@@ -134,6 +133,51 @@ struct rte_mbuf *bnxt_consume_rx_buf(struct bnxt_rx_ring_info *rxr,
 	return mbuf;
 }
 
+static void bnxt_rx_ring_reset(void *arg)
+{
+	struct bnxt *bp = arg;
+	int i, rc = 0;
+	struct bnxt_rx_queue *rxq;
+
+
+	for (i = 0; i < (int)bp->rx_nr_rings; i++) {
+		struct bnxt_rx_ring_info *rxr;
+
+		rxq = bp->rx_queues[i];
+		if (!rxq || !rxq->in_reset)
+			continue;
+
+		rxr = rxq->rx_ring;
+		/* Disable and flush TPA before resetting the RX ring */
+		if (rxr->tpa_info)
+			bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, false);
+		rc = bnxt_hwrm_rx_ring_reset(bp, i);
+		if (rc) {
+			PMD_DRV_LOG(ERR, "Rx ring%d reset failed\n", i);
+			continue;
+		}
+
+		bnxt_rx_queue_release_mbufs(rxq);
+		rxr->rx_raw_prod = 0;
+		rxr->ag_raw_prod = 0;
+		rxr->rx_next_cons = 0;
+		bnxt_init_one_rx_ring(rxq);
+		bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
+		bnxt_db_write(&rxr->ag_db, rxr->ag_raw_prod);
+		if (rxr->tpa_info)
+			bnxt_hwrm_vnic_tpa_cfg(bp, rxq->vnic, true);
+
+		rxq->in_reset = 0;
+	}
+}
+
+
+static void bnxt_sched_ring_reset(struct bnxt_rx_queue *rxq)
+{
+	rxq->in_reset = 1;
+	rte_eal_alarm_set(1, bnxt_rx_ring_reset, (void *)rxq->bp);
+}
+
 static void bnxt_tpa_get_metadata(struct bnxt *bp,
 				  struct bnxt_tpa_info *tpa_info,
 				  struct rx_tpa_start_cmpl *tpa_start,
@@ -195,6 +239,12 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
 
 	data_cons = tpa_start->opaque;
 	tpa_info = &rxr->tpa_info[agg_id];
+	if (unlikely(data_cons != rxr->rx_next_cons)) {
+		PMD_DRV_LOG(ERR, "TPA cons %x, expected cons %x\n",
+			    data_cons, rxr->rx_next_cons);
+		bnxt_sched_ring_reset(rxq);
+		return;
+	}
 
 	mbuf = bnxt_consume_rx_buf(rxr, data_cons);
 
@@ -222,7 +272,7 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
 		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
 	}
 
-	if (tpa_info->vlan_valid) {
+	if (tpa_info->vlan_valid && BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
 		mbuf->vlan_tci = tpa_info->vlan;
 		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 	}
@@ -233,6 +283,9 @@ static void bnxt_tpa_start(struct bnxt_rx_queue *rxq,
 	/* recycle next mbuf */
 	data_cons = RING_NEXT(data_cons);
 	bnxt_reuse_rx_mbuf(rxr, bnxt_consume_rx_buf(rxr, data_cons));
+
+	rxr->rx_next_cons = RING_IDX(rxr->rx_ring_struct,
+				     RING_NEXT(data_cons));
 }
 
 static int bnxt_agg_bufs_valid(struct bnxt_cp_ring_info *cpr,
@@ -244,10 +297,8 @@
 	raw_cp_cons = ADV_RAW_CMP(raw_cp_cons, agg_bufs);
 	last_cp_cons = RING_CMP(cpr->cp_ring_struct, raw_cp_cons);
 	agg_cmpl = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[last_cp_cons];
-	cpr->valid = FLIP_VALID(raw_cp_cons,
-				cpr->cp_ring_struct->ring_mask,
-				cpr->valid);
-	return CMP_VALID(agg_cmpl, raw_cp_cons, cpr->cp_ring_struct);
+	return bnxt_cpr_cmp_valid(agg_cmpl, raw_cp_cons,
+				  cpr->cp_ring_struct->ring_size);
 }
 
 /* TPA consume agg buffer out of order, allocate connected data only */
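The bnxt_sched_ring_reset()/bnxt_rx_ring_reset() pair added above splits error recovery in two: the Rx datapath only marks the queue with in_reset and arms a one-shot EAL alarm, while the slow, HWRM-bound reset work runs later in the alarm callback (the EAL interrupt thread), outside the burst loop. While the flag is set, completions for that queue are discarded (see bnxt_discard_rx() and the in_reset check in bnxt_tpa_end() below) instead of touching buffers the reset is about to reinitialize. Below is a minimal, self-contained sketch of the same deferral pattern; the demo_* names are illustrative and not part of the driver:

#include <stdio.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_pause.h>

struct demo_queue {
	volatile int in_reset;
};

/* Runs later in the EAL interrupt thread, not in the Rx hot path. */
static void demo_ring_reset(void *arg)
{
	struct demo_queue *q = arg;

	printf("performing slow ring recovery\n");
	q->in_reset = 0;
}

/* Called from the datapath: cheap, never blocks the burst loop. */
static void demo_sched_reset(struct demo_queue *q)
{
	q->in_reset = 1;
	rte_eal_alarm_set(1 /* us */, demo_ring_reset, q);
}

int main(int argc, char **argv)
{
	struct demo_queue q = { .in_reset = 0 };

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	demo_sched_reset(&q);
	while (q.in_reset)
		rte_pause();

	return rte_eal_cleanup();
}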
@@ -330,6 +381,34 @@ static int bnxt_rx_pages(struct bnxt_rx_queue *rxq,
 	return 0;
 }
 
+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+			   uint32_t *raw_cons, void *cmp)
+{
+	struct rx_pkt_cmpl *rxcmp = cmp;
+	uint32_t tmp_raw_cons = *raw_cons;
+	uint8_t cmp_type, agg_bufs = 0;
+
+	cmp_type = CMP_TYPE(rxcmp);
+
+	if (cmp_type == CMPL_BASE_TYPE_RX_L2) {
+		agg_bufs = BNXT_RX_L2_AGG_BUFS(rxcmp);
+	} else if (cmp_type == RX_TPA_END_CMPL_TYPE_RX_TPA_END) {
+		struct rx_tpa_end_cmpl *tpa_end = cmp;
+
+		if (BNXT_CHIP_P5(bp))
+			return 0;
+
+		agg_bufs = BNXT_TPA_END_AGG_BUFS(tpa_end);
+	}
+
+	if (agg_bufs) {
+		if (!bnxt_agg_bufs_valid(cpr, agg_bufs, tmp_raw_cons))
+			return -EBUSY;
+	}
+	*raw_cons = tmp_raw_cons;
+	return 0;
+}
+
 static inline struct rte_mbuf *bnxt_tpa_end(
 		struct bnxt_rx_queue *rxq,
 		uint32_t *raw_cp_cons,
@@ -344,6 +423,13 @@ static inline struct rte_mbuf *bnxt_tpa_end(
 	uint8_t payload_offset;
 	struct bnxt_tpa_info *tpa_info;
 
+	if (unlikely(rxq->in_reset)) {
+		PMD_DRV_LOG(ERR, "rxq->in_reset: raw_cp_cons:%d\n",
+			    *raw_cp_cons);
+		bnxt_discard_rx(rxq->bp, cpr, raw_cp_cons, tpa_end);
+		return NULL;
+	}
+
 	if (BNXT_CHIP_P5(rxq->bp)) {
 		struct rx_tpa_v2_end_cmpl *th_tpa_end;
 		struct rx_tpa_v2_end_cmpl_hi *th_tpa_end1;
@@ -396,14 +482,14 @@ bnxt_init_ptype_table(void)
 		return;
 
 	for (i = 0; i < BNXT_PTYPE_TBL_DIM; i++) {
-		if (i & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN >> 2))
+		if (i & BNXT_PTYPE_TBL_VLAN_MSK)
 			pt[i] = RTE_PTYPE_L2_ETHER_VLAN;
 		else
 			pt[i] = RTE_PTYPE_L2_ETHER;
 
-		ip6 = i & (RX_PKT_CMPL_FLAGS2_IP_TYPE >> 7);
-		tun = i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC >> 2);
-		type = (i & 0x78) << 9;
+		ip6 = !!(i & BNXT_PTYPE_TBL_IP_VER_MSK);
+		tun = !!(i & BNXT_PTYPE_TBL_TUN_MSK);
+		type = (i & BNXT_PTYPE_TBL_TYPE_MSK) >> BNXT_PTYPE_TBL_TYPE_SFT;
 
 		if (!tun && !ip6)
 			l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
@@ -415,25 +501,25 @@ bnxt_init_ptype_table(void)
 			l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
 		switch (type) {
-		case RX_PKT_CMPL_FLAGS_ITYPE_ICMP:
+		case BNXT_PTYPE_TBL_TYPE_ICMP:
 			if (tun)
 				pt[i] |= l3 | RTE_PTYPE_INNER_L4_ICMP;
 			else
 				pt[i] |= l3 | RTE_PTYPE_L4_ICMP;
 			break;
-		case RX_PKT_CMPL_FLAGS_ITYPE_TCP:
+		case BNXT_PTYPE_TBL_TYPE_TCP:
 			if (tun)
 				pt[i] |= l3 | RTE_PTYPE_INNER_L4_TCP;
 			else
 				pt[i] |= l3 | RTE_PTYPE_L4_TCP;
 			break;
-		case RX_PKT_CMPL_FLAGS_ITYPE_UDP:
+		case BNXT_PTYPE_TBL_TYPE_UDP:
 			if (tun)
 				pt[i] |= l3 | RTE_PTYPE_INNER_L4_UDP;
 			else
 				pt[i] |= l3 | RTE_PTYPE_L4_UDP;
 			break;
-		case RX_PKT_CMPL_FLAGS_ITYPE_IP:
+		case BNXT_PTYPE_TBL_TYPE_IP:
 			pt[i] |= l3;
 			break;
 		}
 	}
@@ -450,17 +536,19 @@ bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
 	flags_type = rte_le_to_cpu_16(rxcmp->flags_type);
 	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
 
+	/* Validate ptype table indexing at build time. */
+	bnxt_check_ptype_constants();
+
 	/*
 	 * Index format:
-	 *     bit 0: RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC
-	 *     bit 1: RX_CMPL_FLAGS2_IP_TYPE
-	 *     bit 2: RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN
-	 *     bits 3-6: RX_PKT_CMPL_FLAGS_ITYPE
+	 *     bit 0: Set if IP tunnel encapsulated packet.
+	 *     bit 1: Set if IPv6 packet, clear if IPv4.
+	 *     bit 2: Set if VLAN tag present.
+	 *     bits 3-6: Four-bit hardware packet type field.
 	 */
-	index = ((flags_type & RX_PKT_CMPL_FLAGS_ITYPE_MASK) >> 9) |
-		((flags2 & (RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN |
-			   RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)) >> 2) |
-		((flags2 & RX_PKT_CMPL_FLAGS2_IP_TYPE) >> 7);
+	index = BNXT_CMPL_ITYPE_TO_IDX(flags_type) |
+		BNXT_CMPL_VLAN_TUN_TO_IDX(flags2) |
+		BNXT_CMPL_IP_VER_TO_IDX(flags2);
 
 	return bnxt_ptype_table[index];
 }
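The ptype rework above keeps the per-packet cost at a couple of shifts plus one table load: bnxt_init_ptype_table() precomputes every RTE_PTYPE_* combination once, and bnxt_parse_pkt_type() then builds a 7-bit index (bit 0 tunnel, bit 1 IPv6, bit 2 VLAN, bits 3-6 the hardware packet type, as the new comment documents) that addresses one of the 2^7 = 128 table entries. A standalone sketch of the precomputed-index idea follows; the IDX_* macros and demo_* names are hypothetical stand-ins, as the driver derives the real masks and shifts from the completion record layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical index layout mirroring the comment above:
 * bit 0 tunnel, bit 1 IPv6, bit 2 VLAN, bits 3-6 packet type.
 */
#define IDX_TUN       (1u << 0)
#define IDX_IPV6      (1u << 1)
#define IDX_VLAN      (1u << 2)
#define IDX_ITYPE(t)  (((t) & 0xfu) << 3)
#define TBL_DIM       128 /* 2^7 entries, one per bit combination */

static uint32_t demo_ptype_tbl[TBL_DIM];

static uint32_t demo_make_index(int tun, int ipv6, int vlan,
				unsigned int itype)
{
	return (tun ? IDX_TUN : 0) | (ipv6 ? IDX_IPV6 : 0) |
	       (vlan ? IDX_VLAN : 0) | IDX_ITYPE(itype);
}

int main(void)
{
	uint32_t i;

	/* Built once at init; the hot path is then a single array load. */
	for (i = 0; i < TBL_DIM; i++)
		demo_ptype_tbl[i] = i; /* the driver stores RTE_PTYPE_* here */

	printf("index = %u\n", demo_make_index(1, 0, 1, 2));
	return 0;
}

The added bnxt_check_ptype_constants() call guards exactly this contract: per its comment, it verifies at build time that the index macros still match the table layout.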
@@ -486,8 +574,10 @@ bnxt_init_ol_flags_tables(struct bnxt_rx_queue *rxq)
 	for (i = 0; i < BNXT_OL_FLAGS_TBL_DIM; i++) {
 		pt[i] = 0;
 
-		if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
-			pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		if (BNXT_RX_VLAN_STRIP_EN(rxq->bp)) {
+			if (i & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN)
+				pt[i] |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		}
 
 		if (i & (RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC << 3)) {
 			/* Tunnel case. */
@@ -805,13 +895,10 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons);
 	rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons];
 
-	if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct))
+	if (!bnxt_cpr_cmp_valid(rxcmp1, tmp_raw_cons,
+				cpr->cp_ring_struct->ring_size))
 		return -EBUSY;
 
-	cpr->valid = FLIP_VALID(cp_cons,
-				cpr->cp_ring_struct->ring_mask,
-				cpr->valid);
-
 	if (cmp_type == RX_TPA_START_CMPL_TYPE_RX_TPA_START ||
 	    cmp_type == RX_TPA_START_V2_CMPL_TYPE_RX_TPA_START_V2) {
 		bnxt_tpa_start(rxq, (struct rx_tpa_start_cmpl *)rxcmp,
@@ -839,6 +926,14 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	raw_prod = rxr->rx_raw_prod;
 
 	cons = rxcmp->opaque;
+	if (unlikely(cons != rxr->rx_next_cons)) {
+		bnxt_discard_rx(bp, cpr, &tmp_raw_cons, rxcmp);
+		PMD_DRV_LOG(ERR, "RX cons %x != expected cons %x\n",
+			    cons, rxr->rx_next_cons);
+		bnxt_sched_ring_reset(rxq);
+		rc = -EBUSY;
+		goto next_rx;
+	}
 	mbuf = bnxt_consume_rx_buf(rxr, cons);
 	if (mbuf == NULL)
 		return -EBUSY;
@@ -869,6 +964,8 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 
 	mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1);
 
+	bnxt_set_vlan(rxcmp1, mbuf);
+
 	if (BNXT_TRUFLOW_EN(bp))
 		mark_id = bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf,
 						    &vfr_flag);
@@ -911,6 +1008,7 @@ reuse_rx_mbuf:
 		goto rx;
 	}
 	rxr->rx_raw_prod = raw_prod;
+	rxr->rx_next_cons = RING_IDX(rxr->rx_ring_struct, RING_NEXT(cons));
 
 	if (BNXT_TRUFLOW_EN(bp) && (BNXT_VF_IS_TRUSTED(bp) || BNXT_PF(bp)) &&
 	    vfr_flag) {
@@ -985,13 +1083,12 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
 		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
 
-		if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct))
+		if (!bnxt_cpr_cmp_valid(rxcmp, raw_cons,
+					cpr->cp_ring_struct->ring_size))
 			break;
-		cpr->valid = FLIP_VALID(cons,
-					cpr->cp_ring_struct->ring_mask,
-					cpr->valid);
-
-		if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
+		if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE) {
+			PMD_DRV_LOG(ERR, "Rx flush done\n");
+		} else if ((CMP_TYPE(rxcmp) >= CMPL_BASE_TYPE_RX_TPA_START_V2) &&
 		    (CMP_TYPE(rxcmp) <= RX_TPA_V2_ABUF_CMPL_TYPE_RX_TPA_AGG)) {
 			rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons);
 			if (!rc)
@@ -1016,9 +1113,6 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		raw_cons = NEXT_RAW_CMP(raw_cons);
 		if (nb_rx_pkts == nb_pkts || nb_rep_rx_pkts == nb_pkts || evt)
 			break;
-		/* Post some Rx buf early in case of larger burst processing */
-		if (nb_rx_pkts == BNXT_RX_POST_THRESH)
-			bnxt_db_write(&rxr->rx_db, rxr->rx_raw_prod);
 	}
 
 	cpr->cp_raw_cons = raw_cons;
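Both polling sites above, like bnxt_agg_bufs_valid() earlier, replace the CMP_VALID()/FLIP_VALID() pair with bnxt_cpr_cmp_valid(). The old scheme kept the expected valid-bit phase in software (cpr->valid) and had to flip it on every wrap; the new helper takes ring_size instead of the ring struct and can derive the phase from the raw consumer index alone, because a raw index that counts through two sweeps of a power-of-two ring before wrapping toggles the (raw_cons & ring_size) bit exactly once per lap. A sketch of that derivation, assuming a completion word whose bit 0 is the hardware-written valid bit; demo_* is an illustrative stand-in, not the driver's actual helper:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for a hardware completion record; bit 0 of info3_v is
 * assumed to be the valid bit the NIC toggles on each ring pass.
 */
struct demo_cmpl {
	uint32_t info3_v;
};

static inline bool
demo_cmp_valid(const struct demo_cmpl *cmpl, uint32_t raw_cons,
	       uint32_t ring_size)
{
	/* raw_cons runs 0 .. 2 * ring_size - 1, so this bit flips per lap. */
	bool expected = !(raw_cons & ring_size);

	return !!(cmpl->info3_v & 1u) == expected;
}

Deriving the phase this way removes mutable per-ring state from the fast path, which is why the deleted cpr->valid updates need no replacement.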
@@ -1043,7 +1137,7 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	/* Attempt to alloc Rx buf in case of a previous allocation failure. */
 	if (alloc_failed) {
-		uint16_t cnt;
+		int cnt;
 
 		rx_raw_prod = RING_NEXT(rx_raw_prod);
 		for (cnt = 0; cnt < nb_rx_pkts + nb_rep_rx_pkts; cnt++) {
@@ -1146,7 +1240,7 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 
 	/* Allocate extra rx ring entries for vector rx. */
 	ring->vmem_size = sizeof(struct rte_mbuf *) *
-			  (ring->ring_size + RTE_BNXT_DESCS_PER_LOOP);
+			  (ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES);
 
 	ring->vmem = (void **)&rxr->rx_buf_ring;
 	ring->fw_ring_id = INVALID_HW_RING_ID;
@@ -1250,7 +1344,7 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 
 	/* Initialize dummy mbuf pointers for vector mode rx. */
 	for (i = ring->ring_size;
-	     i < ring->ring_size + RTE_BNXT_DESCS_PER_LOOP; i++) {
+	     i < ring->ring_size + BNXT_RX_EXTRA_MBUF_ENTRIES; i++) {
 		rxr->rx_buf_ring[i] = &rxq->fake_mbuf;
 	}
 
@@ -1289,5 +1383,40 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 	}
 	PMD_DRV_LOG(DEBUG, "TPA alloc Done!\n");
 
+	/* Explicitly reset this driver internal tracker on a ring init */
+	rxr->rx_next_cons = 0;
+
+	return 0;
+}
+
+/* Sweep the Rx completion queue till HWRM_DONE for ring flush is received.
+ * The mbufs will not be freed in this call.
+ * They will be freed during ring free as a part of mem cleanup.
+ */
+int bnxt_flush_rx_cmp(struct bnxt_cp_ring_info *cpr)
+{
+	struct bnxt_ring *cp_ring_struct = cpr->cp_ring_struct;
+	uint32_t ring_mask = cp_ring_struct->ring_mask;
+	uint32_t raw_cons = cpr->cp_raw_cons;
+	struct rx_pkt_cmpl *rxcmp;
+	uint32_t nb_rx = 0;
+	uint32_t cons;
+
+	do {
+		cons = RING_CMP(cpr->cp_ring_struct, raw_cons);
+		rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons];
+
+		if (CMP_TYPE(rxcmp) == CMPL_BASE_TYPE_HWRM_DONE)
+			return 1;
+
+		raw_cons = NEXT_RAW_CMP(raw_cons);
+		nb_rx++;
+	} while (nb_rx < ring_mask);
+
+	cpr->cp_raw_cons = raw_cons;
+
+	/* Ring the completion queue doorbell. */
+	bnxt_db_cq(cpr);
+
 	return 0;
 }
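bnxt_flush_rx_cmp() returns 1 as soon as it finds the HWRM_DONE completion and 0 after sweeping up to ring_mask entries without seeing it, so a caller is expected to poll it until the flush completes or a retry budget runs out. A hypothetical caller sketch; the retry count, delay, and demo_* name are illustrative, not values taken from the driver:

#include <errno.h>
#include <rte_cycles.h>
#include "bnxt.h"
#include "bnxt_rxr.h" /* bnxt_flush_rx_cmp() */

/* Poll for the Rx flush HWRM_DONE with a bounded retry budget. */
static int demo_wait_rx_flush_done(struct bnxt_cp_ring_info *cpr)
{
	int retries = 50;                /* illustrative budget */

	while (retries--) {
		if (bnxt_flush_rx_cmp(cpr))
			return 0;        /* HWRM_DONE seen */
		rte_delay_us(100);       /* let firmware post completions */
	}

	return -ETIMEDOUT;
}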