diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index 1a6fb7944b..11807f4094 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -12,7 +12,6 @@
 #include <rte_memory.h>
 
 #include "bnxt.h"
-#include "bnxt_cpr.h"
 #include "bnxt_ring.h"
 #include "bnxt_rxr.h"
 #include "bnxt_rxq.h"
@@ -21,6 +20,9 @@
 #include "bnxt_hwrm.h"
 #endif
 
+#include <bnxt_tf_common.h>
+#include <ulp_mark_mgr.h>
+
 /*
  * RX Ring handling
  */
@@ -64,18 +66,22 @@ static inline int bnxt_alloc_ag_data(struct bnxt_rx_queue *rxq,
 	struct bnxt_sw_rx_bd *rx_buf = &rxr->ag_buf_ring[prod];
 	struct rte_mbuf *mbuf;
 
+	if (rxbd == NULL) {
+		PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
+		return -EINVAL;
+	}
+
+	if (rx_buf == NULL) {
+		PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
+		return -EINVAL;
+	}
+
 	mbuf = __bnxt_alloc_rx_data(rxq->mb_pool);
 	if (!mbuf) {
 		rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
 		return -ENOMEM;
 	}
 
-	if (rxbd == NULL)
-		PMD_DRV_LOG(ERR, "Jumbo Frame. rxbd is NULL\n");
-	if (rx_buf == NULL)
-		PMD_DRV_LOG(ERR, "Jumbo Frame. rx_buf is NULL\n");
-
-
 	rx_buf->mbuf = mbuf;
 	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
@@ -396,6 +402,142 @@ bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
 }
 #endif
 
+static void
+bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
+			  struct rte_mbuf *mbuf)
+{
+	uint32_t cfa_code;
+	uint32_t meta_fmt;
+	uint32_t meta;
+	bool gfid = false;
+	uint32_t mark_id;
+	uint32_t flags2;
+	uint32_t gfid_support = 0;
+	int rc;
+	uint32_t vfr_flag;
+
+	if (BNXT_GFID_ENABLED(bp))
+		gfid_support = 1;
+
+	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+	flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
+	meta = rte_le_to_cpu_32(rxcmp1->metadata);
+
+	/*
+	 * The flags field holds extra bits of info in [6:4]
+	 * which indicate whether the flow is in TCAM, EM, or EEM.
+	 */
+	meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
+		   BNXT_CFA_META_FMT_SHFT;
+
+	switch (meta_fmt) {
+	case 0:
+		if (gfid_support) {
+			/* Not an LFID or GFID, a flush cmd. */
+			goto skip_mark;
+		} else {
+			/* LFID mode, no VLAN scenario */
+			gfid = false;
+		}
+		break;
+	case 4:
+	case 5:
+		/*
+		 * EM/TCAM case.
+		 * Assume that EM doesn't support Mark due to GFID
+		 * collisions with EEM.  Simply return without setting the
+		 * mark in the mbuf.
+		 */
+		if (BNXT_CFA_META_EM_TEST(meta)) {
+			/* This is an EM hit: {EM(1), GFID[27:16], 19'd0 or vtag} */
+			gfid = true;
+			meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+			cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
+		} else {
+			/*
+			 * It is a TCAM entry, so it is an LFID.
+			 * The TCAM IDX and Mode can also be determined
+			 * by decoding the meta_data.  We are not
+			 * using these for now.
+			 */
+		}
+		break;
+	case 6:
+	case 7:
+		/* EEM case; only GFID is used in EEM for now. */
+		gfid = true;
+
+		/*
+		 * For EEM flows, the first part of cfa_code is 16 bits.
+		 * The second part is embedded in the metadata field from
+		 * bit 19 onwards.  The driver needs to ignore the first
+		 * 19 bits of metadata and use the next 12 bits as the
+		 * higher 12 bits of cfa_code.
+		 */
+		meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+		cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
+		break;
+	default:
+		/* For other values, the cfa_code is assumed to be an LFID. */
+		break;
+	}
+
+	rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
+				  cfa_code, &vfr_flag, &mark_id);
+	if (!rc) {
+		/* Got the mark, write it to the mbuf and return */
+		mbuf->hash.fdir.hi = mark_id;
+		mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
+		mbuf->hash.fdir.id = rxcmp1->cfa_code;
+		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+		return;
+	}
+
+skip_mark:
+	mbuf->hash.fdir.hi = 0;
+	mbuf->hash.fdir.id = 0;
+}
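/*
 * Illustrative sketch (standalone, not part of the patch): how the EEM
 * branch above stitches the full flow ID together.  The 16-bit cfa_code
 * forms the low half; bits 19 and up of the metadata word supply the
 * upper bits.  The literal shift values assume
 * BNXT_RX_META_CFA_CODE_SHIFT == 19 and BNXT_CFA_CODE_META_SHIFT == 16,
 * matching the layout the comments above describe.
 */
#include <stdint.h>

static inline uint32_t eem_flow_id(uint16_t cfa_code, uint32_t meta)
{
	uint32_t id = cfa_code;

	meta >>= 19;		/* ignore the low 19 bits of metadata */
	id |= meta << 16;	/* place the remaining bits above the code */
	return id;
}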
+
+void bnxt_set_mark_in_mbuf(struct bnxt *bp,
+			   struct rx_pkt_cmpl_hi *rxcmp1,
+			   struct rte_mbuf *mbuf)
+{
+	uint32_t cfa_code = 0;
+	uint8_t meta_fmt = 0;
+	uint16_t flags2 = 0;
+	uint32_t meta = 0;
+
+	cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+	if (!cfa_code)
+		return;
+
+	if (cfa_code && !bp->mark_table[cfa_code].valid)
+		return;
+
+	flags2 = rte_le_to_cpu_16(rxcmp1->flags2);
+	meta = rte_le_to_cpu_32(rxcmp1->metadata);
+	if (meta) {
+		meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+
+		/* The flags field holds extra bits of info in [6:4]
+		 * which indicate whether the flow is in TCAM, EM, or EEM.
+		 */
+		meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
+			   BNXT_CFA_META_FMT_SHFT;
+
+		/* meta_fmt == 4 => 'b100 => 'b10x => EM.
+		 * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN.
+		 * meta_fmt == 6 => 'b110 => 'b11x => EEM.
+		 * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
+		 */
+		meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
+	}
+
+	mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
+	mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+}
+
 static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 			    struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
 {
@@ -412,6 +554,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	uint16_t cmp_type;
 	uint32_t flags2_f = 0;
 	uint16_t flags_type;
+	struct bnxt *bp = rxq->bp;
 
 	rxcmp = (struct rx_pkt_cmpl *)
 	    &cpr->cp_desc_ring[cp_cons];
@@ -485,10 +628,13 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
 		mbuf->hash.rss = rxcmp->rss_hash;
 		mbuf->ol_flags |= PKT_RX_RSS_HASH;
-	} else {
-		mbuf->hash.fdir.id = rxcmp1->cfa_code;
-		mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
 	}
+
+	if (BNXT_TRUFLOW_EN(bp))
+		bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
+	else
+		bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
+
 #ifdef RTE_LIBRTE_IEEE1588
 	if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
 		     RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
@@ -509,15 +655,21 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
 	flags2_f = flags2_0xf(rxcmp1);
 
 	/* IP Checksum */
-	if (unlikely(((IS_IP_NONTUNNEL_PKT(flags2_f)) &&
-		      (RX_CMP_IP_CS_ERROR(rxcmp1))) ||
-		     (IS_IP_TUNNEL_PKT(flags2_f) &&
-		      (RX_CMP_IP_OUTER_CS_ERROR(rxcmp1))))) {
-		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-	} else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {
-		mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
-	} else {
-		mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+	if (likely(IS_IP_NONTUNNEL_PKT(flags2_f))) {
+		if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+		else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+		else
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+	} else if (IS_IP_TUNNEL_PKT(flags2_f)) {
+		if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
+			     RX_CMP_IP_CS_ERROR(rxcmp1)))
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+		else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+		else
+			mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
 	}
 
 	/* L4 Checksum */
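/*
 * Sketch (standalone, not part of the patch): how an application consumes
 * the tri-state result the hunk above produces.  PKT_RX_IP_CKSUM_MASK
 * groups the GOOD/BAD/UNKNOWN/NONE encodings of these mbuf flags.
 */
#include <rte_mbuf.h>

static inline int ip_csum_state(const struct rte_mbuf *m)
{
	switch (m->ol_flags & PKT_RX_IP_CKSUM_MASK) {
	case PKT_RX_IP_CKSUM_GOOD:
		return 1;	/* verified by the NIC */
	case PKT_RX_IP_CKSUM_BAD:
		return 0;	/* drop, or count as an error */
	default:
		return -1;	/* UNKNOWN/NONE: verify in software */
	}
}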
@@ -636,6 +788,9 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			evt = bnxt_event_hwrm_resp_handler(rxq->bp,
 							   (struct cmpl_base *)rxcmp);
+			/* If the async event is a fatal error, return */
+			if (unlikely(is_bnxt_in_error(rxq->bp)))
+				goto done;
 		}
 		raw_cons = NEXT_RAW_CMP(raw_cons);
@@ -666,10 +821,11 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
 	/* Attempt to alloc Rx buf in case of a previous allocation failure. */
 	if (rc == -ENOMEM) {
-		int i;
+		int i = RING_NEXT(rxr->rx_ring_struct, prod);
+		int cnt = nb_rx_pkts;
 
-		for (i = prod; i <= nb_rx_pkts;
-		     i = RING_NEXT(rxr->rx_ring_struct, i)) {
+		for (; cnt;
+		     i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) {
 			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
 
 			/* Buffer already allocated for this index. */
@@ -742,7 +898,6 @@ void bnxt_free_rx_rings(struct bnxt *bp)
 int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 {
 	struct bnxt_cp_ring_info *cpr;
-	struct bnxt_cp_ring_info *nqr;
 	struct bnxt_rx_ring_info *rxr;
 	struct bnxt_ring *ring;
@@ -789,32 +944,6 @@ int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id)
 	ring->vmem_size = 0;
 	ring->vmem = NULL;
 
-	if (BNXT_HAS_NQ(rxq->bp)) {
-		nqr = rte_zmalloc_socket("bnxt_rx_ring_cq",
-					 sizeof(struct bnxt_cp_ring_info),
-					 RTE_CACHE_LINE_SIZE, socket_id);
-		if (nqr == NULL)
-			return -ENOMEM;
-
-		rxq->nq_ring = nqr;
-
-		ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
-					  sizeof(struct bnxt_ring),
-					  RTE_CACHE_LINE_SIZE, socket_id);
-		if (ring == NULL)
-			return -ENOMEM;
-
-		nqr->cp_ring_struct = ring;
-		ring->ring_size =
-			rte_align32pow2(rxr->rx_ring_struct->ring_size *
-					(2 + AGG_RING_SIZE_FACTOR));
-		ring->ring_mask = ring->ring_size - 1;
-		ring->bd = (void *)nqr->cp_desc_ring;
-		ring->bd_dma = nqr->cp_desc_mapping;
-		ring->vmem_size = 0;
-		ring->vmem = NULL;
-	}
-
 	/* Allocate Aggregator rings */
 	ring = rte_zmalloc_socket("bnxt_rx_ring_struct",
 				  sizeof(struct bnxt_ring),
@@ -867,11 +996,13 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 	prod = rxr->rx_prod;
 	for (i = 0; i < ring->ring_size; i++) {
-		if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
-			PMD_DRV_LOG(WARNING,
-				"init'ed rx ring %d with %d/%d mbufs only\n",
-				rxq->queue_id, i, ring->ring_size);
-			break;
+		if (unlikely(!rxr->rx_buf_ring[i].mbuf)) {
+			if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
+				PMD_DRV_LOG(WARNING,
+					    "init'ed rx ring %d with %d/%d mbufs only\n",
+					    rxq->queue_id, i, ring->ring_size);
+				break;
+			}
 		}
 		rxr->rx_prod = prod;
 		prod = RING_NEXT(rxr->rx_ring_struct, prod);
@@ -883,11 +1014,13 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 
 	prod = rxr->ag_prod;
 	for (i = 0; i < ring->ring_size; i++) {
-		if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
-			PMD_DRV_LOG(WARNING,
-				"init'ed AG ring %d with %d/%d mbufs only\n",
-				rxq->queue_id, i, ring->ring_size);
-			break;
+		if (unlikely(!rxr->ag_buf_ring[i].mbuf)) {
+			if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
+				PMD_DRV_LOG(WARNING,
+					    "init'ed AG ring %d with %d/%d mbufs only\n",
+					    rxq->queue_id, i, ring->ring_size);
+				break;
+			}
 		}
 		rxr->ag_prod = prod;
 		prod = RING_NEXT(rxr->ag_ring_struct, prod);
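/*
 * Sketch (standalone, not part of the patch) of the -ENOMEM replenish
 * pattern adopted above: resume one slot past the producer index and walk
 * exactly nb_rx_pkts descriptors, skipping slots that still own an mbuf.
 * RING_SZ, struct slot and alloc_buf() are hypothetical stand-ins for the
 * driver's ring size, struct bnxt_sw_rx_bd and __bnxt_alloc_rx_data().
 */
#include <stddef.h>

#define RING_SZ 256				/* power of two, assumed */
#define NEXT(idx) (((idx) + 1) & (RING_SZ - 1))

struct slot { void *mbuf; };
extern void *alloc_buf(void);			/* hypothetical allocator */

static void replenish(struct slot *ring, unsigned int prod, unsigned int cnt)
{
	unsigned int i = NEXT(prod);		/* first slot after producer */

	for (; cnt; i = NEXT(i), cnt--) {
		if (ring[i].mbuf != NULL)	/* already populated: skip */
			continue;
		ring[i].mbuf = alloc_buf();
	}
}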
@@ -898,11 +1031,13 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 		unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);
 
 		for (i = 0; i < max_aggs; i++) {
-			rxr->tpa_info[i].mbuf =
-				__bnxt_alloc_rx_data(rxq->mb_pool);
-			if (!rxr->tpa_info[i].mbuf) {
-				rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
-				return -ENOMEM;
+			if (unlikely(!rxr->tpa_info[i].mbuf)) {
+				rxr->tpa_info[i].mbuf =
+					__bnxt_alloc_rx_data(rxq->mb_pool);
+				if (!rxr->tpa_info[i].mbuf) {
+					rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+					return -ENOMEM;
+				}
 			}
 		}
 	}
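/*
 * End-to-end sketch (standalone, not part of the patch): the mark_id that
 * bnxt_ulp_set_mark_in_mbuf()/bnxt_set_mark_in_mbuf() deliver in
 * mbuf->hash.fdir.hi normally originates from an rte_flow MARK action.
 * Minimal ingress rule; the port id and mark value are arbitrary, error
 * handling is trimmed, and some PMDs may require an additional fate
 * action (e.g. QUEUE or RSS) alongside MARK.
 */
#include <rte_flow.h>

static struct rte_flow *install_mark_rule(uint16_t port_id)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_action_mark mark = { .id = 0x1234 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* Packets hitting this rule arrive with PKT_RX_FDIR_ID set and
	 * mbuf->hash.fdir.hi == 0x1234.
	 */
	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}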