diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c
index ee1444c1d1..ee1acb1969 100644
--- a/drivers/net/bnxt/bnxt_rxr.c
+++ b/drivers/net/bnxt/bnxt_rxr.c
@@ -20,6 +20,9 @@
 #include "bnxt_hwrm.h"
 #endif
 
+#include <bnxt_tf_common.h>
+#include <ulp_mark_mgr.h>
+
 /*
  * RX Ring handling
  */
@@ -399,6 +402,126 @@ bnxt_get_rx_ts_thor(struct bnxt *bp, uint32_t rx_ts_cmpl)
 }
 #endif
 
+static void
+bnxt_ulp_set_mark_in_mbuf(struct bnxt *bp, struct rx_pkt_cmpl_hi *rxcmp1,
+                          struct rte_mbuf *mbuf)
+{
+        uint32_t cfa_code;
+        uint32_t meta_fmt;
+        uint32_t meta;
+        bool gfid = false;
+        uint32_t mark_id;
+        uint32_t flags2;
+        int rc;
+
+        cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+        flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
+        meta = rte_le_to_cpu_32(rxcmp1->metadata);
+
+        /*
+         * The flags field holds extra bits of info from [6:4],
+         * which indicate if the flow is in TCAM or EM or EEM.
+         */
+        meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
+                   BNXT_CFA_META_FMT_SHFT;
+
+        switch (meta_fmt) {
+        case 0:
+                /* Not an LFID or GFID, a flush cmd. */
+                goto skip_mark;
+        case 4:
+        case 5:
+                /*
+                 * EM/TCAM case.
+                 * Assume that EM doesn't support Mark due to GFID
+                 * collisions with EEM. Simply return without setting the mark
+                 * in the mbuf.
+                 */
+                if (BNXT_CFA_META_EM_TEST(meta))
+                        goto skip_mark;
+                /*
+                 * It is a TCAM entry, so it is an LFID. The TCAM IDX and Mode
+                 * can also be determined by decoding the meta_data. We are not
+                 * using these for now.
+                 */
+                break;
+        case 6:
+        case 7:
+                /* EEM case, only using gfid in EEM for now. */
+                gfid = true;
+
+                /*
+                 * For EEM flows, the first part of cfa_code is 16 bits.
+                 * The second part is embedded in the
+                 * metadata field from bit 19 onwards. The driver needs to
+                 * ignore the first 19 bits of metadata and use the next 12
+                 * bits as higher 12 bits of cfa_code.
+                 */
+                meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+                cfa_code |= meta << BNXT_CFA_CODE_META_SHIFT;
+                break;
+        default:
+                /* For other values, the cfa_code is assumed to be an LFID. */
+                break;
+        }
+
+        if (cfa_code) {
+                rc = ulp_mark_db_mark_get(bp->ulp_ctx, gfid,
+                                          cfa_code, &mark_id);
+                if (!rc) {
+                        /* Got the mark, write it to the mbuf and return. */
+                        mbuf->hash.fdir.hi = mark_id;
+                        mbuf->udata64 = (cfa_code & 0xffffffffull) << 32;
+                        mbuf->hash.fdir.id = rxcmp1->cfa_code;
+                        mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+                        return;
+                }
+        }
+
+skip_mark:
+        mbuf->hash.fdir.hi = 0;
+        mbuf->hash.fdir.id = 0;
+}
+
+void bnxt_set_mark_in_mbuf(struct bnxt *bp,
+                           struct rx_pkt_cmpl_hi *rxcmp1,
+                           struct rte_mbuf *mbuf)
+{
+        uint32_t cfa_code = 0;
+        uint8_t meta_fmt = 0;
+        uint32_t flags2 = 0;
+        uint32_t meta = 0;
+
+        cfa_code = rte_le_to_cpu_16(rxcmp1->cfa_code);
+        if (!cfa_code)
+                return;
+
+        if (!bp->mark_table[cfa_code].valid)
+                return;
+
+        flags2 = rte_le_to_cpu_32(rxcmp1->flags2);
+        meta = rte_le_to_cpu_32(rxcmp1->metadata);
+        if (meta) {
+                meta >>= BNXT_RX_META_CFA_CODE_SHIFT;
+
+                /* The flags field holds extra bits of info from [6:4],
+                 * which indicate if the flow is in TCAM or EM or EEM.
+                 */
+                meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
+                           BNXT_CFA_META_FMT_SHFT;
+
+                /* meta_fmt == 4 => 'b100 => 'b10x => EM.
+                 * meta_fmt == 5 => 'b101 => 'b10x => EM + VLAN.
+                 * meta_fmt == 6 => 'b110 => 'b11x => EEM.
+                 * meta_fmt == 7 => 'b111 => 'b11x => EEM + VLAN.
+                 */
+                meta_fmt >>= BNXT_CFA_META_FMT_EM_EEM_SHFT;
+        }
+
+        mbuf->hash.fdir.hi = bp->mark_table[cfa_code].mark_id;
+        mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+}
+
 static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
                             struct bnxt_rx_queue *rxq, uint32_t *raw_cons)
 {
@@ -415,6 +538,7 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
         uint16_t cmp_type;
         uint32_t flags2_f = 0;
         uint16_t flags_type;
+        struct bnxt *bp = rxq->bp;
 
         rxcmp = (struct rx_pkt_cmpl *)
                 &cpr->cp_desc_ring[cp_cons];
@@ -488,10 +612,13 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
         if (flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) {
                 mbuf->hash.rss = rxcmp->rss_hash;
                 mbuf->ol_flags |= PKT_RX_RSS_HASH;
-        } else {
-                mbuf->hash.fdir.id = rxcmp1->cfa_code;
-                mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
         }
+
+        if (BNXT_TRUFLOW_EN(bp))
+                bnxt_ulp_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
+        else
+                bnxt_set_mark_in_mbuf(rxq->bp, rxcmp1, mbuf);
+
 #ifdef RTE_LIBRTE_IEEE1588
         if (unlikely((flags_type & RX_PKT_CMPL_FLAGS_MASK) ==
                      RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP)) {
@@ -512,15 +639,21 @@ static int bnxt_rx_pkt(struct rte_mbuf **rx_pkt,
         flags2_f = flags2_0xf(rxcmp1);
 
         /* IP Checksum */
-        if (unlikely(((IS_IP_NONTUNNEL_PKT(flags2_f)) &&
-                      (RX_CMP_IP_CS_ERROR(rxcmp1))) ||
-                     (IS_IP_TUNNEL_PKT(flags2_f) &&
-                      (RX_CMP_IP_OUTER_CS_ERROR(rxcmp1))))) {
-                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-        } else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) {
-                mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
-        } else {
-                mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+        if (likely(IS_IP_NONTUNNEL_PKT(flags2_f))) {
+                if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1)))
+                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
+                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+                else
+                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+        } else if (IS_IP_TUNNEL_PKT(flags2_f)) {
+                if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) ||
+                             RX_CMP_IP_CS_ERROR(rxcmp1)))
+                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+                else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1)))
+                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+                else
+                        mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
         }
 
         /* L4 Checksum */
@@ -672,10 +805,11 @@ uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 
         /* Attempt to alloc Rx buf in case of a previous allocation failure. */
         if (rc == -ENOMEM) {
-                int i;
+                int i = RING_NEXT(rxr->rx_ring_struct, prod);
+                int cnt = nb_rx_pkts;
 
-                for (i = prod; i <= nb_rx_pkts;
-                     i = RING_NEXT(rxr->rx_ring_struct, i)) {
+                for (; cnt;
+                     i = RING_NEXT(rxr->rx_ring_struct, i), cnt--) {
                         struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
 
                         /* Buffer already allocated for this index.
                          */
@@ -846,11 +980,13 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 
         prod = rxr->rx_prod;
         for (i = 0; i < ring->ring_size; i++) {
-                if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
-                        PMD_DRV_LOG(WARNING,
-                                    "init'ed rx ring %d with %d/%d mbufs only\n",
-                                    rxq->queue_id, i, ring->ring_size);
-                        break;
+                if (unlikely(!rxr->rx_buf_ring[i].mbuf)) {
+                        if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) {
+                                PMD_DRV_LOG(WARNING,
+                                            "init'ed rx ring %d with %d/%d mbufs only\n",
+                                            rxq->queue_id, i, ring->ring_size);
+                                break;
+                        }
                 }
                 rxr->rx_prod = prod;
                 prod = RING_NEXT(rxr->rx_ring_struct, prod);
@@ -862,11 +998,13 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
 
         prod = rxr->ag_prod;
         for (i = 0; i < ring->ring_size; i++) {
-                if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
-                        PMD_DRV_LOG(WARNING,
-                                    "init'ed AG ring %d with %d/%d mbufs only\n",
-                                    rxq->queue_id, i, ring->ring_size);
-                        break;
+                if (unlikely(!rxr->ag_buf_ring[i].mbuf)) {
+                        if (bnxt_alloc_ag_data(rxq, rxr, prod) != 0) {
+                                PMD_DRV_LOG(WARNING,
+                                            "init'ed AG ring %d with %d/%d mbufs only\n",
+                                            rxq->queue_id, i, ring->ring_size);
+                                break;
+                        }
                 }
                 rxr->ag_prod = prod;
                 prod = RING_NEXT(rxr->ag_ring_struct, prod);
@@ -877,11 +1015,13 @@ int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq)
                 unsigned int max_aggs = BNXT_TPA_MAX_AGGS(rxq->bp);
 
                 for (i = 0; i < max_aggs; i++) {
-                        rxr->tpa_info[i].mbuf =
-                                __bnxt_alloc_rx_data(rxq->mb_pool);
-                        if (!rxr->tpa_info[i].mbuf) {
-                                rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
-                                return -ENOMEM;
+                        if (unlikely(!rxr->tpa_info[i].mbuf)) {
+                                rxr->tpa_info[i].mbuf =
+                                        __bnxt_alloc_rx_data(rxq->mb_pool);
+                                if (!rxr->tpa_info[i].mbuf) {
+                                        rte_atomic64_inc(&rxq->rx_mbuf_alloc_fail);
+                                        return -ENOMEM;
+                                }
                         }
                 }
         }
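
Note on the metadata decode above: two bit-manipulation steps in bnxt_ulp_set_mark_in_mbuf carry most of the logic. Bits [6:4] of flags2 say which flow table produced the match ('b10x = EM/TCAM, 'b11x = EEM), and for EEM matches the 16-bit cfa_code from the completion record is widened with 12 extra bits carried in the metadata word. The standalone sketch below replays both steps; the constant values are inferred from the patch's own comments ("[6:4]", "first 19 bits", "higher 12 bits") rather than copied from the driver headers, so treat them as assumptions.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver's BNXT_* macros; values follow the comments in
 * the patch, the authoritative definitions live in the bnxt headers. */
#define BNXT_CFA_META_FMT_MASK          0x70u   /* flags2 bits [6:4] */
#define BNXT_CFA_META_FMT_SHFT          4
#define BNXT_CFA_META_FMT_EM_EEM_SHFT   1       /* 'b10x vs 'b11x */
#define BNXT_RX_META_CFA_CODE_SHIFT     19      /* skip low 19 metadata bits */
#define BNXT_CFA_CODE_META_SHIFT        16      /* place above 16-bit base */

/* True when meta_fmt is 'b11x (6 or 7), i.e. an EEM match whose cfa_code
 * must be widened with metadata bits, as in case 6/7 of the patch. */
static bool is_eem(uint32_t flags2)
{
        uint32_t meta_fmt = (flags2 & BNXT_CFA_META_FMT_MASK) >>
                            BNXT_CFA_META_FMT_SHFT;

        return (meta_fmt >> BNXT_CFA_META_FMT_EM_EEM_SHFT) == 0x3;
}

/* Combine the 16-bit completion cfa_code with the 12 metadata bits that
 * EEM flows carry from bit 19 upward. */
static uint32_t eem_cfa_code(uint16_t lo, uint32_t metadata)
{
        uint32_t meta = metadata >> BNXT_RX_META_CFA_CODE_SHIFT;

        return (uint32_t)lo | (meta << BNXT_CFA_CODE_META_SHIFT);
}

int main(void)
{
        uint32_t flags2 = 6u << BNXT_CFA_META_FMT_SHFT;  /* meta_fmt == 6, EEM */
        uint32_t metadata = 0xabcu << BNXT_RX_META_CFA_CODE_SHIFT;

        if (is_eem(flags2))
                printf("cfa_code = 0x%07x\n", eem_cfa_code(0x1234, metadata));
        /* prints: cfa_code = 0xabc1234 */
        return 0;
}

The resulting widened value is what the patch passes to ulp_mark_db_mark_get() as the flow identifier when looking up the mark.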
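
The bnxt_recv_pkts hunk also fixes the -ENOMEM retry walk: the old loop ran for (i = prod; i <= nb_rx_pkts; ...), comparing a ring index against a packet count, so it both mixed units and ignored wrap-around. The new loop starts one slot past prod and visits exactly nb_rx_pkts entries, wrapping at the ring boundary. A minimal model of that traversal, with a hypothetical 8-entry power-of-two ring and a printf standing in for bnxt_alloc_rx_data():

#include <stdio.h>

#define RING_SIZE       8       /* assumed power of two, as bnxt rings are */
#define RING_NEXT(idx)  (((idx) + 1) & (RING_SIZE - 1))

/* Visit exactly nb_rx_pkts slots starting one past the producer index,
 * wrapping at the ring boundary, as the fixed loop does. */
static void refill_after_enomem(int prod, int nb_rx_pkts)
{
        int i = RING_NEXT(prod);
        int cnt = nb_rx_pkts;

        for (; cnt; i = RING_NEXT(i), cnt--)
                printf("refill slot %d\n", i);  /* driver allocates an mbuf here */
}

int main(void)
{
        refill_after_enomem(6, 4);      /* visits slots 7, 0, 1, 2 */
        return 0;
}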