diff --git a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
index 7f3eabcda1..4075669e95 100644
--- a/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
+++ b/drivers/net/bnxt/bnxt_rxtx_vec_neon.c
@@ -22,178 +22,151 @@
  * RX Ring handling
  */
 
-static inline void
-bnxt_rxq_rearm(struct bnxt_rx_queue *rxq, struct bnxt_rx_ring_info *rxr)
-{
-	struct rx_prod_pkt_bd *rxbds = &rxr->rx_desc_ring[rxq->rxrearm_start];
-	struct rte_mbuf **rx_bufs = &rxr->rx_buf_ring[rxq->rxrearm_start];
-	struct rte_mbuf *mb0, *mb1;
-	int nb, i;
-
-	const uint64x2_t hdr_room = {0, RTE_PKTMBUF_HEADROOM};
-	const uint64x2_t addrmask = {0, UINT64_MAX};
-
-	/*
-	 * Number of mbufs to allocate must be a multiple of two. The
-	 * allocation must not go past the end of the ring.
-	 */
-	nb = RTE_MIN(rxq->rxrearm_nb & ~0x1,
-		     rxq->nb_rx_desc - rxq->rxrearm_start);
-
-	/* Allocate new mbufs into the software ring */
-	if (rte_mempool_get_bulk(rxq->mb_pool, (void *)rx_bufs, nb) < 0) {
-		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += nb;
-
-		return;
-	}
-
-	/* Initialize the mbufs in vector, process 2 mbufs in one loop */
-	for (i = 0; i < nb; i += 2, rx_bufs += 2) {
-		uint64x2_t buf_addr0, buf_addr1;
-		uint64x2_t rxbd0, rxbd1;
-
-		mb0 = rx_bufs[0];
-		mb1 = rx_bufs[1];
-
-		/* Load address fields from both mbufs */
-		buf_addr0 = vld1q_u64((uint64_t *)&mb0->buf_addr);
-		buf_addr1 = vld1q_u64((uint64_t *)&mb1->buf_addr);
-
-		/* Load both rx descriptors (preserving some existing fields) */
-		rxbd0 = vld1q_u64((uint64_t *)(rxbds + 0));
-		rxbd1 = vld1q_u64((uint64_t *)(rxbds + 1));
-
-		/* Add default offset to buffer address. */
-		buf_addr0 = vaddq_u64(buf_addr0, hdr_room);
-		buf_addr1 = vaddq_u64(buf_addr1, hdr_room);
-
-		/* Clear all fields except address. */
-		buf_addr0 = vandq_u64(buf_addr0, addrmask);
-		buf_addr1 = vandq_u64(buf_addr1, addrmask);
-
-		/* Clear address field in descriptor. */
-		rxbd0 = vbicq_u64(rxbd0, addrmask);
-		rxbd1 = vbicq_u64(rxbd1, addrmask);
-
-		/* Set address field in descriptor. */
-		rxbd0 = vaddq_u64(rxbd0, buf_addr0);
-		rxbd1 = vaddq_u64(rxbd1, buf_addr1);
-
-		/* Store descriptors to memory. */
-		vst1q_u64((uint64_t *)(rxbds++), rxbd0);
-		vst1q_u64((uint64_t *)(rxbds++), rxbd1);
-	}
-
-	rxq->rxrearm_start += nb;
-	bnxt_db_write(&rxr->rx_db, rxq->rxrearm_start - 1);
-	if (rxq->rxrearm_start >= rxq->nb_rx_desc)
-		rxq->rxrearm_start = 0;
-
-	rxq->rxrearm_nb -= nb;
+#define GET_OL_FLAGS(rss_flags, ol_idx, errors, pi, ol_flags) \
+{ \
+	uint32_t tmp, of; \
+ \
+	of = vgetq_lane_u32((rss_flags), (pi)) | \
+		bnxt_ol_flags_table[vgetq_lane_u32((ol_idx), (pi))]; \
+ \
+	tmp = vgetq_lane_u32((errors), (pi)); \
+	if (tmp) \
+		of |= bnxt_ol_flags_err_table[tmp]; \
+	(ol_flags) = of; \
 }
 
-static uint32_t
-bnxt_parse_pkt_type(struct rx_pkt_cmpl *rxcmp, struct rx_pkt_cmpl_hi *rxcmp1)
-{
-	uint32_t l3, pkt_type = 0;
-	uint32_t t_ipcs = 0, ip6 = 0, vlan = 0;
-	uint32_t flags_type;
-
-	vlan = !!(rxcmp1->flags2 &
-		rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN));
-	pkt_type |= vlan ? 
RTE_PTYPE_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER; - - t_ipcs = !!(rxcmp1->flags2 & - rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC)); - ip6 = !!(rxcmp1->flags2 & - rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS2_IP_TYPE)); - - flags_type = rxcmp->flags_type & - rte_cpu_to_le_32(RX_PKT_CMPL_FLAGS_ITYPE_MASK); - - if (!t_ipcs && !ip6) - l3 = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN; - else if (!t_ipcs && ip6) - l3 = RTE_PTYPE_L3_IPV6_EXT_UNKNOWN; - else if (t_ipcs && !ip6) - l3 = RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN; - else - l3 = RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN; - - switch (flags_type) { - case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_ICMP): - if (!t_ipcs) - pkt_type |= l3 | RTE_PTYPE_L4_ICMP; - else - pkt_type |= l3 | RTE_PTYPE_INNER_L4_ICMP; - break; - - case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_TCP): - if (!t_ipcs) - pkt_type |= l3 | RTE_PTYPE_L4_TCP; - else - pkt_type |= l3 | RTE_PTYPE_INNER_L4_TCP; - break; - - case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_UDP): - if (!t_ipcs) - pkt_type |= l3 | RTE_PTYPE_L4_UDP; - else - pkt_type |= l3 | RTE_PTYPE_INNER_L4_UDP; - break; - - case RTE_LE32(RX_PKT_CMPL_FLAGS_ITYPE_IP): - pkt_type |= l3; - break; - } - - return pkt_type; +#define GET_DESC_FIELDS(rxcmp, rxcmp1, shuf_msk, ptype_idx, pkt_idx, ret) \ +{ \ + uint32_t ptype; \ + uint16_t vlan_tci; \ + uint32x4_t r; \ + \ + /* Set mbuf pkt_len, data_len, and rss_hash fields. */ \ + r = vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(rxcmp), \ + (shuf_msk))); \ + \ + /* Set packet type. */ \ + ptype = bnxt_ptype_table[vgetq_lane_u32((ptype_idx), (pkt_idx))]; \ + r = vsetq_lane_u32(ptype, r, 0); \ + \ + /* Set vlan_tci. */ \ + vlan_tci = vgetq_lane_u32((rxcmp1), 1); \ + r = vreinterpretq_u32_u16(vsetq_lane_u16(vlan_tci, \ + vreinterpretq_u16_u32(r), 5)); \ + (ret) = r; \ } static void -bnxt_parse_csum(struct rte_mbuf *mbuf, struct rx_pkt_cmpl_hi *rxcmp1) +descs_to_mbufs(uint32x4_t mm_rxcmp[4], uint32x4_t mm_rxcmp1[4], + uint64x2_t mb_init, struct rte_mbuf **mbuf) { - uint32_t flags; - - flags = flags2_0xf(rxcmp1); - /* IP Checksum */ - if (likely(IS_IP_NONTUNNEL_PKT(flags))) { - if (unlikely(RX_CMP_IP_CS_ERROR(rxcmp1))) - mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; - else - mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; - } else if (IS_IP_TUNNEL_PKT(flags)) { - if (unlikely(RX_CMP_IP_OUTER_CS_ERROR(rxcmp1) || - RX_CMP_IP_CS_ERROR(rxcmp1))) - mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD; - else - mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD; - } else if (unlikely(RX_CMP_IP_CS_UNKNOWN(rxcmp1))) { - mbuf->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN; - } - - /* L4 Checksum */ - if (likely(IS_L4_NONTUNNEL_PKT(flags))) { - if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1))) - mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; - else - mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; - } else if (IS_L4_TUNNEL_PKT(flags)) { - if (unlikely(RX_CMP_L4_INNER_CS_ERR2(rxcmp1))) - mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD; - else - mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD; - if (unlikely(RX_CMP_L4_OUTER_CS_ERR2(rxcmp1))) { - mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_BAD; - } else if (unlikely(IS_L4_TUNNEL_PKT_ONLY_INNER_L4_CS - (flags))) { - mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_UNKNOWN; - } else { - mbuf->ol_flags |= PKT_RX_OUTER_L4_CKSUM_GOOD; - } - } else if (unlikely(RX_CMP_L4_CS_UNKNOWN(rxcmp1))) { - mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN; - } + const uint8x16_t shuf_msk = { + 0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type (zeroes) */ + 2, 3, 0xFF, 0xFF, /* pkt_len */ + 2, 3, /* data_len */ + 0xFF, 0xFF, /* vlan_tci (zeroes) */ + 12, 13, 14, 15 /* rss hash */ + }; + const uint32x4_t flags_type_mask = { + 
RX_PKT_CMPL_FLAGS_ITYPE_MASK, + RX_PKT_CMPL_FLAGS_ITYPE_MASK, + RX_PKT_CMPL_FLAGS_ITYPE_MASK, + RX_PKT_CMPL_FLAGS_ITYPE_MASK + }; + const uint32x4_t flags2_mask1 = { + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN | + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC, + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN | + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC, + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN | + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC, + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN | + RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC + }; + const uint32x4_t flags2_mask2 = { + RX_PKT_CMPL_FLAGS2_IP_TYPE, + RX_PKT_CMPL_FLAGS2_IP_TYPE, + RX_PKT_CMPL_FLAGS2_IP_TYPE, + RX_PKT_CMPL_FLAGS2_IP_TYPE + }; + const uint32x4_t rss_mask = { + RX_PKT_CMPL_FLAGS_RSS_VALID, + RX_PKT_CMPL_FLAGS_RSS_VALID, + RX_PKT_CMPL_FLAGS_RSS_VALID, + RX_PKT_CMPL_FLAGS_RSS_VALID + }; + const uint32x4_t flags2_index_mask = { + 0x1F, 0x1F, 0x1F, 0x1F + }; + const uint32x4_t flags2_error_mask = { + 0xF, 0xF, 0xF, 0xF + }; + uint32x4_t flags_type, flags2, index, errors, rss_flags; + uint32x4_t tmp, ptype_idx; + uint64x2_t t0, t1; + uint32_t ol_flags; + + /* Compute packet type table indexes for four packets */ + t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[0], mm_rxcmp[1])); + t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp[2], mm_rxcmp[3])); + + flags_type = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0), + vget_low_u64(t1))); + ptype_idx = + vshrq_n_u32(vandq_u32(flags_type, flags_type_mask), 9); + + t0 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[0], mm_rxcmp1[1])); + t1 = vreinterpretq_u64_u32(vzip1q_u32(mm_rxcmp1[2], mm_rxcmp1[3])); + + flags2 = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0), + vget_low_u64(t1))); + + ptype_idx = vorrq_u32(ptype_idx, + vshrq_n_u32(vandq_u32(flags2, flags2_mask1), 2)); + ptype_idx = vorrq_u32(ptype_idx, + vshrq_n_u32(vandq_u32(flags2, flags2_mask2), 7)); + + /* Extract RSS valid flags for four packets. */ + rss_flags = vshrq_n_u32(vandq_u32(flags_type, rss_mask), 9); + + flags2 = vandq_u32(flags2, flags2_index_mask); + + /* Extract errors_v2 fields for four packets. */ + t0 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[0], mm_rxcmp1[1])); + t1 = vreinterpretq_u64_u32(vzip2q_u32(mm_rxcmp1[2], mm_rxcmp1[3])); + + errors = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0), + vget_low_u64(t1))); + + /* Compute ol_flags and checksum error indexes for four packets. */ + errors = vandq_u32(vshrq_n_u32(errors, 4), flags2_error_mask); + errors = vandq_u32(errors, flags2); + + index = vbicq_u32(flags2, errors); + + /* Update mbuf rearm_data for four packets. */ + GET_OL_FLAGS(rss_flags, index, errors, 0, ol_flags); + vst1q_u32((uint32_t *)&mbuf[0]->rearm_data, + vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2)); + GET_OL_FLAGS(rss_flags, index, errors, 1, ol_flags); + vst1q_u32((uint32_t *)&mbuf[1]->rearm_data, + vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2)); + GET_OL_FLAGS(rss_flags, index, errors, 2, ol_flags); + vst1q_u32((uint32_t *)&mbuf[2]->rearm_data, + vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2)); + GET_OL_FLAGS(rss_flags, index, errors, 3, ol_flags); + vst1q_u32((uint32_t *)&mbuf[3]->rearm_data, + vsetq_lane_u32(ol_flags, vreinterpretq_u32_u64(mb_init), 2)); + + /* Update mbuf rx_descriptor_fields1 for four packets. 
*/ + GET_DESC_FIELDS(mm_rxcmp[0], mm_rxcmp1[0], shuf_msk, ptype_idx, 0, tmp); + vst1q_u32((uint32_t *)&mbuf[0]->rx_descriptor_fields1, tmp); + GET_DESC_FIELDS(mm_rxcmp[1], mm_rxcmp1[1], shuf_msk, ptype_idx, 1, tmp); + vst1q_u32((uint32_t *)&mbuf[1]->rx_descriptor_fields1, tmp); + GET_DESC_FIELDS(mm_rxcmp[2], mm_rxcmp1[2], shuf_msk, ptype_idx, 2, tmp); + vst1q_u32((uint32_t *)&mbuf[2]->rx_descriptor_fields1, tmp); + GET_DESC_FIELDS(mm_rxcmp[3], mm_rxcmp1[3], shuf_msk, ptype_idx, 3, tmp); + vst1q_u32((uint32_t *)&mbuf[3]->rx_descriptor_fields1, tmp); } uint16_t @@ -203,17 +176,23 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, struct bnxt_rx_queue *rxq = rx_queue; struct bnxt_cp_ring_info *cpr = rxq->cp_ring; struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint16_t cp_ring_size = cpr->cp_ring_struct->ring_size; + uint16_t rx_ring_size = rxr->rx_ring_struct->ring_size; + struct cmpl_base *cp_desc_ring = cpr->cp_desc_ring; + uint64_t valid, desc_valid_mask = ~0UL; + const uint32x4_t info3_v_mask = { + CMPL_BASE_V, CMPL_BASE_V, + CMPL_BASE_V, CMPL_BASE_V + }; uint32_t raw_cons = cpr->cp_raw_cons; - uint32_t cons; + uint32_t cons, mbcons; int nb_rx_pkts = 0; - struct rx_pkt_cmpl *rxcmp; - const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0}; - const uint8x16_t shuf_msk = { - 0xFF, 0xFF, 0xFF, 0xFF, /* pkt_type (zeroes) */ - 2, 3, 0xFF, 0xFF, /* pkt_len */ - 2, 3, /* data_len */ - 0xFF, 0xFF, /* vlan_tci (zeroes) */ - 12, 13, 14, 15 /* rss hash */ + const uint64x2_t mb_init = {rxq->mbuf_initializer, 0}; + const uint32x4_t valid_target = { + !!(raw_cons & cp_ring_size), + !!(raw_cons & cp_ring_size), + !!(raw_cons & cp_ring_size), + !!(raw_cons & cp_ring_size) }; int i; @@ -227,69 +206,130 @@ bnxt_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, /* Return no more than RTE_BNXT_MAX_RX_BURST per call. */ nb_pkts = RTE_MIN(nb_pkts, RTE_BNXT_MAX_RX_BURST); - /* Make nb_pkts an integer multiple of RTE_BNXT_DESCS_PER_LOOP. */ - nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP); - if (!nb_pkts) - return 0; + cons = raw_cons & (cp_ring_size - 1); + mbcons = (raw_cons / 2) & (rx_ring_size - 1); - /* Handle RX burst request */ - for (i = 0; i < nb_pkts; i++) { - struct rx_pkt_cmpl_hi *rxcmp1; - struct rte_mbuf *mbuf; - uint64x2_t mm_rxcmp; - uint8x16_t pkt_mb; + /* Prefetch first four descriptor pairs. */ + rte_prefetch0(&cp_desc_ring[cons]); + rte_prefetch0(&cp_desc_ring[cons + 4]); - cons = RING_CMP(cpr->cp_ring_struct, raw_cons); + /* Ensure that we do not go past the ends of the rings. */ + nb_pkts = RTE_MIN(nb_pkts, RTE_MIN(rx_ring_size - mbcons, + (cp_ring_size - cons) / 2)); + /* + * If we are at the end of the ring, ensure that descriptors after the + * last valid entry are not treated as valid. Otherwise, force the + * maximum number of packets to receive to be a multiple of the per- + * loop count. + */ + if (nb_pkts < RTE_BNXT_DESCS_PER_LOOP) + desc_valid_mask >>= 16 * (RTE_BNXT_DESCS_PER_LOOP - nb_pkts); + else + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_BNXT_DESCS_PER_LOOP); - rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; - rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cons + 1]; + /* Handle RX burst request */ + for (i = 0; i < nb_pkts; i += RTE_BNXT_DESCS_PER_LOOP, + cons += RTE_BNXT_DESCS_PER_LOOP * 2, + mbcons += RTE_BNXT_DESCS_PER_LOOP) { + uint32x4_t rxcmp1[RTE_BNXT_DESCS_PER_LOOP]; + uint32x4_t rxcmp[RTE_BNXT_DESCS_PER_LOOP]; + uint32x4_t info3_v; + uint64x2_t t0, t1; + uint32_t num_valid; + + /* Copy four mbuf pointers to output array. 
*/
+		t0 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons]);
+#ifdef RTE_ARCH_ARM64
+		t1 = vld1q_u64((void *)&rxr->rx_buf_ring[mbcons + 2]);
+#endif
+		vst1q_u64((void *)&rx_pkts[i], t0);
+#ifdef RTE_ARCH_ARM64
+		vst1q_u64((void *)&rx_pkts[i + 2], t1);
+#endif
+
+		/* Prefetch four descriptor pairs for next iteration. */
+		if (i + RTE_BNXT_DESCS_PER_LOOP < nb_pkts) {
+			rte_prefetch0(&cp_desc_ring[cons + 8]);
+			rte_prefetch0(&cp_desc_ring[cons + 12]);
+		}
 
-		if (!CMP_VALID(rxcmp1, raw_cons + 1, cpr->cp_ring_struct))
+		/*
+		 * Load the four current descriptors into NEON registers in
+		 * reverse order to ensure consistent state.
+		 */
+		rxcmp1[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 7]);
+		rte_cio_rmb();
+		rxcmp[3] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 6]);
+
+		rxcmp1[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 5]);
+		rte_cio_rmb();
+		rxcmp[2] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 4]);
+
+		t1 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[2], rxcmp1[3]));
+
+		rxcmp1[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 3]);
+		rte_cio_rmb();
+		rxcmp[1] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 2]);
+
+		rxcmp1[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 1]);
+		rte_cio_rmb();
+		rxcmp[0] = vld1q_u32((void *)&cpr->cp_desc_ring[cons + 0]);
+
+		t0 = vreinterpretq_u64_u32(vzip2q_u32(rxcmp1[0], rxcmp1[1]));
+
+		/* Isolate descriptor status flags. */
+		info3_v = vreinterpretq_u32_u64(vcombine_u64(vget_low_u64(t0),
+							     vget_low_u64(t1)));
+		info3_v = vandq_u32(info3_v, info3_v_mask);
+		info3_v = veorq_u32(info3_v, valid_target);
+
+		/*
+		 * Pack the 128-bit array of valid descriptor flags into 64
+		 * bits and count the number of set bits in order to determine
+		 * the number of valid descriptors.
+		 */
+		valid = vget_lane_u64(vreinterpret_u64_u16(vqmovn_u32(info3_v)),
+				      0);
+		/*
+		 * At this point, 'valid' is a 64-bit value containing four
+		 * 16-bit fields, each of which is either 0x0001 or 0x0000.
+		 * Compute number of valid descriptors from the index of
+		 * the highest non-zero field.
+		 */
+		num_valid = (sizeof(uint64_t) / sizeof(uint16_t)) -
+				(__builtin_clzl(valid & desc_valid_mask) / 16);
+
+		switch (num_valid) {
+		case 4:
+			rxr->rx_buf_ring[mbcons + 3] = NULL;
+			/* FALLTHROUGH */
+		case 3:
+			rxr->rx_buf_ring[mbcons + 2] = NULL;
+			/* FALLTHROUGH */
+		case 2:
+			rxr->rx_buf_ring[mbcons + 1] = NULL;
+			/* FALLTHROUGH */
+		case 1:
+			rxr->rx_buf_ring[mbcons + 0] = NULL;
 			break;
-
-		raw_cons += 2;
-		cons = rxcmp->opaque;
-
-		mbuf = rxr->rx_buf_ring[cons];
-		rte_prefetch0(mbuf);
-		rxr->rx_buf_ring[cons] = NULL;
-
-		/* Set constant fields from mbuf initializer. */
-		vst1q_u64((uint64_t *)&mbuf->rearm_data, mbuf_init);
-
-		/* Set mbuf pkt_len, data_len, and rss_hash fields. 
*/ - mm_rxcmp = vld1q_u64((uint64_t *)rxcmp); - pkt_mb = vqtbl1q_u8(vreinterpretq_u8_u64(mm_rxcmp), shuf_msk); - vst1q_u64((uint64_t *)&mbuf->rx_descriptor_fields1, - vreinterpretq_u64_u8(pkt_mb)); - - rte_compiler_barrier(); - - if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) - mbuf->ol_flags |= PKT_RX_RSS_HASH; - - if (rxcmp1->flags2 & - RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) { - mbuf->vlan_tci = rxcmp1->metadata & - (RX_PKT_CMPL_METADATA_VID_MASK | - RX_PKT_CMPL_METADATA_DE | - RX_PKT_CMPL_METADATA_PRI_MASK); - mbuf->ol_flags |= - PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED; + case 0: + goto out; } - bnxt_parse_csum(mbuf, rxcmp1); - mbuf->packet_type = bnxt_parse_pkt_type(rxcmp, rxcmp1); + descs_to_mbufs(rxcmp, rxcmp1, mb_init, &rx_pkts[nb_rx_pkts]); + nb_rx_pkts += num_valid; - rx_pkts[nb_rx_pkts++] = mbuf; + if (num_valid < RTE_BNXT_DESCS_PER_LOOP) + break; } +out: if (nb_rx_pkts) { rxr->rx_prod = RING_ADV(rxr->rx_ring_struct, rxr->rx_prod, nb_rx_pkts); rxq->rxrearm_nb += nb_rx_pkts; - cpr->cp_raw_cons = raw_cons; + cpr->cp_raw_cons += 2 * nb_rx_pkts; cpr->valid = !!(cpr->cp_raw_cons & cpr->cp_ring_struct->ring_size); bnxt_db_cq(cpr);
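
A note on the new index arithmetic in bnxt_recv_pkts_vec(): each completed packet consumes a pair of 16-byte completion ring entries but only a single receive buffer, which is why cons advances by two per packet while mbcons is derived from raw_cons / 2. The standalone sketch below works one example; the ring sizes and names are illustrative only, with both sizes assumed to be powers of two, as the driver's mask arithmetic requires.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const uint32_t cp_ring_size = 512;	/* completion entries (example) */
	const uint32_t rx_ring_size = 256;	/* receive buffers (example) */
	uint32_t raw_cons = 1026;		/* free-running counter */

	/* Completion index: wrap the raw counter on the ring size. */
	uint32_t cons = raw_cons & (cp_ring_size - 1);

	/* Buffer index: two completion entries map to one buffer. */
	uint32_t mbcons = (raw_cons / 2) & (rx_ring_size - 1);

	assert(cons == 2 && mbcons == 1);
	return 0;
}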
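
The validity check is the densest part of the new receive loop: the CMPL_BASE_V bit of each completion is isolated, XORed with the expected ring phase (valid_target, which toggles on every ring wraparound), the four 32-bit results are narrowed into the 16-bit fields of a single 64-bit word, and the count of leading valid descriptors falls out of __builtin_clzl(). This works because the hardware produces completions in order, so validity is contiguous from the first descriptor. A scalar sketch of the same arithmetic follows; count_valid() and V_BIT are illustrative names, not driver symbols, and the desc_valid_mask truncation for short bursts is omitted.

#include <stdint.h>

#define V_BIT 0x1u	/* stands in for CMPL_BASE_V */

static unsigned int
count_valid(const uint32_t info3_v[4], uint32_t valid_target)
{
	uint64_t packed = 0;
	unsigned int i;

	for (i = 0; i < 4; i++) {
		/* As in the diff: a non-zero (V ^ phase) result marks a
		 * freshly written descriptor for the current pass. */
		if ((info3_v[i] & V_BIT) ^ valid_target)
			packed |= 1ULL << (16 * i);
	}

	if (packed == 0)	/* __builtin_clzl(0) is undefined */
		return 0;

	/* The highest non-zero 16-bit field bounds the valid run:
	 * clzl() locates it, and /16 converts bit index to count. */
	return 4 - (unsigned int)(__builtin_clzl(packed) / 16);
}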
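
Finally, GET_OL_FLAGS() replaces the old per-field bnxt_parse_csum() branches with two table lookups: bnxt_ol_flags_table holds checksum-good and VLAN flag combinations indexed by masked flags2 bits, and bnxt_ol_flags_err_table holds checksum-bad flags indexed by the masked errors_v2 bits. The index = vbicq_u32(flags2, errors) step clears each checksum-calculated bit that had an error, so a field is never reported as both good and bad. A scalar sketch of that flow; the two tables here are empty placeholders standing in for the driver's, and all names are illustrative.

#include <stdint.h>

static uint32_t ol_flags_good[32];	/* placeholder: bnxt_ol_flags_table */
static uint32_t ol_flags_err[16];	/* placeholder: bnxt_ol_flags_err_table */

static uint32_t
pkt_ol_flags(uint32_t rss_flag, uint32_t flags2, uint32_t errors)
{
	/* Drop errored checksum-calc bits from the good-flags index,
	 * mirroring index = vbicq_u32(flags2, errors) in the diff. */
	uint32_t of = rss_flag | ol_flags_good[(flags2 & 0x1F) & ~errors];

	/* Errored fields pick up their bad-checksum flags here. */
	if (errors & 0xF)
		of |= ol_flags_err[errors & 0xF];

	return of;
}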