*/
#include <stdint.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
#include "iavf.h"
rx_id, rxq->rxrearm_start, rxq->rxrearm_nb);
/* Update the tail pointer on the NIC */
- IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
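+	/*
+	 * Note: IAVF_PCI_REG_WC_WRITE goes through the write-combining
+	 * store helper (rte_write32_wc underneath, assuming the usual
+	 * iavf_rxtx.h definition), which can relax MMIO store ordering
+	 * for doorbells; platforms without WC support fall back to a
+	 * normal register write.
+	 */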
}
static inline void
0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804);
const __m128i cksum_mask = _mm_set_epi32(
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_EIP_CKSUM_BAD);
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map rss and vlan type to rss hash and vlan flag */
const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- 0, 0, 0, PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+ 0, 0, 0, RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
0, 0, 0, 0);
const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0,
- 0, 0, PKT_RX_FDIR, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR, RTE_MBUF_F_RX_RSS_HASH, 0, 0,
+ 0, 0, RTE_MBUF_F_RX_FDIR, 0);
const __m128i l3_l4e_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
- PKT_RX_L4_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
- PKT_RX_IP_CKSUM_BAD >> 1,
- (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+ RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+ (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1);
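+	/*
+	 * Rationale for the ">> 1" entries: RTE_MBUF_F_RX_L4_CKSUM_GOOD is
+	 * bit 8, so some of the combined values do not fit in the byte-sized
+	 * entries of an _mm_shuffle_epi8() lookup table.  Storing each entry
+	 * pre-shifted right by 1 keeps it under 256; the result is shifted
+	 * back left by 1 after the shuffle (see _mm_slli_epi32() below).
+	 */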
vlan0 = _mm_unpackhi_epi32(descs[0], descs[1]);
vlan1 = _mm_unpackhi_epi32(descs[2], descs[3]);
l3_l4e = _mm_shuffle_epi8(l3_l4e_flags, l3_l4e);
/* then we shift left 1 bit */
l3_l4e = _mm_slli_epi32(l3_l4e, 1);
- /* we need to mask out the reduntant bits */
+ /* we need to mask out the redundant bits */
l3_l4e = _mm_and_si128(l3_l4e, cksum_mask);
vlan0 = _mm_or_si128(vlan0, rss);
_mm_store_si128((__m128i *)&rx_pkts[3]->rearm_data, rearm3);
}
+static inline __m128i
+flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_FDIR_ID != (1 << 13));
+ const __m128i pkt_fdir_bit = _mm_set1_epi32(RTE_MBUF_F_RX_FDIR |
+ RTE_MBUF_F_RX_FDIR_ID);
+ /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+ const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
+ __m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
+ fdir_mis_mask);
+	/* this XOR op inverts each bit of the fdir_mask */
+ fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask);
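+	/*
+	 * Per-lane example: a miss has flow_id == 0xFFFFFFFF, so the cmpeq
+	 * above yields all-ones and the XOR clears the lane, making the AND
+	 * below produce no flags; a hit compares unequal (zero), the XOR
+	 * turns the lane all-ones, and the AND selects
+	 * RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID.
+	 */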
+ const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit);
+
+ return fdir_flags;
+}
+
static inline void
flex_desc_to_olflags_v(struct iavf_rx_queue *rxq, __m128i descs[4],
struct rte_mbuf **rx_pkts)
const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070,
0x3070, 0x3070);
- const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD,
- PKT_RX_IP_CKSUM_MASK |
- PKT_RX_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD);
+ const __m128i cksum_mask = _mm_set_epi32(RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_MASK |
+ RTE_MBUF_F_RX_L4_CKSUM_MASK |
+ RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD);
/* map the checksum, rss and vlan fields to the checksum, rss
* and vlan flag
*/
const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
/* shift right 1 bit to make sure it does not exceed 255 */
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD |
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+ (RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_GOOD) >> 1);
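+	/* same byte-table trick as above: entries are pre-shifted right by 1
+	 * so RTE_MBUF_F_RX_L4_CKSUM_GOOD (bit 8) fits in a byte, and the
+	 * shuffle result is shifted back left by 1 afterwards
+	 */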
const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
- PKT_RX_RSS_HASH, 0);
+ RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED,
+ RTE_MBUF_F_RX_RSS_HASH, 0);
/* merge 4 descriptors */
flags = _mm_unpackhi_epi32(descs[0], descs[1]);
tmp_desc = _mm_unpackhi_epi32(descs[2], descs[3]);
tmp_desc = _mm_unpacklo_epi64(flags, tmp_desc);
- tmp_desc = _mm_and_si128(flags, desc_mask);
+ tmp_desc = _mm_and_si128(tmp_desc, desc_mask);
/* checksum flags */
tmp_desc = _mm_srli_epi32(tmp_desc, 4);
/* merge the flags */
flags = _mm_or_si128(flags, rss_vlan);
+ if (rxq->fdir_enabled) {
+ const __m128i fdir_id0_1 =
+ _mm_unpackhi_epi32(descs[0], descs[1]);
+
+ const __m128i fdir_id2_3 =
+ _mm_unpackhi_epi32(descs[2], descs[3]);
+
+ const __m128i fdir_id0_3 =
+ _mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3);
+
+ const __m128i fdir_flags =
+ flex_rxd_to_fdir_flags_vec(fdir_id0_3);
+
+ /* merge with fdir_flags */
+ flags = _mm_or_si128(flags, fdir_flags);
+
+ /* write fdir_id to mbuf */
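+		/* (_mm_extract_epi32() is SSE4.1, within DPDK's x86 baseline) */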
+ rx_pkts[0]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 0);
+
+ rx_pkts[1]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 1);
+
+ rx_pkts[2]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 2);
+
+ rx_pkts[3]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 3);
+ } /* if() on fdir_enabled */
+
/**
* At this point, we have the 4 sets of flags in the low 16-bits
* of each 32-bit value in flags.
flex_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
const uint32_t *type_table)
{
- const __m128i ptype_mask = _mm_set_epi16(0, IAVF_RX_FLEX_DESC_PTYPE_M,
- 0, IAVF_RX_FLEX_DESC_PTYPE_M,
- 0, IAVF_RX_FLEX_DESC_PTYPE_M,
- 0, IAVF_RX_FLEX_DESC_PTYPE_M);
+ const __m128i ptype_mask =
+ _mm_set_epi16(IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+ IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+ IAVF_RX_FLEX_DESC_PTYPE_M, 0x0,
+ IAVF_RX_FLEX_DESC_PTYPE_M, 0x0);
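+	/*
+	 * _mm_set_epi16() lists arguments from the highest 16-bit word down,
+	 * so this mask selects the odd words (1, 3, 5, 7) -- where the flex
+	 * descriptor's ptype field lands after the unpack steps below and
+	 * where the _mm_extract_epi16() calls read it back.
+	 */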
+
__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
rx_pkts[3]->packet_type = type_table[_mm_extract_epi16(ptype_all, 7)];
}
-/* Notice:
+/**
+ * vPMD raw receive routine (only accepts nb_pkts >= IAVF_VPMD_DESCS_PER_LOOP)
+ *
+ * Notice:
* - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
- * numbers of DD bits
+ * - nb_pkts is floor-aligned to a multiple of IAVF_VPMD_DESCS_PER_LOOP
*/
static inline uint16_t
_recv_raw_pkts_vec(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_pkts,
offsetof(struct rte_mbuf, rx_descriptor_fields1) + 8);
__m128i dd_check, eop_check;
- /* nb_pkts shall be less equal than IAVF_VPMD_RX_MAX_BURST */
- nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST);
-
/* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP);
/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
#endif
+ /* A.1 load desc[2-0] */
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
/* and with mask to extract bits, flipping 1-0 */
__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
/* the staterr values are not in order, as the count
- * count of dd bits doesn't care. However, for end of
+ * of dd bits doesn't care about order. However, for end of
* packet tracking, we do care, so shuffle. This also
* compresses the 32-bit values to 8-bit
*/
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
pkt_mb1);
desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
- /* C.4 calc avaialbe number of desc */
+ /* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
nb_pkts_recd += var;
if (likely(var != IAVF_VPMD_DESCS_PER_LOOP))
return nb_pkts_recd;
}
-/* Notice:
+/**
+ * vPMD raw receive routine for flex RxD,
+ * which only accepts nb_pkts >= IAVF_VPMD_DESCS_PER_LOOP
+ *
+ * Notice:
* - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > IAVF_VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
- * numbers of DD bits
+ * - nb_pkts is floor-aligned to a multiple of IAVF_VPMD_DESCS_PER_LOOP
*/
static inline uint16_t
_recv_raw_pkts_vec_flex_rxd(struct iavf_rx_queue *rxq,
uint16_t nb_pkts_recd;
int pos;
uint64_t var;
- const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+ struct iavf_adapter *adapter = rxq->vsi->adapter;
+ uint64_t offloads = adapter->dev_data->dev_conf.rxmode.offloads;
+ const uint32_t *ptype_tbl = adapter->ptype_tbl;
__m128i crc_adjust = _mm_set_epi16
(0, 0, 0, /* ignore non-length fields */
-rxq->crc_len, /* sub crc on data_len */
const __m128i zero = _mm_setzero_si128();
/* mask to shuffle from desc. to mbuf */
const __m128i shuf_msk = _mm_set_epi8
- (15, 14, 13, 12, /* octet 12~15, 32 bits rss */
+ (0xFF, 0xFF,
+ 0xFF, 0xFF, /* rss hash parsed separately */
11, 10, /* octet 10~11, 16 bits vlan_macip */
5, 4, /* octet 4~5, 16 bits data_len */
0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL,
0x0000000200000002LL);
- /* nb_pkts shall be less equal than IAVF_VPMD_RX_MAX_BURST */
- nb_pkts = RTE_MIN(nb_pkts, IAVF_VPMD_RX_MAX_BURST);
-
/* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP);
pos += IAVF_VPMD_DESCS_PER_LOOP,
rxdp += IAVF_VPMD_DESCS_PER_LOOP) {
__m128i descs[IAVF_VPMD_DESCS_PER_LOOP];
- __m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+ __m128i pkt_mb0, pkt_mb1, pkt_mb2, pkt_mb3;
__m128i staterr, sterr_tmp1, sterr_tmp2;
/* 2 64 bit or 4 32 bit mbuf pointers in one XMM reg. */
__m128i mbp1;
/* B.1 load 2 (64 bit) or 4 (32 bit) mbuf pointers */
mbp1 = _mm_loadu_si128((__m128i *)&sw_ring[pos]);
/* Read desc statuses backwards to avoid race condition */
- /* A.1 load 4 pkts desc */
+ /* A.1 load desc[3] */
descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
rte_compiler_barrier();
mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos + 2]);
#endif
+ /* A.1 load desc[2-0] */
descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
rte_compiler_barrier();
- /* B.1 load 2 mbuf point */
descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
rte_compiler_barrier();
descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
rte_compiler_barrier();
/* D.1 pkt 3,4 convert format from desc to pktmbuf */
- pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
- pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
+ pkt_mb3 = _mm_shuffle_epi8(descs[3], shuf_msk);
+ pkt_mb2 = _mm_shuffle_epi8(descs[2], shuf_msk);
+
+ /* D.1 pkt 1,2 convert format from desc to pktmbuf */
+ pkt_mb1 = _mm_shuffle_epi8(descs[1], shuf_msk);
+ pkt_mb0 = _mm_shuffle_epi8(descs[0], shuf_msk);
/* C.1 4=>2 filter staterr info only */
sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
flex_desc_to_olflags_v(rxq, descs, &rx_pkts[pos]);
/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
- pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
+ pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
- /* D.1 pkt 1,2 convert format from desc to pktmbuf */
- pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
- pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+ /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
+ pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+ pkt_mb0 = _mm_add_epi16(pkt_mb0, crc_adjust);
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+ /**
+	 * needs to load the 2nd 16B of each descriptor for RSS hash parsing;
+	 * entering this branch costs extra loads and hurts performance.
+ */
+ if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH) {
+ /* load bottom half of every 32B desc */
+ const __m128i raw_desc_bh3 =
+ _mm_load_si128
+ ((void *)(&rxdp[3].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh2 =
+ _mm_load_si128
+ ((void *)(&rxdp[2].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh1 =
+ _mm_load_si128
+ ((void *)(&rxdp[1].wb.status_error1));
+ rte_compiler_barrier();
+ const __m128i raw_desc_bh0 =
+ _mm_load_si128
+ ((void *)(&rxdp[0].wb.status_error1));
+
+ /**
+			 * shift the 32-bit RSS hash value into the highest
+			 * 32 bits of each 128-bit lane before masking
+ */
+ __m128i rss_hash3 =
+ _mm_slli_epi64(raw_desc_bh3, 32);
+ __m128i rss_hash2 =
+ _mm_slli_epi64(raw_desc_bh2, 32);
+ __m128i rss_hash1 =
+ _mm_slli_epi64(raw_desc_bh1, 32);
+ __m128i rss_hash0 =
+ _mm_slli_epi64(raw_desc_bh0, 32);
+
+ __m128i rss_hash_msk =
+ _mm_set_epi32(0xFFFFFFFF, 0, 0, 0);
+
+ rss_hash3 = _mm_and_si128
+ (rss_hash3, rss_hash_msk);
+ rss_hash2 = _mm_and_si128
+ (rss_hash2, rss_hash_msk);
+ rss_hash1 = _mm_and_si128
+ (rss_hash1, rss_hash_msk);
+ rss_hash0 = _mm_and_si128
+ (rss_hash0, rss_hash_msk);
+
+ pkt_mb3 = _mm_or_si128(pkt_mb3, rss_hash3);
+ pkt_mb2 = _mm_or_si128(pkt_mb2, rss_hash2);
+ pkt_mb1 = _mm_or_si128(pkt_mb1, rss_hash1);
+ pkt_mb0 = _mm_or_si128(pkt_mb0, rss_hash0);
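+			/* the shuffle mask above left the mbuf hash bytes
+			 * zeroed, so a plain OR deposits the hash into
+			 * mbuf->hash.rss via the rx_descriptor_fields1
+			 * stores below
+			 */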
+ } /* if() on RSS hash parsing */
+#endif
/* C.2 get 4 pkts staterr value */
staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
/* D.3 copy final 3,4 data to rx_pkts */
_mm_storeu_si128
((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
- pkt_mb4);
+ pkt_mb3);
_mm_storeu_si128
((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
- pkt_mb3);
-
- /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
- pkt_mb2 = _mm_add_epi16(pkt_mb2, crc_adjust);
- pkt_mb1 = _mm_add_epi16(pkt_mb1, crc_adjust);
+ pkt_mb2);
/* C* extract and record EOP bit */
if (split_packet) {
/* and with mask to extract bits, flipping 1-0 */
__m128i eop_bits = _mm_andnot_si128(staterr, eop_check);
/* the staterr values are not in order, as the count
- * count of dd bits doesn't care. However, for end of
+ * of dd bits doesn't care about order. However, for end of
* packet tracking, we do care, so shuffle. This also
* compresses the 32-bit values to 8-bit
*/
/* D.3 copy final 1,2 data to rx_pkts */
_mm_storeu_si128
((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
- pkt_mb2);
+ pkt_mb1);
_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
- pkt_mb1);
+ pkt_mb0);
flex_desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
/* C.4 calc available number of desc */
var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
return _recv_raw_pkts_vec_flex_rxd(rx_queue, rx_pkts, nb_pkts, NULL);
}
-/* vPMD receive routine that reassembles scattered packets
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ *
* Notice:
* - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
- * numbers of DD bits
*/
-uint16_t
-iavf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static uint16_t
+iavf_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct iavf_rx_queue *rxq = rx_queue;
uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
&split_flags[i]);
}
-/* vPMD receive routine that reassembles scattered packets for flex RxD
+/**
+ * vPMD receive routine that reassembles scattered packets.
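+ *
+ * The burst helper above tracks split fragments in a stack array of
+ * IAVF_VPMD_RX_MAX_BURST entries, so larger requests are handled in
+ * chunks of at most IAVF_VPMD_RX_MAX_BURST packets; a chunk that comes
+ * back short means the ring ran dry, so we return early.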
+ */
+uint16_t
+iavf_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
+ while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ uint16_t burst;
+
+ burst = iavf_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ IAVF_VPMD_RX_MAX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < IAVF_VPMD_RX_MAX_BURST)
+ return retval;
+ }
+
+ return retval + iavf_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ nb_pkts);
+}
+
+/**
+ * vPMD receive routine that reassembles a single burst of 32 scattered packets
+ * for flex RxD
+ *
* Notice:
* - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > VPMD_RX_MAX_BURST, only scan IAVF_VPMD_RX_MAX_BURST
- * numbers of DD bits
*/
-uint16_t
-iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
- struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static uint16_t
+iavf_recv_scattered_burst_vec_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct iavf_rx_queue *rxq = rx_queue;
uint8_t split_flags[IAVF_VPMD_RX_MAX_BURST] = {0};
&split_flags[i]);
}
+/**
+ * vPMD receive routine that reassembles scattered packets for flex RxD
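+ * (same chunking scheme as iavf_recv_scattered_pkts_vec() above)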
+ */
+uint16_t
+iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
+ while (nb_pkts > IAVF_VPMD_RX_MAX_BURST) {
+ uint16_t burst;
+
+ burst = iavf_recv_scattered_burst_vec_flex_rxd(rx_queue,
+ rx_pkts + retval,
+ IAVF_VPMD_RX_MAX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < IAVF_VPMD_RX_MAX_BURST)
+ return retval;
+ }
+
+ return retval + iavf_recv_scattered_burst_vec_flex_rxd(rx_queue,
+ rx_pkts + retval,
+ nb_pkts);
+}
+
static inline void
vtx1(volatile struct iavf_tx_desc *txdp, struct rte_mbuf *pkt, uint64_t flags)
{
PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_pkts=%u",
txq->port_id, txq->queue_id, tx_id, nb_pkts);
- IAVF_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ IAVF_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}