#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
+static inline __m128i
+ice_flex_rxd_to_fdir_flags_vec(const __m128i fdir_id0_3)
+{
+#define FDID_MIS_MAGIC 0xFFFFFFFF
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2));
+ RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13));
+ const __m128i pkt_fdir_bit = _mm_set1_epi32(PKT_RX_FDIR |
+ PKT_RX_FDIR_ID);
+ /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */
+ const __m128i fdir_mis_mask = _mm_set1_epi32(FDID_MIS_MAGIC);
+ __m128i fdir_mask = _mm_cmpeq_epi32(fdir_id0_3,
+ fdir_mis_mask);
+ /* this XOR op inverts every bit of fdir_mask (bitwise NOT) */
+ fdir_mask = _mm_xor_si128(fdir_mask, fdir_mis_mask);
+ const __m128i fdir_flags = _mm_and_si128(fdir_mask, pkt_fdir_bit);
+
+ return fdir_flags;
+}
+
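/*
 * Illustration only (for review, not part of the patch): a scalar sketch of
 * what ice_flex_rxd_to_fdir_flags_vec() computes for each of the four
 * descriptor lanes. A flow id equal to the FDID_MIS_MAGIC sentinel
 * (0xFFFFFFFF) means no flow director filter matched, so no flag is raised;
 * any other id raises PKT_RX_FDIR | PKT_RX_FDIR_ID. The helper name below
 * is hypothetical.
 */
static inline uint64_t
ice_fdir_flags_scalar_sketch(uint32_t flow_id)
{
	return flow_id == 0xFFFFFFFF ? 0 : (PKT_RX_FDIR | PKT_RX_FDIR_ID);
}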
static inline void
ice_rxq_rearm(struct ice_rx_queue *rxq)
{
(rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
/* Update the tail pointer on the NIC */
- ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
}
static inline void
* bit12 for RSS indication.
* bit13 for VLAN indication.
*/
- const __m128i desc_mask = _mm_set_epi32(0x3070, 0x3070,
- 0x3070, 0x3070);
-
+ const __m128i desc_mask = _mm_set_epi32(0x30f0, 0x30f0,
+ 0x30f0, 0x30f0);
const __m128i cksum_mask = _mm_set_epi32(PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_OUTER_L4_CKSUM_MASK |
+ PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_OUTER_L4_CKSUM_MASK |
+ PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD,
+ PKT_RX_OUTER_L4_CKSUM_MASK |
+ PKT_RX_OUTER_IP_CKSUM_BAD,
PKT_RX_IP_CKSUM_MASK |
PKT_RX_L4_CKSUM_MASK |
- PKT_RX_EIP_CKSUM_BAD);
+ PKT_RX_OUTER_L4_CKSUM_MASK |
+ PKT_RX_OUTER_IP_CKSUM_BAD);
/* map the checksum, rss and vlan fields to the checksum, rss
* and vlan flag
*/
- const __m128i cksum_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
- /* shift right 1 bit to make sure it not exceed 255 */
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
- PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
- (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1);
+ const __m128i cksum_flags =
+ _mm_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 |
+ PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ /**
+ * shift right 20 bits to move the outer L4 checksum status into
+ * the low two bits of each entry;
+ * shift right 1 bit to make sure the value does not exceed 255
+ */
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD |
+ PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_BAD) >> 1,
+ (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD |
+ PKT_RX_IP_CKSUM_GOOD) >> 1);
const __m128i rss_vlan_flags = _mm_set_epi8(0, 0, 0, 0,
0, 0, 0, 0,
flags = _mm_unpackhi_epi32(descs[0], descs[1]);
tmp_desc = _mm_unpackhi_epi32(descs[2], descs[3]);
tmp_desc = _mm_unpacklo_epi64(flags, tmp_desc);
- tmp_desc = _mm_and_si128(flags, desc_mask);
+ tmp_desc = _mm_and_si128(tmp_desc, desc_mask);
/* checksum flags */
tmp_desc = _mm_srli_epi32(tmp_desc, 4);
flags = _mm_shuffle_epi8(cksum_flags, tmp_desc);
/* then we shift left 1 bit */
flags = _mm_slli_epi32(flags, 1);
+
+ __m128i l4_outer_mask = _mm_set_epi32(0x6, 0x6, 0x6, 0x6);
+ __m128i l4_outer_flags = _mm_and_si128(flags, l4_outer_mask);
+ l4_outer_flags = _mm_slli_epi32(l4_outer_flags, 20);
+
+ __m128i l3_l4_mask = _mm_set_epi32(~0x6, ~0x6, ~0x6, ~0x6);
+ __m128i l3_l4_flags = _mm_and_si128(flags, l3_l4_mask);
+ flags = _mm_or_si128(l3_l4_flags, l4_outer_flags);
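/*
 * Illustration only (for review, not part of the patch): the scalar
 * equivalent of the encode/decode performed above for one descriptor.
 * Each cksum_flags LUT byte packs the outer L4 status (normally held at
 * bits 21-22 of ol_flags) shifted right by 20 together with the inner
 * IP/L4/outer-IP flags, all shifted right by 1 so the result fits in
 * 8 bits; the vector code then shifts left by 1 and moves bits 1-2
 * (mask 0x6) back up by 20. The helper name below is hypothetical.
 */
static inline uint64_t
ice_cksum_flags_scalar_sketch(uint8_t lut_byte)
{
	uint32_t tmp = (uint32_t)lut_byte << 1;	/* undo the ">> 1" packing */
	uint64_t outer_l4 = (uint64_t)(tmp & 0x6) << 20; /* bits 1-2 -> 21-22 */
	uint64_t inner = tmp & ~(uint32_t)0x6;	/* inner IP/L4, outer-IP bad */

	return inner | outer_l4;
}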
/* we need to mask out the redundant bits introduced by RSS or
* VLAN fields.
*/
/* merge the flags */
flags = _mm_or_si128(flags, rss_vlan);
+ if (rxq->fdir_enabled) {
+ const __m128i fdir_id0_1 =
+ _mm_unpackhi_epi32(descs[0], descs[1]);
+
+ const __m128i fdir_id2_3 =
+ _mm_unpackhi_epi32(descs[2], descs[3]);
+
+ const __m128i fdir_id0_3 =
+ _mm_unpackhi_epi64(fdir_id0_1, fdir_id2_3);
+
+ const __m128i fdir_flags =
+ ice_flex_rxd_to_fdir_flags_vec(fdir_id0_3);
+
+ /* merge with fdir_flags */
+ flags = _mm_or_si128(flags, fdir_flags);
+
+ /* write fdir_id to mbuf */
+ rx_pkts[0]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 0);
+
+ rx_pkts[1]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 1);
+
+ rx_pkts[2]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 2);
+
+ rx_pkts[3]->hash.fdir.hi =
+ _mm_extract_epi32(fdir_id0_3, 3);
+ } /* if() on fdir_enabled */
+
/**
* At this point, we have the 4 sets of flags in the low 16-bits
* of each 32-bit value in flags.
* appropriate flags means that we have to do a shift and blend for
* each mbuf before we do the write.
*/
- rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x10);
- rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x10);
- rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x10);
- rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x10);
+ rearm0 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 8), 0x30);
+ rearm1 = _mm_blend_epi16(mbuf_init, _mm_slli_si128(flags, 4), 0x30);
+ rearm2 = _mm_blend_epi16(mbuf_init, flags, 0x30);
+ rearm3 = _mm_blend_epi16(mbuf_init, _mm_srli_si128(flags, 4), 0x30);
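/*
 * Note on the blend masks above: with the outer L4 checksum status at
 * bits 21-22, each per-packet flag word no longer fits in 16 bits, so the
 * blend selects two adjacent 16-bit lanes (imm8 0x30 instead of 0x10) and
 * writes the full 32-bit flag value into the ol_flags field of the rearm
 * data.
 */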
/* write the rearm data and the olflags in one write */
RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, ol_flags) !=
ice_rx_desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
uint32_t *ptype_tbl)
{
- const __m128i ptype_mask = _mm_set_epi16(0, ICE_RX_FLEX_DESC_PTYPE_M,
- 0, ICE_RX_FLEX_DESC_PTYPE_M,
- 0, ICE_RX_FLEX_DESC_PTYPE_M,
- 0, ICE_RX_FLEX_DESC_PTYPE_M);
+ const __m128i ptype_mask = _mm_set_epi16(ICE_RX_FLEX_DESC_PTYPE_M, 0,
+ ICE_RX_FLEX_DESC_PTYPE_M, 0,
+ ICE_RX_FLEX_DESC_PTYPE_M, 0,
+ ICE_RX_FLEX_DESC_PTYPE_M, 0);
__m128i ptype_01 = _mm_unpacklo_epi32(descs[0], descs[1]);
__m128i ptype_23 = _mm_unpacklo_epi32(descs[2], descs[3]);
__m128i ptype_all = _mm_unpacklo_epi64(ptype_01, ptype_23);
}
/**
+ * vPMD raw receive routine, only accepts nb_pkts >= ICE_DESCS_PER_LOOP
+ *
* Notice:
* - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
- * numbers of DD bits
+ * - nb_pkts is floor-aligned to a multiple of ICE_DESCS_PER_LOOP
*/
static inline uint16_t
_ice_recv_raw_pkts_vec(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts,
const __m128i eop_check = _mm_set_epi64x(0x0000000200000002LL,
0x0000000200000002LL);
- /* nb_pkts shall be less equal than ICE_MAX_RX_BURST */
- nb_pkts = RTE_MIN(nb_pkts, ICE_MAX_RX_BURST);
-
/* nb_pkts has to be floor-aligned to ICE_DESCS_PER_LOOP */
nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, ICE_DESCS_PER_LOOP);
* needs to load 2nd 16B of each desc for RSS hash parsing,
* will cause performance drop to get into this context.
*/
- if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+ if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_RSS_HASH) {
/* load bottom half of every 32B desc */
const __m128i raw_desc_bh3 =
return _ice_recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
}
-/* vPMD receive routine that reassembles scattered packets
+/**
+ * vPMD receive routine that reassembles a single burst of up to 32 scattered packets
+ *
* Notice:
* - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet
- * - nb_pkts > ICE_VPMD_RX_BURST, only scan ICE_VPMD_RX_BURST
- * numbers of DD bits
*/
-uint16_t
-ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts)
+static uint16_t
+ice_recv_scattered_burst_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
{
struct ice_rx_queue *rxq = rx_queue;
uint8_t split_flags[ICE_VPMD_RX_BURST] = {0};
&split_flags[i]);
}
+/**
+ * vPMD receive routine that reassembles scattered packets.
+ */
+uint16_t
+ice_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ uint16_t retval = 0;
+
+ while (nb_pkts > ICE_VPMD_RX_BURST) {
+ uint16_t burst;
+
+ burst = ice_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ ICE_VPMD_RX_BURST);
+ retval += burst;
+ nb_pkts -= burst;
+ if (burst < ICE_VPMD_RX_BURST)
+ return retval;
+ }
+
+ return retval + ice_recv_scattered_burst_vec(rx_queue,
+ rx_pkts + retval,
+ nb_pkts);
+}
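/*
 * Design note on the wrapper above: requests larger than ICE_VPMD_RX_BURST
 * are handled in bursts of ICE_VPMD_RX_BURST packets, which keeps the
 * split_flags[] array in ice_recv_scattered_burst_vec() bounded, and the
 * loop returns early as soon as a burst comes back short (no more completed
 * descriptors). The old code simply capped the request at ICE_VPMD_RX_BURST.
 */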
+
static inline void
ice_vtx1(volatile struct ice_tx_desc *txdp, struct rte_mbuf *pkt,
uint64_t flags)
nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
if (txq->nb_tx_free < txq->tx_free_thresh)
- ice_tx_free_bufs(txq);
+ ice_tx_free_bufs_vec(txq);
nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
nb_commit = nb_pkts;
txq->tx_tail = tx_id;
- ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}