From d9a2009a81089093645fea2e04b51dd37edf3e6f Mon Sep 17 00:00:00 2001
From: Konstantin Ananyev
Date: Tue, 22 Mar 2016 14:30:17 +0000
Subject: [PATCH] ixgbe: fix packet type from vector Rx

Current vector RX can't always set the packet_type properly.
To be more specific:
a) it never sets RTE_PTYPE_L2_ETHER
b) it doesn't handle tunnel ipv4/ipv6 case correctly.
c) it doesn't check is IXGBE_RXDADV_PKTTYPE_ETQF set or not.
While a) is pretty easy to fix, b) and c) are not that straightforward
in terms of SIMD ops (specially b).
So far I wasn't able to make vRX support packet_type properly without
noticeable performance loss.
So for now, just remove that functionality from vector RX and update
dev_supported_ptypes_get().

Fixes: 396254175854 ("mbuf: redefine packet type")

Signed-off-by: Konstantin Ananyev
Acked-by: Cunming Liang
---
 drivers/net/ixgbe/ixgbe_ethdev.c   |  4 +---
 drivers/net/ixgbe/ixgbe_rxtx_vec.c | 34 ++++++++++--------------------
 2 files changed, 12 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index c20338270b..6ce4111ff2 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -2961,9 +2961,7 @@ ixgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	if (dev->rx_pkt_burst == ixgbe_recv_pkts ||
 	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_single_alloc ||
 	    dev->rx_pkt_burst == ixgbe_recv_pkts_lro_bulk_alloc ||
-	    dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc ||
-	    dev->rx_pkt_burst == ixgbe_recv_pkts_vec ||
-	    dev->rx_pkt_burst == ixgbe_recv_scattered_pkts_vec)
+	    dev->rx_pkt_burst == ixgbe_recv_pkts_bulk_alloc)
 		return ptypes;
 	return NULL;
 }
diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
index ccd93c7b6b..50407043f8 100644
--- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c
+++ b/drivers/net/ixgbe/ixgbe_rxtx_vec.c
@@ -220,8 +220,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 				0, 0            /* ignore pkt_type field */
 			);
 	__m128i dd_check, eop_check;
-	__m128i desc_mask = _mm_set_epi32(0xFFFFFFFF, 0xFFFFFFFF,
-					  0xFFFFFFFF, 0xFFFF07F0);
 
 	/* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */
 	nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST);
@@ -259,9 +257,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		13, 12,      /* octet 12~13, 16 bits data_len */
 		0xFF, 0xFF,  /* skip high 16 bits pkt_len, zero out */
 		13, 12,      /* octet 12~13, low 16 bits pkt_len */
-		0xFF, 0xFF,  /* skip high 16 bits pkt_type */
-		1,           /* octet 1, 8 bits pkt_type field */
-		0            /* octet 0, 4 bits offset 4 pkt_type field */
+		0xFF, 0xFF,  /* skip 32 bit pkt_type */
+		0xFF, 0xFF
 		);
 
 	/* Cache is empty -> need to scan the buffer rings, but first move
@@ -278,7 +275,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
 			pos += RTE_IXGBE_DESCS_PER_LOOP,
 			rxdp += RTE_IXGBE_DESCS_PER_LOOP) {
-		__m128i descs0[RTE_IXGBE_DESCS_PER_LOOP];
 		__m128i descs[RTE_IXGBE_DESCS_PER_LOOP];
 		__m128i pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
 		__m128i zero, staterr, sterr_tmp1, sterr_tmp2;
@@ -289,7 +285,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 
 		/* Read desc statuses backwards to avoid race condition */
 		/* A.1 load 4 pkts desc */
-		descs0[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
+		descs[3] = _mm_loadu_si128((__m128i *)(rxdp + 3));
 
 		/* B.2 copy 2 mbuf point into rx_pkts */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos], mbp1);
@@ -297,10 +293,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		/* B.1 load 1 mbuf point */
 		mbp2 = _mm_loadu_si128((__m128i *)&sw_ring[pos+2]);
 
-		descs0[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
+		descs[2] = _mm_loadu_si128((__m128i *)(rxdp + 2));
 		/* B.1 load 2 mbuf point */
-		descs0[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
-		descs0[0] = _mm_loadu_si128((__m128i *)(rxdp));
+		descs[1] = _mm_loadu_si128((__m128i *)(rxdp + 1));
+		descs[0] = _mm_loadu_si128((__m128i *)(rxdp));
 
 		/* B.2 copy 2 mbuf point into rx_pkts */
 		_mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2);
@@ -312,14 +308,6 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 			rte_prefetch0(&rx_pkts[pos + 3]->cacheline1);
 		}
 
-		/* A* mask out 0~3 bits RSS type */
-		descs[3] = _mm_and_si128(descs0[3], desc_mask);
-		descs[2] = _mm_and_si128(descs0[2], desc_mask);
-
-		/* A* mask out 0~3 bits RSS type */
-		descs[1] = _mm_and_si128(descs0[1], desc_mask);
-		descs[0] = _mm_and_si128(descs0[0], desc_mask);
-
 		/* avoid compiler reorder optimization */
 		rte_compiler_barrier();
 
@@ -327,22 +315,22 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk);
 		pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk);
 
+		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
+		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
+		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
+
 		/* C.1 4=>2 filter staterr info only */
 		sterr_tmp2 = _mm_unpackhi_epi32(descs[3], descs[2]);
 		/* C.1 4=>2 filter staterr info only */
 		sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]);
 
 		/* set ol_flags with vlan packet type */
-		desc_to_olflags_v(descs0, &rx_pkts[pos]);
+		desc_to_olflags_v(descs, &rx_pkts[pos]);
 
 		/* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
 		pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust);
 		pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust);
 
-		/* D.1 pkt 1,2 convert format from desc to pktmbuf */
-		pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk);
-		pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk);
-
 		/* C.2 get 4 pkts staterr value */
 		zero = _mm_xor_si128(dd_check, dd_check);
 		staterr = _mm_unpacklo_epi32(sterr_tmp1, sterr_tmp2);
-- 
2.20.1
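
For context on points a)-c) in the commit message: below is a rough, self-contained
sketch (not taken from the driver) of the per-descriptor branching a full packet_type
classification needs on the scalar path. The RXD_PTYPE_* masks and the
rxd_pkt_info_to_ptype() helper are hypothetical placeholders standing in for the
hardware IXGBE_RXDADV_PKTTYPE_* descriptor bits; only the RTE_PTYPE_* values are real
mbuf constants. Branching like this per packet is what makes the same classification
awkward to express as a few SSE shuffles and masks inside _recv_raw_pkts_vec().

/*
 * Illustrative sketch only, NOT part of the patch above. The RXD_PTYPE_*
 * values are made-up placeholders for the IXGBE_RXDADV_PKTTYPE_* bits.
 */
#include <stdint.h>
#include <rte_mbuf.h>

#define RXD_PTYPE_ETQF   0x8000u /* placeholder: matched an EtherType filter */
#define RXD_PTYPE_TUNNEL 0x0800u /* placeholder: IP-in-IP tunnelled packet */
#define RXD_PTYPE_IPV4   0x0010u /* placeholder: outer header is IPv4 */
#define RXD_PTYPE_IPV6   0x0040u /* placeholder: outer header is IPv6 */

static inline uint32_t
rxd_pkt_info_to_ptype(uint32_t pkt_info)
{
	/* (a) every recognised packet is at least RTE_PTYPE_L2_ETHER */
	uint32_t ptype = RTE_PTYPE_L2_ETHER;

	/* (c) ETQF hit: the field holds a filter index, not a packet type */
	if (pkt_info & RXD_PTYPE_ETQF)
		return RTE_PTYPE_UNKNOWN;

	/* (b) tunnelled IP needs an outer L3 type plus a tunnel flag */
	if (pkt_info & RXD_PTYPE_TUNNEL) {
		ptype |= (pkt_info & RXD_PTYPE_IPV4) ?
			RTE_PTYPE_L3_IPV4 : RTE_PTYPE_L3_IPV6;
		ptype |= RTE_PTYPE_TUNNEL_IP;
		/* inner L3/L4 types need further descriptor bits, omitted here */
		return ptype;
	}

	/* plain, non-tunnelled L3 */
	if (pkt_info & RXD_PTYPE_IPV4)
		ptype |= RTE_PTYPE_L3_IPV4;
	else if (pkt_info & RXD_PTYPE_IPV6)
		ptype |= RTE_PTYPE_L3_IPV6;

	return ptype;
}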