X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_rxtx_vec_avx2.c;h=9725ac01804357c4ea4c68eb306681e77d81f63a;hb=05b405d581486651305551a9f7295f40388d95db;hp=be50677c2f90197d806d613f50757bb1f299693c;hpb=43840e9d699c52b1d225fb28b0ce6d76cdeb0580;p=dpdk.git diff --git a/drivers/net/ice/ice_rxtx_vec_avx2.c b/drivers/net/ice/ice_rxtx_vec_avx2.c index be50677c2f..9725ac0180 100644 --- a/drivers/net/ice/ice_rxtx_vec_avx2.c +++ b/drivers/net/ice/ice_rxtx_vec_avx2.c @@ -4,137 +4,41 @@ #include "ice_rxtx_vec_common.h" -#include +#include #ifndef __INTEL_COMPILER #pragma GCC diagnostic ignored "-Wcast-qual" #endif -static inline void +static __rte_always_inline void ice_rxq_rearm(struct ice_rx_queue *rxq) { - int i; - uint16_t rx_id; - volatile union ice_rx_flex_desc *rxdp; - struct ice_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; - - rxdp = rxq->rx_ring + rxq->rxrearm_start; - - /* Pull 'n' more MBUFs into the software ring */ - if (rte_mempool_get_bulk(rxq->mp, - (void *)rxep, - ICE_RXQ_REARM_THRESH) < 0) { - if (rxq->rxrearm_nb + ICE_RXQ_REARM_THRESH >= - rxq->nb_rx_desc) { - __m128i dma_addr0; - - dma_addr0 = _mm_setzero_si128(); - for (i = 0; i < ICE_DESCS_PER_LOOP; i++) { - rxep[i].mbuf = &rxq->fake_mbuf; - _mm_store_si128((__m128i *)&rxdp[i].read, - dma_addr0); - } - } - rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += - ICE_RXQ_REARM_THRESH; - return; - } - -#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC - struct rte_mbuf *mb0, *mb1; - __m128i dma_addr0, dma_addr1; - __m128i hdr_room = _mm_set_epi64x(RTE_PKTMBUF_HEADROOM, - RTE_PKTMBUF_HEADROOM); - /* Initialize the mbufs in vector, process 2 mbufs in one loop */ - for (i = 0; i < ICE_RXQ_REARM_THRESH; i += 2, rxep += 2) { - __m128i vaddr0, vaddr1; - - mb0 = rxep[0].mbuf; - mb1 = rxep[1].mbuf; - - /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ - RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != - offsetof(struct rte_mbuf, buf_addr) + 8); - vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); - vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); - - /* convert pa to dma_addr hdr/data */ - dma_addr0 = _mm_unpackhi_epi64(vaddr0, vaddr0); - dma_addr1 = _mm_unpackhi_epi64(vaddr1, vaddr1); - - /* add headroom to pa values */ - dma_addr0 = _mm_add_epi64(dma_addr0, hdr_room); - dma_addr1 = _mm_add_epi64(dma_addr1, hdr_room); - - /* flush desc with pa dma_addr */ - _mm_store_si128((__m128i *)&rxdp++->read, dma_addr0); - _mm_store_si128((__m128i *)&rxdp++->read, dma_addr1); - } -#else - struct rte_mbuf *mb0, *mb1, *mb2, *mb3; - __m256i dma_addr0_1, dma_addr2_3; - __m256i hdr_room = _mm256_set1_epi64x(RTE_PKTMBUF_HEADROOM); - /* Initialize the mbufs in vector, process 4 mbufs in one loop */ - for (i = 0; i < ICE_RXQ_REARM_THRESH; - i += 4, rxep += 4, rxdp += 4) { - __m128i vaddr0, vaddr1, vaddr2, vaddr3; - __m256i vaddr0_1, vaddr2_3; - - mb0 = rxep[0].mbuf; - mb1 = rxep[1].mbuf; - mb2 = rxep[2].mbuf; - mb3 = rxep[3].mbuf; - - /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */ - RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) != - offsetof(struct rte_mbuf, buf_addr) + 8); - vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr); - vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr); - vaddr2 = _mm_loadu_si128((__m128i *)&mb2->buf_addr); - vaddr3 = _mm_loadu_si128((__m128i *)&mb3->buf_addr); - - /** - * merge 0 & 1, by casting 0 to 256-bit and inserting 1 - * into the high lanes. 
Similarly for 2 & 3 - */ - vaddr0_1 = - _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr0), - vaddr1, 1); - vaddr2_3 = - _mm256_inserti128_si256(_mm256_castsi128_si256(vaddr2), - vaddr3, 1); - - /* convert pa to dma_addr hdr/data */ - dma_addr0_1 = _mm256_unpackhi_epi64(vaddr0_1, vaddr0_1); - dma_addr2_3 = _mm256_unpackhi_epi64(vaddr2_3, vaddr2_3); - - /* add headroom to pa values */ - dma_addr0_1 = _mm256_add_epi64(dma_addr0_1, hdr_room); - dma_addr2_3 = _mm256_add_epi64(dma_addr2_3, hdr_room); - - /* flush desc with pa dma_addr */ - _mm256_store_si256((__m256i *)&rxdp->read, dma_addr0_1); - _mm256_store_si256((__m256i *)&(rxdp + 2)->read, dma_addr2_3); - } - -#endif - - rxq->rxrearm_start += ICE_RXQ_REARM_THRESH; - if (rxq->rxrearm_start >= rxq->nb_rx_desc) - rxq->rxrearm_start = 0; - - rxq->rxrearm_nb -= ICE_RXQ_REARM_THRESH; - - rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? - (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + return ice_rxq_rearm_common(rxq, false); +} - /* Update the tail pointer on the NIC */ - ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id); +static __rte_always_inline __m256i +ice_flex_rxd_to_fdir_flags_vec_avx2(const __m256i fdir_id0_7) +{ +#define FDID_MIS_MAGIC 0xFFFFFFFF + RTE_BUILD_BUG_ON(PKT_RX_FDIR != (1 << 2)); + RTE_BUILD_BUG_ON(PKT_RX_FDIR_ID != (1 << 13)); + const __m256i pkt_fdir_bit = _mm256_set1_epi32(PKT_RX_FDIR | + PKT_RX_FDIR_ID); + /* desc->flow_id field == 0xFFFFFFFF means fdir mismatch */ + const __m256i fdir_mis_mask = _mm256_set1_epi32(FDID_MIS_MAGIC); + __m256i fdir_mask = _mm256_cmpeq_epi32(fdir_id0_7, + fdir_mis_mask); + /* this XOR op results to bit-reverse the fdir_mask */ + fdir_mask = _mm256_xor_si256(fdir_mask, fdir_mis_mask); + const __m256i fdir_flags = _mm256_and_si256(fdir_mask, pkt_fdir_bit); + + return fdir_flags; } -static inline uint16_t +static __rte_always_inline uint16_t _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts, uint8_t *split_packet) + uint16_t nb_pkts, uint8_t *split_packet, + bool offload) { #define ICE_DESCS_PER_LOOP_AVX 8 @@ -191,8 +95,8 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, const __m256i shuf_msk = _mm256_set_epi8 (/* first descriptor */ - 15, 14, - 13, 12, /* octet 12~15, 32 bits rss */ + 0xFF, 0xFF, + 0xFF, 0xFF, /* rss hash parsed separately */ 11, 10, /* octet 10~11, 16 bits vlan_macip */ 5, 4, /* octet 4~5, 16 bits data_len */ 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */ @@ -200,8 +104,8 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, 0xFF, 0xFF, /* pkt_type set as unknown */ 0xFF, 0xFF, /*pkt_type set as unknown */ /* second descriptor */ - 15, 14, - 13, 12, /* octet 12~15, 32 bits rss */ + 0xFF, 0xFF, + 0xFF, 0xFF, /* rss hash parsed separately */ 11, 10, /* octet 10~11, 16 bits vlan_macip */ 5, 4, /* octet 4~5, 16 bits data_len */ 0xFF, 0xFF, /* skip hi 16 bits pkt_len, zero out */ @@ -232,43 +136,88 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, * bit13 is for VLAN indication. */ const __m256i flags_mask = - _mm256_set1_epi32((7 << 4) | (1 << 12) | (1 << 13)); + _mm256_set1_epi32((0xF << 4) | (1 << 12) | (1 << 13)); /** * data to be shuffled by the result of the flags mask shifted by 4 * bits. This gives use the l3_l4 flags. 
*/ - const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, - /* shift right 1 bit to make sure it not exceed 255 */ - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, - /* second 128-bits */ - 0, 0, 0, 0, 0, 0, 0, 0, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD | - PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, - (PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1); + const __m256i l3_l4_flags_shuf = + _mm256_set_epi8((PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | + PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + /** + * second 128-bits + * shift right 20 bits to use the low two bits to indicate + * outer checksum status + * shift right 1 bit to make sure it not exceed 255 + */ + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | 
PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_BAD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_BAD | + PKT_RX_IP_CKSUM_GOOD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_BAD) >> 1, + (PKT_RX_OUTER_L4_CKSUM_GOOD >> 20 | PKT_RX_L4_CKSUM_GOOD | + PKT_RX_IP_CKSUM_GOOD) >> 1); const __m256i cksum_mask = - _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | - PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD | - PKT_RX_EIP_CKSUM_BAD); + _mm256_set1_epi32(PKT_RX_IP_CKSUM_MASK | + PKT_RX_L4_CKSUM_MASK | + PKT_RX_OUTER_IP_CKSUM_BAD | + PKT_RX_OUTER_L4_CKSUM_MASK); /** * data to be shuffled by result of flag mask, shifted down 12. * If RSS(bit12)/VLAN(bit13) are set, @@ -437,30 +386,178 @@ _ice_recv_raw_pkts_vec_avx2(struct ice_rx_queue *rxq, struct rte_mbuf **rx_pkts, */ __m256i status0_7 = _mm256_unpacklo_epi64(status4_7, status0_3); + __m256i mbuf_flags = _mm256_set1_epi32(0); - /* now do flag manipulation */ + if (offload) { + /* now do flag manipulation */ + + /* get only flag/error bits we want */ + const __m256i flag_bits = + _mm256_and_si256(status0_7, flags_mask); + /** + * l3_l4_error flags, shuffle, then shift to correct adjustment + * of flags in flags_shuf, and finally mask out extra bits + */ + __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, + _mm256_srli_epi32(flag_bits, 4)); + l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); + + __m256i l4_outer_mask = _mm256_set1_epi32(0x6); + __m256i l4_outer_flags = + _mm256_and_si256(l3_l4_flags, l4_outer_mask); + l4_outer_flags = _mm256_slli_epi32(l4_outer_flags, 20); + + __m256i l3_l4_mask = _mm256_set1_epi32(~0x6); + + l3_l4_flags = _mm256_and_si256(l3_l4_flags, l3_l4_mask); + l3_l4_flags = _mm256_or_si256(l3_l4_flags, l4_outer_flags); + l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); + /* set rss and vlan flags */ + const __m256i rss_vlan_flag_bits = + _mm256_srli_epi32(flag_bits, 12); + const __m256i rss_vlan_flags = + _mm256_shuffle_epi8(rss_vlan_flags_shuf, + rss_vlan_flag_bits); + + /* merge flags */ + mbuf_flags = _mm256_or_si256(l3_l4_flags, + rss_vlan_flags); + } + + if (rxq->fdir_enabled) { + const __m256i fdir_id4_7 = + _mm256_unpackhi_epi32(raw_desc6_7, raw_desc4_5); + + const __m256i fdir_id0_3 = + _mm256_unpackhi_epi32(raw_desc2_3, raw_desc0_1); + + const __m256i fdir_id0_7 = + _mm256_unpackhi_epi64(fdir_id4_7, fdir_id0_3); + + const __m256i fdir_flags = + ice_flex_rxd_to_fdir_flags_vec_avx2(fdir_id0_7); + + /* merge with fdir_flags */ + mbuf_flags = _mm256_or_si256(mbuf_flags, fdir_flags); + + /* write to mbuf: have to use scalar store here */ + rx_pkts[i + 0]->hash.fdir.hi = + 
_mm256_extract_epi32(fdir_id0_7, 3); + + rx_pkts[i + 1]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 7); + + rx_pkts[i + 2]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 2); + + rx_pkts[i + 3]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 6); + + rx_pkts[i + 4]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 1); + + rx_pkts[i + 5]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 5); + + rx_pkts[i + 6]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 0); + + rx_pkts[i + 7]->hash.fdir.hi = + _mm256_extract_epi32(fdir_id0_7, 4); + } /* if() on fdir_enabled */ + + if (offload) { +#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC + /** + * needs to load 2nd 16B of each desc for RSS hash parsing, + * will cause performance drop to get into this context. + */ + if (rxq->vsi->adapter->pf.dev_data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_RSS_HASH) { + /* load bottom half of every 32B desc */ + const __m128i raw_desc_bh7 = + _mm_load_si128 + ((void *)(&rxdp[7].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh6 = + _mm_load_si128 + ((void *)(&rxdp[6].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh5 = + _mm_load_si128 + ((void *)(&rxdp[5].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh4 = + _mm_load_si128 + ((void *)(&rxdp[4].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh3 = + _mm_load_si128 + ((void *)(&rxdp[3].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh2 = + _mm_load_si128 + ((void *)(&rxdp[2].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh1 = + _mm_load_si128 + ((void *)(&rxdp[1].wb.status_error1)); + rte_compiler_barrier(); + const __m128i raw_desc_bh0 = + _mm_load_si128 + ((void *)(&rxdp[0].wb.status_error1)); + + __m256i raw_desc_bh6_7 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh6), + raw_desc_bh7, 1); + __m256i raw_desc_bh4_5 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh4), + raw_desc_bh5, 1); + __m256i raw_desc_bh2_3 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh2), + raw_desc_bh3, 1); + __m256i raw_desc_bh0_1 = + _mm256_inserti128_si256 + (_mm256_castsi128_si256(raw_desc_bh0), + raw_desc_bh1, 1); + + /** + * to shift the 32b RSS hash value to the + * highest 32b of each 128b before mask + */ + __m256i rss_hash6_7 = + _mm256_slli_epi64(raw_desc_bh6_7, 32); + __m256i rss_hash4_5 = + _mm256_slli_epi64(raw_desc_bh4_5, 32); + __m256i rss_hash2_3 = + _mm256_slli_epi64(raw_desc_bh2_3, 32); + __m256i rss_hash0_1 = + _mm256_slli_epi64(raw_desc_bh0_1, 32); + + __m256i rss_hash_msk = + _mm256_set_epi32(0xFFFFFFFF, 0, 0, 0, + 0xFFFFFFFF, 0, 0, 0); + + rss_hash6_7 = _mm256_and_si256 + (rss_hash6_7, rss_hash_msk); + rss_hash4_5 = _mm256_and_si256 + (rss_hash4_5, rss_hash_msk); + rss_hash2_3 = _mm256_and_si256 + (rss_hash2_3, rss_hash_msk); + rss_hash0_1 = _mm256_and_si256 + (rss_hash0_1, rss_hash_msk); + + mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7); + mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5); + mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3); + mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1); + } /* if() on RSS hash parsing */ +#endif + } - /* get only flag/error bits we want */ - const __m256i flag_bits = - _mm256_and_si256(status0_7, flags_mask); - /** - * l3_l4_error flags, shuffle, then shift to correct adjustment - * of flags in flags_shuf, and finally mask out extra bits - */ - __m256i l3_l4_flags = _mm256_shuffle_epi8(l3_l4_flags_shuf, - _mm256_srli_epi32(flag_bits, 4)); - 
l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1); - l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask); - /* set rss and vlan flags */ - const __m256i rss_vlan_flag_bits = - _mm256_srli_epi32(flag_bits, 12); - const __m256i rss_vlan_flags = - _mm256_shuffle_epi8(rss_vlan_flags_shuf, - rss_vlan_flag_bits); - - /* merge flags */ - const __m256i mbuf_flags = _mm256_or_si256(l3_l4_flags, - rss_vlan_flags); /** * At this point, we have the 8 sets of flags in the low 16-bits * of each 32-bit value in vlan0. @@ -611,7 +708,16 @@ uint16_t ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { - return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, nb_pkts, NULL); + return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, + nb_pkts, NULL, false); +} + +uint16_t +ice_recv_pkts_vec_avx2_offload(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _ice_recv_raw_pkts_vec_avx2(rx_queue, rx_pkts, + nb_pkts, NULL, true); } /** @@ -619,16 +725,16 @@ ice_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, * Notice: * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet */ -static uint16_t +static __rte_always_inline uint16_t ice_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts) + uint16_t nb_pkts, bool offload) { struct ice_rx_queue *rxq = rx_queue; uint8_t split_flags[ICE_VPMD_RX_BURST] = {0}; /* get some new buffers */ uint16_t nb_bufs = _ice_recv_raw_pkts_vec_avx2(rxq, rx_pkts, nb_pkts, - split_flags); + split_flags, offload); if (nb_bufs == 0) return 0; @@ -661,48 +767,74 @@ ice_recv_scattered_burst_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, * Notice: * - nb_pkts < ICE_DESCS_PER_LOOP, just return no packet */ -uint16_t -ice_recv_scattered_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts) +static __rte_always_inline uint16_t +ice_recv_scattered_pkts_vec_avx2_common(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, + bool offload) { uint16_t retval = 0; while (nb_pkts > ICE_VPMD_RX_BURST) { uint16_t burst = ice_recv_scattered_burst_vec_avx2(rx_queue, - rx_pkts + retval, ICE_VPMD_RX_BURST); + rx_pkts + retval, ICE_VPMD_RX_BURST, offload); retval += burst; nb_pkts -= burst; if (burst < ICE_VPMD_RX_BURST) return retval; } return retval + ice_recv_scattered_burst_vec_avx2(rx_queue, - rx_pkts + retval, nb_pkts); + rx_pkts + retval, nb_pkts, offload); +} + +uint16_t +ice_recv_scattered_pkts_vec_avx2(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return ice_recv_scattered_pkts_vec_avx2_common(rx_queue, + rx_pkts, + nb_pkts, + false); } -static inline void +uint16_t +ice_recv_scattered_pkts_vec_avx2_offload(void *rx_queue, + struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return ice_recv_scattered_pkts_vec_avx2_common(rx_queue, + rx_pkts, + nb_pkts, + true); +} + +static __rte_always_inline void ice_vtx1(volatile struct ice_tx_desc *txdp, - struct rte_mbuf *pkt, uint64_t flags) + struct rte_mbuf *pkt, uint64_t flags, bool offload) { uint64_t high_qw = (ICE_TX_DESC_DTYPE_DATA | ((uint64_t)flags << ICE_TXD_QW1_CMD_S) | ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S)); + if (offload) + ice_txd_enable_offload(pkt, &high_qw); __m128i descriptor = _mm_set_epi64x(high_qw, - pkt->buf_physaddr + pkt->data_off); + pkt->buf_iova + pkt->data_off); _mm_store_si128((__m128i *)txdp, descriptor); } -static inline void +static __rte_always_inline void ice_vtx(volatile struct ice_tx_desc *txdp, - struct rte_mbuf **pkt, uint16_t nb_pkts, 
uint64_t flags) + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags, bool offload) { const uint64_t hi_qw_tmpl = (ICE_TX_DESC_DTYPE_DATA | ((uint64_t)flags << ICE_TXD_QW1_CMD_S)); /* if unaligned on 32-bit boundary, do one to align */ if (((uintptr_t)txdp & 0x1F) != 0 && nb_pkts != 0) { - ice_vtx1(txdp, *pkt, flags); + ice_vtx1(txdp, *pkt, flags, offload); nb_pkts--, txdp++, pkt++; } @@ -712,45 +844,53 @@ ice_vtx(volatile struct ice_tx_desc *txdp, hi_qw_tmpl | ((uint64_t)pkt[3]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S); + if (offload) + ice_txd_enable_offload(pkt[3], &hi_qw3); uint64_t hi_qw2 = hi_qw_tmpl | ((uint64_t)pkt[2]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S); + if (offload) + ice_txd_enable_offload(pkt[2], &hi_qw2); uint64_t hi_qw1 = hi_qw_tmpl | ((uint64_t)pkt[1]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S); + if (offload) + ice_txd_enable_offload(pkt[1], &hi_qw1); uint64_t hi_qw0 = hi_qw_tmpl | ((uint64_t)pkt[0]->data_len << ICE_TXD_QW1_TX_BUF_SZ_S); + if (offload) + ice_txd_enable_offload(pkt[0], &hi_qw0); __m256i desc2_3 = _mm256_set_epi64x (hi_qw3, - pkt[3]->buf_physaddr + pkt[3]->data_off, + pkt[3]->buf_iova + pkt[3]->data_off, hi_qw2, - pkt[2]->buf_physaddr + pkt[2]->data_off); + pkt[2]->buf_iova + pkt[2]->data_off); __m256i desc0_1 = _mm256_set_epi64x (hi_qw1, - pkt[1]->buf_physaddr + pkt[1]->data_off, + pkt[1]->buf_iova + pkt[1]->data_off, hi_qw0, - pkt[0]->buf_physaddr + pkt[0]->data_off); + pkt[0]->buf_iova + pkt[0]->data_off); _mm256_store_si256((void *)(txdp + 2), desc2_3); _mm256_store_si256((void *)txdp, desc0_1); } /* do any last ones */ while (nb_pkts) { - ice_vtx1(txdp, *pkt, flags); + ice_vtx1(txdp, *pkt, flags, offload); txdp++, pkt++, nb_pkts--; } } -static inline uint16_t +static __rte_always_inline uint16_t ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts) + uint16_t nb_pkts, bool offload) { struct ice_tx_queue *txq = (struct ice_tx_queue *)tx_queue; volatile struct ice_tx_desc *txdp; @@ -763,7 +903,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); if (txq->nb_tx_free < txq->tx_free_thresh) - ice_tx_free_bufs(txq); + ice_tx_free_bufs_vec(txq); nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); if (unlikely(nb_pkts == 0)) @@ -779,11 +919,11 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, if (nb_commit >= n) { ice_tx_backlog_entry(txep, tx_pkts, n); - ice_vtx(txdp, tx_pkts, n - 1, flags); + ice_vtx(txdp, tx_pkts, n - 1, flags, offload); tx_pkts += (n - 1); txdp += (n - 1); - ice_vtx1(txdp, *tx_pkts++, rs); + ice_vtx1(txdp, *tx_pkts++, rs, offload); nb_commit = (uint16_t)(nb_commit - n); @@ -797,7 +937,7 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, ice_tx_backlog_entry(txep, tx_pkts, nb_commit); - ice_vtx(txdp, tx_pkts, nb_commit, flags); + ice_vtx(txdp, tx_pkts, nb_commit, flags, offload); tx_id = (uint16_t)(tx_id + nb_commit); if (tx_id > txq->tx_next_rs) { @@ -810,14 +950,14 @@ ice_xmit_fixed_burst_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, txq->tx_tail = tx_id; - ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail); + ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail); return nb_pkts; } -uint16_t -ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts) +static __rte_always_inline uint16_t +ice_xmit_pkts_vec_avx2_common(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts, bool offload) { uint16_t nb_tx = 0; struct ice_tx_queue *txq = 
(struct ice_tx_queue *)tx_queue; @@ -827,7 +967,7 @@ ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, num = (uint16_t)RTE_MIN(nb_pkts, txq->tx_rs_thresh); ret = ice_xmit_fixed_burst_vec_avx2(tx_queue, &tx_pkts[nb_tx], - num); + num, offload); nb_tx += ret; nb_pkts -= ret; if (ret < num) @@ -836,3 +976,17 @@ ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, return nb_tx; } + +uint16_t +ice_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, false); +} + +uint16_t +ice_xmit_pkts_vec_avx2_offload(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + return ice_xmit_pkts_vec_avx2_common(tx_queue, tx_pkts, nb_pkts, true); +}
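
The rearm body removed at the top of this patch (now delegated to ice_rxq_rearm_common(rxq, false)) depends on the mbuf keeping its virtual address and its IOVA in adjacent 64-bit fields, so a single 128-bit load fetches both, _mm_unpackhi_epi64 keeps only the IOVA, and one vector add applies the headroom. The sketch below is not DPDK code: demo_mbuf, demo_rx_desc and DEMO_HEADROOM are stand-ins for rte_mbuf, the ice Rx descriptor and RTE_PKTMBUF_HEADROOM, and it assumes a 64-bit build so the two fields sit back to back.

#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

#define DEMO_HEADROOM 128	/* stand-in for RTE_PKTMBUF_HEADROOM */

struct demo_mbuf {
	void     *buf_addr;	/* virtual address: low 64 bits of the load */
	uint64_t  buf_iova;	/* IO address: high 64 bits of the load */
};

struct demo_rx_desc {
	uint64_t pkt_addr;
	uint64_t hdr_addr;
};

static void
demo_rearm_one(struct demo_rx_desc *rxd, const struct demo_mbuf *mb)
{
	const __m128i hdr_room = _mm_set1_epi64x(DEMO_HEADROOM);

	/* one 128-bit load pulls buf_addr (lo) and buf_iova (hi) together */
	__m128i vaddr = _mm_loadu_si128((const __m128i *)&mb->buf_addr);
	/* keep only the IOVA, replicated into both 64-bit lanes */
	__m128i dma_addr = _mm_unpackhi_epi64(vaddr, vaddr);
	/* point both descriptor addresses past the headroom */
	dma_addr = _mm_add_epi64(dma_addr, hdr_room);
	_mm_storeu_si128((__m128i *)rxd, dma_addr);
}

int
main(void)
{
	struct demo_mbuf mb = {
		.buf_addr = (void *)(uintptr_t)0x700000,
		.buf_iova = 0x1000,
	};
	struct demo_rx_desc rxd;

	demo_rearm_one(&rxd, &mb);
	printf("pkt_addr=0x%llx hdr_addr=0x%llx\n",
	       (unsigned long long)rxd.pkt_addr,
	       (unsigned long long)rxd.hdr_addr);
	return 0;
}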
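
The enlarged l3_l4_flags_shuf table works because _mm256_shuffle_epi8 is effectively a 16-entry byte lookup table per 128-bit lane: the low nibble of each index byte selects a table byte. The patch comments also note the ">> 1 in the table, << 1 after the shuffle" trick, needed because some flag combinations do not fit in 8 bits. The standalone sketch below reproduces only that pattern; the DEMO_* values and the 2-bit error-code layout are simplified placeholders, not the real PKT_RX_* flags or the ice error-bit positions.

#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

#define DEMO_IP_BAD   (1u << 4)
#define DEMO_IP_GOOD  (1u << 7)
#define DEMO_L4_BAD   (1u << 3)
#define DEMO_L4_GOOD  (1u << 8)	/* above 0xFF -> forces the >>1 trick */

int
main(void)
{
	/* table entries stored pre-shifted so each fits in one byte;
	 * index = (l4_bad << 1) | ip_bad */
	const char t0 = (char)((DEMO_L4_GOOD | DEMO_IP_GOOD) >> 1);
	const char t1 = (char)((DEMO_L4_GOOD | DEMO_IP_BAD) >> 1);
	const char t2 = (char)((DEMO_L4_BAD | DEMO_IP_GOOD) >> 1);
	const char t3 = (char)((DEMO_L4_BAD | DEMO_IP_BAD) >> 1);
	/* vpshufb indexes within each 128-bit lane, so duplicate the table */
	const __m256i lut = _mm256_setr_epi8(
		t0, t1, t2, t3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		t0, t1, t2, t3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);

	/* eight packets, one error code in the low byte of each dword */
	const __m256i codes = _mm256_setr_epi32(0, 1, 2, 3, 3, 2, 1, 0);

	__m256i flags = _mm256_shuffle_epi8(lut, codes);
	/* undo the >>1 used to squeeze the entries into bytes */
	flags = _mm256_slli_epi32(flags, 1);
	/* keep only the demo checksum bits (the driver ANDs with cksum_mask) */
	flags = _mm256_and_si256(flags, _mm256_set1_epi32(0x1FF));

	uint32_t out[8];
	_mm256_storeu_si256((__m256i *)out, flags);
	for (int i = 0; i < 8; i++)
		printf("pkt %d: ol_flags=0x%03x\n", i, out[i]);
	return 0;
}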
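
ice_flex_rxd_to_fdir_flags_vec_avx2() turns the "flow ID == 0xFFFFFFFF means no filter match" convention into per-packet flags without branches: _mm256_cmpeq_epi32 produces an all-ones lane exactly for the misses, XOR-ing that result with the same all-ones constant inverts it, and an AND leaves the FDIR flag bits only in the matching lanes. The sketch below is not DPDK code; DEMO_FDIR and DEMO_FDIR_ID are placeholders whose values merely mirror the build-time assertions in the patch.

#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

#define DEMO_FDIR      (1u << 2)	/* placeholder for PKT_RX_FDIR */
#define DEMO_FDIR_ID   (1u << 13)	/* placeholder for PKT_RX_FDIR_ID */
#define FDID_MIS_MAGIC 0xFFFFFFFFu

static __m256i
demo_fdir_flags(__m256i fdir_id0_7)
{
	const __m256i fdir_bits = _mm256_set1_epi32(DEMO_FDIR | DEMO_FDIR_ID);
	const __m256i mis = _mm256_set1_epi32((int)FDID_MIS_MAGIC);

	/* all-ones lane where the descriptor reported a miss */
	__m256i mask = _mm256_cmpeq_epi32(fdir_id0_7, mis);
	/* invert: all-ones lane where a filter actually matched */
	mask = _mm256_xor_si256(mask, mis);
	return _mm256_and_si256(mask, fdir_bits);
}

int
main(void)
{
	uint32_t ids[8] = { 7, 0xFFFFFFFFu, 42, 0xFFFFFFFFu,
			    0xFFFFFFFFu, 1, 0xFFFFFFFFu, 9 };
	uint32_t out[8];

	__m256i v = _mm256_loadu_si256((const __m256i *)ids);
	_mm256_storeu_si256((__m256i *)out, demo_fdir_flags(v));

	for (int i = 0; i < 8; i++)
		printf("flow_id=0x%08x -> flags=0x%04x\n", ids[i], out[i]);
	return 0;
}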
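
In the new RSS-hash branch of the offload path, each 32-bit hash read from the second 16 bytes of the descriptor is moved into the top dword of its 64-bit element with _mm256_slli_epi64(.., 32), everything else is masked away, and the result is OR-ed into the already-assembled mbuf words. The sketch below shows only that shift/mask/OR pattern with made-up filler values; the field offsets of the real flex descriptor are not modelled.

#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

int
main(void)
{
	/* dwords of the second 16 bytes of two descriptors; indexes 2 and 6
	 * play the role of the RSS hash fields, the rest is filler */
	uint32_t bh[8] = { 0x11111111, 0x22222222, 0xAAAAAAAA, 0x44444444,
			   0x55555555, 0x66666666, 0xBBBBBBBB, 0x88888888 };
	uint32_t out[8];

	__m256i raw = _mm256_loadu_si256((const __m256i *)bh);
	__m256i mb = _mm256_setzero_si256();	/* stands in for mb0_1 etc. */

	/* move each hash dword up by 32 bits inside its 64-bit element */
	__m256i rss = _mm256_slli_epi64(raw, 32);
	/* keep only the top dword of each 128-bit half */
	const __m256i msk = _mm256_set_epi32((int)0xFFFFFFFF, 0, 0, 0,
					     (int)0xFFFFFFFF, 0, 0, 0);
	rss = _mm256_and_si256(rss, msk);
	/* drop the hashes into the assembled mbuf words */
	mb = _mm256_or_si256(mb, rss);

	_mm256_storeu_si256((__m256i *)out, mb);
	printf("hash lane0=0x%08x lane1=0x%08x\n", out[3], out[7]);
	return 0;
}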
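
On the Tx side, ice_vtx1()/ice_vtx() build the command/size quadword with shifts, pair it with buf_iova + data_off (the patch replaces the legacy buf_physaddr name), and write two 16-byte descriptors with one 256-bit store. The sketch below is only a shape-of-the-pattern demo: demo_tx_desc and the DEMO_* shift values are placeholders, not the real ice descriptor layout or ICE_TXD_QW1_* macros.

#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

#define DEMO_DTYPE_DATA  0xFull
#define DEMO_CMD_SHIFT   4	/* placeholder for ICE_TXD_QW1_CMD_S */
#define DEMO_BUFSZ_SHIFT 34	/* placeholder for ICE_TXD_QW1_TX_BUF_SZ_S */

struct demo_tx_desc {
	uint64_t buf_addr;
	uint64_t cmd_type_offset_bsz;
};

int
main(void)
{
	uint64_t iova[2] = { 0x10000, 0x20000 };	/* pretend buf_iova + data_off */
	uint16_t len[2] = { 64, 128 };
	uint64_t flags = 0x3;				/* pretend EOP/RS-style bits */
	struct demo_tx_desc txd[2] __attribute__((aligned(32)));

	uint64_t hi_qw0 = DEMO_DTYPE_DATA | (flags << DEMO_CMD_SHIFT) |
			  ((uint64_t)len[0] << DEMO_BUFSZ_SHIFT);
	uint64_t hi_qw1 = DEMO_DTYPE_DATA | (flags << DEMO_CMD_SHIFT) |
			  ((uint64_t)len[1] << DEMO_BUFSZ_SHIFT);

	/* _mm256_set_epi64x takes arguments from the highest lane down, so
	 * descriptor 0 ends up in the low 128 bits of the store */
	__m256i desc0_1 = _mm256_set_epi64x((long long)hi_qw1, (long long)iova[1],
					    (long long)hi_qw0, (long long)iova[0]);
	_mm256_store_si256((__m256i *)txd, desc0_1);

	for (int i = 0; i < 2; i++)
		printf("desc%d: addr=0x%llx qw1=0x%llx\n", i,
		       (unsigned long long)txd[i].buf_addr,
		       (unsigned long long)txd[i].cmd_type_offset_bsz);
	return 0;
}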