X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fiavf%2Fiavf_rxtx_vec_avx2.c;h=cdb51397ff742361479e4a31d7f4366b5b4126ad;hb=fdfde7a4a0f8be8f79c82ee91da9041acd64a798;hp=e5e0fd3095c15beb08cdcb34d0e3c606e922bdcd;hpb=f978c1c9b3b5e346c9a9f50cc8ee636bee4e1bdd;p=dpdk.git

diff --git a/drivers/net/iavf/iavf_rxtx_vec_avx2.c b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
index e5e0fd3095..cdb51397ff 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_avx2.c
+++ b/drivers/net/iavf/iavf_rxtx_vec_avx2.c
@@ -52,8 +52,8 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq)
 		mb0 = rxp[0];
 		mb1 = rxp[1];
 
-		/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
-		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+		/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+		RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
 				offsetof(struct rte_mbuf, buf_addr) + 8);
 		vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
 		vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -85,8 +85,8 @@ iavf_rxq_rearm(struct iavf_rx_queue *rxq)
 			mb2 = rxp[2];
 			mb3 = rxp[3];
 
-			/* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
-			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_physaddr) !=
+			/* load buf_addr(lo 64bit) and buf_iova(hi 64bit) */
+			RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, buf_iova) !=
 					offsetof(struct rte_mbuf, buf_addr) + 8);
 			vaddr0 = _mm_loadu_si128((__m128i *)&mb0->buf_addr);
 			vaddr1 = _mm_loadu_si128((__m128i *)&mb1->buf_addr);
@@ -264,24 +264,24 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
 	 */
 	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
 			/* shift right 1 bit to make sure it not exceed 255 */
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
 			 PKT_RX_IP_CKSUM_BAD) >> 1,
-			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
 			 PKT_RX_L4_CKSUM_BAD) >> 1,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
-			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
 			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
 			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
 			PKT_RX_IP_CKSUM_BAD >> 1,
 			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD) >> 1,
 			/* second 128-bits */
 			0, 0, 0, 0, 0, 0, 0, 0,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
 			 PKT_RX_IP_CKSUM_BAD) >> 1,
-			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD |
+			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD |
 			 PKT_RX_L4_CKSUM_BAD) >> 1,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
-			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_EIP_CKSUM_BAD) >> 1,
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
+			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_OUTER_IP_CKSUM_BAD) >> 1,
 			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
 			(PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD) >> 1,
 			PKT_RX_IP_CKSUM_BAD >> 1,
@@ -290,7 +290,7 @@ _iavf_recv_raw_pkts_vec_avx2(struct iavf_rx_queue *rxq,
 	const __m256i cksum_mask =
 		 _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
 				   PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
-				   PKT_RX_EIP_CKSUM_BAD);
+				   PKT_RX_OUTER_IP_CKSUM_BAD);
 
 	RTE_SET_USED(avx_aligned); /* for 32B descriptors we don't use this */
 
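The two rearm hunks above are a plain rename (buf_physaddr becomes buf_iova), but they lean on a layout contract worth spelling out: the RTE_BUILD_BUG_ON only holds because the IOVA field sits exactly 8 bytes after buf_addr, so a single 16-byte load fills both halves of a descriptor-sized register. A minimal standalone sketch of the same trick, using hypothetical toy_* names rather than real DPDK types:

#include <stddef.h>
#include <stdint.h>
#include <emmintrin.h>

struct toy_buf {
	void     *vaddr; /* becomes the low 64 bits of the load */
	uint64_t  iova;  /* must immediately follow: high 64 bits */
};

/* the same guarantee the patch asserts with RTE_BUILD_BUG_ON */
_Static_assert(offsetof(struct toy_buf, iova) ==
	       offsetof(struct toy_buf, vaddr) + 8,
	       "iova must directly follow vaddr");

static inline void toy_rearm_one(__m128i *desc, const struct toy_buf *b)
{
	/* one unaligned 16B load grabs vaddr and iova together */
	__m128i dma_addr = _mm_loadu_si128((const __m128i *)&b->vaddr);

	_mm_storeu_si128(desc, dma_addr);
}
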
@@ -742,13 +742,13 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 	 */
 	const __m256i l3_l4_flags_shuf = _mm256_set_epi8(0, 0, 0, 0, 0, 0, 0, 0,
 			/* shift right 1 bit to make sure it not exceed 255 */
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
 			 PKT_RX_IP_CKSUM_BAD) >> 1,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
 			 PKT_RX_IP_CKSUM_GOOD) >> 1,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
 			 PKT_RX_IP_CKSUM_BAD) >> 1,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
 			 PKT_RX_IP_CKSUM_GOOD) >> 1,
 			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
 			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
@@ -756,13 +756,13 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 			(PKT_RX_L4_CKSUM_GOOD | PKT_RX_IP_CKSUM_GOOD) >> 1,
 			/* second 128-bits */
 			0, 0, 0, 0, 0, 0, 0, 0,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
 			 PKT_RX_IP_CKSUM_BAD) >> 1,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD |
 			 PKT_RX_IP_CKSUM_GOOD) >> 1,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
 			 PKT_RX_IP_CKSUM_BAD) >> 1,
-			(PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
+			(PKT_RX_OUTER_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD |
 			 PKT_RX_IP_CKSUM_GOOD) >> 1,
 			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD) >> 1,
 			(PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_GOOD) >> 1,
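The l3_l4_flags_shuf constant above doubles as a 16-entry lookup table: _mm256_shuffle_epi8 indexes it with the descriptor's checksum error bits, and every entry is stored pre-shifted right by one so the widest flag combination still fits in a byte. A scalar model of the same computation, with placeholder flag values rather than the real PKT_RX_* encodings:

#include <stdint.h>

#define TOY_IP_CKSUM_GOOD 0x02u
#define TOY_IP_CKSUM_BAD  0x04u
#define TOY_L4_CKSUM_GOOD 0x20u
#define TOY_L4_CKSUM_BAD  0x40u

static uint32_t toy_l3_l4_flags(uint32_t err_bits) /* 2-bit error field */
{
	/* entries stored >> 1 so each fits in a uint8_t table slot */
	static const uint8_t tbl[4] = {
		(TOY_IP_CKSUM_GOOD | TOY_L4_CKSUM_GOOD) >> 1, /* no error */
		(TOY_IP_CKSUM_BAD  | TOY_L4_CKSUM_GOOD) >> 1, /* bad L3 */
		(TOY_IP_CKSUM_GOOD | TOY_L4_CKSUM_BAD)  >> 1, /* bad L4 */
		(TOY_IP_CKSUM_BAD  | TOY_L4_CKSUM_BAD)  >> 1, /* both bad */
	};

	/* the vector code performs this lookup for 32 byte-lanes at once,
	 * then undoes the >> 1 with _mm256_slli_epi32(..., 1) */
	return (uint32_t)tbl[err_bits & 0x3] << 1;
}
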
@@ -771,26 +771,38 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 	const __m256i cksum_mask =
 		 _mm256_set1_epi32(PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD |
 				   PKT_RX_L4_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD |
-				   PKT_RX_EIP_CKSUM_BAD);
+				   PKT_RX_OUTER_IP_CKSUM_BAD);
 	/**
 	 * data to be shuffled by result of flag mask, shifted down 12.
 	 * If RSS(bit12)/VLAN(bit13) are set,
 	 * shuffle moves appropriate flags in place.
 	 */
-	const __m256i rss_vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
+	const __m256i rss_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
 			0, 0, 0, 0,
 			0, 0, 0, 0,
-			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
-			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			PKT_RX_RSS_HASH, 0,
 			PKT_RX_RSS_HASH, 0,
 			/* end up 128-bits */
 			0, 0, 0, 0,
 			0, 0, 0, 0,
 			0, 0, 0, 0,
-			PKT_RX_RSS_HASH | PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
-			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			PKT_RX_RSS_HASH, 0,
 			PKT_RX_RSS_HASH, 0);
+
+	const __m256i vlan_flags_shuf = _mm256_set_epi8(0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			0, 0,
+			/* end up 128-bits */
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			0, 0, 0, 0,
+			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED,
+			0, 0);
+
 	uint16_t i, received;
 
 	for (i = 0, received = 0; i < nb_pkts;
@@ -938,13 +950,24 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 				_mm256_srli_epi32(flag_bits, 4));
 		l3_l4_flags = _mm256_slli_epi32(l3_l4_flags, 1);
 		l3_l4_flags = _mm256_and_si256(l3_l4_flags, cksum_mask);
+
 		/* set rss and vlan flags */
 		const __m256i rss_vlan_flag_bits =
 			_mm256_srli_epi32(flag_bits, 12);
-		const __m256i rss_vlan_flags =
-			_mm256_shuffle_epi8(rss_vlan_flags_shuf,
+		const __m256i rss_flags =
+			_mm256_shuffle_epi8(rss_flags_shuf,
 					    rss_vlan_flag_bits);
+		__m256i vlan_flags = _mm256_setzero_si256();
+
+		if (rxq->rx_flags == IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1)
+			vlan_flags =
+				_mm256_shuffle_epi8(vlan_flags_shuf,
+						    rss_vlan_flag_bits);
+
+		const __m256i rss_vlan_flags =
+			_mm256_or_si256(rss_flags, vlan_flags);
+
 		/* merge flags */
 		__m256i mbuf_flags = _mm256_or_si256(l3_l4_flags,
 				rss_vlan_flags);
 
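The change above splits the old combined rss_vlan_flags_shuf into independent RSS and VLAN tables: with flexible descriptors the L2TAG1-present bit is only meaningful when the queue actually reports the tag in L2TAG1, so the VLAN shuffle is gated on rxq->rx_flags at run time. A scalar model of the merged result (toy_* names and bit positions are placeholders, not the real status layout):

#include <stdbool.h>
#include <stdint.h>

#define TOY_RSS_VALID (1u << 0) /* stands in for status bit 12 */
#define TOY_L2TAG1P   (1u << 1) /* stands in for status bit 13 */

static uint64_t toy_rx_flag_merge(uint32_t status_bits,
				  bool vlan_in_l2tag1,
				  uint64_t rss_flag, uint64_t vlan_flags)
{
	uint64_t ol_flags = 0;

	if (status_bits & TOY_RSS_VALID)
		ol_flags |= rss_flag;

	/* only trust the L2TAG1-present bit when this queue delivers the
	 * VLAN tag in L2TAG1; otherwise leave vlan_flags zeroed, exactly
	 * like the _mm256_setzero_si256() default in the patch */
	if (vlan_in_l2tag1 && (status_bits & TOY_L2TAG1P))
		ol_flags |= vlan_flags;

	return ol_flags;
}
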
@@ -997,7 +1020,8 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 			 * will cause performance drop to get into this context.
 			 */
 			if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
-			    DEV_RX_OFFLOAD_RSS_HASH) {
+			    DEV_RX_OFFLOAD_RSS_HASH ||
+			    rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
 				/* load bottom half of every 32B desc */
 				const __m128i raw_desc_bh7 =
 					_mm_load_si128
@@ -1048,36 +1072,115 @@ _iavf_recv_raw_pkts_vec_avx2_flex_rxd(struct iavf_rx_queue *rxq,
 					(_mm256_castsi128_si256(raw_desc_bh0),
 					 raw_desc_bh1, 1);
 
-				/**
-				 * to shift the 32b RSS hash value to the
-				 * highest 32b of each 128b before mask
-				 */
-				__m256i rss_hash6_7 =
-					_mm256_slli_epi64(raw_desc_bh6_7, 32);
-				__m256i rss_hash4_5 =
-					_mm256_slli_epi64(raw_desc_bh4_5, 32);
-				__m256i rss_hash2_3 =
-					_mm256_slli_epi64(raw_desc_bh2_3, 32);
-				__m256i rss_hash0_1 =
-					_mm256_slli_epi64(raw_desc_bh0_1, 32);
-
-				__m256i rss_hash_msk =
-					_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
-							 0xFFFFFFFF, 0, 0, 0);
-
-				rss_hash6_7 = _mm256_and_si256
-						(rss_hash6_7, rss_hash_msk);
-				rss_hash4_5 = _mm256_and_si256
-						(rss_hash4_5, rss_hash_msk);
-				rss_hash2_3 = _mm256_and_si256
-						(rss_hash2_3, rss_hash_msk);
-				rss_hash0_1 = _mm256_and_si256
-						(rss_hash0_1, rss_hash_msk);
-
-				mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
-				mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
-				mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
-				mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+				if (rxq->vsi->adapter->eth_dev->data->dev_conf.rxmode.offloads &
+				    DEV_RX_OFFLOAD_RSS_HASH) {
+					/**
+					 * to shift the 32b RSS hash value to the
+					 * highest 32b of each 128b before mask
+					 */
+					__m256i rss_hash6_7 =
+						_mm256_slli_epi64(raw_desc_bh6_7, 32);
+					__m256i rss_hash4_5 =
+						_mm256_slli_epi64(raw_desc_bh4_5, 32);
+					__m256i rss_hash2_3 =
+						_mm256_slli_epi64(raw_desc_bh2_3, 32);
+					__m256i rss_hash0_1 =
+						_mm256_slli_epi64(raw_desc_bh0_1, 32);
+
+					const __m256i rss_hash_msk =
+						_mm256_set_epi32(0xFFFFFFFF, 0, 0, 0,
+								 0xFFFFFFFF, 0, 0, 0);
+
+					rss_hash6_7 = _mm256_and_si256
+							(rss_hash6_7, rss_hash_msk);
+					rss_hash4_5 = _mm256_and_si256
+							(rss_hash4_5, rss_hash_msk);
+					rss_hash2_3 = _mm256_and_si256
+							(rss_hash2_3, rss_hash_msk);
+					rss_hash0_1 = _mm256_and_si256
+							(rss_hash0_1, rss_hash_msk);
+
+					mb6_7 = _mm256_or_si256(mb6_7, rss_hash6_7);
+					mb4_5 = _mm256_or_si256(mb4_5, rss_hash4_5);
+					mb2_3 = _mm256_or_si256(mb2_3, rss_hash2_3);
+					mb0_1 = _mm256_or_si256(mb0_1, rss_hash0_1);
+				}
+
+				if (rxq->rx_flags & IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2) {
+					/* merge the status/error-1 bits into one register */
+					const __m256i status1_4_7 =
+						_mm256_unpacklo_epi32(raw_desc_bh6_7,
+								      raw_desc_bh4_5);
+					const __m256i status1_0_3 =
+						_mm256_unpacklo_epi32(raw_desc_bh2_3,
+								      raw_desc_bh0_1);
+
+					const __m256i status1_0_7 =
+						_mm256_unpacklo_epi64(status1_4_7,
+								      status1_0_3);
+
+					const __m256i l2tag2p_flag_mask =
+						_mm256_set1_epi32
+						(1 << IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
+
+					__m256i l2tag2p_flag_bits =
+						_mm256_and_si256
+						(status1_0_7, l2tag2p_flag_mask);
+
+					l2tag2p_flag_bits =
+						_mm256_srli_epi32(l2tag2p_flag_bits,
+							IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S);
+
+					const __m256i l2tag2_flags_shuf =
+						_mm256_set_epi8(0, 0, 0, 0,
+								0, 0, 0, 0,
+								0, 0, 0, 0,
+								0, 0, 0, 0,
+								/* end up 128-bits */
+								0, 0, 0, 0,
+								0, 0, 0, 0,
+								0, 0, 0, 0,
+								0, 0,
+								PKT_RX_VLAN |
+								PKT_RX_VLAN_STRIPPED,
+								0);
+
+					vlan_flags =
+						_mm256_shuffle_epi8(l2tag2_flags_shuf,
+								    l2tag2p_flag_bits);
+
+					/* merge with vlan_flags */
+					mbuf_flags = _mm256_or_si256
+							(mbuf_flags, vlan_flags);
+
+					/* L2TAG2_2 */
+					__m256i vlan_tci6_7 =
+						_mm256_slli_si256(raw_desc_bh6_7, 4);
+					__m256i vlan_tci4_5 =
+						_mm256_slli_si256(raw_desc_bh4_5, 4);
+					__m256i vlan_tci2_3 =
+						_mm256_slli_si256(raw_desc_bh2_3, 4);
+					__m256i vlan_tci0_1 =
+						_mm256_slli_si256(raw_desc_bh0_1, 4);
+
+					const __m256i vlan_tci_msk =
+						_mm256_set_epi32(0, 0xFFFF0000, 0, 0,
+								 0, 0xFFFF0000, 0, 0);
+
+					vlan_tci6_7 = _mm256_and_si256
+							(vlan_tci6_7, vlan_tci_msk);
+					vlan_tci4_5 = _mm256_and_si256
+							(vlan_tci4_5, vlan_tci_msk);
+					vlan_tci2_3 = _mm256_and_si256
+							(vlan_tci2_3, vlan_tci_msk);
+					vlan_tci0_1 = _mm256_and_si256
+							(vlan_tci0_1, vlan_tci_msk);
+
+					mb6_7 = _mm256_or_si256(mb6_7, vlan_tci6_7);
+					mb4_5 = _mm256_or_si256(mb4_5, vlan_tci4_5);
+					mb2_3 = _mm256_or_si256(mb2_3, vlan_tci2_3);
+					mb0_1 = _mm256_or_si256(mb0_1, vlan_tci0_1);
+				}
 			} /* if() on RSS hash parsing */
 #endif
 
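The new L2TAG2 branch above does two things at once: it turns the STATUS1 L2TAG2P bit into PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED via one more byte shuffle, and it moves the second VLAN tag into the mbuf write-back registers with a 4-byte lane shift plus a 0xFFFF0000 dword mask, so no scalar insertion is needed. A scalar sketch of the tag extraction, assuming (as the shift and mask in the patch imply) that l2tag2_2nd occupies the top 16 bits of the first quadword of the descriptor's upper half:

#include <stdint.h>

/* desc_bh = bytes 16..31 of the 32B flex descriptor, as two qwords */
static uint16_t toy_l2tag2_2(const uint64_t desc_bh[2])
{
	/* the AVX2 code reaches the same bits with
	 * _mm256_slli_si256(x, 4) followed by the 0xFFFF0000 dword mask,
	 * parking the tag where the mbuf shuffle expects vlan_tci */
	return (uint16_t)(desc_bh[0] >> 48);
}
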
@@ -1391,7 +1494,7 @@ iavf_vtx1(volatile struct iavf_tx_desc *txdp,
 			((uint64_t)pkt->data_len << IAVF_TXD_QW1_TX_BUF_SZ_SHIFT));
 
 	__m128i descriptor = _mm_set_epi64x(high_qw,
-				pkt->buf_physaddr + pkt->data_off);
+				pkt->buf_iova + pkt->data_off);
 	_mm_store_si128((__m128i *)txdp, descriptor);
 }
 
@@ -1430,15 +1533,15 @@ iavf_vtx(volatile struct iavf_tx_desc *txdp,
 		__m256i desc2_3 =
 			_mm256_set_epi64x
 				(hi_qw3,
-				 pkt[3]->buf_physaddr + pkt[3]->data_off,
+				 pkt[3]->buf_iova + pkt[3]->data_off,
 				 hi_qw2,
-				 pkt[2]->buf_physaddr + pkt[2]->data_off);
+				 pkt[2]->buf_iova + pkt[2]->data_off);
 		__m256i desc0_1 =
 			_mm256_set_epi64x
 				(hi_qw1,
-				 pkt[1]->buf_physaddr + pkt[1]->data_off,
+				 pkt[1]->buf_iova + pkt[1]->data_off,
 				 hi_qw0,
-				 pkt[0]->buf_physaddr + pkt[0]->data_off);
+				 pkt[0]->buf_iova + pkt[0]->data_off);
 		_mm256_store_si256((void *)(txdp + 2), desc2_3);
 		_mm256_store_si256((void *)txdp, desc0_1);
 	}
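The final two hunks complete the buf_physaddr-to-buf_iova rename on the TX side, where the address feeds straight into the descriptor's low quadword. A scalar model of what iavf_vtx1() assembles per packet; the TOY_* macros mirror the IAVF_TXD_QW1_* shift macros in spirit, but their values here are illustrative, not taken from the driver headers:

#include <stdint.h>

#define TOY_TXD_QW1_CMD_SHIFT        4
#define TOY_TXD_QW1_TX_BUF_SZ_SHIFT 34

struct toy_pkt {
	uint64_t buf_iova; /* bus address of the mbuf's data buffer */
	uint16_t data_off; /* offset of the packet data in that buffer */
	uint16_t data_len;
};

static void toy_vtx1(volatile uint64_t txd[2],
		     const struct toy_pkt *pkt, uint64_t cmd_flags)
{
	/* low qword: DMA address of the first packet byte */
	txd[0] = pkt->buf_iova + pkt->data_off;

	/* high qword: command flags and buffer length, packed by shift */
	txd[1] = (cmd_flags << TOY_TXD_QW1_CMD_SHIFT) |
		 ((uint64_t)pkt->data_len << TOY_TXD_QW1_TX_BUF_SZ_SHIFT);
}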