From: Qi Zhang
Date: Wed, 12 Apr 2017 13:55:32 +0000 (-0400)
Subject: net/i40e: enable per-device packet type mapping
X-Git-Tag: spdx-start~3537
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=67f038076657;p=dpdk.git

net/i40e: enable per-device packet type mapping

The mapping from hardware-defined packet types to software-defined
packet types used to be static for i40e devices. This patch gives each
ethdev its own copy of the mapping table, so that different ethdevs can
be configured with different PTYPE mapping rules. This is a
prerequisite for supporting the hardware's dynamic PTYPE feature in
follow-up patches.

Signed-off-by: Qi Zhang
Acked-by: Jianbo Liu
Acked-by: Chao Zhu
---

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 7ba8c70f31..7f3b66dcbb 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -1059,6 +1059,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
 		i40e_set_tx_function(dev);
 		return 0;
 	}
+	i40e_set_default_ptype_table(dev);
 	pci_dev = I40E_DEV_TO_PCI(dev);
 	intr_handle = &pci_dev->intr_handle;
diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h
index c745e9b462..71b087407f 100644
--- a/drivers/net/i40e/i40e_ethdev.h
+++ b/drivers/net/i40e/i40e_ethdev.h
@@ -744,6 +744,8 @@ struct i40e_vf {
 	uint64_t flags;
 };
 
+#define I40E_MAX_PKT_TYPE 256
+
 /*
  * Structure to store private data for each PF/VF instance.
  */
@@ -768,6 +770,9 @@ struct i40e_adapter {
 	struct rte_timecounter systime_tc;
 	struct rte_timecounter rx_tstamp_tc;
 	struct rte_timecounter tx_tstamp_tc;
+
+	/* ptype mapping table */
+	uint32_t ptype_tbl[I40E_MAX_PKT_TYPE] __rte_cache_min_aligned;
 };
 
 extern const struct rte_flow_ops i40e_flow_ops;
diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c
index ca35268b47..cb34023e9e 100644
--- a/drivers/net/i40e/i40e_ethdev_vf.c
+++ b/drivers/net/i40e/i40e_ethdev_vf.c
@@ -1478,7 +1478,7 @@ i40evf_dev_init(struct rte_eth_dev *eth_dev)
 		i40e_set_tx_function(eth_dev);
 		return 0;
 	}
-
+	i40e_set_default_ptype_table(eth_dev);
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
 	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index ff70c06cc7..e5471b1437 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -61,7 +61,6 @@
 #define DEFAULT_TX_RS_THRESH 32
 #define DEFAULT_TX_FREE_THRESH 32
-#define I40E_MAX_PKT_TYPE 256
 
 #define I40E_TX_MAX_BURST 32
@@ -458,6 +457,7 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 	int32_t s[I40E_LOOK_AHEAD], nb_dd;
 	int32_t i, j, nb_rx = 0;
 	uint64_t pkt_flags;
+	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	rxep = &rxq->sw_ring[rxq->rx_tail];
@@ -506,9 +506,9 @@ i40e_rx_scan_hw_ring(struct i40e_rx_queue *rxq)
 			pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
 			pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
 			mb->packet_type =
-				i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
-						I40E_RXD_QW1_PTYPE_MASK) >>
-						I40E_RXD_QW1_PTYPE_SHIFT));
+				ptype_tbl[(uint8_t)((qword1 &
+						I40E_RXD_QW1_PTYPE_MASK) >>
+						I40E_RXD_QW1_PTYPE_SHIFT)];
 			if (pkt_flags & PKT_RX_RSS_HASH)
 				mb->hash.rss = rte_le_to_cpu_32(\
 					rxdp[j].wb.qword0.hi_dword.rss);
@@ -700,6 +700,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	uint16_t rx_id, nb_hold;
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
+	uint32_t *ptype_tbl;
 
 	nb_rx = 0;
 	nb_hold = 0;
@@ -707,6 +708,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	rx_id = rxq->rx_tail;
 	rx_ring = rxq->rx_ring;
 	sw_ring = rxq->sw_ring;
+	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -763,8 +765,8 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
 		rxm->packet_type =
-			i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
-			I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+			ptype_tbl[(uint8_t)((qword1 &
+			I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
 		if (pkt_flags & PKT_RX_RSS_HASH)
 			rxm->hash.rss =
 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
@@ -818,6 +820,7 @@ i40e_recv_scattered_pkts(void *rx_queue,
 	uint64_t qword1;
 	uint64_t dma_addr;
 	uint64_t pkt_flags;
+	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
 	while (nb_rx < nb_pkts) {
 		rxdp = &rx_ring[rx_id];
@@ -925,8 +928,8 @@ i40e_recv_scattered_pkts(void *rx_queue,
 		pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
 		pkt_flags |= i40e_rxd_error_to_pkt_flags(qword1);
 		first_seg->packet_type =
-			i40e_rxd_pkt_type_mapping((uint8_t)((qword1 &
-			I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT));
+			ptype_tbl[(uint8_t)((qword1 &
+			I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT)];
 		if (pkt_flags & PKT_RX_RSS_HASH)
 			first_seg->hash.rss =
 				rte_le_to_cpu_32(rxd.wb.qword0.hi_dword.rss);
@@ -2922,6 +2925,17 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 	}
 }
 
+void __attribute__((cold))
+i40e_set_default_ptype_table(struct rte_eth_dev *dev)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int i;
+
+	for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+		ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
+}
+
 /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
 int __attribute__((weak))
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
diff --git a/drivers/net/i40e/i40e_rxtx.h b/drivers/net/i40e/i40e_rxtx.h
index 2fa7f372fc..20084d6491 100644
--- a/drivers/net/i40e/i40e_rxtx.h
+++ b/drivers/net/i40e/i40e_rxtx.h
@@ -254,13 +254,14 @@ void i40e_set_rx_function(struct rte_eth_dev *dev);
 void i40e_set_tx_function_flag(struct rte_eth_dev *dev,
 			       struct i40e_tx_queue *txq);
 void i40e_set_tx_function(struct rte_eth_dev *dev);
+void i40e_set_default_ptype_table(struct rte_eth_dev *dev);
 
 /* For each value it means, datasheet of hardware can tell more details
  *
  * @note: fix i40e_dev_supported_ptypes_get() if any change here.
  */
 static inline uint32_t
-i40e_rxd_pkt_type_mapping(uint8_t ptype)
+i40e_get_default_pkt_type(uint8_t ptype)
 {
 	static const uint32_t type_table[UINT8_MAX + 1] __rte_cache_aligned = {
 		/* L2 types */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_altivec.c b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
index 98314386d9..07de31b01e 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_altivec.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
@@ -198,7 +198,8 @@ desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
 #define PKTLEN_SHIFT 10
 
 static inline void
-desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts,
+		uint32_t *ptype_tbl)
 {
 	vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
 	vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
@@ -206,14 +207,14 @@ desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
 	ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
 	ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
 
-	rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(
-					(*(vector unsigned char *)&ptype0)[0]);
-	rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(
-					(*(vector unsigned char *)&ptype0)[8]);
-	rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(
-					(*(vector unsigned char *)&ptype1)[0]);
-	rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(
-					(*(vector unsigned char *)&ptype1)[8]);
+	rx_pkts[0]->packet_type =
+		ptype_tbl[(*(vector unsigned char *)&ptype0)[0]];
+	rx_pkts[1]->packet_type =
+		ptype_tbl[(*(vector unsigned char *)&ptype0)[8]];
+	rx_pkts[2]->packet_type =
+		ptype_tbl[(*(vector unsigned char *)&ptype1)[0]];
+	rx_pkts[3]->packet_type =
+		ptype_tbl[(*(vector unsigned char *)&ptype1)[8]];
 }
 
 /* Notice:
@@ -231,6 +232,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	int pos;
 	uint64_t var;
 	vector unsigned char shuf_msk;
+	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
 	vector unsigned short crc_adjust = (vector unsigned short){
 		0, 0,		/* ignore pkt_type field */
@@ -455,7 +457,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 		vec_st(pkt_mb1, 0,
 		       (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
 			);
-		desc_to_ptype_v(descs, &rx_pkts[pos]);
+		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
 		desc_to_olflags_v(descs, &rx_pkts[pos]);
 
 		/* C.4 calc avaialbe number of desc */
diff --git a/drivers/net/i40e/i40e_rxtx_vec_neon.c b/drivers/net/i40e/i40e_rxtx_vec_neon.c
index ca6b1f4e64..694e91f330 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_neon.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_neon.c
@@ -201,7 +201,8 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, uint64x2_t descs[4],
 #define I40E_VPMD_DESC_DD_MASK 0x0001000100010001ULL
 
 static inline void
-desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts,
+		uint32_t *ptype_tbl)
 {
 	int i;
 	uint8_t ptype;
@@ -210,7 +211,7 @@ desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
 	for (i = 0; i < 4; i++) {
 		tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
 		ptype = vgetq_lane_u8(tmp, 8);
-		rx_pkts[i]->packet_type = i40e_rxd_pkt_type_mapping(ptype);
+		rx_pkts[i]->packet_type = ptype_tbl[ptype];
 	}
 }
 
@@ -230,6 +231,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	uint16_t nb_pkts_recd;
 	int pos;
 	uint64_t var;
+	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
 	/* mask to shuffle from desc.
 	   to mbuf */
 	uint8x16_t shuf_msk = {
@@ -434,7 +436,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 				 pkt_mb2);
 		vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
 			 pkt_mb1);
-		desc_to_ptype_v(descs, &rx_pkts[pos]);
+		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
 		/* C.4 calc avaialbe number of desc */
 		var = __builtin_popcountll(stat & I40E_VPMD_DESC_DD_MASK);
 		nb_pkts_recd += var;
diff --git a/drivers/net/i40e/i40e_rxtx_vec_sse.c b/drivers/net/i40e/i40e_rxtx_vec_sse.c
index f91e32d451..8bc3399a95 100644
--- a/drivers/net/i40e/i40e_rxtx_vec_sse.c
+++ b/drivers/net/i40e/i40e_rxtx_vec_sse.c
@@ -212,7 +212,8 @@ desc_to_olflags_v(struct i40e_rx_queue *rxq, __m128i descs[4] __rte_unused,
 #define PKTLEN_SHIFT 10
 
 static inline void
-desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
+desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts,
+		uint32_t *ptype_tbl)
 {
 	__m128i ptype0 = _mm_unpackhi_epi64(descs[0], descs[1]);
 	__m128i ptype1 = _mm_unpackhi_epi64(descs[2], descs[3]);
@@ -220,10 +221,10 @@ desc_to_ptype_v(__m128i descs[4], struct rte_mbuf **rx_pkts)
 	ptype0 = _mm_srli_epi64(ptype0, 30);
 	ptype1 = _mm_srli_epi64(ptype1, 30);
 
-	rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 0));
-	rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype0, 8));
-	rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 0));
-	rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(_mm_extract_epi8(ptype1, 8));
+	rx_pkts[0]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 0)];
+	rx_pkts[1]->packet_type = ptype_tbl[_mm_extract_epi8(ptype0, 8)];
+	rx_pkts[2]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 0)];
+	rx_pkts[3]->packet_type = ptype_tbl[_mm_extract_epi8(ptype1, 8)];
 }
 
 /*
@@ -242,6 +243,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 	int pos;
 	uint64_t var;
 	__m128i shuf_msk;
+	uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
 
 	__m128i crc_adjust = _mm_set_epi16(
 				0, 0, 0,	/* ignore non-length fields */
@@ -429,7 +431,7 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
 				 pkt_mb2);
 		_mm_storeu_si128((void *)&rx_pkts[pos]->rx_descriptor_fields1,
 				 pkt_mb1);
-		desc_to_ptype_v(descs, &rx_pkts[pos]);
+		desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
 		/* C.4 calc avaialbe number of desc */
 		var = __builtin_popcountll(_mm_cvtsi128_si64(staterr));
 		nb_pkts_recd += var;
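
For readers skimming the change, the sketch below is an illustration only, not part of the patch. It shows why per-device storage matters: once each ethdev owns its own ptype_tbl[], one port's mapping can be changed at runtime without affecting the default mapping used by other ports. The struct and function names in the sketch are hypothetical stand-ins; only the idea of a 256-entry uint32_t table per device mirrors the ptype_tbl field added above.

/* Hypothetical illustration only; it is NOT i40e driver code.  It mimics
 * the per-device table introduced by this patch: each "adapter" owns a
 * private 256-entry mapping from hardware ptype index to software ptype. */
#include <stdint.h>
#include <stddef.h>

#define EXAMPLE_MAX_PKT_TYPE 256	/* mirrors I40E_MAX_PKT_TYPE */

struct example_adapter {
	/* per-device copy; initialized from the default table at init time */
	uint32_t ptype_tbl[EXAMPLE_MAX_PKT_TYPE];
};

/* Remap one hardware ptype index on a single device.  Other devices keep
 * their own tables unchanged, which a single shared static table could
 * not offer. */
static int
example_ptype_mapping_update(struct example_adapter *ad,
			     uint8_t hw_ptype, uint32_t sw_ptype)
{
	if (ad == NULL)
		return -1;
	ad->ptype_tbl[hw_ptype] = sw_ptype;
	return 0;
}

The hot-path change in the patch itself is only that the receive routines index the per-device table directly instead of calling the former i40e_rxd_pkt_type_mapping() helper; the dynamic PTYPE support mentioned in the commit message can then be built on top of this per-device storage.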