diff --git a/drivers/net/ice/ice_rxtx_vec_common.h b/drivers/net/ice/ice_rxtx_vec_common.h
index 5e6f89642a..2dd2d83650 100644
--- a/drivers/net/ice/ice_rxtx_vec_common.h
+++ b/drivers/net/ice/ice_rxtx_vec_common.h
@@ -7,6 +7,10 @@
 
 #include "ice_rxtx.h"
 
+#ifndef __INTEL_COMPILER
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
 static inline uint16_t
 ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 			  uint16_t nb_bufs, uint8_t *split_flags)
@@ -29,6 +33,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 		if (!split_flags[buf_idx]) {
 			/* it's the last packet of the set */
 			start->hash = end->hash;
+			start->vlan_tci = end->vlan_tci;
 			start->ol_flags = end->ol_flags;
 			/* we need to strip crc for the whole packet */
 			start->pkt_len -= rxq->crc_len;
@@ -72,7 +77,7 @@ ice_rx_reassemble_packets(struct ice_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 }
 
 static __rte_always_inline int
-ice_tx_free_bufs(struct ice_tx_queue *txq)
+ice_tx_free_bufs_vec(struct ice_tx_queue *txq)
 {
 	struct ice_tx_entry *txep;
 	uint32_t n;
@@ -188,16 +193,39 @@ _ice_tx_queue_release_mbufs_vec(struct ice_tx_queue *txq)
 	 * so need to free remains more carefully.
 	 */
 	i = txq->tx_next_dd - txq->tx_rs_thresh + 1;
-	if (txq->tx_tail < i) {
-		for (; i < txq->nb_tx_desc; i++) {
+
+#ifdef __AVX512VL__
+	struct rte_eth_dev *dev = &rte_eth_devices[txq->vsi->adapter->pf.dev_data->port_id];
+
+	if (dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512 ||
+	    dev->tx_pkt_burst == ice_xmit_pkts_vec_avx512_offload) {
+		struct ice_vec_tx_entry *swr = (void *)txq->sw_ring;
+
+		if (txq->tx_tail < i) {
+			for (; i < txq->nb_tx_desc; i++) {
+				rte_pktmbuf_free_seg(swr[i].mbuf);
+				swr[i].mbuf = NULL;
+			}
+			i = 0;
+		}
+		for (; i < txq->tx_tail; i++) {
+			rte_pktmbuf_free_seg(swr[i].mbuf);
+			swr[i].mbuf = NULL;
+		}
+	} else
+#endif
+	{
+		if (txq->tx_tail < i) {
+			for (; i < txq->nb_tx_desc; i++) {
+				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
+				txq->sw_ring[i].mbuf = NULL;
+			}
+			i = 0;
+		}
+		for (; i < txq->tx_tail; i++) {
 			rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
 			txq->sw_ring[i].mbuf = NULL;
 		}
-		i = 0;
-	}
-	for (; i < txq->tx_tail; i++) {
-		rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
-		txq->sw_ring[i].mbuf = NULL;
 	}
 }
 
@@ -219,6 +247,29 @@ ice_rxq_vec_setup_default(struct ice_rx_queue *rxq)
 	return 0;
 }
 
+#define ICE_TX_NO_VECTOR_FLAGS (			\
+		RTE_ETH_TX_OFFLOAD_MULTI_SEGS |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |	\
+		RTE_ETH_TX_OFFLOAD_TCP_TSO |		\
+		RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)
+
+#define ICE_TX_VECTOR_OFFLOAD (				\
+		RTE_ETH_TX_OFFLOAD_VLAN_INSERT |	\
+		RTE_ETH_TX_OFFLOAD_QINQ_INSERT |	\
+		RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_UDP_CKSUM |		\
+		RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
+
+#define ICE_RX_VECTOR_OFFLOAD (				\
+		RTE_ETH_RX_OFFLOAD_CHECKSUM |		\
+		RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |		\
+		RTE_ETH_RX_OFFLOAD_VLAN |		\
+		RTE_ETH_RX_OFFLOAD_RSS_HASH)
+
+#define ICE_VECTOR_PATH		0
+#define ICE_VECTOR_OFFLOAD_PATH	1
+
 static inline int
 ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 {
@@ -237,15 +288,14 @@ ice_rx_vec_queue_default(struct ice_rx_queue *rxq)
 	if (rxq->proto_xtr != PROTO_XTR_NONE)
 		return -1;
 
-	return 0;
-}
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		return -1;
+
+	if (rxq->offloads & ICE_RX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
 
-#define ICE_NO_VECTOR_FLAGS (			\
-		DEV_TX_OFFLOAD_MULTI_SEGS |	\
-		DEV_TX_OFFLOAD_VLAN_INSERT |	\
-		DEV_TX_OFFLOAD_SCTP_CKSUM |	\
-		DEV_TX_OFFLOAD_UDP_CKSUM |	\
-		DEV_TX_OFFLOAD_TCP_CKSUM)
+	return ICE_VECTOR_PATH;
+}
 
 static inline int
 ice_tx_vec_queue_default(struct ice_tx_queue *txq)
@@ -253,14 +303,17 @@ ice_tx_vec_queue_default(struct ice_tx_queue *txq)
 	if (!txq)
 		return -1;
 
-	if (txq->offloads & ICE_NO_VECTOR_FLAGS)
-		return -1;
-
 	if (txq->tx_rs_thresh < ICE_VPMD_TX_BURST ||
 	    txq->tx_rs_thresh > ICE_TX_MAX_FREE_BUF_SZ)
 		return -1;
 
-	return 0;
+	if (txq->offloads & ICE_TX_NO_VECTOR_FLAGS)
+		return -1;
+
+	if (txq->offloads & ICE_TX_VECTOR_OFFLOAD)
+		return ICE_VECTOR_OFFLOAD_PATH;
+
+	return ICE_VECTOR_PATH;
 }
 
 static inline int
@@ -268,20 +321,19 @@ ice_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_rx_queue *rxq;
-	struct ice_adapter *ad =
-		ICE_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
-
-	/* vPMD does not support flow mark. */
-	if (ad->devargs.flow_mark_support)
-		return -1;
+	int ret = 0;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		if (ice_rx_vec_queue_default(rxq))
+		ret = (ice_rx_vec_queue_default(rxq));
+		if (ret < 0)
 			return -1;
+		if (ret == ICE_VECTOR_OFFLOAD_PATH)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
 static inline int
@@ -289,14 +341,79 @@ ice_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct ice_tx_queue *txq;
+	int ret = 0;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (ice_tx_vec_queue_default(txq))
+		ret = ice_tx_vec_queue_default(txq);
+		if (ret < 0)
 			return -1;
+		if (ret == ICE_VECTOR_OFFLOAD_PATH)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
+static inline void
+ice_txd_enable_offload(struct rte_mbuf *tx_pkt,
+		       uint64_t *txd_hi)
+{
+	uint64_t ol_flags = tx_pkt->ol_flags;
+	uint32_t td_cmd = 0;
+	uint32_t td_offset = 0;
+
+	/* Tx Checksum Offload */
+	/* SET MACLEN */
+	td_offset |= (tx_pkt->l2_len >> 1) <<
+			ICE_TX_DESC_LEN_MACLEN_S;
+
+	/* Enable L3 checksum offload */
+	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+				ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+				ICE_TX_DESC_LEN_IPLEN_S;
+	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
+		td_cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+				ICE_TX_DESC_LEN_IPLEN_S;
+	}
+
+	/* Enable L4 checksum offloads */
+	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+	case RTE_MBUF_F_TX_TCP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
+		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+				ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case RTE_MBUF_F_TX_SCTP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
+		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+				ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	case RTE_MBUF_F_TX_UDP_CKSUM:
+		td_cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
+		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+				ICE_TX_DESC_LEN_L4_LEN_S;
+		break;
+	default:
+		break;
+	}
+
+	*txd_hi |= ((uint64_t)td_offset) << ICE_TXD_QW1_OFFSET_S;
+
+	/* Tx VLAN/QINQ insertion Offload */
+	if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+		td_cmd |= ICE_TX_DESC_CMD_IL2TAG1;
+		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+				ICE_TXD_QW1_L2TAG1_S);
+	}
+
+	*txd_hi |= ((uint64_t)td_cmd) << ICE_TXD_QW1_CMD_S;
+}
+
 #endif
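
Note on usage: ice_txd_enable_offload() above only accumulates the checksum command/offset bits and the L2TAG1 VLAN tag into the descriptor's high quadword; the caller still supplies the descriptor type, EOP/RS flags and buffer size, and a queue only reaches this helper when the device-level check returns ICE_VECTOR_OFFLOAD_PATH rather than ICE_VECTOR_PATH or -1. A minimal sketch of how an offload-enabled vector Tx path could combine the two when filling one data descriptor (the surrounding burst loop, the txd pointer and the do_offload flag are illustrative assumptions, not part of this patch):

	/* Sketch only: build qword1 for a single data descriptor, then OR in
	 * the offload fields derived from the mbuf's ol_flags.
	 */
	uint64_t qw1 = ICE_TX_DESC_DTYPE_DATA |
		       ((uint64_t)ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S) |
		       ((uint64_t)pkt->data_len << ICE_TXD_QW1_TX_BUF_SZ_S);

	if (do_offload)
		ice_txd_enable_offload(pkt, &qw1);	/* cksum/VLAN bits */

	txd->buf_addr = rte_cpu_to_le_64(rte_mbuf_data_iova(pkt));
	txd->cmd_type_offset_bsz = rte_cpu_to_le_64(qw1);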