#include <rte_ethdev_driver.h>
#include <rte_net.h>
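+/* rte_vect.h provides rte_vect_get_max_simd_bitwidth(), used below to
+ * gate the SSE/AVX2/AVX512 vector paths at runtime.
+ */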
+#include <rte_vect.h>
#include "rte_pmd_ice.h"
#include "ice_rxtx.h"
rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
}
+static inline void
+ice_rxd_to_pkt_fields_by_comms_generic(__rte_unused struct ice_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err = rte_le_to_cpu_16(desc->status_error0);
+
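+ /* RSS hash is reported only when the RSS valid status bit is set */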
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
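+ /* flow_id only exists in the 32-byte descriptor; an all-ones value
+  * is treated as "no flow director match"
+  */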
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+#endif
+}
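+/*
+ * Usage sketch (hypothetical application code, not part of this driver):
+ * the fields filled in above are consumed after rx_burst along the lines of
+ *
+ *     if (mb->ol_flags & PKT_RX_RSS_HASH)
+ *         worker = mb->hash.rss % nb_workers;
+ *     if (mb->ol_flags & PKT_RX_FDIR_ID)
+ *         matched_rule = mb->hash.fdir.hi;
+ */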
+
static inline void
ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
struct rte_mbuf *mb,
#endif
}
-static void
+void
ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
{
switch (rxdid) {
rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
break;
+ case ICE_RXDID_COMMS_GENERIC:
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_generic;
+ break;
case ICE_RXDID_COMMS_OVS:
rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
break;
if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
flags |= PKT_RX_EIP_CKSUM_BAD;
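+ /* Report GOOD explicitly so the outer L4 state is never left as
+  * PKT_RX_OUTER_L4_CKSUM_UNKNOWN on this path
+  */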
+ if (unlikely(stat_err0 & (1 << ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S)))
+ flags |= PKT_RX_OUTER_L4_CKSUM_BAD;
+ else
+ flags |= PKT_RX_OUTER_L4_CKSUM_GOOD;
+
return flags;
}
rx_id = (uint16_t)(rx_id == 0 ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
/* write TAIL register */
- ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
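+ /* The WC variant issues the doorbell as a write-combining store
+  * (presumably rte_write32_wc() in the companion header change, not
+  * shown here), reducing MMIO cost on x86.
+  */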
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
#ifdef RTE_ARCH_X86
if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
+#ifdef CC_AVX512_SUPPORT
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
+#endif
dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
return ptypes;
rx_id = (uint16_t)(rx_id == 0 ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
/* write TAIL register */
- ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
*cd_tunneling |= (tx_offload.l2_len >> 1) <<
ICE_TXD_CTX_QW0_NATLEN_S;
- if ((ol_flags & PKT_TX_OUTER_UDP_CKSUM) &&
- (ol_flags & PKT_TX_OUTER_IP_CKSUM) &&
+ /**
+ * Calculate the tunneling UDP checksum.
+ * Shall be set only if L4TUNT = 01b and EIPT is not zero
+ */
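+ /*
+  * e.g. VXLAN over IPv4 with PKT_TX_OUTER_IP_CKSUM requested:
+  * EIPT = IPv4 with checksum (non-zero) and L4TUNT = 01b (UDP
+  * tunneling), so L4T_CS gets set and hardware fills in the outer
+  * UDP checksum.
+  */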
+ if (!(*cd_tunneling & ICE_TX_CTX_EIPT_NONE) &&
(*cd_tunneling & ICE_TXD_CTX_UDP_TUNNELING))
*cd_tunneling |= ICE_TXD_CTX_QW0_L4T_CS_M;
}
txq->tx_tail = 0;
/* Update the tx tail register */
- ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
#ifdef RTE_ARCH_X86
struct ice_rx_queue *rxq;
int i;
+ bool use_avx512 = false;
bool use_avx2 = false;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
+ if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->rx_vec_allowed = true;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
}
}
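+        /* Pick the widest usable SIMD path: AVX512 needs build-time
+         * support (CC_AVX512_SUPPORT) plus the AVX512F and AVX512BW CPU
+         * flags at a max SIMD width of at least 512; otherwise fall back
+         * to AVX2 at 256, then SSE.  The width comes from
+         * rte_vect_get_max_simd_bitwidth(), which honors the EAL
+         * --force-max-simd-bitwidth option, so the selection can be
+         * capped at runtime without rebuilding.
+         */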
-        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
-            rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+        if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
+            rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
+            rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+#ifdef CC_AVX512_SUPPORT
+            use_avx512 = true;
+#else
+        PMD_DRV_LOG(NOTICE,
+            "AVX512 is not supported in build env");
+#endif
+        if (!use_avx512 &&
+            (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+             rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+            rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
            use_avx2 = true;
} else {
if (ad->rx_vec_allowed) {
if (dev->data->scattered_rx) {
-            PMD_DRV_LOG(DEBUG,
+            if (use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+                PMD_DRV_LOG(NOTICE,
+                    "Using AVX512 Vector Scattered Rx (port %d).",
+                    dev->data->port_id);
+                dev->rx_pkt_burst =
+                    ice_recv_scattered_pkts_vec_avx512;
+#endif
+            } else {
+                PMD_DRV_LOG(DEBUG,
                     "Using %sVector Scattered Rx (port %d).",
                     use_avx2 ? "avx2 " : "",
                     dev->data->port_id);
-            dev->rx_pkt_burst = use_avx2 ?
-                ice_recv_scattered_pkts_vec_avx2 :
-                ice_recv_scattered_pkts_vec;
+                dev->rx_pkt_burst = use_avx2 ?
+                    ice_recv_scattered_pkts_vec_avx2 :
+                    ice_recv_scattered_pkts_vec;
+            }
} else {
-            PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
-                use_avx2 ? "avx2 " : "",
-                dev->data->port_id);
-            dev->rx_pkt_burst = use_avx2 ?
-                ice_recv_pkts_vec_avx2 :
-                ice_recv_pkts_vec;
+            if (use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+                PMD_DRV_LOG(NOTICE,
+                    "Using AVX512 Vector Rx (port %d).",
+                    dev->data->port_id);
+                dev->rx_pkt_burst =
+                    ice_recv_pkts_vec_avx512;
+#endif
+            } else {
+                PMD_DRV_LOG(DEBUG,
+                    "Using %sVector Rx (port %d).",
+                    use_avx2 ? "avx2 " : "",
+                    dev->data->port_id);
+                dev->rx_pkt_burst = use_avx2 ?
+                    ice_recv_pkts_vec_avx2 :
+                    ice_recv_pkts_vec;
+            }
}
return;
}
{ ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
{ ice_recv_pkts, "Scalar" },
#ifdef RTE_ARCH_X86
+#ifdef CC_AVX512_SUPPORT
+ { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+ { ice_recv_pkts_vec_avx512, "Vector AVX512" },
+#endif
{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
{ ice_recv_pkts_vec_avx2, "Vector AVX2" },
{ ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
#ifdef RTE_ARCH_X86
struct ice_tx_queue *txq;
int i;
+ bool use_avx512 = false;
bool use_avx2 = false;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (!ice_tx_vec_dev_check(dev)) {
+ if (!ice_tx_vec_dev_check(dev) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->tx_vec_allowed = true;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
}
}
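+        /* Same SIMD path selection rules as on the Rx side above. */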
-        if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
-            rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
+        if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
+            rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
+            rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+#ifdef CC_AVX512_SUPPORT
+            use_avx512 = true;
+#else
+        PMD_DRV_LOG(NOTICE,
+            "AVX512 is not supported in build env");
+#endif
+        if (!use_avx512 &&
+            (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+             rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+            rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
            use_avx2 = true;
} else {
}
if (ad->tx_vec_allowed) {
-        PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
-            use_avx2 ? "avx2 " : "",
-            dev->data->port_id);
-        dev->tx_pkt_burst = use_avx2 ?
-            ice_xmit_pkts_vec_avx2 :
-            ice_xmit_pkts_vec;
+        if (use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+            PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
+                dev->data->port_id);
+            dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+#endif
+        } else {
+            PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+                use_avx2 ? "avx2 " : "",
+                dev->data->port_id);
+            dev->tx_pkt_burst = use_avx2 ?
+                ice_xmit_pkts_vec_avx2 :
+                ice_xmit_pkts_vec;
+        }
dev->tx_pkt_prepare = NULL;
return;
{ ice_xmit_pkts_simple, "Scalar Simple" },
{ ice_xmit_pkts, "Scalar" },
#ifdef RTE_ARCH_X86
+#ifdef CC_AVX512_SUPPORT
+ { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
+#endif
{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
{ ice_xmit_pkts_vec, "Vector SSE" },
#endif