#include <rte_ethdev_driver.h>
#include <rte_net.h>
+#include <rte_vect.h>
#include "rte_pmd_ice.h"
#include "ice_rxtx.h"
uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
-static inline uint64_t
-ice_rxdid_to_proto_xtr_ol_flag(uint8_t rxdid, bool *chk_valid)
-{
- static struct {
- uint64_t *ol_flag;
- bool chk_valid;
- } ol_flag_map[] = {
- [ICE_RXDID_COMMS_AUX_VLAN] = {
- &rte_net_ice_dynflag_proto_xtr_vlan_mask, true },
- [ICE_RXDID_COMMS_AUX_IPV4] = {
- &rte_net_ice_dynflag_proto_xtr_ipv4_mask, true },
- [ICE_RXDID_COMMS_AUX_IPV6] = {
- &rte_net_ice_dynflag_proto_xtr_ipv6_mask, true },
- [ICE_RXDID_COMMS_AUX_IPV6_FLOW] = {
- &rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask, true },
- [ICE_RXDID_COMMS_AUX_TCP] = {
- &rte_net_ice_dynflag_proto_xtr_tcp_mask, true },
- [ICE_RXDID_COMMS_AUX_IP_OFFSET] = {
- &rte_net_ice_dynflag_proto_xtr_ip_offset_mask, false },
- };
- uint64_t *ol_flag;
-
- if (rxdid < RTE_DIM(ol_flag_map)) {
- ol_flag = ol_flag_map[rxdid].ol_flag;
- if (!ol_flag)
- return 0ULL;
-
- *chk_valid = ol_flag_map[rxdid].chk_valid;
- return *ol_flag;
- }
-
- return 0ULL;
-}
-
static inline uint8_t
ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
{
rxdid_map[xtr_type] : ICE_RXDID_COMMS_OVS;
}
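+/* Rx descriptor parser for the default OVS flexible descriptor profile:
+ * fills the FDIR ID and, for 32-byte descriptors, the RSS hash.
+ */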
+static inline void
+ice_rxd_to_pkt_fields_by_comms_ovs(__rte_unused struct ice_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ uint16_t stat_err;
+#endif
+
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+#endif
+}
+
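+/* Rx descriptor parser for the comms aux profiles (VLAN, IPv4, IPv6,
+ * IPv6 flow, TCP): fills RSS hash, FDIR ID and, when enabled, the
+ * protocol extraction metadata signalled by the XTRMD4/XTRMD5
+ * validity bits in status_error1.
+ */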
+static inline void
+ice_rxd_to_pkt_fields_by_comms_aux_v1(struct ice_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+
+ if (rxq->xtr_ol_flag) {
+ uint32_t metadata = 0;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error1);
+
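+ /* Pack aux0 into the low half and aux1 into the high half of the
+ * 32-bit metadata when the corresponding validity bit is set.
+ */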
+ if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+
+ if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
+ metadata |=
+ rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
+
+ if (metadata) {
+ mb->ol_flags |= rxq->xtr_ol_flag;
+
+ *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+ }
+ }
+#endif
+}
+
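+/* Rx descriptor parser for the comms aux profile used by IP_OFFSET:
+ * like the v1 parser, but aux0/aux1 use the sentinel value 0xFFFF
+ * instead of validity bits to indicate that nothing was extracted.
+ */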
+static inline void
+ice_rxd_to_pkt_fields_by_comms_aux_v2(struct ice_rx_queue *rxq,
+ struct rte_mbuf *mb,
+ volatile union ice_rx_flex_desc *rxdp)
+{
+ volatile struct ice_32b_rx_flex_desc_comms *desc =
+ (volatile struct ice_32b_rx_flex_desc_comms *)rxdp;
+ uint16_t stat_err;
+
+ stat_err = rte_le_to_cpu_16(desc->status_error0);
+ if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+ mb->ol_flags |= PKT_RX_RSS_HASH;
+ mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+ }
+
+#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
+ if (desc->flow_id != 0xFFFFFFFF) {
+ mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
+ }
+
+ if (rxq->xtr_ol_flag) {
+ uint32_t metadata = 0;
+
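+ /* 0xFFFF marks an empty field; take aux0 if present, else aux1 */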
+ if (desc->flex_ts.flex.aux0 != 0xFFFF)
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
+ else if (desc->flex_ts.flex.aux1 != 0xFFFF)
+ metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
+
+ if (metadata) {
+ mb->ol_flags |= rxq->xtr_ol_flag;
+
+ *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
+ }
+ }
+#endif
+}
+
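+/* Bind the per-queue descriptor parser and protocol extraction ol_flag
+ * according to the RXDID programmed for the queue.
+ */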
+static void
+ice_select_rxd_to_pkt_fields_handler(struct ice_rx_queue *rxq, uint32_t rxdid)
+{
+ switch (rxdid) {
+ case ICE_RXDID_COMMS_AUX_VLAN:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_vlan_mask;
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_IPV4:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv4_mask;
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_IPV6:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_mask;
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_IPV6_FLOW:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_TCP:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_tcp_mask;
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v1;
+ break;
+
+ case ICE_RXDID_COMMS_AUX_IP_OFFSET:
+ rxq->xtr_ol_flag = rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_aux_v2;
+ break;
+
+ case ICE_RXDID_COMMS_OVS:
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+ break;
+
+ default:
+ /* default handler; update this if a dedicated RXDID is defined
+ * for PROTO_XTR_NONE
+ */
+ rxq->rxd_to_pkt_fields = ice_rxd_to_pkt_fields_by_comms_ovs;
+ break;
+ }
+
+ if (!rte_net_ice_dynf_proto_xtr_metadata_avail())
+ rxq->xtr_ol_flag = 0;
+}
+
static enum ice_status
ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
{
return -EINVAL;
}
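+ /* select the descriptor parsing handler matching this RXDID */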
+ ice_select_rxd_to_pkt_fields_handler(rxq, rxdid);
+
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
*/
mb->vlan_tci, mb->vlan_tci_outer);
}
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
-#define ICE_RX_PROTO_XTR_VALID \
- ((1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S) | \
- (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
-
-static void
-ice_rxd_to_proto_xtr(struct rte_mbuf *mb,
- volatile struct ice_32b_rx_flex_desc_comms_ovs *desc)
-{
- uint16_t stat_err = rte_le_to_cpu_16(desc->status_error1);
- uint32_t metadata = 0;
- uint64_t ol_flag;
- bool chk_valid;
-
- ol_flag = ice_rxdid_to_proto_xtr_ol_flag(desc->rxdid, &chk_valid);
- if (unlikely(!ol_flag))
- return;
-
- if (chk_valid) {
- if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S))
- metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
-
- if (stat_err & (1 << ICE_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S))
- metadata |=
- rte_le_to_cpu_16(desc->flex_ts.flex.aux1) << 16;
- } else {
- if (rte_le_to_cpu_16(desc->flex_ts.flex.aux0) != 0xFFFF)
- metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux0);
- else if (rte_le_to_cpu_16(desc->flex_ts.flex.aux1) != 0xFFFF)
- metadata = rte_le_to_cpu_16(desc->flex_ts.flex.aux1);
- }
-
- if (!metadata)
- return;
-
- mb->ol_flags |= ol_flag;
-
- *RTE_NET_ICE_DYNF_PROTO_XTR_METADATA(mb) = metadata;
-}
-#endif
-
-static inline void
-ice_rxd_to_pkt_fields(struct rte_mbuf *mb,
- volatile union ice_rx_flex_desc *rxdp)
-{
- volatile struct ice_32b_rx_flex_desc_comms_ovs *desc =
- (volatile struct ice_32b_rx_flex_desc_comms_ovs *)rxdp;
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- uint16_t stat_err;
-
- stat_err = rte_le_to_cpu_16(desc->status_error0);
- if (likely(stat_err & (1 << ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
- mb->ol_flags |= PKT_RX_RSS_HASH;
- mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
- }
-#endif
-
- if (desc->flow_id != 0xFFFFFFFF) {
- mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
- mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
- }
-
-#ifndef RTE_LIBRTE_ICE_16BYTE_RX_DESC
- if (unlikely(rte_net_ice_dynf_proto_xtr_metadata_avail()))
- ice_rxd_to_proto_xtr(mb, desc);
-#endif
-}
-
#define ICE_LOOK_AHEAD 8
#if (ICE_LOOK_AHEAD != 8)
#error "PMD ICE: ICE_LOOK_AHEAD must be 8\n"
mb->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(mb, &rxdp[j]);
- ice_rxd_to_pkt_fields(mb, &rxdp[j]);
+ rxq->rxd_to_pkt_fields(rxq, mb, &rxdp[j]);
mb->ol_flags |= pkt_flags;
}
first_seg->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(first_seg, &rxd);
- ice_rxd_to_pkt_fields(first_seg, &rxd);
+ rxq->rxd_to_pkt_fields(rxq, first_seg, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
first_seg->ol_flags |= pkt_flags;
/* Prefetch data of first segment, if configured to do so. */
rx_id = (uint16_t)(rx_id == 0 ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
/* write TAIL register */
- ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
#ifdef RTE_ARCH_X86
if (dev->rx_pkt_burst == ice_recv_pkts_vec ||
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec ||
+#ifdef CC_AVX512_SUPPORT
+ dev->rx_pkt_burst == ice_recv_pkts_vec_avx512 ||
+ dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx512 ||
+#endif
dev->rx_pkt_burst == ice_recv_pkts_vec_avx2 ||
dev->rx_pkt_burst == ice_recv_scattered_pkts_vec_avx2)
return ptypes;
rxm->packet_type = ptype_tbl[ICE_RX_FLEX_DESC_PTYPE_M &
rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
ice_rxd_to_vlan_tci(rxm, &rxd);
- ice_rxd_to_pkt_fields(rxm, &rxd);
+ rxq->rxd_to_pkt_fields(rxq, rxm, &rxd);
pkt_flags = ice_rxd_error_to_pkt_flags(rx_stat_err0);
rxm->ol_flags |= pkt_flags;
/* copy old mbuf to rx_pkts */
rx_id = (uint16_t)(rx_id == 0 ?
(rxq->nb_rx_desc - 1) : (rx_id - 1));
/* write TAIL register */
- ICE_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+ ICE_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
nb_hold = 0;
}
rxq->nb_rx_hold = nb_hold;
txq->tx_tail = 0;
/* Update the tx tail register */
- ICE_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
+ ICE_PCI_REG_WC_WRITE(txq->qtx_tail, txq->tx_tail);
return nb_pkts;
}
#ifdef RTE_ARCH_X86
struct ice_rx_queue *rxq;
int i;
+ bool use_avx512 = false;
bool use_avx2 = false;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed) {
+ if (!ice_rx_vec_dev_check(dev) && ad->rx_bulk_alloc_allowed &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->rx_vec_allowed = true;
for (i = 0; i < dev->data->nb_rx_queues; i++) {
rxq = dev->data->rx_queues[i];
}
}
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
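+ /* The AVX512 Rx path needs AVX512F and AVX512BW on the CPU,
+ * a max SIMD bitwidth of at least 512 bits, and a compiler
+ * with CC_AVX512_SUPPORT.
+ */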
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+#ifdef CC_AVX512_SUPPORT
+ use_avx512 = true;
+#else
+ PMD_DRV_LOG(NOTICE,
+ "AVX512 is not supported in build env");
+#endif
+ if (!use_avx512 &&
+ (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
use_avx2 = true;
} else {
if (ad->rx_vec_allowed) {
if (dev->data->scattered_rx) {
- PMD_DRV_LOG(DEBUG,
+ if (use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 Vector Scattered Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_scattered_pkts_vec_avx512;
+#endif
+ } else {
+ PMD_DRV_LOG(DEBUG,
"Using %sVector Scattered Rx (port %d).",
use_avx2 ? "avx2 " : "",
dev->data->port_id);
- dev->rx_pkt_burst = use_avx2 ?
+ dev->rx_pkt_burst = use_avx2 ?
ice_recv_scattered_pkts_vec_avx2 :
ice_recv_scattered_pkts_vec;
+ }
} else {
- PMD_DRV_LOG(DEBUG, "Using %sVector Rx (port %d).",
+ if (use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+ PMD_DRV_LOG(NOTICE,
+ "Using AVX512 Vector Rx (port %d).",
+ dev->data->port_id);
+ dev->rx_pkt_burst =
+ ice_recv_pkts_vec_avx512;
+#endif
+ } else {
+ PMD_DRV_LOG(DEBUG,
+ "Using %sVector Rx (port %d).",
use_avx2 ? "avx2 " : "",
dev->data->port_id);
- dev->rx_pkt_burst = use_avx2 ?
- ice_recv_pkts_vec_avx2 :
- ice_recv_pkts_vec;
+ dev->rx_pkt_burst = use_avx2 ?
+ ice_recv_pkts_vec_avx2 :
+ ice_recv_pkts_vec;
+ }
}
return;
}
{ ice_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
{ ice_recv_pkts, "Scalar" },
#ifdef RTE_ARCH_X86
+#ifdef CC_AVX512_SUPPORT
+ { ice_recv_scattered_pkts_vec_avx512, "Vector AVX512 Scattered" },
+ { ice_recv_pkts_vec_avx512, "Vector AVX512" },
+#endif
{ ice_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
{ ice_recv_pkts_vec_avx2, "Vector AVX2" },
{ ice_recv_scattered_pkts_vec, "Vector SSE Scattered" },
#ifdef RTE_ARCH_X86
struct ice_tx_queue *txq;
int i;
+ bool use_avx512 = false;
bool use_avx2 = false;
if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- if (!ice_tx_vec_dev_check(dev)) {
+ if (!ice_tx_vec_dev_check(dev) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
ad->tx_vec_allowed = true;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
}
}
- if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
- rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1)
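+ /* Same AVX512 requirements as the Rx path: AVX512F, AVX512BW,
+ * 512-bit SIMD allowance and compiler support.
+ */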
+ if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1 &&
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) == 1)
+#ifdef CC_AVX512_SUPPORT
+ use_avx512 = true;
+#else
+ PMD_DRV_LOG(NOTICE,
+ "AVX512 is not supported in build env");
+#endif
+ if (!use_avx512 &&
+ (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) == 1 ||
+ rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) == 1) &&
+ rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
use_avx2 = true;
} else {
}
if (ad->tx_vec_allowed) {
- PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
- use_avx2 ? "avx2 " : "",
- dev->data->port_id);
- dev->tx_pkt_burst = use_avx2 ?
- ice_xmit_pkts_vec_avx2 :
- ice_xmit_pkts_vec;
+ if (use_avx512) {
+#ifdef CC_AVX512_SUPPORT
+ PMD_DRV_LOG(NOTICE, "Using AVX512 Vector Tx (port %d).",
+ dev->data->port_id);
+ dev->tx_pkt_burst = ice_xmit_pkts_vec_avx512;
+#endif
+ } else {
+ PMD_DRV_LOG(DEBUG, "Using %sVector Tx (port %d).",
+ use_avx2 ? "avx2 " : "",
+ dev->data->port_id);
+ dev->tx_pkt_burst = use_avx2 ?
+ ice_xmit_pkts_vec_avx2 :
+ ice_xmit_pkts_vec;
+ }
dev->tx_pkt_prepare = NULL;
return;
{ ice_xmit_pkts_simple, "Scalar Simple" },
{ ice_xmit_pkts, "Scalar" },
#ifdef RTE_ARCH_X86
+#ifdef CC_AVX512_SUPPORT
+ { ice_xmit_pkts_vec_avx512, "Vector AVX512" },
+#endif
{ ice_xmit_pkts_vec_avx2, "Vector AVX2" },
{ ice_xmit_pkts_vec, "Vector SSE" },
#endif