+
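+/* Report an Rx queue's configuration via the rxq_info_get callback. */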
+void
+i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct i40e_rx_queue *rxq;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = rxq->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_rx_desc;
+
+ qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
+ qinfo->conf.rx_drop_en = rxq->drop_en;
+ qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
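+/* Report a Tx queue's configuration via the txq_info_get callback. */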
+void
+i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct i40e_tx_queue *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_tx_desc;
+
+ qinfo->conf.tx_thresh.pthresh = txq->pthresh;
+ qinfo->conf.tx_thresh.hthresh = txq->hthresh;
+ qinfo->conf.tx_thresh.wthresh = txq->wthresh;
+
+ qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
+ qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
+ qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
+ qinfo->conf.offloads = txq->offloads;
+}
+
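+/* Return the newest vector Rx routine the running CPU can use. */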
+static eth_rx_burst_t
+i40e_get_latest_rx_vec(bool scatter)
+{
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+ i40e_recv_pkts_vec_avx2;
+#endif
+ return scatter ? i40e_recv_scattered_pkts_vec :
+ i40e_recv_pkts_vec;
+}
+
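+/*
+ * Default Rx vector selection (when ad->use_latest_vec is not set):
+ * only pick the AVX2 path on platforms that also report AVX512F, for
+ * the reason given in the comment below.
+ */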
+static eth_rx_burst_t
+i40e_get_recommend_rx_vec(bool scatter)
+{
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+ /*
+ * Since the AVX frequency can be different from the base frequency,
+ * limit use of the AVX2 version to later platforms, not all those
+ * that could theoretically run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+ i40e_recv_pkts_vec_avx2;
+#endif
+ return scatter ? i40e_recv_scattered_pkts_vec :
+ i40e_recv_pkts_vec;
+}
+
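+/*
+ * Choose the Rx burst function for the port, in order of preference:
+ * vector Rx, bulk-allocation Rx, then the scalar scattered or simple path.
+ */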
+void __attribute__((cold))
+i40e_set_rx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ uint16_t rx_using_sse, i;
+ /* Vector Rx requires a few configuration conditions to be met,
+ * and Rx Bulk Allocation must be allowed.
+ */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (i40e_rx_vec_dev_conf_condition_check(dev) ||
+ !ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Port[%d] doesn't meet"
+ " Vector Rx preconditions",
+ dev->data->port_id);
+
+ ad->rx_vec_allowed = false;
+ }
+ if (ad->rx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq =
+ dev->data->rx_queues[i];
+
+ if (rxq && i40e_rxq_vec_setup(rxq)) {
+ ad->rx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ad->rx_vec_allowed) {
+ /* Vec Rx path */
+ PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
+ dev->data->port_id);
+ if (ad->use_latest_vec)
+ dev->rx_pkt_burst =
+ i40e_get_latest_rx_vec(dev->data->scattered_rx);
+ else
+ dev->rx_pkt_burst =
+ i40e_get_recommend_rx_vec(dev->data->scattered_rx);
+ } else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
+ PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
+ "satisfied. Rx Burst Bulk Alloc function "
+ "will be used on port=%d.",
+ dev->data->port_id);
+
+ dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
+ } else {
+ /* Simple Rx Path. */
+ PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.",
+ dev->data->port_id);
+ dev->rx_pkt_burst = dev->data->scattered_rx ?
+ i40e_recv_scattered_pkts :
+ i40e_recv_pkts;
+ }
+
+ /* Propagate information about RX function choice through all queues. */
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ rx_using_sse =
+ (dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec ||
+ dev->rx_pkt_burst == i40e_recv_scattered_pkts_vec_avx2 ||
+ dev->rx_pkt_burst == i40e_recv_pkts_vec_avx2);
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ struct i40e_rx_queue *rxq = dev->data->rx_queues[i];
+
+ if (rxq)
+ rxq->rx_using_sse = rx_using_sse;
+ }
+ }
+}
+
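+/*
+ * Map each Rx burst function to a human-readable name for Rx burst
+ * mode reporting.
+ */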
+static const struct {
+ eth_rx_burst_t pkt_burst;
+ const char *info;
+} i40e_rx_burst_infos[] = {
+ { i40e_recv_scattered_pkts, "Scalar Scattered" },
+ { i40e_recv_pkts_bulk_alloc, "Scalar Bulk Alloc" },
+ { i40e_recv_pkts, "Scalar" },
+#ifdef RTE_ARCH_X86
+ { i40e_recv_scattered_pkts_vec_avx2, "Vector AVX2 Scattered" },
+ { i40e_recv_pkts_vec_avx2, "Vector AVX2" },
+ { i40e_recv_scattered_pkts_vec, "Vector SSE Scattered" },
+ { i40e_recv_pkts_vec, "Vector SSE" },
+#elif defined(RTE_ARCH_ARM64)
+ { i40e_recv_scattered_pkts_vec, "Vector Neon Scattered" },
+ { i40e_recv_pkts_vec, "Vector Neon" },
+#elif defined(RTE_ARCH_PPC_64)
+ { i40e_recv_scattered_pkts_vec, "Vector AltiVec Scattered" },
+ { i40e_recv_pkts_vec, "Vector AltiVec" },
+#endif
+};
+
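+/*
+ * rx_burst_mode_get callback: report which Rx burst function is in use.
+ * Illustrative application-side usage through the generic ethdev API
+ * (port_id and queue_id are assumed to be valid):
+ *
+ *     struct rte_eth_burst_mode mode;
+ *
+ *     if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
+ *         printf("Rx burst mode: %s\n", mode.info);
+ */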
+int
+i40e_rx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+ int ret = -EINVAL;
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(i40e_rx_burst_infos); ++i) {
+ if (pkt_burst == i40e_rx_burst_infos[i].pkt_burst) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ i40e_rx_burst_infos[i].info);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
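+/*
+ * Record on the adapter whether the simple and vector Tx paths can be
+ * used, based on this queue's offloads and its rs threshold.
+ */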
+void __attribute__((cold))
+i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+ /* Use a simple Tx queue if possible (only fast free is allowed) */
+ ad->tx_simple_allowed =
+ (txq->offloads ==
+ (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+ txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
+ ad->tx_vec_allowed = (ad->tx_simple_allowed &&
+ txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
+
+ if (ad->tx_vec_allowed)
+ PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else if (ad->tx_simple_allowed)
+ PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
+ txq->queue_id);
+ else
+ PMD_INIT_LOG(DEBUG,
+ "Neither simple nor vector Tx enabled on Tx queue %u\n",
+ txq->queue_id);
+}
+
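+/* Return the newest vector Tx routine the running CPU can use. */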
+static eth_tx_burst_t
+i40e_get_latest_tx_vec(void)
+{
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+ return i40e_xmit_pkts_vec_avx2;
+#endif
+ return i40e_xmit_pkts_vec;
+}
+
+static eth_tx_burst_t
+i40e_get_recommend_tx_vec(void)
+{
+#if defined(RTE_ARCH_X86) && defined(CC_AVX2_SUPPORT)
+ /*
+ * Since the AVX frequency can be different from the base frequency,
+ * limit use of the AVX2 version to later platforms, not all those
+ * that could theoretically run it.
+ */
+ if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+ return i40e_xmit_pkts_vec_avx2;
+#endif
+ return i40e_xmit_pkts_vec;
+}
+
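+/*
+ * Choose the Tx burst function for the port: vector or simple Tx when
+ * allowed (no Tx prepare callback needed), otherwise the full-featured
+ * path paired with i40e_prep_pkts.
+ */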
+void __attribute__((cold))
+i40e_set_tx_function(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ if (ad->tx_vec_allowed) {
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ struct i40e_tx_queue *txq =
+ dev->data->tx_queues[i];
+
+ if (txq && i40e_txq_vec_setup(txq)) {
+ ad->tx_vec_allowed = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (ad->tx_simple_allowed) {
+ if (ad->tx_vec_allowed) {
+ PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
+ if (ad->use_latest_vec)
+ dev->tx_pkt_burst =
+ i40e_get_latest_tx_vec();
+ else
+ dev->tx_pkt_burst =
+ i40e_get_recommend_tx_vec();
+ } else {
+ PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts_simple;
+ }
+ dev->tx_pkt_prepare = NULL;
+ } else {
+ PMD_INIT_LOG(DEBUG, "Xmit tx finally be used.");
+ dev->tx_pkt_burst = i40e_xmit_pkts;
+ dev->tx_pkt_prepare = i40e_prep_pkts;
+ }
+}
+
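+/*
+ * Map each Tx burst function to a human-readable name for Tx burst
+ * mode reporting.
+ */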
+static const struct {
+ eth_tx_burst_t pkt_burst;
+ const char *info;
+} i40e_tx_burst_infos[] = {
+ { i40e_xmit_pkts_simple, "Scalar Simple" },
+ { i40e_xmit_pkts, "Scalar" },
+#ifdef RTE_ARCH_X86
+ { i40e_xmit_pkts_vec_avx2, "Vector AVX2" },
+ { i40e_xmit_pkts_vec, "Vector SSE" },
+#elif defined(RTE_ARCH_ARM64)
+ { i40e_xmit_pkts_vec, "Vector Neon" },
+#elif defined(RTE_ARCH_PPC_64)
+ { i40e_xmit_pkts_vec, "Vector AltiVec" },
+#endif
+};
+
+int
+i40e_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
+ struct rte_eth_burst_mode *mode)
+{
+ eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
+ int ret = -EINVAL;
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(i40e_tx_burst_infos); ++i) {
+ if (pkt_burst == i40e_tx_burst_infos[i].pkt_burst) {
+ snprintf(mode->info, sizeof(mode->info), "%s",
+ i40e_tx_burst_infos[i].info);
+ ret = 0;
+ break;
+ }
+ }
+
+ return ret;
+}
+
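+/* Fill the adapter's packet type table with the default ptype mapping. */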
+void __attribute__((cold))
+i40e_set_default_ptype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_MAX_PKT_TYPE; i++)
+ ad->ptype_tbl[i] = i40e_get_default_pkt_type(i);
+}
+
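+/*
+ * Build the flow type to hardware packet classifier type (pctype) table,
+ * including the extra pctypes supported by X722 devices, and derive the
+ * flow type and pctype masks from it.
+ */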
+void __attribute__((cold))
+i40e_set_default_pctype_table(struct rte_eth_dev *dev)
+{
+ struct i40e_adapter *ad =
+ I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++)
+ ad->pctypes_tbl[i] = 0ULL;
+ ad->flow_types_mask = 0ULL;
+ ad->pctypes_mask = 0ULL;
+
+ ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV4] =
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_SCTP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_OTHER] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ ad->pctypes_tbl[RTE_ETH_FLOW_FRAG_IPV6] =
+ (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_SCTP] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_OTHER] =
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ ad->pctypes_tbl[RTE_ETH_FLOW_L2_PAYLOAD] =
+ (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+
+ if (hw->mac.type == I40E_MAC_X722 ||
+ hw->mac.type == I40E_MAC_X722_VF) {
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV4_TCP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_UDP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
+ ad->pctypes_tbl[RTE_ETH_FLOW_NONFRAG_IPV6_TCP] |=
+ (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
+ }
+
+ for (i = 0; i < I40E_FLOW_TYPE_MAX; i++) {
+ if (ad->pctypes_tbl[i])
+ ad->flow_types_mask |= (1ULL << i);
+ ad->pctypes_mask |= ad->pctypes_tbl[i];
+ }
+}
+
+#ifndef RTE_LIBRTE_I40E_INC_VECTOR
+/*
+ * Stubs needed for linkage when CONFIG_RTE_LIBRTE_I40E_INC_VECTOR is set
+ * to 'n'.
+ */
+int
+i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
+{
+ return -1;
+}
+
+uint16_t
+i40e_recv_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t
+i40e_recv_scattered_pkts_vec(
+ void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+int
+i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
+{
+ return -1;
+}
+
+int
+i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
+{
+ return -1;
+}
+
+void
+i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused *rxq)
+{
+}
+
+uint16_t
+i40e_xmit_fixed_burst_vec(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+#endif /* ifndef RTE_LIBRTE_I40E_INC_VECTOR */
+
+#ifndef CC_AVX2_SUPPORT
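+/* Stubs used when the compiler cannot build the AVX2 code paths. */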
+uint16_t
+i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t
+i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
+ struct rte_mbuf __rte_unused **rx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+
+uint16_t
+i40e_xmit_pkts_vec_avx2(void __rte_unused *tx_queue,
+ struct rte_mbuf __rte_unused **tx_pkts,
+ uint16_t __rte_unused nb_pkts)
+{
+ return 0;
+}
+#endif /* ifndef CC_AVX2_SUPPORT */