CONFIG_RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE=y
CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR=y
+#
+# Compile burst-oriented AVF PMD driver
+#
+CONFIG_RTE_LIBRTE_AVF_PMD=y
+
#
# Compile burst-oriented Mellanox ConnectX-3 (MLX4) PMD
#
DIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += af_packet
DIRS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += ark
+DIRS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf
DIRS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += avp
DIRS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += bnx2x
DIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += bonding
--- /dev/null
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright(c) 2017 Intel Corporation
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_avf.a
+
+CFLAGS += -O3
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
+LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_hash
+LDLIBS += -lrte_bus_pci
+
+EXPORT_MAP := rte_pmd_avf_version.map
+
+LIBABIVER := 1
+
+#
+# Add extra flags for base driver files (also known as shared code)
+# to disable warnings
+#
+ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y)
+CFLAGS_BASE_DRIVER =
+else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y)
+CFLAGS_BASE_DRIVER = -Wno-pointer-to-int-cast
+else
+CFLAGS_BASE_DRIVER = -Wno-pointer-to-int-cast
+endif
+OBJS_BASE_DRIVER=$(sort $(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))))
+$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER)))
+
+VPATH += $(SRCDIR)/base
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_adminq.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_common.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += avf_vchnl.c
+
+include $(RTE_SDK)/mk/rte.lib.mk
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#ifndef _AVF_ETHDEV_H_
+#define _AVF_ETHDEV_H_
+
+#include <rte_kvargs.h>
+
+#define AVF_AQ_LEN 32
+#define AVF_AQ_BUF_SZ 4096
+#define AVF_RESET_WAIT_CNT 50
+#define AVF_BUF_SIZE_MIN 1024
+#define AVF_FRAME_SIZE_MAX 9728
+#define AVF_QUEUE_BASE_ADDR_UNIT 128
+
+#define AVF_MAX_NUM_QUEUES 16
+
+#define AVF_NUM_MACADDR_MAX 64
+
+#define AVF_DEFAULT_RX_PTHRESH 8
+#define AVF_DEFAULT_RX_HTHRESH 8
+#define AVF_DEFAULT_RX_WTHRESH 0
+
+#define AVF_DEFAULT_RX_FREE_THRESH 32
+
+#define AVF_DEFAULT_TX_PTHRESH 32
+#define AVF_DEFAULT_TX_HTHRESH 0
+#define AVF_DEFAULT_TX_WTHRESH 0
+
+#define AVF_DEFAULT_TX_FREE_THRESH 32
+#define AVF_DEFAULT_TX_RS_THRESH 32
+
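+/* Offload capabilities requested from the PF at init time: the
+ * virtchnl base-mode offloads plus write-back on ITR and Rx polling.
+ */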
+#define AVF_BASIC_OFFLOAD_CAPS ( \
+ VF_BASE_MODE_OFFLOADS | \
+ VIRTCHNL_VF_OFFLOAD_WB_ON_ITR | \
+ VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+
+#define AVF_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
+#define AVF_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+
+/* Default queue interrupt throttling time in microseconds */
+#define AVF_ITR_INDEX_DEFAULT 0
+#define AVF_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */
+#define AVF_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */
+
+/* The overhead from MTU to max frame size.
+ * To allow for QinQ packets, the VLAN tag is counted twice.
+ */
+#define AVF_VLAN_TAG_SIZE 4
+#define AVF_ETH_OVERHEAD \
+ (ETHER_HDR_LEN + ETHER_CRC_LEN + AVF_VLAN_TAG_SIZE * 2)
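+/* e.g. with the default 1500 byte MTU the max frame size is
+ * 1500 + 14 (header) + 4 (CRC) + 2 * 4 (VLAN tags) = 1526 bytes.
+ */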
+
+struct avf_adapter;
+struct avf_rx_queue;
+struct avf_tx_queue;
+
+/* Structure that defines a VSI, associated with an adapter. */
+struct avf_vsi {
+ struct avf_adapter *adapter; /* Backreference to associated adapter */
+ uint16_t vsi_id;
+ uint16_t nb_qps; /* Number of queue pairs VSI can occupy */
+ uint16_t nb_used_qps; /* Number of queue pairs VSI uses */
+ uint16_t max_macaddrs; /* Maximum number of MAC addresses */
+ uint16_t base_vector;
+ uint16_t msix_intr; /* the MSI-X interrupt bound to the VSI */
+};
+
+/* TODO: is it correct to assume that the max number of MSI-X vectors is 16? */
+#define AVF_MAX_MSIX_VECTORS 16
+
+/* Structure to store private data specific for VF instance. */
+struct avf_info {
+ uint16_t num_queue_pairs;
+ uint16_t max_pkt_len; /* Maximum packet length */
+ uint16_t mac_num; /* Number of MAC addresses */
+ bool promisc_unicast_enabled;
+ bool promisc_multicast_enabled;
+
+ struct virtchnl_version_info virtchnl_version;
+ struct virtchnl_vf_resource *vf_res; /* VF resource */
+ struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+
+ volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
+ uint32_t cmd_retval; /* return value of the cmd response from PF */
+ uint8_t *aq_resp; /* buffer to store the adminq response from PF */
+
+ /* Events received from the PF */
+ bool dev_closed;
+ bool link_up;
+ enum virtchnl_link_speed link_speed;
+
+ struct avf_vsi vsi;
+ bool vf_reset;
+ uint64_t flags;
+
+ uint8_t *rss_lut;
+ uint8_t *rss_key;
+ uint16_t nb_msix; /* number of MSI-X interrupts on Rx */
+ uint16_t msix_base; /* MSI-X vector base */
+ /* queue bitmask for each vector */
+ uint16_t rxq_map[AVF_MAX_MSIX_VECTORS];
+};
+
+#define AVF_MAX_PKT_TYPE 256
+
+/* Structure to store private data for each VF instance. */
+struct avf_adapter {
+ struct avf_hw hw;
+ struct rte_eth_dev *eth_dev;
+ struct avf_info vf;
+};
+
+/* AVF_DEV_PRIVATE_TO */
+#define AVF_DEV_PRIVATE_TO_ADAPTER(adapter) \
+ ((struct avf_adapter *)adapter)
+#define AVF_DEV_PRIVATE_TO_VF(adapter) \
+ (&((struct avf_adapter *)adapter)->vf)
+#define AVF_DEV_PRIVATE_TO_HW(adapter) \
+ (&((struct avf_adapter *)adapter)->hw)
+
+/* AVF_VSI_TO */
+#define AVF_VSI_TO_HW(vsi) \
+ (&(((struct avf_vsi *)vsi)->adapter->hw))
+#define AVF_VSI_TO_VF(vsi) \
+ (&(((struct avf_vsi *)vsi)->adapter->vf))
+#define AVF_VSI_TO_ETH_DEV(vsi) \
+ (((struct avf_vsi *)vsi)->adapter->eth_dev)
+
+static inline void
+avf_init_adminq_parameter(struct avf_hw *hw)
+{
+ hw->aq.num_arq_entries = AVF_AQ_LEN;
+ hw->aq.num_asq_entries = AVF_AQ_LEN;
+ hw->aq.arq_buf_size = AVF_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = AVF_AQ_BUF_SZ;
+}
+
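+/* Convert a microsecond ITR interval to the value written to hardware,
+ * e.g. the default 32 us interval is programmed as 16 (2 us granularity).
+ */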
+static inline uint16_t
+avf_calc_itr_interval(int16_t interval)
+{
+ if (interval < 0 || interval > AVF_QUEUE_ITR_INTERVAL_MAX)
+ interval = AVF_QUEUE_ITR_INTERVAL_DEFAULT;
+
+ /* Convert to the hardware count; each unit written represents 2 us */
+ return interval / 2;
+}
+
+/* structure used for sending and checking response of virtchnl ops */
+struct avf_cmd_info {
+ enum virtchnl_ops ops;
+ uint8_t *in_args; /* buffer for sending */
+ uint32_t in_args_size; /* buffer size for sending */
+ uint8_t *out_buffer; /* buffer for response */
+ uint32_t out_size; /* buffer size for response */
+};
+
+/* Clear the current command. Call this only after _atomic_set_cmd()
+ * has succeeded.
+ */
+static inline void
+_clear_cmd(struct avf_info *vf)
+{
+ rte_wmb();
+ vf->pend_cmd = VIRTCHNL_OP_UNKNOWN;
+ vf->cmd_retval = VIRTCHNL_STATUS_SUCCESS;
+}
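+
+/* pend_cmd protocol: a caller records a command with _atomic_set_cmd()
+ * before sending a virtchnl message; the command is cleared again with
+ * _clear_cmd(), either by the caller on send failure or poll timeout,
+ * or by the interrupt handler when the matching response arrives.
+ */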
+
+/* Check whether a command is pending. If none, record the new command. */
+static inline int
+_atomic_set_cmd(struct avf_info *vf, enum virtchnl_ops ops)
+{
+ int ret = rte_atomic32_cmpset((volatile uint32_t *)&vf->pend_cmd,
+ VIRTCHNL_OP_UNKNOWN, ops);
+
+ if (!ret)
+ PMD_DRV_LOG(ERR, "An incomplete cmd %d is still pending", vf->pend_cmd);
+
+ return !ret;
+}
+
+int avf_check_api_version(struct avf_adapter *adapter);
+int avf_get_vf_resource(struct avf_adapter *adapter);
+void avf_handle_virtchnl_msg(struct rte_eth_dev *dev);
+#endif /* _AVF_ETHDEV_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_interrupts.h>
+#include <rte_debug.h>
+#include <rte_pci.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
+#include <rte_malloc.h>
+#include <rte_memzone.h>
+#include <rte_dev.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_adminq_cmd.h"
+#include "base/avf_type.h"
+
+#include "avf.h"
+
+int avf_logtype_init;
+int avf_logtype_driver;
+static const struct rte_pci_id pci_id_avf_map[] = {
+ { RTE_PCI_DEVICE(AVF_INTEL_VENDOR_ID, AVF_DEV_ID_ADAPTIVE_VF) },
+ { .vendor_id = 0, /* sentinel */ },
+};
+
+static const struct eth_dev_ops avf_eth_dev_ops = {
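+	/* no callbacks are implemented yet */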
+};
+
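+/* Poll AVFGEN_RSTAT until the VF reset state machine reports VFACTIVE
+ * or COMPLETED, giving up after AVF_RESET_WAIT_CNT * 20 ms = 1 second.
+ */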
+static int
+avf_check_vf_reset_done(struct avf_hw *hw)
+{
+ int i, reset;
+
+ for (i = 0; i < AVF_RESET_WAIT_CNT; i++) {
+ reset = AVF_READ_REG(hw, AVFGEN_RSTAT) &
+ AVFGEN_RSTAT_VFR_STATE_MASK;
+ reset = reset >> AVFGEN_RSTAT_VFR_STATE_SHIFT;
+ if (reset == VIRTCHNL_VFR_VFACTIVE ||
+ reset == VIRTCHNL_VFR_COMPLETED)
+ break;
+ rte_delay_ms(20);
+ }
+
+ if (i >= AVF_RESET_WAIT_CNT)
+ return -1;
+
+ return 0;
+}
+
+static int
+avf_init_vf(struct rte_eth_dev *dev)
+{
+ int err, bufsz;
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+
+ err = avf_set_mac_type(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "set_mac_type failed: %d", err);
+ goto err;
+ }
+
+ err = avf_check_vf_reset_done(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "VF is still resetting");
+ goto err;
+ }
+
+ avf_init_adminq_parameter(hw);
+ err = avf_init_adminq(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "init_adminq failed: %d", err);
+ goto err;
+ }
+
+ vf->aq_resp = rte_zmalloc("vf_aq_resp", AVF_AQ_BUF_SZ, 0);
+ if (!vf->aq_resp) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_aq_resp memory");
+ goto err_aq;
+ }
+ if (avf_check_api_version(adapter) != 0) {
+ PMD_INIT_LOG(ERR, "check_api version failed");
+ goto err_api;
+ }
+
+ bufsz = sizeof(struct virtchnl_vf_resource) +
+ (AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource));
+ vf->vf_res = rte_zmalloc("vf_res", bufsz, 0);
+ if (!vf->vf_res) {
+ PMD_INIT_LOG(ERR, "unable to allocate vf_res memory");
+ goto err_api;
+ }
+ if (avf_get_vf_resource(adapter) != 0) {
+ PMD_INIT_LOG(ERR, "avf_get_vf_resource failed");
+ goto err_alloc;
+ }
+ /* Allocate memory for RSS info */
+ if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vf->rss_key = rte_zmalloc("rss_key",
+ vf->vf_res->rss_key_size, 0);
+ if (!vf->rss_key) {
+ PMD_INIT_LOG(ERR, "unable to allocate rss_key memory");
+ goto err_rss;
+ }
+ vf->rss_lut = rte_zmalloc("rss_lut",
+ vf->vf_res->rss_lut_size, 0);
+ if (!vf->rss_lut) {
+ PMD_INIT_LOG(ERR, "unable to allocate rss_lut memory");
+ goto err_rss;
+ }
+ }
+ return 0;
+err_rss:
+ rte_free(vf->rss_key);
+ rte_free(vf->rss_lut);
+err_alloc:
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+err_api:
+ rte_free(vf->aq_resp);
+err_aq:
+ avf_shutdown_adminq(hw);
+err:
+ return -1;
+}
+
+/* Enable default admin queue interrupt setting */
+static inline void
+avf_enable_irq0(struct avf_hw *hw)
+{
+ /* Enable admin queue interrupt trigger */
+ AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, AVFINT_ICR0_ENA1_ADMINQ_MASK);
+
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01, AVFINT_DYN_CTL01_INTENA_MASK |
+ AVFINT_DYN_CTL01_ITR_INDX_MASK);
+
+ AVF_WRITE_FLUSH(hw);
+}
+
+static inline void
+avf_disable_irq0(struct avf_hw *hw)
+{
+ /* Disable all interrupt types */
+ AVF_WRITE_REG(hw, AVFINT_ICR0_ENA1, 0);
+ AVF_WRITE_REG(hw, AVFINT_DYN_CTL01,
+ AVFINT_DYN_CTL01_ITR_INDX_MASK);
+ AVF_WRITE_FLUSH(hw);
+}
+
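+/* Admin queue interrupt handler: keep irq0 masked while pending
+ * virtchnl messages are drained, then re-enable it.
+ */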
+static void
+avf_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ avf_disable_irq0(hw);
+
+ avf_handle_virtchnl_msg(dev);
+
+ avf_enable_irq0(hw);
+}
+
+static int
+avf_dev_init(struct rte_eth_dev *eth_dev)
+{
+ struct avf_adapter *adapter =
+ AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* assign ops func pointer */
+ eth_dev->dev_ops = &avf_eth_dev_ops;
+
+ rte_eth_copy_pci_info(eth_dev, pci_dev);
+
+ hw->vendor_id = pci_dev->id.vendor_id;
+ hw->device_id = pci_dev->id.device_id;
+ hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
+ hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
+ hw->bus.bus_id = pci_dev->addr.bus;
+ hw->bus.device = pci_dev->addr.devid;
+ hw->bus.func = pci_dev->addr.function;
+ hw->hw_addr = (void *)pci_dev->mem_resource[0].addr;
+ hw->back = AVF_DEV_PRIVATE_TO_ADAPTER(eth_dev->data->dev_private);
+ adapter->eth_dev = eth_dev;
+
+ if (avf_init_vf(eth_dev) != 0) {
+ PMD_INIT_LOG(ERR, "Init vf failed");
+ return -1;
+ }
+
+ /* copy mac addr */
+ eth_dev->data->mac_addrs = rte_zmalloc(
+ "avf_mac",
+ ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX,
+ 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to"
+ " store MAC addresses",
+ ETHER_ADDR_LEN * AVF_NUM_MACADDR_MAX);
+ return -ENOMEM;
+ }
+ /* If the MAC address is not configured by host,
+ * generate a random one.
+ */
+ if (!is_valid_assigned_ether_addr((struct ether_addr *)hw->mac.addr))
+ eth_random_addr(hw->mac.addr);
+ ether_addr_copy((struct ether_addr *)hw->mac.addr,
+ &eth_dev->data->mac_addrs[0]);
+
+ /* register callback func to eal lib */
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ avf_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ /* enable uio intr after callback register */
+ rte_intr_enable(&pci_dev->intr_handle);
+
+ /* configure and enable device interrupt */
+ avf_enable_irq0(hw);
+
+ return 0;
+}
+
+static void
+avf_dev_close(struct rte_eth_dev *dev)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+
+ avf_shutdown_adminq(hw);
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(intr_handle);
+
+ /* unregister callback func from eal lib */
+ rte_intr_callback_unregister(intr_handle,
+ avf_dev_interrupt_handler, dev);
+ avf_disable_irq0(hw);
+}
+
+static int
+avf_dev_uninit(struct rte_eth_dev *dev)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -EPERM;
+
+ dev->dev_ops = NULL;
+ dev->rx_pkt_burst = NULL;
+ dev->tx_pkt_burst = NULL;
+ if (hw->adapter_stopped == 0)
+ avf_dev_close(dev);
+
+ rte_free(vf->vf_res);
+ vf->vsi_res = NULL;
+ vf->vf_res = NULL;
+
+ rte_free(vf->aq_resp);
+ vf->aq_resp = NULL;
+
+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ if (vf->rss_lut) {
+ rte_free(vf->rss_lut);
+ vf->rss_lut = NULL;
+ }
+ if (vf->rss_key) {
+ rte_free(vf->rss_key);
+ vf->rss_key = NULL;
+ }
+
+ return 0;
+}
+
+static int eth_avf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct avf_adapter), avf_dev_init);
+}
+
+static int eth_avf_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, avf_dev_uninit);
+}
+
+/* Adaptive virtual function driver struct */
+static struct rte_pci_driver rte_avf_pmd = {
+ .id_table = pci_id_avf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = eth_avf_pci_probe,
+ .remove = eth_avf_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_avf, rte_avf_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_avf, pci_id_avf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_avf, "* igb_uio | vfio-pci");
+RTE_INIT(avf_init_log);
+static void
+avf_init_log(void)
+{
+ avf_logtype_init = rte_log_register("pmd.avf.init");
+ if (avf_logtype_init >= 0)
+ rte_log_set_level(avf_logtype_init, RTE_LOG_NOTICE);
+ avf_logtype_driver = rte_log_register("pmd.avf.driver");
+ if (avf_logtype_driver >= 0)
+ rte_log_set_level(avf_logtype_driver, RTE_LOG_NOTICE);
+}
+
+/* memory func for base code */
+enum avf_status_code
+avf_allocate_dma_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_dma_mem *mem,
+ u64 size,
+ u32 alignment)
+{
+ const struct rte_memzone *mz = NULL;
+ char z_name[RTE_MEMZONE_NAMESIZE];
+
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ snprintf(z_name, sizeof(z_name), "avf_dma_%"PRIu64, rte_rand());
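+ /* bound to 2M so the DMA area never crosses a 2 MB page boundary */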
+ mz = rte_memzone_reserve_bounded(z_name, size, SOCKET_ID_ANY, 0,
+ alignment, RTE_PGSIZE_2M);
+ if (!mz)
+ return AVF_ERR_NO_MEMORY;
+
+ mem->size = size;
+ mem->va = mz->addr;
+ mem->pa = mz->phys_addr;
+ mem->zone = (const void *)mz;
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s allocated with physical address: %"PRIu64,
+ mz->name, mem->pa);
+
+ return AVF_SUCCESS;
+}
+
+enum avf_status_code
+avf_free_dma_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_dma_mem *mem)
+{
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ PMD_DRV_LOG(DEBUG,
+ "memzone %s to be freed with physical address: %"PRIu64,
+ ((const struct rte_memzone *)mem->zone)->name, mem->pa);
+ rte_memzone_free((const struct rte_memzone *)mem->zone);
+ mem->zone = NULL;
+ mem->va = NULL;
+ mem->pa = (u64)0;
+
+ return AVF_SUCCESS;
+}
+
+enum avf_status_code
+avf_allocate_virt_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_virt_mem *mem,
+ u32 size)
+{
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ mem->size = size;
+ mem->va = rte_zmalloc("avf", size, 0);
+
+ if (mem->va)
+ return AVF_SUCCESS;
+ else
+ return AVF_ERR_NO_MEMORY;
+}
+
+enum avf_status_code
+avf_free_virt_mem_d(__rte_unused struct avf_hw *hw,
+ struct avf_virt_mem *mem)
+{
+ if (!mem)
+ return AVF_ERR_PARAM;
+
+ rte_free(mem->va);
+ mem->va = NULL;
+
+ return AVF_SUCCESS;
+}
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2017 Intel Corporation
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <inttypes.h>
+#include <rte_byteorder.h>
+#include <rte_common.h>
+
+#include <rte_debug.h>
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+
+#include "avf_log.h"
+#include "base/avf_prototype.h"
+#include "base/avf_adminq_cmd.h"
+#include "base/avf_type.h"
+
+#include "avf.h"
+
+#define MAX_TRY_TIMES 200
+#define ASQ_DELAY_MS 10
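+/* polling for a virtchnl response gives up after 200 * 10 ms = 2 s */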
+
+/* Read data from the admin queue to get a message from the PF driver */
+static enum avf_status_code
+avf_read_msg_from_pf(struct avf_adapter *adapter, uint16_t buf_len,
+ uint8_t *buf)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_arq_event_info event;
+ enum virtchnl_ops opcode;
+ int ret;
+
+ event.buf_len = buf_len;
+ event.msg_buf = buf;
+ ret = avf_clean_arq_element(hw, &event, NULL);
+ /* Can't read any msg from adminQ */
+ if (ret) {
+ PMD_DRV_LOG(DEBUG, "Can't read msg from AQ");
+ return ret;
+ }
+
+ opcode = (enum virtchnl_ops)rte_le_to_cpu_32(event.desc.cookie_high);
+ vf->cmd_retval = (enum virtchnl_status_code)rte_le_to_cpu_32(
+ event.desc.cookie_low);
+
+ PMD_DRV_LOG(DEBUG, "AQ from pf carries opcode %u, retval %d",
+ opcode, vf->cmd_retval);
+
+ if (opcode != vf->pend_cmd)
+ PMD_DRV_LOG(WARNING, "command mismatch, expect %u, get %u",
+ vf->pend_cmd, opcode);
+
+ return AVF_SUCCESS;
+}
+
+static int
+avf_execute_vf_cmd(struct avf_adapter *adapter, struct avf_cmd_info *args)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ enum avf_status_code ret;
+ int err = 0;
+ int i = 0;
+
+ if (_atomic_set_cmd(vf, args->ops))
+ return -1;
+
+ ret = avf_aq_send_msg_to_pf(hw, args->ops, AVF_SUCCESS,
+ args->in_args, args->in_args_size, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to send cmd %d", args->ops);
+ _clear_cmd(vf);
+ return -1;
+ }
+
+ switch (args->ops) {
+ case VIRTCHNL_OP_RESET_VF:
+ /* no need to wait for response */
+ _clear_cmd(vf);
+ break;
+ case VIRTCHNL_OP_VERSION:
+ case VIRTCHNL_OP_GET_VF_RESOURCES:
+ /* for init virtchnl ops, need to poll the response */
+ do {
+ ret = avf_read_msg_from_pf(adapter, args->out_size,
+ args->out_buffer);
+ if (ret == AVF_SUCCESS)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+ } while (i++ < MAX_TRY_TIMES);
+ if (i >= MAX_TRY_TIMES ||
+ vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+ err = -1;
+ PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+ " for cmd %d", vf->cmd_retval, args->ops);
+ }
+ _clear_cmd(vf);
+ break;
+
+ default:
+ /* For other virtchnl ops at runtime,
+ * wait for the command-done flag.
+ */
+ do {
+ if (vf->pend_cmd == VIRTCHNL_OP_UNKNOWN)
+ break;
+ rte_delay_ms(ASQ_DELAY_MS);
+ /* keep polling if no msg was read or only a system event arrived */
+ } while (i++ < MAX_TRY_TIMES);
+ /* If no response was received, clear the command */
+ if (i >= MAX_TRY_TIMES ||
+ vf->cmd_retval != VIRTCHNL_STATUS_SUCCESS) {
+ err = -1;
+ PMD_DRV_LOG(ERR, "No response or return failure (%d)"
+ " for cmd %d", vf->cmd_retval, args->ops);
+ _clear_cmd(vf);
+ }
+ break;
+ }
+
+ return err;
+}
+
+void
+avf_handle_virtchnl_msg(struct rte_eth_dev *dev)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
+ struct avf_arq_event_info info;
+ uint16_t pending, aq_opc;
+ enum virtchnl_ops msg_opc;
+ enum avf_status_code msg_ret;
+ int ret;
+
+ info.buf_len = AVF_AQ_BUF_SZ;
+ if (!vf->aq_resp) {
+ PMD_DRV_LOG(ERR, "Buffer for adminq resp should not be NULL");
+ return;
+ }
+ info.msg_buf = vf->aq_resp;
+
+ pending = 1;
+ while (pending) {
+ ret = avf_clean_arq_element(hw, &info, &pending);
+
+ if (ret != AVF_SUCCESS) {
+ PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
+ "ret: %d", ret);
+ break;
+ }
+ aq_opc = rte_le_to_cpu_16(info.desc.opcode);
+ /* For messages sent from PF to VF, the opcode is stored in
+ * cookie_high of struct avf_aq_desc, while the return code is
+ * stored in cookie_low; both are filled in by the PF driver.
+ */
+ msg_opc = (enum virtchnl_ops)rte_le_to_cpu_32(
+ info.desc.cookie_high);
+ msg_ret = (enum avf_status_code)rte_le_to_cpu_32(
+ info.desc.cookie_low);
+ switch (aq_opc) {
+ case avf_aqc_opc_send_msg_to_vf:
+ if (msg_opc == VIRTCHNL_OP_EVENT) {
+ /* TODO */
+ } else {
+ /* check whether the read message is the expected one */
+ if (msg_opc == vf->pend_cmd) {
+ vf->cmd_retval = msg_ret;
+ /* prevent compiler reordering */
+ rte_compiler_barrier();
+ _clear_cmd(vf);
+ } else
+ PMD_DRV_LOG(ERR, "command mismatch, "
+ "expect %u, get %u",
+ vf->pend_cmd, msg_opc);
+ PMD_DRV_LOG(DEBUG,
+ "adminq response is received,"
+ " opcode = %d", msg_opc);
+ }
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Request %u is not supported yet",
+ aq_opc);
+ break;
+ }
+ }
+}
+
+#define VIRTCHNL_VERSION_MAJOR_START 1
+#define VIRTCHNL_VERSION_MINOR_START 1
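+/* The adaptive VF model requires the PF to support virtchnl 1.1 or newer. */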
+
+/* Check the API version, waiting synchronously until the version is
+ * read back from the admin queue.
+ */
+int
+avf_check_api_version(struct avf_adapter *adapter)
+{
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct virtchnl_version_info version, *pver;
+ struct avf_cmd_info args;
+ int err;
+
+ version.major = VIRTCHNL_VERSION_MAJOR;
+ version.minor = VIRTCHNL_VERSION_MINOR;
+
+ args.ops = VIRTCHNL_OP_VERSION;
+ args.in_args = (uint8_t *)&version;
+ args.in_args_size = sizeof(version);
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ err = avf_execute_vf_cmd(adapter, &args);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Fail to execute command of OP_VERSION");
+ return err;
+ }
+
+ pver = (struct virtchnl_version_info *)args.out_buffer;
+ vf->virtchnl_version = *pver;
+
+ if (vf->virtchnl_version.major < VIRTCHNL_VERSION_MAJOR_START ||
+ (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR_START &&
+ vf->virtchnl_version.minor < VIRTCHNL_VERSION_MINOR_START)) {
+ PMD_INIT_LOG(ERR, "VIRTCHNL API version should not be lower"
+ " than (%u.%u) to support Adaptive VF",
+ VIRTCHNL_VERSION_MAJOR_START,
+ VIRTCHNL_VERSION_MINOR_START);
+ return -1;
+ } else if (vf->virtchnl_version.major > VIRTCHNL_VERSION_MAJOR ||
+ (vf->virtchnl_version.major == VIRTCHNL_VERSION_MAJOR &&
+ vf->virtchnl_version.minor > VIRTCHNL_VERSION_MINOR)) {
+ PMD_INIT_LOG(ERR, "PF/VF API version mismatch:(%u.%u)-(%u.%u)",
+ vf->virtchnl_version.major,
+ vf->virtchnl_version.minor,
+ VIRTCHNL_VERSION_MAJOR,
+ VIRTCHNL_VERSION_MINOR);
+ return -1;
+ }
+
+ PMD_DRV_LOG(DEBUG, "Peer is a supported PF host");
+ return 0;
+}
+
+int
+avf_get_vf_resource(struct avf_adapter *adapter)
+{
+ struct avf_hw *hw = AVF_DEV_PRIVATE_TO_HW(adapter);
+ struct avf_info *vf = AVF_DEV_PRIVATE_TO_VF(adapter);
+ struct avf_cmd_info args;
+ uint32_t caps, len;
+ int err, i;
+
+ args.ops = VIRTCHNL_OP_GET_VF_RESOURCES;
+ args.out_buffer = vf->aq_resp;
+ args.out_size = AVF_AQ_BUF_SZ;
+
+ /* TODO: only basic offload capabilities are requested here;
+ * advanced/optional offload capabilities need to be added later.
+ */
+
+ caps = AVF_BASIC_OFFLOAD_CAPS;
+
+ args.in_args = (uint8_t *)&caps;
+ args.in_args_size = sizeof(caps);
+
+ err = avf_execute_vf_cmd(adapter, &args);
+
+ if (err) {
+ PMD_DRV_LOG(ERR, "Failed to execute command of "
+ "OP_GET_VF_RESOURCE");
+ return -1;
+ }
+
+ len = sizeof(struct virtchnl_vf_resource) +
+ AVF_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource);
+
+ rte_memcpy(vf->vf_res, args.out_buffer,
+ RTE_MIN(args.out_size, len));
+ /* parse the VF config message returned by the PF */
+ avf_parse_hw_config(hw, vf->vf_res);
+ for (i = 0; i < vf->vf_res->num_vsis; i++) {
+ if (vf->vf_res->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
+ vf->vsi_res = &vf->vf_res->vsi_res[i];
+ }
+
+ if (!vf->vsi_res) {
+ PMD_INIT_LOG(ERR, "no LAN VSI found");
+ return -1;
+ }
+
+ vf->vsi.vsi_id = vf->vsi_res->vsi_id;
+ vf->vsi.nb_qps = vf->vsi_res->num_queue_pairs;
+ vf->vsi.adapter = adapter;
+
+ return 0;
+}
--- /dev/null
+DPDK_18.02 {
+
+ local: *;
+};
_LDLIBS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += -lrte_pmd_af_packet
_LDLIBS-$(CONFIG_RTE_LIBRTE_ARK_PMD) += -lrte_pmd_ark
+_LDLIBS-$(CONFIG_RTE_LIBRTE_AVF_PMD) += -lrte_pmd_avf
_LDLIBS-$(CONFIG_RTE_LIBRTE_AVP_PMD) += -lrte_pmd_avp
_LDLIBS-$(CONFIG_RTE_LIBRTE_BNX2X_PMD) += -lrte_pmd_bnx2x -lz
_LDLIBS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += -lrte_pmd_bnxt