#include "vnic_enet.h"
#include "enic.h"
-int enicpmd_logtype_init;
-int enicpmd_logtype_flow;
-
-#define PMD_INIT_LOG(level, fmt, args...) \
- rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
- "%s" fmt "\n", __func__, ##args)
-
-#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
-
/*
* The set of PCI devices this driver supports
*/
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
- { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
- { RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
+ {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET)},
+ {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)},
+ {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_SN)},
{.vendor_id = 0, /* sentinel */},
};
+/* Supported link speeds of production VIC models */
+static const struct vic_speed_capa {
+ uint16_t sub_devid;
+ uint32_t capa;
+} vic_speed_capa_map[] = {
+ { 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
+ { 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
+ { 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
+ { 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
+ { 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
+ { 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
+ { 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
+ { 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
+ { 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
+ { 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
+ { 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
+ { 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
+ { 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
+ { 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
+ { 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G }, /* 1440 Mezz */
+ { 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
+ ETH_LINK_SPEED_40G }, /* 1480 MLOM */
+ { 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
+ { 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
+ { 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
+ { 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
+ { 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
+ { 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
+ { 0, 0 }, /* End marker */
+};
+
#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
+#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
+#define ENIC_DEVARG_GENEVE_OPT "geneve-opt"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
-RTE_INIT(enicpmd_init_log);
-static void
-enicpmd_init_log(void)
-{
- enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
- if (enicpmd_logtype_init >= 0)
- rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
- enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
- if (enicpmd_logtype_flow >= 0)
- rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
-}
+RTE_LOG_REGISTER(enic_pmd_logtype, pmd.net.enic, INFO);
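/*
 * Editor's note (illustration, not part of the patch): the single
 * "pmd.net.enic" logtype registered above replaces the separate init
 * and flow logtypes. Its level can be raised at run time from the EAL
 * command line, without recompiling, e.g.:
 *
 *   --log-level=pmd.net.enic,debug
 *
 * (numeric levels such as 8 are also accepted; the exact spelling of
 * the option varies slightly across DPDK releases).
 */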
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
 enum rte_filter_type filter_type,
 enum rte_filter_op filter_op,
 void *arg)
{
+ struct enic *enic = pmd_priv(dev);
int ret = 0;
ENICPMD_FUNC_TRACE();
+ /*
+ * Currently, when Geneve with options offload is enabled, host
+ * cannot insert match-action rules.
+ */
+ if (enic->geneve_opt_enabled)
+ return -ENOTSUP;
switch (filter_type) {
case RTE_ETH_FILTER_GENERIC:
if (filter_op != RTE_ETH_FILTER_GET)
return -EINVAL;
- *(const void **)arg = &enic_flow_ops;
+ if (enic->flow_filter_mode == FILTER_FLOWMAN)
+ *(const void **)arg = &enic_fm_flow_ops;
+ else
+ *(const void **)arg = &enic_flow_ops;
break;
case RTE_ETH_FILTER_FDIR:
ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __rte_unused const struct rte_eth_txconf *tx_conf)
+ const struct rte_eth_txconf *tx_conf)
{
int ret;
struct enic *enic = pmd_priv(eth_dev);
+ struct vnic_wq *wq;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
RTE_ASSERT(queue_idx < enic->conf_wq_count);
- eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
+ wq = &enic->wq[queue_idx];
+ wq->offloads = tx_conf->offloads |
+ eth_dev->data->dev_conf.txmode.offloads;
+ eth_dev->data->tx_queues[queue_idx] = (void *)wq;
ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
if (ret) {
return ret;
}
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ eth_dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_RSS_HASH;
+
+ enic->mc_count = 0;
enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CHECKSUM);
/* All vlan offload masks to apply the current settings */
static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
__rte_unused int wait_to_complete)
{
- struct enic *enic = pmd_priv(eth_dev);
-
ENICPMD_FUNC_TRACE();
- return enic_link_update(enic);
+ return enic_link_update(eth_dev);
}
static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
return enic_dev_stats_get(enic, stats);
}
-static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
+static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
- enic_dev_stats_clear(enic);
+ return enic_dev_stats_clear(enic);
+}
+
+static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
+{
+ const struct vic_speed_capa *m;
+ struct rte_pci_device *pdev;
+ uint16_t id;
+
+ pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ id = pdev->id.subsystem_device_id;
+ for (m = vic_speed_capa_map; m->sub_devid != 0; m++) {
+ if (m->sub_devid == id)
+ return m->capa;
+ }
+ /* 1300 and later models are at least 40G */
+ if (id >= 0x0100)
+ return ETH_LINK_SPEED_40G;
+ /* VFs have subsystem id 0, check device id */
+ if (id == 0) {
+ /* Newer VF implies at least 40G model */
+ if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
+ return ETH_LINK_SPEED_40G;
+ }
+ return ETH_LINK_SPEED_10G;
}
-static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
+static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *device_info)
{
struct enic *enic = pmd_priv(eth_dev);
* ignoring vNIC mtu.
*/
device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
- device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
+ device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
+ device_info->min_mtu = ENIC_MIN_MTU;
+ device_info->max_mtu = enic->max_mtu;
device_info->rx_offload_capa = enic->rx_offload_capa;
device_info->tx_offload_capa = enic->tx_offload_capa;
+ device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
ENIC_DEFAULT_TX_RING_SIZE),
.nb_queues = ENIC_DEFAULT_TX_RINGS,
};
+ device_info->speed_capa = speed_capa_from_pci_id(eth_dev);
+
+ return 0;
}
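/*
 * Usage sketch (editor's illustration, not part of the patch): an
 * application reads the speed mask computed by speed_capa_from_pci_id()
 * through the generic ethdev info call. Error handling is trimmed.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void example_print_speed_capa(uint16_t port_id)
{
	struct rte_eth_dev_info info;

	if (rte_eth_dev_info_get(port_id, &info) != 0)
		return;
	if (info.speed_capa & ETH_LINK_SPEED_100G)
		printf("port %u is a 100G-capable VIC\n", port_id);
}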
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
RTE_PTYPE_L4_NONFRAG,
RTE_PTYPE_UNKNOWN
};
+ static const uint32_t ptypes_overlay[] = {
+ RTE_PTYPE_L2_ETHER,
+ RTE_PTYPE_L2_ETHER_VLAN,
+ RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_L4_TCP,
+ RTE_PTYPE_L4_UDP,
+ RTE_PTYPE_L4_FRAG,
+ RTE_PTYPE_L4_NONFRAG,
+ RTE_PTYPE_TUNNEL_GRENAT,
+ RTE_PTYPE_INNER_L2_ETHER,
+ RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
+ RTE_PTYPE_INNER_L4_TCP,
+ RTE_PTYPE_INNER_L4_UDP,
+ RTE_PTYPE_INNER_L4_FRAG,
+ RTE_PTYPE_INNER_L4_NONFRAG,
+ RTE_PTYPE_UNKNOWN
+ };
- if (dev->rx_pkt_burst == enic_recv_pkts)
- return ptypes;
+ if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
+ dev->rx_pkt_burst != NULL) {
+ struct enic *enic = pmd_priv(dev);
+ if (enic->overlay_offload)
+ return ptypes_overlay;
+ else
+ return ptypes;
+ }
return NULL;
}
-static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
+static int enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
+ int ret;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return;
+ return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
enic->promisc = 1;
- enic_add_packet_filter(enic);
+ ret = enic_add_packet_filter(enic);
+ if (ret != 0)
+ enic->promisc = 0;
+
+ return ret;
}
-static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
+static int enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
+ int ret;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return;
+ return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
enic->promisc = 0;
- enic_add_packet_filter(enic);
+ ret = enic_add_packet_filter(enic);
+ if (ret != 0)
+ enic->promisc = 1;
+
+ return ret;
}
-static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
+static int enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
+ int ret;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return;
+ return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
enic->allmulti = 1;
- enic_add_packet_filter(enic);
+ ret = enic_add_packet_filter(enic);
+ if (ret != 0)
+ enic->allmulti = 0;
+
+ return ret;
}
-static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
+static int enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
struct enic *enic = pmd_priv(eth_dev);
+ int ret;
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return;
+ return -E_RTE_SECONDARY;
ENICPMD_FUNC_TRACE();
enic->allmulti = 0;
- enic_add_packet_filter(enic);
+ ret = enic_add_packet_filter(enic);
+ if (ret != 0)
+ enic->allmulti = 1;
+
+ return ret;
}
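/*
 * Usage sketch (editor's illustration, not part of the patch): with the
 * int-returning dev_ops above, a failure to program the packet filter
 * propagates to the application instead of being silently dropped, and
 * the driver rolls back its promisc/allmulti flag on error.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void example_enable_promisc(uint16_t port_id)
{
	int ret = rte_eth_promiscuous_enable(port_id);

	if (ret != 0)
		printf("promiscuous enable failed on port %u: %d\n",
		       port_id, ret);
}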
static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
- struct ether_addr *mac_addr,
+ struct rte_ether_addr *mac_addr,
__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
struct enic *enic = pmd_priv(eth_dev);
}
static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
- struct ether_addr *addr)
+ struct rte_ether_addr *addr)
{
struct enic *enic = pmd_priv(eth_dev);
int ret;
return enic_set_mac_address(enic, addr->addr_bytes);
}
+static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add)
+{
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+
+ rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
+ ENICPMD_LOG(DEBUG, " %s address %s\n",
+ add ? "add" : "remove", mac_str);
+}
+
+static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
+ struct rte_ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
+ struct rte_ether_addr *addr;
+ uint32_t i, j;
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+
+ /* Validate the given addresses first */
+ for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
+ addr = &mc_addr_set[i];
+ if (!rte_is_multicast_ether_addr(addr) ||
+ rte_is_broadcast_ether_addr(addr)) {
+ rte_ether_format_addr(mac_str,
+ RTE_ETHER_ADDR_FMT_SIZE, addr);
+ ENICPMD_LOG(ERR, " invalid multicast address %s\n",
+ mac_str);
+ return -EINVAL;
+ }
+ }
+
+ /* Flush all if requested */
+ if (nb_mc_addr == 0 || mc_addr_set == NULL) {
+ ENICPMD_LOG(DEBUG, " flush multicast addresses\n");
+ for (i = 0; i < enic->mc_count; i++) {
+ addr = &enic->mc_addrs[i];
+ debug_log_add_del_addr(addr, false);
+ ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
+ if (ret)
+ return ret;
+ }
+ enic->mc_count = 0;
+ return 0;
+ }
+
+ if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
+ ENICPMD_LOG(ERR, " too many multicast addresses: max=%d\n",
+ ENIC_MULTICAST_PERFECT_FILTERS);
+ return -ENOSPC;
+ }
+ /*
+ * devcmd is slow, so apply the difference instead of flushing and
+ * adding everything.
+ * 1. Delete addresses on the NIC but not on the host
+ */
+ for (i = 0; i < enic->mc_count; i++) {
+ addr = &enic->mc_addrs[i];
+ for (j = 0; j < nb_mc_addr; j++) {
+ if (rte_is_same_ether_addr(addr, &mc_addr_set[j]))
+ break;
+ }
+ if (j < nb_mc_addr)
+ continue;
+ debug_log_add_del_addr(addr, false);
+ ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
+ if (ret)
+ return ret;
+ }
+ /* 2. Add addresses on the host but not on the NIC */
+ for (i = 0; i < nb_mc_addr; i++) {
+ addr = &mc_addr_set[i];
+ for (j = 0; j < enic->mc_count; j++) {
+ if (rte_is_same_ether_addr(addr, &enic->mc_addrs[j]))
+ break;
+ }
+ if (j < enic->mc_count)
+ continue;
+ debug_log_add_del_addr(addr, true);
+ ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
+ if (ret)
+ return ret;
+ }
+ /* Keep a copy so we can flush/apply it later on. */
+ memcpy(enic->mc_addrs, mc_addr_set,
+ nb_mc_addr * sizeof(struct rte_ether_addr));
+ enic->mc_count = nb_mc_addr;
+ return 0;
+}
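/*
 * Usage sketch (editor's illustration, not part of the patch): replace
 * the port's multicast filter list with two well-known IPv4 multicast
 * MACs. The PMD diffs the new list against enic->mc_addrs and issues
 * devcmds only for addresses that actually changed.
 */
#include <rte_ethdev.h>

static int example_set_mc_addrs(uint16_t port_id)
{
	struct rte_ether_addr addrs[] = {
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
		{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
	};

	return rte_eth_dev_set_mc_addr_list(port_id, addrs, RTE_DIM(addrs));
}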
+
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct enic *enic = pmd_priv(eth_dev);
ENICPMD_FUNC_TRACE();
sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
- data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
+ data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic);
rq_sop = &enic->rq[sop_queue_idx];
rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
qinfo->mp = rq_sop->mp;
ENICPMD_FUNC_TRACE();
qinfo->nb_desc = wq->ring.desc_count;
memset(&qinfo->conf, 0, sizeof(qinfo->conf));
- qinfo->conf.offloads = enic->tx_offload_capa;
+ qinfo->conf.offloads = wq->offloads;
/* tx_thresh, and all the other fields are not applicable for enic */
}
return 0;
}
+static int udp_tunnel_common_check(struct enic *enic,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
+ return -ENOTSUP;
+ if (!enic->overlay_offload) {
+ ENICPMD_LOG(DEBUG, " vxlan (overlay offload) is not "
+ "supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int update_vxlan_port(struct enic *enic, uint16_t port)
+{
+ if (vnic_dev_overlay_offload_cfg(enic->vdev,
+ OVERLAY_CFG_VXLAN_PORT_UPDATE,
+ port)) {
+ ENICPMD_LOG(DEBUG, " failed to update vxlan port\n");
+ return -EINVAL;
+ }
+ ENICPMD_LOG(DEBUG, " updated vxlan port to %u\n", port);
+ enic->vxlan_port = port;
+ return 0;
+}
+
+static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * The NIC has 1 configurable VXLAN port number. "Adding" a new port
+ * number replaces it.
+ */
+ if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
+ ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, tnl->udp_port);
+}
+
+static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
+ struct rte_eth_udp_tunnel *tnl)
+{
+ struct enic *enic = pmd_priv(eth_dev);
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ ret = udp_tunnel_common_check(enic, tnl);
+ if (ret)
+ return ret;
+ /*
+ * Clear the previously set port number and restore the
+ * hardware default port number. Some drivers disable VXLAN
+ * offloads when there are no configured port numbers. But
+ * enic does not do that as VXLAN is part of overlay offload,
+ * which is tied to inner RSS and TSO.
+ */
+ if (tnl->udp_port != enic->vxlan_port) {
+ ENICPMD_LOG(DEBUG, " %u is not a configured vxlan port\n",
+ tnl->udp_port);
+ return -EINVAL;
+ }
+ return update_vxlan_port(enic, RTE_VXLAN_DEFAULT_PORT);
+}
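/*
 * Usage sketch (editor's illustration, not part of the patch): point the
 * NIC's single VXLAN match port at 8472, the pre-IANA port still used by
 * some deployments. On enic, "adding" a port replaces the previous one;
 * deleting it restores the default 4789 (RTE_VXLAN_DEFAULT_PORT).
 */
#include <rte_ethdev.h>

static int example_set_vxlan_port(uint16_t port_id)
{
	struct rte_eth_udp_tunnel tnl = {
		.udp_port = 8472,
		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
	};

	return rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
}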
+
+static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
+ char *fw_version, size_t fw_size)
+{
+ struct vnic_devcmd_fw_info *info;
+ struct enic *enic;
+ int ret;
+
+ ENICPMD_FUNC_TRACE();
+ if (fw_version == NULL || fw_size <= 0)
+ return -EINVAL;
+ enic = pmd_priv(eth_dev);
+ ret = vnic_dev_fw_info(enic->vdev, &info);
+ if (ret)
+ return ret;
+ snprintf(fw_version, fw_size, "%s %s",
+ info->fw_version, info->fw_build);
+ fw_version[fw_size - 1] = '\0';
+ return 0;
+}
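/*
 * Usage sketch (editor's illustration, not part of the patch): retrieve
 * the "<version> <build>" string composed above through the generic
 * ethdev API.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void example_print_fw_version(uint16_t port_id)
{
	char fw[64];

	if (rte_eth_dev_fw_version_get(port_id, fw, sizeof(fw)) == 0)
		printf("port %u firmware: %s\n", port_id, fw);
}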
+
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
.dev_configure = enicpmd_dev_configure,
.dev_start = enicpmd_dev_start,
.mac_addr_add = enicpmd_add_mac_addr,
.mac_addr_remove = enicpmd_remove_mac_addr,
.mac_addr_set = enicpmd_set_mac_addr,
+ .set_mc_addr_list = enicpmd_set_mc_addr_list,
.filter_ctrl = enicpmd_dev_filter_ctrl,
.reta_query = enicpmd_dev_rss_reta_query,
.reta_update = enicpmd_dev_rss_reta_update,
.rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
.rss_hash_update = enicpmd_dev_rss_hash_update,
+ .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
+ .fw_version_get = enicpmd_dev_fw_version_get,
};
-static int enic_parse_disable_overlay(__rte_unused const char *key,
- const char *value,
- void *opaque)
+static int enic_parse_zero_one(const char *key,
+ const char *value,
+ void *opaque)
{
struct enic *enic;
+ bool b;
enic = (struct enic *)opaque;
if (strcmp(value, "0") == 0) {
- enic->disable_overlay = false;
+ b = false;
} else if (strcmp(value, "1") == 0) {
- enic->disable_overlay = true;
+ b = true;
} else {
- dev_err(enic, "Invalid value for " ENIC_DEVARG_DISABLE_OVERLAY
- ": expected=0|1 given=%s\n", value);
+ dev_err(enic, "Invalid value for %s"
+ ": expected=0|1 given=%s\n", key, value);
return -EINVAL;
}
+ if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
+ enic->disable_overlay = b;
+ if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
+ enic->enable_avx2_rx = b;
+ if (strcmp(key, ENIC_DEVARG_GENEVE_OPT) == 0)
+ enic->geneve_opt_request = b;
return 0;
}
{
static const char *const valid_keys[] = {
ENIC_DEVARG_DISABLE_OVERLAY,
+ ENIC_DEVARG_ENABLE_AVX2_RX,
+ ENIC_DEVARG_GENEVE_OPT,
ENIC_DEVARG_IG_VLAN_REWRITE,
NULL};
struct enic *enic = pmd_priv(dev);
ENICPMD_FUNC_TRACE();
enic->disable_overlay = false;
+ enic->enable_avx2_rx = false;
+ enic->geneve_opt_request = false;
enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
if (!dev->device->devargs)
return 0;
if (!kvlist)
return -EINVAL;
if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
- enic_parse_disable_overlay, enic) < 0 ||
+ enic_parse_zero_one, enic) < 0 ||
+ rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
+ enic_parse_zero_one, enic) < 0 ||
+ rte_kvargs_process(kvlist, ENIC_DEVARG_GENEVE_OPT,
+ enic_parse_zero_one, enic) < 0 ||
rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
enic_parse_ig_vlan_rewrite, enic) < 0) {
rte_kvargs_free(kvlist);
return 0;
}
-struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
* It returns 0 on success.
*/
ENICPMD_FUNC_TRACE();
- enic->port_id = eth_dev->data->port_id;
- enic->rte_dev = eth_dev;
eth_dev->dev_ops = &enicpmd_eth_dev_ops;
eth_dev->rx_pkt_burst = &enic_recv_pkts;
eth_dev->tx_pkt_burst = &enic_xmit_pkts;
eth_dev->tx_pkt_prepare = &enic_prep_pkts;
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+ enic_pick_tx_handler(eth_dev);
+ enic_pick_rx_handler(eth_dev);
+ return 0;
+ }
+ /* Only the primary sets up adapter and other data in shared memory */
+ enic->port_id = eth_dev->data->port_id;
+ enic->rte_dev = eth_dev;
+ enic->dev_data = eth_dev->data;
+ /* Let rte_eth_dev_close() release the port resources */
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;
pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
rte_eth_copy_pci_info(eth_dev, pdev);
.remove = eth_enic_pci_remove,
};
+int dev_is_enic(struct rte_eth_dev *dev)
+{
+ return dev->device->driver == &rte_enic_pmd.driver;
+}
+
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
+ ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
+ ENIC_DEVARG_GENEVE_OPT "=0|1 "
ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
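/*
 * Editor's note (illustration, not part of the patch): the devargs above
 * are passed per device on the EAL command line. For a VIC at the
 * hypothetical address 0000:af:00.0 (the device-allow flag is spelled
 * -w or -a depending on the DPDK release):
 *
 *   testpmd -w 0000:af:00.0,enable-avx2-rx=1,ig-vlan-rewrite=untag -- -i
 */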