1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
3 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
11 #include <rte_bus_pci.h>
12 #include <ethdev_driver.h>
13 #include <ethdev_pci.h>
14 #include <rte_geneve.h>
15 #include <rte_kvargs.h>
16 #include <rte_string_fns.h>
18 #include "vnic_intr.h"
22 #include "vnic_enet.h"
26 * The set of PCI devices this driver supports
28 #define CISCO_PCI_VENDOR_ID 0x1137
29 static const struct rte_pci_id pci_id_enic_map[] = {
30 {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET)},
31 {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)},
32 {RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_SN)},
33 {.vendor_id = 0, /* sentinel */},
36 /* Supported link speeds of production VIC models */
37 static const struct vic_speed_capa {
40 } vic_speed_capa_map[] = {
41 { 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
42 { 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
43 { 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
44 { 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
45 { 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
46 { 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
47 { 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
48 { 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
49 { 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
50 { 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
51 { 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
52 { 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
53 { 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
54 { 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
55 { 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
56 ETH_LINK_SPEED_40G }, /* 1440 Mezz */
57 { 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
58 ETH_LINK_SPEED_40G }, /* 1480 MLOM */
59 { 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
60 { 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
61 { 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
62 { 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
63 { 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
64 { 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
65 { 0, 0 }, /* End marker */
68 #define ENIC_DEVARG_CQ64 "cq64"
69 #define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
70 #define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
71 #define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
72 #define ENIC_DEVARG_REPRESENTOR "representor"
74 RTE_LOG_REGISTER_DEFAULT(enic_pmd_logtype, INFO);
77 enicpmd_dev_flow_ops_get(struct rte_eth_dev *dev,
78 const struct rte_flow_ops **ops)
80 struct enic *enic = pmd_priv(dev);
84 if (enic->flow_filter_mode == FILTER_FLOWMAN)
85 *ops = &enic_fm_flow_ops;
87 *ops = &enic_flow_ops;
91 static void enicpmd_dev_tx_queue_release(void *txq)
95 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
101 static int enicpmd_dev_setup_intr(struct enic *enic)
106 ENICPMD_FUNC_TRACE();
108 /* Are we done with the init of all the queues? */
109 for (index = 0; index < enic->cq_count; index++) {
110 if (!enic->cq[index].ctrl)
113 if (enic->cq_count != index)
115 for (index = 0; index < enic->wq_count; index++) {
116 if (!enic->wq[index].ctrl)
119 if (enic->wq_count != index)
121 /* Check only the start of packet (SOP) RQs; data RQs are used only when scatter is enabled. */
122 for (index = 0; index < enic->rq_count; index++) {
123 if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
126 if (enic->rq_count != index)
129 ret = enic_alloc_intr_resources(enic);
131 dev_err(enic, "alloc intr failed\n");
134 enic_init_vnic_resources(enic);
136 ret = enic_setup_finish(enic);
138 dev_err(enic, "setup could not be finished\n");
143 static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
146 unsigned int socket_id,
147 const struct rte_eth_txconf *tx_conf)
150 struct enic *enic = pmd_priv(eth_dev);
153 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
154 return -E_RTE_SECONDARY;
156 ENICPMD_FUNC_TRACE();
157 RTE_ASSERT(queue_idx < enic->conf_wq_count);
158 wq = &enic->wq[queue_idx];
159 wq->offloads = tx_conf->offloads |
160 eth_dev->data->dev_conf.txmode.offloads;
161 eth_dev->data->tx_queues[queue_idx] = (void *)wq;
163 ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
165 dev_err(enic, "error in allocating wq\n");
169 return enicpmd_dev_setup_intr(enic);
172 static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
175 struct enic *enic = pmd_priv(eth_dev);
177 ENICPMD_FUNC_TRACE();
179 enic_start_wq(enic, queue_idx);
184 static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
188 struct enic *enic = pmd_priv(eth_dev);
190 ENICPMD_FUNC_TRACE();
192 ret = enic_stop_wq(enic, queue_idx);
194 dev_err(enic, "error in stopping wq %d\n", queue_idx);
199 static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
202 struct enic *enic = pmd_priv(eth_dev);
204 ENICPMD_FUNC_TRACE();
206 enic_start_rq(enic, queue_idx);
211 static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
215 struct enic *enic = pmd_priv(eth_dev);
217 ENICPMD_FUNC_TRACE();
219 ret = enic_stop_rq(enic, queue_idx);
221 dev_err(enic, "error in stopping rq %d\n", queue_idx);
226 static void enicpmd_dev_rx_queue_release(void *rxq)
228 ENICPMD_FUNC_TRACE();
230 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
236 static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
237 uint16_t rx_queue_id)
239 struct enic *enic = pmd_priv(dev);
240 uint32_t queue_count = 0;
246 rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
247 cq = &enic->cq[enic_cq_rq(enic, rq_num)];
248 cq_idx = cq->to_clean;
250 cq_tail = ioread32(&cq->ctrl->cq_tail);
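/* cq_tail is the completion index last written by the NIC. Unwrap it
 * when it has lapped to_clean so the subtraction below yields the
 * number of completions not yet processed by the driver.
 */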
252 if (cq_tail < cq_idx)
253 cq_tail += cq->ring.desc_count;
255 queue_count = cq_tail - cq_idx;
260 static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
263 unsigned int socket_id,
264 const struct rte_eth_rxconf *rx_conf,
265 struct rte_mempool *mp)
268 struct enic *enic = pmd_priv(eth_dev);
270 ENICPMD_FUNC_TRACE();
272 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
273 return -E_RTE_SECONDARY;
274 RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
275 eth_dev->data->rx_queues[queue_idx] =
276 (void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
278 ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
279 rx_conf->rx_free_thresh);
281 dev_err(enic, "error in allocating rq\n");
285 return enicpmd_dev_setup_intr(enic);
288 static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
290 struct enic *enic = pmd_priv(eth_dev);
293 ENICPMD_FUNC_TRACE();
295 offloads = eth_dev->data->dev_conf.rxmode.offloads;
296 if (mask & ETH_VLAN_STRIP_MASK) {
297 if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
298 enic->ig_vlan_strip_en = 1;
300 enic->ig_vlan_strip_en = 0;
303 return enic_set_vlan_strip(enic);
306 static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
310 struct enic *enic = pmd_priv(eth_dev);
312 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
313 return -E_RTE_SECONDARY;
315 ENICPMD_FUNC_TRACE();
316 ret = enic_set_vnic_res(enic);
318 dev_err(enic, "Set vNIC resource num failed, aborting\n");
322 if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
323 eth_dev->data->dev_conf.rxmode.offloads |=
324 DEV_RX_OFFLOAD_RSS_HASH;
327 enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
328 DEV_RX_OFFLOAD_CHECKSUM);
329 /* Apply the current settings to all VLAN offload types */
330 mask = ETH_VLAN_STRIP_MASK |
331 ETH_VLAN_FILTER_MASK |
332 ETH_VLAN_EXTEND_MASK;
333 ret = enicpmd_vlan_offload_set(eth_dev, mask);
335 dev_err(enic, "Failed to configure VLAN offloads\n");
339 * Initialize RSS with the default reta and key. If the user key is
340 * given (rx_adv_conf.rss_conf.rss_key), the driver uses it instead of
341 * the default key.
342 */
343 return enic_init_rss_nic_cfg(enic);
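/*
 * A minimal application-side sketch of the user-key path noted above,
 * assuming the generic ethdev API; port_id, queue counts, and the key
 * bytes are illustrative. The key must be ENIC_RSS_HASH_KEY_SIZE (40)
 * bytes:
 *
 *   static uint8_t app_rss_key[40];   // filled with 40 key bytes
 *   struct rte_eth_conf conf = { 0 };
 *   conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *   conf.rx_adv_conf.rss_conf.rss_key = app_rss_key;
 *   conf.rx_adv_conf.rss_conf.rss_key_len = sizeof(app_rss_key);
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */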
347 * It returns 0 on success.
349 static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
351 struct enic *enic = pmd_priv(eth_dev);
353 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
354 return -E_RTE_SECONDARY;
356 ENICPMD_FUNC_TRACE();
357 return enic_enable(enic);
361 * Stop device: disable rx and tx functions to allow for reconfiguring.
363 static int enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
365 struct rte_eth_link link;
366 struct enic *enic = pmd_priv(eth_dev);
368 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
371 ENICPMD_FUNC_TRACE();
374 memset(&link, 0, sizeof(link));
375 rte_eth_linkstatus_set(eth_dev, &link);
383 static int enicpmd_dev_close(struct rte_eth_dev *eth_dev)
385 struct enic *enic = pmd_priv(eth_dev);
387 ENICPMD_FUNC_TRACE();
388 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
396 static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
397 __rte_unused int wait_to_complete)
399 ENICPMD_FUNC_TRACE();
400 return enic_link_update(eth_dev);
403 static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
404 struct rte_eth_stats *stats)
406 struct enic *enic = pmd_priv(eth_dev);
408 ENICPMD_FUNC_TRACE();
409 return enic_dev_stats_get(enic, stats);
412 static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
414 struct enic *enic = pmd_priv(eth_dev);
416 ENICPMD_FUNC_TRACE();
417 return enic_dev_stats_clear(enic);
420 static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
422 const struct vic_speed_capa *m;
423 struct rte_pci_device *pdev;
426 pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
427 id = pdev->id.subsystem_device_id;
428 for (m = vic_speed_capa_map; m->sub_devid != 0; m++) {
429 if (m->sub_devid == id)
432 /* 1300 and later models are at least 40G */
434 return ETH_LINK_SPEED_40G;
435 /* VFs have subsystem id 0, check device id */
437 /* Newer VF implies at least 40G model */
438 if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
439 return ETH_LINK_SPEED_40G;
441 return ETH_LINK_SPEED_10G;
444 static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
445 struct rte_eth_dev_info *device_info)
447 struct enic *enic = pmd_priv(eth_dev);
449 ENICPMD_FUNC_TRACE();
450 /* Scattered Rx uses two receive queues per Rx queue exposed to DPDK */
451 device_info->max_rx_queues = enic->conf_rq_count / 2;
452 device_info->max_tx_queues = enic->conf_wq_count;
453 device_info->min_rx_bufsize = ENIC_MIN_MTU;
454 /* "Max" mtu is not a typo. HW receives packet sizes up to the
455 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
456 * a hint to the driver to size receive buffers accordingly so that
457 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
458 * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
459 * ignoring vNIC mtu.
460 */
461 device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
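/*
 * A usage sketch of the sizing rule above, assuming the generic ethdev
 * API and that enic_mtu_to_max_rx_pktlen() adds the L2 header length to
 * the MTU; the 9000-byte MTU is an example only:
 *
 *   struct rte_eth_conf conf = { 0 };
 *   conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 *   conf.rxmode.max_rx_pkt_len = 9000 + RTE_ETHER_HDR_LEN;
 *   rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */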
462 device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
463 device_info->min_mtu = ENIC_MIN_MTU;
464 device_info->max_mtu = enic->max_mtu;
465 device_info->rx_offload_capa = enic->rx_offload_capa;
466 device_info->tx_offload_capa = enic->tx_offload_capa;
467 device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
468 device_info->default_rxconf = (struct rte_eth_rxconf) {
469 .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
471 device_info->reta_size = enic->reta_size;
472 device_info->hash_key_size = enic->hash_key_size;
473 device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
474 device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
475 .nb_max = enic->config.rq_desc_count,
476 .nb_min = ENIC_MIN_RQ_DESCS,
477 .nb_align = ENIC_ALIGN_DESCS,
479 device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
480 .nb_max = enic->config.wq_desc_count,
481 .nb_min = ENIC_MIN_WQ_DESCS,
482 .nb_align = ENIC_ALIGN_DESCS,
483 .nb_seg_max = ENIC_TX_XMIT_MAX,
484 .nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
486 device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
487 .burst_size = ENIC_DEFAULT_RX_BURST,
488 .ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
489 ENIC_DEFAULT_RX_RING_SIZE),
490 .nb_queues = ENIC_DEFAULT_RX_RINGS,
492 device_info->default_txportconf = (struct rte_eth_dev_portconf) {
493 .burst_size = ENIC_DEFAULT_TX_BURST,
494 .ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
495 ENIC_DEFAULT_TX_RING_SIZE),
496 .nb_queues = ENIC_DEFAULT_TX_RINGS,
498 device_info->speed_capa = speed_capa_from_pci_id(eth_dev);
503 static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
505 static const uint32_t ptypes[] = {
507 RTE_PTYPE_L2_ETHER_VLAN,
508 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
509 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
513 RTE_PTYPE_L4_NONFRAG,
516 static const uint32_t ptypes_overlay[] = {
518 RTE_PTYPE_L2_ETHER_VLAN,
519 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
520 RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
524 RTE_PTYPE_L4_NONFRAG,
525 RTE_PTYPE_TUNNEL_GRENAT,
526 RTE_PTYPE_INNER_L2_ETHER,
527 RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
528 RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
529 RTE_PTYPE_INNER_L4_TCP,
530 RTE_PTYPE_INNER_L4_UDP,
531 RTE_PTYPE_INNER_L4_FRAG,
532 RTE_PTYPE_INNER_L4_NONFRAG,
536 if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
537 dev->rx_pkt_burst != NULL) {
538 struct enic *enic = pmd_priv(dev);
539 if (enic->overlay_offload)
540 return ptypes_overlay;
547 static int enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
549 struct enic *enic = pmd_priv(eth_dev);
552 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
553 return -E_RTE_SECONDARY;
555 ENICPMD_FUNC_TRACE();
558 ret = enic_add_packet_filter(enic);
565 static int enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
567 struct enic *enic = pmd_priv(eth_dev);
570 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
571 return -E_RTE_SECONDARY;
573 ENICPMD_FUNC_TRACE();
575 ret = enic_add_packet_filter(enic);
582 static int enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
584 struct enic *enic = pmd_priv(eth_dev);
587 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
588 return -E_RTE_SECONDARY;
590 ENICPMD_FUNC_TRACE();
592 ret = enic_add_packet_filter(enic);
599 static int enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
601 struct enic *enic = pmd_priv(eth_dev);
604 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
605 return -E_RTE_SECONDARY;
607 ENICPMD_FUNC_TRACE();
609 ret = enic_add_packet_filter(enic);
616 static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
617 struct rte_ether_addr *mac_addr,
618 __rte_unused uint32_t index, __rte_unused uint32_t pool)
620 struct enic *enic = pmd_priv(eth_dev);
622 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
623 return -E_RTE_SECONDARY;
625 ENICPMD_FUNC_TRACE();
626 return enic_set_mac_address(enic, mac_addr->addr_bytes);
629 static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
631 struct enic *enic = pmd_priv(eth_dev);
633 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
636 ENICPMD_FUNC_TRACE();
637 if (enic_del_mac_address(enic, index))
638 dev_err(enic, "del mac addr failed\n");
641 static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
642 struct rte_ether_addr *addr)
644 struct enic *enic = pmd_priv(eth_dev);
647 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
648 return -E_RTE_SECONDARY;
650 ENICPMD_FUNC_TRACE();
651 ret = enic_del_mac_address(enic, 0);
654 return enic_set_mac_address(enic, addr->addr_bytes);
657 static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add)
659 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
661 rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
662 ENICPMD_LOG(DEBUG, " %s address %s\n",
663 add ? "add" : "remove", mac_str);
666 static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
667 struct rte_ether_addr *mc_addr_set,
670 struct enic *enic = pmd_priv(eth_dev);
671 char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
672 struct rte_ether_addr *addr;
676 ENICPMD_FUNC_TRACE();
678 /* Validate the given addresses first */
679 for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
680 addr = &mc_addr_set[i];
681 if (!rte_is_multicast_ether_addr(addr) ||
682 rte_is_broadcast_ether_addr(addr)) {
683 rte_ether_format_addr(mac_str,
684 RTE_ETHER_ADDR_FMT_SIZE, addr);
685 ENICPMD_LOG(ERR, " invalid multicast address %s\n",
691 /* Flush all if requested */
692 if (nb_mc_addr == 0 || mc_addr_set == NULL) {
693 ENICPMD_LOG(DEBUG, " flush multicast addresses\n");
694 for (i = 0; i < enic->mc_count; i++) {
695 addr = &enic->mc_addrs[i];
696 debug_log_add_del_addr(addr, false);
697 ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
705 if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
706 ENICPMD_LOG(ERR, " too many multicast addresses: max=%d\n",
707 ENIC_MULTICAST_PERFECT_FILTERS);
711 * devcmd is slow, so apply the difference instead of flushing and
712 * re-adding everything:
713 * 1. Delete addresses on the NIC but not on the host
715 for (i = 0; i < enic->mc_count; i++) {
716 addr = &enic->mc_addrs[i];
717 for (j = 0; j < nb_mc_addr; j++) {
718 if (rte_is_same_ether_addr(addr, &mc_addr_set[j]))
723 debug_log_add_del_addr(addr, false);
724 ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
728 /* 2. Add addresses on the host but not on the NIC */
729 for (i = 0; i < nb_mc_addr; i++) {
730 addr = &mc_addr_set[i];
731 for (j = 0; j < enic->mc_count; j++) {
732 if (rte_is_same_ether_addr(addr, &enic->mc_addrs[j]))
735 if (j < enic->mc_count)
737 debug_log_add_del_addr(addr, true);
738 ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
742 /* Keep a copy so we can flush/apply later on. */
743 memcpy(enic->mc_addrs, mc_addr_set,
744 nb_mc_addr * sizeof(struct rte_ether_addr));
745 enic->mc_count = nb_mc_addr;
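/*
 * A caller-side sketch of this diff-apply path, assuming the generic
 * ethdev multicast API; the addresses and port_id are illustrative:
 *
 *   struct rte_ether_addr mc[2];
 *   rte_ether_unformat_addr("01:00:5e:00:00:01", &mc[0]);
 *   rte_ether_unformat_addr("01:00:5e:00:00:02", &mc[1]);
 *   rte_eth_dev_set_mc_addr_list(port_id, mc, 2);
 *   rte_eth_dev_set_mc_addr_list(port_id, NULL, 0);   // flush all
 */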
749 static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
751 struct enic *enic = pmd_priv(eth_dev);
753 ENICPMD_FUNC_TRACE();
754 return enic_set_mtu(enic, mtu);
757 static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
758 struct rte_eth_rss_reta_entry64
762 struct enic *enic = pmd_priv(dev);
763 uint16_t i, idx, shift;
765 ENICPMD_FUNC_TRACE();
766 if (reta_size != ENIC_RSS_RETA_SIZE) {
767 dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
768 reta_size, ENIC_RSS_RETA_SIZE);
772 for (i = 0; i < reta_size; i++) {
773 idx = i / RTE_RETA_GROUP_SIZE;
774 shift = i % RTE_RETA_GROUP_SIZE;
775 if (reta_conf[idx].mask & (1ULL << shift))
776 reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
777 enic->rss_cpu.cpu[i / 4].b[i % 4]);
783 static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
784 struct rte_eth_rss_reta_entry64
788 struct enic *enic = pmd_priv(dev);
789 union vnic_rss_cpu rss_cpu;
790 uint16_t i, idx, shift;
792 ENICPMD_FUNC_TRACE();
793 if (reta_size != ENIC_RSS_RETA_SIZE) {
794 dev_err(enic, "reta_update: wrong reta_size. given=%u"
796 reta_size, ENIC_RSS_RETA_SIZE);
800 * Start with the current reta and modify it per reta_conf, as we
801 * need to push the entire reta even if we only modify one entry.
803 rss_cpu = enic->rss_cpu;
804 for (i = 0; i < reta_size; i++) {
805 idx = i / RTE_RETA_GROUP_SIZE;
806 shift = i % RTE_RETA_GROUP_SIZE;
807 if (reta_conf[idx].mask & (1ULL << shift))
808 rss_cpu.cpu[i / 4].b[i % 4] =
809 enic_rte_rq_idx_to_sop_idx(
810 reta_conf[idx].reta[shift]);
812 return enic_set_rss_reta(enic, &rss_cpu);
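/*
 * A caller-side sketch for this entry point, assuming the generic ethdev
 * RETA API; the bucket and queue values are illustrative, and reta_size
 * must equal ENIC_RSS_RETA_SIZE as checked above:
 *
 *   struct rte_eth_rss_reta_entry64
 *       conf[ENIC_RSS_RETA_SIZE / RTE_RETA_GROUP_SIZE];
 *   memset(conf, 0, sizeof(conf));
 *   conf[0].mask = 1ULL << 5;    // update entry 5 only
 *   conf[0].reta[5] = 2;         // map hash bucket 5 to Rx queue 2
 *   rte_eth_dev_rss_reta_update(port_id, conf, ENIC_RSS_RETA_SIZE);
 */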
815 static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
816 struct rte_eth_rss_conf *rss_conf)
818 struct enic *enic = pmd_priv(dev);
820 ENICPMD_FUNC_TRACE();
821 return enic_set_rss_conf(enic, rss_conf);
824 static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
825 struct rte_eth_rss_conf *rss_conf)
827 struct enic *enic = pmd_priv(dev);
829 ENICPMD_FUNC_TRACE();
830 if (rss_conf == NULL)
832 if (rss_conf->rss_key != NULL &&
833 rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
834 dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
836 rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
839 rss_conf->rss_hf = enic->rss_hf;
840 if (rss_conf->rss_key != NULL) {
842 for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
843 rss_conf->rss_key[i] =
844 enic->rss_key.key[i / 10].b[i % 10];
846 rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
851 static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
852 uint16_t rx_queue_id,
853 struct rte_eth_rxq_info *qinfo)
855 struct enic *enic = pmd_priv(dev);
856 struct vnic_rq *rq_sop;
857 struct vnic_rq *rq_data;
858 struct rte_eth_rxconf *conf;
859 uint16_t sop_queue_idx;
860 uint16_t data_queue_idx;
862 ENICPMD_FUNC_TRACE();
863 sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
864 data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic);
865 rq_sop = &enic->rq[sop_queue_idx];
866 rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
867 qinfo->mp = rq_sop->mp;
868 qinfo->scattered_rx = rq_sop->data_queue_enable;
869 qinfo->nb_desc = rq_sop->ring.desc_count;
870 if (qinfo->scattered_rx)
871 qinfo->nb_desc += rq_data->ring.desc_count;
873 memset(conf, 0, sizeof(*conf));
874 conf->rx_free_thresh = rq_sop->rx_free_thresh;
875 conf->rx_drop_en = 1;
877 * Except VLAN stripping (port setting), all the checksum offloads
878 * are always enabled.
880 conf->offloads = enic->rx_offload_capa;
881 if (!enic->ig_vlan_strip_en)
882 conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
883 /* rx_thresh and other fields are not applicable for enic */
886 static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
887 uint16_t tx_queue_id,
888 struct rte_eth_txq_info *qinfo)
890 struct enic *enic = pmd_priv(dev);
891 struct vnic_wq *wq = &enic->wq[tx_queue_id];
893 ENICPMD_FUNC_TRACE();
894 qinfo->nb_desc = wq->ring.desc_count;
895 memset(&qinfo->conf, 0, sizeof(qinfo->conf));
896 qinfo->conf.offloads = wq->offloads;
897 /* tx_thresh and all the other fields are not applicable for enic */
900 static int enicpmd_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
901 __rte_unused uint16_t queue_id,
902 struct rte_eth_burst_mode *mode)
904 eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
905 struct enic *enic = pmd_priv(dev);
906 const char *info_str = NULL;
909 ENICPMD_FUNC_TRACE();
910 if (enic->use_noscatter_vec_rx_handler)
911 info_str = "Vector AVX2 No Scatter";
912 else if (pkt_burst == enic_noscatter_recv_pkts)
913 info_str = "Scalar No Scatter";
914 else if (pkt_burst == enic_recv_pkts)
916 else if (pkt_burst == enic_recv_pkts_64)
917 info_str = "Scalar 64B Completion";
919 strlcpy(mode->info, info_str, sizeof(mode->info));
925 static int enicpmd_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
926 __rte_unused uint16_t queue_id,
927 struct rte_eth_burst_mode *mode)
929 eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
930 const char *info_str = NULL;
933 ENICPMD_FUNC_TRACE();
934 if (pkt_burst == enic_simple_xmit_pkts)
935 info_str = "Scalar Simplified";
936 else if (pkt_burst == enic_xmit_pkts)
939 strlcpy(mode->info, info_str, sizeof(mode->info));
945 static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
946 uint16_t rx_queue_id)
948 struct enic *enic = pmd_priv(eth_dev);
950 ENICPMD_FUNC_TRACE();
951 vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
955 static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
956 uint16_t rx_queue_id)
958 struct enic *enic = pmd_priv(eth_dev);
960 ENICPMD_FUNC_TRACE();
961 vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
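/*
 * These two callbacks back rte_eth_dev_rx_intr_enable()/_disable(). A
 * sketch of the usual interrupt-mode Rx loop, assuming the generic
 * ethdev API; port_id, qid, and BURST are illustrative:
 *
 *   if (rte_eth_rx_burst(port_id, qid, pkts, BURST) == 0) {
 *       rte_eth_dev_rx_intr_enable(port_id, qid);
 *       // wait on the queue's interrupt (e.g. via epoll), then:
 *       rte_eth_dev_rx_intr_disable(port_id, qid);
 *   }
 */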
965 static int udp_tunnel_common_check(struct enic *enic,
966 struct rte_eth_udp_tunnel *tnl)
968 if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN &&
969 tnl->prot_type != RTE_TUNNEL_TYPE_GENEVE)
971 if (!enic->overlay_offload) {
972 ENICPMD_LOG(DEBUG, " overlay offload is not supported\n");
978 static int update_tunnel_port(struct enic *enic, uint16_t port, bool vxlan)
982 cfg = vxlan ? OVERLAY_CFG_VXLAN_PORT_UPDATE :
983 OVERLAY_CFG_GENEVE_PORT_UPDATE;
984 if (vnic_dev_overlay_offload_cfg(enic->vdev, cfg, port)) {
985 ENICPMD_LOG(DEBUG, " failed to update tunnel port\n");
988 ENICPMD_LOG(DEBUG, " updated %s port to %u\n",
989 vxlan ? "vxlan" : "geneve", port);
991 enic->vxlan_port = port;
993 enic->geneve_port = port;
997 static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
998 struct rte_eth_udp_tunnel *tnl)
1000 struct enic *enic = pmd_priv(eth_dev);
1005 ENICPMD_FUNC_TRACE();
1006 ret = udp_tunnel_common_check(enic, tnl);
1009 vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
1011 port = enic->vxlan_port;
1013 port = enic->geneve_port;
1015 * The NIC has one configurable port number per tunnel type.
1016 * "Adding" a new port number replaces it.
1018 if (tnl->udp_port == port || tnl->udp_port == 0) {
1019 ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n",
1023 return update_tunnel_port(enic, tnl->udp_port, vxlan);
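/*
 * A caller-side sketch, assuming the generic ethdev tunnel API; 4790 is
 * an arbitrary example port. Per the comment above, "adding" a second
 * VXLAN port simply replaces the current one:
 *
 *   struct rte_eth_udp_tunnel tnl = {
 *       .udp_port = 4790,
 *       .prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *   };
 *   rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
 */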
1026 static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
1027 struct rte_eth_udp_tunnel *tnl)
1029 struct enic *enic = pmd_priv(eth_dev);
1034 ENICPMD_FUNC_TRACE();
1035 ret = udp_tunnel_common_check(enic, tnl);
1038 vxlan = (tnl->prot_type == RTE_TUNNEL_TYPE_VXLAN);
1040 port = enic->vxlan_port;
1042 port = enic->geneve_port;
1044 * Clear the previously set port number and restore the
1045 * hardware default port number. Some drivers disable VXLAN
1046 * offloads when there are no configured port numbers. But
1047 * enic does not do that as VXLAN is part of overlay offload,
1048 * which is tied to inner RSS and TSO.
1050 if (tnl->udp_port != port) {
1051 ENICPMD_LOG(DEBUG, " %u is not a configured tunnel port\n",
1055 port = vxlan ? RTE_VXLAN_DEFAULT_PORT : RTE_GENEVE_DEFAULT_PORT;
1056 return update_tunnel_port(enic, port, vxlan);
1059 static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
1060 char *fw_version, size_t fw_size)
1062 struct vnic_devcmd_fw_info *info;
1066 ENICPMD_FUNC_TRACE();
1068 enic = pmd_priv(eth_dev);
1069 ret = vnic_dev_fw_info(enic->vdev, &info);
1072 ret = snprintf(fw_version, fw_size, "%s %s",
1073 info->fw_version, info->fw_build);
1077 ret += 1; /* add the size of '\0' */
1078 if (fw_size < (size_t)ret)
1084 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
1085 .dev_configure = enicpmd_dev_configure,
1086 .dev_start = enicpmd_dev_start,
1087 .dev_stop = enicpmd_dev_stop,
1088 .dev_set_link_up = NULL,
1089 .dev_set_link_down = NULL,
1090 .dev_close = enicpmd_dev_close,
1091 .promiscuous_enable = enicpmd_dev_promiscuous_enable,
1092 .promiscuous_disable = enicpmd_dev_promiscuous_disable,
1093 .allmulticast_enable = enicpmd_dev_allmulticast_enable,
1094 .allmulticast_disable = enicpmd_dev_allmulticast_disable,
1095 .link_update = enicpmd_dev_link_update,
1096 .stats_get = enicpmd_dev_stats_get,
1097 .stats_reset = enicpmd_dev_stats_reset,
1098 .queue_stats_mapping_set = NULL,
1099 .dev_infos_get = enicpmd_dev_info_get,
1100 .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
1101 .mtu_set = enicpmd_mtu_set,
1102 .vlan_filter_set = NULL,
1103 .vlan_tpid_set = NULL,
1104 .vlan_offload_set = enicpmd_vlan_offload_set,
1105 .vlan_strip_queue_set = NULL,
1106 .rx_queue_start = enicpmd_dev_rx_queue_start,
1107 .rx_queue_stop = enicpmd_dev_rx_queue_stop,
1108 .tx_queue_start = enicpmd_dev_tx_queue_start,
1109 .tx_queue_stop = enicpmd_dev_tx_queue_stop,
1110 .rx_queue_setup = enicpmd_dev_rx_queue_setup,
1111 .rx_queue_release = enicpmd_dev_rx_queue_release,
1112 .tx_queue_setup = enicpmd_dev_tx_queue_setup,
1113 .tx_queue_release = enicpmd_dev_tx_queue_release,
1114 .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
1115 .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
1116 .rxq_info_get = enicpmd_dev_rxq_info_get,
1117 .txq_info_get = enicpmd_dev_txq_info_get,
1118 .rx_burst_mode_get = enicpmd_dev_rx_burst_mode_get,
1119 .tx_burst_mode_get = enicpmd_dev_tx_burst_mode_get,
1121 .dev_led_off = NULL,
1122 .flow_ctrl_get = NULL,
1123 .flow_ctrl_set = NULL,
1124 .priority_flow_ctrl_set = NULL,
1125 .mac_addr_add = enicpmd_add_mac_addr,
1126 .mac_addr_remove = enicpmd_remove_mac_addr,
1127 .mac_addr_set = enicpmd_set_mac_addr,
1128 .set_mc_addr_list = enicpmd_set_mc_addr_list,
1129 .flow_ops_get = enicpmd_dev_flow_ops_get,
1130 .reta_query = enicpmd_dev_rss_reta_query,
1131 .reta_update = enicpmd_dev_rss_reta_update,
1132 .rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
1133 .rss_hash_update = enicpmd_dev_rss_hash_update,
1134 .udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
1135 .udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
1136 .fw_version_get = enicpmd_dev_fw_version_get,
1139 static int enic_parse_zero_one(const char *key,
1146 enic = (struct enic *)opaque;
1147 if (strcmp(value, "0") == 0) {
1149 } else if (strcmp(value, "1") == 0) {
1152 dev_err(enic, "Invalid value for %s"
1153 ": expected=0|1 given=%s\n", key, value);
1156 if (strcmp(key, ENIC_DEVARG_CQ64) == 0)
1157 enic->cq64_request = b;
1158 if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
1159 enic->disable_overlay = b;
1160 if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
1161 enic->enable_avx2_rx = b;
1165 static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
1171 enic = (struct enic *)opaque;
1172 if (strcmp(value, "trunk") == 0) {
1173 /* Trunk mode: always tag */
1174 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
1175 } else if (strcmp(value, "untag") == 0) {
1176 /* Untag default VLAN mode: untag if VLAN = default VLAN */
1177 enic->ig_vlan_rewrite_mode =
1178 IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
1179 } else if (strcmp(value, "priority") == 0) {
1181 * Priority-tag default VLAN mode: priority tag (VLAN header
1182 * with ID=0) if VLAN = default
1184 enic->ig_vlan_rewrite_mode =
1185 IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
1186 } else if (strcmp(value, "pass") == 0) {
1187 /* Pass through mode: do not touch tags */
1188 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
1190 dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
1191 ": expected=trunk|untag|priority|pass given=%s\n",
1198 static int enic_check_devargs(struct rte_eth_dev *dev)
1200 static const char *const valid_keys[] = {
1202 ENIC_DEVARG_DISABLE_OVERLAY,
1203 ENIC_DEVARG_ENABLE_AVX2_RX,
1204 ENIC_DEVARG_IG_VLAN_REWRITE,
1205 ENIC_DEVARG_REPRESENTOR,
1207 struct enic *enic = pmd_priv(dev);
1208 struct rte_kvargs *kvlist;
1210 ENICPMD_FUNC_TRACE();
1212 enic->cq64_request = true; /* Use 64B entry if available */
1213 enic->disable_overlay = false;
1214 enic->enable_avx2_rx = false;
1215 enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
1216 if (!dev->device->devargs)
1218 kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
1221 if (rte_kvargs_process(kvlist, ENIC_DEVARG_CQ64,
1222 enic_parse_zero_one, enic) < 0 ||
1223 rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
1224 enic_parse_zero_one, enic) < 0 ||
1225 rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
1226 enic_parse_zero_one, enic) < 0 ||
1227 rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
1228 enic_parse_ig_vlan_rewrite, enic) < 0) {
1229 rte_kvargs_free(kvlist);
1232 rte_kvargs_free(kvlist);
1236 /* Initialize the driver for the PF */
1237 static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
1238 void *init_params __rte_unused)
1240 struct rte_pci_device *pdev;
1241 struct rte_pci_addr *addr;
1242 struct enic *enic = pmd_priv(eth_dev);
1245 ENICPMD_FUNC_TRACE();
1246 eth_dev->dev_ops = &enicpmd_eth_dev_ops;
1247 eth_dev->rx_queue_count = enicpmd_dev_rx_queue_count;
1248 eth_dev->rx_pkt_burst = &enic_recv_pkts;
1249 eth_dev->tx_pkt_burst = &enic_xmit_pkts;
1250 eth_dev->tx_pkt_prepare = &enic_prep_pkts;
1251 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1252 enic_pick_tx_handler(eth_dev);
1253 enic_pick_rx_handler(eth_dev);
1256 /* Only the primary sets up adapter and other data in shared memory */
1257 enic->port_id = eth_dev->data->port_id;
1258 enic->rte_dev = eth_dev;
1259 enic->dev_data = eth_dev->data;
1261 pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
1262 rte_eth_copy_pci_info(eth_dev, pdev);
1263 eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
1267 snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
1268 addr->domain, addr->bus, addr->devid, addr->function);
1270 err = enic_check_devargs(eth_dev);
1273 err = enic_probe(enic);
1274 if (!err && enic->fm) {
1275 err = enic_fm_allocate_switch_domain(enic);
1277 ENICPMD_LOG(ERR, "failed to allocate switch domain id");
1282 static int eth_enic_dev_uninit(struct rte_eth_dev *eth_dev)
1284 struct enic *enic = pmd_priv(eth_dev);
1287 ENICPMD_FUNC_TRACE();
1288 eth_dev->device = NULL;
1289 eth_dev->intr_handle = NULL;
1290 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1292 err = rte_eth_switch_domain_free(enic->switch_domain_id);
1294 ENICPMD_LOG(WARNING, "failed to free switch domain: %d", err);
1298 static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
1299 struct rte_pci_device *pci_dev)
1301 char name[RTE_ETH_NAME_MAX_LEN];
1302 struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
1303 struct rte_eth_dev *pf_ethdev;
1304 struct enic *pf_enic;
1307 ENICPMD_FUNC_TRACE();
1308 if (pci_dev->device.devargs) {
1309 retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
1314 if (eth_da.nb_representor_ports > 0 &&
1315 eth_da.type != RTE_ETH_REPRESENTOR_VF) {
1316 ENICPMD_LOG(ERR, "unsupported representor type: %s\n",
1317 pci_dev->device.devargs->args);
1320 retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
1321 sizeof(struct enic),
1322 eth_dev_pci_specific_init, pci_dev,
1323 eth_enic_dev_init, NULL);
1324 if (retval || eth_da.nb_representor_ports < 1)
1327 /* Probe VF representor */
1328 pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1329 if (pf_ethdev == NULL)
1331 /* Representors require flowman */
1332 pf_enic = pmd_priv(pf_ethdev);
1333 if (pf_enic->fm == NULL) {
1334 ENICPMD_LOG(ERR, "VF representors require flowman");
1338 * For now representors imply switchdev, as firmware does not support
1339 * legacy mode SR-IOV
1341 pf_enic->switchdev_mode = 1;
1342 /* Calculate max VF ID before initializing representors */
1343 pf_enic->max_vf_id = 0;
1344 for (i = 0; i < eth_da.nb_representor_ports; i++) {
1345 pf_enic->max_vf_id = RTE_MAX(pf_enic->max_vf_id,
1346 eth_da.representor_ports[i]);
1348 for (i = 0; i < eth_da.nb_representor_ports; i++) {
1349 struct enic_vf_representor representor;
1351 representor.vf_id = eth_da.representor_ports[i];
1352 representor.switch_domain_id =
1353 pmd_priv(pf_ethdev)->switch_domain_id;
1354 representor.pf = pmd_priv(pf_ethdev);
1355 snprintf(name, sizeof(name), "net_%s_representor_%d",
1356 pci_dev->device.name, eth_da.representor_ports[i]);
1357 retval = rte_eth_dev_create(&pci_dev->device, name,
1358 sizeof(struct enic_vf_representor), NULL, NULL,
1359 enic_vf_representor_init, &representor);
1361 ENICPMD_LOG(ERR, "failed to create enic vf representor %s",
1369 static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
1371 struct rte_eth_dev *ethdev;
1373 ENICPMD_FUNC_TRACE();
1374 ethdev = rte_eth_dev_allocated(pci_dev->device.name);
1377 if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
1378 return rte_eth_dev_destroy(ethdev, enic_vf_representor_uninit);
1380 return rte_eth_dev_destroy(ethdev, eth_enic_dev_uninit);
1383 static struct rte_pci_driver rte_enic_pmd = {
1384 .id_table = pci_id_enic_map,
1385 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
1386 .probe = eth_enic_pci_probe,
1387 .remove = eth_enic_pci_remove,
1390 int dev_is_enic(struct rte_eth_dev *dev)
1392 return dev->device->driver == &rte_enic_pmd.driver;
1395 RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
1396 RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
1397 RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
1398 RTE_PMD_REGISTER_PARAM_STRING(net_enic,
1399 ENIC_DEVARG_CQ64 "=0|1 "
1400 ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
1401 ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
1402 ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
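/*
 * An illustrative invocation passing these devargs; the PCI address and
 * values are examples only, and the representor list follows the
 * standard ethdev devargs syntax:
 *
 *   dpdk-testpmd -a 0000:0b:00.0,enable-avx2-rx=1,ig-vlan-rewrite=untag,representor=[0-1] -- -i
 */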