/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_enet.h"
/* The set of PCI devices this driver supports */
#define CISCO_PCI_VENDOR_ID 0x1137

static const struct rte_pci_id pci_id_enic_map[] = {
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET)},
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF)},
	{RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_SN)},
	{.vendor_id = 0, /* sentinel */},
};
/* Supported link speeds of production VIC models */
static const struct vic_speed_capa {
	uint16_t sub_devid;
	uint32_t capa;
} vic_speed_capa_map[] = {
	{ 0x0043, ETH_LINK_SPEED_10G }, /* VIC */
	{ 0x0047, ETH_LINK_SPEED_10G }, /* P81E PCIe */
	{ 0x0048, ETH_LINK_SPEED_10G }, /* M81KR Mezz */
	{ 0x004f, ETH_LINK_SPEED_10G }, /* 1280 Mezz */
	{ 0x0084, ETH_LINK_SPEED_10G }, /* 1240 MLOM */
	{ 0x0085, ETH_LINK_SPEED_10G }, /* 1225 PCIe */
	{ 0x00cd, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1285 PCIe */
	{ 0x00ce, ETH_LINK_SPEED_10G }, /* 1225T PCIe */
	{ 0x012a, ETH_LINK_SPEED_40G }, /* M4308 */
	{ 0x012c, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1340 MLOM */
	{ 0x012e, ETH_LINK_SPEED_10G }, /* 1227 PCIe */
	{ 0x0137, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1380 Mezz */
	{ 0x014d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1385 PCIe */
	{ 0x015d, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_40G }, /* 1387 MLOM */
	{ 0x0215, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
		  ETH_LINK_SPEED_40G }, /* 1440 Mezz */
	{ 0x0216, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
		  ETH_LINK_SPEED_40G }, /* 1480 MLOM */
	{ 0x0217, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1455 PCIe */
	{ 0x0218, ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G }, /* 1457 MLOM */
	{ 0x0219, ETH_LINK_SPEED_40G }, /* 1485 PCIe */
	{ 0x021a, ETH_LINK_SPEED_40G }, /* 1487 MLOM */
	{ 0x024a, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1495 PCIe */
	{ 0x024b, ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G }, /* 1497 MLOM */
	{ 0, 0 }, /* End marker */
};
#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
#define ENIC_DEVARG_GENEVE_OPT "geneve-opt"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
#define ENIC_DEVARG_REPRESENTOR "representor"
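
/*
 * Illustrative example (not from the original source): these devargs are
 * passed in the PCI devargs string of the EAL -a/--allow option; the PCI
 * address below is a placeholder:
 *   dpdk-testpmd -a 0000:0b:00.0,disable-overlay=1,ig-vlan-rewrite=untag
 * See enic_check_devargs() and RTE_PMD_REGISTER_PARAM_STRING() at the end
 * of this file for the accepted keys and values.
 */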
RTE_LOG_REGISTER(enic_pmd_logtype, pmd.net.enic, INFO);

static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
		enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;
	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;
	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;
	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
				(struct rte_eth_fdir_filter *)arg);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
				(struct rte_eth_fdir_filter *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;
	case RTE_ETH_FILTER_FLUSH:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	case RTE_ETH_FILTER_INFO:
		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	struct enic *enic = pmd_priv(dev);
	int ret = 0;
	ENICPMD_FUNC_TRACE();
	/*
	 * Currently, when Geneve with options offload is enabled, host
	 * cannot insert match-action rules.
	 */
	if (enic->geneve_opt_enabled)
		return -EBUSY;
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		if (enic->flow_filter_mode == FILTER_FLOWMAN)
			*(const void **)arg = &enic_fm_flow_ops;
		else
			*(const void **)arg = &enic_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		dev_warning(enic, "Filter type (%d) not supported",
			    filter_type);
		break;
	}
	return ret;
}
static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_wq(txq);
}
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();
	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* Check the start of packet (SOP) RQs only, since the data RQs are
	 * not set up when Rx scatter is disabled.
	 */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;
	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);
	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");
	return ret;
}
static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct vnic_wq *wq;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(queue_idx < enic->conf_wq_count);
	wq = &enic->wq[queue_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[queue_idx] = (void *)wq;
	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}
	return enicpmd_dev_setup_intr(enic);
}
static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_start_wq(enic, queue_idx);
	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);
	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_start_rq(enic, queue_idx);
	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);
	return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;
	enic_free_rq(rxq);
}
static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
	uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(dev);
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;
	uint16_t rq_num;

	rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	cq = &enic->cq[enic_cq_rq(enic, rq_num)];
	cq_idx = cq->to_clean;
	cq_tail = ioread32(&cq->ctrl->cq_tail);
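	/* cq_tail is the last completion index written by HW; cq_idx
	 * (cq->to_clean) is the next one SW will process. Their distance,
	 * adjusted below for ring wrap-around, is the number of completed
	 * but unprocessed Rx descriptors.
	 */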
	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;
	queue_count = cq_tail - cq_idx;
	return queue_count;
}
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];
	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}
	return enicpmd_dev_setup_intr(enic);
}
static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint64_t offloads;

	ENICPMD_FUNC_TRACE();
	offloads = eth_dev->data->dev_conf.rxmode.offloads;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}
	return enic_set_vlan_strip(enic);
}
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	unsigned int mask;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}
	if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
		eth_dev->data->dev_conf.rxmode.offloads |=
			DEV_RX_OFFLOAD_RSS_HASH;
	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
				  DEV_RX_OFFLOAD_CHECKSUM);
	/* All vlan offload masks to apply the current settings */
	mask = ETH_VLAN_STRIP_MASK |
	       ETH_VLAN_FILTER_MASK |
	       ETH_VLAN_EXTEND_MASK;
	ret = enicpmd_vlan_offload_set(eth_dev, mask);
	if (ret) {
		dev_err(enic, "Failed to configure VLAN offloads\n");
		return ret;
	}
	/*
	 * Initialize RSS with the default reta and key. If the user key is
	 * given (rx_adv_conf.rss_conf.rss_key), it is used instead of the
	 * default key.
	 */
	return enic_init_rss_nic_cfg(enic);
}
/*
 * Start device. It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);
	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);
}
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	ENICPMD_FUNC_TRACE();
	return enic_link_update(eth_dev);
}
static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}

static int enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_clear(enic);
}
static uint32_t speed_capa_from_pci_id(struct rte_eth_dev *eth_dev)
{
	const struct vic_speed_capa *m;
	struct rte_pci_device *pdev;
	uint16_t id;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	id = pdev->id.subsystem_device_id;
	for (m = vic_speed_capa_map; m->sub_devid != 0; m++) {
		if (m->sub_devid == id)
			return m->capa;
	}
	/* 1300 and later models are at least 40G */
	if (id >= 0x0100)
		return ETH_LINK_SPEED_40G;
	/* VFs have subsystem id 0, check device id */
	if (id == 0) {
		/* Newer VF implies at least 40G model */
		if (pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_SN)
			return ETH_LINK_SPEED_40G;
	}
	return ETH_LINK_SPEED_10G;
}
static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per rx queue exposed to DPDK */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
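	/* Each pair is a start of packet (SOP) RQ plus a data RQ; the data
	 * RQ is used only when Rx scatter is enabled (see
	 * enicpmd_dev_rxq_info_get below).
	 */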
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.max_rx_pkt_len,
	 * basically ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	device_info->min_mtu = ENIC_MIN_MTU;
	device_info->max_mtu = enic->max_mtu;
	device_info->rx_offload_capa = enic->rx_offload_capa;
	device_info->tx_offload_capa = enic->tx_offload_capa;
	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
	device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.rq_desc_count,
		.nb_min = ENIC_MIN_RQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
	};
	device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.wq_desc_count,
		.nb_min = ENIC_MIN_WQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
		.nb_seg_max = ENIC_TX_XMIT_MAX,
		.nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
	};
	device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_RX_BURST,
		.ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
				     ENIC_DEFAULT_RX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_RX_RINGS,
	};
	device_info->default_txportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_TX_BURST,
		.ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
				     ENIC_DEFAULT_TX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_TX_RINGS,
	};
	device_info->speed_capa = speed_capa_from_pci_id(eth_dev);

	return 0;
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ptypes_overlay[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
	    dev->rx_pkt_burst != NULL) {
		struct enic *enic = pmd_priv(dev);

		if (enic->overlay_offload)
			return ptypes_overlay;
		else
			return ptypes;
	}
	return NULL;
}
static int enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 1;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->promisc = 0;
	return ret;
}

static int enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->promisc = 1;
	return ret;
}

static int enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->allmulti = 0;
	return ret;
}

static int enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	ret = enic_add_packet_filter(enic);
	if (ret != 0)
		enic->allmulti = 1;
	return ret;
}
static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct rte_ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}
static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
	struct rte_ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}
static void debug_log_add_del_addr(struct rte_ether_addr *addr, bool add)
{
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];

	rte_ether_format_addr(mac_str, RTE_ETHER_ADDR_FMT_SIZE, addr);
	ENICPMD_LOG(DEBUG, " %s address %s\n",
		    add ? "add" : "remove", mac_str);
}
static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
	struct rte_ether_addr *mc_addr_set,
	uint32_t nb_mc_addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	char mac_str[RTE_ETHER_ADDR_FMT_SIZE];
	struct rte_ether_addr *addr;
	uint32_t i, j;
	int ret;

	ENICPMD_FUNC_TRACE();

	/* Validate the given addresses first */
	for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
		addr = &mc_addr_set[i];
		if (!rte_is_multicast_ether_addr(addr) ||
		    rte_is_broadcast_ether_addr(addr)) {
			rte_ether_format_addr(mac_str,
					      RTE_ETHER_ADDR_FMT_SIZE, addr);
			ENICPMD_LOG(ERR, " invalid multicast address %s\n",
				    mac_str);
			return -EINVAL;
		}
	}

	/* Flush all if requested */
	if (nb_mc_addr == 0 || mc_addr_set == NULL) {
		ENICPMD_LOG(DEBUG, " flush multicast addresses\n");
		for (i = 0; i < enic->mc_count; i++) {
			addr = &enic->mc_addrs[i];
			debug_log_add_del_addr(addr, false);
			ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
			if (ret)
				return ret;
		}
		enic->mc_count = 0;
		return 0;
	}

	if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
		ENICPMD_LOG(ERR, " too many multicast addresses: max=%d\n",
			    ENIC_MULTICAST_PERFECT_FILTERS);
		return -ENOSPC;
	}
	/*
	 * devcmd is slow, so apply the difference instead of flushing and
	 * re-adding everything.
	 * 1. Delete addresses on the NIC but not on the host
	 */
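	/*
	 * Example (illustrative): if the NIC currently has {A, B} and the
	 * new host list is {B, C}, step 1 deletes A and step 2 adds C;
	 * B requires no devcmd at all.
	 */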
	for (i = 0; i < enic->mc_count; i++) {
		addr = &enic->mc_addrs[i];
		for (j = 0; j < nb_mc_addr; j++) {
			if (rte_is_same_ether_addr(addr, &mc_addr_set[j]))
				break;
		}
		if (j < nb_mc_addr)
			continue;
		debug_log_add_del_addr(addr, false);
		ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* 2. Add addresses on the host but not on the NIC */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		for (j = 0; j < enic->mc_count; j++) {
			if (rte_is_same_ether_addr(addr, &enic->mc_addrs[j]))
				break;
		}
		if (j < enic->mc_count)
			continue;
		debug_log_add_del_addr(addr, true);
		ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* Keep a copy so we can flush/apply later on. */
	memcpy(enic->mc_addrs, mc_addr_set,
	       nb_mc_addr * sizeof(struct rte_ether_addr));
	enic->mc_count = nb_mc_addr;

	return 0;
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}
static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
	uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}
static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_reta_entry64 *reta_conf,
	uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
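	/* The VIC packs the reta as four one-byte entries per 32-bit word,
	 * so entry i lives at cpu[i / 4].b[i % 4].
	 */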
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}

	return enic_set_rss_reta(enic, &rss_cpu);
}
static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}

	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
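		/* The NIC keeps the hash key in 10-byte chunks
		 * (key[i / 10].b[i % 10]); repack it into the caller's
		 * flat rss_key buffer.
		 */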
		int i;

		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}
static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
	uint16_t rx_queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id, enic);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = enic->rx_offload_capa;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}
static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
	uint16_t tx_queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_wq *wq = &enic->wq[tx_queue_id];

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = wq->ring.desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = wq->offloads;
	/* tx_thresh and all the other fields are not applicable for enic */
}
static int enicpmd_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
	__rte_unused uint16_t queue_id,
	struct rte_eth_burst_mode *mode)
{
	eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
	struct enic *enic = pmd_priv(dev);
	const char *info_str = NULL;
	int ret = -EINVAL;

	ENICPMD_FUNC_TRACE();
	if (enic->use_noscatter_vec_rx_handler)
		info_str = "Vector AVX2 No Scatter";
	else if (pkt_burst == enic_noscatter_recv_pkts)
		info_str = "Scalar No Scatter";
	else if (pkt_burst == enic_recv_pkts)
		info_str = "Scalar Scatter";
	if (info_str) {
		strlcpy(mode->info, info_str, sizeof(mode->info));
		ret = 0;
	}
	return ret;
}
static int enicpmd_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
	__rte_unused uint16_t queue_id,
	struct rte_eth_burst_mode *mode)
{
	eth_tx_burst_t pkt_burst = dev->tx_pkt_burst;
	const char *info_str = NULL;
	int ret = -EINVAL;

	ENICPMD_FUNC_TRACE();
	if (pkt_burst == enic_simple_xmit_pkts)
		info_str = "Scalar Simplified";
	else if (pkt_burst == enic_xmit_pkts)
		info_str = "Scalar";
	if (info_str) {
		strlcpy(mode->info, info_str, sizeof(mode->info));
		ret = 0;
	}
	return ret;
}
static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
	uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
	uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}
static int udp_tunnel_common_check(struct enic *enic,
	struct rte_eth_udp_tunnel *tnl)
{
	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
		return -ENOTSUP;
	if (!enic->overlay_offload) {
		ENICPMD_LOG(DEBUG, " vxlan (overlay offload) is not "
			"supported\n");
		return -ENOTSUP;
	}
	return 0;
}

static int update_vxlan_port(struct enic *enic, uint16_t port)
{
	if (vnic_dev_overlay_offload_cfg(enic->vdev,
					 OVERLAY_CFG_VXLAN_PORT_UPDATE,
					 port)) {
		ENICPMD_LOG(DEBUG, " failed to update vxlan port\n");
		return -EINVAL;
	}
	ENICPMD_LOG(DEBUG, " updated vxlan port to %u\n", port);
	enic->vxlan_port = port;
	return 0;
}
static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
	struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * The NIC has 1 configurable VXLAN port number. "Adding" a new port
	 * number replaces it.
	 */
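	/*
	 * For example (illustrative): if 4789 is currently configured,
	 * adding 4790 simply overwrites it; deleting the port later restores
	 * RTE_VXLAN_DEFAULT_PORT (see the delete handler below).
	 */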
	if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
		ENICPMD_LOG(DEBUG, " %u is already configured or invalid\n",
			    tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, tnl->udp_port);
}
static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
	struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * Clear the previously set port number and restore the
	 * hardware default port number. Some drivers disable VXLAN
	 * offloads when there are no configured port numbers. But
	 * enic does not do that as VXLAN is part of overlay offload,
	 * which is tied to inner RSS and TSO.
	 */
	if (tnl->udp_port != enic->vxlan_port) {
		ENICPMD_LOG(DEBUG, " %u is not a configured vxlan port\n",
			    tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, RTE_VXLAN_DEFAULT_PORT);
}
static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
	char *fw_version, size_t fw_size)
{
	struct vnic_devcmd_fw_info *info;
	struct enic *enic;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (fw_version == NULL || fw_size <= 0)
		return -EINVAL;
	enic = pmd_priv(eth_dev);
	ret = vnic_dev_fw_info(enic->vdev, &info);
	if (ret)
		return ret;
	snprintf(fw_version, fw_size, "%s %s",
		 info->fw_version, info->fw_build);
	fw_version[fw_size - 1] = '\0';
	return 0;
}
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure = enicpmd_dev_configure,
	.dev_start = enicpmd_dev_start,
	.dev_stop = enicpmd_dev_stop,
	.dev_set_link_up = NULL,
	.dev_set_link_down = NULL,
	.dev_close = enicpmd_dev_close,
	.promiscuous_enable = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update = enicpmd_dev_link_update,
	.stats_get = enicpmd_dev_stats_get,
	.stats_reset = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set = enicpmd_mtu_set,
	.vlan_filter_set = NULL,
	.vlan_tpid_set = NULL,
	.vlan_offload_set = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start = enicpmd_dev_rx_queue_start,
	.rx_queue_stop = enicpmd_dev_rx_queue_stop,
	.tx_queue_start = enicpmd_dev_tx_queue_start,
	.tx_queue_stop = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup = enicpmd_dev_rx_queue_setup,
	.rx_queue_release = enicpmd_dev_rx_queue_release,
	.tx_queue_setup = enicpmd_dev_tx_queue_setup,
	.tx_queue_release = enicpmd_dev_tx_queue_release,
	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
	.rxq_info_get = enicpmd_dev_rxq_info_get,
	.txq_info_get = enicpmd_dev_txq_info_get,
	.rx_burst_mode_get = enicpmd_dev_rx_burst_mode_get,
	.tx_burst_mode_get = enicpmd_dev_tx_burst_mode_get,
	.dev_led_on = NULL,
	.dev_led_off = NULL,
	.flow_ctrl_get = NULL,
	.flow_ctrl_set = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add = enicpmd_add_mac_addr,
	.mac_addr_remove = enicpmd_remove_mac_addr,
	.mac_addr_set = enicpmd_set_mac_addr,
	.set_mc_addr_list = enicpmd_set_mc_addr_list,
	.filter_ctrl = enicpmd_dev_filter_ctrl,
	.reta_query = enicpmd_dev_rss_reta_query,
	.reta_update = enicpmd_dev_rss_reta_update,
	.rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
	.rss_hash_update = enicpmd_dev_rss_hash_update,
	.udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
	.fw_version_get = enicpmd_dev_fw_version_get,
};
static int enic_parse_zero_one(const char *key,
	const char *value,
	void *opaque)
{
	struct enic *enic;
	bool b;

	enic = (struct enic *)opaque;
	if (strcmp(value, "0") == 0) {
		b = false;
	} else if (strcmp(value, "1") == 0) {
		b = true;
	} else {
		dev_err(enic, "Invalid value for %s"
			": expected=0|1 given=%s\n", key, value);
		return -EINVAL;
	}
	if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
		enic->disable_overlay = b;
	if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
		enic->enable_avx2_rx = b;
	if (strcmp(key, ENIC_DEVARG_GENEVE_OPT) == 0)
		enic->geneve_opt_request = b;
	return 0;
}
static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
	const char *value,
	void *opaque)
{
	struct enic *enic;

	enic = (struct enic *)opaque;
	if (strcmp(value, "trunk") == 0) {
		/* Trunk mode: always tag */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
	} else if (strcmp(value, "untag") == 0) {
		/* Untag default VLAN mode: untag if VLAN = default VLAN */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
	} else if (strcmp(value, "priority") == 0) {
		/*
		 * Priority-tag default VLAN mode: priority tag (VLAN header
		 * with ID=0) if VLAN = default
		 */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
	} else if (strcmp(value, "pass") == 0) {
		/* Pass through mode: do not touch tags */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	} else {
		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
			": expected=trunk|untag|priority|pass given=%s\n",
			value);
		return -EINVAL;
	}
	return 0;
}
static int enic_check_devargs(struct rte_eth_dev *dev)
{
	static const char *const valid_keys[] = {
		ENIC_DEVARG_DISABLE_OVERLAY,
		ENIC_DEVARG_ENABLE_AVX2_RX,
		ENIC_DEVARG_GENEVE_OPT,
		ENIC_DEVARG_IG_VLAN_REWRITE,
		ENIC_DEVARG_REPRESENTOR,
		NULL};
	struct enic *enic = pmd_priv(dev);
	struct rte_kvargs *kvlist;

	ENICPMD_FUNC_TRACE();

	enic->disable_overlay = false;
	enic->enable_avx2_rx = false;
	enic->geneve_opt_request = false;
	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	if (!dev->device->devargs)
		return 0;
	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;
	if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_GENEVE_OPT,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
			       enic_parse_ig_vlan_rewrite, enic) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}
	rte_kvargs_free(kvlist);
	return 0;
}
/* Initialize the driver for PF */
static int eth_enic_dev_init(struct rte_eth_dev *eth_dev,
	void *init_params __rte_unused)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_queue_count = enicpmd_dev_rx_queue_count;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	eth_dev->tx_pkt_prepare = &enic_prep_pkts;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		enic_pick_tx_handler(eth_dev);
		enic_pick_rx_handler(eth_dev);
		return 0;
	}
	/* Only the primary sets up adapter and other data in shared memory */
	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	enic->dev_data = eth_dev->data;
	/* Let rte_eth_dev_close() release the port resources */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		 addr->domain, addr->bus, addr->devid, addr->function);

	err = enic_check_devargs(eth_dev);
	if (err)
		return err;
	err = enic_probe(enic);
	if (!err && enic->fm) {
		err = enic_fm_allocate_switch_domain(enic);
		if (err)
			ENICPMD_LOG(ERR, "failed to allocate switch domain id");
	}
	return err;
}
static int eth_enic_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();
	eth_dev->device = NULL;
	eth_dev->intr_handle = NULL;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;
	err = rte_eth_switch_domain_free(enic->switch_domain_id);
	if (err)
		ENICPMD_LOG(WARNING, "failed to free switch domain: %d", err);
	return 0;
}
static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	char name[RTE_ETH_NAME_MAX_LEN];
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	struct rte_eth_dev *pf_ethdev;
	struct enic *pf_enic;
	int i, retval;

	ENICPMD_FUNC_TRACE();
	if (pci_dev->device.devargs) {
		retval = rte_eth_devargs_parse(pci_dev->device.devargs->args,
					       &eth_da);
		if (retval)
			return retval;
	}
	retval = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
		sizeof(struct enic),
		eth_dev_pci_specific_init, pci_dev,
		eth_enic_dev_init, NULL);
	if (retval || eth_da.nb_representor_ports < 1)
		return retval;

	/* Probe VF representor */
	pf_ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (pf_ethdev == NULL)
		return -ENODEV;
	/* Representors require flowman */
	pf_enic = pmd_priv(pf_ethdev);
	if (pf_enic->fm == NULL) {
		ENICPMD_LOG(ERR, "VF representors require flowman");
		return -ENOTSUP;
	}
	/*
	 * For now representors imply switchdev, as firmware does not support
	 * legacy mode SR-IOV
	 */
	pf_enic->switchdev_mode = 1;
	/* Calculate max VF ID before initializing representor */
	pf_enic->max_vf_id = 0;
	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		pf_enic->max_vf_id = RTE_MAX(pf_enic->max_vf_id,
					     eth_da.representor_ports[i]);
	}
	for (i = 0; i < eth_da.nb_representor_ports; i++) {
		struct enic_vf_representor representor;

		representor.vf_id = eth_da.representor_ports[i];
		representor.switch_domain_id =
			pmd_priv(pf_ethdev)->switch_domain_id;
		representor.pf = pmd_priv(pf_ethdev);
		snprintf(name, sizeof(name), "net_%s_representor_%d",
			 pci_dev->device.name, eth_da.representor_ports[i]);
		retval = rte_eth_dev_create(&pci_dev->device, name,
			sizeof(struct enic_vf_representor), NULL, NULL,
			enic_vf_representor_init, &representor);
		if (retval) {
			ENICPMD_LOG(ERR, "failed to create enic vf representor %s",
				    name);
			return retval;
		}
	}
	return 0;
}
static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *ethdev;

	ENICPMD_FUNC_TRACE();
	ethdev = rte_eth_dev_allocated(pci_dev->device.name);
	if (!ethdev)
		return -ENODEV;
	if (ethdev->data->dev_flags & RTE_ETH_DEV_REPRESENTOR)
		return rte_eth_dev_destroy(ethdev, enic_vf_representor_uninit);
	return rte_eth_dev_destroy(ethdev, eth_enic_dev_uninit);
}
static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

int dev_is_enic(struct rte_eth_dev *dev)
{
	return dev->device->driver == &rte_enic_pmd.driver;
}

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
	ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
	ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
	ENIC_DEVARG_GENEVE_OPT "=0|1 "
	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");