/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

int enicpmd_logtype_init;
int enicpmd_logtype_flow;

#define PMD_INIT_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
		"%s" fmt "\n", __func__, ##args)

#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{.vendor_id = 0, /* sentinel */},
};

#define ENIC_TX_OFFLOAD_CAPA ( \
	DEV_TX_OFFLOAD_VLAN_INSERT | \
	DEV_TX_OFFLOAD_IPV4_CKSUM | \
	DEV_TX_OFFLOAD_UDP_CKSUM | \
	DEV_TX_OFFLOAD_TCP_CKSUM | \
	DEV_TX_OFFLOAD_TCP_TSO)

#define ENIC_RX_OFFLOAD_CAPA ( \
	DEV_RX_OFFLOAD_VLAN_STRIP | \
	DEV_RX_OFFLOAD_IPV4_CKSUM | \
	DEV_RX_OFFLOAD_UDP_CKSUM | \
	DEV_RX_OFFLOAD_TCP_CKSUM)

RTE_INIT(enicpmd_init_log);
static void
enicpmd_init_log(void)
{
	enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
	if (enicpmd_logtype_init >= 0)
		rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
	enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
	if (enicpmd_logtype_flow >= 0)
		rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
}

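/*
 * Dispatch legacy flow-director (fdir) filter operations to the enic
 * fdir helpers: ADD/UPDATE and DELETE take a struct rte_eth_fdir_filter
 * in 'arg', STATS and INFO return data through 'arg', and FLUSH is not
 * supported by this adapter.
 */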
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;

	case RTE_ETH_FILTER_FLUSH:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	case RTE_ETH_FILTER_INFO:
		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &enic_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_wq(txq);
}

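/*
 * Interrupt resources can be sized only once every CQ, WQ, and SOP RQ
 * has been configured, so both queue-setup paths call this helper; it
 * returns early (as success) until the last queue is in place.
 */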
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* check start of packet (SOP) RQs only in case scatter is disabled. */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}

static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	__rte_unused const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(queue_idx < enic->conf_wq_count);
	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_rq(rxq);
}

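/*
 * The count of completed-but-unprocessed Rx descriptors is derived from
 * the distance between the CQ software index (to_clean) and the
 * hardware-owned cq_tail, with an adjustment for ring wraparound.
 */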
static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
					   uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(dev);
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;
	int rq_num;

	rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	cq = &enic->cq[enic_cq_rq(enic, rq_num)];
	cq_idx = cq->to_clean;

	cq_tail = ioread32(&cq->ctrl->cq_tail);

	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint64_t offloads;

	ENICPMD_FUNC_TRACE();

	offloads = eth_dev->data->dev_conf.rxmode.offloads;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}

	if ((mask & ETH_VLAN_FILTER_MASK) &&
	    (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if ((mask & ETH_VLAN_EXTEND_MASK) &&
	    (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}

	return enic_set_vlan_strip(enic);
}

static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	int mask;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
				  DEV_RX_OFFLOAD_CHECKSUM);
	/* All vlan offload masks to apply the current settings */
	mask = ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	ret = enicpmd_vlan_offload_set(eth_dev, mask);
	if (ret) {
		dev_err(enic, "Failed to configure VLAN offloads\n");
		return ret;
	}
	/*
	 * Initialize RSS with the default reta and key. If a user key is
	 * given (rx_adv_conf.rss_conf.rss_key), it is used instead of the
	 * default key.
	 */
	return enic_init_rss_nic_cfg(enic);
}

/*
 * Start device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);
}

/*
 * Stop device.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_link_update(enic);
}

static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}

static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_clear(enic);
}

static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
	 * ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
	device_info->rx_offload_capa = ENIC_RX_OFFLOAD_CAPA;
	device_info->tx_offload_capa = ENIC_TX_OFFLOAD_CAPA;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
}

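/*
 * The ptype list below describes what the default Rx handler
 * (enic_recv_pkts) reports; if another burst function has been
 * selected, no ptype information is advertised.
 */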
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == enic_recv_pkts)
		return ptypes;
	return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
				struct ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}

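/*
 * The vNIC indirection table packs four one-byte queue indices into
 * each 32-bit rss_cpu.cpu[] word, hence the cpu[i / 4].b[i % 4]
 * addressing. Hardware entries hold SOP RQ indices, so they are
 * translated to DPDK queue indices on query and back on update.
 */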
static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64
				      *reta_conf,
				      uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_reta_entry64
				       *reta_conf,
				       uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}
	return enic_set_rss_reta(enic, &rss_cpu);
}

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}

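/*
 * The vNIC stores the RSS key in 10-byte chunks (key[i / 10].b[i % 10]),
 * so the key must be copied out byte by byte rather than with a single
 * memcpy.
 */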
static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					 struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
		int i;

		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}

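/*
 * A DPDK Rx queue is backed by a start-of-packet (SOP) RQ and, when Rx
 * scatter is enabled, an additional data RQ; the descriptor count
 * reported here is the sum of both rings in the scattered case.
 */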
static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id,
				     struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = ENIC_RX_OFFLOAD_CAPA;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
				     __rte_unused uint16_t tx_queue_id,
				     struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = enic->config.wq_desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = ENIC_TX_OFFLOAD_CAPA; /* not configurable */
	/* tx_thresh, and all the other fields are not applicable for enic */
}

static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
					    uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
					     uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

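/*
 * Ops left as NULL below (link up/down, flow control, etc.) are not
 * implemented by this adapter; the ethdev layer reports such calls as
 * unsupported to the application.
 */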
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure = enicpmd_dev_configure,
	.dev_start = enicpmd_dev_start,
	.dev_stop = enicpmd_dev_stop,
	.dev_set_link_up = NULL,
	.dev_set_link_down = NULL,
	.dev_close = enicpmd_dev_close,
	.promiscuous_enable = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update = enicpmd_dev_link_update,
	.stats_get = enicpmd_dev_stats_get,
	.stats_reset = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set = enicpmd_mtu_set,
	.vlan_filter_set = NULL,
	.vlan_tpid_set = NULL,
	.vlan_offload_set = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start = enicpmd_dev_rx_queue_start,
	.rx_queue_stop = enicpmd_dev_rx_queue_stop,
	.tx_queue_start = enicpmd_dev_tx_queue_start,
	.tx_queue_stop = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup = enicpmd_dev_rx_queue_setup,
	.rx_queue_release = enicpmd_dev_rx_queue_release,
	.rx_queue_count = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done = NULL,
	.tx_queue_setup = enicpmd_dev_tx_queue_setup,
	.tx_queue_release = enicpmd_dev_tx_queue_release,
	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
	.rxq_info_get = enicpmd_dev_rxq_info_get,
	.txq_info_get = enicpmd_dev_txq_info_get,
	.dev_led_on = NULL,
	.dev_led_off = NULL,
	.flow_ctrl_get = NULL,
	.flow_ctrl_set = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add = enicpmd_add_mac_addr,
	.mac_addr_remove = enicpmd_remove_mac_addr,
	.mac_addr_set = enicpmd_set_mac_addr,
	.filter_ctrl = enicpmd_dev_filter_ctrl,
	.reta_query = enicpmd_dev_rss_reta_query,
	.reta_update = enicpmd_dev_rss_reta_update,
	.rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
	.rss_hash_update = enicpmd_dev_rss_hash_update,
};

struct enic *enicpmd_list_head = NULL;
/* Initialize the driver.
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	eth_dev->tx_pkt_prepare = &enic_prep_pkts;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	return enic_probe(enic);
}

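/*
 * Probe and remove go through the generic ethdev PCI helpers, which
 * allocate the per-port private data (sizeof(struct enic)) and call
 * eth_enicpmd_dev_init for each matching device.
 */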
static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");