/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#include <stdio.h>
#include <stdint.h>

#include <rte_dev.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"

int enicpmd_logtype_init;
int enicpmd_logtype_flow;

#define PMD_INIT_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, enicpmd_logtype_init, \
		"%s" fmt "\n", __func__, ##args)

#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")

/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{.vendor_id = 0, /* sentinel */},
};

/* Register the driver's log types and set their default levels. */
RTE_INIT(enicpmd_init_log);
static void
enicpmd_init_log(void)
{
	enicpmd_logtype_init = rte_log_register("pmd.enic.init");
	if (enicpmd_logtype_init >= 0)
		rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
	enicpmd_logtype_flow = rte_log_register("pmd.enic.flow");
	if (enicpmd_logtype_flow >= 0)
		rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
}

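/*
 * Dispatch a flow director (fdir) request from the generic filter API to
 * the matching enic_fdir_* handler. ADD and UPDATE share one handler,
 * which appears to replace a filter in place when one already exists.
 */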
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;

	case RTE_ETH_FILTER_FLUSH:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	case RTE_ETH_FILTER_INFO:
		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &enic_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		/* dev_warning() discards its first argument in this
		 * driver, so no enic pointer is needed in scope here.
		 */
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_wq(txq);
}

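/*
 * Interrupt resources can only be allocated once every completion, work,
 * and receive queue has been configured, so this helper first checks that
 * all queues have control structures and quietly returns 0 until they do.
 * It is called at the end of both Rx and Tx queue setup.
 */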
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* check start of packet (SOP) RQs only in case scatter is disabled. */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}

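/*
 * Allocate a VIC work queue (WQ) for a Tx queue and point the generic
 * tx_queues[] slot at the driver's WQ structure. Queue creation is
 * restricted to the primary process.
 */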
static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	__rte_unused const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	if (queue_idx >= ENIC_WQ_MAX) {
		dev_err(enic,
			"Max number of TX queues exceeded. Max is %d\n",
			ENIC_WQ_MAX);
		return -EINVAL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}

static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}

static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}

static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_rq(rxq);
}

static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
					   uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(dev);
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;
	int rq_num;

	/* Count the entries between the CQ's software clean index and the
	 * tail index posted by hardware.
	 */
	rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	cq = &enic->cq[enic_cq_rq(enic, rq_num)];
	cq_idx = cq->to_clean;

	cq_tail = ioread32(&cq->ctrl->cq_tail);

	/* Account for ring wraparound. */
	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}

static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	/* With Rx scatter support, two RQs are now used on VIC per RQ used
	 * by the application.
	 */
	if (queue_idx * 2 >= ENIC_RQ_MAX) {
		dev_err(enic,
			"Max number of RX queues exceeded. Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
			ENIC_RQ_MAX);
		return -EINVAL;
	}

	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}

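/* Add or remove a VLAN ID from the NIC's VLAN filter table. */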
static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
	uint16_t vlan_id, int on)
{
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();
	if (on)
		err = enic_add_vlan(enic, vlan_id);
	else
		err = enic_del_vlan(enic, vlan_id);
	return err;
}

static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (eth_dev->data->dev_conf.rxmode.offloads &
		    DEV_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}
	enic_set_rss_nic_cfg(enic);

	if (mask & ETH_VLAN_FILTER_MASK) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}

	return 0;
}

static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
				  DEV_RX_OFFLOAD_CHECKSUM);
	ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);

	return ret;
}

/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	/* Atomically clear the advertised link state. */
	memset(&link, 0, sizeof(link));
	rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&link);
}

/*
 * Stop device.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}

static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_link_update(enic);
}

static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}

static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_clear(enic);
}

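/*
 * Report device limits and offload capabilities. Queue and MAC limits
 * come from the vNIC configuration read from the adapter.
 */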
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
	device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
	device_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	device_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == enic_recv_pkts)
		return ptypes;
	return NULL;
}

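/*
 * The four handlers below only flip the corresponding flag in the enic
 * structure; enic_add_packet_filter() then pushes the combined
 * promiscuous/allmulticast packet-filter state to the NIC.
 */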
static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_del_mac_address(enic, index);
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}

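/*
 * Table of eth_dev callbacks exported to the ethdev layer. Entries left
 * NULL are operations the enic PMD does not implement.
 */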
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure        = enicpmd_dev_configure,
	.dev_start            = enicpmd_dev_start,
	.dev_stop             = enicpmd_dev_stop,
	.dev_set_link_up      = NULL,
	.dev_set_link_down    = NULL,
	.dev_close            = enicpmd_dev_close,
	.promiscuous_enable   = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable  = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable  = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update          = enicpmd_dev_link_update,
	.stats_get            = enicpmd_dev_stats_get,
	.stats_reset          = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get        = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set              = enicpmd_mtu_set,
	.vlan_filter_set      = enicpmd_vlan_filter_set,
	.vlan_tpid_set        = NULL,
	.vlan_offload_set     = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start       = enicpmd_dev_rx_queue_start,
	.rx_queue_stop        = enicpmd_dev_rx_queue_stop,
	.tx_queue_start       = enicpmd_dev_tx_queue_start,
	.tx_queue_stop        = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup       = enicpmd_dev_rx_queue_setup,
	.rx_queue_release     = enicpmd_dev_rx_queue_release,
	.rx_queue_count       = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done   = NULL,
	.tx_queue_setup       = enicpmd_dev_tx_queue_setup,
	.tx_queue_release     = enicpmd_dev_tx_queue_release,
	.dev_led_on           = NULL,
	.dev_led_off          = NULL,
	.flow_ctrl_get        = NULL,
	.flow_ctrl_set        = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add         = enicpmd_add_mac_addr,
	.mac_addr_remove      = enicpmd_remove_mac_addr,
	.filter_ctrl          = enicpmd_dev_filter_ctrl,
};

struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	return enic_probe(enic);
}

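/*
 * Glue between the PCI bus scan and the per-port init function above:
 * rte_eth_dev_pci_generic_probe() allocates an rte_eth_dev with
 * sizeof(struct enic) bytes of private data, then calls
 * eth_enicpmd_dev_init() on it.
 */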
static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");