/*
 * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * Copyright (c) 2014, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_enet.h"
#include "enic.h"
#ifdef RTE_LIBRTE_ENIC_DEBUG
#define ENICPMD_FUNC_TRACE() \
	RTE_LOG(DEBUG, PMD, "ENICPMD trace: %s\n", __func__)
#else
#define ENICPMD_FUNC_TRACE() (void)0
#endif
/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;
	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;
	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;
	case RTE_ETH_FILTER_FLUSH:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	case RTE_ETH_FILTER_INFO:
		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}

	return ret;
}
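/*
 * Illustrative sketch, not part of the driver: an application reaches
 * enicpmd_fdir_ctrl_func() above through the legacy ethdev filter API.
 * The port id, match field and queue below are assumptions made for the
 * example only.
 *
 *	struct rte_eth_fdir_filter fltr;
 *	uint16_t port_id = 0;				// assumed enic port
 *
 *	memset(&fltr, 0, sizeof(fltr));
 *	fltr.soft_id = 1;
 *	fltr.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
 *	fltr.input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789);
 *	fltr.action.rx_queue = 1;
 *	fltr.action.behavior = RTE_ETH_FDIR_ACCEPT;
 *
 *	// Dispatched to .filter_ctrl and from there to the FDIR handler
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *				RTE_ETH_FILTER_ADD, &fltr);
 */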
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
			enum rte_filter_type filter_type,
			enum rte_filter_op filter_op,
			void *arg)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &enic_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
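/*
 * A minimal sketch of the RTE_ETH_FILTER_GENERIC branch above, which is how
 * the rte_flow layer obtains the driver's flow ops (applications normally
 * call rte_flow_validate()/rte_flow_create() instead of doing this
 * themselves); port_id is an assumption for the example:
 *
 *	const struct rte_flow_ops *ops = NULL;
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				RTE_ETH_FILTER_GET, &ops);
 *	// ops now points at enic_flow_ops
 */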
static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();

	enic_free_wq(txq);
}
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* check start of packet (SOP) RQs only in case scatter is disabled. */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}
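/*
 * Both queue setup paths below end with a call to enicpmd_dev_setup_intr(),
 * so interrupt resources and vNIC rings are only finalized once every CQ,
 * WQ and SOP RQ has its control area mapped.  A hedged sketch of the
 * application-side ordering that drives this (port_conf and mbuf_pool are
 * assumed to exist):
 *
 *	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
 *	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL, mbuf_pool);
 *	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
 *	rte_eth_dev_start(port_id);
 */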
static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	__rte_unused const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	if (queue_idx >= ENIC_WQ_MAX) {
		dev_err(enic,
			"Max number of TX queues exceeded. Max is %d\n",
			ENIC_WQ_MAX);
		return -EINVAL;
	}

	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_start_wq(enic, queue_idx);
	return 0;
}
static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);
	return ret;
}
static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_start_rq(enic, queue_idx);
	return 0;
}
static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);
	return ret;
}
static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();

	enic_free_rq(rxq);
}
static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
	uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(dev);
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;
	int rq_num;

	rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	cq = &enic->cq[enic_cq_rq(enic, rq_num)];
	cq_idx = cq->to_clean;

	cq_tail = ioread32(&cq->ctrl->cq_tail);

	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}
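/*
 * The value returned above is the distance from the completion queue's
 * software clean index to the hardware tail, with a wrap-around correction.
 * A small worked example, assuming a 64-entry completion ring:
 *
 *	cq_idx (cq->to_clean) = 60, cq_tail read from hardware = 3
 *	cq_tail < cq_idx, so cq_tail += 64   -> 67
 *	queue_count = 67 - 60                -> 7 completed, not yet cleaned
 */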
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* With Rx scatter support, two RQs are now used on VIC per RQ used
	 * by the application.
	 */
	if (queue_idx * 2 >= ENIC_RQ_MAX) {
		dev_err(enic,
			"Max number of RX queues exceeded. Max is %d. This PMD uses 2 RQs on VIC per RQ used by DPDK.\n",
			ENIC_RQ_MAX);
		return -EINVAL;
	}

	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
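/*
 * Sketch of the RQ numbering implied by the "queue_idx * 2" check above
 * (for illustration only; the authoritative mapping is the
 * enic_rte_rq_idx_to_sop_idx() helper in enic.h): each DPDK rx queue i is
 * backed by a start-of-packet RQ plus a companion data RQ used for
 * scattered Rx, e.g.
 *
 *	sop_rq_idx  = enic_rte_rq_idx_to_sop_idx(i);	// assumed to be 2 * i
 *	data_rq_idx = sop_rq_idx + 1;			// assumption: data RQ follows its SOP RQ
 */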
static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
	uint16_t vlan_id, int on)
{
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();
	if (on)
		err = enic_add_vlan(enic, vlan_id);
	else
		err = enic_del_vlan(enic, vlan_id);
	return err;
}
static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}
	enic_set_rss_nic_cfg(enic);

	if (mask & ETH_VLAN_FILTER_MASK) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if (mask & ETH_VLAN_EXTEND_MASK) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}
}
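/*
 * A hedged example of toggling Rx VLAN stripping from the application; this
 * lands in the ETH_VLAN_STRIP_MASK branch above (port_id assumed):
 *
 *	int mask = rte_eth_dev_get_vlan_offload(port_id);
 *
 *	rte_eth_dev_set_vlan_offload(port_id, mask | ETH_VLAN_STRIP_OFFLOAD);
 */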
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	if (eth_dev->data->dev_conf.rxmode.split_hdr_size &&
		eth_dev->data->dev_conf.rxmode.header_split) {
		/* Enable header-data-split */
		enic_set_hdr_split_size(enic,
			eth_dev->data->dev_conf.rxmode.split_hdr_size);
	}

	enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
	enic->hw_ip_checksum = eth_dev->data->dev_conf.rxmode.hw_ip_checksum;
	return 0;
}
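/*
 * Sketch of the rxmode flags consumed by enicpmd_dev_configure() above,
 * using the bit-field style rxmode settings this file is written against;
 * port_id, nb_rxq and nb_txq are assumptions for the example:
 *
 *	struct rte_eth_conf port_conf = {
 *		.rxmode = {
 *			.hw_vlan_strip = 1,	// feeds the ETH_VLAN_STRIP_MASK path
 *			.hw_ip_checksum = 1,	// copied into enic->hw_ip_checksum
 *		},
 *	};
 *
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */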
/*
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);
	memset(&link, 0, sizeof(link));
	rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&eth_dev->data->dev_link,
		*(uint64_t *)&link);
}
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}
static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_link_update(enic);
}
static void enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_get(enic, stats);
}
static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_clear(enic);
}
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
	device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
	device_info->rx_offload_capa =
		DEV_RX_OFFLOAD_VLAN_STRIP |
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM;
	device_info->tx_offload_capa =
		DEV_TX_OFFLOAD_VLAN_INSERT |
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM |
		DEV_TX_OFFLOAD_TCP_TSO;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
}
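/*
 * Note on max_rx_pktlen above: the extra 4 bytes on top of the Ethernet
 * header presumably leave room for a single VLAN tag, e.g. a 9000 byte
 * vNIC MTU is reported as 9000 + 14 (ETHER_HDR_LEN) + 4 = 9018 bytes.
 */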
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == enic_recv_pkts)
		return ptypes;
	return NULL;
}
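/*
 * Hedged usage sketch for the ptype table above (port_id assumed): the
 * application can ask which L4 packet types the Rx burst function will
 * classify.
 *
 *	uint32_t l4[8];
 *	int n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L4_MASK,
 *						 l4, RTE_DIM(l4));
 *	// n L4 entries (TCP, UDP, FRAG, NONFRAG) when enic_recv_pkts is in use
 */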
static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->promisc = 1;
	enic_add_packet_filter(enic);
}
static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}
static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}
static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}
static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}
static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_del_mac_address(enic, index);
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}
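/*
 * Example, assuming port_id is a bound enic port: requesting a jumbo MTU
 * goes through enic_set_mtu(), which validates the value against the range
 * allowed by the vNIC configuration (enic->max_mtu).
 *
 *	if (rte_eth_dev_set_mtu(port_id, 9000) != 0)
 *		printf("MTU 9000 not accepted by the vNIC\n");
 */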
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure = enicpmd_dev_configure,
	.dev_start = enicpmd_dev_start,
	.dev_stop = enicpmd_dev_stop,
	.dev_set_link_up = NULL,
	.dev_set_link_down = NULL,
	.dev_close = enicpmd_dev_close,
	.promiscuous_enable = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update = enicpmd_dev_link_update,
	.stats_get = enicpmd_dev_stats_get,
	.stats_reset = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set = enicpmd_mtu_set,
	.vlan_filter_set = enicpmd_vlan_filter_set,
	.vlan_tpid_set = NULL,
	.vlan_offload_set = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start = enicpmd_dev_rx_queue_start,
	.rx_queue_stop = enicpmd_dev_rx_queue_stop,
	.tx_queue_start = enicpmd_dev_tx_queue_start,
	.tx_queue_stop = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup = enicpmd_dev_rx_queue_setup,
	.rx_queue_release = enicpmd_dev_rx_queue_release,
	.rx_queue_count = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done = NULL,
	.tx_queue_setup = enicpmd_dev_tx_queue_setup,
	.tx_queue_release = enicpmd_dev_tx_queue_release,
	.dev_led_on = NULL,
	.dev_led_off = NULL,
	.flow_ctrl_get = NULL,
	.flow_ctrl_set = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add = enicpmd_add_mac_addr,
	.mac_addr_remove = enicpmd_remove_mac_addr,
	.filter_ctrl = enicpmd_dev_filter_ctrl,
};
struct enic *enicpmd_list_head = NULL;
/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	return enic_probe(enic);
}
static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");