/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 */
#include <stdint.h>

#include <rte_bus_pci.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_kvargs.h>
#include <rte_string_fns.h>

#include "vnic_intr.h"
#include "vnic_cq.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
#include "vnic_enet.h"
#include "enic.h"
int enicpmd_logtype_init;
int enicpmd_logtype_flow;

#define ENICPMD_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
/*
 * The set of PCI devices this driver supports
 */
#define CISCO_PCI_VENDOR_ID 0x1137
static const struct rte_pci_id pci_id_enic_map[] = {
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET) },
	{ RTE_PCI_DEVICE(CISCO_PCI_VENDOR_ID, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
	{.vendor_id = 0, /* sentinel */},
};
#define ENIC_DEVARG_DISABLE_OVERLAY "disable-overlay"
#define ENIC_DEVARG_ENABLE_AVX2_RX "enable-avx2-rx"
#define ENIC_DEVARG_IG_VLAN_REWRITE "ig-vlan-rewrite"
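/*
 * Device arguments, parsed in enic_check_devargs() below:
 *   disable-overlay=0|1: disable overlay offload
 *   enable-avx2-rx=0|1: allow the AVX2-based Rx handler
 *   ig-vlan-rewrite=trunk|untag|priority|pass: ingress VLAN rewrite mode
 */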
RTE_INIT(enicpmd_init_log)
{
	enicpmd_logtype_init = rte_log_register("pmd.net.enic.init");
	if (enicpmd_logtype_init >= 0)
		rte_log_set_level(enicpmd_logtype_init, RTE_LOG_NOTICE);
	enicpmd_logtype_flow = rte_log_register("pmd.net.enic.flow");
	if (enicpmd_logtype_flow >= 0)
		rte_log_set_level(enicpmd_logtype_flow, RTE_LOG_NOTICE);
}
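/*
 * Both log types default to NOTICE. They can be raised at run time with
 * the usual EAL log option, e.g. (illustrative invocation):
 *   --log-level=pmd.net.enic.init:debug
 */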
static int
enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
			enum rte_filter_op filter_op, void *arg)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret = 0;

	ENICPMD_FUNC_TRACE();
	if (filter_op == RTE_ETH_FILTER_NOP)
		return 0;

	if (arg == NULL && filter_op != RTE_ETH_FILTER_FLUSH)
		return -EINVAL;

	switch (filter_op) {
	case RTE_ETH_FILTER_ADD:
	case RTE_ETH_FILTER_UPDATE:
		ret = enic_fdir_add_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_DELETE:
		ret = enic_fdir_del_fltr(enic,
			(struct rte_eth_fdir_filter *)arg);
		break;

	case RTE_ETH_FILTER_STATS:
		enic_fdir_stats_get(enic, (struct rte_eth_fdir_stats *)arg);
		break;

	case RTE_ETH_FILTER_FLUSH:
		dev_warning(enic, "unsupported operation %u", filter_op);
		ret = -ENOTSUP;
		break;
	case RTE_ETH_FILTER_INFO:
		enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
		break;
	default:
		dev_err(enic, "unknown operation %u", filter_op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
enicpmd_dev_filter_ctrl(struct rte_eth_dev *dev,
		     enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op,
		     void *arg)
{
	int ret = 0;

	ENICPMD_FUNC_TRACE();

	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &enic_flow_ops;
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = enicpmd_fdir_ctrl_func(dev, filter_op, arg);
		break;
	default:
		dev_warning(enic, "Filter type (%d) not supported",
			filter_type);
		ret = -EINVAL;
		break;
	}

	return ret;
}
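/*
 * Note: RTE_ETH_FILTER_GENERIC is how the rte_flow library discovers the
 * driver's flow ops; applications normally reach this path through
 * rte_flow_validate()/rte_flow_create() rather than calling filter_ctrl
 * directly.
 */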
static void enicpmd_dev_tx_queue_release(void *txq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_wq(txq);
}
static int enicpmd_dev_setup_intr(struct enic *enic)
{
	int ret;
	unsigned int index;

	ENICPMD_FUNC_TRACE();

	/* Are we done with the init of all the queues? */
	for (index = 0; index < enic->cq_count; index++) {
		if (!enic->cq[index].ctrl)
			break;
	}
	if (enic->cq_count != index)
		return 0;
	for (index = 0; index < enic->wq_count; index++) {
		if (!enic->wq[index].ctrl)
			break;
	}
	if (enic->wq_count != index)
		return 0;
	/* check start of packet (SOP) RQs only in case scatter is disabled. */
	for (index = 0; index < enic->rq_count; index++) {
		if (!enic->rq[enic_rte_rq_idx_to_sop_idx(index)].ctrl)
			break;
	}
	if (enic->rq_count != index)
		return 0;

	ret = enic_alloc_intr_resources(enic);
	if (ret) {
		dev_err(enic, "alloc intr failed\n");
		return ret;
	}
	enic_init_vnic_resources(enic);

	ret = enic_setup_finish(enic);
	if (ret)
		dev_err(enic, "setup could not be finished\n");

	return ret;
}
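/*
 * The checks above make interrupt setup a no-op (return 0) until every CQ,
 * WQ, and SOP RQ has been initialized; the last queue-setup call to arrive
 * performs the actual interrupt allocation and finishes vNIC setup.
 */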
static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_txconf *tx_conf)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);
	struct vnic_wq *wq;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	RTE_ASSERT(queue_idx < enic->conf_wq_count);
	wq = &enic->wq[queue_idx];
	wq->offloads = tx_conf->offloads |
		eth_dev->data->dev_conf.txmode.offloads;
	eth_dev->data->tx_queues[queue_idx] = (void *)wq;

	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
	if (ret) {
		dev_err(enic, "error in allocating wq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_wq(enic, queue_idx);

	return 0;
}
static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_wq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping wq %d\n", queue_idx);

	return ret;
}
static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	enic_start_rq(enic, queue_idx);

	return 0;
}
static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	ret = enic_stop_rq(enic, queue_idx);
	if (ret)
		dev_err(enic, "error in stopping rq %d\n", queue_idx);

	return ret;
}
static void enicpmd_dev_rx_queue_release(void *rxq)
{
	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	enic_free_rq(rxq);
}
static uint32_t enicpmd_dev_rx_queue_count(struct rte_eth_dev *dev,
					   uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(dev);
	uint32_t queue_count = 0;
	struct vnic_cq *cq;
	uint32_t cq_tail;
	uint16_t cq_idx;
	int rq_num;

	rq_num = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	cq = &enic->cq[enic_cq_rq(enic, rq_num)];
	cq_idx = cq->to_clean;

	cq_tail = ioread32(&cq->ctrl->cq_tail);

	if (cq_tail < cq_idx)
		cq_tail += cq->ring.desc_count;

	queue_count = cq_tail - cq_idx;

	return queue_count;
}
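/*
 * Worked example of the wrap-around math above (illustrative numbers):
 * with desc_count = 64, to_clean = 60, and hardware cq_tail = 4, cq_tail
 * is bumped to 4 + 64 = 68, so 68 - 60 = 8 completions are pending.
 */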
static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
	uint16_t queue_idx,
	uint16_t nb_desc,
	unsigned int socket_id,
	const struct rte_eth_rxconf *rx_conf,
	struct rte_mempool *mp)
{
	int ret;
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;
	RTE_ASSERT(enic_rte_rq_idx_to_sop_idx(queue_idx) < enic->conf_rq_count);
	eth_dev->data->rx_queues[queue_idx] =
		(void *)&enic->rq[enic_rte_rq_idx_to_sop_idx(queue_idx)];

	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc,
			    rx_conf->rx_free_thresh);
	if (ret) {
		dev_err(enic, "error in allocating rq\n");
		return ret;
	}

	return enicpmd_dev_setup_intr(enic);
}
static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
{
	struct enic *enic = pmd_priv(eth_dev);
	uint64_t offloads;

	ENICPMD_FUNC_TRACE();

	offloads = eth_dev->data->dev_conf.rxmode.offloads;
	if (mask & ETH_VLAN_STRIP_MASK) {
		if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			enic->ig_vlan_strip_en = 1;
		else
			enic->ig_vlan_strip_en = 0;
	}

	if ((mask & ETH_VLAN_FILTER_MASK) &&
	    (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
		dev_warning(enic,
			"Configuration of VLAN filter is not supported\n");
	}

	if ((mask & ETH_VLAN_EXTEND_MASK) &&
	    (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
		dev_warning(enic,
			"Configuration of extended VLAN is not supported\n");
	}

	return enic_set_vlan_strip(enic);
}
static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
{
	int ret;
	int mask;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_set_vnic_res(enic);
	if (ret) {
		dev_err(enic, "Set vNIC resource num failed, aborting\n");
		return ret;
	}

	enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
				  DEV_RX_OFFLOAD_CHECKSUM);
	/* All vlan offload masks to apply the current settings */
	mask = ETH_VLAN_STRIP_MASK |
		ETH_VLAN_FILTER_MASK |
		ETH_VLAN_EXTEND_MASK;
	ret = enicpmd_vlan_offload_set(eth_dev, mask);
	if (ret) {
		dev_err(enic, "Failed to configure VLAN offloads\n");
		return ret;
	}
	/*
	 * Initialize RSS with the default reta and key. If the user key is
	 * given (rx_adv_conf.rss_conf.rss_key), will use that instead of the
	 * default key.
	 */
	return enic_init_rss_nic_cfg(enic);
}
/* Start the device.
 * It returns 0 on success.
 */
static int enicpmd_dev_start(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_enable(enic);
}
/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct rte_eth_link link;
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic_disable(enic);

	memset(&link, 0, sizeof(link));
	rte_eth_linkstatus_set(eth_dev, &link);
}
/*
 * Stop device.
 */
static void enicpmd_dev_close(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_remove(enic);
}
static int enicpmd_dev_link_update(struct rte_eth_dev *eth_dev,
	__rte_unused int wait_to_complete)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_link_update(enic);
}
static int enicpmd_dev_stats_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_stats *stats)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_dev_stats_get(enic, stats);
}
static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	enic_dev_stats_clear(enic);
}
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
	 * ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
	device_info->rx_offload_capa = enic->rx_offload_capa;
	device_info->tx_offload_capa = enic->tx_offload_capa;
	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
	device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.rq_desc_count,
		.nb_min = ENIC_MIN_RQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
	};
	device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.wq_desc_count,
		.nb_min = ENIC_MIN_WQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
		.nb_seg_max = ENIC_TX_XMIT_MAX,
		.nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
	};
	device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_RX_BURST,
		.ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
			ENIC_DEFAULT_RX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_RX_RINGS,
	};
	device_info->default_txportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_TX_BURST,
		.ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
			ENIC_DEFAULT_TX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_TX_RINGS,
	};
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ptypes_overlay[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
	    dev->rx_pkt_burst != NULL) {
		struct enic *enic = pmd_priv(dev);
		if (enic->overlay_offload)
			return ptypes_overlay;
		else
			return ptypes;
	}
	return NULL;
}
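/*
 * rx_pkt_burst points at enic_dummy_recv_pkts while the port is stopped,
 * so supported packet types are only reported once a real Rx handler has
 * been installed.
 */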
static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	enic_add_packet_filter(enic);
}
static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}
static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}
static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}
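/*
 * The four handlers above only record the requested promiscuous/allmulti
 * state in the enic structure; enic_add_packet_filter() then pushes the
 * combined packet-filter configuration to the vNIC.
 */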
static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}
static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}
static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
				struct ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}
static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}
static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64
				      *reta_conf,
				      uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}
static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_reta_entry64
				       *reta_conf,
				       uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}
	return enic_set_rss_reta(enic, &rss_cpu);
}
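/*
 * The vNIC indirection table (union vnic_rss_cpu) packs one queue index
 * per byte, four bytes per 32-bit word, hence the cpu[i / 4].b[i % 4]
 * addressing used in reta_query/reta_update above.
 */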
static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}
static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					 struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
		int i;

		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}
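/*
 * Similarly, the RSS key (ENIC_RSS_HASH_KEY_SIZE bytes) is stored in
 * 10-byte chunks, hence the key[i / 10].b[i % 10] addressing above.
 */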
static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id,
				     struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = enic->rx_offload_capa;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}
static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id,
				     struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_wq *wq = &enic->wq[tx_queue_id];

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = wq->ring.desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = wq->offloads;
	/* tx_thresh, and all the other fields are not applicable for enic */
}
static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
					    uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}
static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
					     uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}
static int udp_tunnel_common_check(struct enic *enic,
				   struct rte_eth_udp_tunnel *tnl)
{
	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
		return -ENOTSUP;
	if (!enic->overlay_offload) {
		PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
			     "supported\n");
		return -ENOTSUP;
	}
	return 0;
}
static int update_vxlan_port(struct enic *enic, uint16_t port)
{
	if (vnic_dev_overlay_offload_cfg(enic->vdev,
					 OVERLAY_CFG_VXLAN_PORT_UPDATE,
					 port)) {
		PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
		return -EINVAL;
	}
	PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
	enic->vxlan_port = port;
	return 0;
}
static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * The NIC has 1 configurable VXLAN port number. "Adding" a new port
	 * number replaces it.
	 */
	if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
		PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, tnl->udp_port);
}
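/*
 * A sketch of the application-side call that lands here (illustrative,
 * not part of this driver):
 *
 *	struct rte_eth_udp_tunnel tnl = {
 *		.prot_type = RTE_TUNNEL_TYPE_VXLAN,
 *		.udp_port = 4789,
 *	};
 *	rte_eth_dev_udp_tunnel_port_add(port_id, &tnl);
 *
 * Because the NIC has a single configurable VXLAN port, each add simply
 * replaces the previous value.
 */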
static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * Clear the previously set port number and restore the
	 * hardware default port number. Some drivers disable VXLAN
	 * offloads when there are no configured port numbers. But
	 * enic does not do that as VXLAN is part of overlay offload,
	 * which is tied to inner RSS and TSO.
	 */
	if (tnl->udp_port != enic->vxlan_port) {
		PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
}
static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure = enicpmd_dev_configure,
	.dev_start = enicpmd_dev_start,
	.dev_stop = enicpmd_dev_stop,
	.dev_set_link_up = NULL,
	.dev_set_link_down = NULL,
	.dev_close = enicpmd_dev_close,
	.promiscuous_enable = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update = enicpmd_dev_link_update,
	.stats_get = enicpmd_dev_stats_get,
	.stats_reset = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set = enicpmd_mtu_set,
	.vlan_filter_set = NULL,
	.vlan_tpid_set = NULL,
	.vlan_offload_set = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start = enicpmd_dev_rx_queue_start,
	.rx_queue_stop = enicpmd_dev_rx_queue_stop,
	.tx_queue_start = enicpmd_dev_tx_queue_start,
	.tx_queue_stop = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup = enicpmd_dev_rx_queue_setup,
	.rx_queue_release = enicpmd_dev_rx_queue_release,
	.rx_queue_count = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done = NULL,
	.tx_queue_setup = enicpmd_dev_tx_queue_setup,
	.tx_queue_release = enicpmd_dev_tx_queue_release,
	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
	.rxq_info_get = enicpmd_dev_rxq_info_get,
	.txq_info_get = enicpmd_dev_txq_info_get,
	.dev_led_on = NULL,
	.dev_led_off = NULL,
	.flow_ctrl_get = NULL,
	.flow_ctrl_set = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add = enicpmd_add_mac_addr,
	.mac_addr_remove = enicpmd_remove_mac_addr,
	.mac_addr_set = enicpmd_set_mac_addr,
	.filter_ctrl = enicpmd_dev_filter_ctrl,
	.reta_query = enicpmd_dev_rss_reta_query,
	.reta_update = enicpmd_dev_rss_reta_update,
	.rss_hash_conf_get = enicpmd_dev_rss_hash_conf_get,
	.rss_hash_update = enicpmd_dev_rss_hash_update,
	.udp_tunnel_port_add = enicpmd_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del = enicpmd_dev_udp_tunnel_port_del,
};
static int enic_parse_zero_one(const char *key,
			       const char *value,
			       void *opaque)
{
	struct enic *enic;
	bool b;

	enic = (struct enic *)opaque;
	if (strcmp(value, "0") == 0) {
		b = false;
	} else if (strcmp(value, "1") == 0) {
		b = true;
	} else {
		dev_err(enic, "Invalid value for %s"
			": expected=0|1 given=%s\n", key, value);
		return -EINVAL;
	}
	if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
		enic->disable_overlay = b;
	if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
		enic->enable_avx2_rx = b;
	return 0;
}
static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
				      const char *value,
				      void *opaque)
{
	struct enic *enic;

	enic = (struct enic *)opaque;
	if (strcmp(value, "trunk") == 0) {
		/* Trunk mode: always tag */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
	} else if (strcmp(value, "untag") == 0) {
		/* Untag default VLAN mode: untag if VLAN = default VLAN */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
	} else if (strcmp(value, "priority") == 0) {
		/*
		 * Priority-tag default VLAN mode: priority tag (VLAN header
		 * with ID=0) if VLAN = default
		 */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
	} else if (strcmp(value, "pass") == 0) {
		/* Pass through mode: do not touch tags */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	} else {
		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
			": expected=trunk|untag|priority|pass given=%s\n",
			value);
		return -EINVAL;
	}
	return 0;
}
static int enic_check_devargs(struct rte_eth_dev *dev)
{
	static const char *const valid_keys[] = {
		ENIC_DEVARG_DISABLE_OVERLAY,
		ENIC_DEVARG_ENABLE_AVX2_RX,
		ENIC_DEVARG_IG_VLAN_REWRITE,
		NULL};
	struct enic *enic = pmd_priv(dev);
	struct rte_kvargs *kvlist;

	ENICPMD_FUNC_TRACE();

	enic->disable_overlay = false;
	enic->enable_avx2_rx = false;
	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	if (!dev->device->devargs)
		return 0;
	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;
	if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
			       enic_parse_ig_vlan_rewrite, enic) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}
	rte_kvargs_free(kvlist);
	return 0;
}
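/*
 * Devargs are appended to the device on the EAL command line, e.g.
 * (illustrative PCI address):
 *   -w 0000:0b:00.0,disable-overlay=1,ig-vlan-rewrite=untag
 */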
/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	eth_dev->tx_pkt_prepare = &enic_prep_pkts;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	err = enic_check_devargs(eth_dev);
	if (err)
		return err;
	return enic_probe(enic);
}
static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}
static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}
static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
	ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
	ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");