/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_dev.h>
#include <rte_ethdev.h>
#include <rte_pci.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;
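	/*
	 * VLAN insertion and SCTP checksum are not offloaded by the driver
	 * (they are absent from tx_offload_capa above), so the corresponding
	 * "no offload" flags are set in the default Tx queue configuration.
	 */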
	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOVLANOFFL |
					     ETH_TXQ_FLAGS_NOXSUMSCTP;

	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
	/*
	 * The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;

	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
}
static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = dev_data->dev_private;
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	return -rc;
}
static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	return -rc;
}
static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_eth_link *dev_link = &dev->data->dev_link;
	struct rte_eth_link old_link;
	struct rte_eth_link current_link;

	sfc_log_init(sa, "entry");

	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;
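	/*
	 * rte_eth_link fits into a 64-bit word, so the shared link structure
	 * can be read and replaced atomically with compare-and-set instead of
	 * taking the adapter lock, retrying if another context updated it
	 * concurrently.
	 */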
retry:
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
	*(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);

	if (wait_to_complete) {
		efx_link_mode_t link_mode;

		efx_port_poll(sa->nic, &link_mode);
		sfc_port_link_mode_to_info(link_mode, &current_link);

		if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					 *(uint64_t *)&old_link,
					 *(uint64_t *)&current_link))
			goto retry;
	} else {
		sfc_ev_mgmt_qpoll(sa);
		*(int64_t *)&current_link =
			rte_atomic64_read((rte_atomic64_t *)dev_link);
	}

	if (old_link.link_status != current_link.link_status)
		sfc_info(sa, "Link status is %s",
			 current_link.link_status ? "UP" : "DOWN");

	return old_link.link_status == current_link.link_status ? 0 : -1;
}
static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}
static void
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}
static int
sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
		     rx_queue_id, nb_rx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
			  rx_conf, mb_pool);
	if (rc != 0)
		goto fail_rx_qinit;

	dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq;

	sfc_adapter_unlock(sa);
	return 0;

fail_rx_qinit:
	sfc_adapter_unlock(sa);
	return -rc;
}
static void
sfc_rx_queue_release(void *queue)
{
	struct sfc_rxq *rxq = queue;
	struct sfc_adapter *sa;
	unsigned int sw_index;

	if (rxq == NULL)
		return;

	sa = rxq->evq->sa;
	sfc_adapter_lock(sa);

	sw_index = sfc_rxq_sw_index(rxq);
	sfc_log_init(sa, "RxQ=%u", sw_index);

	sa->eth_dev->data->rx_queues[sw_index] = NULL;
	sfc_rx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}
static int
sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
		     tx_queue_id, nb_tx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
	if (rc != 0)
		goto fail_tx_qinit;

	dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qinit:
	sfc_adapter_unlock(sa);
	return -rc;
}
static void
sfc_tx_queue_release(void *queue)
{
	struct sfc_txq *txq = queue;
	unsigned int sw_index;
	struct sfc_adapter *sa;

	if (txq == NULL)
		return;

	sw_index = sfc_txq_sw_index(txq);

	SFC_ASSERT(txq->evq != NULL);
	sa = txq->evq->sa;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	sfc_adapter_lock(sa);

	SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
	sa->eth_dev->data->tx_queues[sw_index] = NULL;

	sfc_tx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}
static void
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;

	rte_spinlock_lock(&port->mac_stats_lock);

	if (sfc_port_update_mac_stats(sa) != 0)
		goto unlock;

	mac_stats = port->mac_stats_buf;
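	/*
	 * Prefer the per-virtual-adapter counters when the firmware reports
	 * them as supported, since they account only for traffic of this
	 * function; otherwise fall back to the port-level MAC counters.
	 */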
	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
		stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
	} else {
		stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
		/*
		 * Take into account the statistics which are ever supported
		 * on EF10. If a statistic is not supported by the current
		 * firmware variant or HW revision, it is guaranteed to be
		 * zero in mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters are supported on EF10 */
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);
}
static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	unsigned int i;
	int rc;
	int nstats = 0;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		nstats = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;
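	/*
	 * Report only the statistics supported by the current port and
	 * firmware. Ids are assigned densely, in the order supported
	 * statistics appear in the EFX enumeration, so they match the
	 * names reported by sfc_xstats_get_names().
	 */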
	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats != NULL && nstats < (int)xstats_count) {
				xstats[nstats].id = nstats;
				xstats[nstats].value = mac_stats[i];
			}
			nstats++;
		}
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return nstats;
}
static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats_names != NULL && nstats < xstats_count)
				strncpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
			nstats++;
		}
	}

	return nstats;
}
static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}
static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}
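	/*
	 * Map the generic ethdev flow control mode onto the EFX
	 * respond (Rx pause) / generate (Tx pause) control bits.
	 */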
	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);
	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);
fail_inval:
	return -rc;
}
static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);
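	/*
	 * Applying a new MTU on a started adapter requires a port restart;
	 * if the restart with the new PDU size fails, the previous PDU size
	 * is restored and the port is restarted again. Only if that also
	 * fails is the port left stopped.
	 */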
	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo_frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	return -rc;
}
static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure = sfc_dev_configure,
	.dev_start = sfc_dev_start,
	.dev_stop = sfc_dev_stop,
	.dev_close = sfc_dev_close,
	.link_update = sfc_dev_link_update,
	.stats_get = sfc_stats_get,
	.xstats_get = sfc_xstats_get,
	.xstats_get_names = sfc_xstats_get_names,
	.dev_infos_get = sfc_dev_infos_get,
	.mtu_set = sfc_dev_set_mtu,
	.rx_queue_setup = sfc_rx_queue_setup,
	.rx_queue_release = sfc_rx_queue_release,
	.tx_queue_setup = sfc_tx_queue_setup,
	.tx_queue_release = sfc_tx_queue_release,
	.flow_ctrl_get = sfc_flow_ctrl_get,
	.flow_ctrl_set = sfc_flow_ctrl_set,
};
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
	int rc;
	const efx_nic_cfg_t *encp;
	const struct ether_addr *from;

	/* Required for logging */
	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;
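	/*
	 * Process the debug_init option before the first sfc_log_init()
	 * call so that initialization logging can honour it.
	 */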
	rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
				sfc_kvarg_bool_handler, &sa->debug_init);
	if (rc != 0)
		goto fail_kvarg_debug_init;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Note that the argument order is the reverse of the Linux kernel
	 * ether_addr_copy(). Copy from NIC config to Ethernet device data.
	 */
	from = (const struct ether_addr *)(encp->enc_mac_addr);
	ether_addr_copy(from, &dev->data->mac_addrs[0]);

	dev->dev_ops = &sfc_eth_dev_ops;
	dev->rx_pkt_burst = &sfc_recv_pkts;
	dev->tx_pkt_burst = &sfc_xmit_pkts;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
fail_kvarg_debug_init:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	return -rc;
}
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_detach(sa);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;
	return 0;
}
static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ .vendor_id = 0 /* sentinel */ }
};
static struct eth_driver sfc_efx_pmd = {
	.pci_drv = {
		.id_table = pci_id_sfc_efx_map,
		.drv_flags =
			RTE_PCI_DRV_INTR_LSC |
			RTE_PCI_DRV_NEED_MAPPING,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = sfc_eth_dev_init,
	.eth_dev_uninit = sfc_eth_dev_uninit,
	.dev_private_size = sizeof(struct sfc_adapter),
};
RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
	SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);