/*
 * Copyright (c) 2016 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>

#include "sfc_debug.h"
#include "sfc_kvargs.h"
static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
        struct sfc_adapter *sa = dev->data->dev_private;

        sfc_log_init(sa, "entry");
        dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
        dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

        /* Autonegotiation may be disabled */
        dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
        if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
                dev_info->speed_capa |= ETH_LINK_SPEED_1G;
        if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
                dev_info->speed_capa |= ETH_LINK_SPEED_10G;
        if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
                dev_info->speed_capa |= ETH_LINK_SPEED_40G;

        dev_info->max_rx_queues = sa->rxq_max;
        dev_info->max_tx_queues = sa->txq_max;

        /* By default packets are dropped if no descriptors are available */
        dev_info->default_rxconf.rx_drop_en = 1;

        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM;

        dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOVLANOFFL |
                                             ETH_TXQ_FLAGS_NOXSUMSCTP;

        dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
        dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
        /* The RXQ hardware requires that the descriptor count is a power
         * of 2, but rx_desc_lim cannot properly describe that constraint.
         */
        dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;

        dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
        dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
        /*
         * The TXQ hardware requires that the descriptor count is a power
         * of 2, but tx_desc_lim cannot properly describe that constraint.
         */
        dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
}
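
/*
 * Illustrative sketch (not part of the driver): nb_align alone cannot
 * express the power-of-2 constraint noted above, so a caller may round
 * its requested ring size itself, e.g. with rte_align32pow2() from
 * rte_common.h. The helper name below is hypothetical.
 */
static inline uint16_t
sfc_example_pick_ring_size(uint16_t nb_desc, uint16_t nb_min, uint16_t nb_max)
{
        /* Round up to the next power of 2, then clamp to the HW limits
         * (nb_max itself is a power of 2, so clamping is safe).
         */
        uint32_t n = rte_align32pow2(RTE_MAX((uint32_t)nb_desc,
                                             (uint32_t)nb_min));

        return (n > nb_max) ? nb_max : (uint16_t)n;
}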
static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
        struct rte_eth_dev_data *dev_data = dev->data;
        struct sfc_adapter *sa = dev_data->dev_private;
        int rc;

        sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
                     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

        sfc_adapter_lock(sa);
        switch (sa->state) {
        case SFC_ADAPTER_CONFIGURED:
                sfc_close(sa);
                SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
                /* FALLTHROUGH */
        case SFC_ADAPTER_INITIALIZED:
                rc = sfc_configure(sa);
                break;
        default:
                sfc_err(sa, "unexpected adapter state %u to configure",
                        sa->state);
                rc = EINVAL;
                break;
        }
        sfc_adapter_unlock(sa);

        sfc_log_init(sa, "done %d", rc);
        SFC_ASSERT(rc >= 0);
        return -rc;
}
static int
sfc_dev_start(struct rte_eth_dev *dev)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        sfc_log_init(sa, "entry");

        sfc_adapter_lock(sa);
        rc = sfc_start(sa);
        sfc_adapter_unlock(sa);

        sfc_log_init(sa, "done %d", rc);
        SFC_ASSERT(rc >= 0);
        return -rc;
}
static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_eth_link *dev_link = &dev->data->dev_link;
        struct rte_eth_link old_link;
        struct rte_eth_link current_link;

        sfc_log_init(sa, "entry");

        if (sa->state != SFC_ADAPTER_STARTED)
                return 0;
        EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
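        /*
         * The link struct is the same size as a 64-bit word (checked
         * above), so it can be snapshotted and updated atomically as a
         * single value.
         */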
        *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);

        if (wait_to_complete) {
                efx_link_mode_t link_mode;

                efx_port_poll(sa->nic, &link_mode);
                sfc_port_link_mode_to_info(link_mode, &current_link);

                if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
                                         *(uint64_t *)&old_link,
                                         *(uint64_t *)&current_link))
                        return 0;
        } else {
                sfc_ev_mgmt_qpoll(sa);
                *(int64_t *)&current_link =
                        rte_atomic64_read((rte_atomic64_t *)dev_link);
        }
        if (old_link.link_status != current_link.link_status)
                sfc_info(sa, "Link status is %s",
                         current_link.link_status ? "UP" : "DOWN");
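        /* Return 0 if the link status is unchanged, -1 if it changed. */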
        return old_link.link_status == current_link.link_status ? 0 : -1;
}
static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
        struct sfc_adapter *sa = dev->data->dev_private;

        sfc_log_init(sa, "entry");

        sfc_adapter_lock(sa);
        sfc_stop(sa);
        sfc_adapter_unlock(sa);

        sfc_log_init(sa, "done");
}
static int
sfc_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        sfc_log_init(sa, "entry");

        sfc_adapter_lock(sa);
        rc = sfc_start(sa);
        sfc_adapter_unlock(sa);

        SFC_ASSERT(rc >= 0);
        return -rc;
}
214 sfc_dev_set_link_down(struct rte_eth_dev *dev)
216 struct sfc_adapter *sa = dev->data->dev_private;
218 sfc_log_init(sa, "entry");
220 sfc_adapter_lock(sa);
222 sfc_adapter_unlock(sa);
static void
sfc_dev_close(struct rte_eth_dev *dev)
{
        struct sfc_adapter *sa = dev->data->dev_private;

        sfc_log_init(sa, "entry");

        sfc_adapter_lock(sa);
        switch (sa->state) {
        case SFC_ADAPTER_STARTED:
                sfc_stop(sa);
                SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
                /* FALLTHROUGH */
        case SFC_ADAPTER_CONFIGURED:
                sfc_close(sa);
                SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
                /* FALLTHROUGH */
        case SFC_ADAPTER_INITIALIZED:
                break;
        default:
                sfc_err(sa, "unexpected adapter state %u on close", sa->state);
                break;
        }
        sfc_adapter_unlock(sa);

        sfc_log_init(sa, "done");
}
static int
sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                   uint16_t nb_rx_desc, unsigned int socket_id,
                   const struct rte_eth_rxconf *rx_conf,
                   struct rte_mempool *mb_pool)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
                     rx_queue_id, nb_rx_desc, socket_id);

        sfc_adapter_lock(sa);

        rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
                          rx_conf, mb_pool);
        if (rc != 0)
                goto fail_rx_qinit;

        dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq;

        sfc_adapter_unlock(sa);

        return 0;

fail_rx_qinit:
        sfc_adapter_unlock(sa);
        SFC_ASSERT(rc > 0);
        return -rc;
}
static void
sfc_rx_queue_release(void *queue)
{
        struct sfc_rxq *rxq = queue;
        struct sfc_adapter *sa;
        unsigned int sw_index;

        if (rxq == NULL)
                return;

        sa = rxq->evq->sa;
        sfc_adapter_lock(sa);

        sw_index = sfc_rxq_sw_index(rxq);

        sfc_log_init(sa, "RxQ=%u", sw_index);

        sa->eth_dev->data->rx_queues[sw_index] = NULL;

        sfc_rx_qfini(sa, sw_index);

        sfc_adapter_unlock(sa);
}
static int
sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                   uint16_t nb_tx_desc, unsigned int socket_id,
                   const struct rte_eth_txconf *tx_conf)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        int rc;

        sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
                     tx_queue_id, nb_tx_desc, socket_id);

        sfc_adapter_lock(sa);

        rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
        if (rc != 0)
                goto fail_tx_qinit;

        dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq;

        sfc_adapter_unlock(sa);

        return 0;

fail_tx_qinit:
        sfc_adapter_unlock(sa);
        SFC_ASSERT(rc > 0);
        return -rc;
}
static void
sfc_tx_queue_release(void *queue)
{
        struct sfc_txq *txq = queue;
        unsigned int sw_index;
        struct sfc_adapter *sa;

        if (txq == NULL)
                return;

        sw_index = sfc_txq_sw_index(txq);

        SFC_ASSERT(txq->evq != NULL);
        sa = txq->evq->sa;

        sfc_log_init(sa, "TxQ = %u", sw_index);

        sfc_adapter_lock(sa);

        SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
        sa->eth_dev->data->tx_queues[sw_index] = NULL;

        sfc_tx_qfini(sa, sw_index);

        sfc_adapter_unlock(sa);
}
static void
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct sfc_port *port = &sa->port;
        uint64_t *mac_stats;

        rte_spinlock_lock(&port->mac_stats_lock);

        if (sfc_port_update_mac_stats(sa) != 0)
                goto unlock;

        mac_stats = port->mac_stats_buf;
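
        /*
         * Prefer the vadapter counters when the firmware exposes them
         * (e.g. on a virtual function, which has no access to the full
         * port MAC statistics); otherwise fall back to the port MAC
         * statistics below.
         */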
        if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
                                   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
                stats->ipackets =
                        mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
                        mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
                        mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
                stats->opackets =
                        mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
                        mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
                        mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
                stats->ibytes =
                        mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
                        mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
                        mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
                stats->obytes =
                        mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
                        mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
                        mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
                stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
                stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
                stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
        } else {
                stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
                stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
                stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
                stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];

                /*
                 * Take into account stats which are always supported on
                 * EF10. If some stat is not supported by the current
                 * firmware variant or HW revision, it is guaranteed
                 * to be zero in mac_stats.
                 */
                stats->imissed =
                        mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
                        mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
                        mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
                        mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
                        mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
                        mac_stats[EFX_MAC_PM_TRUNC_QBB] +
                        mac_stats[EFX_MAC_PM_DISCARD_QBB] +
                        mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
                        mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
                        mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
                stats->ierrors =
                        mac_stats[EFX_MAC_RX_FCS_ERRORS] +
                        mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
                        mac_stats[EFX_MAC_RX_JABBER_PKTS];
                /* no oerrors counters supported on EF10 */
        }

unlock:
        rte_spinlock_unlock(&port->mac_stats_lock);
}
static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
               unsigned int xstats_count)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct sfc_port *port = &sa->port;
        uint64_t *mac_stats;
        int rc;
        unsigned int i;
        int nstats = 0;

        rte_spinlock_lock(&port->mac_stats_lock);

        rc = sfc_port_update_mac_stats(sa);
        if (rc != 0) {
                SFC_ASSERT(rc > 0);
                nstats = -rc;
                goto unlock;
        }

        mac_stats = port->mac_stats_buf;

        for (i = 0; i < EFX_MAC_NSTATS; ++i) {
                if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
                        if (xstats != NULL && nstats < (int)xstats_count) {
                                xstats[nstats].id = nstats;
                                xstats[nstats].value = mac_stats[i];
                        }
                        ++nstats;
                }
        }

unlock:
        rte_spinlock_unlock(&port->mac_stats_lock);

        return nstats;
}
static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
                     struct rte_eth_xstat_name *xstats_names,
                     unsigned int xstats_count)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct sfc_port *port = &sa->port;
        unsigned int i;
        unsigned int nstats = 0;

        for (i = 0; i < EFX_MAC_NSTATS; ++i) {
                if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
                        if (xstats_names != NULL && nstats < xstats_count)
                                strncpy(xstats_names[nstats].name,
                                        efx_mac_stat_name(sa->nic, i),
                                        sizeof(xstats_names[0].name));
                        ++nstats;
                }
        }

        return nstats;
}
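
/*
 * Illustrative sketch (not part of the driver): the usual two-pass
 * pattern an application uses against the callbacks above -- first query
 * how many extended statistics exist, then fetch names and values into
 * buffers of that size. Requires <stdio.h>, <stdlib.h>, <inttypes.h> and
 * rte_ethdev.h; the function name is hypothetical.
 */
static __rte_unused void
sfc_example_dump_xstats(uint8_t port_id)
{
        struct rte_eth_xstat_name *names = NULL;
        struct rte_eth_xstat *values = NULL;
        int n, i;

        /* First pass: no buffers, just learn the number of entries. */
        n = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (n <= 0)
                return;

        names = calloc(n, sizeof(*names));
        values = calloc(n, sizeof(*values));
        if (names == NULL || values == NULL)
                goto out;

        /* Second pass: fetch names and values into the sized buffers. */
        if (rte_eth_xstats_get_names(port_id, names, n) == n &&
            rte_eth_xstats_get(port_id, values, n) == n) {
                for (i = 0; i < n; i++)
                        printf("%s: %" PRIu64 "\n",
                               names[i].name, values[i].value);
        }
out:
        free(names);
        free(values);
}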
static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        unsigned int wanted_fc, link_fc;

        memset(fc_conf, 0, sizeof(*fc_conf));

        sfc_adapter_lock(sa);

        if (sa->state == SFC_ADAPTER_STARTED)
                efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
        else
                link_fc = sa->port.flow_ctrl;
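
        /* Map the libefx EFX_FCNTL_* bit mask onto an ethdev RTE_FC_* mode. */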
        switch (link_fc) {
        case 0:
                fc_conf->mode = RTE_FC_NONE;
                break;
        case EFX_FCNTL_RESPOND:
                fc_conf->mode = RTE_FC_RX_PAUSE;
                break;
        case EFX_FCNTL_GENERATE:
                fc_conf->mode = RTE_FC_TX_PAUSE;
                break;
        case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
                fc_conf->mode = RTE_FC_FULL;
                break;
        default:
                sfc_err(sa, "%s: unexpected flow control value %#x",
                        __func__, link_fc);
        }
        fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

        sfc_adapter_unlock(sa);

        return 0;
}
static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct sfc_port *port = &sa->port;
        unsigned int fcntl;
        int rc;

        if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
            fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
            fc_conf->mac_ctrl_frame_fwd != 0) {
                sfc_err(sa, "unsupported flow control settings specified");
                rc = EINVAL;
                goto fail_inval;
        }
        switch (fc_conf->mode) {
        case RTE_FC_NONE:
                fcntl = 0;
                break;
        case RTE_FC_RX_PAUSE:
                fcntl = EFX_FCNTL_RESPOND;
                break;
        case RTE_FC_TX_PAUSE:
                fcntl = EFX_FCNTL_GENERATE;
                break;
        case RTE_FC_FULL:
                fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
                break;
        default:
                rc = EINVAL;
                goto fail_inval;
        }
        sfc_adapter_lock(sa);

        if (sa->state == SFC_ADAPTER_STARTED) {
                rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
                if (rc != 0)
                        goto fail_mac_fcntl_set;
        }

        port->flow_ctrl = fcntl;
        port->flow_ctrl_autoneg = fc_conf->autoneg;

        sfc_adapter_unlock(sa);

        return 0;

fail_mac_fcntl_set:
        sfc_adapter_unlock(sa);

fail_inval:
        SFC_ASSERT(rc > 0);
        return -rc;
}
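
/*
 * Illustrative sketch (not part of the driver): how an application would
 * request full flow control through the callback above. Only the mode and
 * autoneg fields are honoured by this PMD; the remaining fields must stay
 * zero or the request is rejected. Requires <string.h> and rte_ethdev.h;
 * the function name is hypothetical.
 */
static __rte_unused int
sfc_example_enable_pause(uint8_t port_id)
{
        struct rte_eth_fc_conf fc_conf;

        memset(&fc_conf, 0, sizeof(fc_conf));
        fc_conf.mode = RTE_FC_FULL;     /* respond to and generate PAUSE */
        fc_conf.autoneg = 1;            /* negotiate with the link partner */

        return rte_eth_dev_flow_ctrl_set(port_id, &fc_conf);
}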
static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        size_t pdu = EFX_MAC_PDU(mtu);
        size_t old_pdu;
        int rc;
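
        /*
         * EFX_MAC_PDU() converts an MTU into the corresponding on-wire
         * PDU size by adding the Ethernet framing overhead (header,
         * optional VLAN tag, FCS and any controller-specific padding).
         */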
        sfc_log_init(sa, "mtu=%u", mtu);

        rc = EINVAL;
        if (pdu < EFX_MAC_PDU_MIN) {
                sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
                        (unsigned int)mtu, (unsigned int)pdu,
                        EFX_MAC_PDU_MIN);
                goto fail_inval;
        }
        if (pdu > EFX_MAC_PDU_MAX) {
                sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
                        (unsigned int)mtu, (unsigned int)pdu,
                        EFX_MAC_PDU_MAX);
                goto fail_inval;
        }
        sfc_adapter_lock(sa);

        if (pdu != sa->port.pdu) {
                if (sa->state == SFC_ADAPTER_STARTED) {
                        sfc_stop(sa);

                        old_pdu = sa->port.pdu;
                        sa->port.pdu = pdu;
                        rc = sfc_start(sa);
                        if (rc != 0)
                                goto fail_start;
                } else {
                        sa->port.pdu = pdu;
                }
        }

        /*
         * The driver does not use it, but other PMDs update jumbo_frame
         * flag and max_rx_pkt_len when MTU is set.
         */
        dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
        dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
        sfc_adapter_unlock(sa);

        sfc_log_init(sa, "done");
        return 0;

fail_start:
        sa->port.pdu = old_pdu;
        if (sfc_start(sa) != 0)
                sfc_err(sa, "cannot start with either the new (%u) or the "
                        "old (%u) PDU max size - port is stopped",
                        (unsigned int)pdu, (unsigned int)old_pdu);
        sfc_adapter_unlock(sa);

fail_inval:
        sfc_log_init(sa, "failed %d", rc);
        SFC_ASSERT(rc > 0);
        return -rc;
}
static const struct eth_dev_ops sfc_eth_dev_ops = {
        .dev_configure = sfc_dev_configure,
        .dev_start = sfc_dev_start,
        .dev_stop = sfc_dev_stop,
        .dev_set_link_up = sfc_dev_set_link_up,
        .dev_set_link_down = sfc_dev_set_link_down,
        .dev_close = sfc_dev_close,
        .link_update = sfc_dev_link_update,
        .stats_get = sfc_stats_get,
        .xstats_get = sfc_xstats_get,
        .xstats_get_names = sfc_xstats_get_names,
        .dev_infos_get = sfc_dev_infos_get,
        .mtu_set = sfc_dev_set_mtu,
        .rx_queue_setup = sfc_rx_queue_setup,
        .rx_queue_release = sfc_rx_queue_release,
        .tx_queue_setup = sfc_tx_queue_setup,
        .tx_queue_release = sfc_tx_queue_release,
        .flow_ctrl_get = sfc_flow_ctrl_get,
        .flow_ctrl_set = sfc_flow_ctrl_set,
};
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
        struct sfc_adapter *sa = dev->data->dev_private;
        struct rte_pci_device *pci_dev = SFC_DEV_TO_PCI(dev);
        int rc;
        const efx_nic_cfg_t *encp;
        const struct ether_addr *from;

        /* Required for logging */
        sa->eth_dev = dev;

        /* Copy PCI device info to the dev->data */
        rte_eth_copy_pci_info(dev, pci_dev);

        rc = sfc_kvargs_parse(sa);
        if (rc != 0)
                goto fail_kvargs_parse;

        rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
                                sfc_kvarg_bool_handler, &sa->debug_init);
        if (rc != 0)
                goto fail_kvarg_debug_init;
        sfc_log_init(sa, "entry");

        dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
        if (dev->data->mac_addrs == NULL) {
                rc = ENOMEM;
                goto fail_mac_addrs;
        }

        sfc_adapter_lock_init(sa);
        sfc_adapter_lock(sa);

        sfc_log_init(sa, "attaching");
        rc = sfc_attach(sa);
        if (rc != 0)
                goto fail_attach;

        encp = efx_nic_cfg_get(sa->nic);
        /*
         * The arguments are in reverse order relative to the Linux
         * kernel convention: copy from the NIC config to the Ethernet
         * device data.
         */
        from = (const struct ether_addr *)(encp->enc_mac_addr);
        ether_addr_copy(from, &dev->data->mac_addrs[0]);
        dev->dev_ops = &sfc_eth_dev_ops;
        dev->rx_pkt_burst = &sfc_recv_pkts;
        dev->tx_pkt_burst = &sfc_xmit_pkts;

        sfc_adapter_unlock(sa);

        sfc_log_init(sa, "done");
        return 0;
fail_attach:
        sfc_adapter_unlock(sa);
        sfc_adapter_lock_fini(sa);
        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

fail_mac_addrs:
fail_kvarg_debug_init:
        sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
        sfc_log_init(sa, "failed %d", rc);
        SFC_ASSERT(rc > 0);
        return -rc;
}
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
        struct sfc_adapter *sa = dev->data->dev_private;

        sfc_log_init(sa, "entry");

        sfc_adapter_lock(sa);

        sfc_detach(sa);

        rte_free(dev->data->mac_addrs);
        dev->data->mac_addrs = NULL;

        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;

        sfc_kvargs_cleanup(sa);

        sfc_adapter_unlock(sa);
        sfc_adapter_lock_fini(sa);

        sfc_log_init(sa, "done");

        /* Required for logging, so cleanup last */
        sa->eth_dev = NULL;

        return 0;
}
static const struct rte_pci_id pci_id_sfc_efx_map[] = {
        { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
        { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
        { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
        { .vendor_id = 0 /* sentinel */ }
};
static struct eth_driver sfc_efx_pmd = {
        .pci_drv = {
                .id_table = pci_id_sfc_efx_map,
                .drv_flags =
                        RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_NEED_MAPPING,
                .probe = rte_eth_dev_pci_probe,
                .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = sfc_eth_dev_init,
        .eth_dev_uninit = sfc_eth_dev_uninit,
        .dev_private_size = sizeof(struct sfc_adapter),
};
RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
        SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
        SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
        SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
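
/*
 * Example usage (illustrative only; the PCI address is hypothetical and
 * the key strings are the ones defined in sfc_kvargs.h):
 *
 *   testpmd -w 0000:02:00.0,perf_profile=low-latency,debug_init=1 -- -i
 */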