/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <ethdev_driver.h>
#include <ethdev_pci.h>

#include <rte_bus_pci.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_ether.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_flow.h"
#include "sfc_dp.h"
#include "sfc_dp_rx.h"

uint32_t sfc_logtype_driver;

static struct sfc_dp_list sfc_dp_head =
	TAILQ_HEAD_INITIALIZER(sfc_dp_head);

static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);

static int
sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	efx_nic_fw_info_t enfi;
	int ret;
	int rc;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return -rc;

	ret = snprintf(fw_version, fw_size,
		       "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
		       enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
		       enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
	if (ret < 0)
		return ret;

	if (enfi.enfi_dpcpu_fw_ids_valid) {
		size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
		int ret_extra;

		ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
				     fw_size - dpcpu_fw_ids_offset,
				     " rx%" PRIx16 " tx%" PRIx16,
				     enfi.enfi_rx_dpcpu_fw_id,
				     enfi.enfi_tx_dpcpu_fw_id);
		if (ret_extra < 0)
			return ret_extra;

		ret += ret_extra;
	}

	if (fw_size < (size_t)(++ret))
		return ret;
	else
		return 0;
}

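/*
 * Note on the return convention above: per the ethdev fw_version_get
 * contract, a positive return value is the buffer size that would be
 * required to hold the complete version string, so a caller receiving
 * a positive value may retry with a larger buffer.
 */
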
static int
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;
	struct sfc_mae *mae = &sa->mae;
	uint64_t txq_offloads_def = 0;

	sfc_log_init(sa, "entry");

	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
	dev_info->max_mtu = EFX_MAC_SDU_MAX;

	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	dev_info->max_vfs = sa->sriov.num_vfs;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
		dev_info->speed_capa |= ETH_LINK_SPEED_100G;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);

	/*
	 * rx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per device basis which makes
	 * sense when some offloads are needed to be set on all queues.
	 */
	dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
				    dev_info->rx_queue_offload_capa;

	dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);

	/*
	 * tx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per device basis which makes
	 * sense when some offloads are needed to be set on all queues.
	 */
	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
				    dev_info->tx_queue_offload_capa;

	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	dev_info->default_txconf.offloads |= txq_offloads_def;

	if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
		uint64_t rte_hf = 0;
		unsigned int i;

		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			rte_hf |= rss->hf_map[i].rte;

		dev_info->reta_size = EFX_RSS_TBL_SIZE;
		dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
		dev_info->flow_type_rss_offloads = rte_hf;
	}

	/* Initialize to hardware limits */
	dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
	dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
	/*
	 * The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;

	/* Initialize to hardware limits */
	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;

	if (sap->dp_rx->get_dev_info != NULL)
		sap->dp_rx->get_dev_info(dev_info);
	if (sap->dp_tx->get_dev_info != NULL)
		sap->dp_tx->get_dev_info(dev_info);

	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;

	if (mae->status == SFC_MAE_STATUS_SUPPORTED) {
		dev_info->switch_info.name = dev->device->driver->name;
		dev_info->switch_info.domain_id = mae->switch_domain_id;
		dev_info->switch_info.port_id = mae->switch_port_id;
	}

	return 0;
}

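/*
 * Note: setting nb_align to the minimum ring size only enforces multiples
 * of that minimum, which is weaker than the power-of-2 constraint noted
 * above; the stricter check is expected to be applied at queue setup time.
 */
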
static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

	return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
}

static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct rte_eth_link current_link;
	int ret;

	sfc_log_init(sa, "entry");

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
	} else if (wait_to_complete) {
		efx_link_mode_t link_mode;

		if (efx_port_poll(sa->nic, &link_mode) != 0)
			link_mode = EFX_LINK_UNKNOWN;
		sfc_port_link_mode_to_info(link_mode, &current_link);
	} else {
		sfc_ev_mgmt_qpoll(sa);
		rte_eth_linkstatus_get(dev, &current_link);
	}

	ret = rte_eth_linkstatus_set(dev, &current_link);
	if (ret == 0)
		sfc_notice(sa, "Link status is %s",
			   current_link.link_status ? "UP" : "DOWN");

	return ret;
}

static int
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");

	return 0;
}

static int
sfc_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	return 0;
}

static void
sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
{
	free(dev->process_private);
	rte_eth_dev_release_port(dev);
}

static int
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	sfc_log_init(sa, "entry");

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		sfc_eth_dev_secondary_clear_ops(dev);
		return 0;
	}

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}

	/*
	 * Cleanup all resources.
	 * Rollback primary process sfc_eth_dev_init() below.
	 */
	sfc_eth_dev_clear_ops(dev);

	sfc_detach(sa);
	sfc_unprobe(sa);

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;

	free(sa);

	return 0;
}

static int
sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
		   boolean_t enabled)
{
	struct sfc_port *port;
	boolean_t *toggle;
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
	const char *desc = (allmulti) ? "all-multi" : "promiscuous";
	int rc = 0;

	sfc_adapter_lock(sa);

	port = &sa->port;
	toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);

	if (*toggle != enabled) {
		*toggle = enabled;

		if (sfc_sa2shared(sa)->isolated) {
			sfc_warn(sa, "isolated mode is active on the port");
			sfc_warn(sa, "the change is to be applied on the next "
				     "start provided that isolated mode is "
				     "disabled prior the next start");
		} else if ((sa->state == SFC_ADAPTER_STARTED) &&
			   ((rc = sfc_set_rx_mode(sa)) != 0)) {
			*toggle = !(enabled);
			sfc_warn(sa, "Failed to %s %s mode, rc = %d",
				 ((enabled) ? "enable" : "disable"), desc, rc);

			/*
			 * For promiscuous and all-multicast filters a
			 * permission failure should be reported as an
			 * unsupported filter.
			 */
			if (rc == EPERM)
				rc = ENOTSUP;
		}
	}

	sfc_adapter_unlock(sa);
	return rc;
}

static int
sfc_dev_promisc_enable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_promisc_disable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
{
	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

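/*
 * The wrappers above follow the usual ethdev convention: internal helpers
 * return a positive errno (or zero) which is negated at the driver
 * boundary. For example, an EPERM from the hardware is remapped to
 * ENOTSUP inside sfc_dev_filter_set() and reaches the application
 * as -ENOTSUP.
 */
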
static int
sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	sfc_sw_index_t sw_index;
	int rc;

	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
		     ethdev_qid, nb_rx_desc, socket_id);

	sfc_adapter_lock(sa);

	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
	rc = sfc_rx_qinit(sa, sw_index, nb_rx_desc, socket_id,
			  rx_conf, mb_pool);
	if (rc != 0)
		goto fail_rx_qinit;

	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
	dev->data->rx_queues[ethdev_qid] = rxq_info->dp;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_rx_queue_release(void *queue)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	struct sfc_rxq *rxq;
	struct sfc_adapter *sa;
	sfc_sw_index_t sw_index;

	if (dp_rxq == NULL)
		return;

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	sa = rxq->evq->sa;
	sfc_adapter_lock(sa);

	sw_index = dp_rxq->dpq.queue_id;

	sfc_log_init(sa, "RxQ=%u", sw_index);

	sfc_rx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

static int
sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_txq_info *txq_info;
	sfc_sw_index_t sw_index;
	int rc;

	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
		     ethdev_qid, nb_tx_desc, socket_id);

	sfc_adapter_lock(sa);

	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
	rc = sfc_tx_qinit(sa, sw_index, nb_tx_desc, socket_id, tx_conf);
	if (rc != 0)
		goto fail_tx_qinit;

	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
	dev->data->tx_queues[ethdev_qid] = txq_info->dp;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_tx_queue_release(void *queue)
{
	struct sfc_dp_txq *dp_txq = queue;
	struct sfc_txq *txq;
	sfc_sw_index_t sw_index;
	struct sfc_adapter *sa;

	if (dp_txq == NULL)
		return;

	txq = sfc_txq_by_dp_txq(dp_txq);
	sw_index = dp_txq->dpq.queue_id;

	SFC_ASSERT(txq->evq != NULL);
	sa = txq->evq->sa;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	sfc_adapter_lock(sa);

	sfc_tx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

/*
 * Some statistics are computed as A - B where A and B each increase
 * monotonically with some hardware counter(s) and the counters are read
 * atomically.
 *
 * If packet X is counted in A, but not counted in B yet, computed value is
 * greater than real.
 *
 * If packet X is not counted in A at the moment of reading the counter,
 * but counted in B at the moment of reading the counter, computed value
 * is less than real.
 *
 * However, counter which grows backward is worse evil than slightly wrong
 * value. So, let's try to guarantee that it never happens except may be
 * the case when the MAC stats are zeroed as a result of a NIC reset.
 */
static void
sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
{
	if ((int64_t)(newval - *stat) > 0 || newval == 0)
		*stat = newval;
}

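/*
 * Example of the guarantee above: with *stat == 100, a racy re-read of 99
 * is discarded ((int64_t)(99 - 100) < 0), a read of 105 is accepted, and
 * a read of 0 is accepted as the NIC reset special case.
 */
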
static int
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int ret;

	sfc_adapter_lock(sa);

	ret = sfc_port_update_mac_stats(sa, B_FALSE);
	if (ret != 0)
		goto unlock;

	mac_stats = port->mac_stats_buf;

	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];

		/* CRC is included in these stats, but shouldn't be */
		stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
		stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
	} else {
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];

		/* CRC is included in these stats, but shouldn't be */
		stats->ibytes -= mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
		stats->obytes -= mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;

		/*
		 * Take into account stats which are whenever supported
		 * on EF10. If some stat is not supported by current
		 * firmware variant or HW revision, it is guaranteed
		 * to be zero in mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters supported on EF10 */

		/* Exclude missed, errors and pauses from Rx packets */
		sfc_update_diff_stat(&port->ipackets,
			mac_stats[EFX_MAC_RX_PKTS] -
			mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
			stats->imissed - stats->ierrors);
		stats->ipackets = port->ipackets;
	}

unlock:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(ret >= 0);
	return -ret;
}

static int
sfc_stats_reset(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	int rc;

	sfc_adapter_lock(sa);

	if (sa->state != SFC_ADAPTER_STARTED) {
		/*
		 * The operation cannot be done if port is not started; it
		 * will be scheduled to be done during the next port start
		 */
		port->mac_stats_reset_pending = B_TRUE;
		sfc_adapter_unlock(sa);
		return 0;
	}

	rc = sfc_port_reset_mac_stats(sa);
	if (rc != 0)
		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);

	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int nb_written = 0;
	unsigned int nb_supp;

	if (unlikely(xstats == NULL)) {
		sfc_adapter_lock(sa);
		nb_supp = port->mac_stats_nb_supported;
		sfc_adapter_unlock(sa);
		return nb_supp;
	}

	return sfc_port_get_mac_stats(sa, xstats, xstats_count, &nb_written);
}

static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	if (unlikely(xstats_names == NULL)) {
		sfc_adapter_lock(sa);
		nstats = port->mac_stats_nb_supported;
		sfc_adapter_unlock(sa);
		return nstats;
	}

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (nstats < xstats_count)
				strlcpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
			++nstats;
		}
	}

	return nstats;
}

static int
sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		     uint64_t *values, unsigned int n)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);

	if (unlikely(ids == NULL || values == NULL))
		return -EINVAL;

	return sfc_port_get_mac_stats_by_id(sa, ids, values, n);
}

static int
sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   const uint64_t *ids, unsigned int size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int nb_supported;
	unsigned int i;

	if (unlikely(xstats_names == NULL && ids != NULL) ||
	    unlikely(xstats_names != NULL && ids == NULL))
		return -EINVAL;

	sfc_adapter_lock(sa);

	if (unlikely(xstats_names == NULL && ids == NULL)) {
		nb_supported = port->mac_stats_nb_supported;
		sfc_adapter_unlock(sa);
		return nb_supported;
	}

	SFC_ASSERT(port->mac_stats_nb_supported <=
		   RTE_DIM(port->mac_stats_by_id));

	for (i = 0; i < size; i++) {
		if (ids[i] < port->mac_stats_nb_supported) {
			strlcpy(xstats_names[i].name,
				efx_mac_stat_name(sa->nic,
						  port->mac_stats_by_id[ids[i]]),
				sizeof(xstats_names[0].name));
		} else {
			sfc_adapter_unlock(sa);
			return -EINVAL;
		}
	}

	sfc_adapter_unlock(sa);

	return size;
}

static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);

fail_inval:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	boolean_t scatter_enabled;
	const char *error;
	unsigned int i;

	for (i = 0; i < sas->rxq_count; i++) {
		if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0)
			continue;

		scatter_enabled = (sas->rxq_info[i].type_flags &
				   EFX_RXQ_FLAG_SCATTER);

		if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size,
					  encp->enc_rx_prefix_size,
					  scatter_enabled,
					  encp->enc_rx_scatter_max, &error)) {
			sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
				error);
			return EINVAL;
		}
	}

	return 0;
}

static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu = 0;
	int rc;

	sfc_log_init(sa, "mtu=%u", mtu);

	rc = EINVAL;
	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			(unsigned int)EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
	if (rc != 0)
		goto fail_check_scatter;

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	if (mtu > RTE_ETHER_MTU) {
		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	}

	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);

fail_check_scatter:
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

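/*
 * Note: EFX_MAC_PDU() converts the MTU into the on-wire frame size limit
 * by adding L2 overhead (Ethernet header, optional VLAN tag, FCS), so the
 * standard 1500 byte MTU maps to a PDU slightly above 1518 bytes; the
 * exact rounding is defined by the base driver macro.
 */
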
static int
sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_port *port = &sa->port;
	struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
	int rc = 0;

	sfc_adapter_lock(sa);

	if (rte_is_same_ether_addr(mac_addr, &port->default_mac_addr))
		goto unlock;

	/*
	 * Copy the address to the device private data so that
	 * it could be recalled in the case of adapter restart.
	 */
	rte_ether_addr_copy(mac_addr, &port->default_mac_addr);

	/*
	 * Neither of the two following checks can return
	 * an error. The new MAC address is preserved in
	 * the device private data and can be activated
	 * on the next port start if the user prevents
	 * isolated mode from being enabled.
	 */
	if (sfc_sa2shared(sa)->isolated) {
		sfc_warn(sa, "isolated mode is active on the port");
		sfc_warn(sa, "will not set MAC address");
		goto unlock;
	}

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_notice(sa, "the port is not started");
		sfc_notice(sa, "the new MAC address will be set on port start");
		goto unlock;
	}

	if (encp->enc_allow_set_mac_with_installed_filters) {
		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
		if (rc != 0) {
			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
			goto unlock;
		}

		/*
		 * Changing the MAC address by means of MCDI request
		 * has no effect on received traffic, therefore
		 * we also need to update unicast filters
		 */
		rc = sfc_set_rx_mode_unchecked(sa);
		if (rc != 0) {
			sfc_err(sa, "cannot set filter (rc = %u)", rc);
			/* Rollback the old address */
			(void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
			(void)sfc_set_rx_mode_unchecked(sa);
		}
	} else {
		sfc_warn(sa, "cannot set MAC address with filters installed");
		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
		sfc_warn(sa, "(some traffic may be dropped)");

		/*
		 * Since setting MAC address with filters installed is not
		 * allowed on the adapter, the new MAC address will be set
		 * by means of adapter restart. sfc_start() shall retrieve
		 * the new address from the device private data and set it.
		 */
		rc = sfc_restart(sa);
		if (rc != 0)
			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
	}

unlock:
	if (rc != 0)
		rte_ether_addr_copy(old_addr, &port->default_mac_addr);

	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_set_mc_addr_list(struct rte_eth_dev *dev,
		struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_port *port = &sa->port;
	uint8_t *mc_addrs = port->mcast_addrs;
	int rc;
	unsigned int i;

	if (sfc_sa2shared(sa)->isolated) {
		sfc_err(sa, "isolated mode is active on the port");
		sfc_err(sa, "will not set multicast address list");
		return -ENOTSUP;
	}

	if (mc_addrs == NULL)
		return -ENOBUFS;

	if (nb_mc_addr > port->max_mcast_addrs) {
		sfc_err(sa, "too many multicast addresses: %u > %u",
			nb_mc_addr, port->max_mcast_addrs);
		return -EINVAL;
	}

	for (i = 0; i < nb_mc_addr; ++i) {
		rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
			   EFX_MAC_ADDR_LEN);
		mc_addrs += EFX_MAC_ADDR_LEN;
	}

	port->nb_mcast_addrs = nb_mc_addr;

	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;

	rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
					port->nb_mcast_addrs);
	if (rc != 0)
		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
		      struct rte_eth_rxq_info *qinfo)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
	struct sfc_rxq_info *rxq_info;

	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);

	qinfo->mp = rxq_info->refill_mb_pool;
	qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
		qinfo->scattered_rx = 1;
	}
	qinfo->nb_desc = rxq_info->entries;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
		      struct rte_eth_txq_info *qinfo)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_txq_info *txq_info;

	SFC_ASSERT(ethdev_qid < sas->ethdev_txq_count);

	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->conf.offloads = txq_info->offloads;
	qinfo->conf.tx_free_thresh = txq_info->free_thresh;
	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
	qinfo->nb_desc = txq_info->entries;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
	struct sfc_rxq_info *rxq_info;

	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);

	if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
		return 0;

	return sap->dp_rx->qdesc_npending(rxq_info->dp);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_rx_descriptor_done(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	const struct sfc_dp_rx *dp_rx;

	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);

	return offset < dp_rx->qdesc_npending(dp_rxq);
}

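/*
 * Note: rx_descriptor_done is the legacy boolean completion check and is
 * retained here for compatibility; rx_descriptor_status below reports the
 * richer RTE_ETH_RX_DESC_{AVAIL,DONE,UNAVAIL} states and is the preferred
 * interface for new applications.
 */
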
/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_rx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	const struct sfc_dp_rx *dp_rx;

	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);

	return dp_rx->qdesc_status(dp_rxq, offset);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_tx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_txq *dp_txq = queue;
	const struct sfc_dp_tx *dp_tx;

	dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);

	return dp_tx->qdesc_status(dp_txq, offset);
}

static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	sfc_sw_index_t sw_index;
	int rc;

	sfc_log_init(sa, "RxQ=%u", ethdev_qid);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
	if (rxq_info->state != SFC_RXQ_INITIALIZED)
		goto fail_not_setup;

	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
	rc = sfc_rx_qstart(sa, sw_index);
	if (rc != 0)
		goto fail_rx_qstart;

	rxq_info->deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qstart:
fail_not_setup:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	sfc_sw_index_t sw_index;

	sfc_log_init(sa, "RxQ=%u", ethdev_qid);

	sfc_adapter_lock(sa);

	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
	sfc_rx_qstop(sa, sw_index);

	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
	rxq_info->deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_txq_info *txq_info;
	sfc_sw_index_t sw_index;
	int rc;

	sfc_log_init(sa, "TxQ = %u", ethdev_qid);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
	if (txq_info->state != SFC_TXQ_INITIALIZED)
		goto fail_not_setup;

	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
	rc = sfc_tx_qstart(sa, sw_index);
	if (rc != 0)
		goto fail_tx_qstart;

	txq_info->deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qstart:
fail_not_setup:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_txq_info *txq_info;
	sfc_sw_index_t sw_index;

	sfc_log_init(sa, "TxQ = %u", ethdev_qid);

	sfc_adapter_lock(sa);

	sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
	sfc_tx_qstop(sa, sw_index);

	txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
	txq_info->deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);

	return 0;
}

static efx_tunnel_protocol_t
sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
{
	switch (rte_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		return EFX_TUNNEL_PROTOCOL_VXLAN;
	case RTE_TUNNEL_TYPE_GENEVE:
		return EFX_TUNNEL_PROTOCOL_GENEVE;
	default:
		return EFX_TUNNEL_NPROTOS;
	}
}

enum sfc_udp_tunnel_op_e {
	SFC_UDP_TUNNEL_ADD_PORT,
	SFC_UDP_TUNNEL_DEL_PORT,
};

static int
sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
		      struct rte_eth_udp_tunnel *tunnel_udp,
		      enum sfc_udp_tunnel_op_e op)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	efx_tunnel_protocol_t tunnel_proto;
	int rc;

	sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
		     (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
		     (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
		     tunnel_udp->udp_port, tunnel_udp->prot_type);

	tunnel_proto =
		sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
	if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
		rc = ENOTSUP;
		goto fail_bad_proto;
	}

	sfc_adapter_lock(sa);

	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		rc = efx_tunnel_config_udp_add(sa->nic,
					       tunnel_udp->udp_port,
					       tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		rc = efx_tunnel_config_udp_remove(sa->nic,
						  tunnel_udp->udp_port,
						  tunnel_proto);
		break;
	default:
		rc = EINVAL;
		goto fail_bad_op;
	}

	if (rc != 0)
		goto fail_op;

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc == EAGAIN) {
			/*
			 * Configuration is accepted by FW and MC reboot
			 * is initiated to apply the changes. MC reboot
			 * will be handled in a usual way (MC reboot
			 * event on management event queue and adapter
			 * restart).
			 */
			rc = 0;
		} else if (rc != 0) {
			goto fail_reconfigure;
		}
	}

	sfc_adapter_unlock(sa);
	return 0;

fail_reconfigure:
	/* Remove/restore entry since the change makes the trouble */
	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		(void)efx_tunnel_config_udp_remove(sa->nic,
						   tunnel_udp->udp_port,
						   tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		(void)efx_tunnel_config_udp_add(sa->nic,
						tunnel_udp->udp_port,
						tunnel_proto);
		break;
	}

fail_op:
fail_bad_op:
	sfc_adapter_unlock(sa);

fail_bad_proto:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
}

static int
sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
		return -ENOTSUP;

	/*
	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
	 * hence, conversion is done here to derive a correct set of ETH_RSS
	 * flags which corresponds to the active EFX configuration stored
	 * locally in 'sfc_adapter' and kept up-to-date
	 */
	rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(rss, rss->hash_types);
	rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
	if (rss_conf->rss_key != NULL)
		rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);

	return 0;
}

static int
sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	unsigned int efx_hash_types;
	uint32_t contexts[] = {EFX_RSS_CONTEXT_DEFAULT, rss->dummy_rss_context};
	unsigned int n_contexts;
	unsigned int mode_i = 0;
	unsigned int key_i = 0;
	unsigned int i = 0;
	int rc = 0;

	n_contexts = rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT ? 1 : 2;

	if (sfc_sa2shared(sa)->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if ((rss_conf->rss_key != NULL) &&
	    (rss_conf->rss_key_len != sizeof(rss->key))) {
		sfc_err(sa, "RSS key size is wrong (should be %zu)",
			sizeof(rss->key));
		return -EINVAL;
	}

	sfc_adapter_lock(sa);

	rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
	if (rc != 0)
		goto fail_rx_hf_rte_to_efx;

	for (mode_i = 0; mode_i < n_contexts; mode_i++) {
		rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i],
					   rss->hash_alg, efx_hash_types,
					   B_TRUE);
		if (rc != 0)
			goto fail_scale_mode_set;
	}

	if (rss_conf->rss_key != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			for (key_i = 0; key_i < n_contexts; key_i++) {
				rc = efx_rx_scale_key_set(sa->nic,
							  contexts[key_i],
							  rss_conf->rss_key,
							  sizeof(rss->key));
				if (rc != 0)
					goto fail_scale_key_set;
			}
		}

		rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
	}

	rss->hash_types = efx_hash_types;

	sfc_adapter_unlock(sa);

	return 0;

fail_scale_key_set:
	for (i = 0; i < key_i; i++) {
		if (efx_rx_scale_key_set(sa->nic, contexts[i], rss->key,
					 sizeof(rss->key)) != 0)
			sfc_err(sa, "failed to restore RSS key");
	}

fail_scale_mode_set:
	for (i = 0; i < mode_i; i++) {
		if (efx_rx_scale_mode_set(sa->nic, contexts[i],
					  EFX_RX_HASHALG_TOEPLITZ,
					  rss->hash_types, B_TRUE) != 0)
			sfc_err(sa, "failed to restore RSS mode");
	}

fail_rx_hf_rte_to_efx:
	sfc_adapter_unlock(sa);
	return -rc;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static int
sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_rss *rss = &sas->rss;
	int entry;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || sas->isolated)
		return -ENOTSUP;

	if (rss->channels == 0)
		return -EINVAL;

	if (reta_size != EFX_RSS_TBL_SIZE)
		return -EINVAL;

	for (entry = 0; entry < reta_size; entry++) {
		int grp = entry / RTE_RETA_GROUP_SIZE;
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;

		if ((reta_conf[grp].mask >> grp_idx) & 1)
			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
	}

	return 0;
}

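/*
 * Example of the indirection table addressing used above: with
 * RTE_RETA_GROUP_SIZE of 64, RETA entry 67 lives in group 1 at index 3
 * (67 / 64 == 1, 67 % 64 == 3) and is reported only if bit 3 is set in
 * reta_conf[1].mask.
 */
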
static int
sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	unsigned int *rss_tbl_new;
	uint16_t entry;
	int rc = 0;

	if (sfc_sa2shared(sa)->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if (reta_size != EFX_RSS_TBL_SIZE) {
		sfc_err(sa, "RETA size is wrong (should be %u)",
			EFX_RSS_TBL_SIZE);
		return -EINVAL;
	}

	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
	if (rss_tbl_new == NULL)
		return -ENOMEM;

	sfc_adapter_lock(sa);

	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));

	for (entry = 0; entry < reta_size; entry++) {
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
		struct rte_eth_rss_reta_entry64 *grp;

		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];

		if (grp->mask & (1ull << grp_idx)) {
			if (grp->reta[grp_idx] >= rss->channels) {
				rc = EINVAL;
				goto bad_reta_entry;
			}
			rss_tbl_new[entry] = grp->reta[grp_idx];
		}
	}

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss_tbl_new, EFX_RSS_TBL_SIZE);
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));

fail_scale_tbl_set:
bad_reta_entry:
	sfc_adapter_unlock(sa);

	rte_free(rss_tbl_new);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
		     const struct rte_flow_ops **ops)
{
	*ops = &sfc_flow_ops;

	return 0;
}

static int
sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);

	/*
	 * If Rx datapath does not provide callback to check mempool,
	 * all pools are supported.
	 */
	if (sap->dp_rx->pool_ops_supported == NULL)
		return 1;

	return sap->dp_rx->pool_ops_supported(pool);
}

static int
sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
	struct sfc_rxq_info *rxq_info;

	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);

	return sap->dp_rx->intr_enable(rxq_info->dp);
}

static int
sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
{
	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
	struct sfc_rxq_info *rxq_info;

	rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);

	return sap->dp_rx->intr_disable(rxq_info->dp);
}

static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure			= sfc_dev_configure,
	.dev_start			= sfc_dev_start,
	.dev_stop			= sfc_dev_stop,
	.dev_set_link_up		= sfc_dev_set_link_up,
	.dev_set_link_down		= sfc_dev_set_link_down,
	.dev_close			= sfc_dev_close,
	.promiscuous_enable		= sfc_dev_promisc_enable,
	.promiscuous_disable		= sfc_dev_promisc_disable,
	.allmulticast_enable		= sfc_dev_allmulti_enable,
	.allmulticast_disable		= sfc_dev_allmulti_disable,
	.link_update			= sfc_dev_link_update,
	.stats_get			= sfc_stats_get,
	.stats_reset			= sfc_stats_reset,
	.xstats_get			= sfc_xstats_get,
	.xstats_reset			= sfc_stats_reset,
	.xstats_get_names		= sfc_xstats_get_names,
	.dev_infos_get			= sfc_dev_infos_get,
	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
	.mtu_set			= sfc_dev_set_mtu,
	.rx_queue_start			= sfc_rx_queue_start,
	.rx_queue_stop			= sfc_rx_queue_stop,
	.tx_queue_start			= sfc_tx_queue_start,
	.tx_queue_stop			= sfc_tx_queue_stop,
	.rx_queue_setup			= sfc_rx_queue_setup,
	.rx_queue_release		= sfc_rx_queue_release,
	.rx_queue_intr_enable		= sfc_rx_queue_intr_enable,
	.rx_queue_intr_disable		= sfc_rx_queue_intr_disable,
	.tx_queue_setup			= sfc_tx_queue_setup,
	.tx_queue_release		= sfc_tx_queue_release,
	.flow_ctrl_get			= sfc_flow_ctrl_get,
	.flow_ctrl_set			= sfc_flow_ctrl_set,
	.mac_addr_set			= sfc_mac_addr_set,
	.udp_tunnel_port_add		= sfc_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del		= sfc_dev_udp_tunnel_port_del,
	.reta_update			= sfc_dev_rss_reta_update,
	.reta_query			= sfc_dev_rss_reta_query,
	.rss_hash_update		= sfc_dev_rss_hash_update,
	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
	.flow_ops_get			= sfc_dev_flow_ops_get,
	.set_mc_addr_list		= sfc_set_mc_addr_list,
	.rxq_info_get			= sfc_rx_queue_info_get,
	.txq_info_get			= sfc_tx_queue_info_get,
	.fw_version_get			= sfc_fw_version_get,
	.xstats_get_by_id		= sfc_xstats_get_by_id,
	.xstats_get_names_by_id		= sfc_xstats_get_names_by_id,
	.pool_ops_supported		= sfc_pool_ops_supported,
};

/**
 * Duplicate a string in potentially shared memory required for
 * multi-process support.
 *
 * strdup() allocates from process-local heap/memory.
 */
static char *
sfc_strdup(const char *str)
{
	size_t size;
	char *copy;

	if (str == NULL)
		return NULL;

	size = strlen(str) + 1;
	copy = rte_malloc(__func__, size, 0);
	if (copy != NULL)
		rte_memcpy(copy, str, size);

	return copy;
}

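/*
 * Usage note: datapath names stored in the shared adapter state (e.g.
 * sas->dp_rx_name below) must be allocated with sfc_strdup() so that
 * secondary processes can dereference them; a plain strdup() copy would
 * live in the primary process heap only.
 */
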
static int
sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	const struct sfc_dp_rx *dp_rx;
	const struct sfc_dp_tx *dp_tx;
	const efx_nic_cfg_t *encp;
	unsigned int avail_caps = 0;
	const char *rx_name = NULL;
	const char *tx_name = NULL;
	int rc;

	switch (sa->family) {
	case EFX_FAMILY_HUNTINGTON:
	case EFX_FAMILY_MEDFORD:
	case EFX_FAMILY_MEDFORD2:
		avail_caps |= SFC_DP_HW_FW_CAP_EF10;
		avail_caps |= SFC_DP_HW_FW_CAP_RX_EFX;
		avail_caps |= SFC_DP_HW_FW_CAP_TX_EFX;
		break;
	case EFX_FAMILY_RIVERHEAD:
		avail_caps |= SFC_DP_HW_FW_CAP_EF100;
		break;
	default:
		break;
	}

	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_rx_es_super_buffer_supported)
		avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
				sfc_kvarg_string_handler, &rx_name);
	if (rc != 0)
		goto fail_kvarg_rx_datapath;

	if (rx_name != NULL) {
		dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
		if (dp_rx == NULL) {
			sfc_err(sa, "Rx datapath %s not found", rx_name);
			rc = ENOENT;
			goto fail_dp_rx;
		}
		if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Rx datapath %s",
				rx_name);
			rc = EINVAL;
			goto fail_dp_rx_caps;
		}
	} else {
		dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
		if (dp_rx == NULL) {
			sfc_err(sa, "Rx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_rx;
		}
	}

	sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
	if (sas->dp_rx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_rx_name;
	}

	sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);

	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
				sfc_kvarg_string_handler, &tx_name);
	if (rc != 0)
		goto fail_kvarg_tx_datapath;

	if (tx_name != NULL) {
		dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
		if (dp_tx == NULL) {
			sfc_err(sa, "Tx datapath %s not found", tx_name);
			rc = ENOENT;
			goto fail_dp_tx;
		}
		if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Tx datapath %s",
				tx_name);
			rc = EINVAL;
			goto fail_dp_tx_caps;
		}
	} else {
		dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
		if (dp_tx == NULL) {
			sfc_err(sa, "Tx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_tx;
		}
	}

	sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
	if (sas->dp_tx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_tx_name;
	}

	sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);

	sa->priv.dp_rx = dp_rx;
	sa->priv.dp_tx = dp_tx;

	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
	dev->tx_pkt_burst = dp_tx->pkt_burst;

	dev->rx_queue_count = sfc_rx_queue_count;
	dev->rx_descriptor_done = sfc_rx_descriptor_done;
	dev->rx_descriptor_status = sfc_rx_descriptor_status;
	dev->tx_descriptor_status = sfc_tx_descriptor_status;
	dev->dev_ops = &sfc_eth_dev_ops;

	return 0;

fail_dp_tx_name:
fail_dp_tx_caps:
fail_dp_tx:
fail_kvarg_tx_datapath:
	rte_free(sas->dp_rx_name);
	sas->dp_rx_name = NULL;

fail_dp_rx_name:
fail_dp_rx_caps:
fail_dp_rx:
fail_kvarg_rx_datapath:
	return rc;
}

static void
sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);

	dev->dev_ops = NULL;
	dev->tx_pkt_prepare = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(sas->dp_tx_name);
	sas->dp_tx_name = NULL;
	sa->priv.dp_tx = NULL;

	rte_free(sas->dp_rx_name);
	sas->dp_rx_name = NULL;
	sa->priv.dp_rx = NULL;
}

static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
	.reta_query			= sfc_dev_rss_reta_query,
	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
	.rxq_info_get			= sfc_rx_queue_info_get,
	.txq_info_get			= sfc_tx_queue_info_get,
};

static int
sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter_priv *sap;
	const struct sfc_dp_rx *dp_rx;
	const struct sfc_dp_tx *dp_tx;
	int rc;

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sap = calloc(1, sizeof(*sap));
	if (sap == NULL) {
		rc = ENOMEM;
		goto fail_alloc_priv;
	}

	sap->logtype_main = logtype_main;

	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
	if (dp_rx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Rx datapath", sas->dp_rx_name);
		rc = ENOENT;
		goto fail_dp_rx;
	}
	if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Rx datapath does not support multi-process",
			sas->dp_rx_name);
		rc = EINVAL;
		goto fail_dp_rx_multi_process;
	}

	dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
	if (dp_tx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Tx datapath", sas->dp_tx_name);
		rc = ENOENT;
		goto fail_dp_tx;
	}
	if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Tx datapath does not support multi-process",
			sas->dp_tx_name);
		rc = EINVAL;
		goto fail_dp_tx_multi_process;
	}

	sap->dp_rx = dp_rx;
	sap->dp_tx = dp_tx;

	dev->process_private = sap;
	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
	dev->tx_pkt_burst = dp_tx->pkt_burst;
	dev->rx_queue_count = sfc_rx_queue_count;
	dev->rx_descriptor_done = sfc_rx_descriptor_done;
	dev->rx_descriptor_status = sfc_rx_descriptor_status;
	dev->tx_descriptor_status = sfc_tx_descriptor_status;
	dev->dev_ops = &sfc_eth_dev_secondary_ops;

	return 0;

fail_dp_tx_multi_process:
fail_dp_tx:
fail_dp_rx_multi_process:
fail_dp_rx:
	free(sap);

fail_alloc_priv:
	return rc;
}

static void
sfc_register_dp(void)
{
	/* Register once */
	if (TAILQ_EMPTY(&sfc_dp_head)) {
		/* Prefer EF10 datapath */
		sfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);

		sfc_dp_register(&sfc_dp_head, &sfc_ef100_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
	}
}

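/*
 * Note: the registration order above matters. sfc_dp_find_rx_by_caps()
 * and sfc_dp_find_tx_by_caps() are assumed to return the first matching
 * entry, so datapaths registered earlier take precedence when no datapath
 * is requested explicitly via device arguments.
 */
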
static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint32_t logtype_main;
	struct sfc_adapter *sa;
	int rc;
	const efx_nic_cfg_t *encp;
	const struct rte_ether_addr *from;
	int ret;

	if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
	    SFC_EFX_DEV_CLASS_NET) {
		SFC_GENERIC_LOG(DEBUG,
			"Incompatible device class: skip probing, should be probed by other sfc driver.");
		return 1;
	}

	sfc_register_dp();

	logtype_main = sfc_register_logtype(&pci_dev->addr,
					    SFC_LOGTYPE_MAIN_STR,
					    RTE_LOG_NOTICE);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -sfc_eth_dev_secondary_init(dev, logtype_main);

	/* Required for logging */
	ret = snprintf(sas->log_prefix, sizeof(sas->log_prefix),
			"PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu16 ": ",
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function,
			dev->data->port_id);
	if (ret < 0 || ret >= (int)sizeof(sas->log_prefix)) {
		SFC_GENERIC_LOG(ERR,
			"reserved log prefix is too short for " PCI_PRI_FMT,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		return -EINVAL;
	}
	sas->pci_addr = pci_dev->addr;
	sas->port_id = dev->data->port_id;

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sa = calloc(1, sizeof(*sa));
	if (sa == NULL) {
		rc = ENOMEM;
		goto fail_alloc_sa;
	}

	dev->process_private = sa;

	/* Required for logging */
	sa->priv.shared = sas;
	sa->priv.logtype_main = logtype_main;

	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "probing");
	rc = sfc_probe(sa);
	if (rc != 0)
		goto fail_probe;

	sfc_log_init(sa, "set device ops");
	rc = sfc_eth_dev_set_ops(dev);
	if (rc != 0)
		goto fail_set_ops;

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The arguments are really reverse order in comparison to
	 * Linux kernel. Copy from NIC config to Ethernet device data.
	 */
	from = (const struct rte_ether_addr *)(encp->enc_mac_addr);
	rte_ether_addr_copy(from, &dev->data->mac_addrs[0]);

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_eth_dev_clear_ops(dev);

fail_set_ops:
	sfc_unprobe(sa);

fail_probe:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	dev->process_private = NULL;
	free(sa);

fail_alloc_sa:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	sfc_dev_close(dev);

	return 0;
}

static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD) },
	{ .vendor_id = 0 /* sentinel */ }
};

static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct sfc_adapter_shared), sfc_eth_dev_init);
}

static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
}

static struct rte_pci_driver sfc_efx_pmd = {
	.id_table = pci_id_sfc_efx_map,
	.drv_flags =
		RTE_PCI_DRV_INTR_LSC |
		RTE_PCI_DRV_NEED_MAPPING,
	.probe = sfc_eth_dev_pci_probe,
	.remove = sfc_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
	SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");

RTE_INIT(sfc_driver_register_logtype)
{
	int ret;

	ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
						   RTE_LOG_NOTICE);
	sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
}