1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2016-2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
11 #include <ethdev_driver.h>
12 #include <ethdev_pci.h>
14 #include <rte_bus_pci.h>
15 #include <rte_errno.h>
16 #include <rte_string_fns.h>
17 #include <rte_ether.h>
22 #include "sfc_debug.h"
24 #include "sfc_kvargs.h"
29 #include "sfc_flow_tunnel.h"
31 #include "sfc_dp_rx.h"
33 #include "sfc_sw_stats.h"
34 #include "sfc_switch.h"
36 #define SFC_XSTAT_ID_INVALID_VAL UINT64_MAX
37 #define SFC_XSTAT_ID_INVALID_NAME '\0'
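/*
 * Sentinels used to pre-fill the xstats values/names arrays, which may be
 * written in non-sequential order by ID; any slot still holding a sentinel
 * afterwards marks an ID that was not resolved.
 */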
39 uint32_t sfc_logtype_driver;
41 static struct sfc_dp_list sfc_dp_head =
42 TAILQ_HEAD_INITIALIZER(sfc_dp_head);
45 static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);
49 sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
51 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
52 efx_nic_fw_info_t enfi;
56 rc = efx_nic_get_fw_version(sa->nic, &enfi);
60 ret = snprintf(fw_version, fw_size,
61 "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
62 enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
63 enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
67 if (enfi.enfi_dpcpu_fw_ids_valid) {
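/*
 * snprintf() returns the length that would have been written, so
 * clamp the append offset to the end of the buffer in case the
 * base version string was truncated.
 */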
68 size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
71 ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
72 fw_size - dpcpu_fw_ids_offset,
73 " rx%" PRIx16 " tx%" PRIx16,
74 enfi.enfi_rx_dpcpu_fw_id,
75 enfi.enfi_tx_dpcpu_fw_id);
82 if (fw_size < (size_t)(++ret))
89 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
91 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
92 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
93 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
94 struct sfc_rss *rss = &sas->rss;
95 struct sfc_mae *mae = &sa->mae;
96 uint64_t txq_offloads_def = 0;
98 sfc_log_init(sa, "entry");
100 dev_info->min_mtu = RTE_ETHER_MIN_MTU;
101 dev_info->max_mtu = EFX_MAC_SDU_MAX;
103 dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
105 dev_info->max_vfs = sa->sriov.num_vfs;
107 /* Autonegotiation may be disabled */
108 dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
109 if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
110 dev_info->speed_capa |= ETH_LINK_SPEED_1G;
111 if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
112 dev_info->speed_capa |= ETH_LINK_SPEED_10G;
113 if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
114 dev_info->speed_capa |= ETH_LINK_SPEED_25G;
115 if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
116 dev_info->speed_capa |= ETH_LINK_SPEED_40G;
117 if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
118 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
119 if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
120 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
122 dev_info->max_rx_queues = sa->rxq_max;
123 dev_info->max_tx_queues = sa->txq_max;
125 /* By default packets are dropped if no descriptors are available */
126 dev_info->default_rxconf.rx_drop_en = 1;
128 dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);
131 * rx_offload_capa includes both device and queue offloads since
132 * the latter may be requested on a per-device basis, which makes
133 * sense when some offloads need to be set on all queues.
135 dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
136 dev_info->rx_queue_offload_capa;
138 dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);
141 * tx_offload_capa includes both device and queue offloads since
142 * the latter may be requested on a per-device basis, which makes
143 * sense when some offloads need to be set on all queues.
145 dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
146 dev_info->tx_queue_offload_capa;
148 if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
149 txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
151 dev_info->default_txconf.offloads |= txq_offloads_def;
153 if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
157 for (i = 0; i < rss->hf_map_nb_entries; ++i)
158 rte_hf |= rss->hf_map[i].rte;
160 dev_info->reta_size = EFX_RSS_TBL_SIZE;
161 dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
162 dev_info->flow_type_rss_offloads = rte_hf;
165 /* Initialize to hardware limits */
166 dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
167 dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
168 /* The RXQ hardware requires that the descriptor count is a power
169 * of 2, but rx_desc_lim cannot properly describe that constraint.
171 dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;
173 /* Initialize to hardware limits */
174 dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
175 dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
177 * The TXQ hardware requires that the descriptor count is a power
178 * of 2, but tx_desc_lim cannot properly describe that constraint.
180 dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;
182 if (sap->dp_rx->get_dev_info != NULL)
183 sap->dp_rx->get_dev_info(dev_info);
184 if (sap->dp_tx->get_dev_info != NULL)
185 sap->dp_tx->get_dev_info(dev_info);
187 dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
188 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
190 if (mae->status == SFC_MAE_STATUS_SUPPORTED) {
191 dev_info->switch_info.name = dev->device->driver->name;
192 dev_info->switch_info.domain_id = mae->switch_domain_id;
193 dev_info->switch_info.port_id = mae->switch_port_id;
199 static const uint32_t *
200 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
202 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
204 return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
208 sfc_dev_configure(struct rte_eth_dev *dev)
210 struct rte_eth_dev_data *dev_data = dev->data;
211 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
214 sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
215 dev_data->nb_rx_queues, dev_data->nb_tx_queues);
217 sfc_adapter_lock(sa);
219 case SFC_ETHDEV_CONFIGURED:
221 case SFC_ETHDEV_INITIALIZED:
222 rc = sfc_configure(sa);
225 sfc_err(sa, "unexpected adapter state %u to configure",
230 sfc_adapter_unlock(sa);
232 sfc_log_init(sa, "done %d", rc);
238 sfc_dev_start(struct rte_eth_dev *dev)
240 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
243 sfc_log_init(sa, "entry");
245 sfc_adapter_lock(sa);
247 sfc_adapter_unlock(sa);
249 sfc_log_init(sa, "done %d", rc);
255 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
257 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
258 struct rte_eth_link current_link;
261 sfc_log_init(sa, "entry");
263 if (sa->state != SFC_ETHDEV_STARTED) {
264 sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
265 } else if (wait_to_complete) {
266 efx_link_mode_t link_mode;
268 if (efx_port_poll(sa->nic, &link_mode) != 0)
269 link_mode = EFX_LINK_UNKNOWN;
270 sfc_port_link_mode_to_info(link_mode, &current_link);
273 sfc_ev_mgmt_qpoll(sa);
274 rte_eth_linkstatus_get(dev, &current_link);
277 ret = rte_eth_linkstatus_set(dev, &current_link);
279 sfc_notice(sa, "Link status is %s",
280 current_link.link_status ? "UP" : "DOWN");
286 sfc_dev_stop(struct rte_eth_dev *dev)
288 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
290 sfc_log_init(sa, "entry");
292 sfc_adapter_lock(sa);
294 sfc_adapter_unlock(sa);
296 sfc_log_init(sa, "done");
302 sfc_dev_set_link_up(struct rte_eth_dev *dev)
304 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
307 sfc_log_init(sa, "entry");
309 sfc_adapter_lock(sa);
311 sfc_adapter_unlock(sa);
318 sfc_dev_set_link_down(struct rte_eth_dev *dev)
320 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
322 sfc_log_init(sa, "entry");
324 sfc_adapter_lock(sa);
326 sfc_adapter_unlock(sa);
332 sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
334 free(dev->process_private);
335 rte_eth_dev_release_port(dev);
339 sfc_dev_close(struct rte_eth_dev *dev)
341 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
343 sfc_log_init(sa, "entry");
345 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
346 sfc_eth_dev_secondary_clear_ops(dev);
352 sfc_adapter_lock(sa);
354 case SFC_ETHDEV_STARTED:
356 SFC_ASSERT(sa->state == SFC_ETHDEV_CONFIGURED);
358 case SFC_ETHDEV_CONFIGURED:
360 SFC_ASSERT(sa->state == SFC_ETHDEV_INITIALIZED);
362 case SFC_ETHDEV_INITIALIZED:
365 sfc_err(sa, "unexpected adapter state %u on close", sa->state);
370 * Clean up all resources.
371 * Roll back primary process sfc_eth_dev_init() below.
374 sfc_eth_dev_clear_ops(dev);
379 sfc_kvargs_cleanup(sa);
381 sfc_adapter_unlock(sa);
382 sfc_adapter_lock_fini(sa);
384 sfc_log_init(sa, "done");
386 /* Required for logging, so clean up last */
395 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
398 struct sfc_port *port;
400 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
401 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
402 const char *desc = (allmulti) ? "all-multi" : "promiscuous";
405 sfc_adapter_lock(sa);
408 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
410 if (*toggle != enabled) {
413 if (sfc_sa2shared(sa)->isolated) {
414 sfc_warn(sa, "isolated mode is active on the port");
415 sfc_warn(sa, "the change is to be applied on the next "
416 "start provided that isolated mode is "
417 "disabled prior to the next start");
418 } else if ((sa->state == SFC_ETHDEV_STARTED) &&
419 ((rc = sfc_set_rx_mode(sa)) != 0)) {
420 *toggle = !(enabled);
421 sfc_warn(sa, "Failed to %s %s mode, rc = %d",
422 ((enabled) ? "enable" : "disable"), desc, rc);
425 * For promiscuous and all-multicast filters a
426 * permission failure should be reported as an
427 * unsupported filter.
434 sfc_adapter_unlock(sa);
439 sfc_dev_promisc_enable(struct rte_eth_dev *dev)
441 int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
448 sfc_dev_promisc_disable(struct rte_eth_dev *dev)
450 int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
457 sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
459 int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
466 sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
468 int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
475 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
476 uint16_t nb_rx_desc, unsigned int socket_id,
477 const struct rte_eth_rxconf *rx_conf,
478 struct rte_mempool *mb_pool)
480 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
481 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
482 sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
483 struct sfc_rxq_info *rxq_info;
484 sfc_sw_index_t sw_index;
487 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
488 ethdev_qid, nb_rx_desc, socket_id);
490 sfc_adapter_lock(sa);
492 sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
493 rc = sfc_rx_qinit(sa, sw_index, nb_rx_desc, socket_id,
498 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
499 dev->data->rx_queues[ethdev_qid] = rxq_info->dp;
501 sfc_adapter_unlock(sa);
506 sfc_adapter_unlock(sa);
512 sfc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
514 struct sfc_dp_rxq *dp_rxq = dev->data->rx_queues[qid];
516 struct sfc_adapter *sa;
517 sfc_sw_index_t sw_index;
522 rxq = sfc_rxq_by_dp_rxq(dp_rxq);
524 sfc_adapter_lock(sa);
526 sw_index = dp_rxq->dpq.queue_id;
528 sfc_log_init(sa, "RxQ=%u", sw_index);
530 sfc_rx_qfini(sa, sw_index);
532 sfc_adapter_unlock(sa);
536 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t ethdev_qid,
537 uint16_t nb_tx_desc, unsigned int socket_id,
538 const struct rte_eth_txconf *tx_conf)
540 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
541 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
542 struct sfc_txq_info *txq_info;
543 sfc_sw_index_t sw_index;
546 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
547 ethdev_qid, nb_tx_desc, socket_id);
549 sfc_adapter_lock(sa);
551 sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
552 rc = sfc_tx_qinit(sa, sw_index, nb_tx_desc, socket_id, tx_conf);
556 txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
557 dev->data->tx_queues[ethdev_qid] = txq_info->dp;
559 sfc_adapter_unlock(sa);
563 sfc_adapter_unlock(sa);
569 sfc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
571 struct sfc_dp_txq *dp_txq = dev->data->tx_queues[qid];
573 sfc_sw_index_t sw_index;
574 struct sfc_adapter *sa;
579 txq = sfc_txq_by_dp_txq(dp_txq);
580 sw_index = dp_txq->dpq.queue_id;
582 SFC_ASSERT(txq->evq != NULL);
585 sfc_log_init(sa, "TxQ = %u", sw_index);
587 sfc_adapter_lock(sa);
589 sfc_tx_qfini(sa, sw_index);
591 sfc_adapter_unlock(sa);
595 sfc_stats_get_dp_rx(struct sfc_adapter *sa, uint64_t *pkts, uint64_t *bytes)
597 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
598 uint64_t pkts_sum = 0;
599 uint64_t bytes_sum = 0;
602 for (i = 0; i < sas->ethdev_rxq_count; ++i) {
603 struct sfc_rxq_info *rxq_info;
605 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, i);
606 if (rxq_info->state & SFC_RXQ_INITIALIZED) {
607 union sfc_pkts_bytes qstats;
609 sfc_pkts_bytes_get(&rxq_info->dp->dpq.stats, &qstats);
610 pkts_sum += qstats.pkts -
611 sa->sw_stats.reset_rx_pkts[i];
612 bytes_sum += qstats.bytes -
613 sa->sw_stats.reset_rx_bytes[i];
622 sfc_stats_get_dp_tx(struct sfc_adapter *sa, uint64_t *pkts, uint64_t *bytes)
624 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
625 uint64_t pkts_sum = 0;
626 uint64_t bytes_sum = 0;
629 for (i = 0; i < sas->ethdev_txq_count; ++i) {
630 struct sfc_txq_info *txq_info;
632 txq_info = sfc_txq_info_by_ethdev_qid(sas, i);
633 if (txq_info->state & SFC_TXQ_INITIALIZED) {
634 union sfc_pkts_bytes qstats;
636 sfc_pkts_bytes_get(&txq_info->dp->dpq.stats, &qstats);
637 pkts_sum += qstats.pkts -
638 sa->sw_stats.reset_tx_pkts[i];
639 bytes_sum += qstats.bytes -
640 sa->sw_stats.reset_tx_bytes[i];
649 * Some statistics are computed as A - B where A and B each increase
650 * monotonically with some hardware counter(s) and the counters are read
653 * If packet X is counted in A, but not counted in B yet, computed value is
656 * If packet X is not counted in A at the moment of reading the counter,
657 * but counted in B at the moment of reading the counter, computed value
660 * However, a counter which grows backward is a worse evil than a slightly
661 * wrong value. So, let's try to guarantee that it never happens, except
662 * maybe the case when the MAC stats are zeroed as a result of a NIC reset.
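*
* For example, if *stat is 100 and a racy read yields newval 99, the
* signed difference is negative, so the update is skipped and the
* reported value stays at 100 rather than going backward; newval == 0
* is accepted unconditionally so that a post-reset zero takes effect.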
665 sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
667 if ((int64_t)(newval - *stat) > 0 || newval == 0)
672 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
674 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
675 bool have_dp_rx_stats = sap->dp_rx->features & SFC_DP_RX_FEAT_STATS;
676 bool have_dp_tx_stats = sap->dp_tx->features & SFC_DP_TX_FEAT_STATS;
677 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
678 struct sfc_port *port = &sa->port;
682 sfc_adapter_lock(sa);
684 if (have_dp_rx_stats)
685 sfc_stats_get_dp_rx(sa, &stats->ipackets, &stats->ibytes);
686 if (have_dp_tx_stats)
687 sfc_stats_get_dp_tx(sa, &stats->opackets, &stats->obytes);
689 ret = sfc_port_update_mac_stats(sa, B_FALSE);
693 mac_stats = port->mac_stats_buf;
695 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
696 EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
697 if (!have_dp_rx_stats) {
699 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
700 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
701 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
703 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
704 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
705 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
707 /* CRC is included in these stats, but shouldn't be */
708 stats->ibytes -= stats->ipackets * RTE_ETHER_CRC_LEN;
710 if (!have_dp_tx_stats) {
712 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
713 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
714 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
716 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
717 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
718 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
720 /* CRC is included in these stats, but shouldn't be */
721 stats->obytes -= stats->opackets * RTE_ETHER_CRC_LEN;
723 stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
724 stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
726 if (!have_dp_tx_stats) {
727 stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
728 stats->obytes = mac_stats[EFX_MAC_TX_OCTETS] -
729 mac_stats[EFX_MAC_TX_PKTS] * RTE_ETHER_CRC_LEN;
733 * Take into account stats which are always supported
734 * on EF10. If some stat is not supported by the current
735 * firmware variant or HW revision, it is guaranteed
736 * to be zero in mac_stats.
739 mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
740 mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
741 mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
742 mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
743 mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
744 mac_stats[EFX_MAC_PM_TRUNC_QBB] +
745 mac_stats[EFX_MAC_PM_DISCARD_QBB] +
746 mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
747 mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
748 mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
750 mac_stats[EFX_MAC_RX_FCS_ERRORS] +
751 mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
752 mac_stats[EFX_MAC_RX_JABBER_PKTS];
753 /* no oerrors counters supported on EF10 */
755 if (!have_dp_rx_stats) {
756 /* Exclude missed, errors and pauses from Rx packets */
757 sfc_update_diff_stat(&port->ipackets,
758 mac_stats[EFX_MAC_RX_PKTS] -
759 mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
760 stats->imissed - stats->ierrors);
761 stats->ipackets = port->ipackets;
762 stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS] -
763 mac_stats[EFX_MAC_RX_PKTS] * RTE_ETHER_CRC_LEN;
768 sfc_adapter_unlock(sa);
769 SFC_ASSERT(ret >= 0);
774 sfc_stats_reset(struct rte_eth_dev *dev)
776 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
777 struct sfc_port *port = &sa->port;
780 sfc_adapter_lock(sa);
782 if (sa->state != SFC_ETHDEV_STARTED) {
784 * The operation cannot be done if the port is not started; it
785 * will be scheduled to be done during the next port start
787 port->mac_stats_reset_pending = B_TRUE;
788 sfc_adapter_unlock(sa);
792 rc = sfc_port_reset_mac_stats(sa);
794 sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
796 sfc_sw_xstats_reset(sa);
798 sfc_adapter_unlock(sa);
805 sfc_xstats_get_nb_supported(struct sfc_adapter *sa)
807 struct sfc_port *port = &sa->port;
808 unsigned int nb_supported;
810 sfc_adapter_lock(sa);
811 nb_supported = port->mac_stats_nb_supported +
812 sfc_sw_xstats_get_nb_supported(sa);
813 sfc_adapter_unlock(sa);
819 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
820 unsigned int xstats_count)
822 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
823 unsigned int nb_written = 0;
824 unsigned int nb_supported = 0;
827 if (unlikely(xstats == NULL))
828 return sfc_xstats_get_nb_supported(sa);
830 rc = sfc_port_get_mac_stats(sa, xstats, xstats_count, &nb_written);
835 sfc_sw_xstats_get_vals(sa, xstats, xstats_count, &nb_written,
842 sfc_xstats_get_names(struct rte_eth_dev *dev,
843 struct rte_eth_xstat_name *xstats_names,
844 unsigned int xstats_count)
846 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
847 struct sfc_port *port = &sa->port;
849 unsigned int nstats = 0;
850 unsigned int nb_written = 0;
853 if (unlikely(xstats_names == NULL))
854 return sfc_xstats_get_nb_supported(sa);
856 for (i = 0; i < EFX_MAC_NSTATS; ++i) {
857 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
858 if (nstats < xstats_count) {
859 strlcpy(xstats_names[nstats].name,
860 efx_mac_stat_name(sa->nic, i),
861 sizeof(xstats_names[0].name));
868 ret = sfc_sw_xstats_get_names(sa, xstats_names, xstats_count,
869 &nb_written, &nstats);
879 sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
880 uint64_t *values, unsigned int n)
882 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
883 struct sfc_port *port = &sa->port;
884 unsigned int nb_supported;
888 if (unlikely(ids == NULL || values == NULL))
892 * The values array could be filled in non-sequential order. Pre-fill it
893 * with a constant indicating an invalid ID.
895 for (i = 0; i < n; i++)
896 values[i] = SFC_XSTAT_ID_INVALID_VAL;
898 rc = sfc_port_get_mac_stats_by_id(sa, ids, values, n);
902 nb_supported = port->mac_stats_nb_supported;
903 sfc_sw_xstats_get_vals_by_id(sa, ids, values, n, &nb_supported);
905 /* Return the number of stats written before the first invalid ID. */
906 for (i = 0; i < n; i++) {
907 if (values[i] == SFC_XSTAT_ID_INVALID_VAL)
915 sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
917 struct rte_eth_xstat_name *xstats_names,
920 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
921 struct sfc_port *port = &sa->port;
922 unsigned int nb_supported;
926 if (unlikely(xstats_names == NULL && ids != NULL) ||
927 unlikely(xstats_names != NULL && ids == NULL))
930 if (unlikely(xstats_names == NULL && ids == NULL))
931 return sfc_xstats_get_nb_supported(sa);
934 * The names array could be filled in non-sequential order. Pre-fill it
935 * with a string indicating an invalid ID.
937 for (i = 0; i < size; i++)
938 xstats_names[i].name[0] = SFC_XSTAT_ID_INVALID_NAME;
940 sfc_adapter_lock(sa);
942 SFC_ASSERT(port->mac_stats_nb_supported <=
943 RTE_DIM(port->mac_stats_by_id));
945 for (i = 0; i < size; i++) {
946 if (ids[i] < port->mac_stats_nb_supported) {
947 strlcpy(xstats_names[i].name,
948 efx_mac_stat_name(sa->nic,
949 port->mac_stats_by_id[ids[i]]),
950 sizeof(xstats_names[0].name));
954 nb_supported = port->mac_stats_nb_supported;
956 sfc_adapter_unlock(sa);
958 ret = sfc_sw_xstats_get_names_by_id(sa, ids, xstats_names, size,
965 /* Return the number of names written before the first invalid ID. */
966 for (i = 0; i < size; i++) {
967 if (xstats_names[i].name[0] == SFC_XSTAT_ID_INVALID_NAME)
975 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
977 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
978 unsigned int wanted_fc, link_fc;
980 memset(fc_conf, 0, sizeof(*fc_conf));
982 sfc_adapter_lock(sa);
984 if (sa->state == SFC_ETHDEV_STARTED)
985 efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
987 link_fc = sa->port.flow_ctrl;
991 fc_conf->mode = RTE_FC_NONE;
993 case EFX_FCNTL_RESPOND:
994 fc_conf->mode = RTE_FC_RX_PAUSE;
996 case EFX_FCNTL_GENERATE:
997 fc_conf->mode = RTE_FC_TX_PAUSE;
999 case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
1000 fc_conf->mode = RTE_FC_FULL;
1003 sfc_err(sa, "%s: unexpected flow control value %#x",
1007 fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
1009 sfc_adapter_unlock(sa);
1015 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1017 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1018 struct sfc_port *port = &sa->port;
1022 if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
1023 fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
1024 fc_conf->mac_ctrl_frame_fwd != 0) {
1025 sfc_err(sa, "unsupported flow control settings specified");
1030 switch (fc_conf->mode) {
1034 case RTE_FC_RX_PAUSE:
1035 fcntl = EFX_FCNTL_RESPOND;
1037 case RTE_FC_TX_PAUSE:
1038 fcntl = EFX_FCNTL_GENERATE;
1041 fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
1048 sfc_adapter_lock(sa);
1050 if (sa->state == SFC_ETHDEV_STARTED) {
1051 rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
1053 goto fail_mac_fcntl_set;
1056 port->flow_ctrl = fcntl;
1057 port->flow_ctrl_autoneg = fc_conf->autoneg;
1059 sfc_adapter_unlock(sa);
1064 sfc_adapter_unlock(sa);
1071 sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
1073 struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1074 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1075 boolean_t scatter_enabled;
1079 for (i = 0; i < sas->rxq_count; i++) {
1080 if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0)
1083 scatter_enabled = (sas->rxq_info[i].type_flags &
1084 EFX_RXQ_FLAG_SCATTER);
1086 if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size,
1087 encp->enc_rx_prefix_size,
1089 encp->enc_rx_scatter_max, &error)) {
1090 sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
1100 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
1102 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1103 size_t pdu = EFX_MAC_PDU(mtu);
1107 sfc_log_init(sa, "mtu=%u", mtu);
1110 if (pdu < EFX_MAC_PDU_MIN) {
1111 sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
1112 (unsigned int)mtu, (unsigned int)pdu,
1116 if (pdu > EFX_MAC_PDU_MAX) {
1117 sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
1118 (unsigned int)mtu, (unsigned int)pdu,
1119 (unsigned int)EFX_MAC_PDU_MAX);
1123 sfc_adapter_lock(sa);
1125 rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
1127 goto fail_check_scatter;
1129 if (pdu != sa->port.pdu) {
1130 if (sa->state == SFC_ETHDEV_STARTED) {
1133 old_pdu = sa->port.pdu;
1143 sfc_adapter_unlock(sa);
1145 sfc_log_init(sa, "done");
1149 sa->port.pdu = old_pdu;
1150 if (sfc_start(sa) != 0)
1151 sfc_err(sa, "cannot start with either new (%u) or old (%u) "
1152 "PDU max size - port is stopped",
1153 (unsigned int)pdu, (unsigned int)old_pdu);
1156 sfc_adapter_unlock(sa);
1159 sfc_log_init(sa, "failed %d", rc);
1164 sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1166 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1167 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1168 struct sfc_port *port = &sa->port;
1169 struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
1172 sfc_adapter_lock(sa);
1174 if (rte_is_same_ether_addr(mac_addr, &port->default_mac_addr))
1178 * Copy the address to the device private data so that
1179 * it can be recalled in case of adapter restart.
1181 rte_ether_addr_copy(mac_addr, &port->default_mac_addr);
1184 * Neither of the two following checks can return
1185 * an error. The new MAC address is preserved in
1186 * the device private data and can be activated
1187 * on the next port start if the user prevents
1188 * isolated mode from being enabled.
1190 if (sfc_sa2shared(sa)->isolated) {
1191 sfc_warn(sa, "isolated mode is active on the port");
1192 sfc_warn(sa, "will not set MAC address");
1196 if (sa->state != SFC_ETHDEV_STARTED) {
1197 sfc_notice(sa, "the port is not started");
1198 sfc_notice(sa, "the new MAC address will be set on port start");
1203 if (encp->enc_allow_set_mac_with_installed_filters) {
1204 rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
1206 sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
1211 * Changing the MAC address by means of an MCDI request
1212 * has no effect on received traffic, therefore
1213 * we also need to update unicast filters.
1215 rc = sfc_set_rx_mode_unchecked(sa);
1217 sfc_err(sa, "cannot set filter (rc = %u)", rc);
1218 /* Rollback the old address */
1219 (void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
1220 (void)sfc_set_rx_mode_unchecked(sa);
1223 sfc_warn(sa, "cannot set MAC address with filters installed");
1224 sfc_warn(sa, "adapter will be restarted to pick the new MAC");
1225 sfc_warn(sa, "(some traffic may be dropped)");
1228 * Since setting MAC address with filters installed is not
1229 * allowed on the adapter, the new MAC address will be set
1230 * by means of adapter restart. sfc_start() shall retrieve
1231 * the new address from the device private data and set it.
1236 sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
1241 rte_ether_addr_copy(old_addr, &port->default_mac_addr);
1243 sfc_adapter_unlock(sa);
1245 SFC_ASSERT(rc >= 0);
1251 sfc_set_mc_addr_list(struct rte_eth_dev *dev,
1252 struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
1254 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1255 struct sfc_port *port = &sa->port;
1256 uint8_t *mc_addrs = port->mcast_addrs;
1260 if (sfc_sa2shared(sa)->isolated) {
1261 sfc_err(sa, "isolated mode is active on the port");
1262 sfc_err(sa, "will not set multicast address list");
1266 if (mc_addrs == NULL)
1269 if (nb_mc_addr > port->max_mcast_addrs) {
1270 sfc_err(sa, "too many multicast addresses: %u > %u",
1271 nb_mc_addr, port->max_mcast_addrs);
1275 for (i = 0; i < nb_mc_addr; ++i) {
1276 rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
1278 mc_addrs += EFX_MAC_ADDR_LEN;
1281 port->nb_mcast_addrs = nb_mc_addr;
1283 if (sa->state != SFC_ETHDEV_STARTED)
1286 rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
1287 port->nb_mcast_addrs);
1289 sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
1291 SFC_ASSERT(rc >= 0);
1296 * The function is used by the secondary process as well. It must not
1297 * use any process-local pointers from the adapter data.
1300 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
1301 struct rte_eth_rxq_info *qinfo)
1303 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1304 sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
1305 struct sfc_rxq_info *rxq_info;
1307 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
1309 qinfo->mp = rxq_info->refill_mb_pool;
1310 qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
1311 qinfo->conf.rx_drop_en = 1;
1312 qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
1313 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
1314 if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
1315 qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
1316 qinfo->scattered_rx = 1;
1318 qinfo->nb_desc = rxq_info->entries;
1322 * The function is used by the secondary process as well. It must not
1323 * use any process-local pointers from the adapter data.
1326 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t ethdev_qid,
1327 struct rte_eth_txq_info *qinfo)
1329 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1330 struct sfc_txq_info *txq_info;
1332 SFC_ASSERT(ethdev_qid < sas->ethdev_txq_count);
1334 txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
1336 memset(qinfo, 0, sizeof(*qinfo));
1338 qinfo->conf.offloads = txq_info->offloads;
1339 qinfo->conf.tx_free_thresh = txq_info->free_thresh;
1340 qinfo->conf.tx_deferred_start = txq_info->deferred_start;
1341 qinfo->nb_desc = txq_info->entries;
1345 * The function is used by the secondary process as well. It must not
1346 * use any process-local pointers from the adapter data.
1349 sfc_rx_queue_count(void *rx_queue)
1351 struct sfc_dp_rxq *dp_rxq = rx_queue;
1352 const struct sfc_dp_rx *dp_rx;
1353 struct sfc_rxq_info *rxq_info;
1355 dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
1356 rxq_info = sfc_rxq_info_by_dp_rxq(dp_rxq);
1358 if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
1361 return dp_rx->qdesc_npending(dp_rxq);
1365 * The function is used by the secondary process as well. It must not
1366 * use any process-local pointers from the adapter data.
1369 sfc_rx_descriptor_status(void *queue, uint16_t offset)
1371 struct sfc_dp_rxq *dp_rxq = queue;
1372 const struct sfc_dp_rx *dp_rx;
1374 dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
1376 return dp_rx->qdesc_status(dp_rxq, offset);
1380 * The function is used by the secondary process as well. It must not
1381 * use any process-local pointers from the adapter data.
1384 sfc_tx_descriptor_status(void *queue, uint16_t offset)
1386 struct sfc_dp_txq *dp_txq = queue;
1387 const struct sfc_dp_tx *dp_tx;
1389 dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);
1391 return dp_tx->qdesc_status(dp_txq, offset);
1395 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1397 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1398 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1399 sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
1400 struct sfc_rxq_info *rxq_info;
1401 sfc_sw_index_t sw_index;
1404 sfc_log_init(sa, "RxQ=%u", ethdev_qid);
1406 sfc_adapter_lock(sa);
1409 if (sa->state != SFC_ETHDEV_STARTED)
1410 goto fail_not_started;
1412 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
1413 if (rxq_info->state != SFC_RXQ_INITIALIZED)
1414 goto fail_not_setup;
1416 sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
1417 rc = sfc_rx_qstart(sa, sw_index);
1419 goto fail_rx_qstart;
1421 rxq_info->deferred_started = B_TRUE;
1423 sfc_adapter_unlock(sa);
1430 sfc_adapter_unlock(sa);
1436 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1438 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1439 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1440 sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
1441 struct sfc_rxq_info *rxq_info;
1442 sfc_sw_index_t sw_index;
1444 sfc_log_init(sa, "RxQ=%u", ethdev_qid);
1446 sfc_adapter_lock(sa);
1448 sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, sfc_ethdev_qid);
1449 sfc_rx_qstop(sa, sw_index);
1451 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
1452 rxq_info->deferred_started = B_FALSE;
1454 sfc_adapter_unlock(sa);
1460 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1462 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1463 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1464 struct sfc_txq_info *txq_info;
1465 sfc_sw_index_t sw_index;
1468 sfc_log_init(sa, "TxQ = %u", ethdev_qid);
1470 sfc_adapter_lock(sa);
1473 if (sa->state != SFC_ETHDEV_STARTED)
1474 goto fail_not_started;
1476 txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
1477 if (txq_info->state != SFC_TXQ_INITIALIZED)
1478 goto fail_not_setup;
1480 sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
1481 rc = sfc_tx_qstart(sa, sw_index);
1483 goto fail_tx_qstart;
1485 txq_info->deferred_started = B_TRUE;
1487 sfc_adapter_unlock(sa);
1494 sfc_adapter_unlock(sa);
1500 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1502 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1503 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1504 struct sfc_txq_info *txq_info;
1505 sfc_sw_index_t sw_index;
1507 sfc_log_init(sa, "TxQ = %u", ethdev_qid);
1509 sfc_adapter_lock(sa);
1511 sw_index = sfc_txq_sw_index_by_ethdev_tx_qid(sas, ethdev_qid);
1512 sfc_tx_qstop(sa, sw_index);
1514 txq_info = sfc_txq_info_by_ethdev_qid(sas, ethdev_qid);
1515 txq_info->deferred_started = B_FALSE;
1517 sfc_adapter_unlock(sa);
1521 static efx_tunnel_protocol_t
1522 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
1525 case RTE_TUNNEL_TYPE_VXLAN:
1526 return EFX_TUNNEL_PROTOCOL_VXLAN;
1527 case RTE_TUNNEL_TYPE_GENEVE:
1528 return EFX_TUNNEL_PROTOCOL_GENEVE;
1530 return EFX_TUNNEL_NPROTOS;
1534 enum sfc_udp_tunnel_op_e {
1535 SFC_UDP_TUNNEL_ADD_PORT,
1536 SFC_UDP_TUNNEL_DEL_PORT,
1540 sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
1541 struct rte_eth_udp_tunnel *tunnel_udp,
1542 enum sfc_udp_tunnel_op_e op)
1544 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1545 efx_tunnel_protocol_t tunnel_proto;
1548 sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
1549 (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
1550 (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
1551 tunnel_udp->udp_port, tunnel_udp->prot_type);
1554 sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
1555 if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
1557 goto fail_bad_proto;
1560 sfc_adapter_lock(sa);
1563 case SFC_UDP_TUNNEL_ADD_PORT:
1564 rc = efx_tunnel_config_udp_add(sa->nic,
1565 tunnel_udp->udp_port,
1568 case SFC_UDP_TUNNEL_DEL_PORT:
1569 rc = efx_tunnel_config_udp_remove(sa->nic,
1570 tunnel_udp->udp_port,
1581 if (sa->state == SFC_ETHDEV_STARTED) {
1582 rc = efx_tunnel_reconfigure(sa->nic);
1585 * Configuration is accepted by FW and MC reboot
1586 * is initiated to apply the changes. MC reboot
1587 * will be handled in a usual way (MC reboot
1588 * event on management event queue and adapter
1592 } else if (rc != 0) {
1593 goto fail_reconfigure;
1597 sfc_adapter_unlock(sa);
1601 /* Remove/restore the entry since the change causes trouble */
1603 case SFC_UDP_TUNNEL_ADD_PORT:
1604 (void)efx_tunnel_config_udp_remove(sa->nic,
1605 tunnel_udp->udp_port,
1608 case SFC_UDP_TUNNEL_DEL_PORT:
1609 (void)efx_tunnel_config_udp_add(sa->nic,
1610 tunnel_udp->udp_port,
1617 sfc_adapter_unlock(sa);
1625 sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
1626 struct rte_eth_udp_tunnel *tunnel_udp)
1628 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
1632 sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
1633 struct rte_eth_udp_tunnel *tunnel_udp)
1635 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
1639 * The function is used by the secondary process as well. It must not
1640 * use any process-local pointers from the adapter data.
1643 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1644 struct rte_eth_rss_conf *rss_conf)
1646 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1647 struct sfc_rss *rss = &sas->rss;
1649 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
1653 * The mapping of hash configuration between RTE and EFX is not one-to-one,
1654 * hence a conversion is done here to derive the correct set of ETH_RSS
1655 * flags which corresponds to the active EFX configuration stored
1656 * locally in 'sfc_adapter' and kept up-to-date
1658 rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(rss, rss->hash_types);
1659 rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
1660 if (rss_conf->rss_key != NULL)
1661 rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);
1667 sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
1668 struct rte_eth_rss_conf *rss_conf)
1670 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1671 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1672 unsigned int efx_hash_types;
1673 uint32_t contexts[] = {EFX_RSS_CONTEXT_DEFAULT, rss->dummy_rss_context};
1674 unsigned int n_contexts;
1675 unsigned int mode_i = 0;
1676 unsigned int key_i = 0;
1680 n_contexts = rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT ? 1 : 2;
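/*
 * The update is applied to the default RSS context and, if a dummy
 * RSS context has been allocated, to that context as well.
 */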
1682 if (sfc_sa2shared(sa)->isolated)
1685 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
1686 sfc_err(sa, "RSS is not available");
1690 if (rss->channels == 0) {
1691 sfc_err(sa, "RSS is not configured");
1695 if ((rss_conf->rss_key != NULL) &&
1696 (rss_conf->rss_key_len != sizeof(rss->key))) {
1697 sfc_err(sa, "RSS key size is wrong (should be %zu)",
1702 sfc_adapter_lock(sa);
1704 rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
1706 goto fail_rx_hf_rte_to_efx;
1708 for (mode_i = 0; mode_i < n_contexts; mode_i++) {
1709 rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i],
1710 rss->hash_alg, efx_hash_types,
1713 goto fail_scale_mode_set;
1716 if (rss_conf->rss_key != NULL) {
1717 if (sa->state == SFC_ETHDEV_STARTED) {
1718 for (key_i = 0; key_i < n_contexts; key_i++) {
1719 rc = efx_rx_scale_key_set(sa->nic,
1724 goto fail_scale_key_set;
1728 rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
1731 rss->hash_types = efx_hash_types;
1733 sfc_adapter_unlock(sa);
1738 for (i = 0; i < key_i; i++) {
1739 if (efx_rx_scale_key_set(sa->nic, contexts[i], rss->key,
1740 sizeof(rss->key)) != 0)
1741 sfc_err(sa, "failed to restore RSS key");
1744 fail_scale_mode_set:
1745 for (i = 0; i < mode_i; i++) {
1746 if (efx_rx_scale_mode_set(sa->nic, contexts[i],
1747 EFX_RX_HASHALG_TOEPLITZ,
1748 rss->hash_types, B_TRUE) != 0)
1749 sfc_err(sa, "failed to restore RSS mode");
1752 fail_rx_hf_rte_to_efx:
1753 sfc_adapter_unlock(sa);
1758 * The function is used by the secondary process as well. It must not
1759 * use any process-local pointers from the adapter data.
1762 sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
1763 struct rte_eth_rss_reta_entry64 *reta_conf,
1766 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1767 struct sfc_rss *rss = &sas->rss;
1770 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || sas->isolated)
1773 if (rss->channels == 0)
1776 if (reta_size != EFX_RSS_TBL_SIZE)
1779 for (entry = 0; entry < reta_size; entry++) {
1780 int grp = entry / RTE_RETA_GROUP_SIZE;
1781 int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1783 if ((reta_conf[grp].mask >> grp_idx) & 1)
1784 reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
1791 sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
1792 struct rte_eth_rss_reta_entry64 *reta_conf,
1795 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1796 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1797 unsigned int *rss_tbl_new;
1802 if (sfc_sa2shared(sa)->isolated)
1805 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
1806 sfc_err(sa, "RSS is not available");
1810 if (rss->channels == 0) {
1811 sfc_err(sa, "RSS is not configured");
1815 if (reta_size != EFX_RSS_TBL_SIZE) {
1816 sfc_err(sa, "RETA size is wrong (should be %u)",
1821 rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
1822 if (rss_tbl_new == NULL)
1825 sfc_adapter_lock(sa);
1827 rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
1829 for (entry = 0; entry < reta_size; entry++) {
1830 int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1831 struct rte_eth_rss_reta_entry64 *grp;
1833 grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
1835 if (grp->mask & (1ull << grp_idx)) {
1836 if (grp->reta[grp_idx] >= rss->channels) {
1838 goto bad_reta_entry;
1840 rss_tbl_new[entry] = grp->reta[grp_idx];
1844 if (sa->state == SFC_ETHDEV_STARTED) {
1845 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1846 rss_tbl_new, EFX_RSS_TBL_SIZE);
1848 goto fail_scale_tbl_set;
1851 rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));
1855 sfc_adapter_unlock(sa);
1857 rte_free(rss_tbl_new);
1859 SFC_ASSERT(rc >= 0);
1864 sfc_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
1865 const struct rte_flow_ops **ops)
1867 *ops = &sfc_flow_ops;
1872 sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
1874 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
1877 * If the Rx datapath does not provide a callback to check the mempool,
1878 * all pools are supported.
1880 if (sap->dp_rx->pool_ops_supported == NULL)
1883 return sap->dp_rx->pool_ops_supported(pool);
1887 sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1889 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
1890 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1891 sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
1892 struct sfc_rxq_info *rxq_info;
1894 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
1896 return sap->dp_rx->intr_enable(rxq_info->dp);
1900 sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t ethdev_qid)
1902 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
1903 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1904 sfc_ethdev_qid_t sfc_ethdev_qid = ethdev_qid;
1905 struct sfc_rxq_info *rxq_info;
1907 rxq_info = sfc_rxq_info_by_ethdev_qid(sas, sfc_ethdev_qid);
1909 return sap->dp_rx->intr_disable(rxq_info->dp);
1912 struct sfc_mport_journal_ctx {
1913 struct sfc_adapter *sa;
1914 uint16_t switch_domain_id;
1915 uint32_t mcdi_handle;
1916 bool controllers_assigned;
1917 efx_pcie_interface_t *controllers;
1918 size_t nb_controllers;
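/*
 * Insert a PCIe interface into the context's controller list, keeping
 * the list sorted and free of duplicates and growing it as needed.
 */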
1922 sfc_journal_ctx_add_controller(struct sfc_mport_journal_ctx *ctx,
1923 efx_pcie_interface_t intf)
1925 efx_pcie_interface_t *new_controllers;
1929 if (ctx->controllers == NULL) {
1930 ctx->controllers = rte_malloc("sfc_controller_mapping",
1931 sizeof(ctx->controllers[0]), 0);
1932 if (ctx->controllers == NULL)
1935 ctx->controllers[0] = intf;
1936 ctx->nb_controllers = 1;
1941 for (i = 0; i < ctx->nb_controllers; i++) {
1942 if (ctx->controllers[i] == intf)
1944 if (ctx->controllers[i] > intf)
1949 ctx->nb_controllers += 1;
1950 new_size = ctx->nb_controllers * sizeof(ctx->controllers[0]);
1952 new_controllers = rte_realloc(ctx->controllers, new_size, 0);
1953 if (new_controllers == NULL) {
1954 rte_free(ctx->controllers);
1957 ctx->controllers = new_controllers;
/* Shift the tail up starting from the top so that no element is
 * overwritten before it has been copied.
 */
1959 for (i = ctx->nb_controllers - 1; i > target; i--)
1960 ctx->controllers[i] = ctx->controllers[i - 1];
1962 ctx->controllers[target] = intf;
1968 sfc_process_mport_journal_entry(struct sfc_mport_journal_ctx *ctx,
1969 efx_mport_desc_t *mport)
1971 struct sfc_mae_switch_port_request req;
1972 efx_mport_sel_t entity_selector;
1973 efx_mport_sel_t ethdev_mport;
1974 uint16_t switch_port_id;
1979 "processing mport id %u (controller %u pf %u vf %u)",
1980 mport->emd_id.id, mport->emd_vnic.ev_intf,
1981 mport->emd_vnic.ev_pf, mport->emd_vnic.ev_vf);
1982 efx_mae_mport_invalid(&ethdev_mport);
1984 if (!ctx->controllers_assigned) {
1985 rc = sfc_journal_ctx_add_controller(ctx,
1986 mport->emd_vnic.ev_intf);
1991 /* Build Mport selector */
1992 efx_rc = efx_mae_mport_by_pcie_mh_function(mport->emd_vnic.ev_intf,
1993 mport->emd_vnic.ev_pf,
1994 mport->emd_vnic.ev_vf,
1997 sfc_err(ctx->sa, "failed to build entity mport selector for c%upf%uvf%u",
1998 mport->emd_vnic.ev_intf,
1999 mport->emd_vnic.ev_pf,
2000 mport->emd_vnic.ev_vf);
2004 rc = sfc_mae_switch_port_id_by_entity(ctx->switch_domain_id,
2006 SFC_MAE_SWITCH_PORT_REPRESENTOR,
2010 /* Already registered */
2014 * No representor has been created for this entity.
2015 * Create a dummy switch registry entry with an invalid ethdev
2016 * mport selector. When a corresponding representor is created,
2017 * this entry will be updated.
2019 req.type = SFC_MAE_SWITCH_PORT_REPRESENTOR;
2020 req.entity_mportp = &entity_selector;
2021 req.ethdev_mportp = &ethdev_mport;
2022 req.ethdev_port_id = RTE_MAX_ETHPORTS;
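/* No ethdev is backing the entry yet; RTE_MAX_ETHPORTS marks it unassigned */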
2023 req.port_data.repr.intf = mport->emd_vnic.ev_intf;
2024 req.port_data.repr.pf = mport->emd_vnic.ev_pf;
2025 req.port_data.repr.vf = mport->emd_vnic.ev_vf;
2027 rc = sfc_mae_assign_switch_port(ctx->switch_domain_id,
2028 &req, &switch_port_id);
2031 "failed to assign MAE switch port for c%upf%uvf%u: %s",
2032 mport->emd_vnic.ev_intf,
2033 mport->emd_vnic.ev_pf,
2034 mport->emd_vnic.ev_vf,
2040 sfc_err(ctx->sa, "failed to find MAE switch port for c%upf%uvf%u: %s",
2041 mport->emd_vnic.ev_intf,
2042 mport->emd_vnic.ev_pf,
2043 mport->emd_vnic.ev_vf,
2052 sfc_process_mport_journal_cb(void *data, efx_mport_desc_t *mport,
2055 struct sfc_mport_journal_ctx *ctx = data;
2057 if (ctx == NULL || ctx->sa == NULL) {
/* sfc_err() would dereference ctx->sa, which may be NULL here */
2058 SFC_GENERIC_LOG(ERR, "received NULL context or SFC adapter");
2062 if (mport_len != sizeof(*mport)) {
2063 sfc_err(ctx->sa, "actual and expected mport buffer sizes differ");
2067 SFC_ASSERT(sfc_adapter_is_locked(ctx->sa));
2070 * If a zombie flag is set, it means the mport has been marked for
2071 * deletion and cannot be used for any new operations. The mport will
2072 * be destroyed completely once all references to it are released.
2074 if (mport->emd_zombie) {
2075 sfc_dbg(ctx->sa, "mport is a zombie, skipping");
2078 if (mport->emd_type != EFX_MPORT_TYPE_VNIC) {
2079 sfc_dbg(ctx->sa, "mport is not a VNIC, skipping");
2082 if (mport->emd_vnic.ev_client_type != EFX_MPORT_VNIC_CLIENT_FUNCTION) {
2083 sfc_dbg(ctx->sa, "mport is not a function, skipping");
2086 if (mport->emd_vnic.ev_handle == ctx->mcdi_handle) {
2087 sfc_dbg(ctx->sa, "mport is this driver instance, skipping");
2091 return sfc_process_mport_journal_entry(ctx, mport);
2095 sfc_process_mport_journal(struct sfc_adapter *sa)
2097 struct sfc_mport_journal_ctx ctx;
2098 const efx_pcie_interface_t *controllers;
2099 size_t nb_controllers;
2103 memset(&ctx, 0, sizeof(ctx));
2105 ctx.switch_domain_id = sa->mae.switch_domain_id;
2107 efx_rc = efx_mcdi_get_own_client_handle(sa->nic, &ctx.mcdi_handle);
2109 sfc_err(sa, "failed to get own MCDI handle");
2110 SFC_ASSERT(efx_rc > 0);
2114 rc = sfc_mae_switch_domain_controllers(ctx.switch_domain_id,
2115 &controllers, &nb_controllers);
2117 sfc_err(sa, "failed to get controller mapping");
2121 ctx.controllers_assigned = controllers != NULL;
2122 ctx.controllers = NULL;
2123 ctx.nb_controllers = 0;
2125 efx_rc = efx_mae_read_mport_journal(sa->nic,
2126 sfc_process_mport_journal_cb, &ctx);
2128 sfc_err(sa, "failed to process MAE mport journal");
2129 SFC_ASSERT(efx_rc > 0);
2133 if (controllers == NULL) {
2134 rc = sfc_mae_switch_domain_map_controllers(ctx.switch_domain_id,
2136 ctx.nb_controllers);
2145 sfc_count_representors_cb(enum sfc_mae_switch_port_type type,
2146 const efx_mport_sel_t *ethdev_mportp __rte_unused,
2147 uint16_t ethdev_port_id __rte_unused,
2148 const efx_mport_sel_t *entity_mportp __rte_unused,
2149 uint16_t switch_port_id __rte_unused,
2150 union sfc_mae_switch_port_data *port_datap
2154 int *counter = user_datap;
2156 SFC_ASSERT(counter != NULL);
2158 if (type == SFC_MAE_SWITCH_PORT_REPRESENTOR)
2162 struct sfc_get_representors_ctx {
2163 struct rte_eth_representor_info *info;
2164 struct sfc_adapter *sa;
2165 uint16_t switch_domain_id;
2166 const efx_pcie_interface_t *controllers;
2167 size_t nb_controllers;
2171 sfc_get_representors_cb(enum sfc_mae_switch_port_type type,
2172 const efx_mport_sel_t *ethdev_mportp __rte_unused,
2173 uint16_t ethdev_port_id __rte_unused,
2174 const efx_mport_sel_t *entity_mportp __rte_unused,
2175 uint16_t switch_port_id,
2176 union sfc_mae_switch_port_data *port_datap,
2179 struct sfc_get_representors_ctx *ctx = user_datap;
2180 struct rte_eth_representor_range *range;
2184 SFC_ASSERT(ctx != NULL);
2185 SFC_ASSERT(ctx->info != NULL);
2186 SFC_ASSERT(ctx->sa != NULL);
2188 if (type != SFC_MAE_SWITCH_PORT_REPRESENTOR) {
2189 sfc_dbg(ctx->sa, "not a representor, skipping");
2192 if (ctx->info->nb_ranges >= ctx->info->nb_ranges_alloc) {
2193 sfc_dbg(ctx->sa, "info structure is full already");
2197 range = &ctx->info->ranges[ctx->info->nb_ranges];
2198 rc = sfc_mae_switch_controller_from_mapping(ctx->controllers,
2199 ctx->nb_controllers,
2200 port_datap->repr.intf,
2201 &range->controller);
2203 sfc_err(ctx->sa, "invalid representor controller: %d",
2204 port_datap->repr.intf);
2205 range->controller = -1;
2207 range->pf = port_datap->repr.pf;
2208 range->id_base = switch_port_id;
2209 range->id_end = switch_port_id;
2211 if (port_datap->repr.vf != EFX_PCI_VF_INVALID) {
2212 range->type = RTE_ETH_REPRESENTOR_VF;
2213 range->vf = port_datap->repr.vf;
2214 ret = snprintf(range->name, RTE_DEV_NAME_MAX_LEN,
2215 "c%dpf%dvf%d", range->controller, range->pf,
2218 range->type = RTE_ETH_REPRESENTOR_PF;
2219 ret = snprintf(range->name, RTE_DEV_NAME_MAX_LEN,
2220 "c%dpf%d", range->controller, range->pf);
2222 if (ret >= RTE_DEV_NAME_MAX_LEN) {
2223 sfc_err(ctx->sa, "representor name has been truncated: %s",
2227 ctx->info->nb_ranges++;
2231 sfc_representor_info_get(struct rte_eth_dev *dev,
2232 struct rte_eth_representor_info *info)
2234 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2235 struct sfc_get_representors_ctx get_repr_ctx;
2236 const efx_nic_cfg_t *nic_cfg;
2237 uint16_t switch_domain_id;
2242 sfc_adapter_lock(sa);
2244 if (sa->mae.status != SFC_MAE_STATUS_SUPPORTED) {
2245 sfc_adapter_unlock(sa);
2249 rc = sfc_process_mport_journal(sa);
2251 sfc_adapter_unlock(sa);
2256 switch_domain_id = sa->mae.switch_domain_id;
2259 rc = sfc_mae_switch_ports_iterate(switch_domain_id,
2260 sfc_count_representors_cb,
2263 sfc_adapter_unlock(sa);
2269 sfc_adapter_unlock(sa);
2273 rc = sfc_mae_switch_domain_controllers(switch_domain_id,
2274 &get_repr_ctx.controllers,
2275 &get_repr_ctx.nb_controllers);
2277 sfc_adapter_unlock(sa);
2282 nic_cfg = efx_nic_cfg_get(sa->nic);
2284 rc = sfc_mae_switch_domain_get_controller(switch_domain_id,
2288 sfc_err(sa, "invalid controller: %d", nic_cfg->enc_intf);
2292 info->controller = controller;
2293 info->pf = nic_cfg->enc_pf;
2295 get_repr_ctx.info = info;
2296 get_repr_ctx.sa = sa;
2297 get_repr_ctx.switch_domain_id = switch_domain_id;
2298 rc = sfc_mae_switch_ports_iterate(switch_domain_id,
2299 sfc_get_representors_cb,
2302 sfc_adapter_unlock(sa);
2307 sfc_adapter_unlock(sa);
2312 sfc_rx_metadata_negotiate(struct rte_eth_dev *dev, uint64_t *features)
2314 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2315 uint64_t supported = 0;
2317 sfc_adapter_lock(sa);
2319 if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_FLAG) != 0)
2320 supported |= RTE_ETH_RX_METADATA_USER_FLAG;
2322 if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_MARK) != 0)
2323 supported |= RTE_ETH_RX_METADATA_USER_MARK;
2325 if (sfc_flow_tunnel_is_supported(sa))
2326 supported |= RTE_ETH_RX_METADATA_TUNNEL_ID;
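/* Grant only those of the requested features that are supported */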
2328 sa->negotiated_rx_metadata = supported & *features;
2329 *features = sa->negotiated_rx_metadata;
2331 sfc_adapter_unlock(sa);
2336 static const struct eth_dev_ops sfc_eth_dev_ops = {
2337 .dev_configure = sfc_dev_configure,
2338 .dev_start = sfc_dev_start,
2339 .dev_stop = sfc_dev_stop,
2340 .dev_set_link_up = sfc_dev_set_link_up,
2341 .dev_set_link_down = sfc_dev_set_link_down,
2342 .dev_close = sfc_dev_close,
2343 .promiscuous_enable = sfc_dev_promisc_enable,
2344 .promiscuous_disable = sfc_dev_promisc_disable,
2345 .allmulticast_enable = sfc_dev_allmulti_enable,
2346 .allmulticast_disable = sfc_dev_allmulti_disable,
2347 .link_update = sfc_dev_link_update,
2348 .stats_get = sfc_stats_get,
2349 .stats_reset = sfc_stats_reset,
2350 .xstats_get = sfc_xstats_get,
2351 .xstats_reset = sfc_stats_reset,
2352 .xstats_get_names = sfc_xstats_get_names,
2353 .dev_infos_get = sfc_dev_infos_get,
2354 .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
2355 .mtu_set = sfc_dev_set_mtu,
2356 .rx_queue_start = sfc_rx_queue_start,
2357 .rx_queue_stop = sfc_rx_queue_stop,
2358 .tx_queue_start = sfc_tx_queue_start,
2359 .tx_queue_stop = sfc_tx_queue_stop,
2360 .rx_queue_setup = sfc_rx_queue_setup,
2361 .rx_queue_release = sfc_rx_queue_release,
2362 .rx_queue_intr_enable = sfc_rx_queue_intr_enable,
2363 .rx_queue_intr_disable = sfc_rx_queue_intr_disable,
2364 .tx_queue_setup = sfc_tx_queue_setup,
2365 .tx_queue_release = sfc_tx_queue_release,
2366 .flow_ctrl_get = sfc_flow_ctrl_get,
2367 .flow_ctrl_set = sfc_flow_ctrl_set,
2368 .mac_addr_set = sfc_mac_addr_set,
2369 .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add,
2370 .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del,
2371 .reta_update = sfc_dev_rss_reta_update,
2372 .reta_query = sfc_dev_rss_reta_query,
2373 .rss_hash_update = sfc_dev_rss_hash_update,
2374 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
2375 .flow_ops_get = sfc_dev_flow_ops_get,
2376 .set_mc_addr_list = sfc_set_mc_addr_list,
2377 .rxq_info_get = sfc_rx_queue_info_get,
2378 .txq_info_get = sfc_tx_queue_info_get,
2379 .fw_version_get = sfc_fw_version_get,
2380 .xstats_get_by_id = sfc_xstats_get_by_id,
2381 .xstats_get_names_by_id = sfc_xstats_get_names_by_id,
2382 .pool_ops_supported = sfc_pool_ops_supported,
2383 .representor_info_get = sfc_representor_info_get,
2384 .rx_metadata_negotiate = sfc_rx_metadata_negotiate,
2387 struct sfc_ethdev_init_data {
2388 uint16_t nb_representors;
2392 * Duplicate a string in potentially shared memory required for
2393 * multi-process support.
2395 * strdup() allocates from process-local heap/memory.
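* Hence, rte_malloc() is used here so that the copy remains valid
* in secondary processes.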
2398 sfc_strdup(const char *str)
2406 size = strlen(str) + 1;
2407 copy = rte_malloc(__func__, size, 0);
2408 if (copy != NULL)
2409 rte_memcpy(copy, str, size);
2415 sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
2417 struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2418 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
2419 const struct sfc_dp_rx *dp_rx;
2420 const struct sfc_dp_tx *dp_tx;
2421 const efx_nic_cfg_t *encp;
2422 unsigned int avail_caps = 0;
2423 const char *rx_name = NULL;
2424 const char *tx_name = NULL;
2427 switch (sa->family) {
2428 case EFX_FAMILY_HUNTINGTON:
2429 case EFX_FAMILY_MEDFORD:
2430 case EFX_FAMILY_MEDFORD2:
2431 avail_caps |= SFC_DP_HW_FW_CAP_EF10;
2432 avail_caps |= SFC_DP_HW_FW_CAP_RX_EFX;
2433 avail_caps |= SFC_DP_HW_FW_CAP_TX_EFX;
2435 case EFX_FAMILY_RIVERHEAD:
2436 avail_caps |= SFC_DP_HW_FW_CAP_EF100;
2442 encp = efx_nic_cfg_get(sa->nic);
2443 if (encp->enc_rx_es_super_buffer_supported)
2444 avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
2446 rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
2447 sfc_kvarg_string_handler, &rx_name);
2449 goto fail_kvarg_rx_datapath;
2451 if (rx_name != NULL) {
2452 dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
2453 if (dp_rx == NULL) {
2454 sfc_err(sa, "Rx datapath %s not found", rx_name);
2458 if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
2460 "Insufficient Hw/FW capabilities to use Rx datapath %s",
2463 goto fail_dp_rx_caps;
2466 dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
2467 if (dp_rx == NULL) {
2468 sfc_err(sa, "Rx datapath by caps %#x not found",
2475 sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
2476 if (sas->dp_rx_name == NULL) {
2478 goto fail_dp_rx_name;
2481 if (strcmp(dp_rx->dp.name, SFC_KVARG_DATAPATH_EF10_ESSB) == 0) {
2482 /* FLAG and MARK are always available from Rx prefix. */
2483 sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_FLAG;
2484 sa->negotiated_rx_metadata |= RTE_ETH_RX_METADATA_USER_MARK;
2487 sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);
2489 rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
2490 sfc_kvarg_string_handler, &tx_name);
2492 goto fail_kvarg_tx_datapath;
2494 if (tx_name != NULL) {
2495 dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
2496 if (dp_tx == NULL) {
2497 sfc_err(sa, "Tx datapath %s not found", tx_name);
2501 if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
2503 "Insufficient Hw/FW capabilities to use Tx datapath %s",
2506 goto fail_dp_tx_caps;
2509 dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
2510 if (dp_tx == NULL) {
2511 sfc_err(sa, "Tx datapath by caps %#x not found",
2518 sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
2519 if (sas->dp_tx_name == NULL) {
2521 goto fail_dp_tx_name;
2524 sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);
2526 sa->priv.dp_rx = dp_rx;
2527 sa->priv.dp_tx = dp_tx;
2529 dev->rx_pkt_burst = dp_rx->pkt_burst;
2530 dev->tx_pkt_prepare = dp_tx->pkt_prepare;
2531 dev->tx_pkt_burst = dp_tx->pkt_burst;
2533 dev->rx_queue_count = sfc_rx_queue_count;
2534 dev->rx_descriptor_status = sfc_rx_descriptor_status;
2535 dev->tx_descriptor_status = sfc_tx_descriptor_status;
2536 dev->dev_ops = &sfc_eth_dev_ops;
2543 fail_kvarg_tx_datapath:
2544 rte_free(sas->dp_rx_name);
2545 sas->dp_rx_name = NULL;
2550 fail_kvarg_rx_datapath:
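/*
 * Illustrative devargs (datapath names assumed to match those registered in
 * sfc_register_dp() below): a datapath may be forced with e.g.
 * "-a 0000:02:00.0,rx_datapath=ef100,tx_datapath=ef100"; otherwise the first
 * registered datapath matching avail_caps is selected.
 */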
static void
sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);

	dev->dev_ops = NULL;
	dev->tx_pkt_prepare = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(sas->dp_tx_name);
	sas->dp_tx_name = NULL;
	sa->priv.dp_tx = NULL;

	rte_free(sas->dp_rx_name);
	sas->dp_rx_name = NULL;
	sa->priv.dp_rx = NULL;
}
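/*
 * Teardown mirrors sfc_eth_dev_set_ops(): the ops table and burst entry
 * points are cleared before the datapath references and names are released.
 */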
static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
	.dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
	.reta_query = sfc_dev_rss_reta_query,
	.rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
	.rxq_info_get = sfc_rx_queue_info_get,
	.txq_info_get = sfc_tx_queue_info_get,
};
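/*
 * Secondary processes attach with the reduced, query-only op set above;
 * control-path operations (start/stop, queue setup, flow control, etc.)
 * are not exposed to secondary processes.
 */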
static int
sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct sfc_adapter_priv *sap;
	const struct sfc_dp_rx *dp_rx;
	const struct sfc_dp_tx *dp_tx;
	int rc;

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sap = calloc(1, sizeof(*sap));
	if (sap == NULL) {
		rc = ENOMEM;
		goto fail_alloc_priv;
	}

	sap->logtype_main = logtype_main;

	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
	if (dp_rx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Rx datapath", sas->dp_rx_name);
		rc = ENOENT;
		goto fail_dp_rx;
	}
	if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Rx datapath does not support multi-process",
			sas->dp_rx_name);
		rc = EINVAL;
		goto fail_dp_rx_multi_process;
	}

	dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
	if (dp_tx == NULL) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"cannot find %s Tx datapath", sas->dp_tx_name);
		rc = ENOENT;
		goto fail_dp_tx;
	}
	if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
			"%s Tx datapath does not support multi-process",
			sas->dp_tx_name);
		rc = EINVAL;
		goto fail_dp_tx_multi_process;
	}

	sap->dp_rx = dp_rx;
	sap->dp_tx = dp_tx;

	dev->process_private = sap;
	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
	dev->tx_pkt_burst = dp_tx->pkt_burst;
	dev->rx_queue_count = sfc_rx_queue_count;
	dev->rx_descriptor_status = sfc_rx_descriptor_status;
	dev->tx_descriptor_status = sfc_tx_descriptor_status;
	dev->dev_ops = &sfc_eth_dev_secondary_ops;

	return 0;

fail_dp_tx_multi_process:
fail_dp_tx:
fail_dp_rx_multi_process:
fail_dp_rx:
	free(sap);

fail_alloc_priv:
	return rc;
}
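/*
 * Burst function pointers are process-local code addresses, so a secondary
 * process must look up its own dp_rx/dp_tx by the names stored in shared
 * memory rather than reuse pointers set up by the primary process.
 */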
static void
sfc_register_dp(void)
{
	/* Register once */
	if (TAILQ_EMPTY(&sfc_dp_head)) {
		/* Prefer EF10 datapath */
		sfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);

		sfc_dp_register(&sfc_dp_head, &sfc_ef100_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
	}
}
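/*
 * Registration order appears to express preference: the *_by_caps() lookups
 * take the first entry whose HW/FW capabilities match, so the most capable
 * datapaths are registered ahead of the generic efx fallback.
 */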
static int
sfc_parse_switch_mode(struct sfc_adapter *sa, bool has_representors)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const char *switch_mode = NULL;
	int rc;

	sfc_log_init(sa, "entry");

	rc = sfc_kvargs_process(sa, SFC_KVARG_SWITCH_MODE,
				sfc_kvarg_string_handler, &switch_mode);
	if (rc != 0)
		goto fail_kvargs;

	if (switch_mode == NULL) {
		sa->switchdev = encp->enc_mae_supported &&
				(!encp->enc_datapath_cap_evb ||
				 has_representors);
	} else if (strcasecmp(switch_mode, SFC_KVARG_SWITCH_MODE_LEGACY) == 0) {
		sa->switchdev = false;
	} else if (strcasecmp(switch_mode,
			      SFC_KVARG_SWITCH_MODE_SWITCHDEV) == 0) {
		sa->switchdev = true;
	} else {
		sfc_err(sa, "invalid switch mode device argument '%s'",
			switch_mode);
		rc = EINVAL;
		goto fail_mode;
	}

	sfc_log_init(sa, "done");

	return 0;

fail_mode:
fail_kvargs:
	sfc_log_init(sa, "failed: %s", rte_strerror(rc));

	return rc;
}
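/*
 * Illustrative devargs (kvarg values assumed): "switch_mode=legacy" or
 * "switch_mode=switchdev" overrides the default derived above from MAE
 * support and the EVB datapath capability.
 */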
static int
sfc_eth_dev_init(struct rte_eth_dev *dev, void *init_params)
{
	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct sfc_ethdev_init_data *init_data = init_params;
	uint32_t logtype_main;
	struct sfc_adapter *sa;
	int rc;
	const efx_nic_cfg_t *encp;
	const struct rte_ether_addr *from;
	int ret;

	if (sfc_efx_dev_class_get(pci_dev->device.devargs) !=
	    SFC_EFX_DEV_CLASS_NET) {
		SFC_GENERIC_LOG(DEBUG,
			"Incompatible device class: skip probing, should be probed by other sfc driver.");
		return 1;
	}

	rc = sfc_dp_mport_register();
	if (rc != 0)
		return rc;

	sfc_register_dp();

	logtype_main = sfc_register_logtype(&pci_dev->addr,
					    SFC_LOGTYPE_MAIN_STR,
					    RTE_LOG_NOTICE);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -sfc_eth_dev_secondary_init(dev, logtype_main);

	/* Required for logging */
	ret = snprintf(sas->log_prefix, sizeof(sas->log_prefix),
		       "PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu16 ": ",
		       pci_dev->addr.domain, pci_dev->addr.bus,
		       pci_dev->addr.devid, pci_dev->addr.function,
		       dev->data->port_id);
	if (ret < 0 || ret >= (int)sizeof(sas->log_prefix)) {
		SFC_GENERIC_LOG(ERR,
			"reserved log prefix is too short for " PCI_PRI_FMT,
			pci_dev->addr.domain, pci_dev->addr.bus,
			pci_dev->addr.devid, pci_dev->addr.function);
		return -EINVAL;
	}
	sas->pci_addr = pci_dev->addr;
	sas->port_id = dev->data->port_id;

	/*
	 * Allocate process private data from heap, since it should not
	 * be located in shared memory allocated using rte_malloc() API.
	 */
	sa = calloc(1, sizeof(*sa));
	if (sa == NULL) {
		rc = ENOMEM;
		goto fail_alloc_sa;
	}

	dev->process_private = sa;

	/* Required for logging */
	sa->priv.shared = sas;
	sa->priv.logtype_main = logtype_main;

	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);
	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "probing");
	rc = sfc_probe(sa);
	if (rc != 0)
		goto fail_probe;

	/*
	 * Selecting a default switch mode requires the NIC to be probed and
	 * to have its capabilities filled in.
	 */
	rc = sfc_parse_switch_mode(sa, init_data->nb_representors > 0);
	if (rc != 0)
		goto fail_switch_mode;

	sfc_log_init(sa, "set device ops");
	rc = sfc_eth_dev_set_ops(dev);
	if (rc != 0)
		goto fail_set_ops;

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	if (sa->switchdev && sa->mae.status != SFC_MAE_STATUS_SUPPORTED) {
		sfc_err(sa,
			"failed to enable switchdev mode without MAE support");
		rc = ENOTSUP;
		goto fail_switchdev_no_mae;
	}

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The arguments are really reverse order in comparison to
	 * Linux kernel. Copy from NIC config to Ethernet device data.
	 */
	from = (const struct rte_ether_addr *)(encp->enc_mac_addr);
	rte_ether_addr_copy(from, &dev->data->mac_addrs[0]);

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_switchdev_no_mae:
	sfc_detach(sa);

fail_attach:
	sfc_eth_dev_clear_ops(dev);

fail_set_ops:
fail_switch_mode:
	sfc_unprobe(sa);

fail_probe:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	dev->process_private = NULL;
	free(sa);

fail_alloc_sa:
	SFC_ASSERT(rc > 0);
	return -rc;
}
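/*
 * Internally rc carries a positive errno; it is negated at the ethdev
 * boundary (here and in the secondary-init call above) to follow the DPDK
 * convention of negative errno return values.
 */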
static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	sfc_dev_close(dev);

	return 0;
}

static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD) },
	{ .vendor_id = 0 /* sentinel */ }
};
static int
sfc_parse_rte_devargs(const char *args, struct rte_eth_devargs *devargs)
{
	struct rte_eth_devargs eth_da = { .nb_representor_ports = 0 };
	int rc;

	if (args != NULL) {
		rc = rte_eth_devargs_parse(args, &eth_da);
		if (rc != 0) {
			SFC_GENERIC_LOG(ERR,
					"Failed to parse generic devargs '%s'",
					args);
			return rc;
		}
	}

	*devargs = eth_da;

	return 0;
}
static int
sfc_eth_dev_find_or_create(struct rte_pci_device *pci_dev,
			   struct sfc_ethdev_init_data *init_data,
			   struct rte_eth_dev **devp,
			   bool *dev_created)
{
	struct rte_eth_dev *dev;
	bool created = false;
	int rc;

	dev = rte_eth_dev_allocated(pci_dev->device.name);
	if (dev == NULL) {
		rc = rte_eth_dev_create(&pci_dev->device, pci_dev->device.name,
					sizeof(struct sfc_adapter_shared),
					eth_dev_pci_specific_init, pci_dev,
					sfc_eth_dev_init, init_data);
		if (rc != 0) {
			SFC_GENERIC_LOG(ERR, "Failed to create sfc ethdev '%s'",
					pci_dev->device.name);
			return rc;
		}

		created = true;

		dev = rte_eth_dev_allocated(pci_dev->device.name);
		if (dev == NULL) {
			SFC_GENERIC_LOG(ERR,
				"Failed to find allocated sfc ethdev '%s'",
				pci_dev->device.name);
			return -ENODEV;
		}
	}

	*devp = dev;
	*dev_created = created;

	return 0;
}
static int
sfc_eth_dev_create_repr(struct sfc_adapter *sa,
			efx_pcie_interface_t controller,
			uint16_t port,
			uint16_t repr_port,
			enum rte_eth_representor_type type)
{
	struct sfc_repr_entity_info entity;
	efx_mport_sel_t mport_sel;
	int rc;

	switch (type) {
	case RTE_ETH_REPRESENTOR_NONE:
		return 0;
	case RTE_ETH_REPRESENTOR_VF:
	case RTE_ETH_REPRESENTOR_PF:
		break;
	case RTE_ETH_REPRESENTOR_SF:
		sfc_err(sa, "SF representors are not supported");
		return ENOTSUP;
	default:
		sfc_err(sa, "unknown representor type: %d", type);
		return ENOTSUP;
	}

	rc = efx_mae_mport_by_pcie_mh_function(controller,
					       port,
					       repr_port,
					       &mport_sel);
	if (rc != 0) {
		sfc_err(sa,
			"failed to get m-port selector for controller %u port %u repr_port %u: %s",
			controller, port, repr_port, rte_strerror(-rc));
		return rc;
	}

	memset(&entity, 0, sizeof(entity));
	entity.type = type;
	entity.intf = controller;
	entity.pf = port;
	entity.vf = repr_port;

	rc = sfc_repr_create(sa->eth_dev, &entity, sa->mae.switch_domain_id,
			     &mport_sel);
	if (rc != 0) {
		sfc_err(sa,
			"failed to create representor for controller %u port %u repr_port %u: %s",
			controller, port, repr_port, rte_strerror(-rc));
		return rc;
	}

	return 0;
}
static int
sfc_eth_dev_create_repr_port(struct sfc_adapter *sa,
			     const struct rte_eth_devargs *eth_da,
			     efx_pcie_interface_t controller,
			     uint16_t port)
{
	int first_error = 0;
	uint16_t i;
	int rc;

	if (eth_da->type == RTE_ETH_REPRESENTOR_PF) {
		return sfc_eth_dev_create_repr(sa, controller, port,
					       EFX_PCI_VF_INVALID,
					       RTE_ETH_REPRESENTOR_PF);
	}

	for (i = 0; i < eth_da->nb_representor_ports; i++) {
		rc = sfc_eth_dev_create_repr(sa, controller, port,
					     eth_da->representor_ports[i],
					     eth_da->type);
		if (rc != 0 && first_error == 0)
			first_error = rc;
	}

	return first_error;
}
static int
sfc_eth_dev_create_repr_controller(struct sfc_adapter *sa,
				   const struct rte_eth_devargs *eth_da,
				   efx_pcie_interface_t controller)
{
	const efx_nic_cfg_t *encp;
	int first_error = 0;
	uint16_t default_port;
	uint16_t i;
	int rc;

	if (eth_da->nb_ports == 0) {
		encp = efx_nic_cfg_get(sa->nic);
		default_port = encp->enc_intf == controller ? encp->enc_pf : 0;
		return sfc_eth_dev_create_repr_port(sa, eth_da, controller,
						    default_port);
	}

	for (i = 0; i < eth_da->nb_ports; i++) {
		rc = sfc_eth_dev_create_repr_port(sa, eth_da, controller,
						  eth_da->ports[i]);
		if (rc != 0 && first_error == 0)
			first_error = rc;
	}

	return first_error;
}
static int
sfc_eth_dev_create_representors(struct rte_eth_dev *dev,
				const struct rte_eth_devargs *eth_da)
{
	efx_pcie_interface_t intf;
	const efx_nic_cfg_t *encp;
	struct sfc_adapter *sa;
	uint16_t switch_domain_id;
	uint16_t i;
	int rc;

	sa = sfc_adapter_by_eth_dev(dev);
	switch_domain_id = sa->mae.switch_domain_id;

	switch (eth_da->type) {
	case RTE_ETH_REPRESENTOR_NONE:
		return 0;
	case RTE_ETH_REPRESENTOR_PF:
	case RTE_ETH_REPRESENTOR_VF:
		break;
	case RTE_ETH_REPRESENTOR_SF:
		sfc_err(sa, "SF representors are not supported");
		return -ENOTSUP;
	default:
		sfc_err(sa, "unknown representor type: %d",
			eth_da->type);
		return -EINVAL;
	}

	if (!sa->switchdev) {
		sfc_err(sa, "cannot create representors in non-switchdev mode");
		return -EINVAL;
	}

	if (!sfc_repr_available(sfc_sa2shared(sa))) {
		sfc_err(sa, "cannot create representors: unsupported");
		return -ENOTSUP;
	}

	/*
	 * This is needed to construct the DPDK controller -> EFX interface
	 * mapping.
	 */
	sfc_adapter_lock(sa);
	rc = sfc_process_mport_journal(sa);
	sfc_adapter_unlock(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		return -rc;
	}

	if (eth_da->nb_mh_controllers > 0) {
		for (i = 0; i < eth_da->nb_mh_controllers; i++) {
			rc = sfc_mae_switch_domain_get_intf(switch_domain_id,
						eth_da->mh_controllers[i],
						&intf);
			if (rc != 0) {
				sfc_err(sa, "failed to get representor");
				continue;
			}
			sfc_eth_dev_create_repr_controller(sa, eth_da, intf);
		}
	} else {
		encp = efx_nic_cfg_get(sa->nic);
		sfc_eth_dev_create_repr_controller(sa, eth_da, encp->enc_intf);
	}

	return 0;
}
static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	struct sfc_ethdev_init_data init_data;
	struct rte_eth_devargs eth_da;
	struct rte_eth_dev *dev;
	bool dev_created;
	int rc;

	if (pci_dev->device.devargs != NULL) {
		rc = sfc_parse_rte_devargs(pci_dev->device.devargs->args,
					   &eth_da);
		if (rc != 0)
			return rc;
	} else {
		memset(&eth_da, 0, sizeof(eth_da));
	}

	/* If no VF representors specified, check for PF ones */
	if (eth_da.nb_representor_ports > 0)
		init_data.nb_representors = eth_da.nb_representor_ports;
	else
		init_data.nb_representors = eth_da.nb_ports;

	if (init_data.nb_representors > 0 &&
	    rte_eal_process_type() != RTE_PROC_PRIMARY) {
		SFC_GENERIC_LOG(ERR,
			"Create representors from secondary process not supported, dev '%s'",
			pci_dev->device.name);
		return -ENOTSUP;
	}

	/*
	 * Driver supports RTE_PCI_DRV_PROBE_AGAIN. Hence create device only
	 * if it does not already exist. Re-probing an existing device is
	 * expected to allow additional representors to be configured.
	 */
	rc = sfc_eth_dev_find_or_create(pci_dev, &init_data, &dev,
					&dev_created);
	if (rc != 0)
		return rc;

	rc = sfc_eth_dev_create_representors(dev, &eth_da);
	if (rc != 0) {
		if (dev_created)
			(void)rte_eth_dev_destroy(dev, sfc_eth_dev_uninit);

		return rc;
	}

	return 0;
}
static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
}
static struct rte_pci_driver sfc_efx_pmd = {
	.id_table = pci_id_sfc_efx_map,
	.drv_flags =
		RTE_PCI_DRV_INTR_LSC |
		RTE_PCI_DRV_NEED_MAPPING |
		RTE_PCI_DRV_PROBE_AGAIN,
	.probe = sfc_eth_dev_pci_probe,
	.remove = sfc_eth_dev_pci_remove,
};
RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_SWITCH_MODE "=" SFC_KVARG_VALUES_SWITCH_MODE " "
	SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
	SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");
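/*
 * Illustrative invocation (kvarg values assumed from the parameters above):
 *   dpdk-testpmd -a 0000:02:00.0,switch_mode=switchdev,rx_datapath=ef100
 */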
RTE_INIT(sfc_driver_register_logtype)
{
	int ret;

	ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
						   RTE_LOG_NOTICE);
	sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
}