/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_errno.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_kvargs.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_flow.h"
#include "sfc_dp.h"
#include "sfc_dp_rx.h"

uint32_t sfc_logtype_driver;

static struct sfc_dp_list sfc_dp_head =
	TAILQ_HEAD_INITIALIZER(sfc_dp_head);

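/*
 * Registry of Rx/Tx datapath implementations: entries are added by
 * sfc_register_dp() on first device probe and looked up by name or
 * by capabilities when the device ops are set.
 */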
static int
sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	efx_nic_fw_info_t enfi;
	int ret;
	int rc;

	/*
	 * Return value of the callback is likely supposed to be
	 * equal to or greater than 0, nevertheless, if an error
	 * occurs, it will be desirable to pass it to the caller
	 */
	if ((fw_version == NULL) || (fw_size == 0))
		return -EINVAL;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return -rc;

	ret = snprintf(fw_version, fw_size,
		       "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
		       enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
		       enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
	if (ret < 0)
		return ret;

	if (enfi.enfi_dpcpu_fw_ids_valid) {
		size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
		int ret_extra;

		ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
				     fw_size - dpcpu_fw_ids_offset,
				     " rx%" PRIx16 " tx%" PRIx16,
				     enfi.enfi_rx_dpcpu_fw_id,
				     enfi.enfi_tx_dpcpu_fw_id);
		if (ret_extra < 0)
			return ret_extra;

		ret += ret_extra;
	}

	if (fw_size < (size_t)(++ret))
		return ret;
	else
		return 0;
}

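/*
 * dev_infos_get callback: report link speed capabilities, queue limits,
 * offload capabilities and descriptor limits, then let the selected
 * Rx/Tx datapaths override the defaults via their get_dev_info() hooks.
 */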
static void
sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_rss *rss = &sa->rss;
	uint64_t txq_offloads_def = 0;

	sfc_log_init(sa, "entry");

	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;

	/* Autonegotiation may be disabled */
	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
	if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX)
		dev_info->speed_capa |= ETH_LINK_SPEED_100G;

	dev_info->max_rx_queues = sa->rxq_max;
	dev_info->max_tx_queues = sa->txq_max;

	/* By default packets are dropped if no descriptors are available */
	dev_info->default_rxconf.rx_drop_en = 1;

	dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);

	/*
	 * rx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per device basis which makes
	 * sense when some offloads need to be set on all queues.
	 */
	dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
				    dev_info->rx_queue_offload_capa;

	dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);

	/*
	 * tx_offload_capa includes both device and queue offloads since
	 * the latter may be requested on a per device basis which makes
	 * sense when some offloads need to be set on all queues.
	 */
	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
				    dev_info->tx_queue_offload_capa;

	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	dev_info->default_txconf.offloads |= txq_offloads_def;

	dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
	if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
	    !encp->enc_hw_tx_insert_vlan_enabled)
		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;

	if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;

	if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)
		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP;

	if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)
		dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;

	if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
		uint64_t rte_hf = 0;
		unsigned int i;

		for (i = 0; i < rss->hf_map_nb_entries; ++i)
			rte_hf |= rss->hf_map[i].rte;

		dev_info->reta_size = EFX_RSS_TBL_SIZE;
		dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
		dev_info->flow_type_rss_offloads = rte_hf;
	}

	/* Initialize to hardware limits */
	dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
	dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
	/*
	 * The RXQ hardware requires that the descriptor count is a power
	 * of 2, but rx_desc_lim cannot properly describe that constraint.
	 */
	dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;

	/* Initialize to hardware limits */
	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
	dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
	/*
	 * The TXQ hardware requires that the descriptor count is a power
	 * of 2, but tx_desc_lim cannot properly describe that constraint
	 */
	dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;

	if (sa->dp_rx->get_dev_info != NULL)
		sa->dp_rx->get_dev_info(dev_info);
	if (sa->dp_tx->get_dev_info != NULL)
		sa->dp_tx->get_dev_info(dev_info);
}

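/* Supported packet types are reported by the active Rx datapath */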
static const uint32_t *
sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported;

	return sa->dp_rx->supported_ptypes_get(tunnel_encaps);
}

static int
sfc_dev_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_dev_data *dev_data = dev->data;
	struct sfc_adapter *sa = dev_data->dev_private;
	int rc;

	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		rc = sfc_configure(sa);
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u to configure",
			sa->state);
		rc = EINVAL;
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_start(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done %d", rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_eth_link current_link;
	int ret;

	sfc_log_init(sa, "entry");

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
	} else if (wait_to_complete) {
		efx_link_mode_t link_mode;

		if (efx_port_poll(sa->nic, &link_mode) != 0)
			link_mode = EFX_LINK_UNKNOWN;
		sfc_port_link_mode_to_info(link_mode, &current_link);
	} else {
		sfc_ev_mgmt_qpoll(sa);
		rte_eth_linkstatus_get(dev, &current_link);
	}

	ret = rte_eth_linkstatus_set(dev, &current_link);
	if (ret == 0)
		sfc_notice(sa, "Link status is %s",
			   current_link.link_status ? "UP" : "DOWN");

	return ret;
}

static void
sfc_dev_stop(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	sfc_stop(sa);
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}

static int
sfc_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	rc = sfc_start(sa);
	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

309 sfc_dev_set_link_down(struct rte_eth_dev *dev)
311 struct sfc_adapter *sa = dev->data->dev_private;
313 sfc_log_init(sa, "entry");
315 sfc_adapter_lock(sa);
317 sfc_adapter_unlock(sa);
static void
sfc_dev_close(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);
	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		sfc_stop(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_CONFIGURED:
		sfc_close(sa);
		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
		/* FALLTHROUGH */
	case SFC_ADAPTER_INITIALIZED:
		break;
	default:
		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
		break;
	}
	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
}

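/*
 * Common helper for the promiscuous/all-multicast callbacks below.
 * The requested mode is remembered in the port state so that it can
 * still be applied on the next start when it cannot take effect now.
 */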
static void
sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
		   boolean_t enabled)
{
	struct sfc_port *port;
	boolean_t *toggle;
	struct sfc_adapter *sa = dev->data->dev_private;
	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
	const char *desc = (allmulti) ? "all-multi" : "promiscuous";

	sfc_adapter_lock(sa);

	port = &sa->port;
	toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);

	if (*toggle != enabled) {
		*toggle = enabled;

		if (port->isolated) {
			sfc_warn(sa, "isolated mode is active on the port");
			sfc_warn(sa, "the change is to be applied on the next "
				     "start provided that isolated mode is "
				     "disabled prior to the next start");
		} else if ((sa->state == SFC_ADAPTER_STARTED) &&
			   (sfc_set_rx_mode(sa) != 0)) {
			*toggle = !(enabled);
			sfc_warn(sa, "Failed to %s %s mode",
				 ((enabled) ? "enable" : "disable"), desc);
		}
	}

	sfc_adapter_unlock(sa);
}

static void
sfc_dev_promisc_enable(struct rte_eth_dev *dev)
{
	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
}

static void
sfc_dev_promisc_disable(struct rte_eth_dev *dev)
{
	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
}

static void
sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
{
	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
}

static void
sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
{
	sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
}

static int
sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
		     rx_queue_id, nb_rx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
			  rx_conf, mb_pool);
	if (rc != 0)
		goto fail_rx_qinit;

	dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_rx_queue_release(void *queue)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	struct sfc_rxq *rxq;
	struct sfc_adapter *sa;
	unsigned int sw_index;

	if (dp_rxq == NULL)
		return;

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	sa = rxq->evq->sa;
	sfc_adapter_lock(sa);

	sw_index = sfc_rxq_sw_index(rxq);

	sfc_log_init(sa, "RxQ=%u", sw_index);

	sa->eth_dev->data->rx_queues[sw_index] = NULL;

	sfc_rx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

static int
sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
		     tx_queue_id, nb_tx_desc, socket_id);

	sfc_adapter_lock(sa);

	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
	if (rc != 0)
		goto fail_tx_qinit;

	dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp;

	sfc_adapter_unlock(sa);
	return 0;

fail_tx_qinit:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static void
sfc_tx_queue_release(void *queue)
{
	struct sfc_dp_txq *dp_txq = queue;
	struct sfc_txq *txq;
	unsigned int sw_index;
	struct sfc_adapter *sa;

	if (dp_txq == NULL)
		return;

	txq = sfc_txq_by_dp_txq(dp_txq);
	sw_index = sfc_txq_sw_index(txq);

	SFC_ASSERT(txq->evq != NULL);
	sa = txq->evq->sa;

	sfc_log_init(sa, "TxQ = %u", sw_index);

	sfc_adapter_lock(sa);

	SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
	sa->eth_dev->data->tx_queues[sw_index] = NULL;

	sfc_tx_qfini(sa, sw_index);

	sfc_adapter_unlock(sa);
}

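/*
 * stats_get callback: prefer per-vadapter counters when the firmware
 * exposes them, otherwise fall back to plain MAC statistics.
 */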
static int
sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int ret;

	rte_spinlock_lock(&port->mac_stats_lock);

	ret = sfc_port_update_mac_stats(sa);
	if (ret != 0)
		goto unlock;

	mac_stats = port->mac_stats_buf;

	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
		stats->ipackets =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
		stats->opackets =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
		stats->ibytes =
			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
		stats->obytes =
			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
		stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
	} else {
		stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
		/*
		 * Take into account stats which are always supported
		 * on EF10. If some stat is not supported by the current
		 * firmware variant or HW revision, it is guaranteed
		 * to be zero in mac_stats.
		 */
		stats->imissed =
			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
		stats->ierrors =
			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
			mac_stats[EFX_MAC_RX_JABBER_PKTS];
		/* no oerrors counters supported on EF10 */
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);
	SFC_ASSERT(ret >= 0);
	return -ret;
}

static void
sfc_stats_reset(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	int rc;

	if (sa->state != SFC_ADAPTER_STARTED) {
		/*
		 * The operation cannot be done if port is not started; it
		 * will be scheduled to be done during the next port start
		 */
		port->mac_stats_reset_pending = B_TRUE;
		return;
	}

	rc = sfc_port_reset_mac_stats(sa);
	if (rc != 0)
		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
}

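/*
 * xstats use the order of supported MAC statistics as the id space:
 * the names and values callbacks below enumerate the support mask in
 * the same order, so the ids stay consistent between them.
 */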
static int
sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
	       unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	int rc;
	unsigned int i;
	int nstats = 0;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		nstats = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats != NULL && nstats < (int)xstats_count) {
				xstats[nstats].id = nstats;
				xstats[nstats].value = mac_stats[i];
			}
			nstats++;
		}
	}

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return nstats;
}

static int
sfc_xstats_get_names(struct rte_eth_dev *dev,
		     struct rte_eth_xstat_name *xstats_names,
		     unsigned int xstats_count)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int i;
	unsigned int nstats = 0;

	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
			if (xstats_names != NULL && nstats < xstats_count) {
				strncpy(xstats_names[nstats].name,
					efx_mac_stat_name(sa->nic, i),
					sizeof(xstats_names[0].name));
				/* strncpy() does not guarantee termination */
				xstats_names[nstats].name[
					sizeof(xstats_names[0].name) - 1] = '\0';
			}
			nstats++;
		}
	}

	return nstats;
}

static int
sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
		     uint64_t *values, unsigned int n)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint64_t *mac_stats;
	unsigned int nb_supported = 0;
	unsigned int nb_written = 0;
	unsigned int i;
	int ret;
	int rc;

	if (unlikely(values == NULL) ||
	    unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
		return port->mac_stats_nb_supported;

	rte_spinlock_lock(&port->mac_stats_lock);

	rc = sfc_port_update_mac_stats(sa);
	if (rc != 0) {
		SFC_ASSERT(rc > 0);
		ret = -rc;
		goto unlock;
	}

	mac_stats = port->mac_stats_buf;

	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
			continue;

		if ((ids == NULL) || (ids[nb_written] == nb_supported))
			values[nb_written++] = mac_stats[i];

		++nb_supported;
	}

	ret = nb_written;

unlock:
	rte_spinlock_unlock(&port->mac_stats_lock);

	return ret;
}

static int
sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   const uint64_t *ids, unsigned int size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int nb_supported = 0;
	unsigned int nb_written = 0;
	unsigned int i;

	if (unlikely(xstats_names == NULL) ||
	    unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
		return port->mac_stats_nb_supported;

	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
			continue;

		if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
			char *name = xstats_names[nb_written++].name;

			strncpy(name, efx_mac_stat_name(sa->nic, i),
				sizeof(xstats_names[0].name));
			name[sizeof(xstats_names[0].name) - 1] = '\0';
		}

		++nb_supported;
	}

	return nb_written;
}

static int
sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	unsigned int wanted_fc, link_fc;

	memset(fc_conf, 0, sizeof(*fc_conf));

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED)
		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
	else
		link_fc = sa->port.flow_ctrl;

	switch (link_fc) {
	case 0:
		fc_conf->mode = RTE_FC_NONE;
		break;
	case EFX_FCNTL_RESPOND:
		fc_conf->mode = RTE_FC_RX_PAUSE;
		break;
	case EFX_FCNTL_GENERATE:
		fc_conf->mode = RTE_FC_TX_PAUSE;
		break;
	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
		fc_conf->mode = RTE_FC_FULL;
		break;
	default:
		sfc_err(sa, "%s: unexpected flow control value %#x",
			__func__, link_fc);
	}

	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	unsigned int fcntl;
	int rc;

	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
	    fc_conf->mac_ctrl_frame_fwd != 0) {
		sfc_err(sa, "unsupported flow control settings specified");
		rc = EINVAL;
		goto fail_inval;
	}

	switch (fc_conf->mode) {
	case RTE_FC_NONE:
		fcntl = 0;
		break;
	case RTE_FC_RX_PAUSE:
		fcntl = EFX_FCNTL_RESPOND;
		break;
	case RTE_FC_TX_PAUSE:
		fcntl = EFX_FCNTL_GENERATE;
		break;
	case RTE_FC_FULL:
		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
		break;
	default:
		rc = EINVAL;
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
		if (rc != 0)
			goto fail_mac_fcntl_set;
	}

	port->flow_ctrl = fcntl;
	port->flow_ctrl_autoneg = fc_conf->autoneg;

	sfc_adapter_unlock(sa);

	return 0;

fail_mac_fcntl_set:
	sfc_adapter_unlock(sa);

fail_inval:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	size_t pdu = EFX_MAC_PDU(mtu);
	size_t old_pdu;
	int rc = EINVAL;

	sfc_log_init(sa, "mtu=%u", mtu);

	if (pdu < EFX_MAC_PDU_MIN) {
		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MIN);
		goto fail_inval;
	}
	if (pdu > EFX_MAC_PDU_MAX) {
		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
			(unsigned int)mtu, (unsigned int)pdu,
			EFX_MAC_PDU_MAX);
		goto fail_inval;
	}

	sfc_adapter_lock(sa);

	if (pdu != sa->port.pdu) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			sfc_stop(sa);

			old_pdu = sa->port.pdu;
			sa->port.pdu = pdu;
			rc = sfc_start(sa);
			if (rc != 0)
				goto fail_start;
		} else {
			sa->port.pdu = pdu;
		}
	}

	/*
	 * The driver does not use it, but other PMDs update jumbo_frame
	 * flag and max_rx_pkt_len when MTU is set.
	 */
	if (mtu > ETHER_MAX_LEN) {
		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;

		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
		rxmode->jumbo_frame = 1;
	}

	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_start:
	sa->port.pdu = old_pdu;
	if (sfc_start(sa) != 0)
		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
			"PDU max size - port is stopped",
			(unsigned int)pdu, (unsigned int)old_pdu);
	sfc_adapter_unlock(sa);

fail_inval:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_port *port = &sa->port;
	struct ether_addr *old_addr = &dev->data->mac_addrs[0];
	int rc = 0;

	sfc_adapter_lock(sa);

	/*
	 * Copy the address to the device private data so that
	 * it could be recalled in the case of adapter restart.
	 */
	ether_addr_copy(mac_addr, &port->default_mac_addr);

	/*
	 * Neither of the two following checks can return
	 * an error. The new MAC address is preserved in
	 * the device private data and can be activated
	 * on the next port start if the user prevents
	 * isolated mode from being enabled.
	 */
	if (port->isolated) {
		sfc_warn(sa, "isolated mode is active on the port");
		sfc_warn(sa, "will not set MAC address");
		goto unlock;
	}

	if (sa->state != SFC_ADAPTER_STARTED) {
		sfc_notice(sa, "the port is not started");
		sfc_notice(sa, "the new MAC address will be set on port start");

		goto unlock;
	}

	if (encp->enc_allow_set_mac_with_installed_filters) {
		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
		if (rc != 0) {
			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
			goto unlock;
		}

		/*
		 * Changing the MAC address by means of MCDI request
		 * has no effect on received traffic, therefore
		 * we also need to update unicast filters
		 */
		rc = sfc_set_rx_mode(sa);
		if (rc != 0) {
			sfc_err(sa, "cannot set filter (rc = %u)", rc);
			/* Rollback the old address */
			(void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
			(void)sfc_set_rx_mode(sa);
		}
	} else {
		sfc_warn(sa, "cannot set MAC address with filters installed");
		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
		sfc_warn(sa, "(some traffic may be dropped)");

		/*
		 * Since setting MAC address with filters installed is not
		 * allowed on the adapter, the new MAC address will be set
		 * by means of adapter restart. sfc_start() shall retrieve
		 * the new address from the device private data and set it.
		 */
		sfc_stop(sa);
		rc = sfc_start(sa);
		if (rc != 0)
			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
	}

unlock:
	if (rc != 0)
		ether_addr_copy(old_addr, &port->default_mac_addr);

	sfc_adapter_unlock(sa);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
		     uint32_t nb_mc_addr)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_port *port = &sa->port;
	uint8_t *mc_addrs = port->mcast_addrs;
	int rc;
	unsigned int i;

	if (port->isolated) {
		sfc_err(sa, "isolated mode is active on the port");
		sfc_err(sa, "will not set multicast address list");
		return -ENOTSUP;
	}

	if (mc_addrs == NULL)
		return -ENOBUFS;

	if (nb_mc_addr > port->max_mcast_addrs) {
		sfc_err(sa, "too many multicast addresses: %u > %u",
			nb_mc_addr, port->max_mcast_addrs);
		return -EINVAL;
	}

	for (i = 0; i < nb_mc_addr; ++i) {
		rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
			   EFX_MAC_ADDR_LEN);
		mc_addrs += EFX_MAC_ADDR_LEN;
	}

	port->nb_mcast_addrs = nb_mc_addr;

	if (sa->state != SFC_ADAPTER_STARTED)
		return 0;

	rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
					port->nb_mcast_addrs);
	if (rc != 0)
		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		      struct rte_eth_rxq_info *qinfo)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	sfc_adapter_lock(sa);

	SFC_ASSERT(rx_queue_id < sa->rxq_count);

	rxq_info = &sa->rxq_info[rx_queue_id];
	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq != NULL);

	qinfo->mp = rxq->refill_mb_pool;
	qinfo->conf.rx_free_thresh = rxq->refill_threshold;
	qinfo->conf.rx_drop_en = 1;
	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
	qinfo->conf.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
			       DEV_RX_OFFLOAD_UDP_CKSUM |
			       DEV_RX_OFFLOAD_TCP_CKSUM;
	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
		qinfo->scattered_rx = 1;
	}
	qinfo->nb_desc = rxq_info->entries;

	sfc_adapter_unlock(sa);
}

/*
 * The function is used by the secondary process as well. It must not
 * use any process-local pointers from the adapter data.
 */
static void
sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		      struct rte_eth_txq_info *qinfo)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_txq_info *txq_info;

	sfc_adapter_lock(sa);

	SFC_ASSERT(tx_queue_id < sa->txq_count);

	txq_info = &sa->txq_info[tx_queue_id];
	SFC_ASSERT(txq_info->txq != NULL);

	memset(qinfo, 0, sizeof(*qinfo));

	qinfo->conf.txq_flags = txq_info->txq->flags;
	qinfo->conf.offloads = txq_info->txq->offloads;
	qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
	qinfo->nb_desc = txq_info->entries;

	sfc_adapter_unlock(sa);
}

static uint32_t
sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	return sfc_rx_qdesc_npending(sa, rx_queue_id);
}

static int
sfc_rx_descriptor_done(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;

	return sfc_rx_qdesc_done(dp_rxq, offset);
}

static int
sfc_rx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_rxq *dp_rxq = queue;
	struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);

	return rxq->evq->sa->dp_rx->qdesc_status(dp_rxq, offset);
}

static int
sfc_tx_descriptor_status(void *queue, uint16_t offset)
{
	struct sfc_dp_txq *dp_txq = queue;
	struct sfc_txq *txq = sfc_txq_by_dp_txq(dp_txq);

	return txq->evq->sa->dp_tx->qdesc_status(dp_txq, offset);
}

static int
sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	rc = sfc_rx_qstart(sa, rx_queue_id);
	if (rc != 0)
		goto fail_rx_qstart;

	sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_rx_qstart:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "RxQ=%u", rx_queue_id);

	sfc_adapter_lock(sa);
	sfc_rx_qstop(sa, rx_queue_id);

	sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	rc = EINVAL;
	if (sa->state != SFC_ADAPTER_STARTED)
		goto fail_not_started;

	rc = sfc_tx_qstart(sa, tx_queue_id);
	if (rc != 0)
		goto fail_tx_qstart;

	sa->txq_info[tx_queue_id].deferred_started = B_TRUE;

	sfc_adapter_unlock(sa);

	return 0;

fail_tx_qstart:
fail_not_started:
	sfc_adapter_unlock(sa);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	sfc_log_init(sa, "TxQ = %u", tx_queue_id);

	sfc_adapter_lock(sa);

	sfc_tx_qstop(sa, tx_queue_id);

	sa->txq_info[tx_queue_id].deferred_started = B_FALSE;

	sfc_adapter_unlock(sa);

	return 0;
}

static efx_tunnel_protocol_t
sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
{
	switch (rte_type) {
	case RTE_TUNNEL_TYPE_VXLAN:
		return EFX_TUNNEL_PROTOCOL_VXLAN;
	case RTE_TUNNEL_TYPE_GENEVE:
		return EFX_TUNNEL_PROTOCOL_GENEVE;
	default:
		return EFX_TUNNEL_NPROTOS;
	}
}

enum sfc_udp_tunnel_op_e {
	SFC_UDP_TUNNEL_ADD_PORT,
	SFC_UDP_TUNNEL_DEL_PORT,
};

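/*
 * Common helper for the UDP tunnel port add/delete callbacks: it
 * updates the NIC tunnel configuration and, if the adapter is started,
 * triggers a firmware reconfigure which may complete via an MC reboot.
 */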
static int
sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
		      struct rte_eth_udp_tunnel *tunnel_udp,
		      enum sfc_udp_tunnel_op_e op)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	efx_tunnel_protocol_t tunnel_proto;
	int rc;

	sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
		     (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
		     (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
		     tunnel_udp->udp_port, tunnel_udp->prot_type);

	tunnel_proto =
		sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
	if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
		rc = ENOTSUP;
		goto fail_bad_proto;
	}

	sfc_adapter_lock(sa);

	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		rc = efx_tunnel_config_udp_add(sa->nic,
					       tunnel_udp->udp_port,
					       tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		rc = efx_tunnel_config_udp_remove(sa->nic,
						  tunnel_udp->udp_port,
						  tunnel_proto);
		break;
	default:
		rc = EINVAL;
		goto fail_bad_op;
	}

	if (rc != 0)
		goto fail_op;

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc == EAGAIN) {
			/*
			 * Configuration is accepted by FW and MC reboot
			 * is initiated to apply the changes. MC reboot
			 * will be handled in a usual way (MC reboot
			 * event on management event queue and adapter
			 * restart).
			 */
			rc = 0;
		} else if (rc != 0) {
			goto fail_reconfigure;
		}
	}

	sfc_adapter_unlock(sa);
	return 0;

fail_reconfigure:
	/* Remove/restore entry since the change makes the trouble */
	switch (op) {
	case SFC_UDP_TUNNEL_ADD_PORT:
		(void)efx_tunnel_config_udp_remove(sa->nic,
						   tunnel_udp->udp_port,
						   tunnel_proto);
		break;
	case SFC_UDP_TUNNEL_DEL_PORT:
		(void)efx_tunnel_config_udp_add(sa->nic,
						tunnel_udp->udp_port,
						tunnel_proto);
		break;
	}

fail_op:
fail_bad_op:
	sfc_adapter_unlock(sa);

fail_bad_proto:
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
}

static int
sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
			    struct rte_eth_udp_tunnel *tunnel_udp)
{
	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
}

static int
sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rss *rss = &sa->rss;
	struct sfc_port *port = &sa->port;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
		return -ENOTSUP;

	if (rss->channels == 0)
		return -EINVAL;

	sfc_adapter_lock(sa);

	/*
	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
	 * hence, conversion is done here to derive a correct set of ETH_RSS
	 * flags which corresponds to the active EFX configuration stored
	 * locally in 'sfc_adapter' and kept up-to-date
	 */
	rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(sa, rss->hash_types);
	rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
	if (rss_conf->rss_key != NULL)
		rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rss *rss = &sa->rss;
	struct sfc_port *port = &sa->port;
	unsigned int efx_hash_types;
	int rc = 0;

	if (port->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if ((rss_conf->rss_key != NULL) &&
	    (rss_conf->rss_key_len != sizeof(rss->key))) {
		sfc_err(sa, "RSS key size is wrong (should be %lu)",
			sizeof(rss->key));
		return -EINVAL;
	}

	sfc_adapter_lock(sa);

	rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
	if (rc != 0)
		goto fail_rx_hf_rte_to_efx;

	rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
				   rss->hash_alg, efx_hash_types, B_TRUE);
	if (rc != 0)
		goto fail_scale_mode_set;

	if (rss_conf->rss_key != NULL) {
		if (sa->state == SFC_ADAPTER_STARTED) {
			rc = efx_rx_scale_key_set(sa->nic,
						  EFX_RSS_CONTEXT_DEFAULT,
						  rss_conf->rss_key,
						  sizeof(rss->key));
			if (rc != 0)
				goto fail_scale_key_set;
		}

		rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
	}

	rss->hash_types = efx_hash_types;

	sfc_adapter_unlock(sa);

	return 0;

fail_scale_key_set:
	if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
				  EFX_RX_HASHALG_TOEPLITZ,
				  rss->hash_types, B_TRUE) != 0)
		sfc_err(sa, "failed to restore RSS mode");

fail_scale_mode_set:
fail_rx_hf_rte_to_efx:
	sfc_adapter_unlock(sa);
	return -rc;
}

static int
sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rss *rss = &sa->rss;
	struct sfc_port *port = &sa->port;
	int entry;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
		return -ENOTSUP;

	if (rss->channels == 0)
		return -EINVAL;

	if (reta_size != EFX_RSS_TBL_SIZE)
		return -EINVAL;

	sfc_adapter_lock(sa);

	for (entry = 0; entry < reta_size; entry++) {
		int grp = entry / RTE_RETA_GROUP_SIZE;
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;

		if ((reta_conf[grp].mask >> grp_idx) & 1)
			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
	}

	sfc_adapter_unlock(sa);

	return 0;
}

static int
sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct sfc_rss *rss = &sa->rss;
	struct sfc_port *port = &sa->port;
	unsigned int *rss_tbl_new;
	uint16_t entry;
	int rc = 0;

	if (port->isolated)
		return -ENOTSUP;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		sfc_err(sa, "RSS is not available");
		return -ENOTSUP;
	}

	if (rss->channels == 0) {
		sfc_err(sa, "RSS is not configured");
		return -EINVAL;
	}

	if (reta_size != EFX_RSS_TBL_SIZE) {
		sfc_err(sa, "RETA size is wrong (should be %u)",
			EFX_RSS_TBL_SIZE);
		return -EINVAL;
	}

	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
	if (rss_tbl_new == NULL)
		return -ENOMEM;

	sfc_adapter_lock(sa);

	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));

	for (entry = 0; entry < reta_size; entry++) {
		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
		struct rte_eth_rss_reta_entry64 *grp;

		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];

		if (grp->mask & (1ull << grp_idx)) {
			if (grp->reta[grp_idx] >= rss->channels) {
				rc = EINVAL;
				goto bad_reta_entry;
			}
			rss_tbl_new[entry] = grp->reta[grp_idx];
		}
	}

	if (sa->state == SFC_ADAPTER_STARTED) {
		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss_tbl_new, EFX_RSS_TBL_SIZE);
		if (rc != 0)
			goto fail_scale_tbl_set;
	}

	rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));

fail_scale_tbl_set:
bad_reta_entry:
	sfc_adapter_unlock(sa);

	rte_free(rss_tbl_new);

	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
		    enum rte_filter_op filter_op,
		    void *arg)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	int rc = ENOTSUP;

	sfc_log_init(sa, "entry");

	switch (filter_type) {
	case RTE_ETH_FILTER_NONE:
		sfc_err(sa, "Global filters configuration not supported");
		break;
	case RTE_ETH_FILTER_MACVLAN:
		sfc_err(sa, "MACVLAN filters not supported");
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		sfc_err(sa, "EtherType filters not supported");
		break;
	case RTE_ETH_FILTER_FLEXIBLE:
		sfc_err(sa, "Flexible filters not supported");
		break;
	case RTE_ETH_FILTER_SYN:
		sfc_err(sa, "SYN filters not supported");
		break;
	case RTE_ETH_FILTER_NTUPLE:
		sfc_err(sa, "NTUPLE filters not supported");
		break;
	case RTE_ETH_FILTER_TUNNEL:
		sfc_err(sa, "Tunnel filters not supported");
		break;
	case RTE_ETH_FILTER_FDIR:
		sfc_err(sa, "Flow Director filters not supported");
		break;
	case RTE_ETH_FILTER_HASH:
		sfc_err(sa, "Hash filters not supported");
		break;
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET) {
			rc = EINVAL;
		} else {
			*(const void **)arg = &sfc_flow_ops;
			rc = 0;
		}
		break;
	default:
		sfc_err(sa, "Unknown filter type %u", filter_type);
		break;
	}

	sfc_log_init(sa, "exit: %d", -rc);
	SFC_ASSERT(rc >= 0);
	return -rc;
}

static int
sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	/*
	 * If Rx datapath does not provide callback to check mempool,
	 * all pools are supported.
	 */
	if (sa->dp_rx->pool_ops_supported == NULL)
		return 1;

	return sa->dp_rx->pool_ops_supported(pool);
}

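/* ethdev operations exposed by the primary process */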
static const struct eth_dev_ops sfc_eth_dev_ops = {
	.dev_configure			= sfc_dev_configure,
	.dev_start			= sfc_dev_start,
	.dev_stop			= sfc_dev_stop,
	.dev_set_link_up		= sfc_dev_set_link_up,
	.dev_set_link_down		= sfc_dev_set_link_down,
	.dev_close			= sfc_dev_close,
	.promiscuous_enable		= sfc_dev_promisc_enable,
	.promiscuous_disable		= sfc_dev_promisc_disable,
	.allmulticast_enable		= sfc_dev_allmulti_enable,
	.allmulticast_disable		= sfc_dev_allmulti_disable,
	.link_update			= sfc_dev_link_update,
	.stats_get			= sfc_stats_get,
	.stats_reset			= sfc_stats_reset,
	.xstats_get			= sfc_xstats_get,
	.xstats_reset			= sfc_stats_reset,
	.xstats_get_names		= sfc_xstats_get_names,
	.dev_infos_get			= sfc_dev_infos_get,
	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
	.mtu_set			= sfc_dev_set_mtu,
	.rx_queue_start			= sfc_rx_queue_start,
	.rx_queue_stop			= sfc_rx_queue_stop,
	.tx_queue_start			= sfc_tx_queue_start,
	.tx_queue_stop			= sfc_tx_queue_stop,
	.rx_queue_setup			= sfc_rx_queue_setup,
	.rx_queue_release		= sfc_rx_queue_release,
	.rx_queue_count			= sfc_rx_queue_count,
	.rx_descriptor_done		= sfc_rx_descriptor_done,
	.rx_descriptor_status		= sfc_rx_descriptor_status,
	.tx_descriptor_status		= sfc_tx_descriptor_status,
	.tx_queue_setup			= sfc_tx_queue_setup,
	.tx_queue_release		= sfc_tx_queue_release,
	.flow_ctrl_get			= sfc_flow_ctrl_get,
	.flow_ctrl_set			= sfc_flow_ctrl_set,
	.mac_addr_set			= sfc_mac_addr_set,
	.udp_tunnel_port_add		= sfc_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del		= sfc_dev_udp_tunnel_port_del,
	.reta_update			= sfc_dev_rss_reta_update,
	.reta_query			= sfc_dev_rss_reta_query,
	.rss_hash_update		= sfc_dev_rss_hash_update,
	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
	.filter_ctrl			= sfc_dev_filter_ctrl,
	.set_mc_addr_list		= sfc_set_mc_addr_list,
	.rxq_info_get			= sfc_rx_queue_info_get,
	.txq_info_get			= sfc_tx_queue_info_get,
	.fw_version_get			= sfc_fw_version_get,
	.xstats_get_by_id		= sfc_xstats_get_by_id,
	.xstats_get_names_by_id		= sfc_xstats_get_names_by_id,
	.pool_ops_supported		= sfc_pool_ops_supported,
};

/*
 * Duplicate a string in potentially shared memory required for
 * multi-process support.
 *
 * strdup() allocates from process-local heap/memory.
 */
static char *
sfc_strdup(const char *str)
{
	size_t size;
	char *copy;

	if (str == NULL)
		return NULL;

	size = strlen(str) + 1;
	copy = rte_malloc(__func__, size, 0);
	if (copy == NULL)
		return NULL;

	rte_memcpy(copy, str, size);

	return copy;
}

static int
sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	const efx_nic_cfg_t *encp;
	unsigned int avail_caps = 0;
	const char *rx_name = NULL;
	const char *tx_name = NULL;
	int rc;

	switch (sa->family) {
	case EFX_FAMILY_HUNTINGTON:
	case EFX_FAMILY_MEDFORD:
	case EFX_FAMILY_MEDFORD2:
		avail_caps |= SFC_DP_HW_FW_CAP_EF10;
		break;
	default:
		break;
	}

	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_rx_es_super_buffer_supported)
		avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
				sfc_kvarg_string_handler, &rx_name);
	if (rc != 0)
		goto fail_kvarg_rx_datapath;

	if (rx_name != NULL) {
		sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
		if (sa->dp_rx == NULL) {
			sfc_err(sa, "Rx datapath %s not found", rx_name);
			rc = ENOENT;
			goto fail_dp_rx;
		}
		if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Rx datapath %s",
				rx_name);
			rc = EINVAL;
			goto fail_dp_rx_caps;
		}
	} else {
		sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
		if (sa->dp_rx == NULL) {
			sfc_err(sa, "Rx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_rx;
		}
	}

	sa->dp_rx_name = sfc_strdup(sa->dp_rx->dp.name);
	if (sa->dp_rx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_rx_name;
	}

	sfc_notice(sa, "use %s Rx datapath", sa->dp_rx_name);

	dev->rx_pkt_burst = sa->dp_rx->pkt_burst;

	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
				sfc_kvarg_string_handler, &tx_name);
	if (rc != 0)
		goto fail_kvarg_tx_datapath;

	if (tx_name != NULL) {
		sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
		if (sa->dp_tx == NULL) {
			sfc_err(sa, "Tx datapath %s not found", tx_name);
			rc = ENOENT;
			goto fail_dp_tx;
		}
		if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) {
			sfc_err(sa,
				"Insufficient Hw/FW capabilities to use Tx datapath %s",
				tx_name);
			rc = EINVAL;
			goto fail_dp_tx_caps;
		}
	} else {
		sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
		if (sa->dp_tx == NULL) {
			sfc_err(sa, "Tx datapath by caps %#x not found",
				avail_caps);
			rc = ENOENT;
			goto fail_dp_tx;
		}
	}

	sa->dp_tx_name = sfc_strdup(sa->dp_tx->dp.name);
	if (sa->dp_tx_name == NULL) {
		rc = ENOMEM;
		goto fail_dp_tx_name;
	}

	sfc_notice(sa, "use %s Tx datapath", sa->dp_tx_name);

	dev->tx_pkt_burst = sa->dp_tx->pkt_burst;

	dev->dev_ops = &sfc_eth_dev_ops;

	return 0;

fail_dp_tx_name:
fail_dp_tx_caps:
fail_dp_tx:
fail_kvarg_tx_datapath:
	rte_free(sa->dp_rx_name);
	sa->dp_rx_name = NULL;

fail_dp_rx_name:
fail_dp_rx_caps:
fail_dp_rx:
fail_kvarg_rx_datapath:
	return rc;
}

static void
sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	rte_free(sa->dp_tx_name);
	sa->dp_tx_name = NULL;
	sa->dp_tx = NULL;

	rte_free(sa->dp_rx_name);
	sa->dp_rx_name = NULL;
	sa->dp_rx = NULL;
}

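/*
 * Reduced set of ops for a secondary process: only callbacks which
 * rely exclusively on data in shared memory are safe to expose.
 */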
static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
	.rxq_info_get			= sfc_rx_queue_info_get,
	.txq_info_get			= sfc_tx_queue_info_get,
};

static int
sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
{
	/*
	 * Device private data has really many process-local pointers.
	 * Below code should be extremely careful to use data located
	 * in shared memory only.
	 */
	struct sfc_adapter *sa = dev->data->dev_private;
	const struct sfc_dp_rx *dp_rx;
	const struct sfc_dp_tx *dp_tx;
	int rc;

	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name);
	if (dp_rx == NULL) {
		sfc_err(sa, "cannot find %s Rx datapath", sa->dp_rx_name);
		rc = ENOENT;
		goto fail_dp_rx;
	}
	if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
		sfc_err(sa, "%s Rx datapath does not support multi-process",
			sa->dp_rx_name);
		rc = EINVAL;
		goto fail_dp_rx_multi_process;
	}

	dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name);
	if (dp_tx == NULL) {
		sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name);
		rc = ENOENT;
		goto fail_dp_tx;
	}
	if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
		sfc_err(sa, "%s Tx datapath does not support multi-process",
			sa->dp_tx_name);
		rc = EINVAL;
		goto fail_dp_tx_multi_process;
	}

	dev->rx_pkt_burst = dp_rx->pkt_burst;
	dev->tx_pkt_burst = dp_tx->pkt_burst;
	dev->dev_ops = &sfc_eth_dev_secondary_ops;

	return 0;

fail_dp_tx_multi_process:
fail_dp_tx:
fail_dp_rx_multi_process:
fail_dp_rx:
	return rc;
}

static void
sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
{
	dev->dev_ops = NULL;
	dev->tx_pkt_burst = NULL;
	dev->rx_pkt_burst = NULL;
}

static void
sfc_register_dp(void)
{
	/* Register once */
	if (TAILQ_EMPTY(&sfc_dp_head)) {
		/* Prefer EF10 datapath */
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);

		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
		sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
	}
}

static int
sfc_eth_dev_init(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa = dev->data->dev_private;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	int rc;
	const efx_nic_cfg_t *encp;
	const struct ether_addr *from;

	sfc_register_dp();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -sfc_eth_dev_secondary_set_ops(dev);

	/* Required for logging */
	sa->pci_addr = pci_dev->addr;
	sa->port_id = dev->data->port_id;

	sa->eth_dev = dev;

	/* Copy PCI device info to the dev->data */
	rte_eth_copy_pci_info(dev, pci_dev);

	sa->logtype_main = sfc_register_logtype(sa, SFC_LOGTYPE_MAIN_STR,
						RTE_LOG_NOTICE);

	rc = sfc_kvargs_parse(sa);
	if (rc != 0)
		goto fail_kvargs_parse;

	sfc_log_init(sa, "entry");

	dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
	if (dev->data->mac_addrs == NULL) {
		rc = ENOMEM;
		goto fail_mac_addrs;
	}

	sfc_adapter_lock_init(sa);
	sfc_adapter_lock(sa);

	sfc_log_init(sa, "probing");
	rc = sfc_probe(sa);
	if (rc != 0)
		goto fail_probe;

	sfc_log_init(sa, "set device ops");
	rc = sfc_eth_dev_set_ops(dev);
	if (rc != 0)
		goto fail_set_ops;

	sfc_log_init(sa, "attaching");
	rc = sfc_attach(sa);
	if (rc != 0)
		goto fail_attach;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * The arguments are really reverse order in comparison to
	 * Linux kernel. Copy from NIC config to Ethernet device data.
	 */
	from = (const struct ether_addr *)(encp->enc_mac_addr);
	ether_addr_copy(from, &dev->data->mac_addrs[0]);

	sfc_adapter_unlock(sa);

	sfc_log_init(sa, "done");
	return 0;

fail_attach:
	sfc_eth_dev_clear_ops(dev);

fail_set_ops:
	sfc_unprobe(sa);

fail_probe:
	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);
	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

fail_mac_addrs:
	sfc_kvargs_cleanup(sa);

fail_kvargs_parse:
	sfc_log_init(sa, "failed %d", rc);
	SFC_ASSERT(rc > 0);
	return -rc;
}

static int
sfc_eth_dev_uninit(struct rte_eth_dev *dev)
{
	struct sfc_adapter *sa;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		sfc_eth_dev_secondary_clear_ops(dev);
		return 0;
	}

	sa = dev->data->dev_private;
	sfc_log_init(sa, "entry");

	sfc_adapter_lock(sa);

	sfc_eth_dev_clear_ops(dev);

	sfc_detach(sa);
	sfc_unprobe(sa);

	rte_free(dev->data->mac_addrs);
	dev->data->mac_addrs = NULL;

	sfc_kvargs_cleanup(sa);

	sfc_adapter_unlock(sa);
	sfc_adapter_lock_fini(sa);

	sfc_log_init(sa, "done");

	/* Required for logging, so cleanup last */
	sa->eth_dev = NULL;

	return 0;
}

static const struct rte_pci_id pci_id_sfc_efx_map[] = {
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
	{ .vendor_id = 0 /* sentinel */ }
};

static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct sfc_adapter), sfc_eth_dev_init);
}

static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
}

static struct rte_pci_driver sfc_efx_pmd = {
	.id_table = pci_id_sfc_efx_map,
	.drv_flags =
		RTE_PCI_DRV_INTR_LSC |
		RTE_PCI_DRV_NEED_MAPPING,
	.probe = sfc_eth_dev_pci_probe,
	.remove = sfc_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
	SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
	SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
	SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");

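/*
 * Illustrative devargs usage (the PCI address below is hypothetical):
 * datapaths can be selected explicitly on the EAL command line, e.g.
 *   -w 0000:01:00.0,rx_datapath=ef10,tx_datapath=ef10
 */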
RTE_INIT(sfc_driver_register_logtype);
static void
sfc_driver_register_logtype(void)
{
	int ret;

	ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
						   RTE_LOG_NOTICE);
	sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
}