1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
11 #include <rte_ethdev_driver.h>
12 #include <rte_ethdev_pci.h>
14 #include <rte_bus_pci.h>
15 #include <rte_errno.h>
16 #include <rte_string_fns.h>
21 #include "sfc_debug.h"
23 #include "sfc_kvargs.h"
29 #include "sfc_dp_rx.h"
31 uint32_t sfc_logtype_driver;
33 static struct sfc_dp_list sfc_dp_head =
34 TAILQ_HEAD_INITIALIZER(sfc_dp_head);
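/*
 * fw_version_get callback: report the management controller firmware
 * version and, when valid, the Rx/Tx DPCPU firmware IDs. If the caller's
 * buffer is too small, the required length is returned instead of 0.
 */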
37 sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
39 struct sfc_adapter *sa = dev->data->dev_private;
40 efx_nic_fw_info_t enfi;
45 * The return value of the callback is supposed to be non-negative;
46 * nevertheless, if an error occurs, it is desirable to pass it
47 * to the caller
49 if ((fw_version == NULL) || (fw_size == 0))
52 rc = efx_nic_get_fw_version(sa->nic, &enfi);
56 ret = snprintf(fw_version, fw_size,
57 "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
58 enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
59 enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
63 if (enfi.enfi_dpcpu_fw_ids_valid) {
64 size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
67 ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
68 fw_size - dpcpu_fw_ids_offset,
69 " rx%" PRIx16 " tx%" PRIx16,
70 enfi.enfi_rx_dpcpu_fw_id,
71 enfi.enfi_tx_dpcpu_fw_id);
78 if (fw_size < (size_t)(++ret))
85 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
87 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
88 struct sfc_adapter *sa = dev->data->dev_private;
89 struct sfc_rss *rss = &sa->rss;
90 uint64_t txq_offloads_def = 0;
92 sfc_log_init(sa, "entry");
94 dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
96 /* Autonegotiation may be disabled */
97 dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
98 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
99 dev_info->speed_capa |= ETH_LINK_SPEED_1G;
100 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
101 dev_info->speed_capa |= ETH_LINK_SPEED_10G;
102 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_25000FDX)
103 dev_info->speed_capa |= ETH_LINK_SPEED_25G;
104 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
105 dev_info->speed_capa |= ETH_LINK_SPEED_40G;
106 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_50000FDX)
107 dev_info->speed_capa |= ETH_LINK_SPEED_50G;
108 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_100000FDX)
109 dev_info->speed_capa |= ETH_LINK_SPEED_100G;
111 dev_info->max_rx_queues = sa->rxq_max;
112 dev_info->max_tx_queues = sa->txq_max;
114 /* By default packets are dropped if no descriptors are available */
115 dev_info->default_rxconf.rx_drop_en = 1;
117 dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);
120 * rx_offload_capa includes both device and queue offloads since
121 * the latter may be requested on a per-device basis, which makes
122 * sense when some offloads need to be enabled on all queues.
124 dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
125 dev_info->rx_queue_offload_capa;
127 dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);
130 * tx_offload_capa includes both device and queue offloads since
131 * the latter may be requested on a per-device basis, which makes
132 * sense when some offloads need to be enabled on all queues.
134 dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
135 dev_info->tx_queue_offload_capa;
137 if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
138 txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
140 dev_info->default_txconf.offloads |= txq_offloads_def;
142 if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
146 for (i = 0; i < rss->hf_map_nb_entries; ++i)
147 rte_hf |= rss->hf_map[i].rte;
149 dev_info->reta_size = EFX_RSS_TBL_SIZE;
150 dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
151 dev_info->flow_type_rss_offloads = rte_hf;
154 /* Initialize to hardware limits */
155 dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
156 dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
157 /* The RXQ hardware requires that the descriptor count is a power
158 * of 2, but rx_desc_lim cannot properly describe that constraint.
160 dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
162 /* Initialize to hardware limits */
163 dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
164 dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
166 * The TXQ hardware requires that the descriptor count is a power
167 * of 2, but tx_desc_lim cannot properly describe that constraint
169 dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
171 if (sap->dp_rx->get_dev_info != NULL)
172 sap->dp_rx->get_dev_info(dev_info);
173 if (sap->dp_tx->get_dev_info != NULL)
174 sap->dp_tx->get_dev_info(dev_info);
176 dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
177 RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
180 static const uint32_t *
181 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
183 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
184 struct sfc_adapter *sa = dev->data->dev_private;
185 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
186 uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported;
188 return sap->dp_rx->supported_ptypes_get(tunnel_encaps);
192 sfc_dev_configure(struct rte_eth_dev *dev)
194 struct rte_eth_dev_data *dev_data = dev->data;
195 struct sfc_adapter *sa = dev_data->dev_private;
198 sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
199 dev_data->nb_rx_queues, dev_data->nb_tx_queues);
201 sfc_adapter_lock(sa);
203 case SFC_ADAPTER_CONFIGURED:
205 case SFC_ADAPTER_INITIALIZED:
206 rc = sfc_configure(sa);
209 sfc_err(sa, "unexpected adapter state %u to configure",
214 sfc_adapter_unlock(sa);
216 sfc_log_init(sa, "done %d", rc);
222 sfc_dev_start(struct rte_eth_dev *dev)
224 struct sfc_adapter *sa = dev->data->dev_private;
227 sfc_log_init(sa, "entry");
229 sfc_adapter_lock(sa);
231 sfc_adapter_unlock(sa);
233 sfc_log_init(sa, "done %d", rc);
239 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
241 struct sfc_adapter *sa = dev->data->dev_private;
242 struct rte_eth_link current_link;
245 sfc_log_init(sa, "entry");
247 if (sa->state != SFC_ADAPTER_STARTED) {
248 sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
249 } else if (wait_to_complete) {
250 efx_link_mode_t link_mode;
252 if (efx_port_poll(sa->nic, &link_mode) != 0)
253 link_mode = EFX_LINK_UNKNOWN;
254 sfc_port_link_mode_to_info(link_mode, &current_link);
257 sfc_ev_mgmt_qpoll(sa);
258 rte_eth_linkstatus_get(dev, &current_link);
261 ret = rte_eth_linkstatus_set(dev, &current_link);
263 sfc_notice(sa, "Link status is %s",
264 current_link.link_status ? "UP" : "DOWN");
270 sfc_dev_stop(struct rte_eth_dev *dev)
272 struct sfc_adapter *sa = dev->data->dev_private;
274 sfc_log_init(sa, "entry");
276 sfc_adapter_lock(sa);
278 sfc_adapter_unlock(sa);
280 sfc_log_init(sa, "done");
284 sfc_dev_set_link_up(struct rte_eth_dev *dev)
286 struct sfc_adapter *sa = dev->data->dev_private;
289 sfc_log_init(sa, "entry");
291 sfc_adapter_lock(sa);
293 sfc_adapter_unlock(sa);
300 sfc_dev_set_link_down(struct rte_eth_dev *dev)
302 struct sfc_adapter *sa = dev->data->dev_private;
304 sfc_log_init(sa, "entry");
306 sfc_adapter_lock(sa);
308 sfc_adapter_unlock(sa);
314 sfc_dev_close(struct rte_eth_dev *dev)
316 struct sfc_adapter *sa = dev->data->dev_private;
318 sfc_log_init(sa, "entry");
320 sfc_adapter_lock(sa);
322 case SFC_ADAPTER_STARTED:
324 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
326 case SFC_ADAPTER_CONFIGURED:
328 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
330 case SFC_ADAPTER_INITIALIZED:
333 sfc_err(sa, "unexpected adapter state %u on close", sa->state);
336 sfc_adapter_unlock(sa);
338 sfc_log_init(sa, "done");
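/*
 * Common helper behind the promiscuous and all-multicast callbacks. The
 * requested value is remembered in the port data; the hardware Rx mode is
 * only updated immediately when the adapter is started and isolated mode
 * is not active, otherwise the change is applied on the next start.
 */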
342 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
345 struct sfc_port *port;
347 struct sfc_adapter *sa = dev->data->dev_private;
348 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
349 const char *desc = (allmulti) ? "all-multi" : "promiscuous";
351 sfc_adapter_lock(sa);
354 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
356 if (*toggle != enabled) {
359 if (port->isolated) {
360 sfc_warn(sa, "isolated mode is active on the port");
361 sfc_warn(sa, "the change is to be applied on the next "
362 "start provided that isolated mode is "
363 "disabled prior the next start");
364 } else if ((sa->state == SFC_ADAPTER_STARTED) &&
365 (sfc_set_rx_mode(sa) != 0)) {
366 *toggle = !(enabled);
367 sfc_warn(sa, "Failed to %s %s mode",
368 ((enabled) ? "enable" : "disable"), desc);
372 sfc_adapter_unlock(sa);
376 sfc_dev_promisc_enable(struct rte_eth_dev *dev)
378 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
382 sfc_dev_promisc_disable(struct rte_eth_dev *dev)
384 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
388 sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
390 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
394 sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
396 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
400 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
401 uint16_t nb_rx_desc, unsigned int socket_id,
402 const struct rte_eth_rxconf *rx_conf,
403 struct rte_mempool *mb_pool)
405 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
406 struct sfc_adapter *sa = dev->data->dev_private;
409 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
410 rx_queue_id, nb_rx_desc, socket_id);
412 sfc_adapter_lock(sa);
414 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
419 dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;
421 sfc_adapter_unlock(sa);
426 sfc_adapter_unlock(sa);
432 sfc_rx_queue_release(void *queue)
434 struct sfc_dp_rxq *dp_rxq = queue;
436 struct sfc_adapter *sa;
437 unsigned int sw_index;
442 rxq = sfc_rxq_by_dp_rxq(dp_rxq);
444 sfc_adapter_lock(sa);
446 sw_index = dp_rxq->dpq.queue_id;
448 sfc_log_init(sa, "RxQ=%u", sw_index);
450 sfc_rx_qfini(sa, sw_index);
452 sfc_adapter_unlock(sa);
456 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
457 uint16_t nb_tx_desc, unsigned int socket_id,
458 const struct rte_eth_txconf *tx_conf)
460 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
461 struct sfc_adapter *sa = dev->data->dev_private;
464 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
465 tx_queue_id, nb_tx_desc, socket_id);
467 sfc_adapter_lock(sa);
469 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
473 dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp;
475 sfc_adapter_unlock(sa);
479 sfc_adapter_unlock(sa);
485 sfc_tx_queue_release(void *queue)
487 struct sfc_dp_txq *dp_txq = queue;
489 unsigned int sw_index;
490 struct sfc_adapter *sa;
495 txq = sfc_txq_by_dp_txq(dp_txq);
496 sw_index = dp_txq->dpq.queue_id;
498 SFC_ASSERT(txq->evq != NULL);
501 sfc_log_init(sa, "TxQ = %u", sw_index);
503 sfc_adapter_lock(sa);
505 sfc_tx_qfini(sa, sw_index);
507 sfc_adapter_unlock(sa);
511 * Some statistics are computed as A - B where A and B each increase
512 * monotonically with some hardware counter(s) and the counters are read asynchronously.
515 * If packet X is counted in A, but not counted in B yet, the computed value is greater than real.
518 * If packet X is not counted in A at the moment of reading the counter,
519 * but counted in B at the moment of reading the counter, the computed value is less than real.
522 * However, a counter which grows backward is a worse evil than a slightly
523 * wrong value, so let's try to guarantee that it never happens, except
524 * possibly when the MAC stats are zeroed as a result of a NIC reset.
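 *
 * For example: if the previously reported value is 100 and a racy read
 * yields 98, (int64_t)(98 - 100) is negative, so the reported statistic
 * stays at 100; a later read of 105 moves it forward, and 0 is accepted
 * unconditionally to follow a MAC stats reset.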
527 sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
529 if ((int64_t)(newval - *stat) > 0 || newval == 0)
534 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
536 struct sfc_adapter *sa = dev->data->dev_private;
537 struct sfc_port *port = &sa->port;
541 rte_spinlock_lock(&port->mac_stats_lock);
543 ret = sfc_port_update_mac_stats(sa);
547 mac_stats = port->mac_stats_buf;
549 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
550 EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
552 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
553 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
554 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
556 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
557 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
558 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
560 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
561 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
562 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
564 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
565 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
566 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
567 stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
568 stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
570 stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
571 stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
572 stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
574 * Take into account stats which may or may not be supported
575 * on EF10 depending on the firmware variant or HW revision;
576 * any stat that is not supported is guaranteed
577 * to be zero in mac_stats.
580 mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
581 mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
582 mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
583 mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
584 mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
585 mac_stats[EFX_MAC_PM_TRUNC_QBB] +
586 mac_stats[EFX_MAC_PM_DISCARD_QBB] +
587 mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
588 mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
589 mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
591 mac_stats[EFX_MAC_RX_FCS_ERRORS] +
592 mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
593 mac_stats[EFX_MAC_RX_JABBER_PKTS];
594 /* no oerrors counters supported on EF10 */
596 /* Exclude missed, errors and pauses from Rx packets */
597 sfc_update_diff_stat(&port->ipackets,
598 mac_stats[EFX_MAC_RX_PKTS] -
599 mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
600 stats->imissed - stats->ierrors);
601 stats->ipackets = port->ipackets;
605 rte_spinlock_unlock(&port->mac_stats_lock);
606 SFC_ASSERT(ret >= 0);
611 sfc_stats_reset(struct rte_eth_dev *dev)
613 struct sfc_adapter *sa = dev->data->dev_private;
614 struct sfc_port *port = &sa->port;
617 if (sa->state != SFC_ADAPTER_STARTED) {
619 * The operation cannot be done if the port is not started; it
620 * will be scheduled to be done during the next port start
622 port->mac_stats_reset_pending = B_TRUE;
626 rc = sfc_port_reset_mac_stats(sa);
628 sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
632 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
633 unsigned int xstats_count)
635 struct sfc_adapter *sa = dev->data->dev_private;
636 struct sfc_port *port = &sa->port;
642 rte_spinlock_lock(&port->mac_stats_lock);
644 rc = sfc_port_update_mac_stats(sa);
651 mac_stats = port->mac_stats_buf;
653 for (i = 0; i < EFX_MAC_NSTATS; ++i) {
654 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
655 if (xstats != NULL && nstats < (int)xstats_count) {
656 xstats[nstats].id = nstats;
657 xstats[nstats].value = mac_stats[i];
664 rte_spinlock_unlock(&port->mac_stats_lock);
670 sfc_xstats_get_names(struct rte_eth_dev *dev,
671 struct rte_eth_xstat_name *xstats_names,
672 unsigned int xstats_count)
674 struct sfc_adapter *sa = dev->data->dev_private;
675 struct sfc_port *port = &sa->port;
677 unsigned int nstats = 0;
679 for (i = 0; i < EFX_MAC_NSTATS; ++i) {
680 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
681 if (xstats_names != NULL && nstats < xstats_count)
682 strlcpy(xstats_names[nstats].name,
683 efx_mac_stat_name(sa->nic, i),
684 sizeof(xstats_names[0].name));
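/*
 * xstats_get_by_id: per the ethdev convention, a NULL ids array means
 * "all supported statistics in order"; if the values array is missing or
 * too small for that case, the number of supported MAC statistics is
 * returned so that the caller can allocate a large enough buffer.
 */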
693 sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
694 uint64_t *values, unsigned int n)
696 struct sfc_adapter *sa = dev->data->dev_private;
697 struct sfc_port *port = &sa->port;
699 unsigned int nb_supported = 0;
700 unsigned int nb_written = 0;
705 if (unlikely(values == NULL) ||
706 unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
707 return port->mac_stats_nb_supported;
709 rte_spinlock_lock(&port->mac_stats_lock);
711 rc = sfc_port_update_mac_stats(sa);
718 mac_stats = port->mac_stats_buf;
720 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
721 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
724 if ((ids == NULL) || (ids[nb_written] == nb_supported))
725 values[nb_written++] = mac_stats[i];
733 rte_spinlock_unlock(&port->mac_stats_lock);
739 sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
740 struct rte_eth_xstat_name *xstats_names,
741 const uint64_t *ids, unsigned int size)
743 struct sfc_adapter *sa = dev->data->dev_private;
744 struct sfc_port *port = &sa->port;
745 unsigned int nb_supported = 0;
746 unsigned int nb_written = 0;
749 if (unlikely(xstats_names == NULL) ||
750 unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
751 return port->mac_stats_nb_supported;
753 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
754 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
757 if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
758 char *name = xstats_names[nb_written++].name;
760 strlcpy(name, efx_mac_stat_name(sa->nic, i),
761 sizeof(xstats_names[0].name));
771 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
773 struct sfc_adapter *sa = dev->data->dev_private;
774 unsigned int wanted_fc, link_fc;
776 memset(fc_conf, 0, sizeof(*fc_conf));
778 sfc_adapter_lock(sa);
780 if (sa->state == SFC_ADAPTER_STARTED)
781 efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
783 link_fc = sa->port.flow_ctrl;
787 fc_conf->mode = RTE_FC_NONE;
789 case EFX_FCNTL_RESPOND:
790 fc_conf->mode = RTE_FC_RX_PAUSE;
792 case EFX_FCNTL_GENERATE:
793 fc_conf->mode = RTE_FC_TX_PAUSE;
795 case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
796 fc_conf->mode = RTE_FC_FULL;
799 sfc_err(sa, "%s: unexpected flow control value %#x",
803 fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
805 sfc_adapter_unlock(sa);
811 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
813 struct sfc_adapter *sa = dev->data->dev_private;
814 struct sfc_port *port = &sa->port;
818 if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
819 fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
820 fc_conf->mac_ctrl_frame_fwd != 0) {
821 sfc_err(sa, "unsupported flow control settings specified");
826 switch (fc_conf->mode) {
830 case RTE_FC_RX_PAUSE:
831 fcntl = EFX_FCNTL_RESPOND;
833 case RTE_FC_TX_PAUSE:
834 fcntl = EFX_FCNTL_GENERATE;
837 fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
844 sfc_adapter_lock(sa);
846 if (sa->state == SFC_ADAPTER_STARTED) {
847 rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
849 goto fail_mac_fcntl_set;
852 port->flow_ctrl = fcntl;
853 port->flow_ctrl_autoneg = fc_conf->autoneg;
855 sfc_adapter_unlock(sa);
860 sfc_adapter_unlock(sa);
867 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
869 struct sfc_adapter *sa = dev->data->dev_private;
870 size_t pdu = EFX_MAC_PDU(mtu);
874 sfc_log_init(sa, "mtu=%u", mtu);
877 if (pdu < EFX_MAC_PDU_MIN) {
878 sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
879 (unsigned int)mtu, (unsigned int)pdu,
883 if (pdu > EFX_MAC_PDU_MAX) {
884 sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
885 (unsigned int)mtu, (unsigned int)pdu,
890 sfc_adapter_lock(sa);
892 if (pdu != sa->port.pdu) {
893 if (sa->state == SFC_ADAPTER_STARTED) {
896 old_pdu = sa->port.pdu;
907 * The driver does not use it, but other PMDs update jumbo frame
908 * flag and max_rx_pkt_len when MTU is set.
910 if (mtu > ETHER_MAX_LEN) {
911 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
912 rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
915 dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
917 sfc_adapter_unlock(sa);
919 sfc_log_init(sa, "done");
923 sa->port.pdu = old_pdu;
924 if (sfc_start(sa) != 0)
925 sfc_err(sa, "cannot start with either new (%u) or old (%u) "
926 "PDU max size - port is stopped",
927 (unsigned int)pdu, (unsigned int)old_pdu);
928 sfc_adapter_unlock(sa);
931 sfc_log_init(sa, "failed %d", rc);
936 sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
938 struct sfc_adapter *sa = dev->data->dev_private;
939 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
940 struct sfc_port *port = &sa->port;
941 struct ether_addr *old_addr = &dev->data->mac_addrs[0];
944 sfc_adapter_lock(sa);
947 * Copy the address to the device private data so that
948 * it can be restored in the case of adapter restart.
950 ether_addr_copy(mac_addr, &port->default_mac_addr);
953 * Neither of the two following checks can return
954 * an error. The new MAC address is preserved in
955 * the device private data and can be activated
956 * on the next port start if the user prevents
957 * isolated mode from being enabled.
959 if (port->isolated) {
960 sfc_warn(sa, "isolated mode is active on the port");
961 sfc_warn(sa, "will not set MAC address");
965 if (sa->state != SFC_ADAPTER_STARTED) {
966 sfc_notice(sa, "the port is not started");
967 sfc_notice(sa, "the new MAC address will be set on port start");
972 if (encp->enc_allow_set_mac_with_installed_filters) {
973 rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
975 sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
980 * Changing the MAC address by means of MCDI request
981 * has no effect on received traffic, therefore
982 * we also need to update unicast filters
984 rc = sfc_set_rx_mode(sa);
986 sfc_err(sa, "cannot set filter (rc = %u)", rc);
987 /* Rollback the old address */
988 (void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
989 (void)sfc_set_rx_mode(sa);
992 sfc_warn(sa, "cannot set MAC address with filters installed");
993 sfc_warn(sa, "adapter will be restarted to pick the new MAC");
994 sfc_warn(sa, "(some traffic may be dropped)");
997 * Since setting MAC address with filters installed is not
998 * allowed on the adapter, the new MAC address will be set
999 * by means of adapter restart. sfc_start() shall retrieve
1000 * the new address from the device private data and set it.
1005 sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
1010 ether_addr_copy(old_addr, &port->default_mac_addr);
1012 sfc_adapter_unlock(sa);
1014 SFC_ASSERT(rc >= 0);
1020 sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
1021 uint32_t nb_mc_addr)
1023 struct sfc_adapter *sa = dev->data->dev_private;
1024 struct sfc_port *port = &sa->port;
1025 uint8_t *mc_addrs = port->mcast_addrs;
1029 if (port->isolated) {
1030 sfc_err(sa, "isolated mode is active on the port");
1031 sfc_err(sa, "will not set multicast address list");
1035 if (mc_addrs == NULL)
1038 if (nb_mc_addr > port->max_mcast_addrs) {
1039 sfc_err(sa, "too many multicast addresses: %u > %u",
1040 nb_mc_addr, port->max_mcast_addrs);
1044 for (i = 0; i < nb_mc_addr; ++i) {
1045 rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
1047 mc_addrs += EFX_MAC_ADDR_LEN;
1050 port->nb_mcast_addrs = nb_mc_addr;
1052 if (sa->state != SFC_ADAPTER_STARTED)
1055 rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
1056 port->nb_mcast_addrs);
1058 sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
1060 SFC_ASSERT(rc >= 0);
1065 * The function is used by the secondary process as well. It must not
1066 * use any process-local pointers from the adapter data.
1069 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1070 struct rte_eth_rxq_info *qinfo)
1072 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1073 struct sfc_adapter *sa = dev->data->dev_private;
1074 struct sfc_rxq_info *rxq_info;
1076 sfc_adapter_lock(sa);
1078 SFC_ASSERT(rx_queue_id < sas->rxq_count);
1080 rxq_info = &sas->rxq_info[rx_queue_id];
1082 qinfo->mp = rxq_info->refill_mb_pool;
1083 qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
1084 qinfo->conf.rx_drop_en = 1;
1085 qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
1086 qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
1087 if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
1088 qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
1089 qinfo->scattered_rx = 1;
1091 qinfo->nb_desc = rxq_info->entries;
1093 sfc_adapter_unlock(sa);
1097 * The function is used by the secondary process as well. It must not
1098 * use any process-local pointers from the adapter data.
1101 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1102 struct rte_eth_txq_info *qinfo)
1104 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1105 struct sfc_adapter *sa = dev->data->dev_private;
1106 struct sfc_txq_info *txq_info;
1108 sfc_adapter_lock(sa);
1110 SFC_ASSERT(tx_queue_id < sas->txq_count);
1112 txq_info = &sas->txq_info[tx_queue_id];
1114 memset(qinfo, 0, sizeof(*qinfo));
1116 qinfo->conf.offloads = txq_info->offloads;
1117 qinfo->conf.tx_free_thresh = txq_info->free_thresh;
1118 qinfo->conf.tx_deferred_start = txq_info->deferred_start;
1119 qinfo->nb_desc = txq_info->entries;
1121 sfc_adapter_unlock(sa);
1125 * The function is used by the secondary process as well. It must not
1126 * use any process-local pointers from the adapter data.
1129 sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1131 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
1132 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1133 struct sfc_rxq_info *rxq_info;
1135 SFC_ASSERT(rx_queue_id < sas->rxq_count);
1136 rxq_info = &sas->rxq_info[rx_queue_id];
1138 if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
1141 return sap->dp_rx->qdesc_npending(rxq_info->dp);
1145 * The function is used by the secondary process as well. It must not
1146 * use any process-local pointers from the adapter data.
1149 sfc_rx_descriptor_done(void *queue, uint16_t offset)
1151 struct sfc_dp_rxq *dp_rxq = queue;
1152 const struct sfc_dp_rx *dp_rx;
1154 dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
1156 return offset < dp_rx->qdesc_npending(dp_rxq);
1160 * The function is used by the secondary process as well. It must not
1161 * use any process-local pointers from the adapter data.
1164 sfc_rx_descriptor_status(void *queue, uint16_t offset)
1166 struct sfc_dp_rxq *dp_rxq = queue;
1167 const struct sfc_dp_rx *dp_rx;
1169 dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
1171 return dp_rx->qdesc_status(dp_rxq, offset);
1175 * The function is used by the secondary process as well. It must not
1176 * use any process-local pointers from the adapter data.
1179 sfc_tx_descriptor_status(void *queue, uint16_t offset)
1181 struct sfc_dp_txq *dp_txq = queue;
1182 const struct sfc_dp_tx *dp_tx;
1184 dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);
1186 return dp_tx->qdesc_status(dp_txq, offset);
1190 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1192 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1193 struct sfc_adapter *sa = dev->data->dev_private;
1196 sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1198 sfc_adapter_lock(sa);
1201 if (sa->state != SFC_ADAPTER_STARTED)
1202 goto fail_not_started;
1204 if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
1205 goto fail_not_setup;
1207 rc = sfc_rx_qstart(sa, rx_queue_id);
1209 goto fail_rx_qstart;
1211 sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;
1213 sfc_adapter_unlock(sa);
1220 sfc_adapter_unlock(sa);
1226 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1228 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1229 struct sfc_adapter *sa = dev->data->dev_private;
1231 sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1233 sfc_adapter_lock(sa);
1234 sfc_rx_qstop(sa, rx_queue_id);
1236 sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;
1238 sfc_adapter_unlock(sa);
1244 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1246 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1247 struct sfc_adapter *sa = dev->data->dev_private;
1250 sfc_log_init(sa, "TxQ = %u", tx_queue_id);
1252 sfc_adapter_lock(sa);
1255 if (sa->state != SFC_ADAPTER_STARTED)
1256 goto fail_not_started;
1258 if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED)
1259 goto fail_not_setup;
1261 rc = sfc_tx_qstart(sa, tx_queue_id);
1263 goto fail_tx_qstart;
1265 sas->txq_info[tx_queue_id].deferred_started = B_TRUE;
1267 sfc_adapter_unlock(sa);
1274 sfc_adapter_unlock(sa);
1280 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1282 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1283 struct sfc_adapter *sa = dev->data->dev_private;
1285 sfc_log_init(sa, "TxQ = %u", tx_queue_id);
1287 sfc_adapter_lock(sa);
1289 sfc_tx_qstop(sa, tx_queue_id);
1291 sas->txq_info[tx_queue_id].deferred_started = B_FALSE;
1293 sfc_adapter_unlock(sa);
1297 static efx_tunnel_protocol_t
1298 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
1301 case RTE_TUNNEL_TYPE_VXLAN:
1302 return EFX_TUNNEL_PROTOCOL_VXLAN;
1303 case RTE_TUNNEL_TYPE_GENEVE:
1304 return EFX_TUNNEL_PROTOCOL_GENEVE;
1306 return EFX_TUNNEL_NPROTOS;
1310 enum sfc_udp_tunnel_op_e {
1311 SFC_UDP_TUNNEL_ADD_PORT,
1312 SFC_UDP_TUNNEL_DEL_PORT,
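/*
 * Common implementation of the UDP tunnel port add/delete callbacks:
 * translate the tunnel type, update the libefx UDP tunnel table and, if
 * the adapter is started, request a firmware reconfigure which may
 * trigger an MC reboot to apply the change (handled via the management
 * event queue); on failure the table change is rolled back.
 */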
1316 sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
1317 struct rte_eth_udp_tunnel *tunnel_udp,
1318 enum sfc_udp_tunnel_op_e op)
1320 struct sfc_adapter *sa = dev->data->dev_private;
1321 efx_tunnel_protocol_t tunnel_proto;
1324 sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
1325 (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
1326 (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
1327 tunnel_udp->udp_port, tunnel_udp->prot_type);
1330 sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
1331 if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
1333 goto fail_bad_proto;
1336 sfc_adapter_lock(sa);
1339 case SFC_UDP_TUNNEL_ADD_PORT:
1340 rc = efx_tunnel_config_udp_add(sa->nic,
1341 tunnel_udp->udp_port,
1344 case SFC_UDP_TUNNEL_DEL_PORT:
1345 rc = efx_tunnel_config_udp_remove(sa->nic,
1346 tunnel_udp->udp_port,
1357 if (sa->state == SFC_ADAPTER_STARTED) {
1358 rc = efx_tunnel_reconfigure(sa->nic);
1361 * Configuration is accepted by FW and MC reboot
1362 * is initiated to apply the changes. MC reboot
1363 * will be handled in the usual way (MC reboot
1364 * event on the management event queue and adapter restart).
1368 } else if (rc != 0) {
1369 goto fail_reconfigure;
1373 sfc_adapter_unlock(sa);
1377 /* Remove/restore the entry since the change caused trouble */
1379 case SFC_UDP_TUNNEL_ADD_PORT:
1380 (void)efx_tunnel_config_udp_remove(sa->nic,
1381 tunnel_udp->udp_port,
1384 case SFC_UDP_TUNNEL_DEL_PORT:
1385 (void)efx_tunnel_config_udp_add(sa->nic,
1386 tunnel_udp->udp_port,
1393 sfc_adapter_unlock(sa);
1401 sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
1402 struct rte_eth_udp_tunnel *tunnel_udp)
1404 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
1408 sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
1409 struct rte_eth_udp_tunnel *tunnel_udp)
1411 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
1415 * The function is used by the secondary process as well. It must not
1416 * use any process-local pointers from the adapter data.
1419 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1420 struct rte_eth_rss_conf *rss_conf)
1422 struct sfc_adapter *sa = dev->data->dev_private;
1423 struct sfc_rss *rss = &sa->rss;
1425 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
1428 sfc_adapter_lock(sa);
1431 * Mapping of hash configuration between RTE and EFX is not one-to-one,
1432 * hence, conversion is done here to derive a correct set of ETH_RSS
1433 * flags which corresponds to the active EFX configuration stored
1434 * locally in 'sfc_adapter' and kept up-to-date
1436 rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(sa, rss->hash_types);
1437 rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
1438 if (rss_conf->rss_key != NULL)
1439 rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);
1441 sfc_adapter_unlock(sa);
1447 sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
1448 struct rte_eth_rss_conf *rss_conf)
1450 struct sfc_adapter *sa = dev->data->dev_private;
1451 struct sfc_rss *rss = &sa->rss;
1452 struct sfc_port *port = &sa->port;
1453 unsigned int efx_hash_types;
1459 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
1460 sfc_err(sa, "RSS is not available");
1464 if (rss->channels == 0) {
1465 sfc_err(sa, "RSS is not configured");
1469 if ((rss_conf->rss_key != NULL) &&
1470 (rss_conf->rss_key_len != sizeof(rss->key))) {
1471 sfc_err(sa, "RSS key size is wrong (should be %lu)",
1476 sfc_adapter_lock(sa);
1478 rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
1480 goto fail_rx_hf_rte_to_efx;
1482 rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1483 rss->hash_alg, efx_hash_types, B_TRUE);
1485 goto fail_scale_mode_set;
1487 if (rss_conf->rss_key != NULL) {
1488 if (sa->state == SFC_ADAPTER_STARTED) {
1489 rc = efx_rx_scale_key_set(sa->nic,
1490 EFX_RSS_CONTEXT_DEFAULT,
1494 goto fail_scale_key_set;
1497 rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
1500 rss->hash_types = efx_hash_types;
1502 sfc_adapter_unlock(sa);
1507 if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1508 EFX_RX_HASHALG_TOEPLITZ,
1509 rss->hash_types, B_TRUE) != 0)
1510 sfc_err(sa, "failed to restore RSS mode");
1512 fail_scale_mode_set:
1513 fail_rx_hf_rte_to_efx:
1514 sfc_adapter_unlock(sa);
1519 * The function is used by the secondary process as well. It must not
1520 * use any process-local pointers from the adapter data.
1523 sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
1524 struct rte_eth_rss_reta_entry64 *reta_conf,
1527 struct sfc_adapter *sa = dev->data->dev_private;
1528 struct sfc_rss *rss = &sa->rss;
1529 struct sfc_port *port = &sa->port;
1532 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || port->isolated)
1535 if (rss->channels == 0)
1538 if (reta_size != EFX_RSS_TBL_SIZE)
1541 sfc_adapter_lock(sa);
1543 for (entry = 0; entry < reta_size; entry++) {
1544 int grp = entry / RTE_RETA_GROUP_SIZE;
1545 int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1547 if ((reta_conf[grp].mask >> grp_idx) & 1)
1548 reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
1551 sfc_adapter_unlock(sa);
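/*
 * In both the RETA query above and the RETA update below each table
 * entry is addressed as reta_conf[entry / RTE_RETA_GROUP_SIZE] at index
 * entry % RTE_RETA_GROUP_SIZE; e.g. with the usual group size of 64,
 * entry 70 is reta_conf[1].reta[6], guarded by bit 6 of reta_conf[1].mask.
 */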
1557 sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
1558 struct rte_eth_rss_reta_entry64 *reta_conf,
1561 struct sfc_adapter *sa = dev->data->dev_private;
1562 struct sfc_rss *rss = &sa->rss;
1563 struct sfc_port *port = &sa->port;
1564 unsigned int *rss_tbl_new;
1572 if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
1573 sfc_err(sa, "RSS is not available");
1577 if (rss->channels == 0) {
1578 sfc_err(sa, "RSS is not configured");
1582 if (reta_size != EFX_RSS_TBL_SIZE) {
1583 sfc_err(sa, "RETA size is wrong (should be %u)",
1588 rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
1589 if (rss_tbl_new == NULL)
1592 sfc_adapter_lock(sa);
1594 rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
1596 for (entry = 0; entry < reta_size; entry++) {
1597 int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1598 struct rte_eth_rss_reta_entry64 *grp;
1600 grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
1602 if (grp->mask & (1ull << grp_idx)) {
1603 if (grp->reta[grp_idx] >= rss->channels) {
1605 goto bad_reta_entry;
1607 rss_tbl_new[entry] = grp->reta[grp_idx];
1611 if (sa->state == SFC_ADAPTER_STARTED) {
1612 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1613 rss_tbl_new, EFX_RSS_TBL_SIZE);
1615 goto fail_scale_tbl_set;
1618 rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));
1622 sfc_adapter_unlock(sa);
1624 rte_free(rss_tbl_new);
1626 SFC_ASSERT(rc >= 0);
1631 sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
1632 enum rte_filter_op filter_op,
1635 struct sfc_adapter *sa = dev->data->dev_private;
1638 sfc_log_init(sa, "entry");
1640 switch (filter_type) {
1641 case RTE_ETH_FILTER_NONE:
1642 sfc_err(sa, "Global filters configuration not supported");
1644 case RTE_ETH_FILTER_MACVLAN:
1645 sfc_err(sa, "MACVLAN filters not supported");
1647 case RTE_ETH_FILTER_ETHERTYPE:
1648 sfc_err(sa, "EtherType filters not supported");
1650 case RTE_ETH_FILTER_FLEXIBLE:
1651 sfc_err(sa, "Flexible filters not supported");
1653 case RTE_ETH_FILTER_SYN:
1654 sfc_err(sa, "SYN filters not supported");
1656 case RTE_ETH_FILTER_NTUPLE:
1657 sfc_err(sa, "NTUPLE filters not supported");
1659 case RTE_ETH_FILTER_TUNNEL:
1660 sfc_err(sa, "Tunnel filters not supported");
1662 case RTE_ETH_FILTER_FDIR:
1663 sfc_err(sa, "Flow Director filters not supported");
1665 case RTE_ETH_FILTER_HASH:
1666 sfc_err(sa, "Hash filters not supported");
1668 case RTE_ETH_FILTER_GENERIC:
1669 if (filter_op != RTE_ETH_FILTER_GET) {
1672 *(const void **)arg = &sfc_flow_ops;
1677 sfc_err(sa, "Unknown filter type %u", filter_type);
1681 sfc_log_init(sa, "exit: %d", -rc);
1682 SFC_ASSERT(rc >= 0);
1687 sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
1689 const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
1692 * If Rx datapath does not provide callback to check mempool,
1693 * all pools are supported.
1695 if (sap->dp_rx->pool_ops_supported == NULL)
1698 return sap->dp_rx->pool_ops_supported(pool);
1701 static const struct eth_dev_ops sfc_eth_dev_ops = {
1702 .dev_configure = sfc_dev_configure,
1703 .dev_start = sfc_dev_start,
1704 .dev_stop = sfc_dev_stop,
1705 .dev_set_link_up = sfc_dev_set_link_up,
1706 .dev_set_link_down = sfc_dev_set_link_down,
1707 .dev_close = sfc_dev_close,
1708 .promiscuous_enable = sfc_dev_promisc_enable,
1709 .promiscuous_disable = sfc_dev_promisc_disable,
1710 .allmulticast_enable = sfc_dev_allmulti_enable,
1711 .allmulticast_disable = sfc_dev_allmulti_disable,
1712 .link_update = sfc_dev_link_update,
1713 .stats_get = sfc_stats_get,
1714 .stats_reset = sfc_stats_reset,
1715 .xstats_get = sfc_xstats_get,
1716 .xstats_reset = sfc_stats_reset,
1717 .xstats_get_names = sfc_xstats_get_names,
1718 .dev_infos_get = sfc_dev_infos_get,
1719 .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
1720 .mtu_set = sfc_dev_set_mtu,
1721 .rx_queue_start = sfc_rx_queue_start,
1722 .rx_queue_stop = sfc_rx_queue_stop,
1723 .tx_queue_start = sfc_tx_queue_start,
1724 .tx_queue_stop = sfc_tx_queue_stop,
1725 .rx_queue_setup = sfc_rx_queue_setup,
1726 .rx_queue_release = sfc_rx_queue_release,
1727 .rx_queue_count = sfc_rx_queue_count,
1728 .rx_descriptor_done = sfc_rx_descriptor_done,
1729 .rx_descriptor_status = sfc_rx_descriptor_status,
1730 .tx_descriptor_status = sfc_tx_descriptor_status,
1731 .tx_queue_setup = sfc_tx_queue_setup,
1732 .tx_queue_release = sfc_tx_queue_release,
1733 .flow_ctrl_get = sfc_flow_ctrl_get,
1734 .flow_ctrl_set = sfc_flow_ctrl_set,
1735 .mac_addr_set = sfc_mac_addr_set,
1736 .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add,
1737 .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del,
1738 .reta_update = sfc_dev_rss_reta_update,
1739 .reta_query = sfc_dev_rss_reta_query,
1740 .rss_hash_update = sfc_dev_rss_hash_update,
1741 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
1742 .filter_ctrl = sfc_dev_filter_ctrl,
1743 .set_mc_addr_list = sfc_set_mc_addr_list,
1744 .rxq_info_get = sfc_rx_queue_info_get,
1745 .txq_info_get = sfc_tx_queue_info_get,
1746 .fw_version_get = sfc_fw_version_get,
1747 .xstats_get_by_id = sfc_xstats_get_by_id,
1748 .xstats_get_names_by_id = sfc_xstats_get_names_by_id,
1749 .pool_ops_supported = sfc_pool_ops_supported,
1753 * Duplicate a string in potentially shared memory required for
1754 * multi-process support.
1756 * strdup() allocates from process-local heap/memory.
1759 sfc_strdup(const char *str)
1767 size = strlen(str) + 1;
1768 copy = rte_malloc(__func__, size, 0);
1770 rte_memcpy(copy, str, size);
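/*
 * Select the Rx and Tx datapath implementations: honour the
 * SFC_KVARG_RX_DATAPATH/SFC_KVARG_TX_DATAPATH device arguments if given,
 * otherwise pick the best registered datapath matching the HW/FW
 * capabilities, and remember the chosen names in shared data so that
 * secondary processes can look up the same implementations.
 */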
1776 sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
1778 struct sfc_adapter *sa = dev->data->dev_private;
1779 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1780 const struct sfc_dp_rx *dp_rx;
1781 const struct sfc_dp_tx *dp_tx;
1782 const efx_nic_cfg_t *encp;
1783 unsigned int avail_caps = 0;
1784 const char *rx_name = NULL;
1785 const char *tx_name = NULL;
1788 switch (sa->family) {
1789 case EFX_FAMILY_HUNTINGTON:
1790 case EFX_FAMILY_MEDFORD:
1791 case EFX_FAMILY_MEDFORD2:
1792 avail_caps |= SFC_DP_HW_FW_CAP_EF10;
1798 encp = efx_nic_cfg_get(sa->nic);
1799 if (encp->enc_rx_es_super_buffer_supported)
1800 avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
1802 rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
1803 sfc_kvarg_string_handler, &rx_name);
1805 goto fail_kvarg_rx_datapath;
1807 if (rx_name != NULL) {
1808 dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
1809 if (dp_rx == NULL) {
1810 sfc_err(sa, "Rx datapath %s not found", rx_name);
1814 if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
1816 "Insufficient Hw/FW capabilities to use Rx datapath %s",
1819 goto fail_dp_rx_caps;
1822 dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
1823 if (dp_rx == NULL) {
1824 sfc_err(sa, "Rx datapath by caps %#x not found",
1831 sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
1832 if (sas->dp_rx_name == NULL) {
1834 goto fail_dp_rx_name;
1837 sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);
1839 rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
1840 sfc_kvarg_string_handler, &tx_name);
1842 goto fail_kvarg_tx_datapath;
1844 if (tx_name != NULL) {
1845 dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
1846 if (dp_tx == NULL) {
1847 sfc_err(sa, "Tx datapath %s not found", tx_name);
1851 if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
1853 "Insufficient Hw/FW capabilities to use Tx datapath %s",
1856 goto fail_dp_tx_caps;
1859 dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
1860 if (dp_tx == NULL) {
1861 sfc_err(sa, "Tx datapath by caps %#x not found",
1868 sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
1869 if (sas->dp_tx_name == NULL) {
1871 goto fail_dp_tx_name;
1874 sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);
1876 sa->priv.dp_rx = dp_rx;
1877 sa->priv.dp_tx = dp_tx;
1879 dev->rx_pkt_burst = dp_rx->pkt_burst;
1880 dev->tx_pkt_burst = dp_tx->pkt_burst;
1882 dev->dev_ops = &sfc_eth_dev_ops;
1889 fail_kvarg_tx_datapath:
1890 rte_free(sas->dp_rx_name);
1891 sas->dp_rx_name = NULL;
1896 fail_kvarg_rx_datapath:
1901 sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
1903 struct sfc_adapter *sa = dev->data->dev_private;
1904 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1906 dev->dev_ops = NULL;
1907 dev->rx_pkt_burst = NULL;
1908 dev->tx_pkt_burst = NULL;
1910 rte_free(sas->dp_tx_name);
1911 sas->dp_tx_name = NULL;
1912 sa->priv.dp_tx = NULL;
1914 rte_free(sas->dp_rx_name);
1915 sas->dp_rx_name = NULL;
1916 sa->priv.dp_rx = NULL;
1919 static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
1920 .rx_queue_count = sfc_rx_queue_count,
1921 .rx_descriptor_done = sfc_rx_descriptor_done,
1922 .rx_descriptor_status = sfc_rx_descriptor_status,
1923 .tx_descriptor_status = sfc_tx_descriptor_status,
1924 .reta_query = sfc_dev_rss_reta_query,
1925 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
1926 .rxq_info_get = sfc_rx_queue_info_get,
1927 .txq_info_get = sfc_tx_queue_info_get,
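/*
 * Initialize the ethdev in a secondary process: allocate process-private
 * data with libc calloc() rather than rte_malloc() (which would place it
 * in shared memory), look up the datapaths chosen by the primary process
 * by name and wire up only multi-process safe callbacks.
 */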
1931 sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
1933 struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1934 struct sfc_adapter_priv *sap;
1935 const struct sfc_dp_rx *dp_rx;
1936 const struct sfc_dp_tx *dp_tx;
1940 * Allocate process private data from heap, since it should not
1941 * be located in shared memory allocated using rte_malloc() API.
1943 sap = calloc(1, sizeof(*sap));
1946 goto fail_alloc_priv;
1949 sap->logtype_main = logtype_main;
1951 dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
1952 if (dp_rx == NULL) {
1953 SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
1954 "cannot find %s Rx datapath", sas->dp_rx_name);
1958 if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
1959 SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
1960 "%s Rx datapath does not support multi-process",
1963 goto fail_dp_rx_multi_process;
1966 dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
1967 if (dp_tx == NULL) {
1968 SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
1969 "cannot find %s Tx datapath", sas->dp_tx_name);
1973 if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
1974 SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
1975 "%s Tx datapath does not support multi-process",
1978 goto fail_dp_tx_multi_process;
1984 dev->process_private = sap;
1985 dev->rx_pkt_burst = dp_rx->pkt_burst;
1986 dev->tx_pkt_burst = dp_tx->pkt_burst;
1987 dev->dev_ops = &sfc_eth_dev_secondary_ops;
1991 fail_dp_tx_multi_process:
1993 fail_dp_rx_multi_process:
2002 sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
2004 free(dev->process_private);
2005 dev->process_private = NULL;
2006 dev->dev_ops = NULL;
2007 dev->tx_pkt_burst = NULL;
2008 dev->rx_pkt_burst = NULL;
2012 sfc_register_dp(void)
2015 if (TAILQ_EMPTY(&sfc_dp_head)) {
2016 /* Prefer EF10 datapath */
2017 sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
2018 sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
2019 sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
2021 sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
2022 sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
2023 sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
2028 sfc_eth_dev_init(struct rte_eth_dev *dev)
2030 struct sfc_adapter *sa = dev->data->dev_private;
2031 struct sfc_adapter_shared *sas;
2032 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2033 uint32_t logtype_main;
2035 const efx_nic_cfg_t *encp;
2036 const struct ether_addr *from;
2040 logtype_main = sfc_register_logtype(&pci_dev->addr,
2041 SFC_LOGTYPE_MAIN_STR,
2044 sa->priv.shared = &sa->_shared;
2045 sas = sa->priv.shared;
2047 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2048 return -sfc_eth_dev_secondary_init(dev, logtype_main);
2051 * sfc_adapter is a mixture of shared and process private data.
2052 * During the transition period it is used in both roles. When the
2053 * driver becomes ready to separate it, sfc_adapter will become
2054 * primary process private only.
2056 dev->process_private = sa;
2058 /* Required for logging */
2059 sas->pci_addr = pci_dev->addr;
2060 sas->port_id = dev->data->port_id;
2061 sa->priv.logtype_main = logtype_main;
2065 /* Copy PCI device info to the dev->data */
2066 rte_eth_copy_pci_info(dev, pci_dev);
2068 rc = sfc_kvargs_parse(sa);
2070 goto fail_kvargs_parse;
2072 sfc_log_init(sa, "entry");
2074 dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
2075 if (dev->data->mac_addrs == NULL) {
2077 goto fail_mac_addrs;
2080 sfc_adapter_lock_init(sa);
2081 sfc_adapter_lock(sa);
2083 sfc_log_init(sa, "probing");
2088 sfc_log_init(sa, "set device ops");
2089 rc = sfc_eth_dev_set_ops(dev);
2093 sfc_log_init(sa, "attaching");
2094 rc = sfc_attach(sa);
2098 encp = efx_nic_cfg_get(sa->nic);
2101 * The arguments are in reverse order in comparison to the
2102 * Linux kernel convention: copy from NIC config to Ethernet device data.
2104 from = (const struct ether_addr *)(encp->enc_mac_addr);
2105 ether_addr_copy(from, &dev->data->mac_addrs[0]);
2107 sfc_adapter_unlock(sa);
2109 sfc_log_init(sa, "done");
2113 sfc_eth_dev_clear_ops(dev);
2119 sfc_adapter_unlock(sa);
2120 sfc_adapter_lock_fini(sa);
2121 rte_free(dev->data->mac_addrs);
2122 dev->data->mac_addrs = NULL;
2125 sfc_kvargs_cleanup(sa);
2128 sfc_log_init(sa, "failed %d", rc);
2129 dev->process_private = NULL;
2135 sfc_eth_dev_uninit(struct rte_eth_dev *dev)
2137 struct sfc_adapter *sa;
2139 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
2140 sfc_eth_dev_secondary_clear_ops(dev);
2144 sa = dev->data->dev_private;
2145 sfc_log_init(sa, "entry");
2147 sfc_adapter_lock(sa);
2149 sfc_eth_dev_clear_ops(dev);
2154 sfc_kvargs_cleanup(sa);
2156 sfc_adapter_unlock(sa);
2157 sfc_adapter_lock_fini(sa);
2159 sfc_log_init(sa, "done");
2161 /* Required for logging, so cleanup last */
2166 static const struct rte_pci_id pci_id_sfc_efx_map[] = {
2167 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
2168 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
2169 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
2170 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
2171 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
2172 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
2173 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
2174 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
2175 { .vendor_id = 0 /* sentinel */ }
2178 static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2179 struct rte_pci_device *pci_dev)
2181 return rte_eth_dev_pci_generic_probe(pci_dev,
2182 sizeof(struct sfc_adapter), sfc_eth_dev_init);
2185 static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2187 return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
2190 static struct rte_pci_driver sfc_efx_pmd = {
2191 .id_table = pci_id_sfc_efx_map,
2193 RTE_PCI_DRV_INTR_LSC |
2194 RTE_PCI_DRV_NEED_MAPPING,
2195 .probe = sfc_eth_dev_pci_probe,
2196 .remove = sfc_eth_dev_pci_remove,
2199 RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
2200 RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
2201 RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
2202 RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
2203 SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
2204 SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
2205 SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
2206 SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
2207 SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
2208 SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");
2210 RTE_INIT(sfc_driver_register_logtype)
2214 ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
2216 sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;