1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
11 #include <rte_ethdev.h>
12 #include <rte_ethdev_pci.h>
14 #include <rte_bus_pci.h>
15 #include <rte_errno.h>
20 #include "sfc_debug.h"
22 #include "sfc_kvargs.h"
28 #include "sfc_dp_rx.h"
30 static struct sfc_dp_list sfc_dp_head =
31 TAILQ_HEAD_INITIALIZER(sfc_dp_head);
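/*
 * fw_version_get() callback: report the management controller firmware
 * version and, when the IDs are valid, append the Rx/Tx DPCPU firmware
 * IDs. If the supplied buffer is too small, the required length is
 * reported back to the caller.
 */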
34 sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
36 struct sfc_adapter *sa = dev->data->dev_private;
37 efx_nic_fw_info_t enfi;
42 * The return value of the callback is supposed to be
43 * equal to or greater than 0; nevertheless, if an error
44 * occurs, it is desirable to pass it to the caller
46 if ((fw_version == NULL) || (fw_size == 0))
49 rc = efx_nic_get_fw_version(sa->nic, &enfi);
53 ret = snprintf(fw_version, fw_size,
54 "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
55 enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
56 enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
60 if (enfi.enfi_dpcpu_fw_ids_valid) {
61 size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
64 ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
65 fw_size - dpcpu_fw_ids_offset,
66 " rx%" PRIx16 " tx%" PRIx16,
67 enfi.enfi_rx_dpcpu_fw_id,
68 enfi.enfi_tx_dpcpu_fw_id);
75 if (fw_size < (size_t)(++ret))
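/*
 * dev_infos_get() callback: report device limits and capabilities.
 * Link speed capabilities are derived from the PHY advertised
 * capability mask, Rx/Tx offloads and default queue configuration
 * from the adapter and NIC configuration, and datapath-specific
 * details are filled in by the dp_rx/dp_tx get_dev_info() callbacks.
 */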
82 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
84 struct sfc_adapter *sa = dev->data->dev_private;
85 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
87 sfc_log_init(sa, "entry");
89 dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
90 dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
92 /* Autonegotiation may be disabled */
93 dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
94 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_1000FDX)
95 dev_info->speed_capa |= ETH_LINK_SPEED_1G;
96 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_10000FDX)
97 dev_info->speed_capa |= ETH_LINK_SPEED_10G;
98 if (sa->port.phy_adv_cap_mask & EFX_PHY_CAP_40000FDX)
99 dev_info->speed_capa |= ETH_LINK_SPEED_40G;
101 dev_info->max_rx_queues = sa->rxq_max;
102 dev_info->max_tx_queues = sa->txq_max;
104 /* By default packets are dropped if no descriptors are available */
105 dev_info->default_rxconf.rx_drop_en = 1;
107 dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa);
109 dev_info->tx_offload_capa =
110 DEV_TX_OFFLOAD_IPV4_CKSUM |
111 DEV_TX_OFFLOAD_UDP_CKSUM |
112 DEV_TX_OFFLOAD_TCP_CKSUM;
114 if (encp->enc_tunnel_encapsulations_supported != 0)
115 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
117 dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
118 if ((~sa->dp_tx->features & SFC_DP_TX_FEAT_VLAN_INSERT) ||
119 !encp->enc_hw_tx_insert_vlan_enabled)
120 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
122 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_VLAN_INSERT;
124 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_SEG)
125 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
127 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_MULTI_POOL)
128 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOMULTMEMP;
130 if (~sa->dp_tx->features & SFC_DP_TX_FEAT_REFCNT)
131 dev_info->default_txconf.txq_flags |= ETH_TXQ_FLAGS_NOREFCOUNT;
133 #if EFSYS_OPT_RX_SCALE
134 if (sa->rss_support != EFX_RX_SCALE_UNAVAILABLE) {
135 dev_info->reta_size = EFX_RSS_TBL_SIZE;
136 dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
137 dev_info->flow_type_rss_offloads = SFC_RSS_OFFLOADS;
142 dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
144 /* Initialize to hardware limits */
145 dev_info->rx_desc_lim.nb_max = EFX_RXQ_MAXNDESCS;
146 dev_info->rx_desc_lim.nb_min = EFX_RXQ_MINNDESCS;
147 /* The RXQ hardware requires that the descriptor count is a power
148 * of 2, but rx_desc_lim cannot properly describe that constraint.
150 dev_info->rx_desc_lim.nb_align = EFX_RXQ_MINNDESCS;
152 /* Initialize to hardware limits */
153 dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
154 dev_info->tx_desc_lim.nb_min = EFX_TXQ_MINNDESCS;
156 * The TXQ hardware requires that the descriptor count is a power
157 * of 2, but tx_desc_lim cannot properly describe that constraint
159 dev_info->tx_desc_lim.nb_align = EFX_TXQ_MINNDESCS;
161 if (sa->dp_rx->get_dev_info != NULL)
162 sa->dp_rx->get_dev_info(dev_info);
163 if (sa->dp_tx->get_dev_info != NULL)
164 sa->dp_tx->get_dev_info(dev_info);
167 static const uint32_t *
168 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
170 struct sfc_adapter *sa = dev->data->dev_private;
171 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
172 uint32_t tunnel_encaps = encp->enc_tunnel_encapsulations_supported;
174 return sa->dp_rx->supported_ptypes_get(tunnel_encaps);
178 sfc_dev_configure(struct rte_eth_dev *dev)
180 struct rte_eth_dev_data *dev_data = dev->data;
181 struct sfc_adapter *sa = dev_data->dev_private;
184 sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
185 dev_data->nb_rx_queues, dev_data->nb_tx_queues);
187 sfc_adapter_lock(sa);
189 case SFC_ADAPTER_CONFIGURED:
191 case SFC_ADAPTER_INITIALIZED:
192 rc = sfc_configure(sa);
195 sfc_err(sa, "unexpected adapter state %u to configure",
200 sfc_adapter_unlock(sa);
202 sfc_log_init(sa, "done %d", rc);
208 sfc_dev_start(struct rte_eth_dev *dev)
210 struct sfc_adapter *sa = dev->data->dev_private;
213 sfc_log_init(sa, "entry");
215 sfc_adapter_lock(sa);
217 sfc_adapter_unlock(sa);
219 sfc_log_init(sa, "done %d", rc);
225 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
227 struct sfc_adapter *sa = dev->data->dev_private;
228 struct rte_eth_link *dev_link = &dev->data->dev_link;
229 struct rte_eth_link old_link;
230 struct rte_eth_link current_link;
232 sfc_log_init(sa, "entry");
235 EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
236 *(int64_t *)&old_link = rte_atomic64_read((rte_atomic64_t *)dev_link);
238 if (sa->state != SFC_ADAPTER_STARTED) {
239 sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
240 if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
241 *(uint64_t *)&old_link,
242 *(uint64_t *)&current_link))
244 } else if (wait_to_complete) {
245 efx_link_mode_t link_mode;
247 if (efx_port_poll(sa->nic, &link_mode) != 0)
248 link_mode = EFX_LINK_UNKNOWN;
249 sfc_port_link_mode_to_info(link_mode, &current_link);
251 if (!rte_atomic64_cmpset((volatile uint64_t *)dev_link,
252 *(uint64_t *)&old_link,
253 *(uint64_t *)&current_link))
256 sfc_ev_mgmt_qpoll(sa);
257 *(int64_t *)&current_link =
258 rte_atomic64_read((rte_atomic64_t *)dev_link);
261 if (old_link.link_status != current_link.link_status)
262 sfc_info(sa, "Link status is %s",
263 current_link.link_status ? "UP" : "DOWN");
265 return old_link.link_status == current_link.link_status ? 0 : -1;
269 sfc_dev_stop(struct rte_eth_dev *dev)
271 struct sfc_adapter *sa = dev->data->dev_private;
273 sfc_log_init(sa, "entry");
275 sfc_adapter_lock(sa);
277 sfc_adapter_unlock(sa);
279 sfc_log_init(sa, "done");
283 sfc_dev_set_link_up(struct rte_eth_dev *dev)
285 struct sfc_adapter *sa = dev->data->dev_private;
288 sfc_log_init(sa, "entry");
290 sfc_adapter_lock(sa);
292 sfc_adapter_unlock(sa);
299 sfc_dev_set_link_down(struct rte_eth_dev *dev)
301 struct sfc_adapter *sa = dev->data->dev_private;
303 sfc_log_init(sa, "entry");
305 sfc_adapter_lock(sa);
307 sfc_adapter_unlock(sa);
313 sfc_dev_close(struct rte_eth_dev *dev)
315 struct sfc_adapter *sa = dev->data->dev_private;
317 sfc_log_init(sa, "entry");
319 sfc_adapter_lock(sa);
321 case SFC_ADAPTER_STARTED:
323 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
325 case SFC_ADAPTER_CONFIGURED:
327 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
329 case SFC_ADAPTER_INITIALIZED:
332 sfc_err(sa, "unexpected adapter state %u on close", sa->state);
335 sfc_adapter_unlock(sa);
337 sfc_log_init(sa, "done");
341 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
344 struct sfc_port *port;
346 struct sfc_adapter *sa = dev->data->dev_private;
347 boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
348 const char *desc = (allmulti) ? "all-multi" : "promiscuous";
350 sfc_adapter_lock(sa);
353 toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
355 if (*toggle != enabled) {
358 if (port->isolated) {
359 sfc_warn(sa, "isolated mode is active on the port");
360 sfc_warn(sa, "the change is to be applied on the next "
361 "start provided that isolated mode is "
362 "disabled prior the next start");
363 } else if ((sa->state == SFC_ADAPTER_STARTED) &&
364 (sfc_set_rx_mode(sa) != 0)) {
365 *toggle = !(enabled);
366 sfc_warn(sa, "Failed to %s %s mode",
367 ((enabled) ? "enable" : "disable"), desc);
371 sfc_adapter_unlock(sa);
375 sfc_dev_promisc_enable(struct rte_eth_dev *dev)
377 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
381 sfc_dev_promisc_disable(struct rte_eth_dev *dev)
383 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
387 sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
389 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
393 sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
395 sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
399 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
400 uint16_t nb_rx_desc, unsigned int socket_id,
401 const struct rte_eth_rxconf *rx_conf,
402 struct rte_mempool *mb_pool)
404 struct sfc_adapter *sa = dev->data->dev_private;
407 sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
408 rx_queue_id, nb_rx_desc, socket_id);
410 sfc_adapter_lock(sa);
412 rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
417 dev->data->rx_queues[rx_queue_id] = sa->rxq_info[rx_queue_id].rxq->dp;
419 sfc_adapter_unlock(sa);
424 sfc_adapter_unlock(sa);
430 sfc_rx_queue_release(void *queue)
432 struct sfc_dp_rxq *dp_rxq = queue;
434 struct sfc_adapter *sa;
435 unsigned int sw_index;
440 rxq = sfc_rxq_by_dp_rxq(dp_rxq);
442 sfc_adapter_lock(sa);
444 sw_index = sfc_rxq_sw_index(rxq);
446 sfc_log_init(sa, "RxQ=%u", sw_index);
448 sa->eth_dev->data->rx_queues[sw_index] = NULL;
450 sfc_rx_qfini(sa, sw_index);
452 sfc_adapter_unlock(sa);
456 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
457 uint16_t nb_tx_desc, unsigned int socket_id,
458 const struct rte_eth_txconf *tx_conf)
460 struct sfc_adapter *sa = dev->data->dev_private;
463 sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
464 tx_queue_id, nb_tx_desc, socket_id);
466 sfc_adapter_lock(sa);
468 rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
472 dev->data->tx_queues[tx_queue_id] = sa->txq_info[tx_queue_id].txq->dp;
474 sfc_adapter_unlock(sa);
478 sfc_adapter_unlock(sa);
484 sfc_tx_queue_release(void *queue)
486 struct sfc_dp_txq *dp_txq = queue;
488 unsigned int sw_index;
489 struct sfc_adapter *sa;
494 txq = sfc_txq_by_dp_txq(dp_txq);
495 sw_index = sfc_txq_sw_index(txq);
497 SFC_ASSERT(txq->evq != NULL);
500 sfc_log_init(sa, "TxQ = %u", sw_index);
502 sfc_adapter_lock(sa);
504 SFC_ASSERT(sw_index < sa->eth_dev->data->nb_tx_queues);
505 sa->eth_dev->data->tx_queues[sw_index] = NULL;
507 sfc_tx_qfini(sa, sw_index);
509 sfc_adapter_unlock(sa);
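/*
 * stats_get() callback: fill in generic ethdev statistics from the MAC
 * statistics snapshot. If the adapter exposes virtual adapter
 * (vadapter) counters, per-vadapter packet/byte counters are used;
 * otherwise plain MAC counters are used and the EF10 drop counters are
 * accumulated into imissed/ierrors.
 */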
513 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
515 struct sfc_adapter *sa = dev->data->dev_private;
516 struct sfc_port *port = &sa->port;
520 rte_spinlock_lock(&port->mac_stats_lock);
522 ret = sfc_port_update_mac_stats(sa);
526 mac_stats = port->mac_stats_buf;
528 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
529 EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
531 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
532 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
533 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
535 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
536 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
537 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
539 mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
540 mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
541 mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
543 mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
544 mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
545 mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
546 stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_OVERFLOW];
547 stats->ierrors = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
548 stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
550 stats->ipackets = mac_stats[EFX_MAC_RX_PKTS];
551 stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
552 stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
553 stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
555 * Take into account stats which are supported on EF10.
556 * If some stat is not supported by the current
557 * firmware variant or HW revision, it is guaranteed
558 * to be zero in mac_stats.
561 mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
562 mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
563 mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
564 mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
565 mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
566 mac_stats[EFX_MAC_PM_TRUNC_QBB] +
567 mac_stats[EFX_MAC_PM_DISCARD_QBB] +
568 mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
569 mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
570 mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
572 mac_stats[EFX_MAC_RX_FCS_ERRORS] +
573 mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
574 mac_stats[EFX_MAC_RX_JABBER_PKTS];
575 /* no oerrors counters supported on EF10 */
579 rte_spinlock_unlock(&port->mac_stats_lock);
580 SFC_ASSERT(ret >= 0);
585 sfc_stats_reset(struct rte_eth_dev *dev)
587 struct sfc_adapter *sa = dev->data->dev_private;
588 struct sfc_port *port = &sa->port;
591 if (sa->state != SFC_ADAPTER_STARTED) {
593 * The operation cannot be done if the port is not started;
594 * it will be scheduled to be done during the next port start
596 port->mac_stats_reset_pending = B_TRUE;
600 rc = sfc_port_reset_mac_stats(sa);
602 sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
606 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
607 unsigned int xstats_count)
609 struct sfc_adapter *sa = dev->data->dev_private;
610 struct sfc_port *port = &sa->port;
616 rte_spinlock_lock(&port->mac_stats_lock);
618 rc = sfc_port_update_mac_stats(sa);
625 mac_stats = port->mac_stats_buf;
627 for (i = 0; i < EFX_MAC_NSTATS; ++i) {
628 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
629 if (xstats != NULL && nstats < (int)xstats_count) {
630 xstats[nstats].id = nstats;
631 xstats[nstats].value = mac_stats[i];
638 rte_spinlock_unlock(&port->mac_stats_lock);
644 sfc_xstats_get_names(struct rte_eth_dev *dev,
645 struct rte_eth_xstat_name *xstats_names,
646 unsigned int xstats_count)
648 struct sfc_adapter *sa = dev->data->dev_private;
649 struct sfc_port *port = &sa->port;
651 unsigned int nstats = 0;
653 for (i = 0; i < EFX_MAC_NSTATS; ++i) {
654 if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
655 if (xstats_names != NULL && nstats < xstats_count)
656 strncpy(xstats_names[nstats].name,
657 efx_mac_stat_name(sa->nic, i),
658 sizeof(xstats_names[0].name));
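/*
 * xstats_get_by_id() callback. A NULL ids array requests all supported
 * statistics in registration order; in that case, if the values array
 * is missing or too small, the number of supported statistics is
 * returned instead of filling it in.
 */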
667 sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
668 uint64_t *values, unsigned int n)
670 struct sfc_adapter *sa = dev->data->dev_private;
671 struct sfc_port *port = &sa->port;
673 unsigned int nb_supported = 0;
674 unsigned int nb_written = 0;
679 if (unlikely(values == NULL) ||
680 unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
681 return port->mac_stats_nb_supported;
683 rte_spinlock_lock(&port->mac_stats_lock);
685 rc = sfc_port_update_mac_stats(sa);
692 mac_stats = port->mac_stats_buf;
694 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
695 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
698 if ((ids == NULL) || (ids[nb_written] == nb_supported))
699 values[nb_written++] = mac_stats[i];
707 rte_spinlock_unlock(&port->mac_stats_lock);
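/*
 * Names counterpart of sfc_xstats_get_by_id() with the same
 * ids == NULL convention; copied names are always NUL-terminated.
 */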
713 sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
714 struct rte_eth_xstat_name *xstats_names,
715 const uint64_t *ids, unsigned int size)
717 struct sfc_adapter *sa = dev->data->dev_private;
718 struct sfc_port *port = &sa->port;
719 unsigned int nb_supported = 0;
720 unsigned int nb_written = 0;
723 if (unlikely(xstats_names == NULL) ||
724 unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
725 return port->mac_stats_nb_supported;
727 for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
728 if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
731 if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
732 char *name = xstats_names[nb_written++].name;
734 strncpy(name, efx_mac_stat_name(sa->nic, i),
735 sizeof(xstats_names[0].name));
736 name[sizeof(xstats_names[0].name) - 1] = '\0';
746 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
748 struct sfc_adapter *sa = dev->data->dev_private;
749 unsigned int wanted_fc, link_fc;
751 memset(fc_conf, 0, sizeof(*fc_conf));
753 sfc_adapter_lock(sa);
755 if (sa->state == SFC_ADAPTER_STARTED)
756 efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
758 link_fc = sa->port.flow_ctrl;
762 fc_conf->mode = RTE_FC_NONE;
764 case EFX_FCNTL_RESPOND:
765 fc_conf->mode = RTE_FC_RX_PAUSE;
767 case EFX_FCNTL_GENERATE:
768 fc_conf->mode = RTE_FC_TX_PAUSE;
770 case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
771 fc_conf->mode = RTE_FC_FULL;
774 sfc_err(sa, "%s: unexpected flow control value %#x",
778 fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
780 sfc_adapter_unlock(sa);
786 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
788 struct sfc_adapter *sa = dev->data->dev_private;
789 struct sfc_port *port = &sa->port;
793 if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
794 fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
795 fc_conf->mac_ctrl_frame_fwd != 0) {
796 sfc_err(sa, "unsupported flow control settings specified");
801 switch (fc_conf->mode) {
805 case RTE_FC_RX_PAUSE:
806 fcntl = EFX_FCNTL_RESPOND;
808 case RTE_FC_TX_PAUSE:
809 fcntl = EFX_FCNTL_GENERATE;
812 fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
819 sfc_adapter_lock(sa);
821 if (sa->state == SFC_ADAPTER_STARTED) {
822 rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
824 goto fail_mac_fcntl_set;
827 port->flow_ctrl = fcntl;
828 port->flow_ctrl_autoneg = fc_conf->autoneg;
830 sfc_adapter_unlock(sa);
835 sfc_adapter_unlock(sa);
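/*
 * mtu_set() callback: convert the MTU to a PDU (maximum MAC frame
 * size), validate it against the hardware limits and, if the adapter
 * is started, restart it with the new PDU. On failure the old PDU is
 * restored and a restart with it is attempted.
 */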
842 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
844 struct sfc_adapter *sa = dev->data->dev_private;
845 size_t pdu = EFX_MAC_PDU(mtu);
849 sfc_log_init(sa, "mtu=%u", mtu);
852 if (pdu < EFX_MAC_PDU_MIN) {
853 sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
854 (unsigned int)mtu, (unsigned int)pdu,
858 if (pdu > EFX_MAC_PDU_MAX) {
859 sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
860 (unsigned int)mtu, (unsigned int)pdu,
865 sfc_adapter_lock(sa);
867 if (pdu != sa->port.pdu) {
868 if (sa->state == SFC_ADAPTER_STARTED) {
871 old_pdu = sa->port.pdu;
882 * The driver does not use it, but other PMDs update jumbo_frame
883 * flag and max_rx_pkt_len when MTU is set.
885 dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
886 dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
888 sfc_adapter_unlock(sa);
890 sfc_log_init(sa, "done");
894 sa->port.pdu = old_pdu;
895 if (sfc_start(sa) != 0)
896 sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
897 "PDU max size - port is stopped",
898 (unsigned int)pdu, (unsigned int)old_pdu);
899 sfc_adapter_unlock(sa);
902 sfc_log_init(sa, "failed %d", rc);
907 sfc_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
909 struct sfc_adapter *sa = dev->data->dev_private;
910 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
911 struct sfc_port *port = &sa->port;
914 sfc_adapter_lock(sa);
917 * Copy the address to the device private data so that
918 * it could be recalled in the case of adapter restart.
920 ether_addr_copy(mac_addr, &port->default_mac_addr);
922 if (port->isolated) {
923 sfc_err(sa, "isolated mode is active on the port");
924 sfc_err(sa, "will not set MAC address");
928 if (sa->state != SFC_ADAPTER_STARTED) {
929 sfc_info(sa, "the port is not started");
930 sfc_info(sa, "the new MAC address will be set on port start");
935 if (encp->enc_allow_set_mac_with_installed_filters) {
936 rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
938 sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
943 * Changing the MAC address by means of an MCDI request
944 * has no effect on received traffic, therefore
945 * we also need to update unicast filters
947 rc = sfc_set_rx_mode(sa);
949 sfc_err(sa, "cannot set filter (rc = %u)", rc);
951 sfc_warn(sa, "cannot set MAC address with filters installed");
952 sfc_warn(sa, "adapter will be restarted to pick the new MAC");
953 sfc_warn(sa, "(some traffic may be dropped)");
956 * Since setting MAC address with filters installed is not
957 * allowed on the adapter, the new MAC address will be set
958 * by means of adapter restart. sfc_start() shall retrieve
959 * the new address from the device private data and set it.
964 sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
969 * In the case of failure sa->port.default_mac_addr does not
970 * need rollback since no error code is returned, and the upper
971 * API will update the external MAC address storage anyway.
972 * To be consistent with that new value it is better to keep
973 * the device private value the same.
975 sfc_adapter_unlock(sa);
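/*
 * set_mc_addr_list() callback: copy the multicast address list to the
 * port private data and push it to the firmware immediately only if
 * the adapter is started; otherwise it is applied on the next start.
 */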
980 sfc_set_mc_addr_list(struct rte_eth_dev *dev, struct ether_addr *mc_addr_set,
983 struct sfc_adapter *sa = dev->data->dev_private;
984 struct sfc_port *port = &sa->port;
985 uint8_t *mc_addrs = port->mcast_addrs;
989 if (port->isolated) {
990 sfc_err(sa, "isolated mode is active on the port");
991 sfc_err(sa, "will not set multicast address list");
995 if (mc_addrs == NULL)
998 if (nb_mc_addr > port->max_mcast_addrs) {
999 sfc_err(sa, "too many multicast addresses: %u > %u",
1000 nb_mc_addr, port->max_mcast_addrs);
1004 for (i = 0; i < nb_mc_addr; ++i) {
1005 rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
1007 mc_addrs += EFX_MAC_ADDR_LEN;
1010 port->nb_mcast_addrs = nb_mc_addr;
1012 if (sa->state != SFC_ADAPTER_STARTED)
1015 rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
1016 port->nb_mcast_addrs);
1018 sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
1025 * The function is used by the secondary process as well. It must not
1026 * use any process-local pointers from the adapter data.
1029 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1030 struct rte_eth_rxq_info *qinfo)
1032 struct sfc_adapter *sa = dev->data->dev_private;
1033 struct sfc_rxq_info *rxq_info;
1034 struct sfc_rxq *rxq;
1036 sfc_adapter_lock(sa);
1038 SFC_ASSERT(rx_queue_id < sa->rxq_count);
1040 rxq_info = &sa->rxq_info[rx_queue_id];
1041 rxq = rxq_info->rxq;
1042 SFC_ASSERT(rxq != NULL);
1044 qinfo->mp = rxq->refill_mb_pool;
1045 qinfo->conf.rx_free_thresh = rxq->refill_threshold;
1046 qinfo->conf.rx_drop_en = 1;
1047 qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
1048 qinfo->scattered_rx =
1049 ((rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) != 0);
1050 qinfo->nb_desc = rxq_info->entries;
1052 sfc_adapter_unlock(sa);
1056 * The function is used by the secondary process as well. It must not
1057 * use any process-local pointers from the adapter data.
1060 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1061 struct rte_eth_txq_info *qinfo)
1063 struct sfc_adapter *sa = dev->data->dev_private;
1064 struct sfc_txq_info *txq_info;
1066 sfc_adapter_lock(sa);
1068 SFC_ASSERT(tx_queue_id < sa->txq_count);
1070 txq_info = &sa->txq_info[tx_queue_id];
1071 SFC_ASSERT(txq_info->txq != NULL);
1073 memset(qinfo, 0, sizeof(*qinfo));
1075 qinfo->conf.txq_flags = txq_info->txq->flags;
1076 qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
1077 qinfo->conf.tx_deferred_start = txq_info->deferred_start;
1078 qinfo->nb_desc = txq_info->entries;
1080 sfc_adapter_unlock(sa);
1084 sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1086 struct sfc_adapter *sa = dev->data->dev_private;
1088 sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1090 return sfc_rx_qdesc_npending(sa, rx_queue_id);
1094 sfc_rx_descriptor_done(void *queue, uint16_t offset)
1096 struct sfc_dp_rxq *dp_rxq = queue;
1098 return sfc_rx_qdesc_done(dp_rxq, offset);
1102 sfc_rx_descriptor_status(void *queue, uint16_t offset)
1104 struct sfc_dp_rxq *dp_rxq = queue;
1105 struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);
1107 return rxq->evq->sa->dp_rx->qdesc_status(dp_rxq, offset);
1111 sfc_tx_descriptor_status(void *queue, uint16_t offset)
1113 struct sfc_dp_txq *dp_txq = queue;
1114 struct sfc_txq *txq = sfc_txq_by_dp_txq(dp_txq);
1116 return txq->evq->sa->dp_tx->qdesc_status(dp_txq, offset);
1120 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1122 struct sfc_adapter *sa = dev->data->dev_private;
1125 sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1127 sfc_adapter_lock(sa);
1130 if (sa->state != SFC_ADAPTER_STARTED)
1131 goto fail_not_started;
1133 rc = sfc_rx_qstart(sa, rx_queue_id);
1135 goto fail_rx_qstart;
1137 sa->rxq_info[rx_queue_id].deferred_started = B_TRUE;
1139 sfc_adapter_unlock(sa);
1145 sfc_adapter_unlock(sa);
1151 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1153 struct sfc_adapter *sa = dev->data->dev_private;
1155 sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1157 sfc_adapter_lock(sa);
1158 sfc_rx_qstop(sa, rx_queue_id);
1160 sa->rxq_info[rx_queue_id].deferred_started = B_FALSE;
1162 sfc_adapter_unlock(sa);
1168 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1170 struct sfc_adapter *sa = dev->data->dev_private;
1173 sfc_log_init(sa, "TxQ = %u", tx_queue_id);
1175 sfc_adapter_lock(sa);
1178 if (sa->state != SFC_ADAPTER_STARTED)
1179 goto fail_not_started;
1181 rc = sfc_tx_qstart(sa, tx_queue_id);
1183 goto fail_tx_qstart;
1185 sa->txq_info[tx_queue_id].deferred_started = B_TRUE;
1187 sfc_adapter_unlock(sa);
1193 sfc_adapter_unlock(sa);
1199 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1201 struct sfc_adapter *sa = dev->data->dev_private;
1203 sfc_log_init(sa, "TxQ = %u", tx_queue_id);
1205 sfc_adapter_lock(sa);
1207 sfc_tx_qstop(sa, tx_queue_id);
1209 sa->txq_info[tx_queue_id].deferred_started = B_FALSE;
1211 sfc_adapter_unlock(sa);
1215 static efx_tunnel_protocol_t
1216 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
1219 case RTE_TUNNEL_TYPE_VXLAN:
1220 return EFX_TUNNEL_PROTOCOL_VXLAN;
1221 case RTE_TUNNEL_TYPE_GENEVE:
1222 return EFX_TUNNEL_PROTOCOL_GENEVE;
1224 return EFX_TUNNEL_NPROTOS;
1228 enum sfc_udp_tunnel_op_e {
1229 SFC_UDP_TUNNEL_ADD_PORT,
1230 SFC_UDP_TUNNEL_DEL_PORT,
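/*
 * Common handler for UDP tunnel (VXLAN/GENEVE) port add and delete.
 * The libefx UDP tunnel table is updated first; if the adapter is
 * started, the change is pushed with efx_tunnel_reconfigure(), which
 * may initiate an MC reboot to apply it, and the table entry is rolled
 * back if the reconfiguration fails.
 */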
1234 sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
1235 struct rte_eth_udp_tunnel *tunnel_udp,
1236 enum sfc_udp_tunnel_op_e op)
1238 struct sfc_adapter *sa = dev->data->dev_private;
1239 efx_tunnel_protocol_t tunnel_proto;
1242 sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
1243 (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
1244 (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
1245 tunnel_udp->udp_port, tunnel_udp->prot_type);
1248 sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
1249 if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
1251 goto fail_bad_proto;
1254 sfc_adapter_lock(sa);
1257 case SFC_UDP_TUNNEL_ADD_PORT:
1258 rc = efx_tunnel_config_udp_add(sa->nic,
1259 tunnel_udp->udp_port,
1262 case SFC_UDP_TUNNEL_DEL_PORT:
1263 rc = efx_tunnel_config_udp_remove(sa->nic,
1264 tunnel_udp->udp_port,
1275 if (sa->state == SFC_ADAPTER_STARTED) {
1276 rc = efx_tunnel_reconfigure(sa->nic);
1279 * Configuration is accepted by FW and MC reboot
1280 * is initiated to apply the changes. MC reboot
1281 * will be handled in a usual way (MC reboot
1282 * event on management event queue and adapter
1286 } else if (rc != 0) {
1287 goto fail_reconfigure;
1291 sfc_adapter_unlock(sa);
1295 /* Remove/restore the entry since the change caused the trouble */
1297 case SFC_UDP_TUNNEL_ADD_PORT:
1298 (void)efx_tunnel_config_udp_remove(sa->nic,
1299 tunnel_udp->udp_port,
1302 case SFC_UDP_TUNNEL_DEL_PORT:
1303 (void)efx_tunnel_config_udp_add(sa->nic,
1304 tunnel_udp->udp_port,
1311 sfc_adapter_unlock(sa);
1319 sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
1320 struct rte_eth_udp_tunnel *tunnel_udp)
1322 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
1326 sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
1327 struct rte_eth_udp_tunnel *tunnel_udp)
1329 return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
1332 #if EFSYS_OPT_RX_SCALE
1334 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1335 struct rte_eth_rss_conf *rss_conf)
1337 struct sfc_adapter *sa = dev->data->dev_private;
1338 struct sfc_port *port = &sa->port;
1340 if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
1343 if (sa->rss_channels == 0)
1346 sfc_adapter_lock(sa);
1349 * Mapping of hash configuration between RTE and EFX is not one-to-one,
1350 * hence, conversion is done here to derive a correct set of ETH_RSS
1351 * flags which corresponds to the active EFX configuration stored
1352 * locally in 'sfc_adapter' and kept up-to-date
1354 rss_conf->rss_hf = sfc_efx_to_rte_hash_type(sa->rss_hash_types);
1355 rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
1356 if (rss_conf->rss_key != NULL)
1357 rte_memcpy(rss_conf->rss_key, sa->rss_key, EFX_RSS_KEY_SIZE);
1359 sfc_adapter_unlock(sa);
1365 sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
1366 struct rte_eth_rss_conf *rss_conf)
1368 struct sfc_adapter *sa = dev->data->dev_private;
1369 struct sfc_port *port = &sa->port;
1370 unsigned int efx_hash_types;
1376 if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
1377 sfc_err(sa, "RSS is not available");
1381 if (sa->rss_channels == 0) {
1382 sfc_err(sa, "RSS is not configured");
1386 if ((rss_conf->rss_key != NULL) &&
1387 (rss_conf->rss_key_len != sizeof(sa->rss_key))) {
1388 sfc_err(sa, "RSS key size is wrong (should be %lu)",
1389 sizeof(sa->rss_key));
1393 if ((rss_conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
1394 sfc_err(sa, "unsupported hash functions requested");
1398 sfc_adapter_lock(sa);
1400 efx_hash_types = sfc_rte_to_efx_hash_type(rss_conf->rss_hf);
1402 rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1403 EFX_RX_HASHALG_TOEPLITZ,
1404 efx_hash_types, B_TRUE);
1406 goto fail_scale_mode_set;
1408 if (rss_conf->rss_key != NULL) {
1409 if (sa->state == SFC_ADAPTER_STARTED) {
1410 rc = efx_rx_scale_key_set(sa->nic,
1411 EFX_RSS_CONTEXT_DEFAULT,
1413 sizeof(sa->rss_key));
1415 goto fail_scale_key_set;
1418 rte_memcpy(sa->rss_key, rss_conf->rss_key, sizeof(sa->rss_key));
1421 sa->rss_hash_types = efx_hash_types;
1423 sfc_adapter_unlock(sa);
1428 if (efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1429 EFX_RX_HASHALG_TOEPLITZ,
1430 sa->rss_hash_types, B_TRUE) != 0)
1431 sfc_err(sa, "failed to restore RSS mode");
1433 fail_scale_mode_set:
1434 sfc_adapter_unlock(sa);
1439 sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
1440 struct rte_eth_rss_reta_entry64 *reta_conf,
1443 struct sfc_adapter *sa = dev->data->dev_private;
1444 struct sfc_port *port = &sa->port;
1447 if ((sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) || port->isolated)
1450 if (sa->rss_channels == 0)
1453 if (reta_size != EFX_RSS_TBL_SIZE)
1456 sfc_adapter_lock(sa);
1458 for (entry = 0; entry < reta_size; entry++) {
1459 int grp = entry / RTE_RETA_GROUP_SIZE;
1460 int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1462 if ((reta_conf[grp].mask >> grp_idx) & 1)
1463 reta_conf[grp].reta[grp_idx] = sa->rss_tbl[entry];
1466 sfc_adapter_unlock(sa);
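/*
 * reta_update() callback: build the new redirection table in a
 * temporary copy so that validation failures leave the active table
 * untouched, and push it to the hardware only if the adapter is
 * started.
 */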
1472 sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
1473 struct rte_eth_rss_reta_entry64 *reta_conf,
1476 struct sfc_adapter *sa = dev->data->dev_private;
1477 struct sfc_port *port = &sa->port;
1478 unsigned int *rss_tbl_new;
1486 if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
1487 sfc_err(sa, "RSS is not available");
1491 if (sa->rss_channels == 0) {
1492 sfc_err(sa, "RSS is not configured");
1496 if (reta_size != EFX_RSS_TBL_SIZE) {
1497 sfc_err(sa, "RETA size is wrong (should be %u)",
1502 rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(sa->rss_tbl), 0);
1503 if (rss_tbl_new == NULL)
1506 sfc_adapter_lock(sa);
1508 rte_memcpy(rss_tbl_new, sa->rss_tbl, sizeof(sa->rss_tbl));
1510 for (entry = 0; entry < reta_size; entry++) {
1511 int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1512 struct rte_eth_rss_reta_entry64 *grp;
1514 grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
1516 if (grp->mask & (1ull << grp_idx)) {
1517 if (grp->reta[grp_idx] >= sa->rss_channels) {
1519 goto bad_reta_entry;
1521 rss_tbl_new[entry] = grp->reta[grp_idx];
1525 if (sa->state == SFC_ADAPTER_STARTED) {
1526 rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1527 rss_tbl_new, EFX_RSS_TBL_SIZE);
1529 goto fail_scale_tbl_set;
1532 rte_memcpy(sa->rss_tbl, rss_tbl_new, sizeof(sa->rss_tbl));
1536 sfc_adapter_unlock(sa);
1538 rte_free(rss_tbl_new);
1540 SFC_ASSERT(rc >= 0);
1546 sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
1547 enum rte_filter_op filter_op,
1550 struct sfc_adapter *sa = dev->data->dev_private;
1553 sfc_log_init(sa, "entry");
1555 switch (filter_type) {
1556 case RTE_ETH_FILTER_NONE:
1557 sfc_err(sa, "Global filters configuration not supported");
1559 case RTE_ETH_FILTER_MACVLAN:
1560 sfc_err(sa, "MACVLAN filters not supported");
1562 case RTE_ETH_FILTER_ETHERTYPE:
1563 sfc_err(sa, "EtherType filters not supported");
1565 case RTE_ETH_FILTER_FLEXIBLE:
1566 sfc_err(sa, "Flexible filters not supported");
1568 case RTE_ETH_FILTER_SYN:
1569 sfc_err(sa, "SYN filters not supported");
1571 case RTE_ETH_FILTER_NTUPLE:
1572 sfc_err(sa, "NTUPLE filters not supported");
1574 case RTE_ETH_FILTER_TUNNEL:
1575 sfc_err(sa, "Tunnel filters not supported");
1577 case RTE_ETH_FILTER_FDIR:
1578 sfc_err(sa, "Flow Director filters not supported");
1580 case RTE_ETH_FILTER_HASH:
1581 sfc_err(sa, "Hash filters not supported");
1583 case RTE_ETH_FILTER_GENERIC:
1584 if (filter_op != RTE_ETH_FILTER_GET) {
1587 *(const void **)arg = &sfc_flow_ops;
1592 sfc_err(sa, "Unknown filter type %u", filter_type);
1596 sfc_log_init(sa, "exit: %d", -rc);
1597 SFC_ASSERT(rc >= 0);
1601 static const struct eth_dev_ops sfc_eth_dev_ops = {
1602 .dev_configure = sfc_dev_configure,
1603 .dev_start = sfc_dev_start,
1604 .dev_stop = sfc_dev_stop,
1605 .dev_set_link_up = sfc_dev_set_link_up,
1606 .dev_set_link_down = sfc_dev_set_link_down,
1607 .dev_close = sfc_dev_close,
1608 .promiscuous_enable = sfc_dev_promisc_enable,
1609 .promiscuous_disable = sfc_dev_promisc_disable,
1610 .allmulticast_enable = sfc_dev_allmulti_enable,
1611 .allmulticast_disable = sfc_dev_allmulti_disable,
1612 .link_update = sfc_dev_link_update,
1613 .stats_get = sfc_stats_get,
1614 .stats_reset = sfc_stats_reset,
1615 .xstats_get = sfc_xstats_get,
1616 .xstats_reset = sfc_stats_reset,
1617 .xstats_get_names = sfc_xstats_get_names,
1618 .dev_infos_get = sfc_dev_infos_get,
1619 .dev_supported_ptypes_get = sfc_dev_supported_ptypes_get,
1620 .mtu_set = sfc_dev_set_mtu,
1621 .rx_queue_start = sfc_rx_queue_start,
1622 .rx_queue_stop = sfc_rx_queue_stop,
1623 .tx_queue_start = sfc_tx_queue_start,
1624 .tx_queue_stop = sfc_tx_queue_stop,
1625 .rx_queue_setup = sfc_rx_queue_setup,
1626 .rx_queue_release = sfc_rx_queue_release,
1627 .rx_queue_count = sfc_rx_queue_count,
1628 .rx_descriptor_done = sfc_rx_descriptor_done,
1629 .rx_descriptor_status = sfc_rx_descriptor_status,
1630 .tx_descriptor_status = sfc_tx_descriptor_status,
1631 .tx_queue_setup = sfc_tx_queue_setup,
1632 .tx_queue_release = sfc_tx_queue_release,
1633 .flow_ctrl_get = sfc_flow_ctrl_get,
1634 .flow_ctrl_set = sfc_flow_ctrl_set,
1635 .mac_addr_set = sfc_mac_addr_set,
1636 .udp_tunnel_port_add = sfc_dev_udp_tunnel_port_add,
1637 .udp_tunnel_port_del = sfc_dev_udp_tunnel_port_del,
1638 #if EFSYS_OPT_RX_SCALE
1639 .reta_update = sfc_dev_rss_reta_update,
1640 .reta_query = sfc_dev_rss_reta_query,
1641 .rss_hash_update = sfc_dev_rss_hash_update,
1642 .rss_hash_conf_get = sfc_dev_rss_hash_conf_get,
1644 .filter_ctrl = sfc_dev_filter_ctrl,
1645 .set_mc_addr_list = sfc_set_mc_addr_list,
1646 .rxq_info_get = sfc_rx_queue_info_get,
1647 .txq_info_get = sfc_tx_queue_info_get,
1648 .fw_version_get = sfc_fw_version_get,
1649 .xstats_get_by_id = sfc_xstats_get_by_id,
1650 .xstats_get_names_by_id = sfc_xstats_get_names_by_id,
1654 * Duplicate a string in potentially shared memory required for
1655 * multi-process support.
1657 * strdup() allocates from process-local heap/memory.
1660 sfc_strdup(const char *str)
1668 size = strlen(str) + 1;
1669 copy = rte_malloc(__func__, size, 0);
1671 rte_memcpy(copy, str, size);
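/*
 * Select the Rx and Tx datapath implementations. A datapath explicitly
 * requested via the SFC_KVARG_RX_DATAPATH/SFC_KVARG_TX_DATAPATH device
 * arguments is validated against the HW/FW capabilities; otherwise the
 * first registered datapath matching the capabilities is used. The
 * chosen names are duplicated with sfc_strdup() so that secondary
 * processes can look them up.
 */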
1677 sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
1679 struct sfc_adapter *sa = dev->data->dev_private;
1680 unsigned int avail_caps = 0;
1681 const char *rx_name = NULL;
1682 const char *tx_name = NULL;
1685 switch (sa->family) {
1686 case EFX_FAMILY_HUNTINGTON:
1687 case EFX_FAMILY_MEDFORD:
1688 avail_caps |= SFC_DP_HW_FW_CAP_EF10;
1694 rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
1695 sfc_kvarg_string_handler, &rx_name);
1697 goto fail_kvarg_rx_datapath;
1699 if (rx_name != NULL) {
1700 sa->dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
1701 if (sa->dp_rx == NULL) {
1702 sfc_err(sa, "Rx datapath %s not found", rx_name);
1706 if (!sfc_dp_match_hw_fw_caps(&sa->dp_rx->dp, avail_caps)) {
1708 "Insufficient Hw/FW capabilities to use Rx datapath %s",
1711 goto fail_dp_rx_caps;
1714 sa->dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
1715 if (sa->dp_rx == NULL) {
1716 sfc_err(sa, "Rx datapath by caps %#x not found",
1723 sa->dp_rx_name = sfc_strdup(sa->dp_rx->dp.name);
1724 if (sa->dp_rx_name == NULL) {
1726 goto fail_dp_rx_name;
1729 sfc_info(sa, "use %s Rx datapath", sa->dp_rx_name);
1731 dev->rx_pkt_burst = sa->dp_rx->pkt_burst;
1733 rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
1734 sfc_kvarg_string_handler, &tx_name);
1736 goto fail_kvarg_tx_datapath;
1738 if (tx_name != NULL) {
1739 sa->dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
1740 if (sa->dp_tx == NULL) {
1741 sfc_err(sa, "Tx datapath %s not found", tx_name);
1745 if (!sfc_dp_match_hw_fw_caps(&sa->dp_tx->dp, avail_caps)) {
1747 "Insufficient Hw/FW capabilities to use Tx datapath %s",
1750 goto fail_dp_tx_caps;
1753 sa->dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
1754 if (sa->dp_tx == NULL) {
1755 sfc_err(sa, "Tx datapath by caps %#x not found",
1762 sa->dp_tx_name = sfc_strdup(sa->dp_tx->dp.name);
1763 if (sa->dp_tx_name == NULL) {
1765 goto fail_dp_tx_name;
1768 sfc_info(sa, "use %s Tx datapath", sa->dp_tx_name);
1770 dev->tx_pkt_burst = sa->dp_tx->pkt_burst;
1772 dev->dev_ops = &sfc_eth_dev_ops;
1781 fail_kvarg_tx_datapath:
1782 rte_free(sa->dp_rx_name);
1783 sa->dp_rx_name = NULL;
1790 fail_kvarg_rx_datapath:
1795 sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
1797 struct sfc_adapter *sa = dev->data->dev_private;
1799 dev->dev_ops = NULL;
1800 dev->rx_pkt_burst = NULL;
1801 dev->tx_pkt_burst = NULL;
1803 rte_free(sa->dp_tx_name);
1804 sa->dp_tx_name = NULL;
1807 rte_free(sa->dp_rx_name);
1808 sa->dp_rx_name = NULL;
1812 static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
1813 .rxq_info_get = sfc_rx_queue_info_get,
1814 .txq_info_get = sfc_tx_queue_info_get,
1818 sfc_eth_dev_secondary_set_ops(struct rte_eth_dev *dev)
1821 * Device private data has really many process-local pointers.
1822 * Below code should be extremely careful to use data located
1823 * in shared memory only.
1825 struct sfc_adapter *sa = dev->data->dev_private;
1826 const struct sfc_dp_rx *dp_rx;
1827 const struct sfc_dp_tx *dp_tx;
1830 dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sa->dp_rx_name);
1831 if (dp_rx == NULL) {
1832 sfc_err(sa, "cannot find %s Rx datapath", sa->dp_tx_name);
1836 if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
1837 sfc_err(sa, "%s Rx datapath does not support multi-process",
1840 goto fail_dp_rx_multi_process;
1843 dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sa->dp_tx_name);
1844 if (dp_tx == NULL) {
1845 sfc_err(sa, "cannot find %s Tx datapath", sa->dp_tx_name);
1849 if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
1850 sfc_err(sa, "%s Tx datapath does not support multi-process",
1853 goto fail_dp_tx_multi_process;
1856 dev->rx_pkt_burst = dp_rx->pkt_burst;
1857 dev->tx_pkt_burst = dp_tx->pkt_burst;
1858 dev->dev_ops = &sfc_eth_dev_secondary_ops;
1862 fail_dp_tx_multi_process:
1864 fail_dp_rx_multi_process:
1870 sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
1872 dev->dev_ops = NULL;
1873 dev->tx_pkt_burst = NULL;
1874 dev->rx_pkt_burst = NULL;
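/*
 * One-time registration of the available datapath implementations.
 * EF10 variants are registered ahead of the libefx-based ones so that
 * they are preferred when the capabilities allow.
 */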
1878 sfc_register_dp(void)
1881 if (TAILQ_EMPTY(&sfc_dp_head)) {
1882 /* Prefer EF10 datapath */
1883 sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
1884 sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
1886 sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
1887 sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
1888 sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
1893 sfc_eth_dev_init(struct rte_eth_dev *dev)
1895 struct sfc_adapter *sa = dev->data->dev_private;
1896 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1898 const efx_nic_cfg_t *encp;
1899 const struct ether_addr *from;
1903 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1904 return -sfc_eth_dev_secondary_set_ops(dev);
1906 /* Required for logging */
1907 sa->pci_addr = pci_dev->addr;
1908 sa->port_id = dev->data->port_id;
1912 /* Copy PCI device info to the dev->data */
1913 rte_eth_copy_pci_info(dev, pci_dev);
1915 rc = sfc_kvargs_parse(sa);
1917 goto fail_kvargs_parse;
1919 rc = sfc_kvargs_process(sa, SFC_KVARG_DEBUG_INIT,
1920 sfc_kvarg_bool_handler, &sa->debug_init);
1922 goto fail_kvarg_debug_init;
1924 sfc_log_init(sa, "entry");
1926 dev->data->mac_addrs = rte_zmalloc("sfc", ETHER_ADDR_LEN, 0);
1927 if (dev->data->mac_addrs == NULL) {
1929 goto fail_mac_addrs;
1932 sfc_adapter_lock_init(sa);
1933 sfc_adapter_lock(sa);
1935 sfc_log_init(sa, "probing");
1940 sfc_log_init(sa, "set device ops");
1941 rc = sfc_eth_dev_set_ops(dev);
1945 sfc_log_init(sa, "attaching");
1946 rc = sfc_attach(sa);
1950 encp = efx_nic_cfg_get(sa->nic);
1953 * The arguments are really in reverse order in comparison to
1954 * the Linux kernel. Copy from NIC config to Ethernet device data.
1956 from = (const struct ether_addr *)(encp->enc_mac_addr);
1957 ether_addr_copy(from, &dev->data->mac_addrs[0]);
1959 sfc_adapter_unlock(sa);
1961 sfc_log_init(sa, "done");
1965 sfc_eth_dev_clear_ops(dev);
1971 sfc_adapter_unlock(sa);
1972 sfc_adapter_lock_fini(sa);
1973 rte_free(dev->data->mac_addrs);
1974 dev->data->mac_addrs = NULL;
1977 fail_kvarg_debug_init:
1978 sfc_kvargs_cleanup(sa);
1981 sfc_log_init(sa, "failed %d", rc);
1987 sfc_eth_dev_uninit(struct rte_eth_dev *dev)
1989 struct sfc_adapter *sa;
1991 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
1992 sfc_eth_dev_secondary_clear_ops(dev);
1996 sa = dev->data->dev_private;
1997 sfc_log_init(sa, "entry");
1999 sfc_adapter_lock(sa);
2001 sfc_eth_dev_clear_ops(dev);
2006 rte_free(dev->data->mac_addrs);
2007 dev->data->mac_addrs = NULL;
2009 sfc_kvargs_cleanup(sa);
2011 sfc_adapter_unlock(sa);
2012 sfc_adapter_lock_fini(sa);
2014 sfc_log_init(sa, "done");
2016 /* Required for logging, so cleanup last */
2021 static const struct rte_pci_id pci_id_sfc_efx_map[] = {
2022 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
2023 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
2024 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
2025 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
2026 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
2027 { RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
2028 { .vendor_id = 0 /* sentinel */ }
2031 static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2032 struct rte_pci_device *pci_dev)
2034 return rte_eth_dev_pci_generic_probe(pci_dev,
2035 sizeof(struct sfc_adapter), sfc_eth_dev_init);
2038 static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2040 return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
2043 static struct rte_pci_driver sfc_efx_pmd = {
2044 .id_table = pci_id_sfc_efx_map,
2046 RTE_PCI_DRV_INTR_LSC |
2047 RTE_PCI_DRV_NEED_MAPPING,
2048 .probe = sfc_eth_dev_pci_probe,
2049 .remove = sfc_eth_dev_pci_remove,
2052 RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
2053 RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
2054 RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
2055 RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
2056 SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
2057 SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
2058 SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
2059 SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long> "
2060 SFC_KVARG_MCDI_LOGGING "=" SFC_KVARG_VALUES_BOOL " "
2061 SFC_KVARG_DEBUG_INIT "=" SFC_KVARG_VALUES_BOOL);
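/*
 * Example device argument usage (illustrative only; the kvarg names
 * and values are defined by the SFC_KVARG_* macros in sfc_kvargs.h):
 *   testpmd -w 0000:01:00.0,rx_datapath=ef10,perf_profile=throughput -- -i
 */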