/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* sysconf() */
#include <unistd.h>

#include <rte_errno.h>
#include <rte_alarm.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
int
sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
	      size_t len, int socket_id, efsys_mem_t *esmp)
{
	const struct rte_memzone *mz;

	sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
		     name, id, len, socket_id);

	mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
				      sysconf(_SC_PAGESIZE), socket_id);
	if (mz == NULL) {
		sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
			name, (unsigned int)id, (unsigned int)len, socket_id,
			rte_strerror(rte_errno));
		return ENOMEM;
	}

	esmp->esm_addr = mz->iova;
	if (esmp->esm_addr == RTE_BAD_IOVA) {
		(void)rte_memzone_free(mz);
		return EFAULT;
	}

	esmp->esm_mz = mz;
	esmp->esm_base = mz->addr;

	return 0;
}
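/*
 * Usage sketch (illustrative only; the zone name "my_ring" and the
 * length are made up):
 *
 *	efsys_mem_t mem;
 *	int rc = sfc_dma_alloc(sa, "my_ring", 0, 4096, sa->socket_id, &mem);
 *
 * where rc is 0 on success and a positive errno on failure. Page
 * alignment via sysconf(_SC_PAGESIZE) keeps the reserved zone
 * compatible with IOMMU mapping granularity.
 */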
void
sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
{
	int rc;

	sfc_log_init(sa, "name=%s", esmp->esm_mz->name);

	rc = rte_memzone_free(esmp->esm_mz);
	if (rc != 0)
		sfc_err(sa, "rte_memzone_free() failed: %d", rc);

	memset(esmp, 0, sizeof(*esmp));
}
static uint32_t
sfc_phy_cap_from_link_speeds(uint32_t speeds)
{
	uint32_t phy_caps = 0;

	if (~speeds & ETH_LINK_SPEED_FIXED) {
		phy_caps |= (1 << EFX_PHY_CAP_AN);
		/*
		 * If no speeds are specified in the mask, any supported
		 * speed may be negotiated
		 */
		if (speeds == ETH_LINK_SPEED_AUTONEG)
			phy_caps |=
				(1 << EFX_PHY_CAP_1000FDX) |
				(1 << EFX_PHY_CAP_10000FDX) |
				(1 << EFX_PHY_CAP_25000FDX) |
				(1 << EFX_PHY_CAP_40000FDX) |
				(1 << EFX_PHY_CAP_50000FDX) |
				(1 << EFX_PHY_CAP_100000FDX);
	}

	if (speeds & ETH_LINK_SPEED_1G)
		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
	if (speeds & ETH_LINK_SPEED_10G)
		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
	if (speeds & ETH_LINK_SPEED_25G)
		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
	if (speeds & ETH_LINK_SPEED_40G)
		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
	if (speeds & ETH_LINK_SPEED_50G)
		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
	if (speeds & ETH_LINK_SPEED_100G)
		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);

	return phy_caps;
}
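/*
 * Worked example: speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G
 * (ETH_LINK_SPEED_FIXED clear) yields
 *
 *	phy_caps = (1 << EFX_PHY_CAP_AN) |
 *		   (1 << EFX_PHY_CAP_10000FDX) |
 *		   (1 << EFX_PHY_CAP_25000FDX)
 *
 * i.e. autonegotiation restricted to the two requested speeds, while
 * speeds = ETH_LINK_SPEED_AUTONEG (no speed bits set) advertises every
 * supported full-duplex speed.
 */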
/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules.
 */
static int
sfc_check_conf(struct sfc_adapter *sa)
{
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
	int rc = 0;

	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",
			conf->link_speeds);
		rc = EINVAL;
	}

#if !EFSYS_OPT_LOOPBACK
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");
		rc = EINVAL;
	}
#endif

	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");
		rc = EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");
		rc = EINVAL;
	}

	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");
		rc = EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
		rc = EINVAL;
	}

	return rc;
}
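/*
 * Illustrative minimal configuration that passes these checks
 * (port_id is hypothetical):
 *
 *	struct rte_eth_conf conf;
 *
 *	memset(&conf, 0, sizeof(conf));
 *	conf.link_speeds = ETH_LINK_SPEED_AUTONEG;
 *	rc = rte_eth_dev_configure(port_id, 1, 1, &conf);
 *
 * Unsupported knobs (loopback without EFSYS_OPT_LOOPBACK, DCB, Flow
 * Director, Rx queue interrupts) are reported with EINVAL rather than
 * silently ignored.
 */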
/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised.
 *
 * NIC is kept initialized on success to allow other modules to acquire
 * defaults and capabilities.
 */
static int
sfc_estimate_resource_limits(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	int rc;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_evq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	if (sa->tso)
		limits.edl_max_txq_count =
			MIN(limits.edl_max_txq_count,
			    encp->enc_fw_assisted_tso_v2_n_contexts /
			    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_txq_count);

	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
				 &txq_allocated);
	if (rc != 0)
		goto fail_get_vi_pool;

	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);
	evq_allocated--;

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */
	return 0;

fail_get_vi_pool:
	efx_nic_fini(sa->nic);
fail_nic_init:
	return rc;
}
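/*
 * Worked example (hypothetical numbers): with enc_evq_limit = 32 the
 * EVQ budget is 32 / 2 = 16. One EVQ is reserved for management, so
 * edl_max_rxq_count = MIN(enc_rxq_limit, (16 - 1) / 2) = 7 (assuming
 * enc_rxq_limit >= 7) and edl_max_txq_count covers the remaining
 * 16 - 1 - 7 = 8 queues. If firmware then grants 12 EVQs, one is again
 * set aside for management, leaving rxq_max = MIN(7, 11 / 2) = 5 and
 * txq_max = MIN(8, 11 - 5) = 6.
 */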
static int
sfc_set_drv_limits(struct sfc_adapter *sa)
{
	const struct rte_eth_dev_data *data = sa->eth_dev->data;
	efx_drv_limits_t lim;

	memset(&lim, 0, sizeof(lim));

	/* Limits are strict since they take the initial estimation
	 * into account
	 */
	lim.edl_min_evq_count = lim.edl_max_evq_count =
		1 + data->nb_rx_queues + data->nb_tx_queues;
	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;

	return efx_nic_set_drv_limits(sa->nic, &lim);
}
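/*
 * Example (hypothetical counts): with nb_rx_queues = 4 and
 * nb_tx_queues = 4 the driver requests exactly 4 RxQs, 4 TxQs and
 * 1 + 4 + 4 = 9 EVQs. Since min and max are equal, firmware is
 * expected to grant precisely this set; anything less surfaces as an
 * initialization failure rather than a silently degraded queue count.
 */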
static int
sfc_set_fw_subvariant(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
	unsigned int txq_index;
	efx_nic_fw_subvariant_t req_fw_subvariant;
	efx_nic_fw_subvariant_t cur_fw_subvariant;
	int rc;

	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
		sfc_info(sa, "no-Tx-checksum subvariant not supported");
		return 0;
	}

	for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
		struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];

		if (txq_info->state & SFC_TXQ_INITIALIZED)
			tx_offloads |= txq_info->offloads;
	}

	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
	else
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;

	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to get FW subvariant: %d", rc);
		return rc;
	}

	sfc_info(sa, "FW subvariant is %u vs required %u",
		 cur_fw_subvariant, req_fw_subvariant);

	if (cur_fw_subvariant == req_fw_subvariant)
		return 0;

	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to set FW subvariant %u: %d",
			req_fw_subvariant, rc);
		return rc;
	}

	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);

	return 0;
}
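/*
 * Decision summary for the function above: if any device-level or
 * per-queue Tx offload requires checksum insertion, the default FW
 * subvariant is needed; otherwise the no-Tx-checksum subvariant is
 * requested, which may perform better on some firmware builds (an
 * expectation, not a guarantee). The subvariant is switched only when
 * the current one differs from the required one.
 */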
static int
sfc_try_start(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);

	sfc_log_init(sa, "set FW subvariant");
	rc = sfc_set_fw_subvariant(sa);
	if (rc != 0)
		goto fail_set_fw_subvariant;

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
	if (rc != 0)
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Refresh (since it may change on NIC reset/restart) the copy of
	 * supported tunnel encapsulations in shared memory to be used
	 * when the set of supported Rx packet type classes is queried.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc != 0)
			goto fail_tunnel_reconfigure;
	}

	rc = sfc_intr_start(sa);
	if (rc != 0)
		goto fail_intr_start;

	rc = sfc_ev_start(sa);
	if (rc != 0)
		goto fail_ev_start;

	rc = sfc_port_start(sa);
	if (rc != 0)
		goto fail_port_start;

	rc = sfc_rx_start(sa);
	if (rc != 0)
		goto fail_rx_start;

	rc = sfc_tx_start(sa);
	if (rc != 0)
		goto fail_tx_start;

	rc = sfc_flow_start(sa);
	if (rc != 0)
		goto fail_flows_insert;

	sfc_log_init(sa, "done");
	return 0;

fail_flows_insert:
	sfc_tx_stop(sa);

fail_tx_start:
	sfc_rx_stop(sa);

fail_rx_start:
	sfc_port_stop(sa);

fail_port_start:
	sfc_ev_stop(sa);

fail_ev_start:
	sfc_intr_stop(sa);

fail_intr_start:
fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);

fail_nic_init:
fail_set_drv_limits:
fail_set_fw_subvariant:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
int
sfc_start(struct sfc_adapter *sa)
{
	unsigned int start_tries = 3;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		break;
	case SFC_ADAPTER_STARTED:
		sfc_notice(sa, "already started");
		return 0;
	default:
		rc = EINVAL;
		goto fail_bad_state;
	}

	sa->state = SFC_ADAPTER_STARTING;

	do {
		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));

	if (rc != 0)
		goto fail_try_start;

	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");
	return 0;

fail_try_start:
	sa->state = SFC_ADAPTER_CONFIGURED;
fail_bad_state:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
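/*
 * Note on the retry loop above: a start attempt may fail transiently
 * (e.g. while firmware is still settling after a reset or a subvariant
 * change), so up to three attempts are made for the retryable errno
 * values before the failure is reported to the caller.
 */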
void
sfc_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		break;
	case SFC_ADAPTER_CONFIGURED:
		sfc_notice(sa, "already stopped");
		return;
	default:
		sfc_err(sa, "stop in unexpected state %u", sa->state);
		SFC_ASSERT(B_FALSE);
		return;
	}

	sa->state = SFC_ADAPTER_STOPPING;

	sfc_flow_stop(sa);
	sfc_tx_stop(sa);
	sfc_rx_stop(sa);
	sfc_port_stop(sa);
	sfc_ev_stop(sa);
	sfc_intr_stop(sa);
	efx_nic_fini(sa->nic);

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
}
static int
sfc_restart(struct sfc_adapter *sa)
{
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)
		return EINVAL;

	sfc_stop(sa);

	rc = sfc_start(sa);
	if (rc != 0)
		sfc_err(sa, "restart failed");

	return rc;
}
static void
sfc_restart_if_required(void *arg)
{
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
				1, 0)) {
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ADAPTER_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
	}
}
void
sfc_schedule_restart(struct sfc_adapter *sa)
{
	int rc;

	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))
		return;

	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
	if (rc == -ENOTSUP)
		sfc_warn(sa, "alarms are not supported, restart is pending");
	else if (rc != 0)
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
	else
		sfc_notice(sa, "restart scheduled");
}
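/*
 * Concurrency note on the two functions above: the test_and_set() in
 * sfc_schedule_restart() transitions the flag 0 -> 1 exactly once, so
 * at most one alarm is pending at a time, while the cmpset(1, 0) in
 * the callback consumes the request atomically. A request raised after
 * the callback has already cleared the flag simply arms a new alarm.
 */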
int
sfc_configure(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
	if (rc != 0)
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
	if (rc != 0)
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
	if (rc != 0)
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
	if (rc != 0)
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
	if (rc != 0)
		goto fail_tx_configure;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
	return 0;

fail_tx_configure:
	sfc_rx_close(sa);

fail_rx_configure:
	sfc_port_close(sa);

fail_port_configure:
	sfc_intr_close(sa);

fail_intr_configure:
fail_check_conf:
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_close(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	sfc_tx_close(sa);
	sfc_rx_close(sa);
	sfc_port_close(sa);
	sfc_intr_close(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
}
static int
sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
{
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res = &pci_dev->mem_resource[membar];

	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
	ebp->esb_rid = membar;
	ebp->esb_dev = pci_dev;
	ebp->esb_base = res->addr;

	return 0;
}

static void
sfc_mem_bar_fini(struct sfc_adapter *sa)
{
	efsys_bar_t *ebp = &sa->mem_bar;

	SFC_BAR_LOCK_DESTROY(ebp);
	memset(ebp, 0, sizeof(*ebp));
}
/*
 * A fixed RSS key which has the property of being symmetric
 * (symmetrical flows are distributed to the same CPU)
 * and also known to give a uniform distribution
 * (a good distribution of traffic between different CPUs)
 */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};
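/*
 * Background (to the best of our knowledge): the Toeplitz hash XORs
 * key windows selected by the input bits. With a key that repeats the
 * 16-bit pattern 0x6d5a, the windows selected for the source
 * address/port and for the destination address/port are identical, so
 * swapping the two halves of the tuple leaves the hash unchanged and
 * both directions of a connection land in the same Rx queue.
 */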
static int
sfc_rss_attach(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc;

	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
	if (rc != 0)
		goto fail_intr_init;

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
	if (rc != 0)
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
	if (rc != 0)
		goto fail_hash_support_get;

	rc = sfc_rx_hash_init(sa);
	if (rc != 0)
		goto fail_rx_hash_init;

	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));

	return 0;

fail_rx_hash_init:
fail_hash_support_get:
fail_scale_support_get:
	efx_rx_fini(sa->nic);

fail_rx_init:
	efx_ev_fini(sa->nic);

fail_ev_init:
	efx_intr_fini(sa->nic);

fail_intr_init:
	return rc;
}

static void
sfc_rss_detach(struct sfc_adapter *sa)
{
	sfc_rx_hash_fini(sa);
}
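/*
 * Note on the pattern above: sfc_rss_attach() brings up the interrupt,
 * event and Rx modules of libefx only temporarily, queries the RSS
 * scale/hash capabilities, and tears the modules down again. Attach
 * time only collects capabilities; the datapath itself is brought up
 * later in sfc_try_start().
 */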
int
sfc_attach(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);
	if (rc != 0)
		goto fail_nic_reset;

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
	if (rc != 0)
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Make a copy of supported tunnel encapsulations in shared
	 * memory to be used when the set of supported Rx packet type
	 * classes is queried.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
		if (!sa->tso)
			sfc_info(sa, "TSO support isn't available on this adapter");
	}

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
	if (rc != 0)
		goto fail_estimate_rsrc_limits;

	sa->evq_max_entries = encp->enc_evq_max_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));

	sa->evq_min_entries = encp->enc_evq_min_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));

	sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));

	sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	sa->txq_min_entries = encp->enc_txq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));

	rc = sfc_intr_attach(sa);
	if (rc != 0)
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);
	if (rc != 0)
		goto fail_ev_attach;

	rc = sfc_port_attach(sa);
	if (rc != 0)
		goto fail_port_attach;

	rc = sfc_rss_attach(sa);
	if (rc != 0)
		goto fail_rss_attach;

	rc = sfc_filter_attach(sa);
	if (rc != 0)
		goto fail_filter_attach;

	sfc_log_init(sa, "fini nic");
	efx_nic_fini(enp);

	sfc_flow_init(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");
	return 0;

fail_filter_attach:
	sfc_rss_detach(sa);

fail_rss_attach:
	sfc_port_detach(sa);

fail_port_attach:
	sfc_ev_detach(sa);

fail_ev_attach:
	sfc_intr_detach(sa);

fail_intr_attach:
	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:
fail_tunnel_init:
	efx_tunnel_fini(sa->nic);

fail_nic_reset:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_flow_fini(sa);

	sfc_filter_detach(sa);
	sfc_rss_detach(sa);
	sfc_port_detach(sa);
	sfc_ev_detach(sa);
	sfc_intr_detach(sa);
	efx_tunnel_fini(sa->nic);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
}
static int
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
		*value = EFX_FW_VARIANT_DPDK;
	else
		return -EINVAL;

	return 0;
}
static int
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
{
	efx_nic_fw_info_t enfi;
	int rc;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return rc;
	else if (!enfi.enfi_dpcpu_fw_ids_valid)
		return ENOTSUP;

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware id
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;
		break;

	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;
		break;

	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;
		break;

	case EFX_RXDP_DPDK_FW_ID:
		*efv = EFX_FW_VARIANT_DPDK;
		break;

	default:
		/*
		 * Other firmware variants are not considered, since they
		 * are not supported in the device parameters
		 */
		*efv = EFX_FW_VARIANT_DONT_CARE;
		break;
	}

	return 0;
}
static const char *
sfc_fw_variant2str(efx_fw_variant_t efv)
{
	switch (efv) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
	case EFX_RXDP_LOW_LATENCY_FW_ID:
		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
	case EFX_RXDP_PACKED_STREAM_FW_ID:
		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
	case EFX_RXDP_DPDK_FW_ID:
		return SFC_KVARG_FW_VARIANT_DPDK;
	default:
		return "unknown";
	}
}
static int
sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
{
	int rc;
	long value;

	value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
				sfc_kvarg_long_handler, &value);
	if (rc != 0)
		return rc;

	if (value < 0 ||
	    (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
		sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
			    "value (%ld)", value);
		sfc_err(sa, "it must be in the range from 0 to %u",
			EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
		return EINVAL;
	}

	sa->rxd_wait_timeout_ns = value;
	return 0;
}
static int
sfc_nic_probe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;
	int rc;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
				&preferred_efv);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
		return rc;
	}

	rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
	if (rc != 0)
		return rc;

	rc = efx_nic_probe(enp, preferred_efv);
	if (rc == EACCES) {
		/* Unprivileged functions cannot set FW variant */
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	}
	if (rc != 0)
		return rc;

	rc = sfc_get_fw_variant(sa, &efv);
	if (rc == ENOTSUP) {
		sfc_warn(sa, "FW variant can not be obtained");
		return 0;
	}
	if (rc != 0)
		return rc;

	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));
	}

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));

	return 0;
}
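/*
 * Usage sketch (illustrative): the preferred variant comes from the
 * fw_variant device argument; the accepted spellings are the
 * SFC_KVARG_FW_VARIANT_* strings defined in sfc_kvargs.h. An
 * unprivileged function cannot select a variant, so EACCES from the
 * first probe triggers a retry with EFX_FW_VARIANT_DONT_CARE, and a
 * mismatch between the requested and the running variant is only
 * warned about, not treated as fatal.
 */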
int
sfc_probe(struct sfc_adapter *sa)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
	unsigned int membar;
	efx_nic_t *enp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family, &membar);
	if (rc != 0)
		goto fail_family;
	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, membar);
	if (rc != 0)
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
	if (rc != 0)
		goto fail_nic_create;
	sa->nic = enp;

	rc = sfc_mcdi_init(sa);
	if (rc != 0)
		goto fail_mcdi_init;

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);
	if (rc != 0)
		goto fail_nic_probe;

	sfc_log_init(sa, "done");
	return 0;

fail_nic_probe:
	sfc_mcdi_fini(sa);

fail_mcdi_init:
	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

fail_nic_create:
	sfc_mem_bar_fini(sa);

fail_mem_bar_init:
fail_family:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_unprobe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	sfc_mcdi_fini(sa);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free the device private data which is passed as the
	 * callback opaque data. A new alarm cannot be scheduled since
	 * MCDI is shut down.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
}
uint32_t
sfc_register_logtype(const struct rte_pci_addr *pci_addr,
		     const char *lt_prefix_str, uint32_t ll_default)
{
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;
	char *lt_str = NULL;
	int ret;

	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
	} else {
		return sfc_logtype_driver;
	}

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
	if (lt_str == NULL)
		return sfc_logtype_driver;

	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
	rte_free(lt_str);

	if (ret < 0)
		return sfc_logtype_driver;

	return ret;
}
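/*
 * Worked example (hypothetical prefix): with lt_prefix_str =
 * "pmd.net.sfc" (11 characters), lt_prefix_str_size is bumped to 12 to
 * cover the '.' separator and lt_str_size_max = 12 + PCI_PRI_STR_SIZE
 * + 1, which leaves room for a full PCI address string plus the
 * terminating NUL, producing a logtype name such as
 * "pmd.net.sfc.0000:01:00.0".
 */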