1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
23 #include "sfc_kvargs.h"
24 #include "sfc_tweak.h"
28 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
29 size_t len, int socket_id, efsys_mem_t *esmp)
31 const struct rte_memzone *mz;
33 sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
34 name, id, len, socket_id);
36 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
37 sysconf(_SC_PAGESIZE), socket_id);
39 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
40 name, (unsigned int)id, (unsigned int)len, socket_id,
41 rte_strerror(rte_errno));
45 esmp->esm_addr = mz->iova;
46 if (esmp->esm_addr == RTE_BAD_IOVA) {
47 (void)rte_memzone_free(mz);
52 esmp->esm_base = mz->addr;
58 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
62 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
64 rc = rte_memzone_free(esmp->esm_mz);
66 sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
68 memset(esmp, 0, sizeof(*esmp));
/*
 * Convert an ethdev link speeds bitmask (ETH_LINK_SPEED_*) into the
 * corresponding libefx PHY capabilities bitmask (EFX_PHY_CAP_*).
 * Only full-duplex capabilities are produced.
 */
sfc_phy_cap_from_link_speeds(uint32_t speeds)
	uint32_t phy_caps = 0;

	/* Autonegotiation is advertised unless a fixed speed is forced */
	if (~speeds & ETH_LINK_SPEED_FIXED) {
		phy_caps |= (1 << EFX_PHY_CAP_AN);
		* If no speeds are specified in the mask, any supported
		if (speeds == ETH_LINK_SPEED_AUTONEG)
			(1 << EFX_PHY_CAP_1000FDX) |
			(1 << EFX_PHY_CAP_10000FDX) |
			(1 << EFX_PHY_CAP_25000FDX) |
			(1 << EFX_PHY_CAP_40000FDX) |
			(1 << EFX_PHY_CAP_50000FDX) |
			(1 << EFX_PHY_CAP_100000FDX);
	/* Map each explicitly requested speed to its capability bit */
	if (speeds & ETH_LINK_SPEED_1G)
		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
	if (speeds & ETH_LINK_SPEED_10G)
		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
	if (speeds & ETH_LINK_SPEED_25G)
		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
	if (speeds & ETH_LINK_SPEED_40G)
		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
	if (speeds & ETH_LINK_SPEED_50G)
		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
	if (speeds & ETH_LINK_SPEED_100G)
		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
108 * Check requested device level configuration.
109 * Receive and transmit configuration is checked in corresponding
/*
 * Validate the requested device-level configuration from
 * sa->eth_dev->data->dev_conf. Per-queue Rx/Tx configuration is
 * checked in the corresponding modules.
 */
sfc_check_conf(struct sfc_adapter *sa)
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;

	/* Advertised PHY caps = requested speeds limited by HW mask */
	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	/* Something besides bare autoneg must remain after masking */
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",
#if !EFSYS_OPT_LOOPBACK
	/* Loopback requires libefx loopback support compiled in */
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");
	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");
	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");
	/* LSC interrupt needs line or message interrupts available */
	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");
	if (conf->intr_conf.rxq != 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
160 * Find out maximum number of receive and transmit queues which could be
163 * NIC is kept initialized on success to allow other modules acquire
164 * defaults and capabilities.
/*
 * Find out the maximum number of Rx and Tx queues which could be
 * advertised to applications. On success the NIC is left initialized
 * so that other modules may query defaults and capabilities.
 */
sfc_estimate_resource_limits(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
	/* Tx queues are further bounded by the per-PF share of FW-assisted
	 * TSO v2 contexts */
	limits.edl_max_txq_count =
		MIN(limits.edl_max_txq_count,
		    encp->enc_fw_assisted_tso_v2_n_contexts /
		    encp->enc_hw_pf_count);
	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);
	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
		goto fail_get_vi_pool;
	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);
	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
	/* Keep NIC initialized */
	efx_nic_fini(sa->nic);
/*
 * Set strict (min == max) driver resource limits derived from the
 * number of configured Rx/Tx queues plus one management event queue.
 */
sfc_set_drv_limits(struct sfc_adapter *sa)
	const struct rte_eth_dev_data *data = sa->eth_dev->data;
	efx_drv_limits_t lim;

	memset(&lim, 0, sizeof(lim));

	/* Limits are strict since take into account initial estimation */
	lim.edl_min_evq_count = lim.edl_max_evq_count =
		1 + data->nb_rx_queues + data->nb_tx_queues;
	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;

	return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Select and apply the firmware subvariant: if no Tx checksum offloads
 * are requested (device-level or on any configured queue), the
 * no-Tx-checksum subvariant may be chosen; otherwise the default one.
 */
sfc_set_fw_subvariant(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
	unsigned int txq_index;
	efx_nic_fw_subvariant_t req_fw_subvariant;
	efx_nic_fw_subvariant_t cur_fw_subvariant;

	/* Nothing to choose if the NIC does not support the subvariant */
	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
		sfc_info(sa, "no-Tx-checksum subvariant not supported");
	/* Accumulate per-queue Tx offloads over all configured queues */
	for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) {
		struct sfc_txq_info *txq_info = &sa->txq_info[txq_index];
		if (txq_info->txq != NULL)
			tx_offloads |= txq_info->txq->offloads;
	/* Any checksum offload requires the default (full) subvariant */
	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
		sfc_err(sa, "failed to get FW subvariant: %d", rc);
	sfc_info(sa, "FW subvariant is %u vs required %u",
		 cur_fw_subvariant, req_fw_subvariant);
	/* Already running the required subvariant: nothing to do */
	if (cur_fw_subvariant == req_fw_subvariant)
	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
		sfc_err(sa, "failed to set FW subvariant %u: %d",
			req_fw_subvariant, rc);
	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
/*
 * Single attempt to start the adapter: set FW subvariant and driver
 * limits, initialize the NIC, apply tunnel config if supported, then
 * bring up interrupts, events, port, Rx, Tx and flow rules in order.
 * Unwinds through fail labels on error.
 */
sfc_try_start(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;

	sfc_log_init(sa, "entry");
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
	sfc_log_init(sa, "set FW subvariant");
	rc = sfc_set_fw_subvariant(sa);
		goto fail_set_fw_subvariant;
	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
		goto fail_set_drv_limits;
	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	/* Tunnel (UDP encap) configuration is applied after NIC init */
	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
			goto fail_tunnel_reconfigure;
	rc = sfc_intr_start(sa);
		goto fail_intr_start;
	rc = sfc_ev_start(sa);
	rc = sfc_port_start(sa);
		goto fail_port_start;
	rc = sfc_rx_start(sa);
	rc = sfc_tx_start(sa);
	rc = sfc_flow_start(sa);
		goto fail_flows_insert;
	sfc_log_init(sa, "done");
fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);
fail_set_fw_subvariant:
	sfc_log_init(sa, "failed %d", rc);
/*
 * Start the adapter, retrying sfc_try_start() up to 3 times on
 * transient errors (EIO/EAGAIN/ENOENT/EINVAL). State goes
 * CONFIGURED -> STARTING -> STARTED (back to CONFIGURED on failure).
 */
sfc_start(struct sfc_adapter *sa)
	unsigned int start_tries = 3;

	sfc_log_init(sa, "entry");
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	case SFC_ADAPTER_CONFIGURED:
	case SFC_ADAPTER_STARTED:
		sfc_notice(sa, "already started");
	sa->state = SFC_ADAPTER_STARTING;
		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");
	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Stop a started adapter: tear down datapath modules and finalize the
 * NIC. No-op (with notice) if already stopped; unexpected states are
 * reported as errors. State STARTED -> STOPPING -> CONFIGURED.
 */
sfc_stop(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	case SFC_ADAPTER_STARTED:
	case SFC_ADAPTER_CONFIGURED:
		sfc_notice(sa, "already stopped");
		sfc_err(sa, "stop in unexpected state %u", sa->state);
	sa->state = SFC_ADAPTER_STOPPING;
	efx_nic_fini(sa->nic);
	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
/*
 * Restart (stop then start) the adapter. Only valid when the adapter
 * is currently started; the caller must hold the adapter lock.
 */
sfc_restart(struct sfc_adapter *sa)
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	/* Restart makes sense only for a started adapter */
	if (sa->state != SFC_ADAPTER_STARTED)
		sfc_err(sa, "restart failed");
/*
 * EAL alarm callback: perform a restart if one was scheduled.
 * Atomically consumes the restart_required flag, then takes the
 * adapter lock itself since alarms run outside the control path.
 */
sfc_restart_if_required(void *arg)
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ADAPTER_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
/*
 * Request an asynchronous adapter restart via an EAL alarm. Intended
 * for contexts which cannot restart synchronously. If the alarm
 * cannot be armed the restart stays pending via restart_required.
 */
sfc_schedule_restart(struct sfc_adapter *sa)
	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))
	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
		sfc_warn(sa, "alarms are not supported, restart is pending");
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
		sfc_notice(sa, "restart scheduled");
/*
 * Apply device configuration: validate it, then configure interrupt,
 * port, Rx and Tx modules in order. State INITIALIZED/CONFIGURED ->
 * CONFIGURING -> CONFIGURED (back to INITIALIZED on failure).
 */
sfc_configure(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;
	rc = sfc_check_conf(sa);
		goto fail_check_conf;
	rc = sfc_intr_configure(sa);
		goto fail_intr_configure;
	rc = sfc_port_configure(sa);
		goto fail_port_configure;
	rc = sfc_rx_configure(sa);
		goto fail_rx_configure;
	rc = sfc_tx_configure(sa);
		goto fail_tx_configure;
	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Close a configured adapter: release module-level configuration.
 * State CONFIGURED -> CLOSING -> INITIALIZED.
 */
sfc_close(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
/*
 * Set up sa->mem_bar to describe the PCI memory BAR given by 'membar'
 * so libefx can access NIC registers through it.
 */
sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res = &pci_dev->mem_resource[membar];

	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
	ebp->esb_rid = membar;
	ebp->esb_dev = pci_dev;
	ebp->esb_base = res->addr;
/*
 * Release memory BAR resources (lock) and clear the descriptor.
 */
sfc_mem_bar_fini(struct sfc_adapter *sa)
	efsys_bar_t *ebp = &sa->mem_bar;

	SFC_BAR_LOCK_DESTROY(ebp);
	memset(ebp, 0, sizeof(*ebp));
628 * A fixed RSS key which has a property of being symmetric
629 * (symmetrical flows are distributed to the same CPU)
630 * and also known to give a uniform distribution
631 * (a good distribution of traffic between different CPUs)
/* Repeating 0x6d5a pattern: well-known symmetric RSS hash key */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
/*
 * Discover RSS capabilities (scale context type and hash support) and
 * install the default RSS key. Temporarily brings up the libefx
 * intr/ev/rx modules to query the NIC and shuts them down again.
 */
sfc_rss_attach(struct sfc_adapter *sa)
	struct sfc_rss *rss = &sa->rss;

	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
	rc = efx_ev_init(sa->nic);
	rc = efx_rx_init(sa->nic);
	rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
		goto fail_scale_support_get;
	rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
		goto fail_hash_support_get;
	rc = sfc_rx_hash_init(sa);
		goto fail_rx_hash_init;
	/* Queries done: shut down the temporarily started modules */
	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);
	rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
fail_hash_support_get:
fail_scale_support_get:
	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);
/* Undo sfc_rss_attach(): release Rx hash state */
sfc_rss_detach(struct sfc_adapter *sa)
	sfc_rx_hash_fini(sa);
/*
 * Attach the driver to a probed NIC: reset it, init tunnel support,
 * read capabilities, estimate resource limits, then attach interrupt,
 * event, port, RSS and filter sub-modules. Ends in state INITIALIZED.
 */
sfc_attach(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	efx_mcdi_new_epoch(enp);
	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);
	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
		goto fail_tunnel_init;
	encp = efx_nic_cfg_get(sa->nic);
	/* TSO requires both datapath feature and FW-assisted TSO v2 */
	if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
			"TSO support isn't available on this adapter");
	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
		goto fail_estimate_rsrc_limits;
	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
	rc = sfc_intr_attach(sa);
		goto fail_intr_attach;
	rc = sfc_ev_attach(sa);
	rc = sfc_port_attach(sa);
		goto fail_port_attach;
	rc = sfc_rss_attach(sa);
		goto fail_rss_attach;
	rc = sfc_filter_attach(sa);
		goto fail_filter_attach;
	sfc_log_init(sa, "fini nic");
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
	efx_nic_fini(sa->nic);
fail_estimate_rsrc_limits:
	efx_tunnel_fini(sa->nic);
	sfc_log_init(sa, "failed %d", rc);
/*
 * Detach driver sub-modules in reverse attach order and finalize
 * tunnel support. Ends in state UNINITIALIZED.
 */
sfc_detach(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	sfc_filter_detach(sa);
	efx_tunnel_fini(sa->nic);
	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * kvargs handler: parse the fw_variant device argument string
 * (case-insensitive) into an efx_fw_variant_t stored via 'opaque'.
 */
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
		*value = EFX_FW_VARIANT_DPDK;
/*
 * Determine the running firmware variant from the RxDPCPU firmware ID
 * reported by the NIC. Variants without a device-parameter mapping
 * are reported as DONT_CARE.
 */
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
	efx_nic_fw_info_t enfi;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	/* Without valid DPCPU FW IDs the variant cannot be identified */
	else if (!enfi.enfi_dpcpu_fw_ids_valid)
	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;
	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;
	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;
	case EFX_RXDP_DPDK_FW_ID:
		*efv = EFX_FW_VARIANT_DPDK;
	/*
	 * Other firmware variants are not considered, since they are
	 * not supported in the device parameters
	 */
	*efv = EFX_FW_VARIANT_DONT_CARE;
/*
 * Map a firmware variant to its device-argument string.
 * NOTE(review): cases use EFX_RXDP_*_FW_ID constants although the
 * parameter is efx_fw_variant_t — presumably the numeric values
 * coincide; confirm against libefx definitions.
 */
sfc_fw_variant2str(efx_fw_variant_t efv)
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
	case EFX_RXDP_LOW_LATENCY_FW_ID:
		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
	case EFX_RXDP_PACKED_STREAM_FW_ID:
		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
	case EFX_RXDP_DPDK_FW_ID:
		return SFC_KVARG_FW_VARIANT_DPDK;
/*
 * Parse the rxd_wait_timeout_ns device argument, validate it against
 * [0, EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX] and store the result in
 * sa->rxd_wait_timeout_ns (default SFC_RXD_WAIT_TIMEOUT_NS_DEF).
 */
sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
	value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;
	rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
				sfc_kvarg_long_handler, &value);
	    (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
		sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
			"was set (%ld);", value);
		sfc_err(sa, "it must not be less than 0 or greater than %u",
			EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
	sa->rxd_wait_timeout_ns = value;
/*
 * Probe the NIC with the firmware variant preferred via device args,
 * falling back to DONT_CARE (unprivileged functions cannot select a
 * variant), then check which variant actually runs and warn on
 * mismatch.
 */
sfc_nic_probe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
	rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
	rc = efx_nic_probe(enp, preferred_efv);
	/* Unprivileged functions cannot set FW variant */
	rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	rc = sfc_get_fw_variant(sa, &efv);
		sfc_warn(sa, "FW variant can not be obtained");
	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));
	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * Device probe: determine NIC family from PCI vendor/device IDs, map
 * the memory BAR, create the libefx NIC object, bring up MCDI and
 * probe the NIC. Unwinds through fail labels on error.
 */
sfc_probe(struct sfc_adapter *sa)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);

	sfc_log_init(sa, "entry");
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);
	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family, &membar);
	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);
	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, membar);
		goto fail_mem_bar_init;
	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
		goto fail_nic_create;
	rc = sfc_mcdi_init(sa);
		goto fail_mcdi_init;
	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);
		goto fail_nic_probe;
	sfc_log_init(sa, "done");
	sfc_log_init(sa, "destroy nic");
	efx_nic_destroy(enp);
	sfc_mem_bar_fini(sa);
	sfc_log_init(sa, "failed %d", rc);
/*
 * Undo sfc_probe(): unprobe the NIC, cancel any pending restart
 * alarm (its opaque data points at the device private about to be
 * freed), destroy the NIC object and unmap the memory BAR.
 */
sfc_unprobe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);
	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free device private which is passed as the callback
	 * opaque data. A new alarm cannot be scheduled since MCDI is
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);
	sfc_log_init(sa, "destroy nic");
	efx_nic_destroy(enp);
	sfc_mem_bar_fini(sa);
	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device log type named "<prefix>.<PCI address>" with
 * the given default level. Falls back to RTE_LOGTYPE_PMD on overflow,
 * allocation failure or registration failure.
 */
sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
		     uint32_t ll_default)
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;
	char *lt_str = NULL;

	/* Guard against size_t overflow when sizing the name buffer */
	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
		return RTE_LOGTYPE_PMD;
	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
		return RTE_LOGTYPE_PMD;
	/* Compose "<prefix>." then append the PCI device name */
	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';
	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
	return (ret < 0) ? RTE_LOGTYPE_PMD : ret;