1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
23 #include "sfc_kvargs.h"
24 #include "sfc_tweak.h"
28 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
29 size_t len, int socket_id, efsys_mem_t *esmp)
31 const struct rte_memzone *mz;
33 sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
34 name, id, len, socket_id);
36 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
37 sysconf(_SC_PAGESIZE), socket_id);
39 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
40 name, (unsigned int)id, (unsigned int)len, socket_id,
41 rte_strerror(rte_errno));
45 esmp->esm_addr = mz->iova;
46 if (esmp->esm_addr == RTE_BAD_IOVA) {
47 (void)rte_memzone_free(mz);
52 esmp->esm_base = mz->addr;
58 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
62 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
64 rc = rte_memzone_free(esmp->esm_mz);
66 sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
68 memset(esmp, 0, sizeof(*esmp));
/*
 * Translate an ethdev link speed mask (ETH_LINK_SPEED_*) into libefx
 * PHY capability bits (EFX_PHY_CAP_*).  Autonegotiation is advertised
 * whenever the FIXED bit is clear; a bare AUTONEG mask advertises
 * every speed listed below.
 * NOTE(review): this excerpt elides some original lines (the numeric
 * prefixes are original file line numbers); comments describe only
 * what is visible here.
 */
72 sfc_phy_cap_from_link_speeds(uint32_t speeds)
74 uint32_t phy_caps = 0;
/* FIXED bit clear => autonegotiation requested */
76 if (~speeds & ETH_LINK_SPEED_FIXED) {
77 phy_caps |= (1 << EFX_PHY_CAP_AN);
79 * If no speeds are specified in the mask, any supported
82 if (speeds == ETH_LINK_SPEED_AUTONEG)
84 (1 << EFX_PHY_CAP_1000FDX) |
85 (1 << EFX_PHY_CAP_10000FDX) |
86 (1 << EFX_PHY_CAP_25000FDX) |
87 (1 << EFX_PHY_CAP_40000FDX) |
88 (1 << EFX_PHY_CAP_50000FDX) |
89 (1 << EFX_PHY_CAP_100000FDX);
/* Map each explicitly requested speed to its full-duplex PHY cap */
91 if (speeds & ETH_LINK_SPEED_1G)
92 phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
93 if (speeds & ETH_LINK_SPEED_10G)
94 phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
95 if (speeds & ETH_LINK_SPEED_25G)
96 phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
97 if (speeds & ETH_LINK_SPEED_40G)
98 phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
99 if (speeds & ETH_LINK_SPEED_50G)
100 phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
101 if (speeds & ETH_LINK_SPEED_100G)
102 phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
/*
 * Validate the device-level configuration requested by the application
 * and reject features this PMD does not support (loopback without
 * EFSYS_OPT_LOOPBACK, DCB/PFC, Flow Director, Rx queue interrupts, and
 * LSC interrupts when the interrupt type cannot deliver them).
 * Also derives the advertised PHY capabilities from the requested link
 * speeds, masked by what the adapter actually supports.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
108  * Check requested device level configuration.
109  * Receive and transmit configuration is checked in corresponding
113 sfc_check_conf(struct sfc_adapter *sa)
115 const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
/* Advertise only speeds supported by both the user and the PHY */
118 sa->port.phy_adv_cap =
119 sfc_phy_cap_from_link_speeds(conf->link_speeds) &
120 sa->port.phy_adv_cap_mask;
/* AN bit alone is not a usable speed - at least one speed must remain */
121 if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
122 sfc_err(sa, "No link speeds from mask %#x are supported",
127 #if !EFSYS_OPT_LOOPBACK
128 if (conf->lpbk_mode != 0) {
129 sfc_err(sa, "Loopback not supported");
134 if (conf->dcb_capability_en != 0) {
135 sfc_err(sa, "Priority-based flow control not supported");
139 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
140 sfc_err(sa, "Flow Director not supported");
/* LSC interrupts need line or message interrupts from the NIC */
144 if ((conf->intr_conf.lsc != 0) &&
145 (sa->intr.type != EFX_INTR_LINE) &&
146 (sa->intr.type != EFX_INTR_MESSAGE)) {
147 sfc_err(sa, "Link status change interrupt not supported");
151 if (conf->intr_conf.rxq != 0) {
152 sfc_err(sa, "Receive queue interrupt not supported");
/*
 * Estimate the maximum number of Rx/Tx queues this function can use.
 * Sets conservative driver limits, initializes the NIC to learn the
 * firmware-assigned VI pool, then splits the event queues between Rx
 * and Tx.  On success the NIC is left initialized so later attach
 * stages can query defaults and capabilities.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
160  * Find out maximum number of receive and transmit queues which could be
163  * NIC is kept initialized on success to allow other modules acquire
164  * defaults and capabilities.
167 sfc_estimate_resource_limits(struct sfc_adapter *sa)
169 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
170 efx_drv_limits_t limits;
172 uint32_t evq_allocated;
173 uint32_t rxq_allocated;
174 uint32_t txq_allocated;
176 memset(&limits, 0, sizeof(limits));
178 /* Request at least one Rx and Tx queue */
179 limits.edl_min_rxq_count = 1;
180 limits.edl_min_txq_count = 1;
181 /* Management event queue plus event queue for each Tx and Rx queue */
182 limits.edl_min_evq_count =
183 1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
185 /* Divide by number of functions to guarantee that all functions
186  * will get promised resources
188 /* FIXME Divide by number of functions (not 2) below */
189 limits.edl_max_evq_count = encp->enc_evq_limit / 2;
190 SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
192 /* Split equally between receive and transmit */
/* -1 accounts for the management EVQ which carries no traffic */
193 limits.edl_max_rxq_count =
194 MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
195 SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
197 limits.edl_max_txq_count =
198 MIN(encp->enc_txq_limit,
199 limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
/* TSO v2 contexts are shared per PF; cap Tx queues accordingly */
202 limits.edl_max_txq_count =
203 MIN(limits.edl_max_txq_count,
204 encp->enc_fw_assisted_tso_v2_n_contexts /
205 encp->enc_hw_pf_count);
207 SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
209 /* Configure the minimum required resources needed for the
210  * driver to operate, and the maximum desired resources that the
211  * driver is capable of using.
213 efx_nic_set_drv_limits(sa->nic, &limits);
215 sfc_log_init(sa, "init nic");
216 rc = efx_nic_init(sa->nic);
220 /* Find resource dimensions assigned by firmware to this function */
221 rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
224 goto fail_get_vi_pool;
226 /* It still may allocate more than maximum, ensure limit */
227 evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
228 rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
229 txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
231 /* Subtract management EVQ not used for traffic */
232 SFC_ASSERT(evq_allocated > 0);
235 /* Right now we use separate EVQ for Rx and Tx */
236 sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
237 sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
239 /* Keep NIC initialized */
/* Error path: tear down the NIC before reporting failure */
244 efx_nic_fini(sa->nic);
/*
 * Pin driver resource limits to the exact queue counts configured by
 * the application (min == max), since an initial estimate has already
 * been done.  One extra EVQ is reserved for management events.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
249 sfc_set_drv_limits(struct sfc_adapter *sa)
251 const struct rte_eth_dev_data *data = sa->eth_dev->data;
252 efx_drv_limits_t lim;
254 memset(&lim, 0, sizeof(lim));
256 /* Limits are strict since take into account initial estimation */
257 lim.edl_min_evq_count = lim.edl_max_evq_count =
258 1 + data->nb_rx_queues + data->nb_tx_queues;
259 lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
260 lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
262 return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Select the firmware subvariant best matching the configured Tx
 * offloads: if no checksum offloads are requested (device-level or on
 * any initialized Tx queue) switch to the no-Tx-checksum subvariant,
 * otherwise use the default.  No-op if the NIC does not support the
 * no-Tx-checksum subvariant or it is already in the requested state.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
266 sfc_set_fw_subvariant(struct sfc_adapter *sa)
268 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
269 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
270 uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
271 unsigned int txq_index;
272 efx_nic_fw_subvariant_t req_fw_subvariant;
273 efx_nic_fw_subvariant_t cur_fw_subvariant;
276 if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
277 sfc_info(sa, "no-Tx-checksum subvariant not supported");
/* Accumulate per-queue offloads from all initialized Tx queues */
281 for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
282 struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];
284 if (txq_info->state & SFC_TXQ_INITIALIZED)
285 tx_offloads |= txq_info->offloads;
/* Any checksum offload forces the default (checksumming) subvariant */
288 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
289 DEV_TX_OFFLOAD_TCP_CKSUM |
290 DEV_TX_OFFLOAD_UDP_CKSUM |
291 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
292 req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
294 req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
296 rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
298 sfc_err(sa, "failed to get FW subvariant: %d", rc);
301 sfc_info(sa, "FW subvariant is %u vs required %u",
302 cur_fw_subvariant, req_fw_subvariant);
/* Already in the requested state - nothing to do */
304 if (cur_fw_subvariant == req_fw_subvariant)
307 rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
309 sfc_err(sa, "failed to set FW subvariant %u: %d",
310 req_fw_subvariant, rc);
313 sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
/*
 * One attempt to bring the adapter up: select FW subvariant, apply
 * driver limits, init the NIC, reapply tunnel config if supported,
 * then start interrupts, events, port, Rx, Tx and flow rules in order.
 * Uses the goto-cleanup ladder idiom: each failure label unwinds only
 * the stages already started.  Caller (sfc_start) may retry on
 * transient errors.
 * NOTE(review): excerpt elides some original lines, including most of
 * the unwind ladder (numeric prefixes are original file line numbers).
 */
319 sfc_try_start(struct sfc_adapter *sa)
321 const efx_nic_cfg_t *encp;
324 sfc_log_init(sa, "entry");
326 SFC_ASSERT(sfc_adapter_is_locked(sa));
327 SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
329 sfc_log_init(sa, "set FW subvariant");
330 rc = sfc_set_fw_subvariant(sa);
332 goto fail_set_fw_subvariant;
334 sfc_log_init(sa, "set resource limits");
335 rc = sfc_set_drv_limits(sa);
337 goto fail_set_drv_limits;
339 sfc_log_init(sa, "init nic");
340 rc = efx_nic_init(sa->nic);
344 encp = efx_nic_cfg_get(sa->nic);
/* Re-push UDP tunnel config lost across NIC re-initialization */
345 if (encp->enc_tunnel_encapsulations_supported != 0) {
346 sfc_log_init(sa, "apply tunnel config");
347 rc = efx_tunnel_reconfigure(sa->nic);
349 goto fail_tunnel_reconfigure;
352 rc = sfc_intr_start(sa);
354 goto fail_intr_start;
356 rc = sfc_ev_start(sa);
360 rc = sfc_port_start(sa);
362 goto fail_port_start;
364 rc = sfc_rx_start(sa);
368 rc = sfc_tx_start(sa);
372 rc = sfc_flow_start(sa);
374 goto fail_flows_insert;
376 sfc_log_init(sa, "done");
/* Unwind ladder: later failures fall through earlier teardown */
395 fail_tunnel_reconfigure:
396 efx_nic_fini(sa->nic);
400 fail_set_fw_subvariant:
401 sfc_log_init(sa, "failed %d", rc);
/*
 * Start the adapter (dev_start path).  Must be called with the adapter
 * lock held in CONFIGURED state; STARTED is treated as a no-op.
 * Retries sfc_try_start() up to 3 times on transient errors
 * (EIO/EAGAIN/ENOENT/EINVAL), then records the final state.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
406 sfc_start(struct sfc_adapter *sa)
408 unsigned int start_tries = 3;
411 sfc_log_init(sa, "entry");
413 SFC_ASSERT(sfc_adapter_is_locked(sa));
416 case SFC_ADAPTER_CONFIGURED:
418 case SFC_ADAPTER_STARTED:
419 sfc_notice(sa, "already started");
426 sa->state = SFC_ADAPTER_STARTING;
/* Retry loop: some failures are transient (e.g. MC reboot races) */
429 rc = sfc_try_start(sa);
430 } while ((--start_tries > 0) &&
431 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
436 sa->state = SFC_ADAPTER_STARTED;
437 sfc_log_init(sa, "done");
/* Failure: fall back to CONFIGURED so a later start can be attempted */
441 sa->state = SFC_ADAPTER_CONFIGURED;
443 sfc_log_init(sa, "failed %d", rc);
/*
 * Stop the adapter (dev_stop path).  Must be called with the adapter
 * lock held; CONFIGURED state is a no-op, other unexpected states are
 * logged.  Tears down in reverse start order and finalizes the NIC,
 * leaving the adapter in CONFIGURED state.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
448 sfc_stop(struct sfc_adapter *sa)
450 sfc_log_init(sa, "entry");
452 SFC_ASSERT(sfc_adapter_is_locked(sa));
455 case SFC_ADAPTER_STARTED:
457 case SFC_ADAPTER_CONFIGURED:
458 sfc_notice(sa, "already stopped");
461 sfc_err(sa, "stop in unexpected state %u", sa->state);
466 sa->state = SFC_ADAPTER_STOPPING;
474 efx_nic_fini(sa->nic);
476 sa->state = SFC_ADAPTER_CONFIGURED;
477 sfc_log_init(sa, "done");
/*
 * Restart a STARTED adapter (stop + start) under the adapter lock.
 * Logs an error if the restart fails.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
481 sfc_restart(struct sfc_adapter *sa)
485 SFC_ASSERT(sfc_adapter_is_locked(sa));
487 if (sa->state != SFC_ADAPTER_STARTED)
494 sfc_err(sa, "restart failed");
/*
 * Alarm callback: perform a deferred restart if one was scheduled.
 * Atomically consumes the restart_required flag (compare-and-set 1->0)
 * so concurrent schedulers cannot trigger a double restart, then
 * restarts under the adapter lock only if still STARTED.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
500 sfc_restart_if_required(void *arg)
502 struct sfc_adapter *sa = arg;
504 /* If restart is scheduled, clear the flag and do it */
505 if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
507 sfc_adapter_lock(sa);
508 if (sa->state == SFC_ADAPTER_STARTED)
509 (void)sfc_restart(sa);
510 sfc_adapter_unlock(sa);
/*
 * Schedule an asynchronous adapter restart via an EAL alarm.
 * The restart_required flag makes scheduling idempotent; if alarms are
 * unavailable the restart stays pending (flag remains set) and a
 * warning is logged.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
515 sfc_schedule_restart(struct sfc_adapter *sa)
519 /* Schedule restart alarm if it is not scheduled yet */
520 if (!rte_atomic32_test_and_set(&sa->restart_required))
/* 1us delay: run the restart as soon as the alarm thread can */
523 rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
525 sfc_warn(sa, "alarms are not supported, restart is pending");
527 sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
529 sfc_notice(sa, "restart scheduled");
/*
 * Apply device configuration (dev_configure path).  Must be called
 * with the adapter lock held in INITIALIZED or CONFIGURED state.
 * Validates the configuration then configures interrupts, port, Rx
 * and Tx in order; on failure unwinds (ladder elided in this excerpt)
 * and returns to INITIALIZED state.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
533 sfc_configure(struct sfc_adapter *sa)
537 sfc_log_init(sa, "entry");
539 SFC_ASSERT(sfc_adapter_is_locked(sa));
541 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
542 sa->state == SFC_ADAPTER_CONFIGURED);
543 sa->state = SFC_ADAPTER_CONFIGURING;
545 rc = sfc_check_conf(sa);
547 goto fail_check_conf;
549 rc = sfc_intr_configure(sa);
551 goto fail_intr_configure;
553 rc = sfc_port_configure(sa);
555 goto fail_port_configure;
557 rc = sfc_rx_configure(sa);
559 goto fail_rx_configure;
561 rc = sfc_tx_configure(sa);
563 goto fail_tx_configure;
565 sa->state = SFC_ADAPTER_CONFIGURED;
566 sfc_log_init(sa, "done");
/* Failure: drop back to INITIALIZED so configure may be retried */
580 sa->state = SFC_ADAPTER_INITIALIZED;
581 sfc_log_init(sa, "failed %d", rc);
/*
 * Close the adapter (dev_close path): undo sfc_configure().
 * Must be called with the adapter lock held in CONFIGURED state;
 * leaves the adapter INITIALIZED.  (Per-subsystem close calls are
 * elided in this excerpt.)
 * NOTE(review): numeric prefixes are original file line numbers.
 */
586 sfc_close(struct sfc_adapter *sa)
588 sfc_log_init(sa, "entry");
590 SFC_ASSERT(sfc_adapter_is_locked(sa));
592 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
593 sa->state = SFC_ADAPTER_CLOSING;
600 sa->state = SFC_ADAPTER_INITIALIZED;
601 sfc_log_init(sa, "done");
/*
 * Initialize the libefx memory BAR descriptor from the PCI device's
 * mapped memory resource for the given BAR index, including the BAR
 * access lock.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
605 sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
607 struct rte_eth_dev *eth_dev = sa->eth_dev;
608 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
609 efsys_bar_t *ebp = &sa->mem_bar;
610 struct rte_mem_resource *res = &pci_dev->mem_resource[membar];
612 SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
613 ebp->esb_rid = membar;
614 ebp->esb_dev = pci_dev;
615 ebp->esb_base = res->addr;
/*
 * Destroy the memory BAR lock and clear the descriptor so stale
 * pointers cannot be reused after teardown.
 * NOTE(review): numeric prefixes are original file line numbers.
 */
620 sfc_mem_bar_fini(struct sfc_adapter *sa)
622 efsys_bar_t *ebp = &sa->mem_bar;
624 SFC_BAR_LOCK_DESTROY(ebp);
625 memset(ebp, 0, sizeof(*ebp));
/*
 * Default RSS hash key: the well-known repeating 0x6d5a pattern, which
 * is symmetric (flows in both directions hash to the same value) and
 * gives a uniform spread across queues.
 * NOTE(review): numeric prefixes are original file line numbers.
 */
629  * A fixed RSS key which has a property of being symmetric
630  * (symmetrical flows are distributed to the same CPU)
631  * and also known to give a uniform distribution
632  * (a good distribution of traffic between different CPUs)
634 static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
635 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
636 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
637 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
638 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
639 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
/*
 * Query RSS capabilities at attach time.  Temporarily brings up
 * interrupts, events and the Rx datapath in libefx just long enough to
 * read the default scale/hash support, initializes the driver hash
 * state, then tears the modules back down and seeds the default RSS
 * key.  Failure labels unwind in reverse init order.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
643 sfc_rss_attach(struct sfc_adapter *sa)
645 struct sfc_rss *rss = &sa->rss;
648 rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
652 rc = efx_ev_init(sa->nic);
656 rc = efx_rx_init(sa->nic);
660 rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
662 goto fail_scale_support_get;
664 rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
666 goto fail_hash_support_get;
668 rc = sfc_rx_hash_init(sa);
670 goto fail_rx_hash_init;
/* Capabilities captured - tear the temporary init back down */
672 efx_rx_fini(sa->nic);
673 efx_ev_fini(sa->nic);
674 efx_intr_fini(sa->nic);
676 rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
/* Unwind ladder for the failure paths above */
681 fail_hash_support_get:
682 fail_scale_support_get:
683 efx_rx_fini(sa->nic);
686 efx_ev_fini(sa->nic);
689 efx_intr_fini(sa->nic);
/*
 * Undo sfc_rss_attach(): release driver RSS hash state.
 * NOTE(review): numeric prefixes are original file line numbers.
 */
696 sfc_rss_detach(struct sfc_adapter *sa)
698 sfc_rx_hash_fini(sa);
/*
 * Attach the adapter after probe: reset the NIC, init tunnel support,
 * detect TSO availability, estimate queue limits, then attach the
 * interrupt, event, port, RSS and filter modules in order.  The NIC is
 * finalized again before returning (queue limits estimation left it
 * initialized) and the adapter ends up in INITIALIZED state.
 * Must be called with the adapter lock held.
 * NOTE(review): excerpt elides some original lines, including parts of
 * the unwind ladder (numeric prefixes are original file line numbers).
 */
702 sfc_attach(struct sfc_adapter *sa)
704 const efx_nic_cfg_t *encp;
705 efx_nic_t *enp = sa->nic;
708 sfc_log_init(sa, "entry");
710 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Start a new MCDI epoch after the PMD takes over the function */
712 efx_mcdi_new_epoch(enp);
714 sfc_log_init(sa, "reset nic");
715 rc = efx_nic_reset(enp);
720  * Probed NIC is sufficient for tunnel init.
721  * Initialize tunnel support to be able to use libefx
722  * efx_tunnel_config_udp_{add,remove}() in any state and
723  * efx_tunnel_reconfigure() on start up.
725 rc = efx_tunnel_init(enp);
727 goto fail_tunnel_init;
729 encp = efx_nic_cfg_get(sa->nic);
/* TSO is only usable when the datapath supports it AND FW enables FATSOv2 */
731 if (sa->priv.dp_tx->features & SFC_DP_TX_FEAT_TSO) {
732 sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
735 "TSO support isn't available on this adapter");
738 sfc_log_init(sa, "estimate resource limits");
739 rc = sfc_estimate_resource_limits(sa);
741 goto fail_estimate_rsrc_limits;
743 sa->txq_max_entries = encp->enc_txq_max_ndescs;
744 SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
746 rc = sfc_intr_attach(sa);
748 goto fail_intr_attach;
750 rc = sfc_ev_attach(sa);
754 rc = sfc_port_attach(sa);
756 goto fail_port_attach;
758 rc = sfc_rss_attach(sa);
760 goto fail_rss_attach;
762 rc = sfc_filter_attach(sa);
764 goto fail_filter_attach;
/* Limits estimation left the NIC initialized; finalize it now */
766 sfc_log_init(sa, "fini nic");
771 sa->state = SFC_ADAPTER_INITIALIZED;
773 sfc_log_init(sa, "done");
/* Unwind ladder for the failure paths above */
789 efx_nic_fini(sa->nic);
791 fail_estimate_rsrc_limits:
793 efx_tunnel_fini(sa->nic);
797 sfc_log_init(sa, "failed %d", rc);
/*
 * Undo sfc_attach(): detach the modules in reverse order (mostly
 * elided in this excerpt) and finalize tunnel support, leaving the
 * adapter UNINITIALIZED.  Must be called with the adapter lock held.
 * NOTE(review): numeric prefixes are original file line numbers.
 */
802 sfc_detach(struct sfc_adapter *sa)
804 sfc_log_init(sa, "entry");
806 SFC_ASSERT(sfc_adapter_is_locked(sa));
810 sfc_filter_detach(sa);
815 efx_tunnel_fini(sa->nic);
817 sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * kvargs handler for the fw_variant device argument: map the string
 * value (case-insensitive) to the corresponding efx_fw_variant_t
 * stored through @opaque.  (The unknown-value branch is elided in
 * this excerpt.)
 * NOTE(review): numeric prefixes are original file line numbers.
 */
821 sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
822 const char *value_str, void *opaque)
824 uint32_t *value = opaque;
826 if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
827 *value = EFX_FW_VARIANT_DONT_CARE;
828 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
829 *value = EFX_FW_VARIANT_FULL_FEATURED;
830 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
831 *value = EFX_FW_VARIANT_LOW_LATENCY;
832 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
833 *value = EFX_FW_VARIANT_PACKED_STREAM;
834 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
835 *value = EFX_FW_VARIANT_DPDK;
/*
 * Determine the running firmware variant from the RxDPCPU firmware ID
 * reported by the NIC.  Unknown IDs map to DONT_CARE since they are
 * not selectable via device parameters anyway.  Fails if the firmware
 * version or DPCPU IDs cannot be obtained.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
843 sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
845 efx_nic_fw_info_t enfi;
848 rc = efx_nic_get_fw_version(sa->nic, &enfi);
851 else if (!enfi.enfi_dpcpu_fw_ids_valid)
855  * Firmware variant can be uniquely identified by the RxDPCPU
858 switch (enfi.enfi_rx_dpcpu_fw_id) {
859 case EFX_RXDP_FULL_FEATURED_FW_ID:
860 *efv = EFX_FW_VARIANT_FULL_FEATURED;
863 case EFX_RXDP_LOW_LATENCY_FW_ID:
864 *efv = EFX_FW_VARIANT_LOW_LATENCY;
867 case EFX_RXDP_PACKED_STREAM_FW_ID:
868 *efv = EFX_FW_VARIANT_PACKED_STREAM;
871 case EFX_RXDP_DPDK_FW_ID:
872 *efv = EFX_FW_VARIANT_DPDK;
877  * Other firmware variants are not considered, since they are
878  * not supported in the device parameters
880 *efv = EFX_FW_VARIANT_DONT_CARE;
/*
 * Map a firmware variant to its device-parameter string for logging.
 * NOTE(review): the switch cases use EFX_RXDP_*_FW_ID constants while
 * the parameter is efx_fw_variant_t - confirm the two enumerations
 * share values (callers pass both efx_fw_variant_t values and RxDP FW
 * IDs in the surrounding code).
 */
888 sfc_fw_variant2str(efx_fw_variant_t efv)
891 case EFX_RXDP_FULL_FEATURED_FW_ID:
892 return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
893 case EFX_RXDP_LOW_LATENCY_FW_ID:
894 return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
895 case EFX_RXDP_PACKED_STREAM_FW_ID:
896 return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
897 case EFX_RXDP_DPDK_FW_ID:
898 return SFC_KVARG_FW_VARIANT_DPDK;
/*
 * Parse the rxd_wait_timeout_ns device argument: start from the
 * default, process the kvarg, range-check against the equal-stride
 * super-buffer head-of-line blocking maximum, and store the result in
 * the adapter.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
905 sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
910 value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;
912 rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
913 sfc_kvarg_long_handler, &value);
/* Reject negative values and values beyond the HW-supported maximum */
918 (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
919 sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
920 "was set (%ld);", value);
921 sfc_err(sa, "it must not be less than 0 or greater than %u",
922 EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
926 sa->rxd_wait_timeout_ns = value;
/*
 * Probe the NIC, honoring the fw_variant device argument.  If probing
 * with the preferred variant fails (unprivileged functions cannot set
 * the FW variant), retry with DONT_CARE.  Afterwards read back the
 * running variant and warn if it does not match the request.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
931 sfc_nic_probe(struct sfc_adapter *sa)
933 efx_nic_t *enp = sa->nic;
934 efx_fw_variant_t preferred_efv;
935 efx_fw_variant_t efv;
938 preferred_efv = EFX_FW_VARIANT_DONT_CARE;
939 rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
940 sfc_kvarg_fv_variant_handler,
943 sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
947 rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
951 rc = efx_nic_probe(enp, preferred_efv);
953 /* Unprivileged functions cannot set FW variant */
954 rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
/* Best-effort: warn rather than fail if the variant cannot be read */
959 rc = sfc_get_fw_variant(sa, &efv);
961 sfc_warn(sa, "FW variant can not be obtained");
967 /* Check that firmware variant was changed to the requested one */
968 if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
969 sfc_warn(sa, "FW variant has not changed to the requested %s",
970 sfc_fw_variant2str(preferred_efv));
973 sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * Probe stage: identify the controller family from PCI IDs, map the
 * memory BAR, create the libefx NIC object, bring up MCDI and probe
 * the NIC.  Must be called with the adapter lock held.  Failure
 * labels unwind in reverse order (partially elided in this excerpt).
 * NOTE(review): numeric prefixes are original file line numbers.
 */
979 sfc_probe(struct sfc_adapter *sa)
981 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
986 sfc_log_init(sa, "entry");
988 SFC_ASSERT(sfc_adapter_is_locked(sa));
990 sa->socket_id = rte_socket_id();
991 rte_atomic32_init(&sa->restart_required);
993 sfc_log_init(sa, "get family");
994 rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
995 &sa->family, &membar);
998 sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);
1000 sfc_log_init(sa, "init mem bar");
1001 rc = sfc_mem_bar_init(sa, membar);
1003 goto fail_mem_bar_init;
1005 sfc_log_init(sa, "create nic");
1006 rte_spinlock_init(&sa->nic_lock);
1007 rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
1008 &sa->mem_bar, &sa->nic_lock, &enp);
1010 goto fail_nic_create;
1013 rc = sfc_mcdi_init(sa);
1015 goto fail_mcdi_init;
1017 sfc_log_init(sa, "probe nic");
1018 rc = sfc_nic_probe(sa);
1020 goto fail_nic_probe;
1022 sfc_log_init(sa, "done");
/* Unwind ladder for the failure paths above */
1029 sfc_log_init(sa, "destroy nic");
1031 efx_nic_destroy(enp);
1034 sfc_mem_bar_fini(sa);
1038 sfc_log_init(sa, "failed %d", rc);
/*
 * Undo sfc_probe(): unprobe and destroy the NIC, cancel any pending
 * restart alarm (its callback receives the device private data about
 * to be freed), and unmap the memory BAR.  Must be called with the
 * adapter lock held; leaves the adapter UNINITIALIZED.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
1043 sfc_unprobe(struct sfc_adapter *sa)
1045 efx_nic_t *enp = sa->nic;
1047 sfc_log_init(sa, "entry");
1049 SFC_ASSERT(sfc_adapter_is_locked(sa));
1051 sfc_log_init(sa, "unprobe nic");
1052 efx_nic_unprobe(enp);
1057  * Make sure there is no pending alarm to restart since we are
1058  * going to free device private which is passed as the callback
1059  * opaque data. A new alarm cannot be scheduled since MCDI is
1062 rte_eal_alarm_cancel(sfc_restart_if_required, sa);
1064 sfc_log_init(sa, "destroy nic");
1066 efx_nic_destroy(enp);
1068 sfc_mem_bar_fini(sa);
1071 sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device log type named "<prefix>.<pci-address>" with
 * the given default level.  Falls back to the generic RTE_LOGTYPE_PMD
 * if the name would overflow, allocation fails, or registration
 * fails.
 * NOTE(review): excerpt elides some original lines (numeric prefixes
 * are original file line numbers).
 */
1075 sfc_register_logtype(const struct rte_pci_addr *pci_addr,
1076 const char *lt_prefix_str, uint32_t ll_default)
1078 size_t lt_prefix_str_size = strlen(lt_prefix_str);
1079 size_t lt_str_size_max;
1080 char *lt_str = NULL;
/* Guard the size arithmetic below against size_t overflow */
1083 if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
1084 ++lt_prefix_str_size; /* Reserve space for prefix separator */
1085 lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
1087 return RTE_LOGTYPE_PMD;
1090 lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
1092 return RTE_LOGTYPE_PMD;
/* Build "<prefix>.<pci-address>"; separator overwrites the NUL slot */
1094 strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
1095 lt_str[lt_prefix_str_size - 1] = '.';
1096 rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
1097 lt_str_size_max - lt_prefix_str_size);
1098 lt_str[lt_str_size_max - 1] = '\0';
1100 ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
1103 return (ret < 0) ? RTE_LOGTYPE_PMD : ret;