/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* sysconf() */
#include <unistd.h>

#include <rte_errno.h>
#include <rte_alarm.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
int
sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
	      size_t len, int socket_id, efsys_mem_t *esmp)
{
	const struct rte_memzone *mz;

	sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
		     name, id, len, socket_id);

	mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
				      sysconf(_SC_PAGESIZE), socket_id);
	if (mz == NULL) {
		sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
			name, (unsigned int)id, (unsigned int)len, socket_id,
			rte_strerror(rte_errno));
		return ENOMEM;
	}

	esmp->esm_addr = mz->iova;
	if (esmp->esm_addr == RTE_BAD_IOVA) {
		(void)rte_memzone_free(mz);
		return EFAULT;
	}

	esmp->esm_mz = mz;
	esmp->esm_base = mz->addr;

	return 0;
}
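
/*
 * Illustrative usage of the DMA helpers above (queue name and index are
 * hypothetical): callers pair sfc_dma_alloc() with sfc_dma_free() and
 * treat the return value as a positive errno, e.g.
 *
 *	efsys_mem_t mem;
 *
 *	rc = sfc_dma_alloc(sa, "txq", txq_index, size, sa->socket_id, &mem);
 *	if (rc != 0)
 *		return rc;
 *	...
 *	sfc_dma_free(sa, &mem);
 */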
void
sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
{
	int rc;

	sfc_log_init(sa, "name=%s", esmp->esm_mz->name);

	rc = rte_memzone_free(esmp->esm_mz);
	if (rc != 0)
		sfc_err(sa, "rte_memzone_free() failed: %d", rc);

	memset(esmp, 0, sizeof(*esmp));
}
static uint32_t
sfc_phy_cap_from_link_speeds(uint32_t speeds)
{
	uint32_t phy_caps = 0;

	if (~speeds & ETH_LINK_SPEED_FIXED) {
		phy_caps |= (1 << EFX_PHY_CAP_AN);
		/*
		 * If no speeds are specified in the mask, any supported
		 * speed may be negotiated
		 */
		if (speeds == ETH_LINK_SPEED_AUTONEG)
			phy_caps |=
				(1 << EFX_PHY_CAP_1000FDX) |
				(1 << EFX_PHY_CAP_10000FDX) |
				(1 << EFX_PHY_CAP_25000FDX) |
				(1 << EFX_PHY_CAP_40000FDX) |
				(1 << EFX_PHY_CAP_50000FDX) |
				(1 << EFX_PHY_CAP_100000FDX);
	}
	if (speeds & ETH_LINK_SPEED_1G)
		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
	if (speeds & ETH_LINK_SPEED_10G)
		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
	if (speeds & ETH_LINK_SPEED_25G)
		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
	if (speeds & ETH_LINK_SPEED_40G)
		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
	if (speeds & ETH_LINK_SPEED_50G)
		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
	if (speeds & ETH_LINK_SPEED_100G)
		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);

	return phy_caps;
}
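
/*
 * Example (illustrative): speeds = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G
 * with ETH_LINK_SPEED_FIXED clear yields
 * (1 << EFX_PHY_CAP_AN) | (1 << EFX_PHY_CAP_10000FDX) |
 * (1 << EFX_PHY_CAP_25000FDX), i.e. autonegotiation restricted to
 * 10G and 25G full duplex.
 */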
/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules.
 */
static int
sfc_check_conf(struct sfc_adapter *sa)
{
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
	int rc = 0;

	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",
			conf->link_speeds);
		rc = EINVAL;
	}

#if !EFSYS_OPT_LOOPBACK
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");
		rc = EINVAL;
	}
#endif

	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");
		rc = EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");
		rc = EINVAL;
	}

	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");
		rc = EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
		rc = EINVAL;
	}

	return rc;
}
/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised.
 *
 * NIC is kept initialized on success to allow other modules to acquire
 * defaults and capabilities.
 */
static int
sfc_estimate_resource_limits(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	int rc;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_evq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	if (sa->tso)
		limits.edl_max_txq_count =
			MIN(limits.edl_max_txq_count,
			    encp->enc_fw_assisted_tso_v2_n_contexts /
			    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_txq_count);

	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
				 &txq_allocated);
	if (rc != 0)
		goto fail_get_vi_pool;

	/* The firmware may still allocate more than the maximum; enforce the limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);
	evq_allocated--;

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */
	return 0;

fail_get_vi_pool:
fail_nic_init:
	efx_nic_fini(sa->nic);
	return rc;
}
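
/*
 * Worked example (illustrative figures only): with enc_evq_limit = 1024,
 * enc_rxq_limit = 1024 and enc_txq_limit = 1024 the estimation above gives
 *	edl_max_evq_count = 1024 / 2 = 512
 *	edl_max_rxq_count = MIN(1024, (512 - 1) / 2) = 255
 *	edl_max_txq_count = MIN(1024, 512 - 1 - 255) = 256
 * Real limits depend on the NIC family, firmware variant and the number
 * of functions sharing the adapter.
 */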
static int
sfc_set_drv_limits(struct sfc_adapter *sa)
{
	const struct rte_eth_dev_data *data = sa->eth_dev->data;
	efx_drv_limits_t lim;

	memset(&lim, 0, sizeof(lim));

	/* Limits are strict since they take into account the initial estimation */
	lim.edl_min_evq_count = lim.edl_max_evq_count =
		1 + data->nb_rx_queues + data->nb_tx_queues;
	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;

	return efx_nic_set_drv_limits(sa->nic, &lim);
}
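
/*
 * Example (illustrative): with 4 Rx and 4 Tx queues configured,
 * sfc_set_drv_limits() requests exactly 1 + 4 + 4 = 9 event queues
 * (one management EVQ plus one per queue), 4 Rx queues and 4 Tx queues.
 */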
static int
sfc_set_fw_subvariant(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
	unsigned int txq_index;
	efx_nic_fw_subvariant_t req_fw_subvariant;
	efx_nic_fw_subvariant_t cur_fw_subvariant;
	int rc;

	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
		sfc_info(sa, "no-Tx-checksum subvariant not supported");
		return 0;
	}

	for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) {
		struct sfc_txq_info *txq_info = &sa->txq_info[txq_index];

		if (txq_info->txq != NULL)
			tx_offloads |= txq_info->txq->offloads;
	}

	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
	else
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;

	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to get FW subvariant: %d", rc);
		return rc;
	}
	sfc_info(sa, "FW subvariant is %u vs required %u",
		 cur_fw_subvariant, req_fw_subvariant);

	if (cur_fw_subvariant == req_fw_subvariant)
		return 0;

	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to set FW subvariant %u: %d",
			req_fw_subvariant, rc);
		return rc;
	}
	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);

	return 0;
}
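
/*
 * Note: the no-Tx-checksum firmware subvariant gives up Tx checksum
 * offload, so it may be requested only when no transmit queue needs any
 * checksum offload (checked above); the expected benefit is improved
 * Tx performance, which depends on the firmware in use.
 */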
static int
sfc_try_start(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);

	sfc_log_init(sa, "set FW subvariant");
	rc = sfc_set_fw_subvariant(sa);
	if (rc != 0)
		goto fail_set_fw_subvariant;

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
	if (rc != 0)
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc != 0)
			goto fail_tunnel_reconfigure;
	}

	rc = sfc_intr_start(sa);
	if (rc != 0)
		goto fail_intr_start;

	rc = sfc_ev_start(sa);
	if (rc != 0)
		goto fail_ev_start;

	rc = sfc_port_start(sa);
	if (rc != 0)
		goto fail_port_start;

	rc = sfc_rx_start(sa);
	if (rc != 0)
		goto fail_rx_start;

	rc = sfc_tx_start(sa);
	if (rc != 0)
		goto fail_tx_start;

	rc = sfc_flow_start(sa);
	if (rc != 0)
		goto fail_flows_insert;

	sfc_log_init(sa, "done");
	return 0;

fail_flows_insert:
	sfc_tx_stop(sa);

fail_tx_start:
	sfc_rx_stop(sa);

fail_rx_start:
	sfc_port_stop(sa);

fail_port_start:
	sfc_ev_stop(sa);

fail_ev_start:
	sfc_intr_stop(sa);

fail_intr_start:
fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);

fail_nic_init:
fail_set_drv_limits:
fail_set_fw_subvariant:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
int
sfc_start(struct sfc_adapter *sa)
{
	unsigned int start_tries = 3;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		break;
	case SFC_ADAPTER_STARTED:
		sfc_notice(sa, "already started");
		return 0;
	default:
		rc = EINVAL;
		goto fail_bad_state;
	}

	sa->state = SFC_ADAPTER_STARTING;

	do {
		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));

	if (rc != 0)
		goto fail_try_start;

	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");
	return 0;

fail_try_start:
	sa->state = SFC_ADAPTER_CONFIGURED;
fail_bad_state:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
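
/*
 * Note: the retry loop in sfc_start() covers errors which may be
 * transient at start time (e.g. EIO may be caused by a management
 * controller reboot racing with the start sequence); up to three
 * attempts are made before the failure is reported to the caller.
 */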
void
sfc_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		break;
	case SFC_ADAPTER_CONFIGURED:
		sfc_notice(sa, "already stopped");
		return;
	default:
		sfc_err(sa, "stop in unexpected state %u", sa->state);
		SFC_ASSERT(B_FALSE);
		return;
	}

	sa->state = SFC_ADAPTER_STOPPING;

	sfc_flow_stop(sa);
	sfc_tx_stop(sa);
	sfc_rx_stop(sa);
	sfc_port_stop(sa);
	sfc_ev_stop(sa);
	sfc_intr_stop(sa);
	efx_nic_fini(sa->nic);

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
}
static int
sfc_restart(struct sfc_adapter *sa)
{
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)
		return EINVAL;

	sfc_stop(sa);

	rc = sfc_start(sa);
	if (rc != 0)
		sfc_err(sa, "restart failed");

	return rc;
}
static void
sfc_restart_if_required(void *arg)
{
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
				1, 0)) {
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ADAPTER_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
	}
}
void
sfc_schedule_restart(struct sfc_adapter *sa)
{
	int rc;

	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))
		return;

	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
	if (rc == -ENOTSUP)
		sfc_warn(sa, "alarms are not supported, restart is pending");
	else if (rc != 0)
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
	else
		sfc_notice(sa, "restart scheduled");
}
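
/*
 * Illustrative flow: event or exception handlers which cannot restart
 * the adapter synchronously call sfc_schedule_restart(); the atomic
 * test-and-set above coalesces repeated requests, and the restart itself
 * runs from the EAL alarm callback with the adapter lock taken.
 */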
int
sfc_configure(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
	if (rc != 0)
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
	if (rc != 0)
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
	if (rc != 0)
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
	if (rc != 0)
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
	if (rc != 0)
		goto fail_tx_configure;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
	return 0;

fail_tx_configure:
	sfc_rx_close(sa);

fail_rx_configure:
	sfc_port_close(sa);

fail_port_configure:
	sfc_intr_close(sa);

fail_intr_configure:
fail_check_conf:
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_close(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	sfc_tx_close(sa);
	sfc_rx_close(sa);
	sfc_port_close(sa);
	sfc_intr_close(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
}
static int
sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
{
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res = &pci_dev->mem_resource[membar];

	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
	ebp->esb_rid = membar;
	ebp->esb_dev = pci_dev;
	ebp->esb_base = res->addr;

	return 0;
}
static void
sfc_mem_bar_fini(struct sfc_adapter *sa)
{
	efsys_bar_t *ebp = &sa->mem_bar;

	SFC_BAR_LOCK_DESTROY(ebp);
	memset(ebp, 0, sizeof(*ebp));
}
#if EFSYS_OPT_RX_SCALE
/*
 * A fixed RSS key which has a property of being symmetric
 * (symmetrical flows are distributed to the same CPU)
 * and also known to give a uniform distribution
 * (a good distribution of traffic between different CPUs)
 */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};
#endif
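
/*
 * The key above is the 16-bit pattern 0x6d5a repeated to fill 40 bytes.
 * With the Toeplitz hash function such a key is known to make the hash
 * value invariant under swapping of the source and destination addresses
 * and ports, which is what provides the symmetry property noted above
 * (see Woo and Park, "Scalable TCP Session Monitoring with Symmetric
 * Receive-Side Scaling").
 */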
#if EFSYS_OPT_RX_SCALE
static int
sfc_set_rss_defaults(struct sfc_adapter *sa)
{
	int rc;

	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
	if (rc != 0)
		goto fail_intr_init;

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
	if (rc != 0)
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
	if (rc != 0)
		goto fail_hash_support_get;

	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);

	rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));

	return 0;

fail_hash_support_get:
fail_scale_support_get:
	efx_rx_fini(sa->nic);

fail_rx_init:
	efx_ev_fini(sa->nic);

fail_ev_init:
	efx_intr_fini(sa->nic);

fail_intr_init:
	return rc;
}
#else
static int
sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa)
{
	return 0;
}
#endif
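
/*
 * Adapter lifecycle implemented by this file:
 * sfc_probe() -> sfc_attach() -> sfc_configure() -> sfc_start(),
 * torn down in reverse order by sfc_stop(), sfc_close(), sfc_detach()
 * and sfc_unprobe(). All of these expect the adapter lock to be held
 * by the caller.
 */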
int
sfc_attach(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);
	if (rc != 0)
		goto fail_nic_reset;

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
	if (rc != 0)
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
		if (!sa->tso)
			sfc_warn(sa,
				 "TSO support isn't available on this adapter");
	}

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
	if (rc != 0)
		goto fail_estimate_rsrc_limits;

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	rc = sfc_intr_attach(sa);
	if (rc != 0)
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);
	if (rc != 0)
		goto fail_ev_attach;

	rc = sfc_port_attach(sa);
	if (rc != 0)
		goto fail_port_attach;

	rc = sfc_set_rss_defaults(sa);
	if (rc != 0)
		goto fail_set_rss_defaults;

	rc = sfc_filter_attach(sa);
	if (rc != 0)
		goto fail_filter_attach;

	sfc_log_init(sa, "fini nic");
	efx_nic_fini(enp);

	sfc_flow_init(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");
	return 0;

fail_filter_attach:
fail_set_rss_defaults:
	sfc_port_detach(sa);

fail_port_attach:
	sfc_ev_detach(sa);

fail_ev_attach:
	sfc_intr_detach(sa);

fail_intr_attach:
	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:
fail_tunnel_init:
	efx_tunnel_fini(sa->nic);

fail_nic_reset:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_flow_fini(sa);

	sfc_filter_detach(sa);
	sfc_port_detach(sa);
	sfc_ev_detach(sa);
	sfc_intr_detach(sa);
	efx_tunnel_fini(sa->nic);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
}
static int
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
	else
		return -EINVAL;

	return 0;
}
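
/*
 * Example (illustrative): the preferred firmware variant is passed as a
 * device argument on the application command line, e.g.
 *	-w 0000:01:00.0,fw_variant=ultra-low-latency
 * See SFC_KVARG_FW_VARIANT_* in sfc_kvargs.h for the exact set of
 * accepted value strings.
 */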
static int
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
{
	efx_nic_fw_info_t enfi;
	int rc;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return rc;
	else if (!enfi.enfi_dpcpu_fw_ids_valid)
		return ENOTSUP;

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware id
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;
		break;

	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;
		break;

	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;
		break;

	default:
		/*
		 * Other firmware variants are not considered, since they are
		 * not supported in the device parameters
		 */
		*efv = EFX_FW_VARIANT_DONT_CARE;
		break;
	}

	return 0;
}
static const char *
sfc_fw_variant2str(efx_fw_variant_t efv)
{
	switch (efv) {
	case EFX_FW_VARIANT_FULL_FEATURED:
		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
	case EFX_FW_VARIANT_LOW_LATENCY:
		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
	case EFX_FW_VARIANT_PACKED_STREAM:
		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
	default:
		return "unknown";
	}
}
static int
sfc_nic_probe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;
	int rc;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
				&preferred_efv);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
		return rc;
	}

	rc = efx_nic_probe(enp, preferred_efv);
	if (rc == EACCES) {
		/* Unprivileged functions cannot set FW variant */
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	}
	if (rc != 0)
		return rc;

	rc = sfc_get_fw_variant(sa, &efv);
	if (rc == ENOTSUP) {
		sfc_warn(sa, "FW variant can not be obtained");
		return 0;
	}
	if (rc != 0)
		return rc;

	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));
	}

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));

	return 0;
}
int
sfc_probe(struct sfc_adapter *sa)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
	unsigned int membar;
	efx_nic_t *enp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family, &membar);
	if (rc != 0)
		goto fail_family;
	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, membar);
	if (rc != 0)
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
	if (rc != 0)
		goto fail_nic_create;
	sa->nic = enp;

	rc = sfc_mcdi_init(sa);
	if (rc != 0)
		goto fail_mcdi_init;

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);
	if (rc != 0)
		goto fail_nic_probe;

	sfc_log_init(sa, "done");
	return 0;

fail_nic_probe:
	sfc_mcdi_fini(sa);

fail_mcdi_init:
	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

fail_nic_create:
	sfc_mem_bar_fini(sa);

fail_mem_bar_init:
fail_family:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
void
sfc_unprobe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	sfc_mcdi_fini(sa);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free device private which is passed as the callback
	 * opaque data. A new alarm cannot be scheduled since MCDI is
	 * shut down.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
}
uint32_t
sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
		     uint32_t ll_default)
{
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;
	char *lt_str = NULL;
	int ret;

	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
	} else {
		return RTE_LOGTYPE_PMD;
	}

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
	if (lt_str == NULL)
		return RTE_LOGTYPE_PMD;

	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
	rte_free(lt_str);

	return (ret < 0) ? RTE_LOGTYPE_PMD : ret;
}
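
/*
 * Example (illustrative): for lt_prefix_str "pmd.net.sfc" and a device
 * at PCI address 0000:01:00.0, the log type registered above is named
 * "pmd.net.sfc.0000:01:00.0"; on any failure the generic RTE_LOGTYPE_PMD
 * is returned so that logging still works.
 */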