/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* sysconf() */
#include <unistd.h>

#include <rte_errno.h>
#include <rte_alarm.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"

int
sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
	      size_t len, int socket_id, efsys_mem_t *esmp)
{
	const struct rte_memzone *mz;

	sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
		     name, id, len, socket_id);

	mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
				      sysconf(_SC_PAGESIZE), socket_id);
	if (mz == NULL) {
		sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
			name, (unsigned int)id, (unsigned int)len, socket_id,
			rte_strerror(rte_errno));
		return ENOMEM;
	}

	esmp->esm_addr = mz->iova;
	if (esmp->esm_addr == RTE_BAD_IOVA) {
		(void)rte_memzone_free(mz);
		return EFAULT;
	}

	esmp->esm_mz = mz;
	esmp->esm_base = mz->addr;

	return 0;
}
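
/*
 * Usage sketch (illustrative, names assumed from the Rx module): callers
 * pass a queue-scoped name/id pair so each queue gets its own memzone, e.g.
 *
 *	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(nb_rx_desc),
 *			   socket_id, &rxq->mem);
 */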

void
sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
{
	int rc;

	sfc_log_init(sa, "name=%s", esmp->esm_mz->name);

	rc = rte_memzone_free(esmp->esm_mz);
	if (rc != 0)
		sfc_err(sa, "rte_memzone_free() failed: %d", rc);

	memset(esmp, 0, sizeof(*esmp));
}

static uint32_t
sfc_phy_cap_from_link_speeds(uint32_t speeds)
{
	uint32_t phy_caps = 0;

	if (~speeds & ETH_LINK_SPEED_FIXED) {
		phy_caps |= (1 << EFX_PHY_CAP_AN);
		/*
		 * If no speeds are specified in the mask, any supported
		 * speed may be advertised, so advertise them all
		 */
		if (speeds == ETH_LINK_SPEED_AUTONEG)
			phy_caps |=
				(1 << EFX_PHY_CAP_1000FDX) |
				(1 << EFX_PHY_CAP_10000FDX) |
				(1 << EFX_PHY_CAP_25000FDX) |
				(1 << EFX_PHY_CAP_40000FDX) |
				(1 << EFX_PHY_CAP_50000FDX) |
				(1 << EFX_PHY_CAP_100000FDX);
	}
	if (speeds & ETH_LINK_SPEED_1G)
		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
	if (speeds & ETH_LINK_SPEED_10G)
		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
	if (speeds & ETH_LINK_SPEED_25G)
		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
	if (speeds & ETH_LINK_SPEED_40G)
		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
	if (speeds & ETH_LINK_SPEED_50G)
		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
	if (speeds & ETH_LINK_SPEED_100G)
		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);

	return phy_caps;
}
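
/*
 * Worked example for the mapping above: a link_speeds mask of
 * ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G with ETH_LINK_SPEED_FIXED clear
 * yields (1 << EFX_PHY_CAP_AN) | (1 << EFX_PHY_CAP_10000FDX) |
 * (1 << EFX_PHY_CAP_25000FDX), i.e. autonegotiation restricted to 10G/25G.
 */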

/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules.
 */
static int
sfc_check_conf(struct sfc_adapter *sa)
{
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
	int rc = 0;

	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",
			conf->link_speeds);
		rc = EINVAL;
	}

#if !EFSYS_OPT_LOOPBACK
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");
		rc = EINVAL;
	}
#endif

	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");
		rc = EINVAL;
	}

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");
		rc = EINVAL;
	}

	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");
		rc = EINVAL;
	}

	if (conf->intr_conf.rxq != 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
		rc = EINVAL;
	}

	return rc;
}

/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised.
 *
 * NIC is kept initialized on success to allow other modules to acquire
 * defaults and capabilities.
 */
static int
sfc_estimate_resource_limits(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	int rc;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
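	/* E.g. the smallest footprint is 1 management EVQ + 1 Rx + 1 Tx = 3 */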

	/*
	 * Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	if (sa->tso)
		limits.edl_max_txq_count =
			MIN(limits.edl_max_txq_count,
			    encp->enc_fw_assisted_tso_v2_n_contexts /
			    encp->enc_hw_pf_count);
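
	/*
	 * Illustrative numbers (not from a real adapter): 2048 FATSOv2
	 * contexts shared by 4 PFs would cap the per-function Tx queue
	 * maximum at 512 TSO-capable queues.
	 */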

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/*
	 * Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
				 &txq_allocated);
	if (rc != 0)
		goto fail_get_vi_pool;

	/* Firmware may still allocate more than the maximum; enforce the limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);
	evq_allocated--;

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
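	/*
	 * Worked example: if 9 EVQs were granted, 8 remain after the
	 * management EVQ, so rxq_max = MIN(rxq_allocated, 4) and
	 * txq_max = MIN(txq_allocated, 8 - rxq_max).
	 */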

	/* Keep NIC initialized */
	return 0;

fail_get_vi_pool:
	efx_nic_fini(sa->nic);
fail_nic_init:
	return rc;
}

static int
sfc_set_drv_limits(struct sfc_adapter *sa)
{
	const struct rte_eth_dev_data *data = sa->eth_dev->data;
	efx_drv_limits_t lim;

	memset(&lim, 0, sizeof(lim));

	/* Limits are strict since they take the initial estimation into account */
	lim.edl_min_evq_count = lim.edl_max_evq_count =
		1 + data->nb_rx_queues + data->nb_tx_queues;
	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;

	return efx_nic_set_drv_limits(sa->nic, &lim);
}
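
/*
 * E.g. (illustrative): with 4 Rx and 4 Tx queues configured, minimum and
 * maximum are both pinned to 9 EVQs (1 management + 4 + 4), 4 RxQs and
 * 4 TxQs, so the firmware grant either matches exactly or initialization
 * fails.
 */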

static int
sfc_set_fw_subvariant(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
	unsigned int txq_index;
	efx_nic_fw_subvariant_t req_fw_subvariant;
	efx_nic_fw_subvariant_t cur_fw_subvariant;
	int rc;

	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
		sfc_info(sa, "no-Tx-checksum subvariant not supported");
		return 0;
	}

	for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) {
		struct sfc_txq_info *txq_info = &sa->txq_info[txq_index];

		if (txq_info->txq != NULL)
			tx_offloads |= txq_info->txq->offloads;
	}

	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
	else
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
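
	/*
	 * Example (illustrative): a configuration requesting only
	 * DEV_TX_OFFLOAD_VLAN_INSERT and DEV_TX_OFFLOAD_MULTI_SEGS sets no
	 * checksum bits, so the no-Tx-checksum subvariant is requested.
	 */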

	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to get FW subvariant: %d", rc);
		return rc;
	}

	sfc_info(sa, "FW subvariant is %u vs required %u",
		 cur_fw_subvariant, req_fw_subvariant);

	if (cur_fw_subvariant == req_fw_subvariant)
		return 0;

	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to set FW subvariant %u: %d",
			req_fw_subvariant, rc);
		return rc;
	}

	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);

	return 0;
}

static int
sfc_try_start(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);

	sfc_log_init(sa, "set FW subvariant");
	rc = sfc_set_fw_subvariant(sa);
	if (rc != 0)
		goto fail_set_fw_subvariant;

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
	if (rc != 0)
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc != 0)
			goto fail_tunnel_reconfigure;
	}

	rc = sfc_intr_start(sa);
	if (rc != 0)
		goto fail_intr_start;

	rc = sfc_ev_start(sa);
	if (rc != 0)
		goto fail_ev_start;

	rc = sfc_port_start(sa);
	if (rc != 0)
		goto fail_port_start;

	rc = sfc_rx_start(sa);
	if (rc != 0)
		goto fail_rx_start;

	rc = sfc_tx_start(sa);
	if (rc != 0)
		goto fail_tx_start;

	rc = sfc_flow_start(sa);
	if (rc != 0)
		goto fail_flows_insert;

	sfc_log_init(sa, "done");
	return 0;

fail_flows_insert:
	sfc_tx_stop(sa);
fail_tx_start:
	sfc_rx_stop(sa);
fail_rx_start:
	sfc_port_stop(sa);
fail_port_start:
	sfc_ev_stop(sa);
fail_ev_start:
	sfc_intr_stop(sa);
fail_intr_start:
fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);
fail_nic_init:
fail_set_drv_limits:
fail_set_fw_subvariant:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

int
sfc_start(struct sfc_adapter *sa)
{
	unsigned int start_tries = 3;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ADAPTER_CONFIGURED:
		break;
	case SFC_ADAPTER_STARTED:
		sfc_notice(sa, "already started");
		return 0;
	default:
		rc = EINVAL;
		goto fail_bad_state;
	}

	sa->state = SFC_ADAPTER_STARTING;
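
	/*
	 * Retry a few times: the errno values below are treated as
	 * potentially transient (e.g. a management controller reboot in
	 * progress) and may clear on a subsequent attempt.
	 */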
	do {
		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
	if (rc != 0)
		goto fail_try_start;

	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");
	return 0;

fail_try_start:
	sa->state = SFC_ADAPTER_CONFIGURED;
fail_bad_state:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ADAPTER_STARTED:
		break;
	case SFC_ADAPTER_CONFIGURED:
		sfc_notice(sa, "already stopped");
		return;
	default:
		sfc_err(sa, "stop in unexpected state %u", sa->state);
		SFC_ASSERT(B_FALSE);
		return;
	}

	sa->state = SFC_ADAPTER_STOPPING;

	sfc_flow_stop(sa);
	sfc_tx_stop(sa);
	sfc_rx_stop(sa);
	sfc_port_stop(sa);
	sfc_ev_stop(sa);
	sfc_intr_stop(sa);
	efx_nic_fini(sa->nic);

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
}

static int
sfc_restart(struct sfc_adapter *sa)
{
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)
		return EINVAL;

	sfc_stop(sa);

	rc = sfc_start(sa);
	if (rc != 0)
		sfc_err(sa, "restart failed");

	return rc;
}

static void
sfc_restart_if_required(void *arg)
{
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
				1, 0)) {
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ADAPTER_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
	}
}

void
sfc_schedule_restart(struct sfc_adapter *sa)
{
	int rc;

	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))
		return;

	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
	if (rc == -ENOTSUP)
		sfc_warn(sa, "alarms are not supported, restart is pending");
	else if (rc != 0)
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
	else
		sfc_notice(sa, "restart scheduled");
}
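
/*
 * Note: the alarm callback (sfc_restart_if_required()) runs in the EAL
 * interrupt thread roughly 1 us later, so the restart is decoupled from
 * the scheduling context, which is typically event processing where the
 * adapter lock must not be taken directly.
 */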

int
sfc_configure(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
	if (rc != 0)
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
	if (rc != 0)
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
	if (rc != 0)
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
	if (rc != 0)
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
	if (rc != 0)
		goto fail_tx_configure;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
	return 0;

fail_tx_configure:
	sfc_rx_close(sa);
fail_rx_configure:
	sfc_port_close(sa);
fail_port_configure:
	sfc_intr_close(sa);
fail_intr_configure:
fail_check_conf:
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_close(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	sfc_tx_close(sa);
	sfc_rx_close(sa);
	sfc_port_close(sa);
	sfc_intr_close(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
}

static int
sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
{
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res = &pci_dev->mem_resource[membar];

	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
	ebp->esb_rid = membar;
	ebp->esb_dev = pci_dev;
	ebp->esb_base = res->addr;

	return 0;
}

static void
sfc_mem_bar_fini(struct sfc_adapter *sa)
{
	efsys_bar_t *ebp = &sa->mem_bar;

	SFC_BAR_LOCK_DESTROY(ebp);
	memset(ebp, 0, sizeof(*ebp));
}

/*
 * A fixed RSS key which has a property of being symmetric
 * (symmetrical flows are distributed to the same CPU)
 * and also known to give a uniform distribution
 * (a good distribution of traffic between different CPUs)
 */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};
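
/*
 * Background note (general Toeplitz property, not verified against this
 * libefx implementation): repeating the 16-bit pattern 0x6d5a across the
 * whole key makes the hash symmetric, i.e. swapping source and destination
 * addresses/ports yields the same hash, keeping both directions of a flow
 * on the same CPU.
 */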

static int
sfc_set_rss_defaults(struct sfc_adapter *sa)
{
	int rc;

	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
	if (rc != 0)
		goto fail_intr_init;

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
	if (rc != 0)
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
	if (rc != 0)
		goto fail_hash_support_get;

	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);

	rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));

	return 0;

fail_hash_support_get:
fail_scale_support_get:
	efx_rx_fini(sa->nic);
fail_rx_init:
	efx_ev_fini(sa->nic);
fail_ev_init:
	efx_intr_fini(sa->nic);
fail_intr_init:
	return rc;
}

int
sfc_attach(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);
	if (rc != 0)
		goto fail_nic_reset;

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
	if (rc != 0)
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
		if (!sa->tso)
			sfc_warn(sa,
				 "TSO support isn't available on this adapter");
	}

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
	if (rc != 0)
		goto fail_estimate_rsrc_limits;

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	rc = sfc_intr_attach(sa);
	if (rc != 0)
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);
	if (rc != 0)
		goto fail_ev_attach;

	rc = sfc_port_attach(sa);
	if (rc != 0)
		goto fail_port_attach;

	rc = sfc_set_rss_defaults(sa);
	if (rc != 0)
		goto fail_set_rss_defaults;

	rc = sfc_filter_attach(sa);
	if (rc != 0)
		goto fail_filter_attach;

	sfc_log_init(sa, "fini nic");
	efx_nic_fini(enp);

	sfc_flow_init(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");
	return 0;

fail_filter_attach:
fail_set_rss_defaults:
	sfc_port_detach(sa);
fail_port_attach:
	sfc_ev_detach(sa);
fail_ev_attach:
	sfc_intr_detach(sa);
fail_intr_attach:
	efx_nic_fini(sa->nic);
fail_estimate_rsrc_limits:
fail_tunnel_init:
	efx_tunnel_fini(sa->nic);
fail_nic_reset:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_flow_fini(sa);

	sfc_filter_detach(sa);
	sfc_port_detach(sa);
	sfc_ev_detach(sa);
	sfc_intr_detach(sa);
	efx_tunnel_fini(sa->nic);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
}

static int
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
	else
		return -EINVAL;

	return 0;
}
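
/*
 * Usage sketch (kvarg spellings assumed from sfc_kvargs.h): the handler
 * above turns a devargs string such as
 *
 *	-w 0000:84:00.0,fw_variant=dont-care
 *
 * into EFX_FW_VARIANT_DONT_CARE; an unrecognized value makes the kvargs
 * processing fail with -EINVAL.
 */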

static int
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
{
	efx_nic_fw_info_t enfi;
	int rc;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return rc;
	else if (!enfi.enfi_dpcpu_fw_ids_valid)
		return ENOTSUP;

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware id
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;
		break;

	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;
		break;

	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;
		break;

	default:
		/*
		 * Other firmware variants are not considered, since they are
		 * not supported in the device parameters
		 */
		*efv = EFX_FW_VARIANT_DONT_CARE;
		break;
	}

	return 0;
}

static const char *
sfc_fw_variant2str(efx_fw_variant_t efv)
{
	/* Compare against efx_fw_variant_t values, matching the argument type */
	switch (efv) {
	case EFX_FW_VARIANT_FULL_FEATURED:
		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
	case EFX_FW_VARIANT_LOW_LATENCY:
		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
	case EFX_FW_VARIANT_PACKED_STREAM:
		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
	default:
		return "unknown";
	}
}

static int
sfc_nic_probe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;
	int rc;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
				&preferred_efv);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
		return EINVAL;
	}

	rc = efx_nic_probe(enp, preferred_efv);
	if (rc == EACCES) {
		/* Unprivileged functions cannot set FW variant */
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	}
	if (rc != 0)
		return rc;

	rc = sfc_get_fw_variant(sa, &efv);
	if (rc == ENOTSUP) {
		sfc_warn(sa, "FW variant can not be obtained");
		return 0;
	}
	if (rc != 0)
		return rc;

	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));
	}

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));

	return 0;
}

int
sfc_probe(struct sfc_adapter *sa)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
	unsigned int membar;
	efx_nic_t *enp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family, &membar);
	if (rc != 0)
		goto fail_family;
	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, membar);
	if (rc != 0)
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
	if (rc != 0)
		goto fail_nic_create;
	sa->nic = enp;

	rc = sfc_mcdi_init(sa);
	if (rc != 0)
		goto fail_mcdi_init;

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);
	if (rc != 0)
		goto fail_nic_probe;

	sfc_log_init(sa, "done");
	return 0;

fail_nic_probe:
	sfc_mcdi_fini(sa);
fail_mcdi_init:
	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);
fail_nic_create:
	sfc_mem_bar_fini(sa);
fail_mem_bar_init:
fail_family:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_unprobe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	sfc_mcdi_fini(sa);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free the device private data, which is passed as the
	 * callback opaque data. A new alarm cannot be scheduled since
	 * MCDI is shut down.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
}

uint32_t
sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
		     uint32_t ll_default)
{
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;
	char *lt_str = NULL;
	int ret;

	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
	} else {
		return RTE_LOGTYPE_PMD;
	}

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
	if (lt_str == NULL)
		return RTE_LOGTYPE_PMD;

	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';
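	/*
	 * E.g. (hypothetical values): prefix "pmd.net.sfc" and device
	 * "0000:01:00.0" yield the logtype name "pmd.net.sfc.0000:01:00.0".
	 */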

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
	rte_free(lt_str);

	return (ret < 0) ? RTE_LOGTYPE_PMD : ret;
}