1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
23 #include "sfc_kvargs.h"
/*
 * Reserve a DMA-capable memzone for device/driver shared memory and fill
 * in the efsys_mem_t descriptor (virtual base, IOVA, backing memzone).
 * The zone is page-aligned and allocated on the requested NUMA socket.
 */
27 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
28 size_t len, int socket_id, efsys_mem_t *esmp)
30 const struct rte_memzone *mz;
/* %zu matches the size_t 'len' argument (%lu is wrong on ILP32 targets) */
32 sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
33 name, id, len, socket_id);
/* Page alignment is required for DMA mapping of the reserved zone */
35 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
36 sysconf(_SC_PAGESIZE), socket_id);
38 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
39 name, (unsigned int)id, (unsigned int)len, socket_id,
40 rte_strerror(rte_errno));
/* A zone without a usable IOVA cannot be handed to the NIC: release it */
44 esmp->esm_addr = mz->iova;
45 if (esmp->esm_addr == RTE_BAD_IOVA) {
46 (void)rte_memzone_free(mz);
51 esmp->esm_base = mz->addr;
/*
 * Release DMA memory previously obtained via sfc_dma_alloc() and clear
 * the descriptor so stale pointers cannot be reused.
 */
57 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
61 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
63 rc = rte_memzone_free(esmp->esm_mz);
/* Fixed error-message typo: was "rte_memzone_free((" (unbalanced parens) */
65 sfc_err(sa, "rte_memzone_free() failed: %d", rc);
/* Wipe the descriptor: esm_base/esm_addr/esm_mz are now dangling */
67 memset(esmp, 0, sizeof(*esmp));
/*
 * Translate an ethdev link_speeds mask into efx PHY capability bits.
 * Absence of ETH_LINK_SPEED_FIXED enables autonegotiation; an empty
 * speed mask (ETH_LINK_SPEED_AUTONEG) advertises all supported speeds.
 */
71 sfc_phy_cap_from_link_speeds(uint32_t speeds)
73 uint32_t phy_caps = 0;
75 if (~speeds & ETH_LINK_SPEED_FIXED) {
76 phy_caps |= (1 << EFX_PHY_CAP_AN);
78 * If no speeds are specified in the mask, any supported
81 if (speeds == ETH_LINK_SPEED_AUTONEG)
83 (1 << EFX_PHY_CAP_1000FDX) |
84 (1 << EFX_PHY_CAP_10000FDX) |
85 (1 << EFX_PHY_CAP_25000FDX) |
86 (1 << EFX_PHY_CAP_40000FDX) |
87 (1 << EFX_PHY_CAP_50000FDX) |
88 (1 << EFX_PHY_CAP_100000FDX);
/* Map each explicitly requested speed to its full-duplex PHY capability */
90 if (speeds & ETH_LINK_SPEED_1G)
91 phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
92 if (speeds & ETH_LINK_SPEED_10G)
93 phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
94 if (speeds & ETH_LINK_SPEED_25G)
95 phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
96 if (speeds & ETH_LINK_SPEED_40G)
97 phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
98 if (speeds & ETH_LINK_SPEED_50G)
99 phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
100 if (speeds & ETH_LINK_SPEED_100G)
101 phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
107 * Check requested device level configuration.
108 * Receive and transmit configuration is checked in corresponding
112 sfc_check_conf(struct sfc_adapter *sa)
114 const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
/* Intersect requested link speeds with what the PHY actually supports */
117 sa->port.phy_adv_cap =
118 sfc_phy_cap_from_link_speeds(conf->link_speeds) &
119 sa->port.phy_adv_cap_mask;
/* AN alone is not enough: at least one real speed must remain advertised */
120 if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
121 sfc_err(sa, "No link speeds from mask %#x are supported",
/* Reject configuration features this PMD does not implement */
126 if (conf->lpbk_mode != 0) {
127 sfc_err(sa, "Loopback not supported");
131 if (conf->dcb_capability_en != 0) {
132 sfc_err(sa, "Priority-based flow control not supported");
136 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
137 sfc_err(sa, "Flow Director not supported");
/* LSC interrupt needs a line or message interrupt vector on this function */
141 if ((conf->intr_conf.lsc != 0) &&
142 (sa->intr.type != EFX_INTR_LINE) &&
143 (sa->intr.type != EFX_INTR_MESSAGE)) {
144 sfc_err(sa, "Link status change interrupt not supported");
148 if (conf->intr_conf.rxq != 0) {
149 sfc_err(sa, "Receive queue interrupt not supported");
157 * Find out maximum number of receive and transmit queues which could be
160 * NIC is kept initialized on success to allow other modules acquire
161 * defaults and capabilities.
164 sfc_estimate_resource_limits(struct sfc_adapter *sa)
166 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
167 efx_drv_limits_t limits;
169 uint32_t evq_allocated;
170 uint32_t rxq_allocated;
171 uint32_t txq_allocated;
173 memset(&limits, 0, sizeof(limits));
175 /* Request at least one Rx and Tx queue */
176 limits.edl_min_rxq_count = 1;
177 limits.edl_min_txq_count = 1;
178 /* Management event queue plus event queue for each Tx and Rx queue */
179 limits.edl_min_evq_count =
180 1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
182 /* Divide by number of functions to guarantee that all functions
183 * will get promised resources
185 /* FIXME Divide by number of functions (not 2) below */
186 limits.edl_max_evq_count = encp->enc_evq_limit / 2;
/* NOTE(review): compares EVQ max against edl_min_rxq_count; presumably
 * edl_min_evq_count was intended — confirm against upstream. */
187 SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
189 /* Split equally between receive and transmit */
190 limits.edl_max_rxq_count =
191 MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
192 SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
/* Tx queues get whatever event queues remain after Rx plus management */
194 limits.edl_max_txq_count =
195 MIN(encp->enc_txq_limit,
196 limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
/* FW-assisted TSOv2 contexts are shared per PF: cap Tx queue count by them */
199 limits.edl_max_txq_count =
200 MIN(limits.edl_max_txq_count,
201 encp->enc_fw_assisted_tso_v2_n_contexts /
202 encp->enc_hw_pf_count);
/* NOTE(review): compares TXQ max against edl_min_rxq_count; presumably
 * edl_min_txq_count was intended — confirm against upstream. */
204 SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
206 /* Configure the minimum required resources needed for the
207 * driver to operate, and the maximum desired resources that the
208 * driver is capable of using.
210 efx_nic_set_drv_limits(sa->nic, &limits);
212 sfc_log_init(sa, "init nic");
213 rc = efx_nic_init(sa->nic);
217 /* Find resource dimensions assigned by firmware to this function */
218 rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
221 goto fail_get_vi_pool;
223 /* It still may allocate more than maximum, ensure limit */
224 evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
225 rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
226 txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
228 /* Subtract management EVQ not used for traffic */
229 SFC_ASSERT(evq_allocated > 0);
232 /* Right now we use separate EVQ for Rx and Tx */
233 sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
234 sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
236 /* Keep NIC initialized */
241 efx_nic_fini(sa->nic);
/*
 * Pin driver resource limits to the exact queue counts configured by the
 * application (min == max), since the initial estimation already ran.
 */
246 sfc_set_drv_limits(struct sfc_adapter *sa)
248 const struct rte_eth_dev_data *data = sa->eth_dev->data;
249 efx_drv_limits_t lim;
251 memset(&lim, 0, sizeof(lim));
253 /* Limits are strict since take into account initial estimation */
/* One management EVQ plus one EVQ per configured Rx and Tx queue */
254 lim.edl_min_evq_count = lim.edl_max_evq_count =
255 1 + data->nb_rx_queues + data->nb_tx_queues;
256 lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
257 lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
259 return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Choose and apply the NIC firmware subvariant based on configured Tx
 * checksum offloads: if no checksum offload is requested anywhere, the
 * lighter no-Tx-checksum subvariant can be selected.
 */
263 sfc_set_fw_subvariant(struct sfc_adapter *sa)
265 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
266 uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
267 unsigned int txq_index;
268 efx_nic_fw_subvariant_t req_fw_subvariant;
269 efx_nic_fw_subvariant_t cur_fw_subvariant;
/* Nothing to do if the NIC cannot run the no-Tx-checksum subvariant */
272 if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
273 sfc_info(sa, "no-Tx-checksum subvariant not supported");
/* Fold in per-queue offloads: any queue may add checksum requirements */
277 for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) {
278 struct sfc_txq_info *txq_info = &sa->txq_info[txq_index];
280 if (txq_info->txq != NULL)
281 tx_offloads |= txq_info->txq->offloads;
/* Any checksum offload forces the default (full) subvariant */
284 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
285 DEV_TX_OFFLOAD_TCP_CKSUM |
286 DEV_TX_OFFLOAD_UDP_CKSUM |
287 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
288 req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
290 req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
292 rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
294 sfc_err(sa, "failed to get FW subvariant: %d", rc);
297 sfc_info(sa, "FW subvariant is %u vs required %u",
298 cur_fw_subvariant, req_fw_subvariant);
/* Skip the switch if the firmware already runs the required subvariant */
300 if (cur_fw_subvariant == req_fw_subvariant)
303 rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
305 sfc_err(sa, "failed to set FW subvariant %u: %d",
306 req_fw_subvariant, rc);
309 sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
/*
 * Single attempt to bring the adapter up: set FW subvariant and driver
 * limits, init the NIC, then start interrupts, events, port, Rx, Tx and
 * flows in order. Unwinds via the fail_* labels on error.
 */
315 sfc_try_start(struct sfc_adapter *sa)
317 const efx_nic_cfg_t *encp;
320 sfc_log_init(sa, "entry");
322 SFC_ASSERT(sfc_adapter_is_locked(sa));
323 SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
325 sfc_log_init(sa, "set FW subvariant");
326 rc = sfc_set_fw_subvariant(sa);
328 goto fail_set_fw_subvariant;
330 sfc_log_init(sa, "set resource limits");
331 rc = sfc_set_drv_limits(sa);
333 goto fail_set_drv_limits;
335 sfc_log_init(sa, "init nic");
336 rc = efx_nic_init(sa->nic);
/* Re-apply UDP tunnel configuration if the NIC supports encapsulations */
340 encp = efx_nic_cfg_get(sa->nic);
341 if (encp->enc_tunnel_encapsulations_supported != 0) {
342 sfc_log_init(sa, "apply tunnel config");
343 rc = efx_tunnel_reconfigure(sa->nic);
345 goto fail_tunnel_reconfigure;
/* Start subsystems in dependency order */
348 rc = sfc_intr_start(sa);
350 goto fail_intr_start;
352 rc = sfc_ev_start(sa);
356 rc = sfc_port_start(sa);
358 goto fail_port_start;
360 rc = sfc_rx_start(sa);
364 rc = sfc_tx_start(sa);
368 rc = sfc_flow_start(sa);
370 goto fail_flows_insert;
372 sfc_log_init(sa, "done");
391 fail_tunnel_reconfigure:
392 efx_nic_fini(sa->nic);
396 fail_set_fw_subvariant:
397 sfc_log_init(sa, "failed %d", rc);
/*
 * Start the adapter, retrying sfc_try_start() a few times on transient
 * errors (e.g. firmware subvariant change causing a temporary failure).
 */
402 sfc_start(struct sfc_adapter *sa)
404 unsigned int start_tries = 3;
407 sfc_log_init(sa, "entry");
409 SFC_ASSERT(sfc_adapter_is_locked(sa));
412 case SFC_ADAPTER_CONFIGURED:
/* Starting an already-started adapter is a benign no-op */
414 case SFC_ADAPTER_STARTED:
415 sfc_notice(sa, "already started");
422 sa->state = SFC_ADAPTER_STARTING;
/* Retry on error codes that may be transient during startup */
425 rc = sfc_try_start(sa);
426 } while ((--start_tries > 0) &&
427 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
432 sa->state = SFC_ADAPTER_STARTED;
433 sfc_log_init(sa, "done");
/* On failure fall back to the configured (not started) state */
437 sa->state = SFC_ADAPTER_CONFIGURED;
439 sfc_log_init(sa, "failed %d", rc);
/*
 * Stop a started adapter and return it to the configured state.
 * Stopping an already-stopped adapter is a benign no-op.
 */
444 sfc_stop(struct sfc_adapter *sa)
446 sfc_log_init(sa, "entry");
448 SFC_ASSERT(sfc_adapter_is_locked(sa));
451 case SFC_ADAPTER_STARTED:
453 case SFC_ADAPTER_CONFIGURED:
454 sfc_notice(sa, "already stopped");
457 sfc_err(sa, "stop in unexpected state %u", sa->state);
462 sa->state = SFC_ADAPTER_STOPPING;
470 efx_nic_fini(sa->nic);
472 sa->state = SFC_ADAPTER_CONFIGURED;
473 sfc_log_init(sa, "done");
/*
 * Restart a started adapter (stop + start). Requires the adapter lock;
 * only valid when the adapter is in the STARTED state.
 */
477 sfc_restart(struct sfc_adapter *sa)
481 SFC_ASSERT(sfc_adapter_is_locked(sa));
483 if (sa->state != SFC_ADAPTER_STARTED)
490 sfc_err(sa, "restart failed");
/*
 * Alarm callback: perform a deferred restart if one was scheduled.
 * The restart_required flag is consumed atomically so concurrent
 * schedulers arm at most one restart.
 */
496 sfc_restart_if_required(void *arg)
498 struct sfc_adapter *sa = arg;
500 /* If restart is scheduled, clear the flag and do it */
501 if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
503 sfc_adapter_lock(sa);
/* Restart only makes sense while the adapter is actually running */
504 if (sa->state == SFC_ADAPTER_STARTED)
505 (void)sfc_restart(sa);
506 sfc_adapter_unlock(sa);
/*
 * Request an asynchronous adapter restart via an EAL alarm. Safe to call
 * from contexts that cannot restart synchronously; duplicate requests
 * are collapsed by the atomic restart_required flag.
 */
511 sfc_schedule_restart(struct sfc_adapter *sa)
515 /* Schedule restart alarm if it is not scheduled yet */
516 if (!rte_atomic32_test_and_set(&sa->restart_required))
519 rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
/* Missing alarm support leaves the flag set: restart stays pending */
521 sfc_warn(sa, "alarms are not supported, restart is pending");
523 sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
525 sfc_notice(sa, "restart scheduled");
/*
 * Apply device-level configuration: validate it, then configure
 * interrupts, port, Rx and Tx subsystems in order. On failure the
 * adapter returns to the INITIALIZED state.
 */
529 sfc_configure(struct sfc_adapter *sa)
533 sfc_log_init(sa, "entry");
535 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Reconfiguration from the CONFIGURED state is allowed */
537 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
538 sa->state == SFC_ADAPTER_CONFIGURED);
539 sa->state = SFC_ADAPTER_CONFIGURING;
541 rc = sfc_check_conf(sa);
543 goto fail_check_conf;
545 rc = sfc_intr_configure(sa);
547 goto fail_intr_configure;
549 rc = sfc_port_configure(sa);
551 goto fail_port_configure;
553 rc = sfc_rx_configure(sa);
555 goto fail_rx_configure;
557 rc = sfc_tx_configure(sa);
559 goto fail_tx_configure;
561 sa->state = SFC_ADAPTER_CONFIGURED;
562 sfc_log_init(sa, "done");
576 sa->state = SFC_ADAPTER_INITIALIZED;
577 sfc_log_init(sa, "failed %d", rc);
/*
 * Tear down device-level configuration and return the adapter to the
 * INITIALIZED state. Must be called with the adapter lock held and only
 * from the CONFIGURED state.
 */
582 sfc_close(struct sfc_adapter *sa)
584 sfc_log_init(sa, "entry");
586 SFC_ASSERT(sfc_adapter_is_locked(sa));
588 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
589 sa->state = SFC_ADAPTER_CLOSING;
596 sa->state = SFC_ADAPTER_INITIALIZED;
597 sfc_log_init(sa, "done");
/*
 * Describe the PCI memory BAR used for register access in the efsys_bar_t
 * structure (BAR index, PCI device, mapped base address) and create its
 * access lock.
 */
601 sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
603 struct rte_eth_dev *eth_dev = sa->eth_dev;
604 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
605 efsys_bar_t *ebp = &sa->mem_bar;
606 struct rte_mem_resource *res = &pci_dev->mem_resource[membar];
608 SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
609 ebp->esb_rid = membar;
610 ebp->esb_dev = pci_dev;
/* BAR is already mapped by the PCI bus driver; just record its VA */
611 ebp->esb_base = res->addr;
/*
 * Destroy the memory BAR access lock and clear the descriptor.
 * Counterpart of sfc_mem_bar_init().
 */
616 sfc_mem_bar_fini(struct sfc_adapter *sa)
618 efsys_bar_t *ebp = &sa->mem_bar;
620 SFC_BAR_LOCK_DESTROY(ebp);
621 memset(ebp, 0, sizeof(*ebp));
/* Default RSS hash key, used when the application does not supply one. */
#if EFSYS_OPT_RX_SCALE
626 * A fixed RSS key which has a property of being symmetric
627 * (symmetrical flows are distributed to the same CPU)
628 * and also known to give a uniform distribution
629 * (a good distribution of traffic between different CPUs)
631 static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
632 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
633 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
634 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
635 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
636 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
#if EFSYS_OPT_RX_SCALE
/*
 * Query RSS scale/hash support defaults from the NIC and initialize the
 * adapter's RSS state (hash types and key). Interrupts, events and Rx
 * are brought up temporarily just to query and then torn down again.
 */
642 sfc_set_rss_defaults(struct sfc_adapter *sa)
646 rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
650 rc = efx_ev_init(sa->nic);
654 rc = efx_rx_init(sa->nic);
658 rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
660 goto fail_scale_support_get;
662 rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
664 goto fail_hash_support_get;
/* Tear down the temporarily started subsystems in reverse order */
666 efx_rx_fini(sa->nic);
667 efx_ev_fini(sa->nic);
668 efx_intr_fini(sa->nic);
670 sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);
672 rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));
676 fail_hash_support_get:
677 fail_scale_support_get:
679 efx_ev_fini(sa->nic);
682 efx_intr_fini(sa->nic);
/* Build variant without RSS support (EFSYS_OPT_RX_SCALE disabled):
 * presumably the #else counterpart of the function above — nothing to do. */
689 sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa)
/*
 * Attach the driver to a probed NIC: reset it, initialize tunnel support,
 * detect TSO capability, estimate resource limits and attach interrupt,
 * event, port, RSS and filter modules. Leaves the adapter INITIALIZED.
 */
696 sfc_attach(struct sfc_adapter *sa)
698 const efx_nic_cfg_t *encp;
699 efx_nic_t *enp = sa->nic;
702 sfc_log_init(sa, "entry");
704 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Start a new MCDI epoch so stale firmware state is discarded */
706 efx_mcdi_new_epoch(enp);
708 sfc_log_init(sa, "reset nic");
709 rc = efx_nic_reset(enp);
714 * Probed NIC is sufficient for tunnel init.
715 * Initialize tunnel support to be able to use libefx
716 * efx_tunnel_config_udp_{add,remove}() in any state and
717 * efx_tunnel_reconfigure() on start up.
719 rc = efx_tunnel_init(enp);
721 goto fail_tunnel_init;
723 encp = efx_nic_cfg_get(sa->nic);
/* TSO is only usable if the datapath supports it and FW-assisted TSOv2 is on */
725 if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
726 sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
729 "TSO support isn't available on this adapter");
732 sfc_log_init(sa, "estimate resource limits");
733 rc = sfc_estimate_resource_limits(sa);
735 goto fail_estimate_rsrc_limits;
737 sa->txq_max_entries = encp->enc_txq_max_ndescs;
738 SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
/* Attach sub-modules in dependency order */
740 rc = sfc_intr_attach(sa);
742 goto fail_intr_attach;
744 rc = sfc_ev_attach(sa);
748 rc = sfc_port_attach(sa);
750 goto fail_port_attach;
752 rc = sfc_set_rss_defaults(sa);
754 goto fail_set_rss_defaults;
756 rc = sfc_filter_attach(sa);
758 goto fail_filter_attach;
760 sfc_log_init(sa, "fini nic");
765 sa->state = SFC_ADAPTER_INITIALIZED;
767 sfc_log_init(sa, "done");
771 fail_set_rss_defaults:
781 efx_nic_fini(sa->nic);
783 fail_estimate_rsrc_limits:
785 efx_tunnel_fini(sa->nic);
789 sfc_log_init(sa, "failed %d", rc);
/*
 * Detach driver modules and tunnel support from the NIC, returning the
 * adapter to the UNINITIALIZED state. Counterpart of sfc_attach().
 */
794 sfc_detach(struct sfc_adapter *sa)
796 sfc_log_init(sa, "entry");
798 SFC_ASSERT(sfc_adapter_is_locked(sa));
802 sfc_filter_detach(sa);
806 efx_tunnel_fini(sa->nic);
808 sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * kvargs handler: parse the fw_variant device argument string into the
 * corresponding efx_fw_variant_t value (case-insensitive match).
 */
812 sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
813 const char *value_str, void *opaque)
815 uint32_t *value = opaque;
817 if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
818 *value = EFX_FW_VARIANT_DONT_CARE;
819 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
820 *value = EFX_FW_VARIANT_FULL_FEATURED;
821 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
822 *value = EFX_FW_VARIANT_LOW_LATENCY;
823 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
824 *value = EFX_FW_VARIANT_PACKED_STREAM;
/*
 * Determine the running firmware variant from the RxDPCPU firmware ID
 * reported by the NIC. Unrecognized IDs map to DONT_CARE since only the
 * variants exposed via device parameters are distinguished.
 */
832 sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
834 efx_nic_fw_info_t enfi;
837 rc = efx_nic_get_fw_version(sa->nic, &enfi);
/* Without valid DPCPU firmware IDs the variant cannot be identified */
840 else if (!enfi.enfi_dpcpu_fw_ids_valid)
844 * Firmware variant can be uniquely identified by the RxDPCPU
847 switch (enfi.enfi_rx_dpcpu_fw_id) {
848 case EFX_RXDP_FULL_FEATURED_FW_ID:
849 *efv = EFX_FW_VARIANT_FULL_FEATURED;
852 case EFX_RXDP_LOW_LATENCY_FW_ID:
853 *efv = EFX_FW_VARIANT_LOW_LATENCY;
856 case EFX_RXDP_PACKED_STREAM_FW_ID:
857 *efv = EFX_FW_VARIANT_PACKED_STREAM;
862 * Other firmware variants are not considered, since they are
863 * not supported in the device parameters
865 *efv = EFX_FW_VARIANT_DONT_CARE;
/*
 * Map a firmware variant to its device-argument string for logging.
 * NOTE(review): the original switched on an efx_fw_variant_t value using
 * EFX_RXDP_*_FW_ID (RxDPCPU firmware ID) constants; the case labels are
 * changed to the EFX_FW_VARIANT_* enumerators matching the argument type.
 * The numeric values of the three pairs coincide, so behavior is kept.
 */
873 sfc_fw_variant2str(efx_fw_variant_t efv)
876 case EFX_FW_VARIANT_FULL_FEATURED:
877 return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
878 case EFX_FW_VARIANT_LOW_LATENCY:
879 return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
880 case EFX_FW_VARIANT_PACKED_STREAM:
881 return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
/*
 * Probe the NIC, honoring the fw_variant device argument. Falls back to
 * DONT_CARE when the function lacks the privilege to select a variant,
 * then reports whether the requested variant is actually running.
 */
888 sfc_nic_probe(struct sfc_adapter *sa)
890 efx_nic_t *enp = sa->nic;
891 efx_fw_variant_t preferred_efv;
892 efx_fw_variant_t efv;
895 preferred_efv = EFX_FW_VARIANT_DONT_CARE;
896 rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
897 sfc_kvarg_fv_variant_handler,
900 sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
904 rc = efx_nic_probe(enp, preferred_efv);
906 /* Unprivileged functions cannot set FW variant */
907 rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
912 rc = sfc_get_fw_variant(sa, &efv);
914 sfc_warn(sa, "FW variant can not be obtained");
920 /* Check that firmware variant was changed to the requested one */
921 if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
/* Warn only: running with a different variant is not fatal */
922 sfc_warn(sa, "FW variant has not changed to the requested %s",
923 sfc_fw_variant2str(preferred_efv));
926 sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * Top-level probe: identify the NIC family from PCI IDs, map the memory
 * BAR, create the libefx NIC object, bring up MCDI and probe the NIC.
 * Unwinds created resources via the fail_* labels on error.
 */
932 sfc_probe(struct sfc_adapter *sa)
934 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
939 sfc_log_init(sa, "entry");
941 SFC_ASSERT(sfc_adapter_is_locked(sa));
943 sa->socket_id = rte_socket_id();
944 rte_atomic32_init(&sa->restart_required);
946 sfc_log_init(sa, "get family");
/* efx_family() also reports which PCI BAR holds the registers */
947 rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
948 &sa->family, &membar);
951 sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);
953 sfc_log_init(sa, "init mem bar");
954 rc = sfc_mem_bar_init(sa, membar);
956 goto fail_mem_bar_init;
958 sfc_log_init(sa, "create nic");
959 rte_spinlock_init(&sa->nic_lock);
960 rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
961 &sa->mem_bar, &sa->nic_lock, &enp);
963 goto fail_nic_create;
/* MCDI must be up before the NIC can be probed */
966 rc = sfc_mcdi_init(sa);
970 sfc_log_init(sa, "probe nic");
971 rc = sfc_nic_probe(sa);
975 sfc_log_init(sa, "done");
982 sfc_log_init(sa, "destroy nic");
984 efx_nic_destroy(enp);
987 sfc_mem_bar_fini(sa);
991 sfc_log_init(sa, "failed %d", rc);
/*
 * Undo sfc_probe(): unprobe and destroy the NIC object, cancel any
 * pending restart alarm and release the memory BAR.
 */
996 sfc_unprobe(struct sfc_adapter *sa)
998 efx_nic_t *enp = sa->nic;
1000 sfc_log_init(sa, "entry");
1002 SFC_ASSERT(sfc_adapter_is_locked(sa));
1004 sfc_log_init(sa, "unprobe nic");
1005 efx_nic_unprobe(enp);
1010 * Make sure there is no pending alarm to restart since we are
1011 * going to free device private which is passed as the callback
1012 * opaque data. A new alarm cannot be scheduled since MCDI is
1015 rte_eal_alarm_cancel(sfc_restart_if_required, sa);
1017 sfc_log_init(sa, "destroy nic");
1019 efx_nic_destroy(enp);
1021 sfc_mem_bar_fini(sa);
1024 sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device log type named "<prefix>.<pci address>" with the
 * given default level. Falls back to RTE_LOGTYPE_PMD on any failure
 * (overflow, allocation failure or registration error).
 */
1028 sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
1029 uint32_t ll_default)
1031 size_t lt_prefix_str_size = strlen(lt_prefix_str);
1032 size_t lt_str_size_max;
1033 char *lt_str = NULL;
/* Guard the size arithmetic below against size_t overflow */
1036 if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
1037 ++lt_prefix_str_size; /* Reserve space for prefix separator */
1038 lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
1040 return RTE_LOGTYPE_PMD;
1043 lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
1045 return RTE_LOGTYPE_PMD;
/* Copy prefix, then overwrite its terminating NUL with the '.' separator */
1047 strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
1048 lt_str[lt_prefix_str_size - 1] = '.';
1049 rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
1050 lt_str_size_max - lt_prefix_str_size);
1051 lt_str[lt_str_size_max - 1] = '\0';
1053 ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
1056 return (ret < 0) ? RTE_LOGTYPE_PMD : ret;