1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
23 #include "sfc_kvargs.h"
27 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
28 size_t len, int socket_id, efsys_mem_t *esmp)
30 const struct rte_memzone *mz;
32 sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
33 name, id, len, socket_id);
35 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
36 sysconf(_SC_PAGESIZE), socket_id);
38 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
39 name, (unsigned int)id, (unsigned int)len, socket_id,
40 rte_strerror(rte_errno));
44 esmp->esm_addr = mz->iova;
45 if (esmp->esm_addr == RTE_BAD_IOVA) {
46 (void)rte_memzone_free(mz);
51 esmp->esm_base = mz->addr;
57 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
61 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
63 rc = rte_memzone_free(esmp->esm_mz);
65 sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
67 memset(esmp, 0, sizeof(*esmp));
/*
 * Convert an ethdev link speed mask (ETH_LINK_SPEED_*) into libefx PHY
 * capability bits (EFX_PHY_CAP_*).  If the FIXED bit is clear,
 * autonegotiation is advertised as well.
 * NOTE(review): this listing is elided (original line numbers skip);
 * line 82, presumably "phy_caps |=", is not visible here.
 */
71 sfc_phy_cap_from_link_speeds(uint32_t speeds)
73 	uint32_t phy_caps = 0;
75 	if (~speeds & ETH_LINK_SPEED_FIXED) {
76 		phy_caps |= (1 << EFX_PHY_CAP_AN);
78 		 * If no speeds are specified in the mask, any supported
		/* Bare AUTONEG mask: advertise every duplex speed listed below */
81 		if (speeds == ETH_LINK_SPEED_AUTONEG)
83 			    (1 << EFX_PHY_CAP_1000FDX) |
84 			    (1 << EFX_PHY_CAP_10000FDX) |
85 			    (1 << EFX_PHY_CAP_25000FDX) |
86 			    (1 << EFX_PHY_CAP_40000FDX) |
87 			    (1 << EFX_PHY_CAP_50000FDX) |
88 			    (1 << EFX_PHY_CAP_100000FDX);
	/* Explicit speeds map 1:1 to full-duplex PHY capabilities */
90 	if (speeds & ETH_LINK_SPEED_1G)
91 		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
92 	if (speeds & ETH_LINK_SPEED_10G)
93 		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
94 	if (speeds & ETH_LINK_SPEED_25G)
95 		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
96 	if (speeds & ETH_LINK_SPEED_40G)
97 		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
98 	if (speeds & ETH_LINK_SPEED_50G)
99 		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
100 	if (speeds & ETH_LINK_SPEED_100G)
101 		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
107  * Check requested device level configuration.
108  * Receive and transmit configuration is checked in corresponding
/*
 * Validate dev_conf before (re)configuration: resolve advertised PHY
 * capabilities from the requested link speeds and reject features this
 * driver/build does not support (loopback w/o EFSYS_OPT_LOOPBACK, DCB,
 * Flow Director, Rx queue interrupts, LSC with unsuitable intr type).
 */
112 sfc_check_conf(struct sfc_adapter *sa)
114 	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
	/* Intersect user-requested speeds with what the PHY can advertise */
117 	sa->port.phy_adv_cap =
118 		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
119 		sa->port.phy_adv_cap_mask;
	/* AN alone is not enough: at least one real speed must remain */
120 	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
121 		sfc_err(sa, "No link speeds from mask %#x are supported",
126 #if !EFSYS_OPT_LOOPBACK
127 	if (conf->lpbk_mode != 0) {
128 		sfc_err(sa, "Loopback not supported");
133 	if (conf->dcb_capability_en != 0) {
134 		sfc_err(sa, "Priority-based flow control not supported");
138 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
139 		sfc_err(sa, "Flow Director not supported");
	/* LSC interrupt needs a line or message interrupt to deliver it */
143 	if ((conf->intr_conf.lsc != 0) &&
144 	    (sa->intr.type != EFX_INTR_LINE) &&
145 	    (sa->intr.type != EFX_INTR_MESSAGE)) {
146 		sfc_err(sa, "Link status change interrupt not supported");
150 	if (conf->intr_conf.rxq != 0) {
151 		sfc_err(sa, "Receive queue interrupt not supported");
159  * Find out maximum number of receive and transmit queues which could be
162  * NIC is kept initialized on success to allow other modules acquire
163  * defaults and capabilities.
/*
 * Ask firmware for a VI pool sized by our min/max limits, then derive
 * sa->rxq_max / sa->txq_max from what was actually granted.
 */
166 sfc_estimate_resource_limits(struct sfc_adapter *sa)
168 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
169 	efx_drv_limits_t limits;
171 	uint32_t evq_allocated;
172 	uint32_t rxq_allocated;
173 	uint32_t txq_allocated;
175 	memset(&limits, 0, sizeof(limits));
177 	/* Request at least one Rx and Tx queue */
178 	limits.edl_min_rxq_count = 1;
179 	limits.edl_min_txq_count = 1;
180 	/* Management event queue plus event queue for each Tx and Rx queue */
181 	limits.edl_min_evq_count =
182 		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
184 	/* Divide by number of functions to guarantee that all functions
185 	 * will get promised resources
187 	/* FIXME Divide by number of functions (not 2) below */
188 	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	/* NOTE(review): asserts below compare against edl_min_rxq_count even
	 * for EVQ/TXQ limits — looks like a copy/paste oddity; verify. */
189 	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
191 	/* Split equally between receive and transmit */
192 	limits.edl_max_rxq_count =
193 		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
194 	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
196 	limits.edl_max_txq_count =
197 		MIN(encp->enc_txq_limit,
198 		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
	/* FW-assisted TSOv2 contexts are shared per PF: cap Tx queues */
201 	limits.edl_max_txq_count =
202 		MIN(limits.edl_max_txq_count,
203 		    encp->enc_fw_assisted_tso_v2_n_contexts /
204 		    encp->enc_hw_pf_count);
206 	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
208 	/* Configure the minimum required resources needed for the
209 	 * driver to operate, and the maximum desired resources that the
210 	 * driver is capable of using.
212 	efx_nic_set_drv_limits(sa->nic, &limits);
214 	sfc_log_init(sa, "init nic");
215 	rc = efx_nic_init(sa->nic);
219 	/* Find resource dimensions assigned by firmware to this function */
220 	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
223 		goto fail_get_vi_pool;
225 	/* It still may allocate more than maximum, ensure limit */
226 	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
227 	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
228 	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
230 	/* Subtract management EVQ not used for traffic */
231 	SFC_ASSERT(evq_allocated > 0);
234 	/* Right now we use separate EVQ for Rx and Tx */
235 	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
236 	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
238 	/* Keep NIC initialized */
/* Error path: undo efx_nic_init() so the NIC is left uninitialized */
243 	efx_nic_fini(sa->nic);
/*
 * Set exact (min == max) driver limits from the configured queue counts,
 * plus one management event queue.  Called once queue numbers are known,
 * after the earlier estimation pass.
 */
248 sfc_set_drv_limits(struct sfc_adapter *sa)
250 	const struct rte_eth_dev_data *data = sa->eth_dev->data;
251 	efx_drv_limits_t lim;
253 	memset(&lim, 0, sizeof(lim));
255 	/* Limits are strict since take into account initial estimation */
	/* One EVQ per Rx queue, one per Tx queue, plus the management EVQ */
256 	lim.edl_min_evq_count = lim.edl_max_evq_count =
257 		1 + data->nb_rx_queues + data->nb_tx_queues;
258 	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
259 	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
261 	return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Choose and, if necessary, switch the NIC firmware subvariant:
 * if neither device-level nor any per-queue Tx offload needs checksum
 * insertion, request the no-Tx-checksum subvariant; otherwise default.
 */
265 sfc_set_fw_subvariant(struct sfc_adapter *sa)
267 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
268 	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
269 	unsigned int txq_index;
270 	efx_nic_fw_subvariant_t req_fw_subvariant;
271 	efx_nic_fw_subvariant_t cur_fw_subvariant;
274 	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
275 		sfc_info(sa, "no-Tx-checksum subvariant not supported");
	/* Merge per-queue offloads into the device-level mask */
279 	for (txq_index = 0; txq_index < sa->txq_count; ++txq_index) {
280 		struct sfc_txq_info *txq_info = &sa->txq_info[txq_index];
282 		if (txq_info->txq != NULL)
283 			tx_offloads |= txq_info->txq->offloads;
	/* Any checksum offload anywhere forces the default subvariant */
286 	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
287 			   DEV_TX_OFFLOAD_TCP_CKSUM |
288 			   DEV_TX_OFFLOAD_UDP_CKSUM |
289 			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
290 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
292 		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
294 	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
296 		sfc_err(sa, "failed to get FW subvariant: %d", rc);
299 	sfc_info(sa, "FW subvariant is %u vs required %u",
300 		 cur_fw_subvariant, req_fw_subvariant);
	/* Nothing to do if the NIC already runs the required subvariant */
302 	if (cur_fw_subvariant == req_fw_subvariant)
305 	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
307 		sfc_err(sa, "failed to set FW subvariant %u: %d",
308 			req_fw_subvariant, rc);
311 	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
/*
 * One attempt to bring the adapter up: FW subvariant, driver limits,
 * NIC init, tunnel config, then interrupts/events/port/Rx/Tx/flows in
 * order.  Failure paths unwind in reverse via the fail_* labels
 * (goto-cleanup; most labels are elided from this listing).
 * Called only from sfc_start(), which retries on transient errors.
 */
317 sfc_try_start(struct sfc_adapter *sa)
319 	const efx_nic_cfg_t *encp;
322 	sfc_log_init(sa, "entry");
324 	SFC_ASSERT(sfc_adapter_is_locked(sa));
325 	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
327 	sfc_log_init(sa, "set FW subvariant");
328 	rc = sfc_set_fw_subvariant(sa);
330 		goto fail_set_fw_subvariant;
332 	sfc_log_init(sa, "set resource limits");
333 	rc = sfc_set_drv_limits(sa);
335 		goto fail_set_drv_limits;
337 	sfc_log_init(sa, "init nic");
338 	rc = efx_nic_init(sa->nic);
	/* Tunnel (e.g. UDP encap) config is applied only when supported */
342 	encp = efx_nic_cfg_get(sa->nic);
343 	if (encp->enc_tunnel_encapsulations_supported != 0) {
344 		sfc_log_init(sa, "apply tunnel config");
345 		rc = efx_tunnel_reconfigure(sa->nic);
347 			goto fail_tunnel_reconfigure;
350 	rc = sfc_intr_start(sa);
352 		goto fail_intr_start;
354 	rc = sfc_ev_start(sa);
358 	rc = sfc_port_start(sa);
360 		goto fail_port_start;
362 	rc = sfc_rx_start(sa);
366 	rc = sfc_tx_start(sa);
370 	rc = sfc_flow_start(sa);
372 		goto fail_flows_insert;
374 	sfc_log_init(sa, "done");
393 fail_tunnel_reconfigure:
394 	efx_nic_fini(sa->nic);
398 fail_set_fw_subvariant:
399 	sfc_log_init(sa, "failed %d", rc);
/*
 * Start the adapter: CONFIGURED -> STARTING -> STARTED.
 * Retries sfc_try_start() up to 3 times on errors that may be
 * transient after an MC reboot (EIO/EAGAIN/ENOENT/EINVAL);
 * reverts to CONFIGURED on final failure.
 */
404 sfc_start(struct sfc_adapter *sa)
406 	unsigned int start_tries = 3;
409 	sfc_log_init(sa, "entry");
411 	SFC_ASSERT(sfc_adapter_is_locked(sa));
414 	case SFC_ADAPTER_CONFIGURED:
416 	case SFC_ADAPTER_STARTED:
417 		sfc_notice(sa, "already started");
424 	sa->state = SFC_ADAPTER_STARTING;
427 		rc = sfc_try_start(sa);
428 	} while ((--start_tries > 0) &&
429 		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
434 	sa->state = SFC_ADAPTER_STARTED;
435 	sfc_log_init(sa, "done");
/* Failure path: fall back to the configured (not started) state */
439 	sa->state = SFC_ADAPTER_CONFIGURED;
441 	sfc_log_init(sa, "failed %d", rc);
/*
 * Stop the adapter: STARTED -> STOPPING -> CONFIGURED.
 * No-op (with a notice) if already stopped; the teardown calls between
 * setting STOPPING and efx_nic_fini() are elided from this listing.
 */
446 sfc_stop(struct sfc_adapter *sa)
448 	sfc_log_init(sa, "entry");
450 	SFC_ASSERT(sfc_adapter_is_locked(sa));
453 	case SFC_ADAPTER_STARTED:
455 	case SFC_ADAPTER_CONFIGURED:
456 		sfc_notice(sa, "already stopped");
459 		sfc_err(sa, "stop in unexpected state %u", sa->state);
464 	sa->state = SFC_ADAPTER_STOPPING;
472 	efx_nic_fini(sa->nic);
474 	sa->state = SFC_ADAPTER_CONFIGURED;
475 	sfc_log_init(sa, "done");
/*
 * Restart a started adapter (stop + start; the calls themselves are
 * elided from this listing).  Requires the adapter lock; does nothing
 * unless the current state is STARTED.
 */
479 sfc_restart(struct sfc_adapter *sa)
483 	SFC_ASSERT(sfc_adapter_is_locked(sa));
485 	if (sa->state != SFC_ADAPTER_STARTED)
492 		sfc_err(sa, "restart failed");
/*
 * EAL alarm callback armed by sfc_schedule_restart().
 * Atomically consumes the restart_required flag (cmpset 1 -> 0) and,
 * if the adapter is still started, performs the restart under the
 * adapter lock.
 */
498 sfc_restart_if_required(void *arg)
500 	struct sfc_adapter *sa = arg;
502 	/* If restart is scheduled, clear the flag and do it */
503 	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
505 		sfc_adapter_lock(sa);
506 		if (sa->state == SFC_ADAPTER_STARTED)
507 			(void)sfc_restart(sa);
508 		sfc_adapter_unlock(sa);
/*
 * Request an asynchronous adapter restart.  The restart_required flag
 * makes the request idempotent; the actual work happens in the 1us EAL
 * alarm callback sfc_restart_if_required().
 */
513 sfc_schedule_restart(struct sfc_adapter *sa)
517 	/* Schedule restart alarm if it is not scheduled yet */
518 	if (!rte_atomic32_test_and_set(&sa->restart_required))
521 	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
	/* ENOTSUP means alarms unavailable: restart stays pending forever */
523 		sfc_warn(sa, "alarms are not supported, restart is pending");
525 		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
527 		sfc_notice(sa, "restart scheduled");
/*
 * Apply device configuration: INITIALIZED/CONFIGURED -> CONFIGURING ->
 * CONFIGURED.  Validates dev_conf first, then configures interrupts,
 * port, Rx and Tx in order; on failure falls back to INITIALIZED
 * (unwind labels are elided from this listing).
 */
531 sfc_configure(struct sfc_adapter *sa)
535 	sfc_log_init(sa, "entry");
537 	SFC_ASSERT(sfc_adapter_is_locked(sa));
	/* Reconfiguration of an already-configured adapter is allowed */
539 	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
540 		   sa->state == SFC_ADAPTER_CONFIGURED);
541 	sa->state = SFC_ADAPTER_CONFIGURING;
543 	rc = sfc_check_conf(sa);
545 		goto fail_check_conf;
547 	rc = sfc_intr_configure(sa);
549 		goto fail_intr_configure;
551 	rc = sfc_port_configure(sa);
553 		goto fail_port_configure;
555 	rc = sfc_rx_configure(sa);
557 		goto fail_rx_configure;
559 	rc = sfc_tx_configure(sa);
561 		goto fail_tx_configure;
563 	sa->state = SFC_ADAPTER_CONFIGURED;
564 	sfc_log_init(sa, "done");
578 	sa->state = SFC_ADAPTER_INITIALIZED;
579 	sfc_log_init(sa, "failed %d", rc);
/*
 * Release device configuration: CONFIGURED -> CLOSING -> INITIALIZED.
 * The per-subsystem close calls between the state changes are elided
 * from this listing.
 */
584 sfc_close(struct sfc_adapter *sa)
586 	sfc_log_init(sa, "entry");
588 	SFC_ASSERT(sfc_adapter_is_locked(sa));
590 	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
591 	sa->state = SFC_ADAPTER_CLOSING;
598 	sa->state = SFC_ADAPTER_INITIALIZED;
599 	sfc_log_init(sa, "done");
/*
 * Describe the PCI memory BAR given by index `membar` in the libefx
 * efsys_bar_t kept in the adapter, including its access lock.
 */
603 sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
605 	struct rte_eth_dev *eth_dev = sa->eth_dev;
606 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
607 	efsys_bar_t *ebp = &sa->mem_bar;
608 	struct rte_mem_resource *res = &pci_dev->mem_resource[membar];
610 	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
611 	ebp->esb_rid = membar;
612 	ebp->esb_dev = pci_dev;
613 	ebp->esb_base = res->addr;
/*
 * Counterpart of sfc_mem_bar_init(): destroy the BAR lock and zero the
 * descriptor so stale pointers cannot be reused.
 */
618 sfc_mem_bar_fini(struct sfc_adapter *sa)
620 	efsys_bar_t *ebp = &sa->mem_bar;
622 	SFC_BAR_LOCK_DESTROY(ebp);
623 	memset(ebp, 0, sizeof(*ebp));
626 #if EFSYS_OPT_RX_SCALE
628  * A fixed RSS key which has a property of being symmetric
629  * (symmetrical flows are distributed to the same CPU)
630  * and also known to give a uniform distribution
631  * (a good distribution of traffic between different CPUs)
/* The key is the 16-bit pattern 0x6d5a repeated to fill the key size */
633 static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
634 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
635 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
636 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
637 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
638 	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
642 #if EFSYS_OPT_RX_SCALE
/*
 * Query default RSS scale/hash support from the NIC and seed the
 * adapter's RSS state (hash types, default symmetric key).  Temporarily
 * brings up intr/ev/rx modules in libefx just to make the queries,
 * then tears them down again before returning.
 */
644 sfc_set_rss_defaults(struct sfc_adapter *sa)
648 	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
652 	rc = efx_ev_init(sa->nic);
656 	rc = efx_rx_init(sa->nic);
660 	rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
662 		goto fail_scale_support_get;
664 	rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
666 		goto fail_hash_support_get;
	/* Queries done: release the temporarily initialized modules */
668 	efx_rx_fini(sa->nic);
669 	efx_ev_fini(sa->nic);
670 	efx_intr_fini(sa->nic);
672 	sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);
674 	rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));
/* Error path: unwind in reverse order of initialization */
678 fail_hash_support_get:
679 fail_scale_support_get:
681 	efx_ev_fini(sa->nic);
684 	efx_intr_fini(sa->nic);
/* Stub for builds without RSS support (EFSYS_OPT_RX_SCALE == 0) */
691 sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa)
/*
 * Attach the driver to a probed NIC: reset, tunnel init, resource
 * estimation (leaves the NIC initialized), then attach interrupt,
 * event, port, RSS-defaults and filter modules.  On success the state
 * becomes INITIALIZED; failures unwind via the fail_* labels (several
 * are elided from this listing).
 */
698 sfc_attach(struct sfc_adapter *sa)
700 	const efx_nic_cfg_t *encp;
701 	efx_nic_t *enp = sa->nic;
704 	sfc_log_init(sa, "entry");
706 	SFC_ASSERT(sfc_adapter_is_locked(sa));
	/* Start a new MCDI epoch so stale FW state is discarded */
708 	efx_mcdi_new_epoch(enp);
710 	sfc_log_init(sa, "reset nic");
711 	rc = efx_nic_reset(enp);
716 	 * Probed NIC is sufficient for tunnel init.
717 	 * Initialize tunnel support to be able to use libefx
718 	 * efx_tunnel_config_udp_{add,remove}() in any state and
719 	 * efx_tunnel_reconfigure() on start up.
721 	rc = efx_tunnel_init(enp);
723 		goto fail_tunnel_init;
725 	encp = efx_nic_cfg_get(sa->nic);
	/* TSO is only usable when the datapath supports it AND FW-assisted
	 * TSOv2 is enabled in firmware */
727 	if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
728 		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
731 				 "TSO support isn't available on this adapter");
734 	sfc_log_init(sa, "estimate resource limits");
735 	rc = sfc_estimate_resource_limits(sa);
737 		goto fail_estimate_rsrc_limits;
739 	sa->txq_max_entries = encp->enc_txq_max_ndescs;
740 	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
742 	rc = sfc_intr_attach(sa);
744 		goto fail_intr_attach;
746 	rc = sfc_ev_attach(sa);
750 	rc = sfc_port_attach(sa);
752 		goto fail_port_attach;
754 	rc = sfc_set_rss_defaults(sa);
756 		goto fail_set_rss_defaults;
758 	rc = sfc_filter_attach(sa);
760 		goto fail_filter_attach;
	/* NIC was left initialized by the estimation pass; release it now
	 * that all capability queries are done */
762 	sfc_log_init(sa, "fini nic");
767 	sa->state = SFC_ADAPTER_INITIALIZED;
769 	sfc_log_init(sa, "done");
/* Error paths unwind in reverse order of the attach sequence */
773 fail_set_rss_defaults:
783 	efx_nic_fini(sa->nic);
785 fail_estimate_rsrc_limits:
787 	efx_tunnel_fini(sa->nic);
791 	sfc_log_init(sa, "failed %d", rc);
/*
 * Reverse of sfc_attach(): detach modules (only the filter and tunnel
 * teardown calls are visible in this elided listing) and drop the
 * adapter back to the UNINITIALIZED state.
 */
796 sfc_detach(struct sfc_adapter *sa)
798 	sfc_log_init(sa, "entry");
800 	SFC_ASSERT(sfc_adapter_is_locked(sa));
804 	sfc_filter_detach(sa);
808 	efx_tunnel_fini(sa->nic);
810 	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * kvargs handler for the fw_variant device parameter: translate the
 * string value into an efx_fw_variant_t stored via `opaque`.
 * Comparison is case-insensitive.  The rejection path for unknown
 * values is elided from this listing.
 */
814 sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
815 			     const char *value_str, void *opaque)
817 	uint32_t *value = opaque;
819 	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
820 		*value = EFX_FW_VARIANT_DONT_CARE;
821 	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
822 		*value = EFX_FW_VARIANT_FULL_FEATURED;
823 	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
824 		*value = EFX_FW_VARIANT_LOW_LATENCY;
825 	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
826 		*value = EFX_FW_VARIANT_PACKED_STREAM;
/*
 * Determine the currently running firmware variant from the RxDPCPU
 * firmware id reported by the NIC.  Variants with no corresponding
 * device parameter map to EFX_FW_VARIANT_DONT_CARE.
 */
834 sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
836 	efx_nic_fw_info_t enfi;
839 	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	/* Without valid DPCPU FW ids the variant cannot be identified */
842 	else if (!enfi.enfi_dpcpu_fw_ids_valid)
846 	 * Firmware variant can be uniquely identified by the RxDPCPU
849 	switch (enfi.enfi_rx_dpcpu_fw_id) {
850 	case EFX_RXDP_FULL_FEATURED_FW_ID:
851 		*efv = EFX_FW_VARIANT_FULL_FEATURED;
854 	case EFX_RXDP_LOW_LATENCY_FW_ID:
855 		*efv = EFX_FW_VARIANT_LOW_LATENCY;
858 	case EFX_RXDP_PACKED_STREAM_FW_ID:
859 		*efv = EFX_FW_VARIANT_PACKED_STREAM;
864 	 * Other firmware variants are not considered, since they are
865 	 * not supported in the device parameters
867 	*efv = EFX_FW_VARIANT_DONT_CARE;
/*
 * Map a firmware variant to the corresponding device-parameter string.
 * NOTE(review): the case labels are EFX_RXDP_*_FW_ID constants while
 * the parameter is an efx_fw_variant_t — this presumably relies on the
 * two enumerations sharing numeric values; confirm against libefx.
 */
875 sfc_fw_variant2str(efx_fw_variant_t efv)
878 	case EFX_RXDP_FULL_FEATURED_FW_ID:
879 		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
880 	case EFX_RXDP_LOW_LATENCY_FW_ID:
881 		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
882 	case EFX_RXDP_PACKED_STREAM_FW_ID:
883 		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
/*
 * Probe the NIC with the firmware variant requested via the fw_variant
 * device parameter.  Unprivileged functions cannot select a variant,
 * so EPERM triggers a retry with DONT_CARE; afterwards the running
 * variant is read back and a warning is logged if the request was not
 * honoured.
 */
890 sfc_nic_probe(struct sfc_adapter *sa)
892 	efx_nic_t *enp = sa->nic;
893 	efx_fw_variant_t preferred_efv;
894 	efx_fw_variant_t efv;
897 	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
898 	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
899 				sfc_kvarg_fv_variant_handler,
902 		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
906 	rc = efx_nic_probe(enp, preferred_efv);
908 		/* Unprivileged functions cannot set FW variant */
909 		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
914 	rc = sfc_get_fw_variant(sa, &efv);
916 		sfc_warn(sa, "FW variant can not be obtained");
922 	/* Check that firmware variant was changed to the requested one */
923 	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
924 		sfc_warn(sa, "FW variant has not changed to the requested %s",
925 			 sfc_fw_variant2str(preferred_efv));
928 	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * First stage of device bring-up: identify the NIC family from PCI ids,
 * map the memory BAR, create the libefx NIC object, initialize MCDI
 * transport and probe the NIC.  Failure paths unwind in reverse via
 * the fail_* labels (some are elided from this listing).
 */
934 sfc_probe(struct sfc_adapter *sa)
936 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
941 	sfc_log_init(sa, "entry");
943 	SFC_ASSERT(sfc_adapter_is_locked(sa));
945 	sa->socket_id = rte_socket_id();
946 	rte_atomic32_init(&sa->restart_required);
948 	sfc_log_init(sa, "get family");
	/* efx_family() also reports which BAR holds the NIC registers */
949 	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
950 			&sa->family, &membar);
953 	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);
955 	sfc_log_init(sa, "init mem bar");
956 	rc = sfc_mem_bar_init(sa, membar);
958 		goto fail_mem_bar_init;
960 	sfc_log_init(sa, "create nic");
961 	rte_spinlock_init(&sa->nic_lock);
962 	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
963 			    &sa->mem_bar, &sa->nic_lock, &enp);
965 		goto fail_nic_create;
968 	rc = sfc_mcdi_init(sa);
972 	sfc_log_init(sa, "probe nic");
973 	rc = sfc_nic_probe(sa);
977 	sfc_log_init(sa, "done");
/* Error paths unwind in reverse order of the steps above */
984 	sfc_log_init(sa, "destroy nic");
986 	efx_nic_destroy(enp);
989 	sfc_mem_bar_fini(sa);
993 	sfc_log_init(sa, "failed %d", rc);
/*
 * Reverse of sfc_probe(): unprobe and destroy the libefx NIC object,
 * cancel any pending restart alarm (its callback dereferences the
 * device private data being freed) and release the memory BAR.
 */
998 sfc_unprobe(struct sfc_adapter *sa)
1000 	efx_nic_t *enp = sa->nic;
1002 	sfc_log_init(sa, "entry");
1004 	SFC_ASSERT(sfc_adapter_is_locked(sa));
1006 	sfc_log_init(sa, "unprobe nic");
1007 	efx_nic_unprobe(enp);
1012 	 * Make sure there is no pending alarm to restart since we are
1013 	 * going to free device private which is passed as the callback
1014 	 * opaque data. A new alarm cannot be scheduled since MCDI is
1017 	rte_eal_alarm_cancel(sfc_restart_if_required, sa);
1019 	sfc_log_init(sa, "destroy nic");
1021 	efx_nic_destroy(enp);
1023 	sfc_mem_bar_fini(sa);
1026 	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device log type named "<prefix>.<pci-address>" with
 * the given default level.  Falls back to RTE_LOGTYPE_PMD on overflow,
 * allocation failure or registration failure.
 *
 * @return The registered log type, or RTE_LOGTYPE_PMD on any failure.
 */
1030 sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
1031 		     uint32_t ll_default)
1033 	size_t lt_prefix_str_size = strlen(lt_prefix_str);
1034 	size_t lt_str_size_max;
1035 	char *lt_str = NULL;
	/* Guard the size arithmetic below against size_t overflow */
1038 	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
1039 		++lt_prefix_str_size; /* Reserve space for prefix separator */
1040 		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
1042 		return RTE_LOGTYPE_PMD;
1045 	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
1047 		return RTE_LOGTYPE_PMD;
	/* Copies strlen(prefix)+1 bytes (size was pre-incremented), then the
	 * trailing NUL slot is overwritten with the '.' separator */
1049 	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
1050 	lt_str[lt_prefix_str_size - 1] = '.';
1051 	rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
1052 			    lt_str_size_max - lt_prefix_str_size);
1053 	lt_str[lt_str_size_max - 1] = '\0';
1055 	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
1058 	return (ret < 0) ? RTE_LOGTYPE_PMD : ret;