/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
23 #include "sfc_kvargs.h"
24 #include "sfc_tweak.h"
28 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
29 size_t len, int socket_id, efsys_mem_t *esmp)
31 const struct rte_memzone *mz;
33 sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
34 name, id, len, socket_id);
36 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
37 sysconf(_SC_PAGESIZE), socket_id);
39 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
40 name, (unsigned int)id, (unsigned int)len, socket_id,
41 rte_strerror(rte_errno));
45 esmp->esm_addr = mz->iova;
46 if (esmp->esm_addr == RTE_BAD_IOVA) {
47 (void)rte_memzone_free(mz);
52 esmp->esm_base = mz->addr;
58 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
62 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
64 rc = rte_memzone_free(esmp->esm_mz);
66 sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
68 memset(esmp, 0, sizeof(*esmp));
72 sfc_phy_cap_from_link_speeds(uint32_t speeds)
74 uint32_t phy_caps = 0;
76 if (~speeds & ETH_LINK_SPEED_FIXED) {
77 phy_caps |= (1 << EFX_PHY_CAP_AN);
79 * If no speeds are specified in the mask, any supported
82 if (speeds == ETH_LINK_SPEED_AUTONEG)
84 (1 << EFX_PHY_CAP_1000FDX) |
85 (1 << EFX_PHY_CAP_10000FDX) |
86 (1 << EFX_PHY_CAP_25000FDX) |
87 (1 << EFX_PHY_CAP_40000FDX) |
88 (1 << EFX_PHY_CAP_50000FDX) |
89 (1 << EFX_PHY_CAP_100000FDX);
91 if (speeds & ETH_LINK_SPEED_1G)
92 phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
93 if (speeds & ETH_LINK_SPEED_10G)
94 phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
95 if (speeds & ETH_LINK_SPEED_25G)
96 phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
97 if (speeds & ETH_LINK_SPEED_40G)
98 phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
99 if (speeds & ETH_LINK_SPEED_50G)
100 phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
101 if (speeds & ETH_LINK_SPEED_100G)
102 phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules.
 */
sfc_check_conf(struct sfc_adapter *sa)
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;

	/* Reduce the requested link speeds to the PHY-supported subset */
	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	/* If only the autoneg bit remains, none of the speeds is usable */
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",
		/* NOTE(review): argument list and error return are missing from this excerpt — confirm against upstream */

#if !EFSYS_OPT_LOOPBACK
	/* Loopback is only available when libefx is built with it */
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");

	/* Priority-based flow control (DCB) is not supported */
	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");

	/* Legacy Flow Director API is not supported */
	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");

	/* LSC interrupt requires line or message signalled interrupts */
	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");

	/* Rx queue interrupts need datapath support for them */
	if (conf->intr_conf.rxq != 0 &&
	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised.
 *
 * NIC is kept initialized on success to allow other modules acquire
 * defaults and capabilities.
 */
sfc_estimate_resource_limits(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	/* NOTE(review): declaration of rc appears to be missing from this excerpt */
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	/*
	 * Cap Tx queues by shared FW-assisted TSOv2 contexts per PF;
	 * NOTE(review): the enclosing TSO condition is not visible in this
	 * excerpt — confirm against upstream.
	 */
	limits.edl_max_txq_count =
		MIN(limits.edl_max_txq_count,
		    encp->enc_fw_assisted_tso_v2_n_contexts /
		    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
		goto fail_get_vi_pool;

	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */

	/* Error path: undo NIC initialization */
	efx_nic_fini(sa->nic);
250 sfc_set_drv_limits(struct sfc_adapter *sa)
252 const struct rte_eth_dev_data *data = sa->eth_dev->data;
253 efx_drv_limits_t lim;
255 memset(&lim, 0, sizeof(lim));
257 /* Limits are strict since take into account initial estimation */
258 lim.edl_min_evq_count = lim.edl_max_evq_count =
259 1 + data->nb_rx_queues + data->nb_tx_queues;
260 lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
261 lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
263 return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Select and, if needed, switch the NIC firmware subvariant
 * (default vs no-Tx-checksum) based on the configured Tx offloads.
 */
sfc_set_fw_subvariant(struct sfc_adapter *sa)
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
	unsigned int txq_index;
	efx_nic_fw_subvariant_t req_fw_subvariant;
	efx_nic_fw_subvariant_t cur_fw_subvariant;

	/* Nothing to do if the NIC cannot run the no-Tx-checksum subvariant */
	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
		sfc_info(sa, "no-Tx-checksum subvariant not supported");

	/* Aggregate per-queue offloads of all initialized Tx queues */
	for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
		struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];

		if (txq_info->state & SFC_TXQ_INITIALIZED)
			tx_offloads |= txq_info->offloads;

	/* Any checksum offload forces the default subvariant */
	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
	/* NOTE(review): the 'else' keyword line is missing from this excerpt */
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;

	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
		sfc_err(sa, "failed to get FW subvariant: %d", rc);

	sfc_info(sa, "FW subvariant is %u vs required %u",
		 cur_fw_subvariant, req_fw_subvariant);

	/* Already running the required subvariant — nothing to change */
	if (cur_fw_subvariant == req_fw_subvariant)

	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
		sfc_err(sa, "failed to set FW subvariant %u: %d",
			req_fw_subvariant, rc);

	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
/*
 * Single attempt to bring the adapter up: set FW subvariant and driver
 * limits, initialize the NIC and start all submodules in order
 * (interrupts, events, port, Rx, Tx, flows).  On failure the modules
 * already started are torn down in reverse order via goto labels.
 */
sfc_try_start(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);

	sfc_log_init(sa, "set FW subvariant");
	rc = sfc_set_fw_subvariant(sa);
		goto fail_set_fw_subvariant;

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Refresh (since it may change on NIC reset/restart) a copy of
	 * supported tunnel encapsulations in shared memory to be used
	 * on supported Rx packet type classes get.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
			goto fail_tunnel_reconfigure;

	rc = sfc_intr_start(sa);
		goto fail_intr_start;

	rc = sfc_ev_start(sa);

	rc = sfc_port_start(sa);
		goto fail_port_start;

	rc = sfc_rx_start(sa);

	rc = sfc_tx_start(sa);

	rc = sfc_flow_start(sa);
		goto fail_flows_insert;

	sfc_log_init(sa, "done");

	/* NOTE(review): intermediate unwind labels are missing from this excerpt */
fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);

fail_set_fw_subvariant:
	sfc_log_init(sa, "failed %d", rc);
/*
 * Start the adapter with up to three attempts, retrying on transient
 * errors (EIO/EAGAIN/ENOENT/EINVAL) that may occur e.g. right after an
 * MC reboot.  Caller must hold the adapter lock.
 */
sfc_start(struct sfc_adapter *sa)
	unsigned int start_tries = 3;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* Valid source states; already-started is a no-op */
	case SFC_ADAPTER_CONFIGURED:
	case SFC_ADAPTER_STARTED:
		sfc_notice(sa, "already started");

	sa->state = SFC_ADAPTER_STARTING;

	/* Retry loop: transient failures are worth another attempt */
		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));

	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");

	/* Failure path: roll the state back to configured */
	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Stop the started adapter: tear down submodules and finalize the NIC,
 * returning the state to configured.  Caller must hold the adapter lock.
 */
sfc_stop(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* Only a started adapter can be stopped; stopped is a no-op */
	case SFC_ADAPTER_STARTED:
	case SFC_ADAPTER_CONFIGURED:
		sfc_notice(sa, "already stopped");
		sfc_err(sa, "stop in unexpected state %u", sa->state);

	sa->state = SFC_ADAPTER_STOPPING;

	/* NOTE(review): submodule stop calls are missing from this excerpt */
	efx_nic_fini(sa->nic);

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
/*
 * Restart a started adapter (stop followed by start).  Caller must hold
 * the adapter lock; only valid in the started state.
 */
sfc_restart(struct sfc_adapter *sa)
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)

	/* NOTE(review): stop/start calls are missing from this excerpt */
		sfc_err(sa, "restart failed");
/*
 * Alarm callback: perform a restart if one has been scheduled.
 * Atomically consumes the restart_required flag so concurrent
 * schedulers do not trigger a second restart.
 */
sfc_restart_if_required(void *arg)
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ADAPTER_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
/*
 * Request an asynchronous adapter restart via an EAL alarm.
 * Safe to call from contexts that cannot restart directly (e.g. event
 * processing); coalesces multiple requests into one pending restart.
 */
sfc_schedule_restart(struct sfc_adapter *sa)
	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))

	/* 1 us delay: run the restart as soon as the alarm thread can */
	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
		sfc_warn(sa, "alarms are not supported, restart is pending");
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
		sfc_notice(sa, "restart scheduled");
/*
 * Apply the device-level configuration: validate it, then configure
 * interrupts, port, Rx and Tx modules in order.  On success the state
 * becomes CONFIGURED; on failure it rolls back to INITIALIZED.
 * Caller must hold the adapter lock.
 */
sfc_configure(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* Reconfiguration of an already-configured adapter is allowed */
	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
		goto fail_tx_configure;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");

	/* NOTE(review): unwind labels are missing from this excerpt */
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Close a configured adapter: unwind the configure-time setup and
 * return to the initialized state.  Caller must hold the adapter lock.
 */
sfc_close(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	/* NOTE(review): module close calls are missing from this excerpt */
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
615 sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
617 struct rte_eth_dev *eth_dev = sa->eth_dev;
618 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
619 efsys_bar_t *ebp = &sa->mem_bar;
620 struct rte_mem_resource *res = &pci_dev->mem_resource[membar];
622 SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
623 ebp->esb_rid = membar;
624 ebp->esb_dev = pci_dev;
625 ebp->esb_base = res->addr;
630 sfc_mem_bar_fini(struct sfc_adapter *sa)
632 efsys_bar_t *ebp = &sa->mem_bar;
634 SFC_BAR_LOCK_DESTROY(ebp);
635 memset(ebp, 0, sizeof(*ebp));
639 * A fixed RSS key which has a property of being symmetric
640 * (symmetrical flows are distributed to the same CPU)
641 * and also known to give a uniform distribution
642 * (a good distribution of traffic between different CPUs)
644 static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
645 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
646 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
647 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
648 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
649 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
/*
 * Discover RSS capabilities (scale context type and hash support) by
 * temporarily bringing up interrupts, events and Rx in libefx, then set
 * the default symmetric RSS key.  The temporary modules are finalized
 * again before returning.
 */
sfc_rss_attach(struct sfc_adapter *sa)
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	/* Temporary bring-up required to query Rx scale/hash defaults */
	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);

	rc = efx_ev_init(sa->nic);

	rc = efx_rx_init(sa->nic);

	rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
		goto fail_hash_support_get;

	rc = sfc_rx_hash_init(sa);
		goto fail_rx_hash_init;

	/* Success path: tear the temporary bring-up down again */
	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));

	/* Failure unwind in reverse order of initialization */
fail_hash_support_get:
fail_scale_support_get:
	efx_rx_fini(sa->nic);

	efx_ev_fini(sa->nic);

	efx_intr_fini(sa->nic);
/* Undo sfc_rss_attach(): release RSS hash state. */
sfc_rss_detach(struct sfc_adapter *sa)
	sfc_rx_hash_fini(sa);
/*
 * Attach the adapter: reset the NIC, initialize tunnel support, read
 * capabilities (TSO, queue entry limits), estimate resource limits and
 * attach all submodules (interrupts, events, port, RSS, filters).
 * Caller must hold the adapter lock.
 */
sfc_attach(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* Start a new MCDI epoch after the (re)start of this function */
	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Make a copy of supported tunnel encapsulations in shared
	 * memory to be used on supported Rx packet type classes get.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	/* TSO is advertised only if the Tx datapath can offload it */
	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
		sfc_info(sa, "TSO support isn't available on this adapter");

	/* Encapsulated (tunnel) TSO has its own capability flag */
	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
	     (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
	      DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled;
		sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
		goto fail_estimate_rsrc_limits;

	/* Queue size limits; libefx guarantees powers of two */
	sa->evq_max_entries = encp->enc_evq_max_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));

	sa->evq_min_entries = encp->enc_evq_min_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));

	sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));

	sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	sa->txq_min_entries = encp->enc_txq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));

	rc = sfc_intr_attach(sa);
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);

	rc = sfc_port_attach(sa);
		goto fail_port_attach;

	rc = sfc_rss_attach(sa);
		goto fail_rss_attach;

	rc = sfc_filter_attach(sa);
		goto fail_filter_attach;

	sfc_log_init(sa, "fini nic");

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");

	/* Failure unwind in reverse order of attach */
	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:
	efx_tunnel_fini(sa->nic);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Detach the adapter: release submodules attached by sfc_attach() and
 * return to the uninitialized state.  Caller must hold the adapter lock.
 */
sfc_detach(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_filter_detach(sa);

	/* NOTE(review): other module detach calls are missing from this excerpt */
	efx_tunnel_fini(sa->nic);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * kvargs handler: map the fw_variant device argument string to the
 * corresponding EFX_FW_VARIANT_* value (case-insensitive match).
 *
 * @param key		Unused kvargs key
 * @param value_str	User-supplied value string
 * @param opaque	Pointer to uint32_t that receives the variant
 */
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
		*value = EFX_FW_VARIANT_DPDK;
	/* NOTE(review): trailing else/return lines are missing from this excerpt */
/*
 * Determine the currently running firmware variant from the RxDPCPU
 * firmware ID reported by the NIC.
 *
 * @param sa	Adapter
 * @param efv	Receives the detected EFX_FW_VARIANT_* value
 */
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
	efx_nic_fw_info_t enfi;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	/* DPCPU firmware IDs must be valid to identify the variant */
	else if (!enfi.enfi_dpcpu_fw_ids_valid)

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware ID.
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;

	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;

	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;

	case EFX_RXDP_DPDK_FW_ID:
		*efv = EFX_FW_VARIANT_DPDK;

	/*
	 * Other firmware variants are not considered, since they are
	 * not supported in the device parameters
	 */
	*efv = EFX_FW_VARIANT_DONT_CARE;
/*
 * Convert a firmware variant value to its device-argument string.
 * NOTE(review): the case labels use EFX_RXDP_*_FW_ID constants while the
 * parameter is efx_fw_variant_t — presumably the values coincide; verify
 * against the libefx definitions.
 */
sfc_fw_variant2str(efx_fw_variant_t efv)
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
	case EFX_RXDP_LOW_LATENCY_FW_ID:
		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
	case EFX_RXDP_PACKED_STREAM_FW_ID:
		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
	case EFX_RXDP_DPDK_FW_ID:
		return SFC_KVARG_FW_VARIANT_DPDK;
/*
 * Parse and validate the rxd_wait_timeout_ns device argument, storing
 * the result in sa->rxd_wait_timeout_ns (default if not given).
 */
sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
	/* Start from the default; kvargs processing may override it */
	value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
				sfc_kvarg_long_handler, &value);

	/* Reject values outside [0, EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX] */
	    (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
		sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
			    "was set (%ld);", value);
		sfc_err(sa, "it must not be less than 0 or greater than %u",
			EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);

	sa->rxd_wait_timeout_ns = value;
/*
 * Probe the NIC with the firmware variant preferred by device arguments;
 * fall back to "don't care" for unprivileged functions, then verify
 * which variant is actually running.
 */
sfc_nic_probe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);

	rc = sfc_kvarg_rxd_wait_timeout_ns(sa);

	rc = efx_nic_probe(enp, preferred_efv);
	/* Unprivileged functions cannot set FW variant */
	rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);

	rc = sfc_get_fw_variant(sa, &efv);
	if (rc == ENOTSUP) {
		sfc_warn(sa, "FW variant can not be obtained");

	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * Probe the device: detect the controller family from PCI IDs, map the
 * memory BAR, create the libefx NIC object, bring up MCDI and probe the
 * NIC.  Caller must hold the adapter lock.
 */
sfc_probe(struct sfc_adapter *sa)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
	unsigned int membar;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	/* efx_family() also reports which PCI BAR holds the registers */
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family, &membar);
	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, membar);
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
		goto fail_nic_create;

	rc = sfc_mcdi_init(sa);
		goto fail_mcdi_init;

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);
		goto fail_nic_probe;

	sfc_log_init(sa, "done");

	/* Failure unwind in reverse order of acquisition */
	sfc_log_init(sa, "destroy nic");

	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Undo sfc_probe(): unprobe and destroy the NIC object, cancel any
 * pending restart alarm and unmap the memory BAR.
 * Caller must hold the adapter lock.
 */
sfc_unprobe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free device private which is passed as the callback
	 * opaque data. A new alarm cannot be scheduled since MCDI is
	 * shut down.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_log_init(sa, "destroy nic");

	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device log type named "<prefix>.<pci address>" with
 * the given default level.
 *
 * @param pci_addr	PCI address used to build the log type name
 * @param lt_prefix_str	Log type name prefix
 * @param ll_default	Default log level for the new type
 *
 * Falls back to the generic driver logtype on overflow or allocation
 * failure.
 */
sfc_register_logtype(const struct rte_pci_addr *pci_addr,
		     const char *lt_prefix_str, uint32_t ll_default)
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;
	char *lt_str = NULL;

	/* Guard against size_t overflow when computing the buffer size */
	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
		return sfc_logtype_driver;

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
		return sfc_logtype_driver;

	/* Build "<prefix>.<pci address>" with explicit termination */
	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);

	return sfc_logtype_driver;