1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2016-2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
19 #include "sfc_debug.h"
24 #include "sfc_kvargs.h"
25 #include "sfc_tweak.h"
29 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
30 size_t len, int socket_id, efsys_mem_t *esmp)
32 const struct rte_memzone *mz;
34 sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
35 name, id, len, socket_id);
37 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
38 sysconf(_SC_PAGESIZE), socket_id);
40 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
41 name, (unsigned int)id, (unsigned int)len, socket_id,
42 rte_strerror(rte_errno));
46 esmp->esm_addr = mz->iova;
47 if (esmp->esm_addr == RTE_BAD_IOVA) {
48 (void)rte_memzone_free(mz);
53 esmp->esm_base = mz->addr;
59 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
63 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
65 rc = rte_memzone_free(esmp->esm_mz);
67 sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
69 memset(esmp, 0, sizeof(*esmp));
73 sfc_phy_cap_from_link_speeds(uint32_t speeds)
75 uint32_t phy_caps = 0;
77 if (~speeds & ETH_LINK_SPEED_FIXED) {
78 phy_caps |= (1 << EFX_PHY_CAP_AN);
80 * If no speeds are specified in the mask, any supported
83 if (speeds == ETH_LINK_SPEED_AUTONEG)
85 (1 << EFX_PHY_CAP_1000FDX) |
86 (1 << EFX_PHY_CAP_10000FDX) |
87 (1 << EFX_PHY_CAP_25000FDX) |
88 (1 << EFX_PHY_CAP_40000FDX) |
89 (1 << EFX_PHY_CAP_50000FDX) |
90 (1 << EFX_PHY_CAP_100000FDX);
92 if (speeds & ETH_LINK_SPEED_1G)
93 phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
94 if (speeds & ETH_LINK_SPEED_10G)
95 phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
96 if (speeds & ETH_LINK_SPEED_25G)
97 phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
98 if (speeds & ETH_LINK_SPEED_40G)
99 phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
100 if (speeds & ETH_LINK_SPEED_50G)
101 phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
102 if (speeds & ETH_LINK_SPEED_100G)
103 phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules.
 *
 * NOTE(review): the error-path statements between the visible checks
 * (rc assignment and returns) are not present in this excerpt --
 * confirm control flow against the complete source.
 */
sfc_check_conf(struct sfc_adapter *sa)
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;

	/* Reduce advertised speeds to the PHY-supported subset */
	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	/* Autonegotiation alone (no speed capability) is not usable */
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",

#if !EFSYS_OPT_LOOPBACK
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");

	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");

	/* LSC interrupt needs line or message interrupt support */
	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");

	/* Rx queue interrupts need datapath INTR feature */
	if (conf->intr_conf.rxq != 0 &&
	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised.
 *
 * NIC is kept initialized on success to allow other modules acquire
 * defaults and capabilities.
 *
 * NOTE(review): several statements (rc declaration/checks, the txq
 * argument of efx_nic_get_vi_pool(), EVQ subtraction, return paths)
 * are missing from this excerpt -- verify against the full source.
 */
sfc_estimate_resource_limits(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	/* Cap Tx queues by TSOv2 contexts available per PF */
	limits.edl_max_txq_count =
		MIN(limits.edl_max_txq_count,
		    encp->enc_fw_assisted_tso_v2_n_contexts /
		    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
		goto fail_get_vi_pool;

	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */

	efx_nic_fini(sa->nic);
251 sfc_set_drv_limits(struct sfc_adapter *sa)
253 const struct rte_eth_dev_data *data = sa->eth_dev->data;
254 efx_drv_limits_t lim;
256 memset(&lim, 0, sizeof(lim));
258 /* Limits are strict since take into account initial estimation */
259 lim.edl_min_evq_count = lim.edl_max_evq_count =
260 1 + data->nb_rx_queues + data->nb_tx_queues;
261 lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
262 lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
264 return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Choose the NIC firmware subvariant (default vs no-Tx-checksum) from
 * the Tx checksum offloads requested by the device configuration and
 * by every initialized Tx queue, and apply it if it differs from the
 * current one.
 *
 * NOTE(review): rc checks/returns between calls are missing from this
 * excerpt -- confirm against the complete source.
 */
sfc_set_fw_subvariant(struct sfc_adapter *sa)
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
	unsigned int txq_index;
	efx_nic_fw_subvariant_t req_fw_subvariant;
	efx_nic_fw_subvariant_t cur_fw_subvariant;

	/* Nothing to do if the NIC cannot run the no-Tx-csum subvariant */
	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
		sfc_info(sa, "no-Tx-checksum subvariant not supported");

	/* Accumulate offloads over all initialized Tx queues */
	for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
		struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];

		if (txq_info->state & SFC_TXQ_INITIALIZED)
			tx_offloads |= txq_info->offloads;

	/* Any checksum offload requires the default subvariant */
	if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
			   DEV_TX_OFFLOAD_TCP_CKSUM |
			   DEV_TX_OFFLOAD_UDP_CKSUM |
			   DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;

	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
		sfc_err(sa, "failed to get FW subvariant: %d", rc);

	sfc_info(sa, "FW subvariant is %u vs required %u",
		 cur_fw_subvariant, req_fw_subvariant);

	/* Already running the required subvariant */
	if (cur_fw_subvariant == req_fw_subvariant)

	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
		sfc_err(sa, "failed to set FW subvariant %u: %d",
			req_fw_subvariant, rc);

	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
/*
 * One attempt to bring the adapter from STARTING to started: set FW
 * subvariant and driver limits, init the NIC, reconfigure tunnels if
 * supported, then start interrupts, events, port, Rx, Tx and flows.
 *
 * NOTE(review): most rc checks and unwind labels between the visible
 * lines are missing from this excerpt -- confirm the unwind order
 * against the complete source.
 */
sfc_try_start(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);

	sfc_log_init(sa, "set FW subvariant");
	rc = sfc_set_fw_subvariant(sa);
		goto fail_set_fw_subvariant;

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Refresh (since it may change on NIC reset/restart) a copy of
	 * supported tunnel encapsulations in shared memory to be used
	 * on supported Rx packet type classes get.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
			goto fail_tunnel_reconfigure;

	rc = sfc_intr_start(sa);
		goto fail_intr_start;

	rc = sfc_ev_start(sa);

	rc = sfc_port_start(sa);
		goto fail_port_start;

	rc = sfc_rx_start(sa);

	rc = sfc_tx_start(sa);

	rc = sfc_flow_start(sa);
		goto fail_flows_insert;

	sfc_log_init(sa, "done");

fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);

fail_set_fw_subvariant:
	sfc_log_init(sa, "failed %d", rc);
/*
 * Start the adapter, retrying sfc_try_start() up to three times on
 * errors treated as transient (EIO/EAGAIN/ENOENT/EINVAL).
 *
 * NOTE(review): the switch header, loop opener and rc checks are
 * missing from this excerpt.
 */
sfc_start(struct sfc_adapter *sa)
	unsigned int start_tries = 3;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* State dispatch: only CONFIGURED may proceed to start */
	case SFC_ADAPTER_CONFIGURED:
	case SFC_ADAPTER_STARTED:
		sfc_notice(sa, "already started");

	sa->state = SFC_ADAPTER_STARTING;

	rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));

	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");

	/* Failure path: fall back to CONFIGURED */
	sa->state = SFC_ADAPTER_CONFIGURED;

	sfc_log_init(sa, "failed %d", rc);
/*
 * Stop a started adapter and return its state to CONFIGURED.
 *
 * NOTE(review): the switch header and the module stop calls between
 * STOPPING and efx_nic_fini() are missing from this excerpt.
 */
sfc_stop(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* State dispatch: only STARTED needs stopping */
	case SFC_ADAPTER_STARTED:
	case SFC_ADAPTER_CONFIGURED:
		sfc_notice(sa, "already stopped");
		sfc_err(sa, "stop in unexpected state %u", sa->state);

	sa->state = SFC_ADAPTER_STOPPING;

	efx_nic_fini(sa->nic);

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
/*
 * Restart the adapter (stop then start); only meaningful when the
 * adapter is in STARTED state.
 *
 * NOTE(review): the stop/start calls and return statements are
 * missing from this excerpt.
 */
sfc_restart(struct sfc_adapter *sa)

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)

		sfc_err(sa, "restart failed");
/*
 * EAL alarm callback: if a restart was scheduled, clear the flag and
 * restart the adapter under the adapter lock.
 *
 * NOTE(review): the cmpset expected/desired arguments are truncated
 * in this excerpt.
 */
sfc_restart_if_required(void *arg)
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ADAPTER_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
/*
 * Schedule an asynchronous adapter restart via an EAL alarm; a flag
 * ensures at most one restart is pending at a time.
 *
 * NOTE(review): the branch conditions distinguishing the warn/err/
 * notice messages are missing from this excerpt.
 */
sfc_schedule_restart(struct sfc_adapter *sa)

	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))

	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
		sfc_warn(sa, "alarms are not supported, restart is pending");
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
		sfc_notice(sa, "restart scheduled");
/*
 * Device-level configure: validate the configuration, then configure
 * interrupts, port, Rx and Tx in order; INITIALIZED/CONFIGURED ->
 * CONFIGURED on success, back to INITIALIZED on failure.
 *
 * NOTE(review): rc checks and the unwind labels between "done" and
 * the failure tail are missing from this excerpt.
 */
sfc_configure(struct sfc_adapter *sa)

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
		goto fail_tx_configure;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");

	/* Failure tail */
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Device close: release configured resources and return to the
 * INITIALIZED state.
 *
 * NOTE(review): the module close calls between CLOSING and
 * INITIALIZED are missing from this excerpt.
 */
sfc_close(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
616 sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
618 struct rte_eth_dev *eth_dev = sa->eth_dev;
619 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
620 efsys_bar_t *ebp = &sa->mem_bar;
621 struct rte_mem_resource *res = &pci_dev->mem_resource[membar];
623 SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
624 ebp->esb_rid = membar;
625 ebp->esb_dev = pci_dev;
626 ebp->esb_base = res->addr;
631 sfc_mem_bar_fini(struct sfc_adapter *sa)
633 efsys_bar_t *ebp = &sa->mem_bar;
635 SFC_BAR_LOCK_DESTROY(ebp);
636 memset(ebp, 0, sizeof(*ebp));
640 * A fixed RSS key which has a property of being symmetric
641 * (symmetrical flows are distributed to the same CPU)
642 * and also known to give a uniform distribution
643 * (a good distribution of traffic between different CPUs)
645 static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
646 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
647 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
648 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
649 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
650 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
/*
 * Discover RSS capabilities (scale context type and hash support)
 * using temporarily initialized intr/ev/rx modules, then set the
 * default RSS key and context.
 *
 * NOTE(review): rc checks after the first three init calls and the
 * return statements are missing from this excerpt.
 */
sfc_rss_attach(struct sfc_adapter *sa)
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);

	rc = efx_ev_init(sa->nic);

	rc = efx_rx_init(sa->nic);

	rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
		goto fail_hash_support_get;

	rc = sfc_rx_hash_init(sa);
		goto fail_rx_hash_init;

	/* Tear down the temporarily initialized modules */
	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
	rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;

	/* Failure unwind: fini in reverse order of init */
fail_hash_support_get:
fail_scale_support_get:
	efx_rx_fini(sa->nic);

	efx_ev_fini(sa->nic);

	efx_intr_fini(sa->nic);
/* Reverse sfc_rss_attach(): release RSS hash state. */
static void
sfc_rss_detach(struct sfc_adapter *sa)
{
	sfc_rx_hash_fini(sa);
}
/*
 * Attach the driver to the probed NIC: reset it, init tunnel support,
 * cache capabilities, estimate resource limits and attach the intr,
 * ev, port, RSS and filter modules; INITIALIZED state on success.
 *
 * NOTE(review): rc checks, TSO condition headers and several unwind
 * labels are missing from this excerpt -- confirm against the
 * complete source.
 */
sfc_attach(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Make a copy of supported tunnel encapsulations in shared
	 * memory to be used on supported Rx packet type classes get.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
		sfc_info(sa, "TSO support isn't available on this adapter");

	/* Encapsulated TSO depends on tunnel TSO datapath offloads */
		(sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
		 (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		  DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled;
		sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
		goto fail_estimate_rsrc_limits;

	/* Cache queue entry limits; all are power-of-2 per the asserts */
	sa->evq_max_entries = encp->enc_evq_max_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));

	sa->evq_min_entries = encp->enc_evq_min_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));

	sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));

	sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	sa->txq_min_entries = encp->enc_txq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));

	rc = sfc_intr_attach(sa);
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);

	rc = sfc_port_attach(sa);
		goto fail_port_attach;

	rc = sfc_rss_attach(sa);
		goto fail_rss_attach;

	rc = sfc_filter_attach(sa);
		goto fail_filter_attach;

	sfc_log_init(sa, "fini nic");

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");

	/* Failure unwind */
	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:

	efx_tunnel_fini(sa->nic);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Reverse sfc_attach(): detach modules, finalize tunnel support and
 * mark the adapter UNINITIALIZED.
 *
 * NOTE(review): detach calls between filter and tunnel fini are
 * missing from this excerpt.
 */
sfc_detach(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_filter_detach(sa);

	efx_tunnel_fini(sa->nic);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
863 sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
864 const char *value_str, void *opaque)
866 uint32_t *value = opaque;
868 if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
869 *value = EFX_FW_VARIANT_DONT_CARE;
870 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
871 *value = EFX_FW_VARIANT_FULL_FEATURED;
872 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
873 *value = EFX_FW_VARIANT_LOW_LATENCY;
874 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
875 *value = EFX_FW_VARIANT_PACKED_STREAM;
876 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
877 *value = EFX_FW_VARIANT_DPDK;
/*
 * Determine the running firmware variant from the RxDPCPU firmware ID
 * reported by the NIC; unknown IDs map to DONT_CARE.
 *
 * NOTE(review): rc checks, break statements and the default case
 * header are missing from this excerpt.
 */
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
	efx_nic_fw_info_t enfi;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	else if (!enfi.enfi_dpcpu_fw_ids_valid)

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware ID.
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;

	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;

	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;

	case EFX_RXDP_DPDK_FW_ID:
		*efv = EFX_FW_VARIANT_DPDK;

	/*
	 * Other firmware variants are not considered, since they are
	 * not supported in the device parameters.
	 */
	*efv = EFX_FW_VARIANT_DONT_CARE;
930 sfc_fw_variant2str(efx_fw_variant_t efv)
933 case EFX_RXDP_FULL_FEATURED_FW_ID:
934 return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
935 case EFX_RXDP_LOW_LATENCY_FW_ID:
936 return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
937 case EFX_RXDP_PACKED_STREAM_FW_ID:
938 return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
939 case EFX_RXDP_DPDK_FW_ID:
940 return SFC_KVARG_FW_VARIANT_DPDK;
/*
 * Parse and range-check the rxd_wait_timeout_ns device argument,
 * storing the validated value in sa->rxd_wait_timeout_ns.
 *
 * NOTE(review): declarations, rc checks, the first half of the range
 * condition and return statements are missing from this excerpt.
 */
sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)

	value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
				sfc_kvarg_long_handler, &value);

	/* Upper bound is the HW head-of-line blocking maximum */
	    (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
		sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
			"was set (%ld);", value);
		sfc_err(sa, "it must not be less than 0 or greater than %u",
			EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);

	sa->rxd_wait_timeout_ns = value;
/*
 * Probe the NIC with the firmware variant preferred via device args,
 * falling back to DONT_CARE (unprivileged functions cannot set the
 * variant), then report which variant is actually running.
 *
 * NOTE(review): rc checks and the retry condition before the second
 * efx_nic_probe() call are missing from this excerpt.
 */
sfc_nic_probe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);

	rc = sfc_kvarg_rxd_wait_timeout_ns(sa);

	rc = efx_nic_probe(enp, preferred_efv);
	/* Unprivileged functions cannot set FW variant */
	rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);

	rc = sfc_get_fw_variant(sa, &efv);
	if (rc == ENOTSUP) {
		sfc_warn(sa, "FW variant can not be obtained");

	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * Probe-time initialization: determine the EFX family from PCI IDs,
 * map the memory BAR, create the NIC object, init MCDI and probe the
 * NIC; unwinds in reverse order on failure.
 *
 * NOTE(review): the enp declaration, rc checks and the unwind labels
 * are missing from this excerpt.
 */
sfc_probe(struct sfc_adapter *sa)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
	unsigned int membar;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family, &membar);
	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, membar);
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->nic_lock, &enp);
		goto fail_nic_create;

	rc = sfc_mcdi_init(sa);
		goto fail_mcdi_init;

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);
		goto fail_nic_probe;

	sfc_log_init(sa, "done");

	/* Failure unwind */
	sfc_log_init(sa, "destroy nic");

	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Reverse sfc_probe(): unprobe and destroy the NIC, cancel any
 * pending restart alarm and unmap the memory BAR.
 */
sfc_unprobe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free device private which is passed as the callback
	 * opaque data.  A new alarm cannot be scheduled since MCDI is
	 * torn down at this point (tail of original comment truncated).
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_log_init(sa, "destroy nic");

	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device log type named "<prefix>.<PCI address>" and
 * return its id; falls back to the generic driver logtype when the
 * name cannot be built or registered.
 *
 * NOTE(review): the else branch of the overflow guard, NULL check of
 * the allocation, the free of lt_str and the success return are
 * missing from this excerpt.
 */
sfc_register_logtype(const struct rte_pci_addr *pci_addr,
		     const char *lt_prefix_str, uint32_t ll_default)
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;
	char *lt_str = NULL;

	/* Guard against size_t overflow when sizing the name buffer */
	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
		return sfc_logtype_driver;

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
		return sfc_logtype_driver;

	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	/* rte_zmalloc zero-fills, but terminate explicitly anyway */
	lt_str[lt_str_size_max - 1] = '\0';

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);

	return sfc_logtype_driver;