1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2020 Xilinx, Inc.
4 * Copyright(c) 2016-2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
19 #include "sfc_debug.h"
24 #include "sfc_kvargs.h"
25 #include "sfc_tweak.h"
/*
 * Allocate page-aligned DMA memory for the NIC via an rte_memzone on the
 * given NUMA socket and describe it in *esmp for use by libefx.
 *
 * NOTE(review): the embedded line numbering is non-contiguous here, so
 * braces and error/return paths are elided from this view.
 */
29 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
30 size_t len, int socket_id, efsys_mem_t *esmp)
32 const struct rte_memzone *mz;
34 sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
35 name, id, len, socket_id);
/* Page-size alignment is requested for the DMA zone */
37 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
38 sysconf(_SC_PAGESIZE), socket_id);
/* Reservation failure path (the guarding condition is elided in this view) */
40 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
41 name, (unsigned int)id, (unsigned int)len, socket_id,
42 rte_strerror(rte_errno));
46 esmp->esm_addr = mz->iova;
/* The device cannot DMA to/from the zone without a valid IOVA */
47 if (esmp->esm_addr == RTE_BAD_IOVA) {
48 (void)rte_memzone_free(mz);
53 esmp->esm_base = mz->addr;
/*
 * Release DMA memory previously obtained by sfc_dma_alloc() and clear the
 * libefx memory descriptor so stale pointers cannot be reused.
 *
 * A failure of rte_memzone_free() is only logged: this runs on teardown
 * paths where the caller has no sensible recovery.
 * NOTE(review): the embedded line numbering is non-contiguous, so the
 * guarding condition around the error log is elided from this view.
 */
59 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
63 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
65 rc = rte_memzone_free(esmp->esm_mz);
/* Fixed malformed log message: was "rte_memzone_free((" */
67 sfc_err(sa, "rte_memzone_free() failed: %d", rc);
69 memset(esmp, 0, sizeof(*esmp));
/*
 * Translate an ethdev ETH_LINK_SPEED_* bit mask into a libefx PHY
 * capability mask (EFX_PHY_CAP_*).
 *
 * NOTE(review): non-contiguous line numbering — closing braces and the
 * return statement are elided from this view.
 */
73 sfc_phy_cap_from_link_speeds(uint32_t speeds)
75 uint32_t phy_caps = 0;
/* FIXED bit clear means autonegotiation is allowed */
77 if (~speeds & ETH_LINK_SPEED_FIXED) {
78 phy_caps |= (1 << EFX_PHY_CAP_AN);
80 * If no speeds are specified in the mask, any supported
/* AUTONEG alone advertises every speed this driver knows about */
83 if (speeds == ETH_LINK_SPEED_AUTONEG)
85 (1 << EFX_PHY_CAP_1000FDX) |
86 (1 << EFX_PHY_CAP_10000FDX) |
87 (1 << EFX_PHY_CAP_25000FDX) |
88 (1 << EFX_PHY_CAP_40000FDX) |
89 (1 << EFX_PHY_CAP_50000FDX) |
90 (1 << EFX_PHY_CAP_100000FDX);
/* Map each explicitly requested speed bit to its full-duplex PHY cap */
92 if (speeds & ETH_LINK_SPEED_1G)
93 phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
94 if (speeds & ETH_LINK_SPEED_10G)
95 phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
96 if (speeds & ETH_LINK_SPEED_25G)
97 phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
98 if (speeds & ETH_LINK_SPEED_40G)
99 phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
100 if (speeds & ETH_LINK_SPEED_50G)
101 phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
102 if (speeds & ETH_LINK_SPEED_100G)
103 phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
/*
 * Validate device-level bits of the requested rte_eth_conf and reject
 * features this PMD does not support (loopback without EFSYS_OPT_LOOPBACK,
 * DCB, Flow Director, unsupported interrupt modes).
 * NOTE(review): non-contiguous line numbering — rc assignments, closing
 * braces and the return are elided from this view.
 */
109 * Check requested device level configuration.
110 * Receive and transmit configuration is checked in corresponding
114 sfc_check_conf(struct sfc_adapter *sa)
116 const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
/* Intersect requested speeds with what the PHY actually supports */
119 sa->port.phy_adv_cap =
120 sfc_phy_cap_from_link_speeds(conf->link_speeds) &
121 sa->port.phy_adv_cap_mask;
/* AN alone is not enough: at least one real speed must remain */
122 if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
123 sfc_err(sa, "No link speeds from mask %#x are supported",
128 #if !EFSYS_OPT_LOOPBACK
129 if (conf->lpbk_mode != 0) {
130 sfc_err(sa, "Loopback not supported");
135 if (conf->dcb_capability_en != 0) {
136 sfc_err(sa, "Priority-based flow control not supported");
140 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
141 sfc_err(sa, "Flow Director not supported");
/* LSC interrupt requires line or message interrupts from the NIC */
145 if ((conf->intr_conf.lsc != 0) &&
146 (sa->intr.type != EFX_INTR_LINE) &&
147 (sa->intr.type != EFX_INTR_MESSAGE)) {
148 sfc_err(sa, "Link status change interrupt not supported");
/* Rx queue interrupts depend on the selected Rx datapath's features */
152 if (conf->intr_conf.rxq != 0 &&
153 (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) {
154 sfc_err(sa, "Receive queue interrupt not supported");
/*
 * Estimate maximum Rx/Tx queue counts this function may use: publish
 * min/max driver limits to libefx, initialize the NIC, read back the VI
 * pool actually granted by firmware and derive sa->rxq_max/sa->txq_max.
 * NOTE(review): non-contiguous line numbering — rc checks, goto labels
 * and the success/failure returns are elided from this view.
 */
162 * Find out maximum number of receive and transmit queues which could be
165 * NIC is kept initialized on success to allow other modules acquire
166 * defaults and capabilities.
169 sfc_estimate_resource_limits(struct sfc_adapter *sa)
171 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
172 efx_drv_limits_t limits;
174 uint32_t evq_allocated;
175 uint32_t rxq_allocated;
176 uint32_t txq_allocated;
178 memset(&limits, 0, sizeof(limits));
180 /* Request at least one Rx and Tx queue */
181 limits.edl_min_rxq_count = 1;
182 limits.edl_min_txq_count = 1;
183 /* Management event queue plus event queue for each Tx and Rx queue */
184 limits.edl_min_evq_count =
185 1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
187 /* Divide by number of functions to guarantee that all functions
188 * will get promised resources
190 /* FIXME Divide by number of functions (not 2) below */
191 limits.edl_max_evq_count = encp->enc_evq_limit / 2;
192 SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
194 /* Split equally between receive and transmit */
195 limits.edl_max_rxq_count =
196 MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
197 SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
199 limits.edl_max_txq_count =
200 MIN(encp->enc_txq_limit,
201 limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
/* TSOv2 contexts are shared between PFs; cap Tx queues accordingly */
204 limits.edl_max_txq_count =
205 MIN(limits.edl_max_txq_count,
206 encp->enc_fw_assisted_tso_v2_n_contexts /
207 encp->enc_hw_pf_count);
209 SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
211 /* Configure the minimum required resources needed for the
212 * driver to operate, and the maximum desired resources that the
213 * driver is capable of using.
215 efx_nic_set_drv_limits(sa->nic, &limits);
217 sfc_log_init(sa, "init nic");
218 rc = efx_nic_init(sa->nic);
222 /* Find resource dimensions assigned by firmware to this function */
223 rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
226 goto fail_get_vi_pool;
228 /* It still may allocate more than maximum, ensure limit */
229 evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
230 rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
231 txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
233 /* Subtract management EVQ not used for traffic */
234 SFC_ASSERT(evq_allocated > 0);
237 /* Right now we use separate EVQ for Rx and Tx */
238 sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
239 sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
241 /* Keep NIC initialized */
/* Error path: undo efx_nic_init() (label elided in this view) */
245 efx_nic_fini(sa->nic);
/*
 * Publish strict (min == max) driver limits to libefx based on the queue
 * counts actually configured on the ethdev, plus one management EVQ.
 * NOTE(review): non-contiguous line numbering — braces elided.
 */
251 sfc_set_drv_limits(struct sfc_adapter *sa)
253 const struct rte_eth_dev_data *data = sa->eth_dev->data;
254 efx_drv_limits_t lim;
256 memset(&lim, 0, sizeof(lim));
258 /* Limits are strict since take into account initial estimation */
/* One EVQ per Rx queue, one per Tx queue, plus the management EVQ */
259 lim.edl_min_evq_count = lim.edl_max_evq_count =
260 1 + data->nb_rx_queues + data->nb_tx_queues;
261 lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
262 lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
264 return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Select the firmware subvariant matching the configured Tx checksum
 * offloads: switch to the no-Tx-checksum subvariant when no checksum
 * offload is requested, otherwise keep the default one.
 * NOTE(review): non-contiguous line numbering — rc checks, returns and
 * some braces are elided from this view.
 */
268 sfc_set_fw_subvariant(struct sfc_adapter *sa)
270 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
271 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
272 uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
273 unsigned int txq_index;
274 efx_nic_fw_subvariant_t req_fw_subvariant;
275 efx_nic_fw_subvariant_t cur_fw_subvariant;
/* Nothing to do if firmware cannot switch subvariants */
278 if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
279 sfc_info(sa, "no-Tx-checksum subvariant not supported");
/* Per-queue offloads also count towards the decision */
283 for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
284 struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];
286 if (txq_info->state & SFC_TXQ_INITIALIZED)
287 tx_offloads |= txq_info->offloads;
290 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
291 DEV_TX_OFFLOAD_TCP_CKSUM |
292 DEV_TX_OFFLOAD_UDP_CKSUM |
293 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
294 req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
296 req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
298 rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
300 sfc_err(sa, "failed to get FW subvariant: %d", rc);
303 sfc_info(sa, "FW subvariant is %u vs required %u",
304 cur_fw_subvariant, req_fw_subvariant);
/* Already running the required subvariant — no switch needed */
306 if (cur_fw_subvariant == req_fw_subvariant)
309 rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
311 sfc_err(sa, "failed to set FW subvariant %u: %d",
312 req_fw_subvariant, rc);
315 sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
/*
 * Single start attempt: set FW subvariant and driver limits, init the NIC,
 * apply tunnel config, then bring up interrupts, events, port, Rx, Tx and
 * flows in order. Called from sfc_start() which retries on soft errors.
 * NOTE(review): non-contiguous line numbering — rc checks, most failure
 * labels and the unwind sequence are elided from this view.
 */
321 sfc_try_start(struct sfc_adapter *sa)
323 const efx_nic_cfg_t *encp;
326 sfc_log_init(sa, "entry");
328 SFC_ASSERT(sfc_adapter_is_locked(sa));
329 SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
331 sfc_log_init(sa, "set FW subvariant");
332 rc = sfc_set_fw_subvariant(sa);
334 goto fail_set_fw_subvariant;
336 sfc_log_init(sa, "set resource limits");
337 rc = sfc_set_drv_limits(sa);
339 goto fail_set_drv_limits;
341 sfc_log_init(sa, "init nic");
342 rc = efx_nic_init(sa->nic);
346 encp = efx_nic_cfg_get(sa->nic);
349 * Refresh (since it may change on NIC reset/restart) a copy of
350 * supported tunnel encapsulations in shared memory to be used
351 * on supported Rx packet type classes get.
353 sa->priv.shared->tunnel_encaps =
354 encp->enc_tunnel_encapsulations_supported;
356 if (encp->enc_tunnel_encapsulations_supported != 0) {
357 sfc_log_init(sa, "apply tunnel config");
358 rc = efx_tunnel_reconfigure(sa->nic);
360 goto fail_tunnel_reconfigure;
/* Startup order matters: interrupts -> events -> port -> Rx -> Tx -> flows */
363 rc = sfc_intr_start(sa);
365 goto fail_intr_start;
367 rc = sfc_ev_start(sa);
371 rc = sfc_port_start(sa);
373 goto fail_port_start;
375 rc = sfc_rx_start(sa);
379 rc = sfc_tx_start(sa);
383 rc = sfc_flow_start(sa);
385 goto fail_flows_insert;
387 sfc_log_init(sa, "done");
/* Unwind path (intermediate labels elided in this view) */
406 fail_tunnel_reconfigure:
407 efx_nic_fini(sa->nic);
411 fail_set_fw_subvariant:
412 sfc_log_init(sa, "failed %d", rc);
/*
 * Start the adapter: recreate the SR-IOV vSwitch and retry sfc_try_start()
 * up to 3 times on transient errors (EIO/EAGAIN/ENOENT/EINVAL), which can
 * occur around MC reboot-like events.
 * NOTE(review): non-contiguous line numbering — switch braces, returns and
 * some error handling are elided from this view.
 */
417 sfc_start(struct sfc_adapter *sa)
419 unsigned int start_tries = 3;
422 sfc_log_init(sa, "entry");
424 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Only CONFIGURED may proceed; STARTED is a benign no-op */
427 case SFC_ADAPTER_CONFIGURED:
429 case SFC_ADAPTER_STARTED:
430 sfc_notice(sa, "already started");
437 sa->state = SFC_ADAPTER_STARTING;
442 * FIXME Try to recreate vSwitch on start retry.
443 * vSwitch is absent after MC reboot like events and
444 * we should recreate it. May be we need proper
445 * indication instead of guessing.
448 sfc_sriov_vswitch_destroy(sa);
449 rc = sfc_sriov_vswitch_create(sa);
451 goto fail_sriov_vswitch_create;
/* Retry loop: these errno values indicate transient start failures */
453 rc = sfc_try_start(sa);
454 } while ((--start_tries > 0) &&
455 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
460 sa->state = SFC_ADAPTER_STARTED;
461 sfc_log_init(sa, "done");
465 fail_sriov_vswitch_create:
/* Roll back state so a later sfc_start() can be attempted again */
466 sa->state = SFC_ADAPTER_CONFIGURED;
468 sfc_log_init(sa, "failed %d", rc);
/*
 * Stop a started adapter and return it to the CONFIGURED state.
 * NOTE(review): non-contiguous line numbering — the switch body, the
 * module stop calls between STOPPING and efx_nic_fini(), and returns are
 * elided from this view.
 */
473 sfc_stop(struct sfc_adapter *sa)
475 sfc_log_init(sa, "entry");
477 SFC_ASSERT(sfc_adapter_is_locked(sa));
480 case SFC_ADAPTER_STARTED:
/* Stopping an already-stopped adapter is a benign no-op */
482 case SFC_ADAPTER_CONFIGURED:
483 sfc_notice(sa, "already stopped");
486 sfc_err(sa, "stop in unexpected state %u", sa->state);
491 sa->state = SFC_ADAPTER_STOPPING;
499 efx_nic_fini(sa->nic);
501 sa->state = SFC_ADAPTER_CONFIGURED;
502 sfc_log_init(sa, "done");
/*
 * Restart a started adapter (stop then start under the adapter lock).
 * NOTE(review): non-contiguous line numbering — the stop/start calls and
 * return statements are elided from this view.
 */
506 sfc_restart(struct sfc_adapter *sa)
510 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Restart only makes sense for a running adapter */
512 if (sa->state != SFC_ADAPTER_STARTED)
519 sfc_err(sa, "restart failed");
/*
 * Alarm callback: atomically consume the restart_required flag and restart
 * the adapter if it is still started. The cmpset guarantees only one
 * pending restart is executed per scheduled request.
 * NOTE(review): non-contiguous line numbering — closing braces elided.
 */
525 sfc_restart_if_required(void *arg)
527 struct sfc_adapter *sa = arg;
529 /* If restart is scheduled, clear the flag and do it */
530 if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
532 sfc_adapter_lock(sa);
533 if (sa->state == SFC_ADAPTER_STARTED)
/* Best effort: restart failure is logged inside sfc_restart() */
534 (void)sfc_restart(sa);
535 sfc_adapter_unlock(sa);
/*
 * Request an asynchronous adapter restart via an EAL alarm. The
 * restart_required flag deduplicates requests so only one alarm is armed.
 * NOTE(review): non-contiguous line numbering — return statements and the
 * conditions distinguishing the two error logs are elided from this view.
 */
540 sfc_schedule_restart(struct sfc_adapter *sa)
544 /* Schedule restart alarm if it is not scheduled yet */
545 if (!rte_atomic32_test_and_set(&sa->restart_required))
/* 1 us delay: run the restart as soon as the alarm thread can */
548 rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
550 sfc_warn(sa, "alarms are not supported, restart is pending");
552 sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
554 sfc_notice(sa, "restart scheduled");
/*
 * Apply device configuration: validate it, then configure interrupts,
 * port, Rx and Tx in order. On success the adapter moves to CONFIGURED;
 * on failure it is rolled back to INITIALIZED.
 * NOTE(review): non-contiguous line numbering — rc checks, the unwind
 * labels between fail_tx_configure and fail_check_conf, and the success
 * return are elided from this view.
 */
558 sfc_configure(struct sfc_adapter *sa)
562 sfc_log_init(sa, "entry");
564 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Reconfiguration of an already configured adapter is allowed */
566 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
567 sa->state == SFC_ADAPTER_CONFIGURED);
568 sa->state = SFC_ADAPTER_CONFIGURING;
570 rc = sfc_check_conf(sa);
572 goto fail_check_conf;
574 rc = sfc_intr_configure(sa);
576 goto fail_intr_configure;
578 rc = sfc_port_configure(sa);
580 goto fail_port_configure;
582 rc = sfc_rx_configure(sa);
584 goto fail_rx_configure;
586 rc = sfc_tx_configure(sa);
588 goto fail_tx_configure;
590 sa->state = SFC_ADAPTER_CONFIGURED;
591 sfc_log_init(sa, "done");
/* Failure path: state rolled back after unwinding (labels elided) */
605 sa->state = SFC_ADAPTER_INITIALIZED;
606 sfc_log_init(sa, "failed %d", rc);
/*
 * Close a configured adapter: tear down the configured modules and return
 * to the INITIALIZED state.
 * NOTE(review): non-contiguous line numbering — the close calls between
 * CLOSING and INITIALIZED are elided from this view.
 */
611 sfc_close(struct sfc_adapter *sa)
613 sfc_log_init(sa, "entry");
615 SFC_ASSERT(sfc_adapter_is_locked(sa));
617 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
618 sa->state = SFC_ADAPTER_CLOSING;
625 sa->state = SFC_ADAPTER_INITIALIZED;
626 sfc_log_init(sa, "done");
/*
 * Describe the PCI memory BAR identified by 'membar' in sa->mem_bar for
 * libefx register access, including the BAR access lock.
 * NOTE(review): non-contiguous line numbering — braces and return elided.
 */
630 sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
632 struct rte_eth_dev *eth_dev = sa->eth_dev;
633 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
634 efsys_bar_t *ebp = &sa->mem_bar;
635 struct rte_mem_resource *res = &pci_dev->mem_resource[membar];
637 SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
638 ebp->esb_rid = membar;
639 ebp->esb_dev = pci_dev;
/* Base virtual address of the mapped BAR */
640 ebp->esb_base = res->addr;
/*
 * Destroy the BAR access lock and clear the BAR descriptor so stale
 * pointers cannot be reused after unprobe.
 */
645 sfc_mem_bar_fini(struct sfc_adapter *sa)
647 efsys_bar_t *ebp = &sa->mem_bar;
649 SFC_BAR_LOCK_DESTROY(ebp);
650 memset(ebp, 0, sizeof(*ebp));
/*
 * Default Toeplitz RSS key: the repeating 0x6d5a pattern is a well-known
 * key that is symmetric (both directions of a flow hash to the same
 * value) while still giving a uniform distribution.
 */
654 * A fixed RSS key which has a property of being symmetric
655 * (symmetrical flows are distributed to the same CPU)
656 * and also known to give a uniform distribution
657 * (a good distribution of traffic between different CPUs)
659 static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
660 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
661 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
662 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
663 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
664 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
/*
 * Query RSS capabilities (scale context type, hash support) from the NIC.
 * Interrupts, events and Rx are initialized temporarily just to perform
 * the queries and are torn down again before returning.
 * NOTE(review): non-contiguous line numbering — rc checks, returns and
 * some failure labels are elided from this view.
 */
668 sfc_rss_attach(struct sfc_adapter *sa)
670 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
/* Bring up just enough of the NIC to query RSS capabilities */
673 rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
677 rc = efx_ev_init(sa->nic);
681 rc = efx_rx_init(sa->nic);
685 rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
687 goto fail_scale_support_get;
689 rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
691 goto fail_hash_support_get;
693 rc = sfc_rx_hash_init(sa);
695 goto fail_rx_hash_init;
/* Success path: tear down the temporary init in reverse order */
697 efx_rx_fini(sa->nic);
698 efx_ev_fini(sa->nic);
699 efx_intr_fini(sa->nic);
701 rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
702 rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
/* Failure path: same teardown, reached via the labels below */
707 fail_hash_support_get:
708 fail_scale_support_get:
709 efx_rx_fini(sa->nic);
712 efx_ev_fini(sa->nic);
715 efx_intr_fini(sa->nic);
/* Undo sfc_rss_attach(): release Rx hash state. */
722 sfc_rss_detach(struct sfc_adapter *sa)
724 sfc_rx_hash_fini(sa);
/*
 * Attach the adapter after probe: reset the NIC, attach SR-IOV and tunnel
 * support, detect TSO capabilities, estimate resource limits, then attach
 * interrupts, events, port, RSS and filters, and finally create the
 * SR-IOV vSwitch. Moves the adapter to the INITIALIZED state on success.
 * NOTE(review): non-contiguous line numbering — rc checks, several module
 * attach/detach calls on the unwind path and returns are elided from this
 * view.
 */
728 sfc_attach(struct sfc_adapter *sa)
730 const efx_nic_cfg_t *encp;
731 efx_nic_t *enp = sa->nic;
734 sfc_log_init(sa, "entry");
736 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Start a new MCDI epoch so stale firmware state is discarded */
738 efx_mcdi_new_epoch(enp);
740 sfc_log_init(sa, "reset nic");
741 rc = efx_nic_reset(enp);
745 rc = sfc_sriov_attach(sa);
747 goto fail_sriov_attach;
750 * Probed NIC is sufficient for tunnel init.
751 * Initialize tunnel support to be able to use libefx
752 * efx_tunnel_config_udp_{add,remove}() in any state and
753 * efx_tunnel_reconfigure() on start up.
755 rc = efx_tunnel_init(enp);
757 goto fail_tunnel_init;
759 encp = efx_nic_cfg_get(sa->nic);
762 * Make a copy of supported tunnel encapsulations in shared
763 * memory to be used on supported Rx packet type classes get.
765 sa->priv.shared->tunnel_encaps =
766 encp->enc_tunnel_encapsulations_supported;
/* TSO is usable only if the datapath advertises it and FW supports it */
768 if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
769 sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
771 sfc_info(sa, "TSO support isn't available on this adapter");
775 (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
776 (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
777 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
778 sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled;
780 sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
783 sfc_log_init(sa, "estimate resource limits");
784 rc = sfc_estimate_resource_limits(sa);
786 goto fail_estimate_rsrc_limits;
/* Cache queue/EVQ size bounds; libefx guarantees powers of two */
788 sa->evq_max_entries = encp->enc_evq_max_nevs;
789 SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));
791 sa->evq_min_entries = encp->enc_evq_min_nevs;
792 SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));
794 sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
795 SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));
797 sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
798 SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));
800 sa->txq_max_entries = encp->enc_txq_max_ndescs;
801 SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
803 sa->txq_min_entries = encp->enc_txq_min_ndescs;
804 SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));
806 rc = sfc_intr_attach(sa);
808 goto fail_intr_attach;
810 rc = sfc_ev_attach(sa);
814 rc = sfc_port_attach(sa);
816 goto fail_port_attach;
818 rc = sfc_rss_attach(sa);
820 goto fail_rss_attach;
822 rc = sfc_filter_attach(sa);
824 goto fail_filter_attach;
/* NIC left initialized by resource estimation is finalized here */
826 sfc_log_init(sa, "fini nic");
832 * Create vSwitch to be able to use VFs when PF is not started yet
833 * as DPDK port. VFs should be able to talk to each other even
836 rc = sfc_sriov_vswitch_create(sa);
838 goto fail_sriov_vswitch_create;
840 sa->state = SFC_ADAPTER_INITIALIZED;
842 sfc_log_init(sa, "done");
/* Unwind in reverse attach order (intermediate labels elided) */
845 fail_sriov_vswitch_create:
847 sfc_filter_detach(sa);
862 efx_nic_fini(sa->nic);
864 fail_estimate_rsrc_limits:
866 efx_tunnel_fini(sa->nic);
867 sfc_sriov_detach(sa);
872 sfc_log_init(sa, "failed %d", rc);
/*
 * Undo sfc_attach() in reverse order: destroy the vSwitch, detach filters,
 * tunnel support and SR-IOV, and mark the adapter UNINITIALIZED.
 * NOTE(review): non-contiguous line numbering — some detach calls between
 * the visible ones are elided from this view.
 */
877 sfc_detach(struct sfc_adapter *sa)
879 sfc_log_init(sa, "entry");
881 SFC_ASSERT(sfc_adapter_is_locked(sa));
883 sfc_sriov_vswitch_destroy(sa);
887 sfc_filter_detach(sa);
892 efx_tunnel_fini(sa->nic);
893 sfc_sriov_detach(sa);
895 sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * kvargs handler: parse the fw_variant device argument string into the
 * corresponding efx_fw_variant_t value (case-insensitive match).
 * NOTE(review): non-contiguous line numbering — the final else/error
 * branch and return are elided from this view.
 */
899 sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
900 const char *value_str, void *opaque)
902 uint32_t *value = opaque;
904 if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
905 *value = EFX_FW_VARIANT_DONT_CARE;
906 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
907 *value = EFX_FW_VARIANT_FULL_FEATURED;
908 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
909 *value = EFX_FW_VARIANT_LOW_LATENCY;
910 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
911 *value = EFX_FW_VARIANT_PACKED_STREAM;
912 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
913 *value = EFX_FW_VARIANT_DPDK;
/*
 * Determine the running firmware variant from the RxDPCPU firmware id
 * reported by the NIC. Unknown ids map to EFX_FW_VARIANT_DONT_CARE since
 * other variants are not exposed via device parameters.
 * NOTE(review): non-contiguous line numbering — returns, switch braces
 * and the error handling for the version query are elided from this view.
 */
921 sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
923 efx_nic_fw_info_t enfi;
926 rc = efx_nic_get_fw_version(sa->nic, &enfi);
/* Without valid DPCPU firmware ids the variant cannot be identified */
929 else if (!enfi.enfi_dpcpu_fw_ids_valid)
933 * Firmware variant can be uniquely identified by the RxDPCPU
936 switch (enfi.enfi_rx_dpcpu_fw_id) {
937 case EFX_RXDP_FULL_FEATURED_FW_ID:
938 *efv = EFX_FW_VARIANT_FULL_FEATURED;
941 case EFX_RXDP_LOW_LATENCY_FW_ID:
942 *efv = EFX_FW_VARIANT_LOW_LATENCY;
945 case EFX_RXDP_PACKED_STREAM_FW_ID:
946 *efv = EFX_FW_VARIANT_PACKED_STREAM;
949 case EFX_RXDP_DPDK_FW_ID:
950 *efv = EFX_FW_VARIANT_DPDK;
955 * Other firmware variants are not considered, since they are
956 * not supported in the device parameters
958 *efv = EFX_FW_VARIANT_DONT_CARE;
/*
 * Map a firmware variant value to its device-argument string for logging.
 * NOTE(review): the switch cases compare against EFX_RXDP_*_FW_ID values
 * although the parameter is an efx_fw_variant_t — presumably the enum
 * values coincide; TODO confirm against libefx headers. The default case
 * and return are elided from this view.
 */
966 sfc_fw_variant2str(efx_fw_variant_t efv)
969 case EFX_RXDP_FULL_FEATURED_FW_ID:
970 return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
971 case EFX_RXDP_LOW_LATENCY_FW_ID:
972 return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
973 case EFX_RXDP_PACKED_STREAM_FW_ID:
974 return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
975 case EFX_RXDP_DPDK_FW_ID:
976 return SFC_KVARG_FW_VARIANT_DPDK;
/*
 * Parse the rxd_wait_timeout_ns device argument, validate it against the
 * equal-stride super-buffer head-of-line blocking maximum and store the
 * result in sa->rxd_wait_timeout_ns (defaulting when absent).
 * NOTE(review): non-contiguous line numbering — declarations, rc checks
 * and returns are elided from this view.
 */
983 sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
988 value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;
990 rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
991 sfc_kvarg_long_handler, &value);
/* Reject negative or too-large values (first clause elided in this view) */
996 (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
997 sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
998 "was set (%ld);", value);
999 sfc_err(sa, "it must not be less than 0 or greater than %u",
1000 EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
1004 sa->rxd_wait_timeout_ns = value;
/*
 * Probe the NIC with the firmware variant requested via device arguments,
 * falling back to DONT_CARE for unprivileged functions, and warn if the
 * running variant does not match the request.
 * NOTE(review): non-contiguous line numbering — rc checks, returns and
 * the condition guarding the fallback probe are elided from this view.
 */
1009 sfc_nic_probe(struct sfc_adapter *sa)
1011 efx_nic_t *enp = sa->nic;
1012 efx_fw_variant_t preferred_efv;
1013 efx_fw_variant_t efv;
1016 preferred_efv = EFX_FW_VARIANT_DONT_CARE;
1017 rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
1018 sfc_kvarg_fv_variant_handler,
1021 sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
1025 rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
1029 rc = efx_nic_probe(enp, preferred_efv);
1031 /* Unprivileged functions cannot set FW variant */
1032 rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
1037 rc = sfc_get_fw_variant(sa, &efv);
/* Unknown variant is not fatal — just warn and carry on */
1038 if (rc == ENOTSUP) {
1039 sfc_warn(sa, "FW variant can not be obtained");
1045 /* Check that firmware variant was changed to the requested one */
1046 if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
1047 sfc_warn(sa, "FW variant has not changed to the requested %s",
1048 sfc_fw_variant2str(preferred_efv));
1051 sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * Probe the device: detect the EFX family from PCI ids, map the memory
 * BAR, create the libefx NIC object, initialize MCDI and probe the NIC.
 * Unwinds everything in reverse order on failure.
 * NOTE(review): non-contiguous line numbering — rc checks, some unwind
 * labels and returns are elided from this view.
 */
1057 sfc_probe(struct sfc_adapter *sa)
1059 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
1060 unsigned int membar;
1064 sfc_log_init(sa, "entry");
1066 SFC_ASSERT(sfc_adapter_is_locked(sa));
1068 sa->socket_id = rte_socket_id();
1069 rte_atomic32_init(&sa->restart_required);
/* Family lookup also tells us which BAR holds the registers */
1071 sfc_log_init(sa, "get family");
1072 rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
1073 &sa->family, &membar);
1076 sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);
1078 sfc_log_init(sa, "init mem bar");
1079 rc = sfc_mem_bar_init(sa, membar);
1081 goto fail_mem_bar_init;
1083 sfc_log_init(sa, "create nic");
1084 rte_spinlock_init(&sa->nic_lock);
1085 rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
1087 &sa->nic_lock, &enp);
1089 goto fail_nic_create;
1092 rc = sfc_mcdi_init(sa);
1094 goto fail_mcdi_init;
1096 sfc_log_init(sa, "probe nic");
1097 rc = sfc_nic_probe(sa);
1099 goto fail_nic_probe;
1101 sfc_log_init(sa, "done");
/* Unwind path in reverse creation order (labels partly elided) */
1108 sfc_log_init(sa, "destroy nic");
1110 efx_nic_destroy(enp);
1113 sfc_mem_bar_fini(sa);
1117 sfc_log_init(sa, "failed %d", rc);
/*
 * Undo sfc_probe(): unprobe and destroy the NIC, cancel any pending
 * restart alarm (its callback uses device private data being freed),
 * unmap the memory BAR and mark the adapter UNINITIALIZED.
 * NOTE(review): non-contiguous line numbering — the MCDI fini call and
 * some statements between the visible ones are elided from this view.
 */
1122 sfc_unprobe(struct sfc_adapter *sa)
1124 efx_nic_t *enp = sa->nic;
1126 sfc_log_init(sa, "entry");
1128 SFC_ASSERT(sfc_adapter_is_locked(sa));
1130 sfc_log_init(sa, "unprobe nic");
1131 efx_nic_unprobe(enp);
1136 * Make sure there is no pending alarm to restart since we are
1137 * going to free device private which is passed as the callback
1138 * opaque data. A new alarm cannot be scheduled since MCDI is
1141 rte_eal_alarm_cancel(sfc_restart_if_required, sa);
1143 sfc_log_init(sa, "destroy nic");
1145 efx_nic_destroy(enp);
1147 sfc_mem_bar_fini(sa);
1150 sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device log type named "<prefix>.<pci address>" with the
 * given default level. Falls back to the generic driver log type when the
 * prefix is degenerate or the name buffer cannot be allocated.
 * NOTE(review): non-contiguous line numbering — the else branches paired
 * with the fallback returns, the rte_free of lt_str and the final return
 * are elided from this view.
 */
1154 sfc_register_logtype(const struct rte_pci_addr *pci_addr,
1155 const char *lt_prefix_str, uint32_t ll_default)
1157 size_t lt_prefix_str_size = strlen(lt_prefix_str);
1158 size_t lt_str_size_max;
1159 char *lt_str = NULL;
/* Guard against size_t overflow when computing the buffer size */
1162 if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
1163 ++lt_prefix_str_size; /* Reserve space for prefix separator */
1164 lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
1166 return sfc_logtype_driver;
1169 lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
1171 return sfc_logtype_driver;
/* Build "<prefix>.<pci address>" with explicit NUL termination */
1173 strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
1174 lt_str[lt_prefix_str_size - 1] = '.';
1175 rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
1176 lt_str_size_max - lt_prefix_str_size);
1177 lt_str[lt_str_size_max - 1] = '\0';
1179 ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
1183 return sfc_logtype_driver;