1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2016-2019 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
 */
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
19 #include "sfc_debug.h"
24 #include "sfc_kvargs.h"
25 #include "sfc_tweak.h"
/*
 * Reserve a DMA-coherent memzone for NIC rings/buffers and fill in the
 * efsys_mem_t descriptor (virtual base, IOVA, backing memzone).
 * NOTE(review): this listing is elided — original lines between the
 * numbered statements (braces, error returns) are missing from view.
 */
29 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
30 size_t len, int socket_id, efsys_mem_t *esmp)
32 const struct rte_memzone *mz;
34 sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
35 name, id, len, socket_id);
/* Page-aligned reservation via the ethdev helper; rte_errno reports why it failed */
37 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
38 sysconf(_SC_PAGESIZE), socket_id);
40 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
41 name, (unsigned int)id, (unsigned int)len, socket_id,
42 rte_strerror(rte_errno));
/* A zone without a valid IOVA is unusable for DMA: release it and bail out */
46 esmp->esm_addr = mz->iova;
47 if (esmp->esm_addr == RTE_BAD_IOVA) {
48 (void)rte_memzone_free(mz);
53 esmp->esm_base = mz->addr;
56 "DMA name=%s id=%u len=%lu socket_id=%d => virt=%p iova=%lx",
57 name, id, len, socket_id, esmp->esm_base,
58 (unsigned long)esmp->esm_addr);
/*
 * Release the DMA memzone referenced by @esmp (counterpart of
 * sfc_dma_alloc()) and clear the descriptor so a stale pointer
 * cannot be reused afterwards.
 */
64 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
68 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
70 rc = rte_memzone_free(esmp->esm_mz);
/* Fix: log message typo "rte_memzone_free((" corrected to "rte_memzone_free()" */
72 sfc_err(sa, "rte_memzone_free() failed: %d", rc);
/* Poison the descriptor: esm_base/esm_addr/esm_mz all become invalid now */
74 memset(esmp, 0, sizeof(*esmp));
/*
 * Translate an ethdev link-speed mask (ETH_LINK_SPEED_*) into a bitmask
 * of libefx PHY capabilities (EFX_PHY_CAP_*).
 */
78 sfc_phy_cap_from_link_speeds(uint32_t speeds)
80 uint32_t phy_caps = 0;
/* FIXED bit clear => autonegotiation is allowed, so advertise AN capability */
82 if (~speeds & ETH_LINK_SPEED_FIXED) {
83 phy_caps |= (1 << EFX_PHY_CAP_AN);
85 * If no speeds are specified in the mask, any supported
88 if (speeds == ETH_LINK_SPEED_AUTONEG)
90 (1 << EFX_PHY_CAP_1000FDX) |
91 (1 << EFX_PHY_CAP_10000FDX) |
92 (1 << EFX_PHY_CAP_25000FDX) |
93 (1 << EFX_PHY_CAP_40000FDX) |
94 (1 << EFX_PHY_CAP_50000FDX) |
95 (1 << EFX_PHY_CAP_100000FDX);
/* Explicitly requested speeds: map each ethdev flag to its full-duplex PHY cap */
97 if (speeds & ETH_LINK_SPEED_1G)
98 phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
99 if (speeds & ETH_LINK_SPEED_10G)
100 phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
101 if (speeds & ETH_LINK_SPEED_25G)
102 phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
103 if (speeds & ETH_LINK_SPEED_40G)
104 phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
105 if (speeds & ETH_LINK_SPEED_50G)
106 phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
107 if (speeds & ETH_LINK_SPEED_100G)
108 phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
114 * Check requested device level configuration.
115 * Receive and transmit configuration is checked in corresponding
119 sfc_check_conf(struct sfc_adapter *sa)
121 const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
/* Restrict advertised PHY capabilities to what the port actually supports */
124 sa->port.phy_adv_cap =
125 sfc_phy_cap_from_link_speeds(conf->link_speeds) &
126 sa->port.phy_adv_cap_mask;
/* AN alone (no speed bit) means nothing usable survived the mask */
127 if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
128 sfc_err(sa, "No link speeds from mask %#x are supported",
/* Loopback only allowed when libefx was built with loopback support */
133 #if !EFSYS_OPT_LOOPBACK
134 if (conf->lpbk_mode != 0) {
135 sfc_err(sa, "Loopback not supported");
140 if (conf->dcb_capability_en != 0) {
141 sfc_err(sa, "Priority-based flow control not supported");
145 if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
146 sfc_err(sa, "Flow Director not supported");
/* LSC interrupt requires a real interrupt type (line or message based) */
150 if ((conf->intr_conf.lsc != 0) &&
151 (sa->intr.type != EFX_INTR_LINE) &&
152 (sa->intr.type != EFX_INTR_MESSAGE)) {
153 sfc_err(sa, "Link status change interrupt not supported");
/* Rx queue interrupts depend on the chosen datapath implementation */
157 if (conf->intr_conf.rxq != 0 &&
158 (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) {
159 sfc_err(sa, "Receive queue interrupt not supported");
167 * Find out maximum number of receive and transmit queues which could be
170 * NIC is kept initialized on success to allow other modules acquire
171 * defaults and capabilities.
174 sfc_estimate_resource_limits(struct sfc_adapter *sa)
176 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
177 efx_drv_limits_t limits;
179 uint32_t evq_allocated;
180 uint32_t rxq_allocated;
181 uint32_t txq_allocated;
183 memset(&limits, 0, sizeof(limits));
185 /* Request at least one Rx and Tx queue */
186 limits.edl_min_rxq_count = 1;
187 limits.edl_min_txq_count = 1;
188 /* Management event queue plus event queue for each Tx and Rx queue */
189 limits.edl_min_evq_count =
190 1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;
192 /* Divide by number of functions to guarantee that all functions
193 * will get promised resources
195 /* FIXME Divide by number of functions (not 2) below */
196 limits.edl_max_evq_count = encp->enc_evq_limit / 2;
197 SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);
199 /* Split equally between receive and transmit */
200 limits.edl_max_rxq_count =
201 MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
202 SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);
204 limits.edl_max_txq_count =
205 MIN(encp->enc_txq_limit,
206 limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);
/* FATSOv2 contexts are shared between PFs: cap Tx queues accordingly */
208 if (sa->tso && encp->enc_fw_assisted_tso_v2_enabled)
209 limits.edl_max_txq_count =
210 MIN(limits.edl_max_txq_count,
211 encp->enc_fw_assisted_tso_v2_n_contexts /
212 encp->enc_hw_pf_count);
214 SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);
216 /* Configure the minimum required resources needed for the
217 * driver to operate, and the maximum desired resources that the
218 * driver is capable of using.
220 efx_nic_set_drv_limits(sa->nic, &limits);
222 sfc_log_init(sa, "init nic");
223 rc = efx_nic_init(sa->nic);
227 /* Find resource dimensions assigned by firmware to this function */
228 rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
231 goto fail_get_vi_pool;
233 /* It still may allocate more than maximum, ensure limit */
234 evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
235 rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
236 txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);
238 /* Subtract management EVQ not used for traffic */
239 SFC_ASSERT(evq_allocated > 0);
242 /* Right now we use separate EVQ for Rx and Tx */
243 sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
244 sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);
246 /* Keep NIC initialized */
/* Error path: undo efx_nic_init() done above */
250 efx_nic_fini(sa->nic);
/*
 * Push exact (min == max) queue/EVQ limits to libefx based on the
 * queue counts the application actually configured.
 */
256 sfc_set_drv_limits(struct sfc_adapter *sa)
258 const struct rte_eth_dev_data *data = sa->eth_dev->data;
259 efx_drv_limits_t lim;
261 memset(&lim, 0, sizeof(lim));
263 /* Limits are strict since take into account initial estimation */
/* One management EVQ plus one EVQ per configured Rx and Tx queue */
264 lim.edl_min_evq_count = lim.edl_max_evq_count =
265 1 + data->nb_rx_queues + data->nb_tx_queues;
266 lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
267 lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
269 return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Select the firmware subvariant: if no Tx checksum offload is required
 * by the device config or any initialized Tx queue, switch to the
 * no-Tx-checksum subvariant (when supported); otherwise use the default.
 */
273 sfc_set_fw_subvariant(struct sfc_adapter *sa)
275 struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
276 const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
277 uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
278 unsigned int txq_index;
279 efx_nic_fw_subvariant_t req_fw_subvariant;
280 efx_nic_fw_subvariant_t cur_fw_subvariant;
283 if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
284 sfc_info(sa, "no-Tx-checksum subvariant not supported");
/* Accumulate per-queue offloads on top of the device-level Tx offloads */
288 for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
289 struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];
291 if (txq_info->state & SFC_TXQ_INITIALIZED)
292 tx_offloads |= txq_info->offloads;
/* Any checksum offload in use forces the default subvariant */
295 if (tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
296 DEV_TX_OFFLOAD_TCP_CKSUM |
297 DEV_TX_OFFLOAD_UDP_CKSUM |
298 DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM))
299 req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
301 req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;
303 rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
305 sfc_err(sa, "failed to get FW subvariant: %d", rc);
308 sfc_info(sa, "FW subvariant is %u vs required %u",
309 cur_fw_subvariant, req_fw_subvariant);
/* Already running the required subvariant: nothing to change */
311 if (cur_fw_subvariant == req_fw_subvariant)
314 rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
316 sfc_err(sa, "failed to set FW subvariant %u: %d",
317 req_fw_subvariant, rc);
320 sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);
/*
 * Single start attempt: bring up NIC, tunnels, interrupts, events, port,
 * Rx, Tx and flows in order, unwinding via goto labels on failure.
 * Called from sfc_start() which retries on transient errors.
 */
326 sfc_try_start(struct sfc_adapter *sa)
328 const efx_nic_cfg_t *encp;
331 sfc_log_init(sa, "entry");
333 SFC_ASSERT(sfc_adapter_is_locked(sa));
334 SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);
336 sfc_log_init(sa, "set FW subvariant");
337 rc = sfc_set_fw_subvariant(sa);
339 goto fail_set_fw_subvariant;
341 sfc_log_init(sa, "set resource limits");
342 rc = sfc_set_drv_limits(sa);
344 goto fail_set_drv_limits;
346 sfc_log_init(sa, "init nic");
347 rc = efx_nic_init(sa->nic);
351 encp = efx_nic_cfg_get(sa->nic);
354 * Refresh (since it may change on NIC reset/restart) a copy of
355 * supported tunnel encapsulations in shared memory to be used
356 * on supported Rx packet type classes get.
358 sa->priv.shared->tunnel_encaps =
359 encp->enc_tunnel_encapsulations_supported;
361 if (encp->enc_tunnel_encapsulations_supported != 0) {
362 sfc_log_init(sa, "apply tunnel config");
363 rc = efx_tunnel_reconfigure(sa->nic);
365 goto fail_tunnel_reconfigure;
/* Start subsystems in dependency order: intr -> ev -> port -> rx -> tx -> flows */
368 rc = sfc_intr_start(sa);
370 goto fail_intr_start;
372 rc = sfc_ev_start(sa);
376 rc = sfc_port_start(sa);
378 goto fail_port_start;
380 rc = sfc_rx_start(sa);
384 rc = sfc_tx_start(sa);
388 rc = sfc_flow_start(sa);
390 goto fail_flows_insert;
392 sfc_log_init(sa, "done");
/* Unwind labels (elided in this listing) run in reverse start order */
411 fail_tunnel_reconfigure:
412 efx_nic_fini(sa->nic);
416 fail_set_fw_subvariant:
417 sfc_log_init(sa, "failed %d", rc);
/*
 * Start the adapter, retrying sfc_try_start() up to 3 times on errors
 * that may be caused by MC reboot-like events (EIO/EAGAIN/ENOENT/EINVAL),
 * recreating the SR-IOV vSwitch before each attempt.
 */
422 sfc_start(struct sfc_adapter *sa)
424 unsigned int start_tries = 3;
427 sfc_log_init(sa, "entry");
429 SFC_ASSERT(sfc_adapter_is_locked(sa));
432 case SFC_ADAPTER_CONFIGURED:
434 case SFC_ADAPTER_STARTED:
435 sfc_notice(sa, "already started");
442 sa->state = SFC_ADAPTER_STARTING;
447 * FIXME Try to recreate vSwitch on start retry.
448 * vSwitch is absent after MC reboot like events and
449 * we should recreate it. May be we need proper
450 * indication instead of guessing.
453 sfc_sriov_vswitch_destroy(sa);
454 rc = sfc_sriov_vswitch_create(sa);
456 goto fail_sriov_vswitch_create;
/* Retry only on error codes that indicate a transient firmware condition */
458 rc = sfc_try_start(sa);
459 } while ((--start_tries > 0) &&
460 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
465 sa->state = SFC_ADAPTER_STARTED;
466 sfc_log_init(sa, "done");
470 fail_sriov_vswitch_create:
/* Roll the state machine back so a later start attempt is possible */
471 sa->state = SFC_ADAPTER_CONFIGURED;
473 sfc_log_init(sa, "failed %d", rc);
/*
 * Stop the adapter: tear down started subsystems and return the state
 * machine to CONFIGURED. No-op (with a notice) if already stopped.
 */
478 sfc_stop(struct sfc_adapter *sa)
480 sfc_log_init(sa, "entry");
482 SFC_ASSERT(sfc_adapter_is_locked(sa));
485 case SFC_ADAPTER_STARTED:
487 case SFC_ADAPTER_CONFIGURED:
488 sfc_notice(sa, "already stopped");
491 sfc_err(sa, "stop in unexpected state %u", sa->state);
496 sa->state = SFC_ADAPTER_STOPPING;
/* Subsystem stop calls (elided here) run before the NIC is finalized */
504 efx_nic_fini(sa->nic);
506 sa->state = SFC_ADAPTER_CONFIGURED;
507 sfc_log_init(sa, "done");
/*
 * Restart a STARTED adapter (stop + start under the adapter lock).
 * Does nothing unless the adapter is currently started.
 */
511 sfc_restart(struct sfc_adapter *sa)
515 SFC_ASSERT(sfc_adapter_is_locked(sa));
517 if (sa->state != SFC_ADAPTER_STARTED)
524 sfc_err(sa, "restart failed");
/*
 * Alarm callback: perform a deferred restart if one was scheduled.
 * @arg is the sfc_adapter pointer passed to rte_eal_alarm_set().
 */
530 sfc_restart_if_required(void *arg)
532 struct sfc_adapter *sa = arg;
534 /* If restart is scheduled, clear the flag and do it */
/* Atomic compare-and-set clears restart_required exactly once per request */
535 if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
537 sfc_adapter_lock(sa);
538 if (sa->state == SFC_ADAPTER_STARTED)
539 (void)sfc_restart(sa);
540 sfc_adapter_unlock(sa);
/*
 * Request an asynchronous adapter restart via an EAL alarm.
 * Idempotent: a second call while a restart is pending does nothing.
 */
545 sfc_schedule_restart(struct sfc_adapter *sa)
549 /* Schedule restart alarm if it is not scheduled yet */
550 if (!rte_atomic32_test_and_set(&sa->restart_required))
/* 1 us delay: run the restart as soon as the alarm thread can */
553 rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
555 sfc_warn(sa, "alarms are not supported, restart is pending");
557 sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
559 sfc_notice(sa, "restart scheduled");
/*
 * Apply device-level configuration: validate it, then configure
 * interrupts, port, Rx and Tx in order. On failure the state machine
 * returns to INITIALIZED; on success it becomes CONFIGURED.
 */
563 sfc_configure(struct sfc_adapter *sa)
567 sfc_log_init(sa, "entry");
569 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Reconfiguration of an already-configured adapter is allowed */
571 SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
572 sa->state == SFC_ADAPTER_CONFIGURED);
573 sa->state = SFC_ADAPTER_CONFIGURING;
575 rc = sfc_check_conf(sa);
577 goto fail_check_conf;
579 rc = sfc_intr_configure(sa);
581 goto fail_intr_configure;
583 rc = sfc_port_configure(sa);
585 goto fail_port_configure;
587 rc = sfc_rx_configure(sa);
589 goto fail_rx_configure;
591 rc = sfc_tx_configure(sa);
593 goto fail_tx_configure;
595 sa->state = SFC_ADAPTER_CONFIGURED;
596 sfc_log_init(sa, "done");
/* Shared failure tail: revert to INITIALIZED (unwind labels elided) */
610 sa->state = SFC_ADAPTER_INITIALIZED;
611 sfc_log_init(sa, "failed %d", rc);
/*
 * Close a CONFIGURED adapter: release per-configuration resources and
 * return the state machine to INITIALIZED.
 */
616 sfc_close(struct sfc_adapter *sa)
618 sfc_log_init(sa, "entry");
620 SFC_ASSERT(sfc_adapter_is_locked(sa));
622 SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
623 sa->state = SFC_ADAPTER_CLOSING;
/* Subsystem close calls (elided in this listing) happen here */
630 sa->state = SFC_ADAPTER_INITIALIZED;
631 sfc_log_init(sa, "done");
/*
 * Map the PCI memory BAR described by @mem_ebrp into sa->mem_bar and
 * record the function control window offset for later register access.
 */
635 sfc_mem_bar_init(struct sfc_adapter *sa, const efx_bar_region_t *mem_ebrp)
637 struct rte_eth_dev *eth_dev = sa->eth_dev;
638 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
639 efsys_bar_t *ebp = &sa->mem_bar;
640 struct rte_mem_resource *res =
641 &pci_dev->mem_resource[mem_ebrp->ebr_index];
643 SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
644 ebp->esb_rid = mem_ebrp->ebr_index;
645 ebp->esb_dev = pci_dev;
/* BAR is already mapped by the PCI bus driver; just record the base */
646 ebp->esb_base = res->addr;
648 sa->fcw_offset = mem_ebrp->ebr_offset;
/*
 * Counterpart of sfc_mem_bar_init(): destroy the BAR lock and clear
 * the descriptor.
 */
654 sfc_mem_bar_fini(struct sfc_adapter *sa)
656 efsys_bar_t *ebp = &sa->mem_bar;
658 SFC_BAR_LOCK_DESTROY(ebp);
659 memset(ebp, 0, sizeof(*ebp));
663 * A fixed RSS key which has a property of being symmetric
664 * (symmetrical flows are distributed to the same CPU)
665 * and also known to give a uniform distribution
666 * (a good distribution of traffic between different CPUs)
/* Repeating 0x6d5a pattern; well-known symmetric RSS key */
668 static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
669 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
670 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
671 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
672 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
673 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
/*
 * Query RSS capabilities (scale context type, hash support) from the NIC.
 * The NIC must be temporarily brought up (intr/ev/rx init) to ask, then
 * those modules are finalized again before returning.
 */
677 sfc_rss_attach(struct sfc_adapter *sa)
679 struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
/* Bring up just enough of the NIC to query RSS capabilities */
682 rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
686 rc = efx_ev_init(sa->nic);
690 rc = efx_rx_init(sa->nic);
694 rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
696 goto fail_scale_support_get;
698 rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
700 goto fail_hash_support_get;
702 rc = sfc_rx_hash_init(sa);
704 goto fail_rx_hash_init;
/* Queries done: finalize in reverse init order */
706 efx_rx_fini(sa->nic);
707 efx_ev_fini(sa->nic);
708 efx_intr_fini(sa->nic);
710 rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
711 rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
/* Failure unwind: finalize only what was initialized */
716 fail_hash_support_get:
717 fail_scale_support_get:
718 efx_rx_fini(sa->nic);
721 efx_ev_fini(sa->nic);
724 efx_intr_fini(sa->nic);
/* Counterpart of sfc_rss_attach(): release RSS hash resources */
731 sfc_rss_detach(struct sfc_adapter *sa)
733 sfc_rx_hash_fini(sa);
/*
 * Attach the adapter: reset the NIC, probe capabilities (TSO, tunnels,
 * queue limits), attach all driver subsystems in order and create the
 * SR-IOV vSwitch. On success state becomes INITIALIZED; failures unwind
 * through the goto labels in reverse attach order.
 */
737 sfc_attach(struct sfc_adapter *sa)
739 const efx_nic_cfg_t *encp;
740 efx_nic_t *enp = sa->nic;
743 sfc_log_init(sa, "entry");
745 SFC_ASSERT(sfc_adapter_is_locked(sa));
/* Start a new MCDI epoch so stale firmware state is discarded */
747 efx_mcdi_new_epoch(enp);
749 sfc_log_init(sa, "reset nic");
750 rc = efx_nic_reset(enp);
754 rc = sfc_sriov_attach(sa);
756 goto fail_sriov_attach;
759 * Probed NIC is sufficient for tunnel init.
760 * Initialize tunnel support to be able to use libefx
761 * efx_tunnel_config_udp_{add,remove}() in any state and
762 * efx_tunnel_reconfigure() on start up.
764 rc = efx_tunnel_init(enp);
766 goto fail_tunnel_init;
768 encp = efx_nic_cfg_get(sa->nic);
771 * Make a copy of supported tunnel encapsulations in shared
772 * memory to be used on supported Rx packet type classes get.
774 sa->priv.shared->tunnel_encaps =
775 encp->enc_tunnel_encapsulations_supported;
/* TSO: available via FATSOv2 or TSOv3 when the Tx datapath offers TCP TSO */
777 if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & DEV_TX_OFFLOAD_TCP_TSO) {
778 sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
779 encp->enc_tso_v3_enabled;
781 sfc_info(sa, "TSO support isn't available on this adapter");
/* Tunnel (encapsulated) TSO is gated separately on VXLAN/GENEVE capability */
785 (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
786 (DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
787 DEV_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
788 sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
789 encp->enc_tso_v3_enabled;
791 sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
794 sfc_log_init(sa, "estimate resource limits");
795 rc = sfc_estimate_resource_limits(sa);
797 goto fail_estimate_rsrc_limits;
/* Cache queue sizing limits; all are expected to be powers of two */
799 sa->evq_max_entries = encp->enc_evq_max_nevs;
800 SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));
802 sa->evq_min_entries = encp->enc_evq_min_nevs;
803 SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));
805 sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
806 SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));
808 sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
809 SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));
811 sa->txq_max_entries = encp->enc_txq_max_ndescs;
812 SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));
814 sa->txq_min_entries = encp->enc_txq_min_ndescs;
815 SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));
/* Attach subsystems in dependency order */
817 rc = sfc_intr_attach(sa);
819 goto fail_intr_attach;
821 rc = sfc_ev_attach(sa);
825 rc = sfc_port_attach(sa);
827 goto fail_port_attach;
829 rc = sfc_rss_attach(sa);
831 goto fail_rss_attach;
833 rc = sfc_filter_attach(sa);
835 goto fail_filter_attach;
837 rc = sfc_mae_attach(sa);
839 goto fail_mae_attach;
841 sfc_log_init(sa, "fini nic");
847 * Create vSwitch to be able to use VFs when PF is not started yet
848 * as DPDK port. VFs should be able to talk to each other even
851 rc = sfc_sriov_vswitch_create(sa);
853 goto fail_sriov_vswitch_create;
855 sa->state = SFC_ADAPTER_INITIALIZED;
857 sfc_log_init(sa, "done");
/* Failure unwind: detach in reverse order (some labels elided here) */
860 fail_sriov_vswitch_create:
865 sfc_filter_detach(sa);
880 efx_nic_fini(sa->nic);
882 fail_estimate_rsrc_limits:
884 efx_tunnel_fini(sa->nic);
885 sfc_sriov_detach(sa);
890 sfc_log_init(sa, "failed %d", rc);
/*
 * Counterpart of sfc_attach(): destroy the vSwitch and detach all
 * subsystems, leaving the adapter UNINITIALIZED.
 */
895 sfc_detach(struct sfc_adapter *sa)
897 sfc_log_init(sa, "entry");
899 SFC_ASSERT(sfc_adapter_is_locked(sa));
901 sfc_sriov_vswitch_destroy(sa);
/* Detach calls run in reverse attach order (some elided in this listing) */
906 sfc_filter_detach(sa);
911 efx_tunnel_fini(sa->nic);
912 sfc_sriov_detach(sa);
914 sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * kvargs handler for the fw_variant device argument: map the textual
 * value to the corresponding EFX_FW_VARIANT_* constant in *opaque.
 */
918 sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
919 const char *value_str, void *opaque)
921 uint32_t *value = opaque;
/* Case-insensitive match against the documented kvarg values */
923 if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
924 *value = EFX_FW_VARIANT_DONT_CARE;
925 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
926 *value = EFX_FW_VARIANT_FULL_FEATURED;
927 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
928 *value = EFX_FW_VARIANT_LOW_LATENCY;
929 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
930 *value = EFX_FW_VARIANT_PACKED_STREAM;
931 else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
932 *value = EFX_FW_VARIANT_DPDK;
/*
 * Determine the running firmware variant from the RxDPCPU firmware ID
 * reported by the NIC. Unrecognised IDs map to DONT_CARE.
 */
940 sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
942 efx_nic_fw_info_t enfi;
945 rc = efx_nic_get_fw_version(sa->nic, &enfi);
/* Without valid DPCPU firmware IDs the variant cannot be identified */
948 else if (!enfi.enfi_dpcpu_fw_ids_valid)
952 * Firmware variant can be uniquely identified by the RxDPCPU
955 switch (enfi.enfi_rx_dpcpu_fw_id) {
956 case EFX_RXDP_FULL_FEATURED_FW_ID:
957 *efv = EFX_FW_VARIANT_FULL_FEATURED;
960 case EFX_RXDP_LOW_LATENCY_FW_ID:
961 *efv = EFX_FW_VARIANT_LOW_LATENCY;
964 case EFX_RXDP_PACKED_STREAM_FW_ID:
965 *efv = EFX_FW_VARIANT_PACKED_STREAM;
968 case EFX_RXDP_DPDK_FW_ID:
969 *efv = EFX_FW_VARIANT_DPDK;
974 * Other firmware variants are not considered, since they are
975 * not supported in the device parameters
977 *efv = EFX_FW_VARIANT_DONT_CARE;
/*
 * Map a firmware variant value to the user-visible kvarg string.
 * NOTE(review): the cases switch on EFX_RXDP_*_FW_ID constants although
 * the parameter is an efx_fw_variant_t — confirm the value spaces match.
 */
985 sfc_fw_variant2str(efx_fw_variant_t efv)
988 case EFX_RXDP_FULL_FEATURED_FW_ID:
989 return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
990 case EFX_RXDP_LOW_LATENCY_FW_ID:
991 return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
992 case EFX_RXDP_PACKED_STREAM_FW_ID:
993 return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
994 case EFX_RXDP_DPDK_FW_ID:
995 return SFC_KVARG_FW_VARIANT_DPDK;
/*
 * Parse the rxd_wait_timeout_ns device argument, validate its range
 * against the hardware maximum and store it in the adapter.
 */
1002 sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
/* Default applies when the kvarg is not given */
1007 value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;
1009 rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
1010 sfc_kvarg_long_handler, &value);
/* Reject negative values and values above the hardware HOL-block maximum */
1015 (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
1016 sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
1017 "was set (%ld);", value);
1018 sfc_err(sa, "it must not be less than 0 or greater than %u",
1019 EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
1023 sa->rxd_wait_timeout_ns = value;
/*
 * Probe the NIC with the firmware variant preferred via device args.
 * Unprivileged functions cannot select a variant, so the probe is
 * retried with DONT_CARE; afterwards the running variant is verified.
 */
1028 sfc_nic_probe(struct sfc_adapter *sa)
1030 efx_nic_t *enp = sa->nic;
1031 efx_fw_variant_t preferred_efv;
1032 efx_fw_variant_t efv;
1035 preferred_efv = EFX_FW_VARIANT_DONT_CARE;
1036 rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
1037 sfc_kvarg_fv_variant_handler,
1040 sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
1044 rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
1048 rc = efx_nic_probe(enp, preferred_efv);
1050 /* Unprivileged functions cannot set FW variant */
1051 rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
1056 rc = sfc_get_fw_variant(sa, &efv);
/* ENOTSUP here is non-fatal: just warn that the variant is unknown */
1057 if (rc == ENOTSUP) {
1058 sfc_warn(sa, "FW variant can not be obtained");
1064 /* Check that firmware variant was changed to the requested one */
1065 if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
1066 sfc_warn(sa, "FW variant has not changed to the requested %s",
1067 sfc_fw_variant2str(preferred_efv));
1070 sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * Top-level probe: detect the controller family, map the memory BAR,
 * create the libefx NIC object, initialize MCDI and probe the NIC.
 * Failures unwind in reverse order via goto labels.
 */
1076 sfc_probe(struct sfc_adapter *sa)
1078 efx_bar_region_t mem_ebrp;
1079 struct rte_eth_dev *eth_dev = sa->eth_dev;
1080 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
1084 sfc_log_init(sa, "entry");
1086 SFC_ASSERT(sfc_adapter_is_locked(sa));
1088 sa->socket_id = rte_socket_id();
1089 rte_atomic32_init(&sa->restart_required);
1091 sfc_log_init(sa, "get family");
/* Family detection also reports which BAR holds the control window */
1092 rc = sfc_efx_family(pci_dev, &mem_ebrp, &sa->family);
1097 "family is %u, membar is %u, function control window offset is %lu",
1098 sa->family, mem_ebrp.ebr_index, mem_ebrp.ebr_offset);
1100 sfc_log_init(sa, "init mem bar");
1101 rc = sfc_mem_bar_init(sa, &mem_ebrp);
1103 goto fail_mem_bar_init;
1105 sfc_log_init(sa, "create nic");
1106 rte_spinlock_init(&sa->nic_lock);
1107 rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
1108 &sa->mem_bar, mem_ebrp.ebr_offset,
1109 &sa->nic_lock, &enp);
1111 goto fail_nic_create;
/* MCDI transport must be up before the NIC can be probed */
1114 rc = sfc_mcdi_init(sa);
1116 goto fail_mcdi_init;
1118 sfc_log_init(sa, "probe nic");
1119 rc = sfc_nic_probe(sa);
1121 goto fail_nic_probe;
1123 sfc_log_init(sa, "done");
/* Failure unwind: destroy in reverse creation order (labels elided) */
1130 sfc_log_init(sa, "destroy nic");
1132 efx_nic_destroy(enp);
1135 sfc_mem_bar_fini(sa);
1139 sfc_log_init(sa, "failed %d", rc);
/*
 * Counterpart of sfc_probe(): unprobe and destroy the NIC object,
 * cancel any pending restart alarm and unmap the memory BAR.
 */
1144 sfc_unprobe(struct sfc_adapter *sa)
1146 efx_nic_t *enp = sa->nic;
1148 sfc_log_init(sa, "entry");
1150 SFC_ASSERT(sfc_adapter_is_locked(sa));
1152 sfc_log_init(sa, "unprobe nic");
1153 efx_nic_unprobe(enp);
1158 * Make sure there is no pending alarm to restart since we are
1159 * going to free device private which is passed as the callback
1160 * opaque data. A new alarm cannot be scheduled since MCDI is
1163 rte_eal_alarm_cancel(sfc_restart_if_required, sa);
1165 sfc_log_init(sa, "destroy nic");
1167 efx_nic_destroy(enp);
1169 sfc_mem_bar_fini(sa);
1172 sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device log type named "<prefix>.<pci address>" with the
 * given default level. Falls back to the generic driver logtype on any
 * allocation/registration problem.
 */
1176 sfc_register_logtype(const struct rte_pci_addr *pci_addr,
1177 const char *lt_prefix_str, uint32_t ll_default)
1179 size_t lt_prefix_str_size = strlen(lt_prefix_str);
1180 size_t lt_str_size_max;
1181 char *lt_str = NULL;
/* Overflow guard before computing prefix + separator + PCI address + NUL */
1184 if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
1185 ++lt_prefix_str_size; /* Reserve space for prefix separator */
1186 lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
1188 return sfc_logtype_driver;
1191 lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
1193 return sfc_logtype_driver;
/* Build "<prefix>." then append the PCI device name after it */
1195 strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
1196 lt_str[lt_prefix_str_size - 1] = '.';
1197 rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
1198 lt_str_size_max - lt_prefix_str_size);
/* Ensure NUL termination regardless of what strncpy left behind */
1199 lt_str[lt_str_size_max - 1] = '\0';
1201 ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
1205 return sfc_logtype_driver;
/* Identifier of a HW switch: the board serial number (NICs sharing a
 * board belong to the same HW switch).
 */
1210 struct sfc_hw_switch_id {
1211 char board_sn[RTE_SIZEOF_FIELD(efx_nic_board_info_t, enbi_serial)];
/*
 * Allocate a HW switch identifier and fill it with the board serial
 * number obtained from the NIC. Caller owns *idp on success and frees
 * it with sfc_hw_switch_id_fini().
 */
1215 sfc_hw_switch_id_init(struct sfc_adapter *sa,
1216 struct sfc_hw_switch_id **idp)
1218 efx_nic_board_info_t board_info;
1219 struct sfc_hw_switch_id *id;
1225 id = rte_zmalloc("sfc_hw_switch_id", sizeof(*id), 0);
1229 rc = efx_nic_get_board_info(sa->nic, &board_info);
1233 memcpy(id->board_sn, board_info.enbi_serial, sizeof(id->board_sn));
/* Release a HW switch identifier allocated by sfc_hw_switch_id_init() */
1241 sfc_hw_switch_id_fini(__rte_unused struct sfc_adapter *sa,
1242 struct sfc_hw_switch_id *id)
1248 sfc_hw_switch_ids_equal(const struct sfc_hw_switch_id *left,
1249 const struct sfc_hw_switch_id *right)
1251 return strncmp(left->board_sn, right->board_sn,
1252 sizeof(left->board_sn)) == 0;