/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
23 #include "sfc_kvargs.h"
27 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
28 size_t len, int socket_id, efsys_mem_t *esmp)
30 const struct rte_memzone *mz;
32 sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
33 name, id, len, socket_id);
35 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
36 sysconf(_SC_PAGESIZE), socket_id);
38 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
39 name, (unsigned int)id, (unsigned int)len, socket_id,
40 rte_strerror(rte_errno));
44 esmp->esm_addr = mz->iova;
45 if (esmp->esm_addr == RTE_BAD_IOVA) {
46 (void)rte_memzone_free(mz);
51 esmp->esm_base = mz->addr;
57 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
61 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
63 rc = rte_memzone_free(esmp->esm_mz);
65 sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
67 memset(esmp, 0, sizeof(*esmp));
/*
 * Map an ethdev link-speed mask (ETH_LINK_SPEED_*) to libefx PHY
 * capability bits (EFX_PHY_CAP_*).
 * NOTE(review): fragment — some original lines are missing from this view.
 */
sfc_phy_cap_from_link_speeds(uint32_t speeds)
	uint32_t phy_caps = 0;

	/* Autonegotiation is advertised whenever FIXED is not requested */
	if (~speeds & ETH_LINK_SPEED_FIXED) {
		phy_caps |= (1 << EFX_PHY_CAP_AN);
		/*
		 * If no speeds are specified in the mask, any supported
		 * speed may be advertised, so enable all capability bits.
		 */
		if (speeds == ETH_LINK_SPEED_AUTONEG)
			(1 << EFX_PHY_CAP_1000FDX) |
			(1 << EFX_PHY_CAP_10000FDX) |
			(1 << EFX_PHY_CAP_25000FDX) |
			(1 << EFX_PHY_CAP_40000FDX) |
			(1 << EFX_PHY_CAP_50000FDX) |
			(1 << EFX_PHY_CAP_100000FDX);

	/* Explicitly requested speeds map 1:1 to full-duplex caps */
	if (speeds & ETH_LINK_SPEED_1G)
		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
	if (speeds & ETH_LINK_SPEED_10G)
		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
	if (speeds & ETH_LINK_SPEED_25G)
		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
	if (speeds & ETH_LINK_SPEED_40G)
		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
	if (speeds & ETH_LINK_SPEED_50G)
		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
	if (speeds & ETH_LINK_SPEED_100G)
		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);
/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules.
 */
sfc_check_conf(struct sfc_adapter *sa)
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;

	/* Reduce requested speeds to those the PHY actually supports */
	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	/* Something besides the AN bit alone must remain */
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",

	/* Features below are rejected as unsupported by this driver */
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");

	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");

	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");

	/* LSC interrupt needs a line or message interrupt to be present */
	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");

	if (conf->intr_conf.rxq != 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised to the application.
 *
 * NIC is kept initialized on success to allow other modules acquire
 * defaults and capabilities.
 */
sfc_estimate_resource_limits(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	/* TSO v2 contexts are shared per hardware PF — cap Tx queues */
	limits.edl_max_txq_count =
		MIN(limits.edl_max_txq_count,
		    encp->enc_fw_assisted_tso_v2_n_contexts /
		    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
		goto fail_get_vi_pool;

	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */

	efx_nic_fini(sa->nic);
/*
 * Set strict (min == max) libefx driver limits derived from the
 * application-configured Rx/Tx queue counts.
 */
sfc_set_drv_limits(struct sfc_adapter *sa)
	const struct rte_eth_dev_data *data = sa->eth_dev->data;
	efx_drv_limits_t lim;

	memset(&lim, 0, sizeof(lim));

	/* Limits are strict since take into account initial estimation */
	/* One management EVQ plus one EVQ per Rx and per Tx queue */
	lim.edl_min_evq_count = lim.edl_max_evq_count =
		1 + data->nb_rx_queues + data->nb_tx_queues;
	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;

	return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * Single attempt to bring the adapter up: apply driver limits,
 * initialize the NIC, then start subsystems in dependency order
 * (tunnel config, intr, ev, port, rx, tx, flows).
 * NOTE(review): fragment — unwind labels between the ones shown are
 * missing from this view.
 */
sfc_try_start(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;

	sfc_log_init(sa, "entry");

	/* Caller must hold the adapter lock and have set STARTING state */
	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	/* Apply tunnel (UDP port) configuration only if FW supports it */
	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
			goto fail_tunnel_reconfigure;

	rc = sfc_intr_start(sa);
		goto fail_intr_start;

	rc = sfc_ev_start(sa);

	rc = sfc_port_start(sa);
		goto fail_port_start;

	rc = sfc_rx_start(sa);

	rc = sfc_tx_start(sa);

	rc = sfc_flow_start(sa);
		goto fail_flows_insert;

	sfc_log_init(sa, "done");

fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Start the adapter, retrying sfc_try_start() up to three times for
 * error codes treated as transient (EIO/EAGAIN/ENOENT/EINVAL).
 */
sfc_start(struct sfc_adapter *sa)
	unsigned int start_tries = 3;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* State dispatch: only CONFIGURED may proceed to start */
	case SFC_ADAPTER_CONFIGURED:
	case SFC_ADAPTER_STARTED:
		sfc_notice(sa, "already started");

	sa->state = SFC_ADAPTER_STARTING;

		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));

	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");

	/* Failure path: fall back to CONFIGURED state */
	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Stop a started adapter and return it to the CONFIGURED state.
 * NOTE(review): fragment — the subsystem stop calls between the state
 * change and efx_nic_fini() are missing from this view.
 */
sfc_stop(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* State dispatch: stopping is a no-op unless STARTED */
	case SFC_ADAPTER_STARTED:

	case SFC_ADAPTER_CONFIGURED:
		sfc_notice(sa, "already stopped");

		sfc_err(sa, "stop in unexpected state %u", sa->state);

	sa->state = SFC_ADAPTER_STOPPING;

	efx_nic_fini(sa->nic);

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
/*
 * Restart (stop then start) a STARTED adapter; no-op error otherwise.
 */
sfc_restart(struct sfc_adapter *sa)

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* Only a started adapter can be restarted */
	if (sa->state != SFC_ADAPTER_STARTED)

		sfc_err(sa, "restart failed");
/*
 * Alarm callback: perform a scheduled restart if the flag is set.
 * Runs outside the adapter lock, so it takes the lock itself and
 * re-checks the state before restarting.
 */
sfc_restart_if_required(void *arg)
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ADAPTER_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
/*
 * Request an asynchronous adapter restart via an EAL alarm.
 * Idempotent: a second call while a restart is pending does nothing.
 */
sfc_schedule_restart(struct sfc_adapter *sa)

	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))

	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
		/* ENOTSUP from alarm API: flag stays set for a later chance */
		sfc_warn(sa, "alarms are not supported, restart is pending");
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
		sfc_notice(sa, "restart scheduled");
/*
 * Apply device-level configuration: validate it, then configure
 * intr, port, rx and tx subsystems in order.
 * NOTE(review): fragment — the unwind labels before fail_check_conf
 * are missing from this view.
 */
sfc_configure(struct sfc_adapter *sa)

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* May be called fresh (INITIALIZED) or as a reconfigure */
	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
		goto fail_tx_configure;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");

	/* Failure path: roll state back to INITIALIZED */
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Tear down device-level configuration (reverse of sfc_configure())
 * and return the adapter to the INITIALIZED state.
 */
sfc_close(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
/*
 * Map the given PCI memory BAR into the libefx efsys_bar_t descriptor
 * and initialize its lock.
 */
sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res = &pci_dev->mem_resource[membar];

	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
	ebp->esb_rid = membar;
	ebp->esb_dev = pci_dev;
	ebp->esb_base = res->addr;
/*
 * Destroy the BAR lock and clear the descriptor (reverse of
 * sfc_mem_bar_init()).
 */
sfc_mem_bar_fini(struct sfc_adapter *sa)
	efsys_bar_t *ebp = &sa->mem_bar;

	SFC_BAR_LOCK_DESTROY(ebp);
	memset(ebp, 0, sizeof(*ebp));
#if EFSYS_OPT_RX_SCALE
/*
 * A fixed RSS key which has a property of being symmetric
 * (symmetrical flows are distributed to the same CPU)
 * and also known to give a uniform distribution
 * (a good distribution of traffic between different CPUs).
 * The repeating 0x6d,0x5a byte pair is the well-known symmetric
 * Toeplitz key pattern.
 */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
#if EFSYS_OPT_RX_SCALE
/*
 * Query RSS scale/hash support from the NIC and install default RSS
 * settings (hash types and key). Temporarily brings up intr/ev/rx on
 * the NIC just for the queries, then tears them down again.
 */
sfc_set_rss_defaults(struct sfc_adapter *sa)

	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);

	rc = efx_ev_init(sa->nic);

	rc = efx_rx_init(sa->nic);

	rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
		goto fail_hash_support_get;

	/* Success path: tear down what was brought up for the queries */
	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);

	rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));

/* Unwind in reverse order of initialization */
fail_hash_support_get:
fail_scale_support_get:

	efx_ev_fini(sa->nic);

	efx_intr_fini(sa->nic);
/* Stub for builds without EFSYS_OPT_RX_SCALE: nothing to set up */
sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa)
/*
 * One-time adapter attach: reset the NIC, initialize tunnel support,
 * estimate resource limits (leaves the NIC initialized), attach the
 * subsystems, then finalize the NIC until start.
 * NOTE(review): fragment — several unwind labels are missing from
 * this view.
 */
sfc_attach(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	/* TSO needs both the datapath feature and FW-assisted TSO v2 */
	if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
			"TSO support isn't available on this adapter");

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
		goto fail_estimate_rsrc_limits;

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	rc = sfc_intr_attach(sa);
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);

	rc = sfc_port_attach(sa);
		goto fail_port_attach;

	rc = sfc_set_rss_defaults(sa);
		goto fail_set_rss_defaults;

	rc = sfc_filter_attach(sa);
		goto fail_filter_attach;

	sfc_log_init(sa, "fini nic");

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");

fail_set_rss_defaults:

	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:

	efx_tunnel_fini(sa->nic);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Detach subsystems and tunnel support (reverse of sfc_attach())
 * and mark the adapter uninitialized.
 */
sfc_detach(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_filter_detach(sa);

	efx_tunnel_fini(sa->nic);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * kvargs handler: parse the fw_variant device argument string into an
 * efx_fw_variant_t value stored via the opaque pointer.
 * NOTE(review): fragment — the trailing else/return handling of an
 * unrecognized value is missing from this view.
 */
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
/*
 * Determine the running firmware variant from the RxDPCPU firmware id
 * reported by the NIC.
 */
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
	efx_nic_fw_info_t enfi;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);

	/* Without valid DPCPU firmware ids the variant cannot be known */
	else if (!enfi.enfi_dpcpu_fw_ids_valid)

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware id.
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;

	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;

	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;

	/*
	 * Other firmware variants are not considered, since they are
	 * not supported in the device parameters.
	 */
	*efv = EFX_FW_VARIANT_DONT_CARE;
/*
 * Map a firmware variant value to its kvargs string name.
 * NOTE(review): the case labels are EFX_RXDP_*_FW_ID constants while
 * the parameter type is efx_fw_variant_t (EFX_FW_VARIANT_*) — confirm
 * these enum values are intended to coincide.
 */
sfc_fw_variant2str(efx_fw_variant_t efv)
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
	case EFX_RXDP_LOW_LATENCY_FW_ID:
		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
	case EFX_RXDP_PACKED_STREAM_FW_ID:
		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
/*
 * Probe the NIC with the firmware variant requested via device
 * parameters, falling back to DONT_CARE for unprivileged functions,
 * and report the variant that is actually running.
 */
sfc_nic_probe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);

	rc = efx_nic_probe(enp, preferred_efv);

		/* Unprivileged functions cannot set FW variant */
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);

	rc = sfc_get_fw_variant(sa, &efv);
		/* Non-fatal: just warn and carry on without the info */
		sfc_warn(sa, "FW variant can not be obtained");

	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * Probe the device: identify the EFX family from PCI ids, map the
 * memory BAR, create the libefx NIC object, bring up MCDI and probe
 * the NIC. Unwinds in reverse order on failure.
 */
sfc_probe(struct sfc_adapter *sa)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family, &membar);

	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, membar);
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
		goto fail_nic_create;

	rc = sfc_mcdi_init(sa);

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);

	sfc_log_init(sa, "done");

/* Failure unwind: destroy NIC, then release the BAR mapping */
	sfc_log_init(sa, "destroy nic");

	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Undo sfc_probe(): unprobe the NIC, cancel any pending restart alarm
 * (its callback receives this adapter as opaque data), destroy the NIC
 * object and unmap the BAR.
 */
sfc_unprobe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free device private which is passed as the callback
	 * opaque data. A new alarm cannot be scheduled since MCDI is
	 * shut down.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_log_init(sa, "destroy nic");

	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device log type named "<prefix>.<pci address>" and
 * return its id; fall back to RTE_LOGTYPE_PMD on any failure
 * (size overflow, allocation failure, registration error).
 */
sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,

	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;

	/* Overflow guard for the size computation below */
	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;

		return RTE_LOGTYPE_PMD;

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);

		return RTE_LOGTYPE_PMD;

	/*
	 * Copy includes the NUL (size was incremented above); the NUL
	 * position is then overwritten with the '.' separator.
	 */
	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);

	return (ret < 0) ? RTE_LOGTYPE_PMD : ret;