1 /* SPDX-License-Identifier: BSD-3-Clause
3 * Copyright (c) 2016-2018 Solarflare Communications Inc.
6 * This software was jointly developed between OKTET Labs (under contract
7 * for Solarflare) and Solarflare Communications, Inc.
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
23 #include "sfc_kvargs.h"
27 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
28 size_t len, int socket_id, efsys_mem_t *esmp)
30 const struct rte_memzone *mz;
32 sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
33 name, id, len, socket_id);
35 mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
36 sysconf(_SC_PAGESIZE), socket_id);
38 sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
39 name, (unsigned int)id, (unsigned int)len, socket_id,
40 rte_strerror(rte_errno));
44 esmp->esm_addr = mz->iova;
45 if (esmp->esm_addr == RTE_BAD_IOVA) {
46 (void)rte_memzone_free(mz);
51 esmp->esm_base = mz->addr;
57 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
61 sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
63 rc = rte_memzone_free(esmp->esm_mz);
65 sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
67 memset(esmp, 0, sizeof(*esmp));
/*
 * Translate an ethdev link_speeds bitmask into a libefx PHY capability
 * mask (EFX_PHY_CAP_* bits). The autonegotiation bit is included unless
 * the caller set ETH_LINK_SPEED_FIXED.
 */
sfc_phy_cap_from_link_speeds(uint32_t speeds)
	uint32_t phy_caps = 0;

	/* FIXED flag clear => autonegotiation is permitted */
	if (~speeds & ETH_LINK_SPEED_FIXED) {
		phy_caps |= (1 << EFX_PHY_CAP_AN);
		/*
		 * If no speeds are specified in the mask, any supported
		 * speed may be advertised.
		 */
		if (speeds == ETH_LINK_SPEED_AUTONEG)
			(1 << EFX_PHY_CAP_1000FDX) |
			(1 << EFX_PHY_CAP_10000FDX) |
			(1 << EFX_PHY_CAP_40000FDX);
	/* Explicitly requested speeds (full-duplex capabilities only) */
	if (speeds & ETH_LINK_SPEED_1G)
		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
	if (speeds & ETH_LINK_SPEED_10G)
		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
	if (speeds & ETH_LINK_SPEED_40G)
		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules; only adapter-wide settings are validated here.
 */
sfc_check_conf(struct sfc_adapter *sa)
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;

	/* Reduce requested speeds to what the PHY actually supports */
	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	/* Something besides the AN bit must remain, otherwise no usable speed */
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",
	/* Reject features this PMD does not implement */
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");
	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");
	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
		sfc_err(sa, "Flow Director not supported");
	/* LSC interrupt needs line or message interrupts from the NIC */
	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");
	if (conf->intr_conf.rxq != 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised.
 *
 * NIC is kept initialized on success to allow other modules acquire
 * defaults and capabilities.
 */
sfc_estimate_resource_limits(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	/* TSOv2 contexts appear to be shared per PF; cap Tx queues by the
	 * per-function share — TODO confirm against libefx docs */
	limits.edl_max_txq_count =
		MIN(limits.edl_max_txq_count,
		    encp->enc_fw_assisted_tso_v2_n_contexts /
		    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
		goto fail_get_vi_pool;

	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */
	/* NOTE(review): efx_nic_fini() below is on the failure path only */
	efx_nic_fini(sa->nic);
/*
 * Lock NIC resource limits to the exact queue counts configured on the
 * ethdev. Min == max for every limit, so the values are strict.
 */
sfc_set_drv_limits(struct sfc_adapter *sa)
	const struct rte_eth_dev_data *data = sa->eth_dev->data;
	efx_drv_limits_t lim;

	memset(&lim, 0, sizeof(lim));

	/* Limits are strict since take into account initial estimation */
	/* One management EVQ plus one EVQ per Rx queue and per Tx queue */
	lim.edl_min_evq_count = lim.edl_max_evq_count =
		1 + data->nb_rx_queues + data->nb_tx_queues;
	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;

	return efx_nic_set_drv_limits(sa->nic, &lim);
/*
 * One attempt to bring the adapter up: NIC init, optional tunnel
 * reconfiguration, then interrupts, events, port, Rx, Tx and flows,
 * in that order. Called with the adapter lock held, in STARTING state.
 * On failure the usual goto-unwind tears down in reverse order.
 */
sfc_try_start(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);

	/* Apply UDP tunnel config only when the NIC supports encapsulations */
	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
			goto fail_tunnel_reconfigure;

	/* Start submodules bottom-up */
	rc = sfc_intr_start(sa);
		goto fail_intr_start;

	rc = sfc_ev_start(sa);

	rc = sfc_port_start(sa);
		goto fail_port_start;

	rc = sfc_rx_start(sa);

	rc = sfc_tx_start(sa);

	rc = sfc_flow_start(sa);
		goto fail_flows_insert;

	sfc_log_init(sa, "done");

fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Bring the adapter from CONFIGURED to STARTED. Retries sfc_try_start()
 * a few times for transient errors (EIO/EAGAIN/ENOENT/EINVAL), which
 * can result from an in-flight MC reboot. Lock must be held.
 */
sfc_start(struct sfc_adapter *sa)
	unsigned int start_tries = 3;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	case SFC_ADAPTER_CONFIGURED:
	case SFC_ADAPTER_STARTED:
		sfc_notice(sa, "already started");

	sa->state = SFC_ADAPTER_STARTING;

		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));

	sa->state = SFC_ADAPTER_STARTED;
	sfc_log_init(sa, "done");

	/* Failure path: fall back to CONFIGURED so start may be retried */
	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Stop a STARTED adapter: tear down submodules and finalize the NIC,
 * returning to CONFIGURED state. No-op if already stopped.
 * Lock must be held.
 */
sfc_stop(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	case SFC_ADAPTER_STARTED:
	case SFC_ADAPTER_CONFIGURED:
		sfc_notice(sa, "already stopped");
		/* Unexpected states are logged but otherwise tolerated */
		sfc_err(sa, "stop in unexpected state %u", sa->state);

	sa->state = SFC_ADAPTER_STOPPING;

	efx_nic_fini(sa->nic);

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
/*
 * Restart a STARTED adapter (stop then start). Lock must be held;
 * returns an error for any other state.
 */
sfc_restart(struct sfc_adapter *sa)
	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ADAPTER_STARTED)

		sfc_err(sa, "restart failed");
/*
 * Alarm callback: perform a deferred restart if one was scheduled.
 * The restart_required flag is consumed atomically so concurrent
 * schedulers cannot trigger a double restart.
 */
sfc_restart_if_required(void *arg)
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
		sfc_adapter_lock(sa);
		/* Restart only makes sense while the adapter is running */
		if (sa->state == SFC_ADAPTER_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
/*
 * Request an asynchronous adapter restart via a 1us EAL alarm.
 * Safe to call from contexts where the adapter lock is held, since
 * the actual restart runs later from the alarm callback.
 */
sfc_schedule_restart(struct sfc_adapter *sa)
	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))

	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
		/* ENOTSUP from alarm API: leave the flag set as "pending" */
		sfc_warn(sa, "alarms are not supported, restart is pending");
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
		sfc_notice(sa, "restart scheduled");
/*
 * Apply the device configuration: validate it, then configure the
 * interrupt, port, Rx and Tx submodules. Moves the adapter from
 * INITIALIZED/CONFIGURED to CONFIGURED; on failure unwinds and returns
 * to INITIALIZED. Lock must be held.
 */
sfc_configure(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
		goto fail_tx_configure;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");

	/* Failure path */
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
/*
 * Close a CONFIGURED adapter: release per-configuration resources and
 * return to INITIALIZED state. Lock must be held.
 */
sfc_close(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
/*
 * Describe the PCI memory BAR @membar in sa->mem_bar for libefx
 * register access (base address, BAR id, owning PCI device and lock).
 */
sfc_mem_bar_init(struct sfc_adapter *sa, unsigned int membar)
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res = &pci_dev->mem_resource[membar];

	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
	ebp->esb_rid = membar;
	ebp->esb_dev = pci_dev;
	ebp->esb_base = res->addr;
/*
 * Tear down the memory BAR descriptor set up by sfc_mem_bar_init():
 * destroy its lock and clear the structure.
 */
sfc_mem_bar_fini(struct sfc_adapter *sa)
	efsys_bar_t *ebp = &sa->mem_bar;

	SFC_BAR_LOCK_DESTROY(ebp);
	memset(ebp, 0, sizeof(*ebp));
#if EFSYS_OPT_RX_SCALE
/*
 * A fixed RSS key which has a property of being symmetric
 * (symmetrical flows are distributed to the same CPU)
 * and also known to give a uniform distribution
 * (a good distribution of traffic between different CPUs)
 */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
#if EFSYS_OPT_RX_SCALE
/*
 * Probe default RSS capabilities (scale and hash support) by briefly
 * bringing up the interrupt, event and Rx modules, then seed the RSS
 * hash types and the default (symmetric) RSS key.
 */
sfc_set_rss_defaults(struct sfc_adapter *sa)
	/* Temporary bring-up is needed to query the Rx defaults */
	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);

	rc = efx_ev_init(sa->nic);

	rc = efx_rx_init(sa->nic);

	rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
		goto fail_hash_support_get;

	/* Defaults captured; tear the temporary bring-up back down */
	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);

	rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));

fail_hash_support_get:
fail_scale_support_get:

	efx_ev_fini(sa->nic);

	efx_intr_fini(sa->nic);
/* RSS compiled out (!EFSYS_OPT_RX_SCALE): no-op stub with same signature */
sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa)
/*
 * Attach the adapter: reset the NIC, initialize tunnel support,
 * estimate resource limits (leaves the NIC initialized), then attach
 * the interrupt, event, port, RSS and filter submodules. Ends in
 * INITIALIZED state with the NIC finalized again. Lock must be held.
 */
sfc_attach(struct sfc_adapter *sa)
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* Start a new MCDI epoch after the (re)start of this function */
	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	/* Enable TSO only if both datapath and firmware support it */
	if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
			"TSO support isn't available on this adapter");

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
		goto fail_estimate_rsrc_limits;

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	rc = sfc_intr_attach(sa);
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);

	rc = sfc_port_attach(sa);
		goto fail_port_attach;

	rc = sfc_set_rss_defaults(sa);
		goto fail_set_rss_defaults;

	rc = sfc_filter_attach(sa);
		goto fail_filter_attach;

	sfc_log_init(sa, "fini nic");

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");

/* Failure path: unwind in reverse order of attachment */
fail_set_rss_defaults:

	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:

	efx_tunnel_fini(sa->nic);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Detach the adapter: release submodule resources acquired by
 * sfc_attach() and drop to UNINITIALIZED state. Lock must be held.
 */
sfc_detach(struct sfc_adapter *sa)
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_filter_detach(sa);

	efx_tunnel_fini(sa->nic);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Kvargs handler for the fw_variant device argument: map the string
 * value (case-insensitive) to the corresponding efx_fw_variant_t and
 * store it via @opaque (uint32_t *).
 */
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
/*
 * Determine the running firmware variant from the RxDPCPU firmware ID
 * reported by the NIC and store it in *efv.
 */
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
	efx_nic_fw_info_t enfi;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	/* Without valid DPCPU firmware IDs the variant cannot be determined */
	else if (!enfi.enfi_dpcpu_fw_ids_valid)

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware ID.
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;
	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;
	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;
	/*
	 * Other firmware variants are not considered, since they are
	 * not supported in the device parameters
	 */
	*efv = EFX_FW_VARIANT_DONT_CARE;
806 sfc_fw_variant2str(efx_fw_variant_t efv)
809 case EFX_RXDP_FULL_FEATURED_FW_ID:
810 return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
811 case EFX_RXDP_LOW_LATENCY_FW_ID:
812 return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
813 case EFX_RXDP_PACKED_STREAM_FW_ID:
814 return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
/*
 * Probe the NIC with the firmware variant requested via the fw_variant
 * device argument (default: don't care). Unprivileged functions cannot
 * select a variant, so the probe is retried with DONT_CARE; afterwards
 * the actually running variant is queried and reported.
 */
sfc_nic_probe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);

	rc = efx_nic_probe(enp, preferred_efv);
	/* Unprivileged functions cannot set FW variant */
	rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);

	rc = sfc_get_fw_variant(sa, &efv);
		/* Non-fatal: continue without knowing the variant */
		sfc_warn(sa, "FW variant can not be obtained");

	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));
/*
 * Probe the device: identify the controller family from the PCI IDs,
 * map its memory BAR, create the libefx NIC object, bring up MCDI and
 * probe the NIC firmware. On failure, unwinds via the goto labels at
 * the bottom. Lock must be held.
 */
sfc_probe(struct sfc_adapter *sa)
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family, &membar);
	sfc_log_init(sa, "family is %u, membar is %u", sa->family, membar);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, membar);
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
		goto fail_nic_create;

	rc = sfc_mcdi_init(sa);

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);

	sfc_log_init(sa, "done");

	/* Failure path: unwind in reverse order of acquisition */
	sfc_log_init(sa, "destroy nic");

	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sfc_log_init(sa, "failed %d", rc);
/*
 * Undo sfc_probe(): unprobe and destroy the libefx NIC object, cancel
 * any pending restart alarm and unmap the memory BAR. Ends in
 * UNINITIALIZED state. Lock must be held.
 */
sfc_unprobe(struct sfc_adapter *sa)
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free device private which is passed as the callback
	 * opaque data. A new alarm cannot be scheduled since MCDI is
	 * already torn down at this point.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_log_init(sa, "destroy nic");

	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
/*
 * Register a per-device logtype named "<lt_prefix_str>.<PCI address>"
 * with the given default level. Falls back to RTE_LOGTYPE_PMD when the
 * name cannot be built (overflow), allocation fails, or registration
 * fails.
 */
sfc_register_logtype(struct sfc_adapter *sa, const char *lt_prefix_str,
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;

	/* Guard against size_t overflow when sizing the name buffer */
	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
		/* Overflow branch: give up and use the generic PMD logtype */
		return RTE_LOGTYPE_PMD;

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
		return RTE_LOGTYPE_PMD;

	/* Copy the prefix, then replace its NUL with the '.' separator */
	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(&sa->pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);

	/* Negative return means registration failed; use generic PMD type */
	return (ret < 0) ? RTE_LOGTYPE_PMD : ret;