1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation.
 */
9 #include <rte_common.h>
10 #include <rte_lcore.h>
11 #include <rte_cycles.h>
15 #include <rte_bus_pci.h>
16 #include <rte_memzone.h>
17 #include <rte_memcpy.h>
18 #include <rte_rawdev.h>
19 #include <rte_rawdev_pmd.h>
21 #include "ntb_hw_intel.h"
/* PCI device ID table: only the Intel Skylake back-to-back (B2B) NTB
 * device is supported. NOTE(review): the closing "};" of this table is
 * not visible in this view of the file.
 */
26 static const struct rte_pci_id pci_id_ntb_map[] = {
27 { RTE_PCI_DEVICE(NTB_INTEL_VENDOR_ID, NTB_INTEL_DEV_ID_B2B_SKX) },
28 { .vendor_id = 0, /* sentinel */ },
/*
 * ntb_set_mw - program one local memory window (MW).
 *
 * Looks up (or reserves) an IOVA-contiguous memzone named
 * "ntb_<dev>_mw_<idx>", aligned to the hardware window size, then
 * programs the device's mw_set_trans op with the memzone IOVA so the
 * peer can reach this memory through window `mw_idx`.
 *
 * NOTE(review): the return type, error returns, and closing braces of
 * this function are missing from this view; comments describe only
 * the visible statements.
 */
32 ntb_set_mw(struct rte_rawdev *dev, int mw_idx, uint64_t mw_size)
34 struct ntb_hw *hw = dev->dev_private;
35 char mw_name[RTE_MEMZONE_NAMESIZE];
36 const struct rte_memzone *mz;
/* mw_set_trans is mandatory for this operation. */
39 if (hw->ntb_ops->mw_set_trans == NULL) {
40 NTB_LOG(ERR, "Not supported to set mw.");
/* The memzone name encodes device id and window index so a
 * previously reserved zone can be reused on re-start.
 */
44 snprintf(mw_name, sizeof(mw_name), "ntb_%d_mw_%d",
47 mz = rte_memzone_lookup(mw_name);
52 * Hardware requires that mapped memory base address should be
53 * aligned with EMBARSZ and needs continuous memzone.
55 mz = rte_memzone_reserve_aligned(mw_name, mw_size, dev->socket_id,
56 RTE_MEMZONE_IOVA_CONTIG, hw->mw_size[mw_idx]);
58 NTB_LOG(ERR, "Cannot allocate aligned memzone.");
/* Program the hardware translation with the zone's IOVA. */
63 ret = (*hw->ntb_ops->mw_set_trans)(dev, mw_idx, mz->iova, mw_size);
65 NTB_LOG(ERR, "Cannot set mw translation.");
/*
 * ntb_link_cleanup - tear down link-level state on the local side.
 *
 * Zeroes every local scratchpad register and clears every memory
 * window translation so the peer can no longer access local memory.
 * NOTE(review): local declarations (i, status), braces and return
 * lines are missing from this view.
 */
73 ntb_link_cleanup(struct rte_rawdev *dev)
75 struct ntb_hw *hw = dev->dev_private;
/* Both ops are required; bail out with an error log otherwise. */
78 if (hw->ntb_ops->spad_write == NULL ||
79 hw->ntb_ops->mw_set_trans == NULL) {
80 NTB_LOG(ERR, "Not supported to clean up link.");
84 /* Clean spad registers. */
85 for (i = 0; i < hw->spad_cnt; i++) {
86 status = (*hw->ntb_ops->spad_write)(dev, i, 0, 0);
88 NTB_LOG(ERR, "Failed to clean local spad.");
91 /* Clear mw so that peer cannot access local memory.*/
92 for (i = 0; i < hw->mw_cnt; i++) {
93 status = (*hw->ntb_ops->mw_set_trans)(dev, i, 0, 0);
95 NTB_LOG(ERR, "Failed to clean mw.");
/*
 * ntb_dev_intr_handler - doorbell interrupt service routine.
 *
 * Doorbell protocol as visible in this view:
 *   DB0 - peer device is up: read the peer's MW count and sizes from
 *         the scratchpad registers, write our own back (handshake, in
 *         case the peer missed our earlier writes), ring peer DB0,
 *         and query the link status.
 *   DB1 - peer device is going down: clean up local link state and
 *         ring peer DB2 as acknowledgement.
 *   DB2 - peer acknowledged our dev_stop request.
 *
 * NOTE(review): many lines (declaration of i, the db_bits bit test
 * for DB0, braces, returns) are missing from this view; comments
 * describe only the visible statements.
 */
100 ntb_dev_intr_handler(void *param)
102 struct rte_rawdev *dev = (struct rte_rawdev *)param;
103 struct ntb_hw *hw = dev->dev_private;
104 uint32_t mw_size_h, mw_size_l;
105 uint64_t db_bits = 0;
/* All three doorbell ops are required to service the interrupt. */
108 if (hw->ntb_ops->db_read == NULL ||
109 hw->ntb_ops->db_clear == NULL ||
110 hw->ntb_ops->peer_db_set == NULL) {
111 NTB_LOG(ERR, "Doorbell is not supported.");
/* Snapshot all pending doorbell bits. */
115 db_bits = (*hw->ntb_ops->db_read)(dev);
117 NTB_LOG(ERR, "No doorbells");
119 /* Doorbell 0 is for peer device ready. */
121 NTB_LOG(DEBUG, "DB0: Peer device is up.");
122 /* Clear received doorbell. */
123 (*hw->ntb_ops->db_clear)(dev, 1);
126 * Peer dev is already up. All mw settings are already done.
/* Scratchpad ops are needed for the MW-size handshake below. */
132 if (hw->ntb_ops->spad_read == NULL ||
133 hw->ntb_ops->spad_write == NULL) {
134 NTB_LOG(ERR, "Scratchpad is not supported.");
/* Learn the peer's MW count, then each window size as two
 * 32-bit scratchpad halves (SZ_H / SZ_L per window).
 * NOTE(review): the rte_zmalloc result is not checked in the
 * visible lines.
 */
138 hw->peer_mw_cnt = (*hw->ntb_ops->spad_read)
139 (dev, SPAD_NUM_MWS, 0);
140 hw->peer_mw_size = rte_zmalloc("uint64_t",
141 hw->peer_mw_cnt * sizeof(uint64_t), 0);
142 for (i = 0; i < hw->mw_cnt; i++) {
143 mw_size_h = (*hw->ntb_ops->spad_read)
144 (dev, SPAD_MW0_SZ_H + 2 * i, 0);
145 mw_size_l = (*hw->ntb_ops->spad_read)
146 (dev, SPAD_MW0_SZ_L + 2 * i, 0);
147 hw->peer_mw_size[i] = ((uint64_t)mw_size_h << 32) |
149 NTB_LOG(DEBUG, "Peer %u mw size: 0x%"PRIx64"", i,
150 hw->peer_mw_size[i]);
156 * Handshake with peer. Spad_write only works when both
157 * devices are up. So write spad again when db is received.
158 * And set db again for the later device who may miss
161 for (i = 0; i < hw->mw_cnt; i++) {
162 (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS,
164 mw_size_h = hw->mw_size[i] >> 32;
165 (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_H + 2 * i,
168 mw_size_l = hw->mw_size[i];
169 (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_L + 2 * i,
/* Ring peer DB0 so a late-starting peer still gets notified. */
172 (*hw->ntb_ops->peer_db_set)(dev, 0);
174 /* To get the link info. */
175 if (hw->ntb_ops->get_link_status == NULL) {
176 NTB_LOG(ERR, "Not supported to get link status.");
179 (*hw->ntb_ops->get_link_status)(dev);
180 NTB_LOG(INFO, "Link is up. Link speed: %u. Link width: %u",
181 hw->link_speed, hw->link_width);
/* Doorbell 1: peer is shutting down. */
185 if (db_bits & (1 << 1)) {
186 NTB_LOG(DEBUG, "DB1: Peer device is down.");
187 /* Clear received doorbell. */
188 (*hw->ntb_ops->db_clear)(dev, 2);
190 /* Peer device will be down, So clean local side too. */
191 ntb_link_cleanup(dev);
194 /* Response peer's dev_stop request. */
195 (*hw->ntb_ops->peer_db_set)(dev, 2);
/* Doorbell 2: peer acknowledged our shutdown request. */
199 if (db_bits & (1 << 2)) {
200 NTB_LOG(DEBUG, "DB2: Peer device agrees dev to be down.");
201 /* Clear received doorbell. */
202 (*hw->ntb_ops->db_clear)(dev, (1 << 2));
/* Queue default-config query. All parameters are unused; the body is
 * not visible in this view (presumably a no-op stub - TODO confirm).
 */
209 ntb_queue_conf_get(struct rte_rawdev *dev __rte_unused,
210 uint16_t queue_id __rte_unused,
211 rte_rawdev_obj_t queue_conf __rte_unused)
/* Queue setup. All parameters are unused; the body is not visible in
 * this view (presumably a no-op stub - TODO confirm).
 */
216 ntb_queue_setup(struct rte_rawdev *dev __rte_unused,
217 uint16_t queue_id __rte_unused,
218 rte_rawdev_obj_t queue_conf __rte_unused)
/* Queue release. All parameters are unused; the body is not visible
 * in this view (presumably a no-op stub - TODO confirm).
 */
224 ntb_queue_release(struct rte_rawdev *dev __rte_unused,
225 uint16_t queue_id __rte_unused)
/* Return the number of configured queue pairs for this device. */
231 ntb_queue_count(struct rte_rawdev *dev)
233 struct ntb_hw *hw = dev->dev_private;
234 return hw->queue_pairs;
/*
 * ntb_enqueue_bufs - copy `count` buffers to the peer over MW 0.
 *
 * Test-only data path (see in-code comment): every buffer is copied
 * to the same peer BAR address; `context` carries the copy size cast
 * to size_t. NOTE(review): the declarations of bar_addr/size/i, the
 * count parameter line, and the return are missing from this view.
 */
238 ntb_enqueue_bufs(struct rte_rawdev *dev,
239 struct rte_rawdev_buf **buffers,
241 rte_rawdev_obj_t context)
243 /* Not FIFO right now. Just for testing memory write. */
244 struct ntb_hw *hw = dev->dev_private;
/* The peer MW address op is required for the copy. */
249 if (hw->ntb_ops->get_peer_mw_addr == NULL)
251 bar_addr = (*hw->ntb_ops->get_peer_mw_addr)(dev, 0);
252 size = (size_t)context;
254 for (i = 0; i < count; i++)
255 rte_memcpy(bar_addr, buffers[i]->buf_addr, size);
/*
 * ntb_dequeue_bufs - copy data from the local memzones into `count`
 * caller buffers.
 *
 * Test-only data path (see in-code comment): buffer i is filled from
 * hw->mz[i]; `context` carries the copy size cast to size_t.
 * NOTE(review): the declarations of size/i, the count parameter line,
 * and the return are missing from this view.
 */
260 ntb_dequeue_bufs(struct rte_rawdev *dev,
261 struct rte_rawdev_buf **buffers,
263 rte_rawdev_obj_t context)
265 /* Not FIFO. Just for testing memory read. */
266 struct ntb_hw *hw = dev->dev_private;
270 size = (size_t)context;
272 for (i = 0; i < count; i++)
273 rte_memcpy(buffers[i]->buf_addr, hw->mz[i]->addr, size);
/*
 * ntb_dev_info_get - fill the caller's ntb_attr array with name/value
 * string pairs: topology, link status, link speed, link width, and
 * the MW / doorbell / scratchpad counts.
 *
 * NOTE(review): the switch header (presumably on hw->topo), several
 * continuation argument lines, and the closing braces are missing
 * from this view.
 */
278 ntb_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
280 struct ntb_hw *hw = dev->dev_private;
281 struct ntb_attr *ntb_attrs = dev_info;
/* Topology attribute: value text depends on the B2B role. */
283 strncpy(ntb_attrs[NTB_TOPO_ID].name, NTB_TOPO_NAME, NTB_ATTR_NAME_LEN);
285 case NTB_TOPO_B2B_DSD:
286 strncpy(ntb_attrs[NTB_TOPO_ID].value, "B2B DSD",
289 case NTB_TOPO_B2B_USD:
290 strncpy(ntb_attrs[NTB_TOPO_ID].value, "B2B USD",
294 strncpy(ntb_attrs[NTB_TOPO_ID].value, "Unsupported",
/* Numeric attributes are rendered as decimal strings. */
298 strncpy(ntb_attrs[NTB_LINK_STATUS_ID].name, NTB_LINK_STATUS_NAME,
300 snprintf(ntb_attrs[NTB_LINK_STATUS_ID].value, NTB_ATTR_VAL_LEN,
301 "%d", hw->link_status);
303 strncpy(ntb_attrs[NTB_SPEED_ID].name, NTB_SPEED_NAME,
305 snprintf(ntb_attrs[NTB_SPEED_ID].value, NTB_ATTR_VAL_LEN,
306 "%d", hw->link_speed);
308 strncpy(ntb_attrs[NTB_WIDTH_ID].name, NTB_WIDTH_NAME,
310 snprintf(ntb_attrs[NTB_WIDTH_ID].value, NTB_ATTR_VAL_LEN,
311 "%d", hw->link_width);
313 strncpy(ntb_attrs[NTB_MW_CNT_ID].name, NTB_MW_CNT_NAME,
315 snprintf(ntb_attrs[NTB_MW_CNT_ID].value, NTB_ATTR_VAL_LEN,
318 strncpy(ntb_attrs[NTB_DB_CNT_ID].name, NTB_DB_CNT_NAME,
320 snprintf(ntb_attrs[NTB_DB_CNT_ID].value, NTB_ATTR_VAL_LEN,
323 strncpy(ntb_attrs[NTB_SPAD_CNT_ID].name, NTB_SPAD_CNT_NAME,
325 snprintf(ntb_attrs[NTB_SPAD_CNT_ID].value, NTB_ATTR_VAL_LEN,
/* Device configure. Parameters are unused; the body is not visible in
 * this view (presumably a no-op stub - TODO confirm).
 */
330 ntb_dev_configure(const struct rte_rawdev *dev __rte_unused,
331 rte_rawdev_obj_t config __rte_unused)
/*
 * ntb_dev_start - start the device: allocate the local memzone
 * pointer array and program every memory window via ntb_set_mw().
 *
 * NOTE(review): the rte_zmalloc result is not checked in the visible
 * lines, and the declarations of ret/i plus the error/return paths
 * are missing from this view.
 */
337 ntb_dev_start(struct rte_rawdev *dev)
339 struct ntb_hw *hw = dev->dev_private;
342 /* TODO: init queues and start queues. */
344 /* Map memory of bar_size to remote. */
345 hw->mz = rte_zmalloc("struct rte_memzone *",
346 hw->mw_cnt * sizeof(struct rte_memzone *), 0);
347 for (i = 0; i < hw->mw_cnt; i++) {
348 ret = ntb_set_mw(dev, i, hw->mw_size[i]);
350 NTB_LOG(ERR, "Fail to set mw.");
/*
 * ntb_dev_stop - stop the device and coordinate shutdown with peer.
 *
 * If the peer is up: clean local link state, ring peer DB1 ("we are
 * going down"), then wait (bounded, see the 1s time-out comment) for
 * the peer to acknowledge before masking all doorbells.
 * NOTE(review): declarations of status/time_out, the wait-loop body,
 * and the return paths are missing from this view.
 */
361 ntb_dev_stop(struct rte_rawdev *dev)
363 struct ntb_hw *hw = dev->dev_private;
367 /* TODO: stop rx/tx queues. */
/* Nothing to tear down if the peer never came up. */
369 if (!hw->peer_dev_up)
372 ntb_link_cleanup(dev);
374 /* Notify the peer that device will be down. */
375 if (hw->ntb_ops->peer_db_set == NULL) {
376 NTB_LOG(ERR, "Peer doorbell setting is not supported.");
379 status = (*hw->ntb_ops->peer_db_set)(dev, 1);
381 NTB_LOG(ERR, "Failed to tell peer device is down.");
386 * Set time out as 1s in case that the peer is stopped accidently
387 * without any notification.
391 /* Wait for cleanup work down before db mask clear. */
392 while (hw->peer_dev_up && time_out) {
398 /* Clear doorbells mask. */
399 if (hw->ntb_ops->db_set_mask == NULL) {
400 NTB_LOG(ERR, "Doorbell mask setting is not supported.");
/* Mask every doorbell bit so no further interrupts fire. */
403 status = (*hw->ntb_ops->db_set_mask)(dev,
404 (((uint64_t)1 << hw->db_cnt) - 1));
406 NTB_LOG(ERR, "Failed to clear doorbells.");
/*
 * ntb_dev_close - close the device: disable event fds, free the
 * interrupt vector table, disable the UIO/VFIO interrupt, and
 * unregister the doorbell interrupt callback.
 * NOTE(review): braces and return lines are missing from this view.
 */
412 ntb_dev_close(struct rte_rawdev *dev)
414 struct ntb_hw *hw = dev->dev_private;
415 struct rte_intr_handle *intr_handle;
421 /* TODO: free queues. */
423 intr_handle = &hw->pci_dev->intr_handle;
424 /* Clean datapath event and vec mapping */
425 rte_intr_efd_disable(intr_handle);
426 if (intr_handle->intr_vec) {
427 rte_free(intr_handle->intr_vec);
428 intr_handle->intr_vec = NULL;
430 /* Disable uio intr before callback unregister */
431 rte_intr_disable(intr_handle);
433 /* Unregister callback func to eal lib */
434 rte_intr_callback_unregister(intr_handle,
435 ntb_dev_intr_handler, dev);
/* Device reset. Parameter is unused; the body is not visible in this
 * view (presumably a no-op stub - TODO confirm).
 */
441 ntb_dev_reset(struct rte_rawdev *rawdev __rte_unused)
/*
 * ntb_attr_set - set a writable attribute by name.
 *
 * Only user scratchpad attributes ("spad_user_<index>") are writable:
 * the index is parsed from the attribute name and the value is written
 * to the mapped scratchpad register. Any other name is rejected.
 * NOTE(review): the attr_value parameter line, local declarations
 * (hw, index) and return lines are missing from this view; the parsed
 * index is not bounds-checked against spad_user_list in the visible
 * lines.
 */
447 ntb_attr_set(struct rte_rawdev *dev, const char *attr_name,
453 if (dev == NULL || attr_name == NULL) {
454 NTB_LOG(ERR, "Invalid arguments for setting attributes");
458 hw = dev->dev_private;
460 if (!strncmp(attr_name, NTB_SPAD_USER, NTB_SPAD_USER_LEN)) {
461 if (hw->ntb_ops->spad_write == NULL)
463 index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
464 (*hw->ntb_ops->spad_write)(dev, hw->spad_user_list[index],
466 NTB_LOG(INFO, "Set attribute (%s) Value (%" PRIu64 ")",
467 attr_name, attr_value);
471 /* Attribute not found. */
472 NTB_LOG(ERR, "Attribute not found.");
/*
 * ntb_attr_get - read an attribute by name into *attr_value.
 *
 * Recognized names: topology, link status, link speed, link width,
 * MW count, doorbell count, scratchpad count, and user scratchpads
 * ("spad_user_<index>", read through the mapped register). Unknown
 * names are rejected with an error log.
 * NOTE(review): local declarations (hw, index), braces and return
 * lines are missing from this view; the parsed spad index is not
 * bounds-checked in the visible lines.
 */
477 ntb_attr_get(struct rte_rawdev *dev, const char *attr_name,
478 uint64_t *attr_value)
483 if (dev == NULL || attr_name == NULL || attr_value == NULL) {
484 NTB_LOG(ERR, "Invalid arguments for getting attributes");
488 hw = dev->dev_private;
490 if (!strncmp(attr_name, NTB_TOPO_NAME, NTB_ATTR_NAME_LEN)) {
491 *attr_value = hw->topo;
492 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
493 attr_name, *attr_value);
497 if (!strncmp(attr_name, NTB_LINK_STATUS_NAME, NTB_ATTR_NAME_LEN)) {
498 *attr_value = hw->link_status;
499 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
500 attr_name, *attr_value);
504 if (!strncmp(attr_name, NTB_SPEED_NAME, NTB_ATTR_NAME_LEN)) {
505 *attr_value = hw->link_speed;
506 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
507 attr_name, *attr_value);
511 if (!strncmp(attr_name, NTB_WIDTH_NAME, NTB_ATTR_NAME_LEN)) {
512 *attr_value = hw->link_width;
513 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
514 attr_name, *attr_value);
518 if (!strncmp(attr_name, NTB_MW_CNT_NAME, NTB_ATTR_NAME_LEN)) {
519 *attr_value = hw->mw_cnt;
520 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
521 attr_name, *attr_value);
525 if (!strncmp(attr_name, NTB_DB_CNT_NAME, NTB_ATTR_NAME_LEN)) {
526 *attr_value = hw->db_cnt;
527 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
528 attr_name, *attr_value);
532 if (!strncmp(attr_name, NTB_SPAD_CNT_NAME, NTB_ATTR_NAME_LEN)) {
533 *attr_value = hw->spad_cnt;
534 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
535 attr_name, *attr_value);
/* User scratchpads are read through the hardware register map. */
539 if (!strncmp(attr_name, NTB_SPAD_USER, NTB_SPAD_USER_LEN)) {
540 if (hw->ntb_ops->spad_read == NULL)
542 index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
543 *attr_value = (*hw->ntb_ops->spad_read)(dev,
544 hw->spad_user_list[index], 0);
545 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
546 attr_name, *attr_value);
550 /* Attribute not found. */
551 NTB_LOG(ERR, "Attribute not found.");
/* xstats read. All parameters are unused; the body is not visible in
 * this view (presumably a stub - TODO confirm).
 */
556 ntb_xstats_get(const struct rte_rawdev *dev __rte_unused,
557 const unsigned int ids[] __rte_unused,
558 uint64_t values[] __rte_unused,
559 unsigned int n __rte_unused)
/* xstats name listing. All parameters are unused; the body is not
 * visible in this view (presumably a stub - TODO confirm).
 */
565 ntb_xstats_get_names(const struct rte_rawdev *dev __rte_unused,
566 struct rte_rawdev_xstats_name *xstats_names __rte_unused,
567 unsigned int size __rte_unused)
/* xstats lookup by name. All parameters are unused; the body is not
 * visible in this view (presumably a stub - TODO confirm).
 */
573 ntb_xstats_get_by_name(const struct rte_rawdev *dev __rte_unused,
574 const char *name __rte_unused,
575 unsigned int *id __rte_unused)
/* xstats reset. All parameters are unused; the body is not visible in
 * this view (presumably a stub - TODO confirm).
 */
581 ntb_xstats_reset(struct rte_rawdev *dev __rte_unused,
582 const uint32_t ids[] __rte_unused,
583 uint32_t nb_ids __rte_unused)
/* Rawdev ops table: wires the NTB callbacks above into the generic
 * rawdev framework.
 */
588 static const struct rte_rawdev_ops ntb_ops = {
589 .dev_info_get = ntb_dev_info_get,
590 .dev_configure = ntb_dev_configure,
591 .dev_start = ntb_dev_start,
592 .dev_stop = ntb_dev_stop,
593 .dev_close = ntb_dev_close,
594 .dev_reset = ntb_dev_reset,
596 .queue_def_conf = ntb_queue_conf_get,
597 .queue_setup = ntb_queue_setup,
598 .queue_release = ntb_queue_release,
599 .queue_count = ntb_queue_count,
601 .enqueue_bufs = ntb_enqueue_bufs,
602 .dequeue_bufs = ntb_dequeue_bufs,
604 .attr_get = ntb_attr_get,
605 .attr_set = ntb_attr_set,
607 .xstats_get = ntb_xstats_get,
608 .xstats_get_names = ntb_xstats_get_names,
609 .xstats_get_by_name = ntb_xstats_get_by_name,
610 .xstats_reset = ntb_xstats_reset,
/*
 * ntb_init_hw - one-time hardware/driver initialization.
 *
 * Visible sequence: select the per-device ops table by PCI device id,
 * run the device-specific init op, enable the link, set up the
 * doorbell interrupt path (callback registration, event fds, vector
 * binding for non-multi-intr setups, unmask doorbells, enable intr),
 * publish the local MW count and sizes to the peer via scratchpads,
 * and finally ring peer DB0 to announce this side is ready.
 *
 * NOTE(review): local declarations (ret, i, val), many error-return
 * lines and braces are missing from this view; comments describe only
 * the visible statements.
 */
614 ntb_init_hw(struct rte_rawdev *dev, struct rte_pci_device *pci_dev)
616 struct ntb_hw *hw = dev->dev_private;
617 struct rte_intr_handle *intr_handle;
621 hw->pci_dev = pci_dev;
/* Link state starts as down/unknown until the hardware reports it. */
623 hw->link_status = NTB_LINK_DOWN;
624 hw->link_speed = NTB_SPEED_NONE;
625 hw->link_width = NTB_WIDTH_NONE;
/* Pick the ops table for the detected device. */
627 switch (pci_dev->id.device_id) {
628 case NTB_INTEL_DEV_ID_B2B_SKX:
629 hw->ntb_ops = &intel_ntb_ops;
632 NTB_LOG(ERR, "Not supported device.");
636 if (hw->ntb_ops->ntb_dev_init == NULL)
638 ret = (*hw->ntb_ops->ntb_dev_init)(dev);
640 NTB_LOG(ERR, "Unable to init ntb dev.");
644 if (hw->ntb_ops->set_link == NULL)
646 ret = (*hw->ntb_ops->set_link)(dev, 1);
/* Mask of all valid doorbell bits, derived from the db count. */
651 hw->db_valid_mask = RTE_LEN2MASK(hw->db_cnt, uint64_t);
653 intr_handle = &pci_dev->intr_handle;
654 /* Register callback func to eal lib */
655 rte_intr_callback_register(intr_handle,
656 ntb_dev_intr_handler, dev);
658 ret = rte_intr_efd_enable(intr_handle, hw->db_cnt);
662 /* To clarify, the interrupt for each doorbell is already mapped
663 * by default for intel gen3. They are mapped to msix vec 1-32,
664 * and hardware intr is mapped to 0. Map all to 0 for uio.
666 if (!rte_intr_cap_multiple(intr_handle)) {
667 for (i = 0; i < hw->db_cnt; i++) {
668 if (hw->ntb_ops->vector_bind == NULL)
670 ret = (*hw->ntb_ops->vector_bind)(dev, i, 0);
676 if (hw->ntb_ops->db_set_mask == NULL ||
677 hw->ntb_ops->peer_db_set == NULL) {
678 NTB_LOG(ERR, "Doorbell is not supported.");
682 ret = (*hw->ntb_ops->db_set_mask)(dev, hw->db_mask);
684 NTB_LOG(ERR, "Unable to enable intr for all dbs.");
688 /* enable uio intr after callback register */
689 rte_intr_enable(intr_handle);
691 if (hw->ntb_ops->spad_write == NULL) {
692 NTB_LOG(ERR, "Scratchpad is not supported.");
695 /* Tell peer the mw_cnt of local side. */
696 ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS, 1, hw->mw_cnt);
698 NTB_LOG(ERR, "Failed to tell peer mw count.");
702 /* Tell peer each mw size on local side. */
703 for (i = 0; i < hw->mw_cnt; i++) {
704 NTB_LOG(DEBUG, "Local %u mw size: 0x%"PRIx64"", i,
/* Each 64-bit size is published as two 32-bit scratchpad halves. */
706 val = hw->mw_size[i] >> 32;
707 ret = (*hw->ntb_ops->spad_write)
708 (dev, SPAD_MW0_SZ_H + 2 * i, 1, val);
710 NTB_LOG(ERR, "Failed to tell peer mw size.");
714 val = hw->mw_size[i];
715 ret = (*hw->ntb_ops->spad_write)
716 (dev, SPAD_MW0_SZ_L + 2 * i, 1, val);
718 NTB_LOG(ERR, "Failed to tell peer mw size.");
723 /* Ring doorbell 0 to tell peer the device is ready. */
724 ret = (*hw->ntb_ops->peer_db_set)(dev, 0);
726 NTB_LOG(ERR, "Failed to tell peer device is probed.");
/*
 * ntb_create - allocate and initialize a rawdev for the given PCI
 * device on socket `socket_id`.
 *
 * Builds the "NTB:bus:devid.function" name, allocates the rawdev with
 * room for struct ntb_hw, hooks up the ops table, and runs
 * ntb_init_hw(). On init failure the rawdev is released (visible at
 * the rte_rawdev_pmd_release call near the end).
 * NOTE(review): the declaration of ret, some error-path lines and the
 * returns are missing from this view.
 */
734 ntb_create(struct rte_pci_device *pci_dev, int socket_id)
736 char name[RTE_RAWDEV_NAME_MAX_LEN];
737 struct rte_rawdev *rawdev = NULL;
740 if (pci_dev == NULL) {
741 NTB_LOG(ERR, "Invalid pci_dev.");
745 memset(name, 0, sizeof(name));
746 snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "NTB:%x:%02x.%x",
747 pci_dev->addr.bus, pci_dev->addr.devid,
748 pci_dev->addr.function);
750 NTB_LOG(INFO, "Init %s on NUMA node %d", name, socket_id);
752 /* Allocate device structure. */
753 rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct ntb_hw),
755 if (rawdev == NULL) {
756 NTB_LOG(ERR, "Unable to allocate rawdev.");
760 rawdev->dev_ops = &ntb_ops;
761 rawdev->device = &pci_dev->device;
762 rawdev->driver_name = pci_dev->driver->driver.name;
764 ret = ntb_init_hw(rawdev, pci_dev);
766 NTB_LOG(ERR, "Unable to init ntb hw.");
/* Error path: release the partially initialized rawdev. */
774 rte_rawdev_pmd_release(rawdev);
/*
 * ntb_destroy - release the rawdev associated with this PCI device.
 *
 * Rebuilds the same "NTB:bus:devid.function" name used at creation,
 * looks the rawdev up by name, and releases it.
 * NOTE(review): the declaration of ret and the return lines are
 * missing from this view.
 */
780 ntb_destroy(struct rte_pci_device *pci_dev)
782 char name[RTE_RAWDEV_NAME_MAX_LEN];
783 struct rte_rawdev *rawdev;
786 if (pci_dev == NULL) {
787 NTB_LOG(ERR, "Invalid pci_dev.");
792 memset(name, 0, sizeof(name));
793 snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "NTB:%x:%02x.%x",
794 pci_dev->addr.bus, pci_dev->addr.devid,
795 pci_dev->addr.function);
797 NTB_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());
799 rawdev = rte_rawdev_pmd_get_named_dev(name);
800 if (rawdev == NULL) {
801 NTB_LOG(ERR, "Invalid device name (%s)", name);
806 ret = rte_rawdev_pmd_release(rawdev);
808 NTB_LOG(ERR, "Failed to destroy ntb rawdev.");
/* PCI probe hook: create the rawdev on the current NUMA socket. */
814 ntb_probe(struct rte_pci_driver *pci_drv __rte_unused,
815 struct rte_pci_device *pci_dev)
817 return ntb_create(pci_dev, rte_socket_id());
/* PCI remove hook: tear down the rawdev for this PCI device. */
821 ntb_remove(struct rte_pci_device *pci_dev)
823 return ntb_destroy(pci_dev);
/* PCI driver descriptor for the NTB PMD. NOTE(review): the .probe
 * assignment line appears to be missing from this view.
 */
827 static struct rte_pci_driver rte_ntb_pmd = {
828 .id_table = pci_id_ntb_map,
829 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
831 .remove = ntb_remove,
/* Register the PMD, its PCI id table, and the kernel-module
 * dependency (any of igb_uio / uio_pci_generic / vfio-pci).
 */
834 RTE_PMD_REGISTER_PCI(raw_ntb, rte_ntb_pmd);
835 RTE_PMD_REGISTER_PCI_TABLE(raw_ntb, pci_id_ntb_map);
836 RTE_PMD_REGISTER_KMOD_DEP(raw_ntb, "* igb_uio | uio_pci_generic | vfio-pci");
/* Constructor: register the driver's log type and default the log
 * level to DEBUG.
 */
838 RTE_INIT(ntb_init_log)
840 ntb_logtype = rte_log_register("pmd.raw.ntb");
841 if (ntb_logtype >= 0)
842 rte_log_set_level(ntb_logtype, RTE_LOG_DEBUG);