1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation.
9 #include <rte_common.h>
10 #include <rte_lcore.h>
11 #include <rte_cycles.h>
15 #include <rte_bus_pci.h>
16 #include <rte_memzone.h>
17 #include <rte_memcpy.h>
18 #include <rte_rawdev.h>
19 #include <rte_rawdev_pmd.h>
21 #include "ntb_hw_intel.h"
/* PCI ID match table: only the Intel Skylake B2B NTB device is supported.
 * The zeroed vendor_id entry is the table terminator.
 */
26 static const struct rte_pci_id pci_id_ntb_map[] = {
27 { RTE_PCI_DEVICE(NTB_INTEL_VENDOR_ID, NTB_INTEL_DEV_ID_B2B_SKX) },
28 { .vendor_id = 0, /* sentinel */ },
/* Expose local memory window mw_idx to the peer: reserve an IOVA-contiguous
 * memzone of mw_size bytes, aligned to the hardware window size, then program
 * the address translation through the device's mw_set_trans op.
 * NOTE(review): this listing is gap-sampled; the error-return statements that
 * follow each NTB_LOG below are not visible here — confirm against full file.
 */
32 ntb_set_mw(struct rte_rawdev *dev, int mw_idx, uint64_t mw_size)
34 struct ntb_hw *hw = dev->dev_private;
35 char mw_name[RTE_MEMZONE_NAMESIZE];
36 const struct rte_memzone *mz;
39 if (hw->ntb_ops->mw_set_trans == NULL) {
40 NTB_LOG(ERR, "Not supported to set mw.");
/* Memzone name is unique per (device, window) pair so lookup can find it. */
44 snprintf(mw_name, sizeof(mw_name), "ntb_%d_mw_%d",
47 mz = rte_memzone_lookup(mw_name);
/*
 * Hardware requires that the mapped memory base address be aligned
 * with EMBARSZ and needs a physically continuous memzone.
 */
52 * Hardware requires that mapped memory base address should be
53 * aligned with EMBARSZ and needs continuous memzone.
55 mz = rte_memzone_reserve_aligned(mw_name, mw_size, dev->socket_id,
56 RTE_MEMZONE_IOVA_CONTIG, hw->mw_size[mw_idx]);
58 NTB_LOG(ERR, "Cannot allocate aligned memzone.");
/* Program the inbound translation with the memzone's IOVA. */
63 ret = (*hw->ntb_ops->mw_set_trans)(dev, mw_idx, mz->iova, mw_size);
65 NTB_LOG(ERR, "Cannot set mw translation.");
/* Tear down link state on the local side: zero all scratchpad registers and
 * clear every memory-window translation so the peer can no longer reach
 * local memory. Called on dev_stop and when the peer signals it is going down.
 */
73 ntb_link_cleanup(struct rte_rawdev *dev)
75 struct ntb_hw *hw = dev->dev_private;
78 if (hw->ntb_ops->spad_write == NULL ||
79 hw->ntb_ops->mw_set_trans == NULL) {
80 NTB_LOG(ERR, "Not supported to clean up link.");
84 /* Clean spad registers. */
85 for (i = 0; i < hw->spad_cnt; i++) {
86 status = (*hw->ntb_ops->spad_write)(dev, i, 0, 0);
88 NTB_LOG(ERR, "Failed to clean local spad.");
91 /* Clear mw so that the peer cannot access local memory. */
92 for (i = 0; i < hw->mw_cnt; i++) {
93 status = (*hw->ntb_ops->mw_set_trans)(dev, i, 0, 0);
95 NTB_LOG(ERR, "Failed to clean mw.");
/* Doorbell interrupt handler. Reads the pending doorbell bits and reacts:
 *   DB0 - peer device came up: read the peer's mw count/sizes from its
 *         scratchpads, re-publish local mw info, ring DB0 back, query link.
 *   DB1 - peer device going down: clean local link state, ack with DB2.
 *   DB2 - peer acknowledged our dev_stop request.
 * NOTE(review): listing is gap-sampled; early-return statements after the
 * NTB_LOG error lines are not visible here.
 */
100 ntb_dev_intr_handler(void *param)
102 struct rte_rawdev *dev = (struct rte_rawdev *)param;
103 struct ntb_hw *hw = dev->dev_private;
104 uint32_t mw_size_h, mw_size_l;
105 uint64_t db_bits = 0;
108 if (hw->ntb_ops->db_read == NULL ||
109 hw->ntb_ops->db_clear == NULL ||
110 hw->ntb_ops->peer_db_set == NULL) {
111 NTB_LOG(ERR, "Doorbell is not supported.");
115 db_bits = (*hw->ntb_ops->db_read)(dev);
117 NTB_LOG(ERR, "No doorbells");
119 /* Doorbell 0 is for peer device ready. */
121 NTB_LOG(DEBUG, "DB0: Peer device is up.");
122 /* Clear received doorbell (bit mask 1 == DB0). */
123 (*hw->ntb_ops->db_clear)(dev, 1);
/*
126 * Peer dev is already up. All mw settings are already done.
 */
132 if (hw->ntb_ops->spad_read == NULL ||
133 hw->ntb_ops->spad_write == NULL) {
134 NTB_LOG(ERR, "Scratchpad is not supported.");
/* Fetch how many memory windows the peer advertises. */
138 hw->peer_mw_cnt = (*hw->ntb_ops->spad_read)
139 (dev, SPAD_NUM_MWS, 0);
/* NOTE(review): rte_zmalloc result not checked in visible lines. */
140 hw->peer_mw_size = rte_zmalloc("uint64_t",
141 hw->peer_mw_cnt * sizeof(uint64_t), 0);
/* Each peer mw size is split across two 32-bit scratchpads (high/low). */
142 for (i = 0; i < hw->mw_cnt; i++) {
143 mw_size_h = (*hw->ntb_ops->spad_read)
144 (dev, SPAD_MW0_SZ_H + 2 * i, 0);
145 mw_size_l = (*hw->ntb_ops->spad_read)
146 (dev, SPAD_MW0_SZ_L + 2 * i, 0);
147 hw->peer_mw_size[i] = ((uint64_t)mw_size_h << 32) |
149 NTB_LOG(DEBUG, "Peer %u mw size: 0x%"PRIx64"", i,
150 hw->peer_mw_size[i]);
/*
 * Handshake with peer. spad_write only works when both
 * devices are up, so write the spads again when DB0 is
 * received, and ring DB0 again for the later device that
 * may have missed the first ring.
 */
161 for (i = 0; i < hw->mw_cnt; i++) {
162 (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS,
164 mw_size_h = hw->mw_size[i] >> 32;
165 (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_H + 2 * i,
168 mw_size_l = hw->mw_size[i];
169 (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_L + 2 * i,
172 (*hw->ntb_ops->peer_db_set)(dev, 0);
174 /* To get the link info. */
175 if (hw->ntb_ops->get_link_status == NULL) {
176 NTB_LOG(ERR, "Not supported to get link status.");
179 (*hw->ntb_ops->get_link_status)(dev);
180 NTB_LOG(INFO, "Link is up. Link speed: %u. Link width: %u",
181 hw->link_speed, hw->link_width);
/* Doorbell 1: peer is shutting down. */
185 if (db_bits & (1 << 1)) {
186 NTB_LOG(DEBUG, "DB1: Peer device is down.");
187 /* Clear received doorbell (bit mask 2 == DB1). */
188 (*hw->ntb_ops->db_clear)(dev, 2);
190 /* Peer device will be down, so clean the local side too. */
191 ntb_link_cleanup(dev);
194 /* Respond to peer's dev_stop request by ringing its DB2. */
195 (*hw->ntb_ops->peer_db_set)(dev, 2);
/* Doorbell 2: peer acknowledged our shutdown request. */
199 if (db_bits & (1 << 2)) {
200 NTB_LOG(DEBUG, "DB2: Peer device agrees dev to be down.");
201 /* Clear received doorbell. */
202 (*hw->ntb_ops->db_clear)(dev, (1 << 2));
/* Queue configuration query — placeholder; no queue support implemented yet. */
209 ntb_queue_conf_get(struct rte_rawdev *dev __rte_unused,
210 uint16_t queue_id __rte_unused,
211 rte_rawdev_obj_t queue_conf __rte_unused)
/* Queue setup — placeholder; no queue support implemented yet. */
216 ntb_queue_setup(struct rte_rawdev *dev __rte_unused,
217 uint16_t queue_id __rte_unused,
218 rte_rawdev_obj_t queue_conf __rte_unused)
/* Queue release — placeholder; no queue support implemented yet. */
224 ntb_queue_release(struct rte_rawdev *dev __rte_unused,
225 uint16_t queue_id __rte_unused)
/* Return the number of configured queue pairs on this device. */
231 ntb_queue_count(struct rte_rawdev *dev)
233 struct ntb_hw *hw = dev->dev_private;
234 return hw->queue_pairs;
/* Test-only enqueue: memcpy each buffer into the peer's mw 0 BAR mapping.
 * Not a FIFO — every buffer is written to the same bar_addr ('size' bytes
 * each, taken from the 'context' argument), so later copies overwrite
 * earlier ones. Real queue-based datapath is still TODO.
 */
238 ntb_enqueue_bufs(struct rte_rawdev *dev,
239 struct rte_rawdev_buf **buffers,
241 rte_rawdev_obj_t context)
243 /* Not FIFO right now. Just for testing memory write. */
244 struct ntb_hw *hw = dev->dev_private;
249 if (hw->ntb_ops->get_peer_mw_addr == NULL)
251 bar_addr = (*hw->ntb_ops->get_peer_mw_addr)(dev, 0);
/* 'context' smuggles the per-buffer copy length as an integer. */
252 size = (size_t)context;
254 for (i = 0; i < count; i++)
255 rte_memcpy(bar_addr, buffers[i]->buf_addr, size);
/* Test-only dequeue: memcpy 'size' bytes from each local mw memzone into the
 * caller's buffers. Not a FIFO; mirrors ntb_enqueue_bufs. 'context' carries
 * the copy length as an integer.
 */
260 ntb_dequeue_bufs(struct rte_rawdev *dev,
261 struct rte_rawdev_buf **buffers,
263 rte_rawdev_obj_t context)
265 /* Not FIFO. Just for testing memory read. */
266 struct ntb_hw *hw = dev->dev_private;
270 size = (size_t)context;
272 for (i = 0; i < count; i++)
273 rte_memcpy(buffers[i]->buf_addr, hw->mz[i]->addr, size);
/* Fill the caller-supplied ntb_attr array (indexed by NTB_*_ID) with
 * name/value string pairs describing topology, link status/speed/width,
 * and mw/db/spad counts.
 * NOTE(review): strncpy with the full buffer length does not guarantee
 * NUL-termination if a name/value exactly fills the buffer — consider
 * snprintf/strlcpy in a full rewrite; left as-is here.
 */
278 ntb_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
280 struct ntb_hw *hw = dev->dev_private;
281 struct ntb_attr *ntb_attrs = dev_info;
283 strncpy(ntb_attrs[NTB_TOPO_ID].name, NTB_TOPO_NAME, NTB_ATTR_NAME_LEN);
/* Topology value depends on hw->topo (switch head not visible in listing). */
285 case NTB_TOPO_B2B_DSD:
286 strncpy(ntb_attrs[NTB_TOPO_ID].value, "B2B DSD",
289 case NTB_TOPO_B2B_USD:
290 strncpy(ntb_attrs[NTB_TOPO_ID].value, "B2B USD",
294 strncpy(ntb_attrs[NTB_TOPO_ID].value, "Unsupported",
298 strncpy(ntb_attrs[NTB_LINK_STATUS_ID].name, NTB_LINK_STATUS_NAME,
300 snprintf(ntb_attrs[NTB_LINK_STATUS_ID].value, NTB_ATTR_VAL_LEN,
301 "%d", hw->link_status);
303 strncpy(ntb_attrs[NTB_SPEED_ID].name, NTB_SPEED_NAME,
305 snprintf(ntb_attrs[NTB_SPEED_ID].value, NTB_ATTR_VAL_LEN,
306 "%d", hw->link_speed);
308 strncpy(ntb_attrs[NTB_WIDTH_ID].name, NTB_WIDTH_NAME,
310 snprintf(ntb_attrs[NTB_WIDTH_ID].value, NTB_ATTR_VAL_LEN,
311 "%d", hw->link_width);
313 strncpy(ntb_attrs[NTB_MW_CNT_ID].name, NTB_MW_CNT_NAME,
315 snprintf(ntb_attrs[NTB_MW_CNT_ID].value, NTB_ATTR_VAL_LEN,
318 strncpy(ntb_attrs[NTB_DB_CNT_ID].name, NTB_DB_CNT_NAME,
320 snprintf(ntb_attrs[NTB_DB_CNT_ID].value, NTB_ATTR_VAL_LEN,
323 strncpy(ntb_attrs[NTB_SPAD_CNT_ID].name, NTB_SPAD_CNT_NAME,
325 snprintf(ntb_attrs[NTB_SPAD_CNT_ID].value, NTB_ATTR_VAL_LEN,
/* Device configure — placeholder; nothing to configure until queues exist. */
330 ntb_dev_configure(const struct rte_rawdev *dev __rte_unused,
331 rte_rawdev_obj_t config __rte_unused)
/* Start the device: allocate the local memzone-pointer array and map every
 * memory window to the peer via ntb_set_mw. Queue init is still TODO.
 * NOTE(review): rte_zmalloc return value is not checked in the visible lines.
 */
337 ntb_dev_start(struct rte_rawdev *dev)
339 struct ntb_hw *hw = dev->dev_private;
342 /* TODO: init queues and start queues. */
344 /* Map memory of bar_size to remote. */
345 hw->mz = rte_zmalloc("struct rte_memzone *",
346 hw->mw_cnt * sizeof(struct rte_memzone *), 0);
347 for (i = 0; i < hw->mw_cnt; i++) {
348 ret = ntb_set_mw(dev, i, hw->mw_size[i]);
350 NTB_LOG(ERR, "Fail to set mw.");
/* Stop the device: clean up link state, ring the peer's DB1 to announce
 * shutdown, wait (bounded) for the peer's DB2 acknowledgment, then mask all
 * doorbells. Queue stop is still TODO.
 */
361 ntb_dev_stop(struct rte_rawdev *dev)
363 struct ntb_hw *hw = dev->dev_private;
367 /* TODO: stop rx/tx queues. */
/* Nothing to tear down if the peer never came up. */
369 if (!hw->peer_dev_up)
372 ntb_link_cleanup(dev);
374 /* Notify the peer that the device will be down. */
375 if (hw->ntb_ops->peer_db_set == NULL) {
376 NTB_LOG(ERR, "Peer doorbell setting is not supported.");
379 status = (*hw->ntb_ops->peer_db_set)(dev, 1);
381 NTB_LOG(ERR, "Failed to tell peer device is down.");
/*
 * Set timeout as 1s in case the peer was stopped accidentally
 * without any notification.
 */
391 /* Wait for cleanup work to be done before clearing the db mask. */
392 while (hw->peer_dev_up && time_out) {
398 /* Clear doorbells mask. */
399 if (hw->ntb_ops->db_set_mask == NULL) {
400 NTB_LOG(ERR, "Doorbell mask setting is not supported.");
/* Mask covers all db_cnt doorbell bits. */
403 status = (*hw->ntb_ops->db_set_mask)(dev,
404 (((uint64_t)1 << hw->db_cnt) - 1));
406 NTB_LOG(ERR, "Failed to clear doorbells.");
/* Close the device: disable event fds and interrupt-vector mapping, disable
 * the uio interrupt, and unregister the doorbell interrupt callback.
 * Queue teardown is still TODO.
 */
412 ntb_dev_close(struct rte_rawdev *dev)
414 struct ntb_hw *hw = dev->dev_private;
415 struct rte_intr_handle *intr_handle;
421 /* TODO: free queues. */
423 intr_handle = &hw->pci_dev->intr_handle;
424 /* Clean datapath event and vec mapping */
425 rte_intr_efd_disable(intr_handle);
426 if (intr_handle->intr_vec) {
427 rte_free(intr_handle->intr_vec);
428 intr_handle->intr_vec = NULL;
430 /* Disable uio intr before callback unregister */
431 rte_intr_disable(intr_handle);
433 /* Unregister callback func to eal lib */
434 rte_intr_callback_unregister(intr_handle,
435 ntb_dev_intr_handler, dev);
/* Device reset — placeholder; no reset behavior implemented yet. */
441 ntb_dev_reset(struct rte_rawdev *rawdev __rte_unused)
/* Set a writable attribute by name. Only user scratchpad attributes
 * ("spad_user_N") are settable: the trailing index selects an entry in
 * hw->spad_user_list and the value is written to that scratchpad.
 * NOTE(review): atoi() result is used as an index without a visible bounds
 * check against the spad_user_list size — confirm in the full file.
 */
447 ntb_attr_set(struct rte_rawdev *dev, const char *attr_name,
450 struct ntb_hw *hw = dev->dev_private;
453 if (dev == NULL || attr_name == NULL) {
454 NTB_LOG(ERR, "Invalid arguments for setting attributes");
458 if (!strncmp(attr_name, NTB_SPAD_USER, NTB_SPAD_USER_LEN)) {
459 if (hw->ntb_ops->spad_write == NULL)
461 index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
462 (*hw->ntb_ops->spad_write)(dev, hw->spad_user_list[index],
464 NTB_LOG(INFO, "Set attribute (%s) Value (%" PRIu64 ")",
465 attr_name, attr_value);
469 /* Attribute not found. */
470 NTB_LOG(ERR, "Attribute not found.");
/* Get an attribute value by name. Fixed attributes (topology, link status/
 * speed/width, mw/db/spad counts) are read straight from hw state; user
 * scratchpad attributes ("spad_user_N") are read from the hardware spad
 * selected via hw->spad_user_list.
 * NOTE(review): as in ntb_attr_set, the atoi() index lacks a visible bounds
 * check — confirm in the full file.
 */
475 ntb_attr_get(struct rte_rawdev *dev, const char *attr_name,
476 uint64_t *attr_value)
478 struct ntb_hw *hw = dev->dev_private;
481 if (dev == NULL || attr_name == NULL || attr_value == NULL) {
482 NTB_LOG(ERR, "Invalid arguments for getting attributes");
486 if (!strncmp(attr_name, NTB_TOPO_NAME, NTB_ATTR_NAME_LEN)) {
487 *attr_value = hw->topo;
488 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
489 attr_name, *attr_value);
493 if (!strncmp(attr_name, NTB_LINK_STATUS_NAME, NTB_ATTR_NAME_LEN)) {
494 *attr_value = hw->link_status;
495 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
496 attr_name, *attr_value);
500 if (!strncmp(attr_name, NTB_SPEED_NAME, NTB_ATTR_NAME_LEN)) {
501 *attr_value = hw->link_speed;
502 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
503 attr_name, *attr_value);
507 if (!strncmp(attr_name, NTB_WIDTH_NAME, NTB_ATTR_NAME_LEN)) {
508 *attr_value = hw->link_width;
509 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
510 attr_name, *attr_value);
514 if (!strncmp(attr_name, NTB_MW_CNT_NAME, NTB_ATTR_NAME_LEN)) {
515 *attr_value = hw->mw_cnt;
516 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
517 attr_name, *attr_value);
521 if (!strncmp(attr_name, NTB_DB_CNT_NAME, NTB_ATTR_NAME_LEN)) {
522 *attr_value = hw->db_cnt;
523 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
524 attr_name, *attr_value);
528 if (!strncmp(attr_name, NTB_SPAD_CNT_NAME, NTB_ATTR_NAME_LEN)) {
529 *attr_value = hw->spad_cnt;
530 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
531 attr_name, *attr_value);
535 if (!strncmp(attr_name, NTB_SPAD_USER, NTB_SPAD_USER_LEN)) {
536 if (hw->ntb_ops->spad_read == NULL)
538 index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
539 *attr_value = (*hw->ntb_ops->spad_read)(dev,
540 hw->spad_user_list[index], 0);
541 NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
542 attr_name, *attr_value);
546 /* Attribute not found. */
547 NTB_LOG(ERR, "Attribute not found.");
/* Extended stats get — placeholder; no xstats implemented yet. */
552 ntb_xstats_get(const struct rte_rawdev *dev __rte_unused,
553 const unsigned int ids[] __rte_unused,
554 uint64_t values[] __rte_unused,
555 unsigned int n __rte_unused)
/* Extended stats names — placeholder; no xstats implemented yet. */
561 ntb_xstats_get_names(const struct rte_rawdev *dev __rte_unused,
562 struct rte_rawdev_xstats_name *xstats_names __rte_unused,
563 unsigned int size __rte_unused)
/* Extended stat lookup by name — placeholder; no xstats implemented yet. */
569 ntb_xstats_get_by_name(const struct rte_rawdev *dev __rte_unused,
570 const char *name __rte_unused,
571 unsigned int *id __rte_unused)
/* Extended stats reset — placeholder; no xstats implemented yet. */
577 ntb_xstats_reset(struct rte_rawdev *dev __rte_unused,
578 const uint32_t ids[] __rte_unused,
579 uint32_t nb_ids __rte_unused)
/* rawdev ops vtable wiring the rte_rawdev API to this PMD's handlers. */
584 static const struct rte_rawdev_ops ntb_ops = {
585 .dev_info_get = ntb_dev_info_get,
586 .dev_configure = ntb_dev_configure,
587 .dev_start = ntb_dev_start,
588 .dev_stop = ntb_dev_stop,
589 .dev_close = ntb_dev_close,
590 .dev_reset = ntb_dev_reset,
592 .queue_def_conf = ntb_queue_conf_get,
593 .queue_setup = ntb_queue_setup,
594 .queue_release = ntb_queue_release,
595 .queue_count = ntb_queue_count,
597 .enqueue_bufs = ntb_enqueue_bufs,
598 .dequeue_bufs = ntb_dequeue_bufs,
600 .attr_get = ntb_attr_get,
601 .attr_set = ntb_attr_set,
603 .xstats_get = ntb_xstats_get,
604 .xstats_get_names = ntb_xstats_get_names,
605 .xstats_get_by_name = ntb_xstats_get_by_name,
606 .xstats_reset = ntb_xstats_reset,
/* One-time hardware init at probe: select the hw-specific ops table by PCI
 * device ID, init the device and bring the link up, set up doorbell
 * interrupts (with a uio fallback that binds all vectors to 0), publish the
 * local mw count/sizes to the peer via scratchpads, and ring DB0 to announce
 * that this side is ready.
 * NOTE(review): listing is gap-sampled; error-return statements after the
 * NTB_LOG lines are not visible here.
 */
610 ntb_init_hw(struct rte_rawdev *dev, struct rte_pci_device *pci_dev)
612 struct ntb_hw *hw = dev->dev_private;
613 struct rte_intr_handle *intr_handle;
617 hw->pci_dev = pci_dev;
619 hw->link_status = NTB_LINK_DOWN;
620 hw->link_speed = NTB_SPEED_NONE;
621 hw->link_width = NTB_WIDTH_NONE;
/* Pick the hardware-specific ops for the probed device. */
623 switch (pci_dev->id.device_id) {
624 case NTB_INTEL_DEV_ID_B2B_SKX:
625 hw->ntb_ops = &intel_ntb_ops;
628 NTB_LOG(ERR, "Not supported device.");
632 if (hw->ntb_ops->ntb_dev_init == NULL)
634 ret = (*hw->ntb_ops->ntb_dev_init)(dev);
636 NTB_LOG(ERR, "Unable to init ntb dev.");
640 if (hw->ntb_ops->set_link == NULL)
642 ret = (*hw->ntb_ops->set_link)(dev, 1);
647 hw->db_valid_mask = RTE_LEN2MASK(hw->db_cnt, uint64_t);
649 intr_handle = &pci_dev->intr_handle;
650 /* Register callback func to eal lib */
651 rte_intr_callback_register(intr_handle,
652 ntb_dev_intr_handler, dev);
/* One event fd per doorbell for MSI-X. */
654 ret = rte_intr_efd_enable(intr_handle, hw->db_cnt);
/* To clarify, the interrupt for each doorbell is already mapped
 * by default for intel gen3. They are mapped to msix vec 1-32,
 * and hardware intr is mapped to 0. Map all to 0 for uio.
 */
662 if (!rte_intr_cap_multiple(intr_handle)) {
663 for (i = 0; i < hw->db_cnt; i++) {
664 if (hw->ntb_ops->vector_bind == NULL)
666 ret = (*hw->ntb_ops->vector_bind)(dev, i, 0);
672 if (hw->ntb_ops->db_set_mask == NULL ||
673 hw->ntb_ops->peer_db_set == NULL) {
674 NTB_LOG(ERR, "Doorbell is not supported.");
678 ret = (*hw->ntb_ops->db_set_mask)(dev, hw->db_mask);
680 NTB_LOG(ERR, "Unable to enable intr for all dbs.");
684 /* Enable uio intr after callback register. */
685 rte_intr_enable(intr_handle);
687 if (hw->ntb_ops->spad_write == NULL) {
688 NTB_LOG(ERR, "Scratchpad is not supported.");
691 /* Tell peer the mw_cnt of local side. */
692 ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS, 1, hw->mw_cnt);
694 NTB_LOG(ERR, "Failed to tell peer mw count.");
698 /* Tell peer each mw size on local side (split high/low 32 bits). */
699 for (i = 0; i < hw->mw_cnt; i++) {
700 NTB_LOG(DEBUG, "Local %u mw size: 0x%"PRIx64"", i,
702 val = hw->mw_size[i] >> 32;
703 ret = (*hw->ntb_ops->spad_write)
704 (dev, SPAD_MW0_SZ_H + 2 * i, 1, val);
706 NTB_LOG(ERR, "Failed to tell peer mw size.");
710 val = hw->mw_size[i];
711 ret = (*hw->ntb_ops->spad_write)
712 (dev, SPAD_MW0_SZ_L + 2 * i, 1, val);
714 NTB_LOG(ERR, "Failed to tell peer mw size.");
719 /* Ring doorbell 0 to tell peer the device is ready. */
720 ret = (*hw->ntb_ops->peer_db_set)(dev, 0);
722 NTB_LOG(ERR, "Failed to tell peer device is probed.");
/* Allocate and initialize an NTB rawdev for the given PCI device on the
 * given NUMA node. The rawdev name encodes the PCI bus/devid/function.
 * On init failure the allocated rawdev is released (visible at line 770).
 */
730 ntb_create(struct rte_pci_device *pci_dev, int socket_id)
732 char name[RTE_RAWDEV_NAME_MAX_LEN];
733 struct rte_rawdev *rawdev = NULL;
736 if (pci_dev == NULL) {
737 NTB_LOG(ERR, "Invalid pci_dev.");
741 memset(name, 0, sizeof(name));
742 snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "NTB:%x:%02x.%x",
743 pci_dev->addr.bus, pci_dev->addr.devid,
744 pci_dev->addr.function);
746 NTB_LOG(INFO, "Init %s on NUMA node %d", name, socket_id);
748 /* Allocate device structure. */
749 rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct ntb_hw),
751 if (rawdev == NULL) {
752 NTB_LOG(ERR, "Unable to allocate rawdev.");
756 rawdev->dev_ops = &ntb_ops;
757 rawdev->device = &pci_dev->device;
758 rawdev->driver_name = pci_dev->driver->driver.name;
760 ret = ntb_init_hw(rawdev, pci_dev);
762 NTB_LOG(ERR, "Unable to init ntb hw.");
/* Error path: undo the rawdev allocation. */
770 rte_rawdev_pmd_release(rawdev);
/* Look up the rawdev created for this PCI device by its derived name and
 * release it. Counterpart of ntb_create; called from ntb_remove.
 */
776 ntb_destroy(struct rte_pci_device *pci_dev)
778 char name[RTE_RAWDEV_NAME_MAX_LEN];
779 struct rte_rawdev *rawdev;
782 if (pci_dev == NULL) {
783 NTB_LOG(ERR, "Invalid pci_dev.");
/* Must match the name format used in ntb_create. */
788 memset(name, 0, sizeof(name));
789 snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "NTB:%x:%02x.%x",
790 pci_dev->addr.bus, pci_dev->addr.devid,
791 pci_dev->addr.function);
793 NTB_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());
795 rawdev = rte_rawdev_pmd_get_named_dev(name);
796 if (rawdev == NULL) {
797 NTB_LOG(ERR, "Invalid device name (%s)", name);
802 ret = rte_rawdev_pmd_release(rawdev);
804 NTB_LOG(ERR, "Failed to destroy ntb rawdev.");
/* PCI probe hook: create the rawdev on the calling lcore's NUMA node. */
810 ntb_probe(struct rte_pci_driver *pci_drv __rte_unused,
811 struct rte_pci_device *pci_dev)
813 return ntb_create(pci_dev, rte_socket_id());
/* PCI remove hook: tear down the rawdev created at probe time. */
817 ntb_remove(struct rte_pci_device *pci_dev)
819 return ntb_destroy(pci_dev);
/* PCI driver descriptor: match table, BAR-mapping requirement, probe/remove. */
823 static struct rte_pci_driver rte_ntb_pmd = {
824 .id_table = pci_id_ntb_map,
825 .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
827 .remove = ntb_remove,
/* Register the PMD with the EAL PCI bus, export the ID table for pmdinfo,
 * and declare the compatible kernel modules.
 */
830 RTE_PMD_REGISTER_PCI(raw_ntb, rte_ntb_pmd);
831 RTE_PMD_REGISTER_PCI_TABLE(raw_ntb, pci_id_ntb_map);
832 RTE_PMD_REGISTER_KMOD_DEP(raw_ntb, "* igb_uio | uio_pci_generic | vfio-pci");
/* Constructor: register the PMD log type and default it to DEBUG level. */
834 RTE_INIT(ntb_init_log)
836 ntb_logtype = rte_log_register("pmd.raw.ntb");
837 if (ntb_logtype >= 0)
838 rte_log_set_level(ntb_logtype, RTE_LOG_DEBUG);