/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "ntb_hw_intel.h"
#include "rte_pmd_ntb.h"
#include "ntb.h"

int ntb_logtype;

static const struct rte_pci_id pci_id_ntb_map[] = {
	{ RTE_PCI_DEVICE(NTB_INTEL_VENDOR_ID, NTB_INTEL_DEV_ID_B2B_SKX) },
	{ .vendor_id = 0, /* sentinel */ },
};
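/*
 * Overview of the control plane as used by the code below: the two NTB peers
 * exchange configuration through scratchpad registers (spad_write/spad_read),
 * signal events to each other through doorbells (peer_db_set/db_read/
 * db_clear), and expose local memory to the peer through memory window (MW)
 * translation (mw_set_trans). The helpers below implement link cleanup and
 * the handshake built on these primitives.
 */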
ntb_link_cleanup(struct rte_rawdev *dev)
	struct ntb_hw *hw = dev->dev_private;

	if (hw->ntb_ops->spad_write == NULL ||
	    hw->ntb_ops->mw_set_trans == NULL) {
		NTB_LOG(ERR, "Link cleanup is not supported.");

	/* Clean the spad registers. */
	for (i = 0; i < hw->spad_cnt; i++) {
		status = (*hw->ntb_ops->spad_write)(dev, i, 0, 0);
			NTB_LOG(ERR, "Failed to clean local spad.");

	/* Clear the mw so that the peer cannot access local memory. */
	for (i = 0; i < hw->used_mw_num; i++) {
		status = (*hw->ntb_ops->mw_set_trans)(dev, i, 0, 0);
			NTB_LOG(ERR, "Failed to clean mw.");
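/*
 * ntb_handshake_work() publishes the local configuration to the peer through
 * the scratchpad registers: number of MWs, each MW size split into high/low
 * 32-bit halves, queue size, number of queue pairs, number of used MWs and
 * the base address of each shared memzone. It then programs the MW
 * translation so the peer can reach that memory and rings doorbell 0 to tell
 * the peer that this side is ready.
 */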
ntb_handshake_work(const struct rte_rawdev *dev)
	struct ntb_hw *hw = dev->dev_private;

	if (hw->ntb_ops->spad_write == NULL ||
	    hw->ntb_ops->mw_set_trans == NULL) {
		NTB_LOG(ERR, "Scratchpad/MW setting is not supported.");

	/* Tell the peer the mw info of the local side. */
	ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS, 1, hw->mw_cnt);

	for (i = 0; i < hw->mw_cnt; i++) {
		NTB_LOG(INFO, "Local %u mw size: 0x%"PRIx64"", i,
		val = hw->mw_size[i] >> 32;
		ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_H + 2 * i,

		ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_L + 2 * i,

	/* Tell the peer about the queue info and map memory to the peer. */
	ret = (*hw->ntb_ops->spad_write)(dev, SPAD_Q_SZ, 1, hw->queue_size);

	ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_QPS, 1,

	ret = (*hw->ntb_ops->spad_write)(dev, SPAD_USED_MWS, 1,

	for (i = 0; i < hw->used_mw_num; i++) {
		val = (uint64_t)(size_t)(hw->mz[i]->addr) >> 32;
		ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_BA_H + 2 * i,

		val = (uint64_t)(size_t)(hw->mz[i]->addr);
		ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_BA_L + 2 * i,

	for (i = 0; i < hw->used_mw_num; i++) {
		ret = (*hw->ntb_ops->mw_set_trans)(dev, i, hw->mz[i]->iova,

	/* Ring doorbell 0 to tell the peer the device is ready. */
	ret = (*hw->ntb_ops->peer_db_set)(dev, 0);
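/*
 * Interrupt handler, invoked when the peer rings one of the local doorbells:
 * DB0 means the peer device is up, so verify that the peer's MW count and
 * sizes match the local ones, redo the handshake (scratchpad writes and MW
 * translation only take effect once both sides are up) and query the link
 * status. DB1 means the peer is going down, so clean up the local side and
 * acknowledge with DB2. DB2 is the peer's acknowledgement of a local
 * dev_stop request.
 */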
ntb_dev_intr_handler(void *param)
	struct rte_rawdev *dev = (struct rte_rawdev *)param;
	struct ntb_hw *hw = dev->dev_private;
	uint32_t val_h, val_l;
	uint64_t peer_mw_size;
	uint64_t db_bits = 0;

	if (hw->ntb_ops->db_read == NULL ||
	    hw->ntb_ops->db_clear == NULL ||
	    hw->ntb_ops->peer_db_set == NULL) {
		NTB_LOG(ERR, "Doorbell is not supported.");

	db_bits = (*hw->ntb_ops->db_read)(dev);
		NTB_LOG(ERR, "No doorbells");

	/* Doorbell 0 is for peer device ready. */
		NTB_LOG(INFO, "DB0: Peer device is up.");
		/* Clear the received doorbell. */
		(*hw->ntb_ops->db_clear)(dev, 1);

		 * Peer dev is already up. All mw settings are already done.

		if (hw->ntb_ops->spad_read == NULL) {
			NTB_LOG(ERR, "Scratchpad read is not supported.");

		/* Check if the mw setting on the peer is the same as the local one. */
		peer_mw_cnt = (*hw->ntb_ops->spad_read)(dev, SPAD_NUM_MWS, 0);
		if (peer_mw_cnt != hw->mw_cnt) {
			NTB_LOG(ERR, "Both mw cnt must be the same.");

		for (i = 0; i < hw->mw_cnt; i++) {
			val_h = (*hw->ntb_ops->spad_read)
				(dev, SPAD_MW0_SZ_H + 2 * i, 0);
			val_l = (*hw->ntb_ops->spad_read)
				(dev, SPAD_MW0_SZ_L + 2 * i, 0);
			peer_mw_size = ((uint64_t)val_h << 32) | val_l;
			NTB_LOG(DEBUG, "Peer %u mw size: 0x%"PRIx64"", i,
			if (peer_mw_size != hw->mw_size[i]) {
				NTB_LOG(ERR, "Mw config must be the same.");

		 * Handshake with the peer. Spad_write & mw_set_trans only work
		 * when both devices are up, so write the spad again when the
		 * doorbell is received, and set the doorbell again for the
		 * device that comes up later and may have missed it.
		if (ntb_handshake_work(dev) < 0) {
			NTB_LOG(ERR, "Handshake work failed.");

		/* Get the link info. */
		if (hw->ntb_ops->get_link_status == NULL) {
			NTB_LOG(ERR, "Getting link status is not supported.");
		(*hw->ntb_ops->get_link_status)(dev);
		NTB_LOG(INFO, "Link is up. Link speed: %u. Link width: %u",
			hw->link_speed, hw->link_width);

	if (db_bits & (1 << 1)) {
		NTB_LOG(INFO, "DB1: Peer device is down.");
		/* Clear the received doorbell. */
		(*hw->ntb_ops->db_clear)(dev, 2);

		/* The peer device will be down, so clean up the local side too. */
		ntb_link_cleanup(dev);

		/* Respond to the peer's dev_stop request. */
		(*hw->ntb_ops->peer_db_set)(dev, 2);

	if (db_bits & (1 << 2)) {
		NTB_LOG(INFO, "DB2: Peer device agrees dev to be down.");
		/* Clear the received doorbell. */
		(*hw->ntb_ops->db_clear)(dev, (1 << 2));
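/*
 * Data-path queue management. Each queue pair has a software RX queue and a
 * software TX queue; both keep an sw_ring of mbuf entries sized by the
 * configured descriptor count, and both are attached in ntb_queue_init() to
 * rings that live in the shared memory negotiated during the handshake.
 */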
ntb_queue_conf_get(struct rte_rawdev *dev,
		   uint16_t queue_id,
		   rte_rawdev_obj_t queue_conf)
	struct ntb_queue_conf *q_conf = queue_conf;
	struct ntb_hw *hw = dev->dev_private;

	q_conf->tx_free_thresh = hw->tx_queues[queue_id]->tx_free_thresh;
	q_conf->nb_desc = hw->rx_queues[queue_id]->nb_rx_desc;
	q_conf->rx_mp = hw->rx_queues[queue_id]->mpool;
ntb_rxq_release_mbufs(struct ntb_rx_queue *q)

	if (!q || !q->sw_ring) {
		NTB_LOG(ERR, "Pointer to rxq or sw_ring is NULL");

	for (i = 0; i < q->nb_rx_desc; i++) {
		if (q->sw_ring[i].mbuf) {
			rte_pktmbuf_free_seg(q->sw_ring[i].mbuf);
			q->sw_ring[i].mbuf = NULL;

ntb_rxq_release(struct ntb_rx_queue *rxq)
		NTB_LOG(ERR, "Pointer to rxq is NULL");

	ntb_rxq_release_mbufs(rxq);

	rte_free(rxq->sw_ring);
ntb_rxq_setup(struct rte_rawdev *dev,
	      uint16_t qp_id,
	      rte_rawdev_obj_t queue_conf)
	struct ntb_queue_conf *rxq_conf = queue_conf;
	struct ntb_hw *hw = dev->dev_private;
	struct ntb_rx_queue *rxq;

	/* Allocate the RX queue data structure. */
	rxq = rte_zmalloc_socket("ntb rx queue",
				 sizeof(struct ntb_rx_queue),
		NTB_LOG(ERR, "Failed to allocate memory for "
			"rx queue data structure.");

	if (rxq_conf->rx_mp == NULL) {
		NTB_LOG(ERR, "Invalid null mempool pointer.");
	rxq->nb_rx_desc = rxq_conf->nb_desc;
	rxq->mpool = rxq_conf->rx_mp;
	rxq->port_id = dev->dev_id;
	rxq->queue_id = qp_id;

	/* Allocate the software ring. */
		rte_zmalloc_socket("ntb rx sw ring",
				   sizeof(struct ntb_rx_entry) *
		ntb_rxq_release(rxq);

		NTB_LOG(ERR, "Failed to allocate memory for SW ring");

	hw->rx_queues[qp_id] = rxq;
ntb_txq_release_mbufs(struct ntb_tx_queue *q)

	if (!q || !q->sw_ring) {
		NTB_LOG(ERR, "Pointer to txq or sw_ring is NULL");

	for (i = 0; i < q->nb_tx_desc; i++) {
		if (q->sw_ring[i].mbuf) {
			rte_pktmbuf_free_seg(q->sw_ring[i].mbuf);
			q->sw_ring[i].mbuf = NULL;

ntb_txq_release(struct ntb_tx_queue *txq)
		NTB_LOG(ERR, "Pointer to txq is NULL");

	ntb_txq_release_mbufs(txq);

	rte_free(txq->sw_ring);
ntb_txq_setup(struct rte_rawdev *dev,
	      uint16_t qp_id,
	      rte_rawdev_obj_t queue_conf)
	struct ntb_queue_conf *txq_conf = queue_conf;
	struct ntb_hw *hw = dev->dev_private;
	struct ntb_tx_queue *txq;

	/* Allocate the TX queue data structure. */
	txq = rte_zmalloc_socket("ntb tx queue",
				 sizeof(struct ntb_tx_queue),
		NTB_LOG(ERR, "Failed to allocate memory for "
			"tx queue structure");

	txq->nb_tx_desc = txq_conf->nb_desc;
	txq->port_id = dev->dev_id;
	txq->queue_id = qp_id;

	/* Allocate the software ring. */
		rte_zmalloc_socket("ntb tx sw ring",
				   sizeof(struct ntb_tx_entry) *
		ntb_txq_release(txq);

		NTB_LOG(ERR, "Failed to allocate memory for SW TX ring");

	prev = txq->nb_tx_desc - 1;
	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->sw_ring[i].mbuf = NULL;
		txq->sw_ring[i].last_id = i;
		txq->sw_ring[prev].next_id = i;

	txq->tx_free_thresh = txq_conf->tx_free_thresh ?
			      txq_conf->tx_free_thresh :
			      NTB_DFLT_TX_FREE_THRESH;
	if (txq->tx_free_thresh >= txq->nb_tx_desc - 3) {
		NTB_LOG(ERR, "tx_free_thresh must be less than nb_desc - 3. "
			"(tx_free_thresh=%u qp_id=%u)", txq->tx_free_thresh,

	hw->tx_queues[qp_id] = txq;
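/*
 * The sw_ring entries above are linked into a ring via next_id/last_id
 * (each entry's predecessor points to it), presumably so the transmit path
 * (not shown here) can walk completions in order. tx_free_thresh defaults to
 * NTB_DFLT_TX_FREE_THRESH and must stay below nb_desc - 3; it bounds how
 * many used TX descriptors may accumulate before they are reclaimed.
 */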
ntb_queue_setup(struct rte_rawdev *dev,
		uint16_t queue_id,
		rte_rawdev_obj_t queue_conf)
	struct ntb_hw *hw = dev->dev_private;

	if (queue_id >= hw->queue_pairs)

	ret = ntb_txq_setup(dev, queue_id, queue_conf);

	ret = ntb_rxq_setup(dev, queue_id, queue_conf);

ntb_queue_release(struct rte_rawdev *dev, uint16_t queue_id)
	struct ntb_hw *hw = dev->dev_private;

	if (queue_id >= hw->queue_pairs)

	ntb_txq_release(hw->tx_queues[queue_id]);
	hw->tx_queues[queue_id] = NULL;
	ntb_rxq_release(hw->rx_queues[queue_id]);
	hw->rx_queues[queue_id] = NULL;

ntb_queue_count(struct rte_rawdev *dev)
	struct ntb_hw *hw = dev->dev_private;
	return hw->queue_pairs;
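/*
 * Shared-memory layout used by ntb_queue_init() below: queue pair qp_id owns
 * a slice of hdr_size_per_queue bytes at the start of the first shared
 * memzone and, mirrored through memory window 0, at the same offset in the
 * peer's BAR. Each slice holds an ntb_header followed by queue_size
 * descriptor entries and queue_size used entries. The RX side writes its
 * descriptor ring and avail count into the remote header (i.e. into the
 * peer's memory) and consumes the used ring and used count from the local
 * header; the TX side is wired the opposite way.
 */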
ntb_queue_init(struct rte_rawdev *dev, uint16_t qp_id)
	struct ntb_hw *hw = dev->dev_private;
	struct ntb_rx_queue *rxq = hw->rx_queues[qp_id];
	struct ntb_tx_queue *txq = hw->tx_queues[qp_id];
	volatile struct ntb_header *local_hdr;
	struct ntb_header *remote_hdr;
	uint16_t q_size = hw->queue_size;

	if (hw->ntb_ops->get_peer_mw_addr == NULL) {
		NTB_LOG(ERR, "Getting peer mw addr is not supported.");

	/* Put queue info into the start of shared memory. */
	hdr_offset = hw->hdr_size_per_queue * qp_id;
	local_hdr = (volatile struct ntb_header *)
		    ((size_t)hw->mz[0]->addr + hdr_offset);
	bar_addr = (*hw->ntb_ops->get_peer_mw_addr)(dev, 0);
	if (bar_addr == NULL)
	remote_hdr = (struct ntb_header *)
		     ((size_t)bar_addr + hdr_offset);

	rxq->rx_desc_ring = (struct ntb_desc *)
			    (&remote_hdr->desc_ring);
	rxq->rx_used_ring = (volatile struct ntb_used *)
			    (&local_hdr->desc_ring[q_size]);
	rxq->avail_cnt = &remote_hdr->avail_cnt;
	rxq->used_cnt = &local_hdr->used_cnt;

	for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mpool);
		if (unlikely(!mbuf)) {
			NTB_LOG(ERR, "Failed to allocate mbuf for RX");
		mbuf->port = dev->dev_id;

		rxq->sw_ring[i].mbuf = mbuf;

		rxq->rx_desc_ring[i].addr = rte_pktmbuf_mtod(mbuf, size_t);
		rxq->rx_desc_ring[i].len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;

	*rxq->avail_cnt = rxq->nb_rx_desc - 1;
	rxq->last_avail = rxq->nb_rx_desc - 1;

	txq->tx_desc_ring = (volatile struct ntb_desc *)
			    (&local_hdr->desc_ring);
	txq->tx_used_ring = (struct ntb_used *)
			    (&remote_hdr->desc_ring[q_size]);
	txq->avail_cnt = &local_hdr->avail_cnt;
	txq->used_cnt = &remote_hdr->used_cnt;

	txq->nb_tx_free = txq->nb_tx_desc - 1;
ntb_enqueue_bufs(struct rte_rawdev *dev,
		 struct rte_rawdev_buf **buffers,
		 unsigned int count,
		 rte_rawdev_obj_t context)
	/* Not FIFO right now. Just for testing memory write. */
	struct ntb_hw *hw = dev->dev_private;

	if (hw->ntb_ops->get_peer_mw_addr == NULL)
	bar_addr = (*hw->ntb_ops->get_peer_mw_addr)(dev, 0);
	size = (size_t)context;

	for (i = 0; i < count; i++)
		rte_memcpy(bar_addr, buffers[i]->buf_addr, size);

ntb_dequeue_bufs(struct rte_rawdev *dev,
		 struct rte_rawdev_buf **buffers,
		 unsigned int count,
		 rte_rawdev_obj_t context)
	/* Not FIFO. Just for testing memory read. */
	struct ntb_hw *hw = dev->dev_private;

	size = (size_t)context;

	for (i = 0; i < count; i++)
		rte_memcpy(buffers[i]->buf_addr, hw->mz[i]->addr, size);
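/*
 * ntb_dev_info_get() tells the application how much shared memory to reserve
 * for the queue headers: hdr_size_per_queue is the cache-line-aligned size
 * of one ntb_header plus queue_size descriptor and used entries, and
 * ntb_hdr_size is that amount multiplied by the number of queue pairs.
 * mw_size_align is only set for Intel devices, whose MW base addresses must
 * be EMBAR-size aligned.
 */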
ntb_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
	struct ntb_hw *hw = dev->dev_private;
	struct ntb_dev_info *info = dev_info;

	info->mw_cnt = hw->mw_cnt;
	info->mw_size = hw->mw_size;

	/*
	 * Intel hardware requires that the mapped memory base address be
	 * aligned with EMBARSZ, and it needs a contiguous memzone.
	 */
	info->mw_size_align = (uint8_t)(hw->pci_dev->id.vendor_id ==
					NTB_INTEL_VENDOR_ID);

	if (!hw->queue_size || !hw->queue_pairs) {
		NTB_LOG(ERR, "Queue size and queue number are not set.");

	hw->hdr_size_per_queue = RTE_ALIGN(sizeof(struct ntb_header) +
					   hw->queue_size * sizeof(struct ntb_desc) +
					   hw->queue_size * sizeof(struct ntb_used),
					   RTE_CACHE_LINE_SIZE);
	info->ntb_hdr_size = hw->hdr_size_per_queue * hw->queue_pairs;
ntb_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
	struct ntb_dev_config *conf = config;
	struct ntb_hw *hw = dev->dev_private;

	hw->queue_pairs = conf->num_queues;
	hw->queue_size = conf->queue_size;
	hw->used_mw_num = conf->mz_num;
	hw->mz = conf->mz_list;
	hw->rx_queues = rte_zmalloc("ntb_rx_queues",
				    sizeof(struct ntb_rx_queue *) * hw->queue_pairs, 0);
	hw->tx_queues = rte_zmalloc("ntb_tx_queues",
				    sizeof(struct ntb_tx_queue *) * hw->queue_pairs, 0);

	/* Start handshake with the peer. */
	ret = ntb_handshake_work(dev);
		rte_free(hw->rx_queues);
		rte_free(hw->tx_queues);
		hw->rx_queues = NULL;
		hw->tx_queues = NULL;
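/*
 * Device start requires the link to be up and the peer to have completed its
 * handshake. Every queue pair is initialized, then the peer's queue size,
 * queue-pair count and used-MW count are read back from the scratchpads and
 * must match the local configuration; finally the peer's MW base addresses
 * are recorded for use on the data path.
 */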
ntb_dev_start(struct rte_rawdev *dev)
	struct ntb_hw *hw = dev->dev_private;
	uint32_t peer_base_l, peer_val;
	uint64_t peer_base_h;

	if (!hw->link_status || !hw->peer_dev_up)

	for (i = 0; i < hw->queue_pairs; i++) {
		ret = ntb_queue_init(dev, i);
			NTB_LOG(ERR, "Failed to init queue.");

	hw->peer_mw_base = rte_zmalloc("ntb_peer_mw_base", hw->mw_cnt *
				       sizeof(uint64_t), 0);

	if (hw->ntb_ops->spad_read == NULL) {

	peer_val = (*hw->ntb_ops->spad_read)(dev, SPAD_Q_SZ, 0);
	if (peer_val != hw->queue_size) {
		NTB_LOG(ERR, "Inconsistent queue size! (local: %u peer: %u)",
			hw->queue_size, peer_val);

	peer_val = (*hw->ntb_ops->spad_read)(dev, SPAD_NUM_QPS, 0);
	if (peer_val != hw->queue_pairs) {
		NTB_LOG(ERR, "Inconsistent number of queues! (local: %u peer:"
			" %u)", hw->queue_pairs, peer_val);

	hw->peer_used_mws = (*hw->ntb_ops->spad_read)(dev, SPAD_USED_MWS, 0);

	for (i = 0; i < hw->peer_used_mws; i++) {
		peer_base_h = (*hw->ntb_ops->spad_read)(dev,
				SPAD_MW0_BA_H + 2 * i, 0);
		peer_base_l = (*hw->ntb_ops->spad_read)(dev,
				SPAD_MW0_BA_L + 2 * i, 0);
		hw->peer_mw_base[i] = (peer_base_h << 32) + peer_base_l;

	rte_free(hw->peer_mw_base);

	for (i = 0; i < hw->queue_pairs; i++) {
		ntb_rxq_release_mbufs(hw->rx_queues[i]);
		ntb_txq_release_mbufs(hw->tx_queues[i]);
ntb_dev_stop(struct rte_rawdev *dev)
	struct ntb_hw *hw = dev->dev_private;

	if (!hw->peer_dev_up)

	ntb_link_cleanup(dev);

	/* Notify the peer that this device will be down. */
	if (hw->ntb_ops->peer_db_set == NULL) {
		NTB_LOG(ERR, "Peer doorbell setting is not supported.");
	status = (*hw->ntb_ops->peer_db_set)(dev, 1);
		NTB_LOG(ERR, "Failed to tell peer device is down.");

	/*
	 * Set the timeout to 1s in case the peer is stopped accidentally
	 * without any notification.
	 */

	/* Wait for the cleanup work to be done before clearing the db mask. */
	while (hw->peer_dev_up && time_out) {

	/* Set the doorbell mask to disable all doorbells. */
	if (hw->ntb_ops->db_set_mask == NULL) {
		NTB_LOG(ERR, "Doorbell mask setting is not supported.");
	status = (*hw->ntb_ops->db_set_mask)(dev,
			(((uint64_t)1 << hw->db_cnt) - 1));
		NTB_LOG(ERR, "Failed to clear doorbells.");

	for (i = 0; i < hw->queue_pairs; i++) {
		ntb_rxq_release_mbufs(hw->rx_queues[i]);
		ntb_txq_release_mbufs(hw->tx_queues[i]);
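/*
 * Device close releases every queue pair and tears down interrupt handling:
 * the event fds are disabled, the vector mapping is freed, the UIO/VFIO
 * interrupt is disabled and the callback registered in ntb_init_hw() is
 * unregistered.
 */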
ntb_dev_close(struct rte_rawdev *dev)
	struct ntb_hw *hw = dev->dev_private;
	struct rte_intr_handle *intr_handle;

	for (i = 0; i < hw->queue_pairs; i++)
		ntb_queue_release(dev, i);

	intr_handle = &hw->pci_dev->intr_handle;
	/* Clean datapath event and vec mapping. */
	rte_intr_efd_disable(intr_handle);
	if (intr_handle->intr_vec) {
		rte_free(intr_handle->intr_vec);
		intr_handle->intr_vec = NULL;

	/* Disable uio intr before unregistering the callback. */
	rte_intr_disable(intr_handle);

	/* Unregister the callback func from the EAL lib. */
	rte_intr_callback_unregister(intr_handle,
				     ntb_dev_intr_handler, dev);
ntb_dev_reset(struct rte_rawdev *rawdev __rte_unused)

ntb_attr_set(struct rte_rawdev *dev, const char *attr_name,
	     uint64_t attr_value)

	if (dev == NULL || attr_name == NULL) {
		NTB_LOG(ERR, "Invalid arguments for setting attributes");

	hw = dev->dev_private;

	if (!strncmp(attr_name, NTB_SPAD_USER, NTB_SPAD_USER_LEN)) {
		if (hw->ntb_ops->spad_write == NULL)
		index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
		(*hw->ntb_ops->spad_write)(dev, hw->spad_user_list[index],
		NTB_LOG(DEBUG, "Set attribute (%s) Value (%" PRIu64 ")",
			attr_name, attr_value);

	if (!strncmp(attr_name, NTB_QUEUE_SZ_NAME, NTB_ATTR_NAME_LEN)) {
		hw->queue_size = attr_value;
		NTB_LOG(DEBUG, "Set attribute (%s) Value (%" PRIu64 ")",
			attr_name, attr_value);

	if (!strncmp(attr_name, NTB_QUEUE_NUM_NAME, NTB_ATTR_NAME_LEN)) {
		hw->queue_pairs = attr_value;
		NTB_LOG(DEBUG, "Set attribute (%s) Value (%" PRIu64 ")",
			attr_name, attr_value);

	/* Attribute not found. */
	NTB_LOG(ERR, "Attribute not found.");
ntb_attr_get(struct rte_rawdev *dev, const char *attr_name,
	     uint64_t *attr_value)

	if (dev == NULL || attr_name == NULL || attr_value == NULL) {
		NTB_LOG(ERR, "Invalid arguments for getting attributes");

	hw = dev->dev_private;

	if (!strncmp(attr_name, NTB_TOPO_NAME, NTB_ATTR_NAME_LEN)) {
		*attr_value = hw->topo;
		NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
			attr_name, *attr_value);

	if (!strncmp(attr_name, NTB_LINK_STATUS_NAME, NTB_ATTR_NAME_LEN)) {
		/* hw->link_status only indicates hw link status. */
		*attr_value = hw->link_status && hw->peer_dev_up;
		NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
			attr_name, *attr_value);

	if (!strncmp(attr_name, NTB_SPEED_NAME, NTB_ATTR_NAME_LEN)) {
		*attr_value = hw->link_speed;
		NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
			attr_name, *attr_value);

	if (!strncmp(attr_name, NTB_WIDTH_NAME, NTB_ATTR_NAME_LEN)) {
		*attr_value = hw->link_width;
		NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
			attr_name, *attr_value);

	if (!strncmp(attr_name, NTB_MW_CNT_NAME, NTB_ATTR_NAME_LEN)) {
		*attr_value = hw->mw_cnt;
		NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
			attr_name, *attr_value);

	if (!strncmp(attr_name, NTB_DB_CNT_NAME, NTB_ATTR_NAME_LEN)) {
		*attr_value = hw->db_cnt;
		NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
			attr_name, *attr_value);

	if (!strncmp(attr_name, NTB_SPAD_CNT_NAME, NTB_ATTR_NAME_LEN)) {
		*attr_value = hw->spad_cnt;
		NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
			attr_name, *attr_value);

	if (!strncmp(attr_name, NTB_SPAD_USER, NTB_SPAD_USER_LEN)) {
		if (hw->ntb_ops->spad_read == NULL)
		index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
		*attr_value = (*hw->ntb_ops->spad_read)(dev,
				hw->spad_user_list[index], 0);
		NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
			attr_name, *attr_value);

	/* Attribute not found. */
	NTB_LOG(ERR, "Attribute not found.");
ntb_xstats_get(const struct rte_rawdev *dev __rte_unused,
	       const unsigned int ids[] __rte_unused,
	       uint64_t values[] __rte_unused,
	       unsigned int n __rte_unused)

ntb_xstats_get_names(const struct rte_rawdev *dev __rte_unused,
		     struct rte_rawdev_xstats_name *xstats_names __rte_unused,
		     unsigned int size __rte_unused)

ntb_xstats_get_by_name(const struct rte_rawdev *dev __rte_unused,
		       const char *name __rte_unused,
		       unsigned int *id __rte_unused)

ntb_xstats_reset(struct rte_rawdev *dev __rte_unused,
		 const uint32_t ids[] __rte_unused,
		 uint32_t nb_ids __rte_unused)
static const struct rte_rawdev_ops ntb_ops = {
	.dev_info_get = ntb_dev_info_get,
	.dev_configure = ntb_dev_configure,
	.dev_start = ntb_dev_start,
	.dev_stop = ntb_dev_stop,
	.dev_close = ntb_dev_close,
	.dev_reset = ntb_dev_reset,

	.queue_def_conf = ntb_queue_conf_get,
	.queue_setup = ntb_queue_setup,
	.queue_release = ntb_queue_release,
	.queue_count = ntb_queue_count,

	.enqueue_bufs = ntb_enqueue_bufs,
	.dequeue_bufs = ntb_dequeue_bufs,

	.attr_get = ntb_attr_get,
	.attr_set = ntb_attr_set,

	.xstats_get = ntb_xstats_get,
	.xstats_get_names = ntb_xstats_get_names,
	.xstats_get_by_name = ntb_xstats_get_by_name,
	.xstats_reset = ntb_xstats_reset,
};
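/*
 * ntb_init_hw() selects the ops table for the PCI device id (only the Intel
 * Skylake B2B NTB is listed), initializes the hardware, brings the link up
 * and derives db_valid_mask from db_cnt. It then registers the doorbell
 * interrupt handler with the EAL, enables one event fd per doorbell and,
 * when multiple interrupt vectors are not available (e.g. UIO), binds every
 * doorbell to vector 0, before finally programming the doorbell mask to
 * enable doorbell interrupts.
 */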
ntb_init_hw(struct rte_rawdev *dev, struct rte_pci_device *pci_dev)
	struct ntb_hw *hw = dev->dev_private;
	struct rte_intr_handle *intr_handle;

	hw->pci_dev = pci_dev;

	hw->link_status = NTB_LINK_DOWN;
	hw->link_speed = NTB_SPEED_NONE;
	hw->link_width = NTB_WIDTH_NONE;

	switch (pci_dev->id.device_id) {
	case NTB_INTEL_DEV_ID_B2B_SKX:
		hw->ntb_ops = &intel_ntb_ops;
		NTB_LOG(ERR, "Unsupported device.");

	if (hw->ntb_ops->ntb_dev_init == NULL)
	ret = (*hw->ntb_ops->ntb_dev_init)(dev);
		NTB_LOG(ERR, "Unable to init ntb dev.");

	if (hw->ntb_ops->set_link == NULL)
	ret = (*hw->ntb_ops->set_link)(dev, 1);

	/* Init doorbell. */
	hw->db_valid_mask = RTE_LEN2MASK(hw->db_cnt, uint64_t);

	intr_handle = &pci_dev->intr_handle;
	/* Register the callback func with the EAL lib. */
	rte_intr_callback_register(intr_handle,
				   ntb_dev_intr_handler, dev);

	ret = rte_intr_efd_enable(intr_handle, hw->db_cnt);

	/* To clarify, the interrupt for each doorbell is already mapped
	 * by default for intel gen3. They are mapped to msix vec 1-32,
	 * and hardware intr is mapped to 0. Map all to 0 for uio.
	 */
	if (!rte_intr_cap_multiple(intr_handle)) {
		for (i = 0; i < hw->db_cnt; i++) {
			if (hw->ntb_ops->vector_bind == NULL)
			ret = (*hw->ntb_ops->vector_bind)(dev, i, 0);

	if (hw->ntb_ops->db_set_mask == NULL ||
	    hw->ntb_ops->peer_db_set == NULL) {
		NTB_LOG(ERR, "Doorbell is not supported.");

	ret = (*hw->ntb_ops->db_set_mask)(dev, hw->db_mask);
		NTB_LOG(ERR, "Unable to enable intr for all dbs.");

	/* Enable uio intr after registering the callback. */
	rte_intr_enable(intr_handle);
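/*
 * Rawdev creation and teardown. The rawdev is named "NTB:<bus>:<devid>.<func>"
 * after the PCI address; ntb_destroy() rebuilds the same name to look the
 * device up again with rte_rawdev_pmd_get_named_dev().
 */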
ntb_create(struct rte_pci_device *pci_dev, int socket_id)
	char name[RTE_RAWDEV_NAME_MAX_LEN];
	struct rte_rawdev *rawdev = NULL;

	if (pci_dev == NULL) {
		NTB_LOG(ERR, "Invalid pci_dev.");

	memset(name, 0, sizeof(name));
	snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "NTB:%x:%02x.%x",
		 pci_dev->addr.bus, pci_dev->addr.devid,
		 pci_dev->addr.function);

	NTB_LOG(INFO, "Init %s on NUMA node %d", name, socket_id);

	/* Allocate device structure. */
	rawdev = rte_rawdev_pmd_allocate(name, sizeof(struct ntb_hw),
	if (rawdev == NULL) {
		NTB_LOG(ERR, "Unable to allocate rawdev.");

	rawdev->dev_ops = &ntb_ops;
	rawdev->device = &pci_dev->device;
	rawdev->driver_name = pci_dev->driver->driver.name;

	ret = ntb_init_hw(rawdev, pci_dev);
		NTB_LOG(ERR, "Unable to init ntb hw.");

	rte_rawdev_pmd_release(rawdev);
ntb_destroy(struct rte_pci_device *pci_dev)
	char name[RTE_RAWDEV_NAME_MAX_LEN];
	struct rte_rawdev *rawdev;

	if (pci_dev == NULL) {
		NTB_LOG(ERR, "Invalid pci_dev.");

	memset(name, 0, sizeof(name));
	snprintf(name, RTE_RAWDEV_NAME_MAX_LEN, "NTB:%x:%02x.%x",
		 pci_dev->addr.bus, pci_dev->addr.devid,
		 pci_dev->addr.function);

	NTB_LOG(INFO, "Closing %s on NUMA node %d", name, rte_socket_id());

	rawdev = rte_rawdev_pmd_get_named_dev(name);
	if (rawdev == NULL) {
		NTB_LOG(ERR, "Invalid device name (%s)", name);

	ret = rte_rawdev_pmd_release(rawdev);
		NTB_LOG(ERR, "Failed to destroy ntb rawdev.");
ntb_probe(struct rte_pci_driver *pci_drv __rte_unused,
	  struct rte_pci_device *pci_dev)
	return ntb_create(pci_dev, rte_socket_id());

ntb_remove(struct rte_pci_device *pci_dev)
	return ntb_destroy(pci_dev);

static struct rte_pci_driver rte_ntb_pmd = {
	.id_table = pci_id_ntb_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = ntb_probe,
	.remove = ntb_remove,
};

RTE_PMD_REGISTER_PCI(raw_ntb, rte_ntb_pmd);
RTE_PMD_REGISTER_PCI_TABLE(raw_ntb, pci_id_ntb_map);
RTE_PMD_REGISTER_KMOD_DEP(raw_ntb, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(ntb_init_log)
	ntb_logtype = rte_log_register("pmd.raw.ntb");
	if (ntb_logtype >= 0)
		rte_log_set_level(ntb_logtype, RTE_LOG_INFO);