#include <rte_eal.h>
#include <rte_log.h>
#include <rte_pci.h>
+#include <rte_mbuf.h>
#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_memcpy.h>
#include <rte_rawdev_pmd.h>
#include "ntb_hw_intel.h"
+#include "rte_pmd_ntb.h"
#include "ntb.h"
int ntb_logtype;
{ .vendor_id = 0, /* sentinel */ },
};
-static int
-ntb_set_mw(struct rte_rawdev *dev, int mw_idx, uint64_t mw_size)
-{
- struct ntb_hw *hw = dev->dev_private;
- char mw_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
- int ret = 0;
-
- if (hw->ntb_ops->mw_set_trans == NULL) {
- NTB_LOG(ERR, "Not supported to set mw.");
- return -ENOTSUP;
- }
-
- snprintf(mw_name, sizeof(mw_name), "ntb_%d_mw_%d",
- dev->dev_id, mw_idx);
-
- mz = rte_memzone_lookup(mw_name);
- if (mz)
- return 0;
-
- /**
- * Hardware requires that mapped memory base address should be
- * aligned with EMBARSZ and needs continuous memzone.
- */
- mz = rte_memzone_reserve_aligned(mw_name, mw_size, dev->socket_id,
- RTE_MEMZONE_IOVA_CONTIG, hw->mw_size[mw_idx]);
- if (!mz) {
- NTB_LOG(ERR, "Cannot allocate aligned memzone.");
- return -EIO;
- }
- hw->mz[mw_idx] = mz;
-
- ret = (*hw->ntb_ops->mw_set_trans)(dev, mw_idx, mz->iova, mw_size);
- if (ret) {
- NTB_LOG(ERR, "Cannot set mw translation.");
- return ret;
- }
-
- return ret;
-}
-
-static void
+static inline void
ntb_link_cleanup(struct rte_rawdev *dev)
{
struct ntb_hw *hw = dev->dev_private;
}
/* Clear mw so that peer cannot access local memory.*/
- for (i = 0; i < hw->mw_cnt; i++) {
+ for (i = 0; i < hw->used_mw_num; i++) {
status = (*hw->ntb_ops->mw_set_trans)(dev, i, 0, 0);
if (status)
NTB_LOG(ERR, "Failed to clean mw.");
}
}
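+/* Publish the local configuration to the peer through scratchpads:
+ * the memory window count and sizes, the queue size and number of
+ * queue pairs, and the number of memzones in use together with their
+ * base addresses. Then program the local MW translation for each
+ * memzone and ring doorbell 0 to signal that this side is ready.
+ */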
+static inline int
+ntb_handshake_work(const struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t val;
+ int ret, i;
+
+ if (hw->ntb_ops->spad_write == NULL ||
+ hw->ntb_ops->mw_set_trans == NULL) {
+ NTB_LOG(ERR, "Scratchpad/MW setting is not supported.");
+ return -ENOTSUP;
+ }
+
+ /* Tell peer the mw info of local side. */
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS, 1, hw->mw_cnt);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < hw->mw_cnt; i++) {
+ NTB_LOG(INFO, "Local %u mw size: 0x%"PRIx64"", i,
+ hw->mw_size[i]);
+ val = hw->mw_size[i] >> 32;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_H + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ val = hw->mw_size[i];
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_L + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Tell peer about the queue info and map memory to the peer. */
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_Q_SZ, 1, hw->queue_size);
+ if (ret < 0)
+ return ret;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_QPS, 1,
+ hw->queue_pairs);
+ if (ret < 0)
+ return ret;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_USED_MWS, 1,
+ hw->used_mw_num);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < hw->used_mw_num; i++) {
+ val = (uint64_t)(size_t)(hw->mz[i]->addr) >> 32;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_BA_H + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ val = (uint64_t)(size_t)(hw->mz[i]->addr);
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_BA_L + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < hw->used_mw_num; i++) {
+ ret = (*hw->ntb_ops->mw_set_trans)(dev, i, hw->mz[i]->iova,
+ hw->mz[i]->len);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Ring doorbell 0 to tell peer the device is ready. */
+ ret = (*hw->ntb_ops->peer_db_set)(dev, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void
ntb_dev_intr_handler(void *param)
{
struct rte_rawdev *dev = (struct rte_rawdev *)param;
struct ntb_hw *hw = dev->dev_private;
- uint32_t mw_size_h, mw_size_l;
+ uint32_t val_h, val_l;
+ uint64_t peer_mw_size;
uint64_t db_bits = 0;
+ uint8_t peer_mw_cnt;
int i = 0;
if (hw->ntb_ops->db_read == NULL ||
/* Doorbell 0 is for peer device ready. */
if (db_bits & 1) {
- NTB_LOG(DEBUG, "DB0: Peer device is up.");
+ NTB_LOG(INFO, "DB0: Peer device is up.");
/* Clear received doorbell. */
(*hw->ntb_ops->db_clear)(dev, 1);
if (hw->peer_dev_up)
return;
- if (hw->ntb_ops->spad_read == NULL ||
- hw->ntb_ops->spad_write == NULL) {
- NTB_LOG(ERR, "Scratchpad is not supported.");
+ if (hw->ntb_ops->spad_read == NULL) {
+ NTB_LOG(ERR, "Scratchpad read is not supported.");
+ return;
+ }
+
+ /* Check if mw setting on the peer is the same as local. */
+ peer_mw_cnt = (*hw->ntb_ops->spad_read)(dev, SPAD_NUM_MWS, 0);
+ if (peer_mw_cnt != hw->mw_cnt) {
+ NTB_LOG(ERR, "Both mw cnt must be the same.");
return;
}
- hw->peer_mw_cnt = (*hw->ntb_ops->spad_read)
- (dev, SPAD_NUM_MWS, 0);
- hw->peer_mw_size = rte_zmalloc("uint64_t",
- hw->peer_mw_cnt * sizeof(uint64_t), 0);
for (i = 0; i < hw->mw_cnt; i++) {
- mw_size_h = (*hw->ntb_ops->spad_read)
- (dev, SPAD_MW0_SZ_H + 2 * i, 0);
- mw_size_l = (*hw->ntb_ops->spad_read)
- (dev, SPAD_MW0_SZ_L + 2 * i, 0);
- hw->peer_mw_size[i] = ((uint64_t)mw_size_h << 32) |
- mw_size_l;
+ val_h = (*hw->ntb_ops->spad_read)
+ (dev, SPAD_MW0_SZ_H + 2 * i, 0);
+ val_l = (*hw->ntb_ops->spad_read)
+ (dev, SPAD_MW0_SZ_L + 2 * i, 0);
+ peer_mw_size = ((uint64_t)val_h << 32) | val_l;
NTB_LOG(DEBUG, "Peer %u mw size: 0x%"PRIx64"", i,
- hw->peer_mw_size[i]);
+ peer_mw_size);
+ if (peer_mw_size != hw->mw_size[i]) {
+ NTB_LOG(ERR, "Mw config must be the same.");
+ return;
+ }
}
hw->peer_dev_up = 1;
/**
- * Handshake with peer. Spad_write only works when both
- * devices are up. So write spad again when db is received.
- * And set db again for the later device who may miss
+ * Handshake with peer. Spad_write & mw_set_trans only works
+ * when both devices are up. So write spad again when db is
+ * received. And set db again for the later device who may miss
* the 1st db.
*/
- for (i = 0; i < hw->mw_cnt; i++) {
- (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS,
- 1, hw->mw_cnt);
- mw_size_h = hw->mw_size[i] >> 32;
- (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_H + 2 * i,
- 1, mw_size_h);
-
- mw_size_l = hw->mw_size[i];
- (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_L + 2 * i,
- 1, mw_size_l);
+ if (ntb_handshake_work(dev) < 0) {
+ NTB_LOG(ERR, "Handshake work failed.");
+ return;
}
- (*hw->ntb_ops->peer_db_set)(dev, 0);
/* To get the link info. */
if (hw->ntb_ops->get_link_status == NULL) {
}
if (db_bits & (1 << 1)) {
- NTB_LOG(DEBUG, "DB1: Peer device is down.");
+ NTB_LOG(INFO, "DB1: Peer device is down.");
/* Clear received doorbell. */
(*hw->ntb_ops->db_clear)(dev, 2);
}
if (db_bits & (1 << 2)) {
- NTB_LOG(DEBUG, "DB2: Peer device agrees dev to be down.");
+ NTB_LOG(INFO, "DB2: Peer device agrees dev to be down.");
/* Clear received doorbell. */
(*hw->ntb_ops->db_clear)(dev, (1 << 2));
hw->peer_dev_up = 0;
}
static void
-ntb_queue_conf_get(struct rte_rawdev *dev __rte_unused,
- uint16_t queue_id __rte_unused,
- rte_rawdev_obj_t queue_conf __rte_unused)
+ntb_queue_conf_get(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ struct ntb_queue_conf *q_conf = queue_conf;
+ struct ntb_hw *hw = dev->dev_private;
+
+ q_conf->tx_free_thresh = hw->tx_queues[queue_id]->tx_free_thresh;
+ q_conf->nb_desc = hw->rx_queues[queue_id]->nb_rx_desc;
+ q_conf->rx_mp = hw->rx_queues[queue_id]->mpool;
+}
+
+static void
+ntb_rxq_release_mbufs(struct ntb_rx_queue *q)
{
+ int i;
+
+ if (!q || !q->sw_ring) {
+ NTB_LOG(ERR, "Pointer to rxq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < q->nb_rx_desc; i++) {
+ if (q->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(q->sw_ring[i].mbuf);
+ q->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+static void
+ntb_rxq_release(struct ntb_rx_queue *rxq)
+{
+ if (!rxq) {
+ NTB_LOG(ERR, "Pointer to rxq is NULL");
+ return;
+ }
+
+ ntb_rxq_release_mbufs(rxq);
+
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
}
static int
-ntb_queue_setup(struct rte_rawdev *dev __rte_unused,
- uint16_t queue_id __rte_unused,
- rte_rawdev_obj_t queue_conf __rte_unused)
+ntb_rxq_setup(struct rte_rawdev *dev,
+ uint16_t qp_id,
+ rte_rawdev_obj_t queue_conf)
{
+ struct ntb_queue_conf *rxq_conf = queue_conf;
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_rx_queue *rxq;
+
+ /* Allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ntb rx queue",
+ sizeof(struct ntb_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!rxq) {
+ NTB_LOG(ERR, "Failed to allocate memory for "
+ "rx queue data structure.");
+ return -ENOMEM;
+ }
+
+ if (rxq_conf->rx_mp == NULL) {
+ NTB_LOG(ERR, "Invalid null mempool pointer.");
+ rte_free(rxq);
+ return -EINVAL;
+ }
+ rxq->nb_rx_desc = rxq_conf->nb_desc;
+ rxq->mpool = rxq_conf->rx_mp;
+ rxq->port_id = dev->dev_id;
+ rxq->queue_id = qp_id;
+ rxq->hw = hw;
+
+ /* Allocate the software ring. */
+ rxq->sw_ring =
+ rte_zmalloc_socket("ntb rx sw ring",
+ sizeof(struct ntb_rx_entry) *
+ rxq->nb_rx_desc,
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!rxq->sw_ring) {
+ ntb_rxq_release(rxq);
+ rxq = NULL;
+ NTB_LOG(ERR, "Failed to allocate memory for SW ring");
+ return -ENOMEM;
+ }
+
+ hw->rx_queues[qp_id] = rxq;
+
return 0;
}
+static void
+ntb_txq_release_mbufs(struct ntb_tx_queue *q)
+{
+ int i;
+
+ if (!q || !q->sw_ring) {
+ NTB_LOG(ERR, "Pointer to txq or sw_ring is NULL");
+ return;
+ }
+
+ for (i = 0; i < q->nb_tx_desc; i++) {
+ if (q->sw_ring[i].mbuf) {
+ rte_pktmbuf_free_seg(q->sw_ring[i].mbuf);
+ q->sw_ring[i].mbuf = NULL;
+ }
+ }
+}
+
+static void
+ntb_txq_release(struct ntb_tx_queue *txq)
+{
+ if (!txq) {
+ NTB_LOG(ERR, "Pointer to txq is NULL");
+ return;
+ }
+
+ ntb_txq_release_mbufs(txq);
+
+ rte_free(txq->sw_ring);
+ rte_free(txq);
+}
+
static int
-ntb_queue_release(struct rte_rawdev *dev __rte_unused,
- uint16_t queue_id __rte_unused)
+ntb_txq_setup(struct rte_rawdev *dev,
+ uint16_t qp_id,
+ rte_rawdev_obj_t queue_conf)
{
+ struct ntb_queue_conf *txq_conf = queue_conf;
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_tx_queue *txq;
+ uint16_t i, prev;
+
+ /* Allocate the TX queue data structure. */
+ txq = rte_zmalloc_socket("ntb tx queue",
+ sizeof(struct ntb_tx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!txq) {
+ NTB_LOG(ERR, "Failed to allocate memory for "
+ "tx queue structure");
+ return -ENOMEM;
+ }
+
+ txq->nb_tx_desc = txq_conf->nb_desc;
+ txq->port_id = dev->dev_id;
+ txq->queue_id = qp_id;
+ txq->hw = hw;
+
+ /* Allocate software ring */
+ txq->sw_ring =
+ rte_zmalloc_socket("ntb tx sw ring",
+ sizeof(struct ntb_tx_entry) *
+ txq->nb_tx_desc,
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!txq->sw_ring) {
+ ntb_txq_release(txq);
+ txq = NULL;
+ NTB_LOG(ERR, "Failed to allocate memory for SW TX ring");
+ return -ENOMEM;
+ }
+
+ prev = txq->nb_tx_desc - 1;
+ for (i = 0; i < txq->nb_tx_desc; i++) {
+ txq->sw_ring[i].mbuf = NULL;
+ txq->sw_ring[i].last_id = i;
+ txq->sw_ring[prev].next_id = i;
+ prev = i;
+ }
+
+ txq->tx_free_thresh = txq_conf->tx_free_thresh ?
+ txq_conf->tx_free_thresh :
+ NTB_DFLT_TX_FREE_THRESH;
+ if (txq->tx_free_thresh >= txq->nb_tx_desc - 3) {
+ NTB_LOG(ERR, "tx_free_thresh must be less than nb_desc - 3. "
+ "(tx_free_thresh=%u qp_id=%u)", txq->tx_free_thresh,
+ qp_id);
+ ntb_txq_release(txq);
+ return -EINVAL;
+ }
+
+ hw->tx_queues[qp_id] = txq;
+
+ return 0;
+}
+
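+/* One rawdev queue maps to an NTB queue pair: set up the TX ring
+ * first, then the RX ring backed by the mempool given in queue_conf.
+ */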
+static int
+ntb_queue_setup(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ int ret;
+
+ if (queue_id >= hw->queue_pairs)
+ return -EINVAL;
+
+ ret = ntb_txq_setup(dev, queue_id, queue_conf);
+ if (ret < 0)
+ return ret;
+
+ ret = ntb_rxq_setup(dev, queue_id, queue_conf);
+
+ return ret;
+}
+
+static int
+ntb_queue_release(struct rte_rawdev *dev, uint16_t queue_id)
+{
+ struct ntb_hw *hw = dev->dev_private;
+
+ if (queue_id >= hw->queue_pairs)
+ return -EINVAL;
+
+ ntb_txq_release(hw->tx_queues[queue_id]);
+ hw->tx_queues[queue_id] = NULL;
+ ntb_rxq_release(hw->rx_queues[queue_id]);
+ hw->rx_queues[queue_id] = NULL;
+
return 0;
}
return hw->queue_pairs;
}
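+/* Bind one queue pair to the shared memory rings. Each queue pair owns
+ * a slice of hdr_size_per_queue bytes at the start of the first
+ * memzone: an ntb_header holding the descriptor ring, the used ring
+ * and the avail/used counters. Writes always target the peer's copy of
+ * the header through the mapped BAR; reads come from the local copy
+ * that the peer fills in the same way.
+ */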
+static int
+ntb_queue_init(struct rte_rawdev *dev, uint16_t qp_id)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_rx_queue *rxq = hw->rx_queues[qp_id];
+ struct ntb_tx_queue *txq = hw->tx_queues[qp_id];
+ volatile struct ntb_header *local_hdr;
+ struct ntb_header *remote_hdr;
+ uint16_t q_size = hw->queue_size;
+ uint32_t hdr_offset;
+ void *bar_addr;
+ uint16_t i;
+
+ if (hw->ntb_ops->get_peer_mw_addr == NULL) {
+ NTB_LOG(ERR, "Getting peer mw addr is not supported.");
+ return -EINVAL;
+ }
+
+ /* Put queue info into the start of shared memory. */
+ hdr_offset = hw->hdr_size_per_queue * qp_id;
+ local_hdr = (volatile struct ntb_header *)
+ ((size_t)hw->mz[0]->addr + hdr_offset);
+ bar_addr = (*hw->ntb_ops->get_peer_mw_addr)(dev, 0);
+ if (bar_addr == NULL)
+ return -EINVAL;
+ remote_hdr = (struct ntb_header *)
+ ((size_t)bar_addr + hdr_offset);
+
+ /* rxq init. */
+ rxq->rx_desc_ring = (struct ntb_desc *)
+ (&remote_hdr->desc_ring);
+ rxq->rx_used_ring = (volatile struct ntb_used *)
+ (&local_hdr->desc_ring[q_size]);
+ rxq->avail_cnt = &remote_hdr->avail_cnt;
+ rxq->used_cnt = &local_hdr->used_cnt;
+
+ for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+ struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mpool);
+ if (unlikely(!mbuf)) {
+ NTB_LOG(ERR, "Failed to allocate mbuf for RX");
+ return -ENOMEM;
+ }
+ mbuf->port = dev->dev_id;
+
+ rxq->sw_ring[i].mbuf = mbuf;
+
+ rxq->rx_desc_ring[i].addr = rte_pktmbuf_mtod(mbuf, size_t);
+ rxq->rx_desc_ring[i].len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+ }
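+ /* Make sure the posted buffer addresses are written out before
+ * exposing the new available count to the peer.
+ */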
+ rte_wmb();
+ *rxq->avail_cnt = rxq->nb_rx_desc - 1;
+ rxq->last_avail = rxq->nb_rx_desc - 1;
+ rxq->last_used = 0;
+
+ /* txq init */
+ txq->tx_desc_ring = (volatile struct ntb_desc *)
+ (&local_hdr->desc_ring);
+ txq->tx_used_ring = (struct ntb_used *)
+ (&remote_hdr->desc_ring[q_size]);
+ txq->avail_cnt = &local_hdr->avail_cnt;
+ txq->used_cnt = &remote_hdr->used_cnt;
+
+ rte_wmb();
+ *txq->used_cnt = 0;
+ txq->last_used = 0;
+ txq->last_avail = 0;
+ txq->nb_tx_free = txq->nb_tx_desc - 1;
+
+ return 0;
+}
+
static int
ntb_enqueue_bufs(struct rte_rawdev *dev,
struct rte_rawdev_buf **buffers,
ntb_dev_info_get(struct rte_rawdev *dev, rte_rawdev_obj_t dev_info)
{
struct ntb_hw *hw = dev->dev_private;
- struct ntb_attr *ntb_attrs = dev_info;
-
- strncpy(ntb_attrs[NTB_TOPO_ID].name, NTB_TOPO_NAME, NTB_ATTR_NAME_LEN);
- switch (hw->topo) {
- case NTB_TOPO_B2B_DSD:
- strncpy(ntb_attrs[NTB_TOPO_ID].value, "B2B DSD",
- NTB_ATTR_VAL_LEN);
- break;
- case NTB_TOPO_B2B_USD:
- strncpy(ntb_attrs[NTB_TOPO_ID].value, "B2B USD",
- NTB_ATTR_VAL_LEN);
- break;
- default:
- strncpy(ntb_attrs[NTB_TOPO_ID].value, "Unsupported",
- NTB_ATTR_VAL_LEN);
- }
-
- strncpy(ntb_attrs[NTB_LINK_STATUS_ID].name, NTB_LINK_STATUS_NAME,
- NTB_ATTR_NAME_LEN);
- snprintf(ntb_attrs[NTB_LINK_STATUS_ID].value, NTB_ATTR_VAL_LEN,
- "%d", hw->link_status);
+ struct ntb_dev_info *info = dev_info;
- strncpy(ntb_attrs[NTB_SPEED_ID].name, NTB_SPEED_NAME,
- NTB_ATTR_NAME_LEN);
- snprintf(ntb_attrs[NTB_SPEED_ID].value, NTB_ATTR_VAL_LEN,
- "%d", hw->link_speed);
+ info->mw_cnt = hw->mw_cnt;
+ info->mw_size = hw->mw_size;
- strncpy(ntb_attrs[NTB_WIDTH_ID].name, NTB_WIDTH_NAME,
- NTB_ATTR_NAME_LEN);
- snprintf(ntb_attrs[NTB_WIDTH_ID].value, NTB_ATTR_VAL_LEN,
- "%d", hw->link_width);
-
- strncpy(ntb_attrs[NTB_MW_CNT_ID].name, NTB_MW_CNT_NAME,
- NTB_ATTR_NAME_LEN);
- snprintf(ntb_attrs[NTB_MW_CNT_ID].value, NTB_ATTR_VAL_LEN,
- "%d", hw->mw_cnt);
+ /**
+ * Intel hardware requires that the mapped memory base address be
+ * aligned with EMBARSZ and that the memzone be IOVA-contiguous.
+ */
+ info->mw_size_align = (uint8_t)(hw->pci_dev->id.vendor_id ==
+ NTB_INTEL_VENDOR_ID);
- strncpy(ntb_attrs[NTB_DB_CNT_ID].name, NTB_DB_CNT_NAME,
- NTB_ATTR_NAME_LEN);
- snprintf(ntb_attrs[NTB_DB_CNT_ID].value, NTB_ATTR_VAL_LEN,
- "%d", hw->db_cnt);
+ if (!hw->queue_size || !hw->queue_pairs) {
+ NTB_LOG(ERR, "No queue size and queue num assigned.");
+ return;
+ }
- strncpy(ntb_attrs[NTB_SPAD_CNT_ID].name, NTB_SPAD_CNT_NAME,
- NTB_ATTR_NAME_LEN);
- snprintf(ntb_attrs[NTB_SPAD_CNT_ID].value, NTB_ATTR_VAL_LEN,
- "%d", hw->spad_cnt);
+ hw->hdr_size_per_queue = RTE_ALIGN(sizeof(struct ntb_header) +
+ hw->queue_size * sizeof(struct ntb_desc) +
+ hw->queue_size * sizeof(struct ntb_used),
+ RTE_CACHE_LINE_SIZE);
+ info->ntb_hdr_size = hw->hdr_size_per_queue * hw->queue_pairs;
}
static int
-ntb_dev_configure(const struct rte_rawdev *dev __rte_unused,
- rte_rawdev_obj_t config __rte_unused)
+ntb_dev_configure(const struct rte_rawdev *dev, rte_rawdev_obj_t config)
{
+ struct ntb_dev_config *conf = config;
+ struct ntb_hw *hw = dev->dev_private;
+ int ret;
+
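+ /* Cache the queue parameters and the memzone list supplied by the
+ * application.
+ */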
+ hw->queue_pairs = conf->num_queues;
+ hw->queue_size = conf->queue_size;
+ hw->used_mw_num = conf->mz_num;
+ hw->mz = conf->mz_list;
+ hw->rx_queues = rte_zmalloc("ntb_rx_queues",
+ sizeof(struct ntb_rx_queue *) * hw->queue_pairs, 0);
+ hw->tx_queues = rte_zmalloc("ntb_tx_queues",
+ sizeof(struct ntb_tx_queue *) * hw->queue_pairs, 0);
+
+ /* Start handshake with the peer. */
+ ret = ntb_handshake_work(dev);
+ if (ret < 0) {
+ rte_free(hw->rx_queues);
+ rte_free(hw->tx_queues);
+ hw->rx_queues = NULL;
+ hw->tx_queues = NULL;
+ return ret;
+ }
+
return 0;
}
ntb_dev_start(struct rte_rawdev *dev)
{
struct ntb_hw *hw = dev->dev_private;
- int ret, i;
+ uint32_t peer_base_l, peer_val;
+ uint64_t peer_base_h;
+ uint32_t i;
+ int ret;
- /* TODO: init queues and start queues. */
+ if (!hw->link_status || !hw->peer_dev_up)
+ return -EINVAL;
- /* Map memory of bar_size to remote. */
- hw->mz = rte_zmalloc("struct rte_memzone *",
- hw->mw_cnt * sizeof(struct rte_memzone *), 0);
- for (i = 0; i < hw->mw_cnt; i++) {
- ret = ntb_set_mw(dev, i, hw->mw_size[i]);
+ for (i = 0; i < hw->queue_pairs; i++) {
+ ret = ntb_queue_init(dev, i);
if (ret) {
- NTB_LOG(ERR, "Fail to set mw.");
- return ret;
+ NTB_LOG(ERR, "Failed to init queue.");
+ goto err_q_init;
}
}
+ hw->peer_mw_base = rte_zmalloc("ntb_peer_mw_base", hw->mw_cnt *
+ sizeof(uint64_t), 0);
+
+ if (hw->ntb_ops->spad_read == NULL) {
+ ret = -ENOTSUP;
+ goto err_up;
+ }
+
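+ /* Cross-check the queue parameters the peer published in its
+ * scratchpads against the local configuration.
+ */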
+ peer_val = (*hw->ntb_ops->spad_read)(dev, SPAD_Q_SZ, 0);
+ if (peer_val != hw->queue_size) {
+ NTB_LOG(ERR, "Inconsistent queue size! (local: %u peer: %u)",
+ hw->queue_size, peer_val);
+ ret = -EINVAL;
+ goto err_up;
+ }
+
+ peer_val = (*hw->ntb_ops->spad_read)(dev, SPAD_NUM_QPS, 0);
+ if (peer_val != hw->queue_pairs) {
+ NTB_LOG(ERR, "Inconsistent number of queues! (local: %u peer:"
+ " %u)", hw->queue_pairs, peer_val);
+ ret = -EINVAL;
+ goto err_up;
+ }
+
+ hw->peer_used_mws = (*hw->ntb_ops->spad_read)(dev, SPAD_USED_MWS, 0);
+
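+ /* Rebuild the 64-bit peer MW base addresses from the high/low
+ * scratchpad words.
+ */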
+ for (i = 0; i < hw->peer_used_mws; i++) {
+ peer_base_h = (*hw->ntb_ops->spad_read)(dev,
+ SPAD_MW0_BA_H + 2 * i, 0);
+ peer_base_l = (*hw->ntb_ops->spad_read)(dev,
+ SPAD_MW0_BA_L + 2 * i, 0);
+ hw->peer_mw_base[i] = (peer_base_h << 32) + peer_base_l;
+ }
+
dev->started = 1;
return 0;
+
+err_up:
+ rte_free(hw->peer_mw_base);
+err_q_init:
+ for (i = 0; i < hw->queue_pairs; i++) {
+ ntb_rxq_release_mbufs(hw->rx_queues[i]);
+ ntb_txq_release_mbufs(hw->tx_queues[i]);
+ }
+
+ return ret;
}
static void
{
struct ntb_hw *hw = dev->dev_private;
uint32_t time_out;
- int status;
-
- /* TODO: stop rx/tx queues. */
+ int status, i;
if (!hw->peer_dev_up)
goto clean;
if (status)
NTB_LOG(ERR, "Failed to clear doorbells.");
+ for (i = 0; i < hw->queue_pairs; i++) {
+ ntb_rxq_release_mbufs(hw->rx_queues[i]);
+ ntb_txq_release_mbufs(hw->tx_queues[i]);
+ }
+
dev->started = 0;
}
{
struct ntb_hw *hw = dev->dev_private;
struct rte_intr_handle *intr_handle;
- int ret = 0;
+ int i;
if (dev->started)
ntb_dev_stop(dev);
- /* TODO: free queues. */
+ /* free queues */
+ for (i = 0; i < hw->queue_pairs; i++)
+ ntb_queue_release(dev, i);
+ hw->queue_pairs = 0;
intr_handle = &hw->pci_dev->intr_handle;
/* Clean datapath event and vec mapping */
rte_intr_callback_unregister(intr_handle,
ntb_dev_intr_handler, dev);
- return ret;
+ return 0;
}
static int
static int
ntb_attr_set(struct rte_rawdev *dev, const char *attr_name,
- uint64_t attr_value)
+ uint64_t attr_value)
{
struct ntb_hw *hw;
int index;
index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
(*hw->ntb_ops->spad_write)(dev, hw->spad_user_list[index],
1, attr_value);
- NTB_LOG(INFO, "Set attribute (%s) Value (%" PRIu64 ")",
+ NTB_LOG(DEBUG, "Set attribute (%s) Value (%" PRIu64 ")",
+ attr_name, attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_QUEUE_SZ_NAME, NTB_ATTR_NAME_LEN)) {
+ hw->queue_size = attr_value;
+ NTB_LOG(DEBUG, "Set attribute (%s) Value (%" PRIu64 ")",
+ attr_name, attr_value);
+ return 0;
+ }
+
+ if (!strncmp(attr_name, NTB_QUEUE_NUM_NAME, NTB_ATTR_NAME_LEN)) {
+ hw->queue_pairs = attr_value;
+ NTB_LOG(DEBUG, "Set attribute (%s) Value (%" PRIu64 ")",
attr_name, attr_value);
return 0;
}
static int
ntb_attr_get(struct rte_rawdev *dev, const char *attr_name,
- uint64_t *attr_value)
+ uint64_t *attr_value)
{
struct ntb_hw *hw;
int index;
if (!strncmp(attr_name, NTB_TOPO_NAME, NTB_ATTR_NAME_LEN)) {
*attr_value = hw->topo;
- NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
attr_name, *attr_value);
return 0;
}
if (!strncmp(attr_name, NTB_LINK_STATUS_NAME, NTB_ATTR_NAME_LEN)) {
- *attr_value = hw->link_status;
- NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
+ /* hw->link_status only indicates hw link status. */
+ *attr_value = hw->link_status && hw->peer_dev_up;
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
attr_name, *attr_value);
return 0;
}
if (!strncmp(attr_name, NTB_SPEED_NAME, NTB_ATTR_NAME_LEN)) {
*attr_value = hw->link_speed;
- NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
attr_name, *attr_value);
return 0;
}
if (!strncmp(attr_name, NTB_WIDTH_NAME, NTB_ATTR_NAME_LEN)) {
*attr_value = hw->link_width;
- NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
attr_name, *attr_value);
return 0;
}
if (!strncmp(attr_name, NTB_MW_CNT_NAME, NTB_ATTR_NAME_LEN)) {
*attr_value = hw->mw_cnt;
- NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
attr_name, *attr_value);
return 0;
}
if (!strncmp(attr_name, NTB_DB_CNT_NAME, NTB_ATTR_NAME_LEN)) {
*attr_value = hw->db_cnt;
- NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
attr_name, *attr_value);
return 0;
}
if (!strncmp(attr_name, NTB_SPAD_CNT_NAME, NTB_ATTR_NAME_LEN)) {
*attr_value = hw->spad_cnt;
- NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
attr_name, *attr_value);
return 0;
}
index = atoi(&attr_name[NTB_SPAD_USER_LEN]);
*attr_value = (*hw->ntb_ops->spad_read)(dev,
hw->spad_user_list[index], 0);
- NTB_LOG(INFO, "Attribute (%s) Value (%" PRIu64 ")",
+ NTB_LOG(DEBUG, "Attribute (%s) Value (%" PRIu64 ")",
attr_name, *attr_value);
return 0;
}
return 0;
}
+
static const struct rte_rawdev_ops ntb_ops = {
.dev_info_get = ntb_dev_info_get,
.dev_configure = ntb_dev_configure,
{
struct ntb_hw *hw = dev->dev_private;
struct rte_intr_handle *intr_handle;
- uint32_t val;
int ret, i;
hw->pci_dev = pci_dev;
/* enable uio intr after callback register */
rte_intr_enable(intr_handle);
- if (hw->ntb_ops->spad_write == NULL) {
- NTB_LOG(ERR, "Scratchpad is not supported.");
- return -ENOTSUP;
- }
- /* Tell peer the mw_cnt of local side. */
- ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS, 1, hw->mw_cnt);
- if (ret) {
- NTB_LOG(ERR, "Failed to tell peer mw count.");
- return ret;
- }
-
- /* Tell peer each mw size on local side. */
- for (i = 0; i < hw->mw_cnt; i++) {
- NTB_LOG(DEBUG, "Local %u mw size: 0x%"PRIx64"", i,
- hw->mw_size[i]);
- val = hw->mw_size[i] >> 32;
- ret = (*hw->ntb_ops->spad_write)
- (dev, SPAD_MW0_SZ_H + 2 * i, 1, val);
- if (ret) {
- NTB_LOG(ERR, "Failed to tell peer mw size.");
- return ret;
- }
-
- val = hw->mw_size[i];
- ret = (*hw->ntb_ops->spad_write)
- (dev, SPAD_MW0_SZ_L + 2 * i, 1, val);
- if (ret) {
- NTB_LOG(ERR, "Failed to tell peer mw size.");
- return ret;
- }
- }
-
- /* Ring doorbell 0 to tell peer the device is ready. */
- ret = (*hw->ntb_ops->peer_db_set)(dev, 0);
- if (ret) {
- NTB_LOG(ERR, "Failed to tell peer device is probed.");
- return ret;
- }
-
return ret;
}
{
ntb_logtype = rte_log_register("pmd.raw.ntb");
if (ntb_logtype >= 0)
- rte_log_set_level(ntb_logtype, RTE_LOG_DEBUG);
+ rte_log_set_level(ntb_logtype, RTE_LOG_INFO);
}