+/*
+ * Align with enum ntb_xstats_idx: the position of each name in this
+ * array is the xstat id, so the order here must never change
+ * independently of that enum.
+ */
+static struct rte_rawdev_xstats_name ntb_xstats_names[] = {
+ {"Tx-packets"},
+ {"Tx-bytes"},
+ {"Tx-errors"},
+ {"Rx-packets"},
+ {"Rx-bytes"},
+ {"Rx-missed"},
+};
+/* Total number of xstats; also the valid id range [0, NTB_XSTATS_NUM). */
+#define NTB_XSTATS_NUM RTE_DIM(ntb_xstats_names)
+
+/**
+ * Reset link-related state shared with the peer.
+ *
+ * Zeroes every local scratchpad register and clears the translation of
+ * each used memory window so the peer can no longer access local memory.
+ * Failures of the individual HW ops are logged but not propagated (the
+ * function returns void), so cleanup continues past per-register errors.
+ */
+static inline void
+ntb_link_cleanup(struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ int status, i;
+
+ /* Both ops are required below; bail out if the HW lacks either. */
+ if (hw->ntb_ops->spad_write == NULL ||
+ hw->ntb_ops->mw_set_trans == NULL) {
+ NTB_LOG(ERR, "Not supported to clean up link.");
+ return;
+ }
+
+ /* Clean spad registers. */
+ for (i = 0; i < hw->spad_cnt; i++) {
+ status = (*hw->ntb_ops->spad_write)(dev, i, 0, 0);
+ if (status)
+ NTB_LOG(ERR, "Failed to clean local spad.");
+ }
+
+ /* Clear mw so that peer cannot access local memory.*/
+ for (i = 0; i < hw->used_mw_num; i++) {
+ status = (*hw->ntb_ops->mw_set_trans)(dev, i, 0, 0);
+ if (status)
+ NTB_LOG(ERR, "Failed to clean mw.");
+ }
+}
+
+/**
+ * Publish the local configuration to the peer and signal readiness.
+ *
+ * Writes the local mw count, each mw size (split into high/low 32-bit
+ * scratchpad words), the queue size, the number of queue pairs and the
+ * base address of every used mw into the peer-visible scratchpads,
+ * programs the local mw translations, and finally rings doorbell 0 to
+ * tell the peer this side is ready.
+ *
+ * @return 0 on success, -ENOTSUP if a required HW op is missing, or the
+ *   negative error code of the first failing HW op.
+ */
+static inline int
+ntb_handshake_work(const struct rte_rawdev *dev)
+{
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t val;
+ int ret, i;
+
+ /*
+ * All three ops are used below (peer_db_set rings the final
+ * doorbell), so verify each up front instead of crashing on a
+ * NULL function pointer later.
+ */
+ if (hw->ntb_ops->spad_write == NULL ||
+ hw->ntb_ops->mw_set_trans == NULL ||
+ hw->ntb_ops->peer_db_set == NULL) {
+ NTB_LOG(ERR, "Scratchpad/MW setting is not supported.");
+ return -ENOTSUP;
+ }
+
+ /* Tell peer the mw info of local side. */
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_MWS, 1, hw->mw_cnt);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < hw->mw_cnt; i++) {
+ NTB_LOG(INFO, "Local %u mw size: 0x%"PRIx64"", i,
+ hw->mw_size[i]);
+ /* 64-bit size goes out as two 32-bit scratchpad words. */
+ val = hw->mw_size[i] >> 32;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_H + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ val = hw->mw_size[i];
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_SZ_L + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Tell peer about the queue info and map memory to the peer. */
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_Q_SZ, 1, hw->queue_size);
+ if (ret < 0)
+ return ret;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_NUM_QPS, 1,
+ hw->queue_pairs);
+ if (ret < 0)
+ return ret;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_USED_MWS, 1,
+ hw->used_mw_num);
+ if (ret < 0)
+ return ret;
+ for (i = 0; i < hw->used_mw_num; i++) {
+ /* Memzone base address, also split into two 32-bit words. */
+ val = (uint64_t)(size_t)(hw->mz[i]->addr) >> 32;
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_BA_H + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ val = (uint64_t)(size_t)(hw->mz[i]->addr);
+ ret = (*hw->ntb_ops->spad_write)(dev, SPAD_MW0_BA_L + 2 * i,
+ 1, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Expose the local memzones through the memory windows. */
+ for (i = 0; i < hw->used_mw_num; i++) {
+ ret = (*hw->ntb_ops->mw_set_trans)(dev, i, hw->mz[i]->iova,
+ hw->mz[i]->len);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Ring doorbell 0 to tell peer the device is ready. */
+ ret = (*hw->ntb_ops->peer_db_set)(dev, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * Doorbell interrupt handler.
+ *
+ * DB0: peer device came up -- verify the peer's mw configuration matches
+ *      the local one, redo the handshake (scratchpad writes and
+ *      mw_set_trans only take effect once both sides are up) and read
+ *      back the link status.
+ * DB1: peer device is going down -- clean the local spad/mw state and
+ *      acknowledge the stop request by ringing DB2 on the peer.
+ * DB2: peer acknowledged our own stop request.
+ * Any other pending doorbell bits are cleared without further action.
+ *
+ * @param param Opaque pointer to the rawdev, as registered with the
+ *              interrupt callback.
+ */
+static void
+ntb_dev_intr_handler(void *param)
+{
+ struct rte_rawdev *dev = (struct rte_rawdev *)param;
+ struct ntb_hw *hw = dev->dev_private;
+ uint32_t val_h, val_l;
+ uint64_t peer_mw_size;
+ uint64_t db_bits = 0;
+ uint8_t peer_mw_cnt;
+ int i = 0;
+
+ /* db_read/db_clear/peer_db_set are all needed to service any DB. */
+ if (hw->ntb_ops->db_read == NULL ||
+ hw->ntb_ops->db_clear == NULL ||
+ hw->ntb_ops->peer_db_set == NULL) {
+ NTB_LOG(ERR, "Doorbell is not supported.");
+ return;
+ }
+
+ db_bits = (*hw->ntb_ops->db_read)(dev);
+ if (!db_bits)
+ NTB_LOG(ERR, "No doorbells");
+
+ /* Doorbell 0 is for peer device ready. */
+ if (db_bits & 1) {
+ NTB_LOG(INFO, "DB0: Peer device is up.");
+ /* Clear received doorbell. */
+ (*hw->ntb_ops->db_clear)(dev, 1);
+
+ /**
+ * Peer dev is already up. All mw settings are already done.
+ * Skip them.
+ */
+ if (hw->peer_dev_up)
+ return;
+
+ if (hw->ntb_ops->spad_read == NULL) {
+ NTB_LOG(ERR, "Scratchpad read is not supported.");
+ return;
+ }
+
+ /* Check if mw setting on the peer is the same as local. */
+ peer_mw_cnt = (*hw->ntb_ops->spad_read)(dev, SPAD_NUM_MWS, 0);
+ if (peer_mw_cnt != hw->mw_cnt) {
+ NTB_LOG(ERR, "Both mw cnt must be the same.");
+ return;
+ }
+
+ for (i = 0; i < hw->mw_cnt; i++) {
+ /* Reassemble the 64-bit size from the two spad words. */
+ val_h = (*hw->ntb_ops->spad_read)
+ (dev, SPAD_MW0_SZ_H + 2 * i, 0);
+ val_l = (*hw->ntb_ops->spad_read)
+ (dev, SPAD_MW0_SZ_L + 2 * i, 0);
+ peer_mw_size = ((uint64_t)val_h << 32) | val_l;
+ NTB_LOG(DEBUG, "Peer %u mw size: 0x%"PRIx64"", i,
+ peer_mw_size);
+ if (peer_mw_size != hw->mw_size[i]) {
+ NTB_LOG(ERR, "Mw config must be the same.");
+ return;
+ }
+ }
+
+ hw->peer_dev_up = 1;
+
+ /**
+ * Handshake with peer. Spad_write & mw_set_trans only works
+ * when both devices are up. So write spad again when db is
+ * received. And set db again for the later device who may miss
+ * the 1st db.
+ */
+ if (ntb_handshake_work(dev) < 0) {
+ NTB_LOG(ERR, "Handshake work failed.");
+ return;
+ }
+
+ /* To get the link info. */
+ if (hw->ntb_ops->get_link_status == NULL) {
+ NTB_LOG(ERR, "Not supported to get link status.");
+ return;
+ }
+ (*hw->ntb_ops->get_link_status)(dev);
+ NTB_LOG(INFO, "Link is up. Link speed: %u. Link width: %u",
+ hw->link_speed, hw->link_width);
+ return;
+ }
+
+ if (db_bits & (1 << 1)) {
+ NTB_LOG(INFO, "DB1: Peer device is down.");
+ /* Clear received doorbell. */
+ (*hw->ntb_ops->db_clear)(dev, 2);
+
+ /* Peer device will be down, So clean local side too. */
+ ntb_link_cleanup(dev);
+
+ hw->peer_dev_up = 0;
+ /* Response peer's dev_stop request. */
+ (*hw->ntb_ops->peer_db_set)(dev, 2);
+ return;
+ }
+
+ if (db_bits & (1 << 2)) {
+ NTB_LOG(INFO, "DB2: Peer device agrees dev to be down.");
+ /* Clear received doorbell. */
+ (*hw->ntb_ops->db_clear)(dev, (1 << 2));
+ hw->peer_dev_up = 0;
+ return;
+ }
+
+ /* Clear other received doorbells. */
+ (*hw->ntb_ops->db_clear)(dev, db_bits);
+}
+
+/**
+ * Report the current configuration of queue pair @queue_id.
+ *
+ * Fills @queue_conf (a struct ntb_queue_conf) from the stored rx/tx
+ * queue state. Returns 0 on success or -EINVAL when the caller's
+ * structure size does not match.
+ */
+static int
+ntb_queue_conf_get(struct rte_rawdev *dev,
+ uint16_t queue_id,
+ rte_rawdev_obj_t queue_conf,
+ size_t conf_size)
+{
+ struct ntb_queue_conf *q_conf = queue_conf;
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_rx_queue *rxq;
+ struct ntb_tx_queue *txq;
+
+ if (conf_size != sizeof(*q_conf))
+ return -EINVAL;
+
+ rxq = hw->rx_queues[queue_id];
+ txq = hw->tx_queues[queue_id];
+
+ /* Rx side supplies the descriptor count and mempool. */
+ q_conf->nb_desc = rxq->nb_rx_desc;
+ q_conf->rx_mp = rxq->mpool;
+ /* Tx side supplies the free threshold. */
+ q_conf->tx_free_thresh = txq->tx_free_thresh;
+
+ return 0;
+}
+
+/**
+ * Free every mbuf still held in the rx queue's software ring and clear
+ * the corresponding slots. Logs and returns if the queue or its ring
+ * pointer is NULL.
+ */
+static void
+ntb_rxq_release_mbufs(struct ntb_rx_queue *q)
+{
+ uint16_t idx;
+
+ if (q == NULL || q->sw_ring == NULL) {
+ NTB_LOG(ERR, "Pointer to rxq or sw_ring is NULL");
+ return;
+ }
+
+ for (idx = 0; idx < q->nb_rx_desc; idx++) {
+ struct rte_mbuf *m = q->sw_ring[idx].mbuf;
+
+ if (m == NULL)
+ continue;
+ /* Only the segment is freed; the chain tail is not ours. */
+ rte_pktmbuf_free_seg(m);
+ q->sw_ring[idx].mbuf = NULL;
+ }
+}
+
+/**
+ * Tear down an rx queue: release all held mbufs, then free the software
+ * ring and the queue structure itself. A NULL @rxq is logged and ignored.
+ */
+static void
+ntb_rxq_release(struct ntb_rx_queue *rxq)
+{
+ if (rxq == NULL) {
+ NTB_LOG(ERR, "Pointer to rxq is NULL");
+ return;
+ }
+
+ /* Drop any mbufs still referenced by the sw ring first. */
+ ntb_rxq_release_mbufs(rxq);
+ rte_free(rxq->sw_ring);
+ rte_free(rxq);
+}
+
+/**
+ * Allocate and initialize the rx queue for queue pair @qp_id.
+ *
+ * Validates the caller-supplied struct ntb_queue_conf, allocates the
+ * queue structure and its software ring on the device's socket, and
+ * stores the queue in hw->rx_queues[qp_id].
+ *
+ * @return 0 on success, -EINVAL for a bad conf size or NULL mempool,
+ *   -ENOMEM when allocation fails.
+ */
+static int
+ntb_rxq_setup(struct rte_rawdev *dev,
+ uint16_t qp_id,
+ rte_rawdev_obj_t queue_conf,
+ size_t conf_size)
+{
+ struct ntb_queue_conf *rxq_conf = queue_conf;
+ struct ntb_hw *hw = dev->dev_private;
+ struct ntb_rx_queue *rxq;
+
+ if (conf_size != sizeof(*rxq_conf))
+ return -EINVAL;
+
+ /* Validate before allocating so nothing can leak on this path. */
+ if (rxq_conf->rx_mp == NULL) {
+ NTB_LOG(ERR, "Invalid null mempool pointer.");
+ return -EINVAL;
+ }
+
+ /* Allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ntb rx queue",
+ sizeof(struct ntb_rx_queue),
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!rxq) {
+ NTB_LOG(ERR, "Failed to allocate memory for "
+ "rx queue data structure.");
+ return -ENOMEM;
+ }
+
+ rxq->nb_rx_desc = rxq_conf->nb_desc;
+ rxq->mpool = rxq_conf->rx_mp;
+ rxq->port_id = dev->dev_id;
+ rxq->queue_id = qp_id;
+ rxq->hw = hw;
+
+ /* Allocate the software ring. */
+ rxq->sw_ring =
+ rte_zmalloc_socket("ntb rx sw ring",
+ sizeof(struct ntb_rx_entry) *
+ rxq->nb_rx_desc,
+ RTE_CACHE_LINE_SIZE,
+ dev->socket_id);
+ if (!rxq->sw_ring) {
+ /* Frees the mbufs (none yet), the ring and rxq itself. */
+ ntb_rxq_release(rxq);
+ NTB_LOG(ERR, "Failed to allocate memory for SW ring");
+ return -ENOMEM;
+ }
+
+ hw->rx_queues[qp_id] = rxq;
+
+ return 0;
+}
+