+static int
+ntb_queue_init(struct rte_rawdev *dev, uint16_t qp_id)
+{
+	struct ntb_hw *hw = dev->dev_private;
+	struct ntb_rx_queue *rxq = hw->rx_queues[qp_id];
+	struct ntb_tx_queue *txq = hw->tx_queues[qp_id];
+	volatile struct ntb_header *local_hdr;
+	struct ntb_header *remote_hdr;
+	uint16_t q_size = hw->queue_size;
+	uint32_t hdr_offset;
+	void *bar_addr;
+	uint16_t i;
+
+	if (hw->ntb_ops->get_peer_mw_addr == NULL) {
+		NTB_LOG(ERR, "Getting peer mw addr is not supported.");
+		return -EINVAL;
+	}
+
+	/* Put queue info into the start of shared memory. */
+	hdr_offset = hw->hdr_size_per_queue * qp_id;
+	local_hdr = (volatile struct ntb_header *)
+		    ((size_t)hw->mz[0]->addr + hdr_offset);
+	bar_addr = (*hw->ntb_ops->get_peer_mw_addr)(dev, 0);
+	if (bar_addr == NULL)
+		return -EINVAL;
+	remote_hdr = (struct ntb_header *)
+		     ((size_t)bar_addr + hdr_offset);
+
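+	/*
+	 * Each header holds a ring of q_size descriptors followed by a used
+	 * ring, plus the avail/used counters. Writes the peer must see go
+	 * through remote_hdr (peer memory reached via memory window 0);
+	 * updates from the peer are read from local_hdr.
+	 */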
+	/* rxq init. */
+	rxq->rx_desc_ring = (struct ntb_desc *)
+			    (&remote_hdr->desc_ring);
+	rxq->rx_used_ring = (volatile struct ntb_used *)
+			    (&local_hdr->desc_ring[q_size]);
+	rxq->avail_cnt = &remote_hdr->avail_cnt;
+	rxq->used_cnt = &local_hdr->used_cnt;
+
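+	/*
+	 * Pre-fill all but one descriptor with receive mbufs; keeping one
+	 * slot unused lets a full ring be told apart from an empty one.
+	 */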
+	for (i = 0; i < rxq->nb_rx_desc - 1; i++) {
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mpool);
+		if (unlikely(!mbuf)) {
+			NTB_LOG(ERR, "Failed to allocate mbuf for RX");
+			/* Free mbufs allocated so far to avoid leaking them. */
+			while (i-- > 0) {
+				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
+				rxq->sw_ring[i].mbuf = NULL;
+			}
+			return -ENOMEM;
+		}
+		mbuf->port = dev->dev_id;
+
+		rxq->sw_ring[i].mbuf = mbuf;
+
+		rxq->rx_desc_ring[i].addr = rte_pktmbuf_mtod(mbuf, size_t);
+		rxq->rx_desc_ring[i].len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
+	}
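+	/* Ensure the descriptor writes are visible to the peer before the
+	 * avail count is published.
+	 */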
+	rte_wmb();
+	*rxq->avail_cnt = rxq->nb_rx_desc - 1;
+	rxq->last_avail = rxq->nb_rx_desc - 1;
+	rxq->last_used = 0;
+
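+	/*
+	 * txq is the mirror image: descriptors and the avail count are read
+	 * from the local header (the peer's RX fills them in through its
+	 * memory window), while completions are written into the peer's used
+	 * ring and used count.
+	 */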
+	/* txq init. */
+	txq->tx_desc_ring = (volatile struct ntb_desc *)
+			    (&local_hdr->desc_ring);
+	txq->tx_used_ring = (struct ntb_used *)
+			    (&remote_hdr->desc_ring[q_size]);
+	txq->avail_cnt = &local_hdr->avail_cnt;
+	txq->used_cnt = &remote_hdr->used_cnt;
+
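+	/* Reset the used count in the peer's header so TX starts from slot 0. */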
+	rte_wmb();
+	*txq->used_cnt = 0;
+	txq->last_used = 0;
+	txq->last_avail = 0;
+	txq->nb_tx_free = txq->nb_tx_desc - 1;
+
+	/*
+	 * Reset the per-queue xstats; they follow the first NTB_XSTATS_NUM
+	 * entries, hence the qp_id + 1 offset.
+	 */
+	for (i = 0; i < NTB_XSTATS_NUM; i++) {
+		hw->ntb_xstats[i + NTB_XSTATS_NUM * (qp_id + 1)] = 0;
+		hw->ntb_xstats_off[i + NTB_XSTATS_NUM * (qp_id + 1)] = 0;
+	}
+
+	return 0;
+}
+
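+/* Reclaim TX mbufs for descriptors the peer has finished with. */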
+static inline void
+ntb_enqueue_cleanup(struct ntb_tx_queue *txq)
+{
+	struct ntb_tx_entry *sw_ring = txq->sw_ring;
+	uint16_t tx_free = txq->last_avail;
+	uint16_t nb_to_clean, i;
+
+	/*
+	 * avail_cnt + 1 is where the peer will receive next, so the mbufs for
+	 * all slots before it can be freed. Ring sizes are powers of two, so
+	 * masking with nb_tx_desc - 1 wraps the index.
+	 */
+	nb_to_clean = (*txq->avail_cnt - txq->last_avail + 1 +
+			txq->nb_tx_desc) & (txq->nb_tx_desc - 1);
+	nb_to_clean = RTE_MIN(nb_to_clean, txq->tx_free_thresh);
+	for (i = 0; i < nb_to_clean; i++) {
+		if (sw_ring[tx_free].mbuf)
+			rte_pktmbuf_free_seg(sw_ring[tx_free].mbuf);
+		tx_free = (tx_free + 1) & (txq->nb_tx_desc - 1);
+	}
+
+	txq->nb_tx_free += nb_to_clean;
+	txq->last_avail = tx_free;
+}
+