net/cnxk: add Tx queue setup and release
[dpdk.git] / drivers / net / cnxk / cn9k_ethdev.c
index 2fb7c14..5c696c8 100644 (file)
@@ -2,6 +2,118 @@
  * Copyright(C) 2021 Marvell.
  */
 #include "cn9k_ethdev.h"
+#include "cn9k_tx.h"
+
+static void
+nix_form_default_desc(struct cnxk_eth_dev *dev, struct cn9k_eth_txq *txq,
+                     uint16_t qid)
+{       /* Pre-build the default single-segment send descriptor in txq->cmd */
+       struct nix_send_ext_s *send_hdr_ext;
+       struct nix_send_hdr_s *send_hdr;
+       union nix_send_sg_s *sg;
+
+       RTE_SET_USED(dev); /* NOTE(review): dev IS read below — this marker looks redundant; confirm */
+
+       /* Initialize the fields based on basic single segment packet */
+       memset(&txq->cmd, 0, sizeof(txq->cmd));
+
+       if (dev->tx_offload_flags & NIX_TX_NEED_EXT_HDR) {
+               send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+               /* 2(HDR) + 2(EXT_HDR) + 1(SG) + 1(IOVA) = 6/2 - 1 = 2 */
+               send_hdr->w0.sizem1 = 2;
+
+               send_hdr_ext = (struct nix_send_ext_s *)&txq->cmd[2];
+               send_hdr_ext->w0.subdc = NIX_SUBDC_EXT;
+               sg = (union nix_send_sg_s *)&txq->cmd[4]; /* SG placed after the 2-word ext header */
+       } else {
+               send_hdr = (struct nix_send_hdr_s *)&txq->cmd[0];
+               /* 2(HDR) + 1(SG) + 1(IOVA) = 4/2 - 1 = 1 */
+               send_hdr->w0.sizem1 = 1;
+               sg = (union nix_send_sg_s *)&txq->cmd[2]; /* SG directly follows the 2-word header */
+       }
+
+       send_hdr->w0.sq = qid;
+       sg->subdc = NIX_SUBDC_SG;
+       sg->segs = 1; /* single-segment default — presumably overwritten per packet in Tx path; verify */
+       sg->ld_type = NIX_SENDLDTYPE_LDD;
+
+       rte_wmb(); /* make the descriptor template visible before queue is used */
+}
+
+static int
+cn9k_nix_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+                       uint16_t nb_desc, unsigned int socket,
+                       const struct rte_eth_txconf *tx_conf)
+{       /* CN9K Tx queue setup: common cnxk setup, then cache fast-path fields. Returns 0 or negative errno. */
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cn9k_eth_txq *txq;
+       struct roc_nix_sq *sq;
+       int rc;
+
+       RTE_SET_USED(socket); /* socket id unused here — presumably consumed by common setup; confirm */
+
+       /* Common Tx queue setup */
+       rc = cnxk_nix_tx_queue_setup(eth_dev, qid, nb_desc,
+                                    sizeof(struct cn9k_eth_txq), tx_conf);
+       if (rc)
+               return rc;
+
+       sq = &dev->sqs[qid]; /* ROC SQ populated by the common setup above */
+       /* Update fast path queue */
+       txq = eth_dev->data->tx_queues[qid];
+       txq->fc_mem = sq->fc; /* flow-control counter location */
+       txq->lmt_addr = sq->lmt_addr;
+       txq->io_addr = sq->io_addr;
+       txq->nb_sqb_bufs_adj = sq->nb_sqb_bufs_adj;
+       txq->sqes_per_sqb_log2 = sq->sqes_per_sqb_log2;
+
+       nix_form_default_desc(dev, txq, qid); /* pre-build the single-seg SQE template */
+       txq->lso_tun_fmt = dev->lso_tun_fmt;
+       return 0;
+}
+
+static int
+cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+                       uint16_t nb_desc, unsigned int socket,
+                       const struct rte_eth_rxconf *rx_conf,
+                       struct rte_mempool *mp)
+{       /* CN9K Rx queue setup: common cnxk setup, then cache CQ/RQ fast-path fields. Returns 0 or negative errno. */
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cn9k_eth_rxq *rxq;
+       struct roc_nix_rq *rq;
+       struct roc_nix_cq *cq;
+       int rc;
+
+       RTE_SET_USED(socket); /* socket id unused here — presumably consumed by common setup; confirm */
+
+       /* CQ Errata needs min 4K ring */
+       if (dev->cq_min_4k && nb_desc < 4096)
+               nb_desc = 4096; /* silently round up; caller-requested size not honored on errata HW */
+
+       /* Common Rx queue setup */
+       rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
+                                    sizeof(struct cn9k_eth_rxq), rx_conf, mp);
+       if (rc)
+               return rc;
+
+       rq = &dev->rqs[qid]; /* RQ/CQ populated by the common setup above */
+       cq = &dev->cqs[qid];
+
+       /* Update fast path queue */
+       rxq = eth_dev->data->rx_queues[qid];
+       rxq->rq = qid;
+       rxq->desc = (uintptr_t)cq->desc_base;
+       rxq->cq_door = cq->door; /* CQ doorbell register address */
+       rxq->cq_status = cq->status;
+       rxq->wdata = cq->wdata;
+       rxq->head = cq->head;
+       rxq->qmask = cq->qmask; /* ring-size mask for CQ index wrap */
+
+       /* Data offset from data to start of mbuf is first_skip */
+       rxq->data_off = rq->first_skip;
+       rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
+       return 0;
+}
 
 static int
 cn9k_nix_configure(struct rte_eth_dev *eth_dev)
@@ -44,6 +156,8 @@ nix_eth_dev_ops_override(void)
 
        /* Update platform specific ops */
        cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
+       cnxk_eth_dev_ops.tx_queue_setup = cn9k_nix_tx_queue_setup;
+       cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
 }
 
 static int