git.droids-corp.org - dpdk.git/commitdiff
net/cnxk: add Rx queue setup and release
authorNithin Dabilpuram <ndabilpuram@marvell.com>
Wed, 23 Jun 2021 04:46:15 +0000 (10:16 +0530)
committerJerin Jacob <jerinj@marvell.com>
Tue, 29 Jun 2021 19:56:32 +0000 (21:56 +0200)
Add Rx queue setup and release ops for the CN9K and CN10K
SoCs. The release op is completely common, while setup is
platform dependent due to variation in the fast path Rx queue
structure. The fast path differs between platforms partly
because of the difference in core cacheline size.

Signed-off-by: Nithin Dabilpuram <ndabilpuram@marvell.com>
doc/guides/nics/features/cnxk.ini
doc/guides/nics/features/cnxk_vec.ini
doc/guides/nics/features/cnxk_vf.ini
drivers/net/cnxk/cn10k_ethdev.c
drivers/net/cnxk/cn10k_ethdev.h
drivers/net/cnxk/cn9k_ethdev.c
drivers/net/cnxk/cn9k_ethdev.h
drivers/net/cnxk/cnxk_ethdev.c
drivers/net/cnxk/cnxk_ethdev.h

index affbbd95d18b412a8590477dc9581adb1e614c44..a9d2b0388516af0b87219d666694fa4fce60ce12 100644 (file)
@@ -10,6 +10,7 @@ SR-IOV               = Y
 Multiprocess aware   = Y
 Link status          = Y
 Link status event    = Y
+Runtime Rx queue setup = Y
 RSS hash             = Y
 Inner RSS            = Y
 Linux                = Y
index 836cc9f822a8006a2f274e191a300c50d3266b2d..6a8ca1f294ab33f3e219d9b300166ceb1d28e772 100644 (file)
@@ -10,6 +10,7 @@ SR-IOV               = Y
 Multiprocess aware   = Y
 Link status          = Y
 Link status event    = Y
+Runtime Rx queue setup = Y
 RSS hash             = Y
 Inner RSS            = Y
 Linux                = Y
index 29bb24f8c92805e18149e20b05382a62af838b54..f761638faa95819f2eb342098b3d27b27ae73c03 100644 (file)
@@ -9,6 +9,7 @@ Lock-free Tx queue   = Y
 Multiprocess aware   = Y
 Link status          = Y
 Link status event    = Y
+Runtime Rx queue setup = Y
 RSS hash             = Y
 Inner RSS            = Y
 Linux                = Y
index d971bbdc0624672fc3cf2b9144e053403295e7be..b87c4e5dc7293d9bbd9b41f0f005b895ea25f917 100644 (file)
@@ -3,6 +3,49 @@
  */
 #include "cn10k_ethdev.h"
 
+static int
+cn10k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+                        uint16_t nb_desc, unsigned int socket,
+                        const struct rte_eth_rxconf *rx_conf,
+                        struct rte_mempool *mp)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cn10k_eth_rxq *rxq;
+       struct roc_nix_rq *rq;
+       struct roc_nix_cq *cq;
+       int rc;
+
+       RTE_SET_USED(socket);
+
+       /* CQ Errata needs min 4K ring */
+       if (dev->cq_min_4k && nb_desc < 4096)
+               nb_desc = 4096;
+
+       /* Common Rx queue setup */
+       rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
+                                    sizeof(struct cn10k_eth_rxq), rx_conf, mp);
+       if (rc)
+               return rc;
+
+       rq = &dev->rqs[qid];
+       cq = &dev->cqs[qid];
+
+       /* Update fast path queue */
+       rxq = eth_dev->data->rx_queues[qid];
+       rxq->rq = qid;
+       rxq->desc = (uintptr_t)cq->desc_base;
+       rxq->cq_door = cq->door;
+       rxq->cq_status = cq->status;
+       rxq->wdata = cq->wdata;
+       rxq->head = cq->head;
+       rxq->qmask = cq->qmask;
+
+       /* Data offset from data to start of mbuf is first_skip */
+       rxq->data_off = rq->first_skip;
+       rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
+       return 0;
+}
+
 static int
 cn10k_nix_configure(struct rte_eth_dev *eth_dev)
 {
@@ -33,6 +76,7 @@ nix_eth_dev_ops_override(void)
 
        /* Update platform specific ops */
        cnxk_eth_dev_ops.dev_configure = cn10k_nix_configure;
+       cnxk_eth_dev_ops.rx_queue_setup = cn10k_nix_rx_queue_setup;
 }
 
 static int
index 1bf4a65e62337f3a654cd480e7e25e0e8c403102..08e11bbbe90b22f8b9a5538e6843bb34a3d706ee 100644 (file)
@@ -6,4 +6,18 @@
 
 #include <cnxk_ethdev.h>
 
+/* CN10K fast path Rx queue: mirror of the ROC CQ/RQ state needed by the
+ * Rx burst routines. Filled in by cn10k_nix_rx_queue_setup(); stored in
+ * eth_dev->data->rx_queues[] immediately after struct cnxk_eth_rxq_sp.
+ */
+struct cn10k_eth_rxq {
+       uint64_t mbuf_initializer; /* Precomputed rearm_data word (cnxk_nix_rxq_mbuf_setup) */
+       uintptr_t desc;            /* CQ descriptor ring base (cq->desc_base) */
+       void *lookup_mem;          /* Not initialized by this commit */
+       uintptr_t cq_door;         /* CQ doorbell address (cq->door) */
+       uint64_t wdata;            /* CQ status poll write data (cq->wdata) */
+       int64_t *cq_status;        /* CQ status pointer (cq->status) */
+       uint32_t head;             /* CQ head index at setup time (cq->head) */
+       uint32_t qmask;            /* CQ ring size mask (cq->qmask) */
+       uint32_t available;        /* Not initialized by this commit */
+       uint16_t data_off;         /* Offset from mbuf start to packet data (rq->first_skip) */
+       uint16_t rq;               /* Rx queue id */
+} __plt_cache_aligned;
+
 #endif /* __CN10K_ETHDEV_H__ */
index 2fb7c144e28da59833f789f0a335e6cb0896b646..2ab035ed9d56e8c081164e02667021cbcd8408e2 100644 (file)
@@ -3,6 +3,49 @@
  */
 #include "cn9k_ethdev.h"
 
+/* CN9K Rx queue setup op.
+ *
+ * Runs the common cnxk Rx queue setup (which allocates the queue area and
+ * initializes the ROC CQ/RQ), then copies the resulting CQ/RQ state into
+ * the CN9K fast path queue structure used by the Rx burst routines.
+ *
+ * Returns 0 on success or a negative error code from the common setup.
+ * The NUMA socket argument is unused; allocation is done by the common
+ * setup via the platform allocator.
+ */
+static int
+cn9k_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+                       uint16_t nb_desc, unsigned int socket,
+                       const struct rte_eth_rxconf *rx_conf,
+                       struct rte_mempool *mp)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cn9k_eth_rxq *rxq;
+       struct roc_nix_rq *rq;
+       struct roc_nix_cq *cq;
+       int rc;
+
+       RTE_SET_USED(socket);
+
+       /* CQ Errata needs min 4K ring */
+       if (dev->cq_min_4k && nb_desc < 4096)
+               nb_desc = 4096;
+
+       /* Common Rx queue setup */
+       rc = cnxk_nix_rx_queue_setup(eth_dev, qid, nb_desc,
+                                    sizeof(struct cn9k_eth_rxq), rx_conf, mp);
+       if (rc)
+               return rc;
+
+       rq = &dev->rqs[qid];
+       cq = &dev->cqs[qid];
+
+       /* Update fast path queue */
+       /* Common setup stored the fast path area at rx_queues[qid] */
+       rxq = eth_dev->data->rx_queues[qid];
+       rxq->rq = qid;
+       rxq->desc = (uintptr_t)cq->desc_base;
+       rxq->cq_door = cq->door;
+       rxq->cq_status = cq->status;
+       rxq->wdata = cq->wdata;
+       rxq->head = cq->head;
+       rxq->qmask = cq->qmask;
+
+       /* Data offset from data to start of mbuf is first_skip */
+       rxq->data_off = rq->first_skip;
+       /* Precompute the mbuf rearm word for fast path mbuf init */
+       rxq->mbuf_initializer = cnxk_nix_rxq_mbuf_setup(dev);
+       return 0;
+}
+
 static int
 cn9k_nix_configure(struct rte_eth_dev *eth_dev)
 {
@@ -44,6 +87,7 @@ nix_eth_dev_ops_override(void)
 
        /* Update platform specific ops */
        cnxk_eth_dev_ops.dev_configure = cn9k_nix_configure;
+       cnxk_eth_dev_ops.rx_queue_setup = cn9k_nix_rx_queue_setup;
 }
 
 static int
index 15d9397fce8ce808d224a1ffd69479efeba9236a..6384609082e08b61e842fd5717446464525a7d0b 100644 (file)
@@ -6,4 +6,18 @@
 
 #include <cnxk_ethdev.h>
 
+/* CN9K fast path Rx queue: mirror of the ROC CQ/RQ state needed by the
+ * Rx burst routines. Filled in by cn9k_nix_rx_queue_setup(); stored in
+ * eth_dev->data->rx_queues[] immediately after struct cnxk_eth_rxq_sp.
+ * Note: data_off is 64-bit here, unlike the 16-bit field on CN10K —
+ * presumably a layout choice for this platform; confirm against datapath.
+ */
+struct cn9k_eth_rxq {
+       uint64_t mbuf_initializer; /* Precomputed rearm_data word (cnxk_nix_rxq_mbuf_setup) */
+       uint64_t data_off;         /* Offset from mbuf start to packet data (rq->first_skip) */
+       uintptr_t desc;            /* CQ descriptor ring base (cq->desc_base) */
+       void *lookup_mem;          /* Not initialized by this commit */
+       uintptr_t cq_door;         /* CQ doorbell address (cq->door) */
+       uint64_t wdata;            /* CQ status poll write data (cq->wdata) */
+       int64_t *cq_status;        /* CQ status pointer (cq->status) */
+       uint32_t head;             /* CQ head index at setup time (cq->head) */
+       uint32_t qmask;            /* CQ ring size mask (cq->qmask) */
+       uint32_t available;        /* Not initialized by this commit */
+       uint16_t rq;               /* Rx queue id */
+} __plt_cache_aligned;
+
 #endif /* __CN9K_ETHDEV_H__ */
index ea49809d0f20f0776cce4452b5c022bd0ff718a5..2775fe4eb043489f23d7de87ebbe1487e3181d94 100644 (file)
@@ -37,6 +37,177 @@ nix_get_speed_capa(struct cnxk_eth_dev *dev)
        return speed_capa;
 }
 
+/* Build the 64-bit mbuf "rearm" word used to initialize Rx mbufs in the
+ * fast path: a template rte_mbuf is populated (default data offset,
+ * refcnt 1, single segment, this port's id) and the 8-byte rearm_data
+ * region covering those four fields is returned as one word. The
+ * build-time asserts pin the rte_mbuf field offsets this packing relies
+ * on (data_off at +0, refcnt at +2, nb_segs at +4, port at +6).
+ */
+uint64_t
+cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev)
+{
+       uint16_t port_id = dev->eth_dev->data->port_id;
+       struct rte_mbuf mb_def;
+       uint64_t *tmp;
+
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, data_off) % 8 != 0);
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, refcnt) -
+                                offsetof(struct rte_mbuf, data_off) !=
+                        2);
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, nb_segs) -
+                                offsetof(struct rte_mbuf, data_off) !=
+                        4);
+       RTE_BUILD_BUG_ON(offsetof(struct rte_mbuf, port) -
+                                offsetof(struct rte_mbuf, data_off) !=
+                        6);
+       mb_def.nb_segs = 1;
+       mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+       mb_def.port = port_id;
+       rte_mbuf_refcnt_set(&mb_def, 1);
+
+       /* Prevent compiler reordering: rearm_data covers previous fields */
+       rte_compiler_barrier();
+       tmp = (uint64_t *)&mb_def.rearm_data;
+
+       return *tmp;
+}
+
+/* Common (CN9K/CN10K) Rx queue setup.
+ *
+ * Validates the configuration, initializes the ROC CQ and RQ for queue
+ * 'qid', and allocates one combined queue area: a struct cnxk_eth_rxq_sp
+ * (slow path) immediately followed by 'fp_rx_q_sz' bytes for the
+ * platform-specific fast path structure. The pointer stored in
+ * eth_dev->data->rx_queues[qid] points at the fast path part; the
+ * platform setup callers fill it in afterwards.
+ *
+ * Returns 0 on success or a negative error code (-EINVAL on bad config,
+ * -ENOMEM on allocation failure, or a ROC error from CQ/RQ init). On
+ * failure, any CQ/RQ already initialized is torn down again.
+ */
+int
+cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+                       uint16_t nb_desc, uint16_t fp_rx_q_sz,
+                       const struct rte_eth_rxconf *rx_conf,
+                       struct rte_mempool *mp)
+{
+       struct cnxk_eth_dev *dev = cnxk_eth_pmd_priv(eth_dev);
+       struct cnxk_eth_rxq_sp *rxq_sp;
+       struct rte_mempool_ops *ops;
+       const char *platform_ops;
+       struct roc_nix_rq *rq;
+       struct roc_nix_cq *cq;
+       uint16_t first_skip;
+       int rc = -EINVAL;
+       size_t rxq_sz;
+
+       /* Sanity checks */
+       if (rx_conf->rx_deferred_start == 1) {
+               plt_err("Deferred Rx start is not supported");
+               goto fail;
+       }
+
+       platform_ops = rte_mbuf_platform_mempool_ops();
+       /* This driver needs cnxk_npa mempool ops to work */
+       ops = rte_mempool_get_ops(mp->ops_index);
+       if (strncmp(ops->name, platform_ops, RTE_MEMPOOL_OPS_NAMESIZE)) {
+               plt_err("mempool ops should be of cnxk_npa type");
+               goto fail;
+       }
+
+       /* pool_id doubles as the NPA aura handle below; 0 is invalid */
+       if (mp->pool_id == 0) {
+               plt_err("Invalid pool_id");
+               goto fail;
+       }
+
+       /* Free memory prior to re-allocation if needed */
+       if (eth_dev->data->rx_queues[qid] != NULL) {
+               const struct eth_dev_ops *dev_ops = eth_dev->dev_ops;
+
+               plt_nix_dbg("Freeing memory prior to re-allocation %d", qid);
+               dev_ops->rx_queue_release(eth_dev->data->rx_queues[qid]);
+               eth_dev->data->rx_queues[qid] = NULL;
+       }
+
+       /* Setup ROC CQ */
+       cq = &dev->cqs[qid];
+       cq->qid = qid;
+       cq->nb_desc = nb_desc;
+       rc = roc_nix_cq_init(&dev->nix, cq);
+       if (rc) {
+               plt_err("Failed to init roc cq for rq=%d, rc=%d", qid, rc);
+               goto fail;
+       }
+
+       /* Setup ROC RQ */
+       rq = &dev->rqs[qid];
+       rq->qid = qid;
+       rq->aura_handle = mp->pool_id;
+       rq->flow_tag_width = 32;
+       rq->sso_ena = false;
+
+       /* Calculate first mbuf skip */
+       first_skip = (sizeof(struct rte_mbuf));
+       first_skip += RTE_PKTMBUF_HEADROOM;
+       first_skip += rte_pktmbuf_priv_size(mp);
+       rq->first_skip = first_skip;
+       rq->later_skip = sizeof(struct rte_mbuf);
+       rq->lpb_size = mp->elt_size;
+
+       rc = roc_nix_rq_init(&dev->nix, rq, !!eth_dev->data->dev_started);
+       if (rc) {
+               plt_err("Failed to init roc rq for rq=%d, rc=%d", qid, rc);
+               goto cq_fini;
+       }
+
+       /* Allocate and setup fast path rx queue */
+       rc = -ENOMEM;
+       rxq_sz = sizeof(struct cnxk_eth_rxq_sp) + fp_rx_q_sz;
+       rxq_sp = plt_zmalloc(rxq_sz, PLT_CACHE_LINE_SIZE);
+       if (!rxq_sp) {
+               plt_err("Failed to alloc rx queue for rq=%d", qid);
+               goto rq_fini;
+       }
+
+       /* Setup slow path fields */
+       rxq_sp->dev = dev;
+       rxq_sp->qid = qid;
+       rxq_sp->qconf.conf.rx = *rx_conf;
+       rxq_sp->qconf.nb_desc = nb_desc;
+       rxq_sp->qconf.mp = mp;
+
+       /* nb_desc may have been raised by CQ init (e.g. errata min ring) */
+       plt_nix_dbg("rq=%d pool=%s nb_desc=%d->%d", qid, mp->name, nb_desc,
+                   cq->nb_desc);
+
+       /* Store start of fast path area */
+       eth_dev->data->rx_queues[qid] = rxq_sp + 1;
+       eth_dev->data->rx_queue_state[qid] = RTE_ETH_QUEUE_STATE_STOPPED;
+
+       return 0;
+rq_fini:
+       /* Fold any cleanup failure into the returned error code */
+       rc |= roc_nix_rq_fini(rq);
+cq_fini:
+       rc |= roc_nix_cq_fini(cq);
+fail:
+       return rc;
+}
+
+/* Rx queue release op, shared by all platforms.
+ *
+ * Tears down the ROC RQ and CQ for the queue and frees the combined slow
+ * path + fast path area allocated in cnxk_nix_rx_queue_setup(). 'rxq' is
+ * the fast path pointer stored in eth_dev->data->rx_queues[];
+ * cnxk_eth_rxq_to_sp() is expected to step back from it to the preceding
+ * slow path header (setup stored rxq_sp + 1) — confirm against its
+ * definition. Cleanup failures are logged but not propagated, since the
+ * ethdev release op returns void.
+ */
+static void
+cnxk_nix_rx_queue_release(void *rxq)
+{
+       struct cnxk_eth_rxq_sp *rxq_sp;
+       struct cnxk_eth_dev *dev;
+       struct roc_nix_rq *rq;
+       struct roc_nix_cq *cq;
+       uint16_t qid;
+       int rc;
+
+       if (!rxq)
+               return;
+
+       rxq_sp = cnxk_eth_rxq_to_sp(rxq);
+       dev = rxq_sp->dev;
+       qid = rxq_sp->qid;
+
+       plt_nix_dbg("Releasing rxq %u", qid);
+
+       /* Cleanup ROC RQ */
+       rq = &dev->rqs[qid];
+       rc = roc_nix_rq_fini(rq);
+       if (rc)
+               plt_err("Failed to cleanup rq, rc=%d", rc);
+
+       /* Cleanup ROC CQ */
+       cq = &dev->cqs[qid];
+       rc = roc_nix_cq_fini(cq);
+       if (rc)
+               plt_err("Failed to cleanup cq, rc=%d", rc);
+
+       /* Finally free fast path area */
+       plt_free(rxq_sp);
+}
+
 uint32_t
 cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,
                       uint8_t rss_level)
@@ -602,6 +773,7 @@ fail_configure:
 struct eth_dev_ops cnxk_eth_dev_ops = {
        .dev_infos_get = cnxk_nix_info_get,
        .link_update = cnxk_nix_link_update,
+       .rx_queue_release = cnxk_nix_rx_queue_release,
 };
 
 static int
index daa87af04157c417e8ac5fc76c9613ce256b5711..4a7c2ca206311c98b209a89ebb690d6cc684fc3e 100644 (file)
@@ -10,6 +10,9 @@
 #include <ethdev_driver.h>
 #include <ethdev_pci.h>
 #include <rte_kvargs.h>
+#include <rte_mbuf.h>
+#include <rte_mbuf_pool_ops.h>
+#include <rte_mempool.h>
 
 #include "roc_api.h"
 
@@ -194,6 +197,12 @@ int cnxk_nix_remove(struct rte_pci_device *pci_dev);
 int cnxk_nix_info_get(struct rte_eth_dev *eth_dev,
                      struct rte_eth_dev_info *dev_info);
 int cnxk_nix_configure(struct rte_eth_dev *eth_dev);
+int cnxk_nix_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t qid,
+                           uint16_t nb_desc, uint16_t fp_rx_q_sz,
+                           const struct rte_eth_rxconf *rx_conf,
+                           struct rte_mempool *mp);
+
+uint64_t cnxk_nix_rxq_mbuf_setup(struct cnxk_eth_dev *dev);
 
 /* RSS */
 uint32_t cnxk_rss_ethdev_to_nix(struct cnxk_eth_dev *dev, uint64_t ethdev_rss,