event/cnxk: add option to disable NPA
author Pavan Nikhilesh <pbhagavatula@marvell.com>
Tue, 4 May 2021 00:27:15 +0000 (05:57 +0530)
committer Jerin Jacob <jerinj@marvell.com>
Tue, 4 May 2021 05:35:00 +0000 (07:35 +0200)
If the chunks are allocated from NPA, TIM can automatically free them
when traversing the list of chunks.

Add a devargs option to disable NPA and use a software mempool to
manage chunks instead.

Example:
--dev "0002:0e:00.0,tim_disable_npa=1"

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
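
As a rough illustration (not part of this patch), the devarg can be passed on
the EAL command line with the allowlist option used in the documentation hunk
below; the PCI address, argv layout and the surrounding setup are placeholders:

/* Sketch only: probe the cnxk event device with the NPA-backed TIM
 * chunk pools disabled. PCI address and argv layout are illustrative. */
#include <rte_common.h>
#include <rte_eal.h>

int
main(int argc, char **argv)
{
	char *eal_args[] = {
		argv[0],
		"-a", "0002:0e:00.0,tim_disable_npa=1",
	};

	(void)argc;
	if (rte_eal_init((int)RTE_DIM(eal_args), eal_args) < 0)
		return -1;

	/* ... event device and event timer adapter setup ... */

	return rte_eal_cleanup();
}
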
doc/guides/eventdevs/cnxk.rst
drivers/event/cnxk/cn10k_eventdev.c
drivers/event/cnxk/cn9k_eventdev.c
drivers/event/cnxk/cnxk_eventdev.h
drivers/event/cnxk/cnxk_tim_evdev.c
drivers/event/cnxk/cnxk_tim_evdev.h

index eb1453c263ac0297acb13f49d0779e1ed18ba496..e0535d8b880fffaf8c27635635d6736af9462cdf 100644 (file)
@@ -93,6 +93,16 @@ Runtime Config Options
 
     -a 0002:0e:00.0,qos=[1-50-50-50]
 
+- ``TIM disable NPA``
+
+  By default, chunks are allocated from NPA so that TIM can automatically
+  free them when traversing the list of chunks. The ``tim_disable_npa``
+  devargs parameter disables NPA and manages chunks with a software mempool.
+
+  For example::
+
+    -a 0002:0e:00.0,tim_disable_npa=1
+
 Debugging Options
 -----------------
 
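
The same devargs string can also be supplied at runtime through EAL hotplug
rather than on the command line; a hedged sketch, where the bus name, PCI
address and the helper attach_cnxk_sso_no_npa() are illustrative and not part
of this patch:

/* Sketch only: hotplug the device with the TIM NPA path disabled. */
#include <rte_dev.h>

static int
attach_cnxk_sso_no_npa(void)
{
	/* Equivalent to "-a 0002:0e:00.0,tim_disable_npa=1" on the
	 * EAL command line. */
	return rte_eal_hotplug_add("pci", "0002:0e:00.0",
				   "tim_disable_npa=1");
}
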
index 0981085e8d83ba5a072edcfcf2b5abf86c428c07..a2ef1fa73150250ad0af837e1b13686aee7870b7 100644 (file)
@@ -502,4 +502,5 @@ RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
 RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
-                             CN10K_SSO_GW_MODE "=<int>");
+                             CN10K_SSO_GW_MODE "=<int>"
+                             CNXK_TIM_DISABLE_NPA "=1");
index d9882ebb967a8334ff7d57cbb3489faf330001bb..3a0caa009d15b42886bebe626045b17b4cb195ee 100644 (file)
@@ -571,4 +571,5 @@ RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
 RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
 RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
-                             CN9K_SSO_SINGLE_WS "=1");
+                             CN9K_SSO_SINGLE_WS "=1"
+                             CNXK_TIM_DISABLE_NPA "=1");
index 1c61063c93761cb28fdc5f10b5f6caa3506f9948..77835e463cc348703f3c9bd6fc4a58f514d5c9bc 100644 (file)
@@ -159,6 +159,15 @@ struct cnxk_sso_hws_cookie {
        bool configured;
 } __rte_cache_aligned;
 
+static inline int
+parse_kvargs_flag(const char *key, const char *value, void *opaque)
+{
+       RTE_SET_USED(key);
+
+       *(uint8_t *)opaque = !!atoi(value);
+       return 0;
+}
+
 static inline int
 parse_kvargs_value(const char *key, const char *value, void *opaque)
 {
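
For a standalone picture of how a flag handler like parse_kvargs_flag() is
driven, here is a hedged sketch of the kvargs round trip; the wrapper
parse_disable_npa() is hypothetical, while the driver performs the equivalent
in cnxk_tim_parse_devargs() further below:

/* Sketch only: parse "tim_disable_npa=1" into a uint8_t flag. */
#include <stdint.h>
#include <stdlib.h>
#include <rte_common.h>
#include <rte_kvargs.h>

static int
flag_handler(const char *key, const char *value, void *opaque)
{
	RTE_SET_USED(key);
	/* Any non-zero value sets the flag; "0" or a non-numeric value
	 * leaves it clear. */
	*(uint8_t *)opaque = !!atoi(value);
	return 0;
}

static uint8_t
parse_disable_npa(const char *args)
{
	struct rte_kvargs *kvlist = rte_kvargs_parse(args, NULL);
	uint8_t disable_npa = 0;

	if (kvlist == NULL)
		return 0;
	rte_kvargs_process(kvlist, "tim_disable_npa", flag_handler,
			   &disable_npa);
	rte_kvargs_free(kvlist);
	return disable_npa;
}

/* parse_disable_npa("tim_disable_npa=1") returns 1. */
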
index 655540a72b14e10ef808bdbb2d9e6fde2bad8b03..d93b37e4f28487065a50e80ce4f96a5ae88e3ef4 100644 (file)
@@ -31,30 +31,43 @@ cnxk_tim_chnk_pool_create(struct cnxk_tim_ring *tim_ring,
                cache_sz = RTE_MEMPOOL_CACHE_MAX_SIZE;
        cache_sz = cache_sz != 0 ? cache_sz : 2;
        tim_ring->nb_chunks += (cache_sz * rte_lcore_count());
-       tim_ring->chunk_pool = rte_mempool_create_empty(
-               pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz, cache_sz, 0,
-               rte_socket_id(), mp_flags);
-
-       if (tim_ring->chunk_pool == NULL) {
-               plt_err("Unable to create chunkpool.");
-               return -ENOMEM;
-       }
+       if (!tim_ring->disable_npa) {
+               tim_ring->chunk_pool = rte_mempool_create_empty(
+                       pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
+                       cache_sz, 0, rte_socket_id(), mp_flags);
+
+               if (tim_ring->chunk_pool == NULL) {
+                       plt_err("Unable to create chunkpool.");
+                       return -ENOMEM;
+               }
 
-       rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
-                                       rte_mbuf_platform_mempool_ops(), NULL);
-       if (rc < 0) {
-               plt_err("Unable to set chunkpool ops");
-               goto free;
-       }
+               rc = rte_mempool_set_ops_byname(tim_ring->chunk_pool,
+                                               rte_mbuf_platform_mempool_ops(),
+                                               NULL);
+               if (rc < 0) {
+                       plt_err("Unable to set chunkpool ops");
+                       goto free;
+               }
 
-       rc = rte_mempool_populate_default(tim_ring->chunk_pool);
-       if (rc < 0) {
-               plt_err("Unable to set populate chunkpool.");
-               goto free;
+               rc = rte_mempool_populate_default(tim_ring->chunk_pool);
+               if (rc < 0) {
+                       plt_err("Unable to populate chunkpool.");
+                       goto free;
+               }
+               tim_ring->aura = roc_npa_aura_handle_to_aura(
+                       tim_ring->chunk_pool->pool_id);
+               tim_ring->ena_dfb = 0;
+       } else {
+               tim_ring->chunk_pool = rte_mempool_create(
+                       pool_name, tim_ring->nb_chunks, tim_ring->chunk_sz,
+                       cache_sz, 0, NULL, NULL, NULL, NULL, rte_socket_id(),
+                       mp_flags);
+               if (tim_ring->chunk_pool == NULL) {
+                       plt_err("Unable to create chunkpool.");
+                       return -ENOMEM;
+               }
+               tim_ring->ena_dfb = 1;
        }
-       tim_ring->aura =
-               roc_npa_aura_handle_to_aura(tim_ring->chunk_pool->pool_id);
-       tim_ring->ena_dfb = 0;
 
        return 0;
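
The split above reflects how the two pools must be built: the NPA-backed pool
is created empty, bound to the platform mempool ops and only then populated
(so the objects land in an NPA aura whose handle feeds tim_ring->aura), while
the software fallback can use plain rte_mempool_create() with the default ring
ops and enables DFB. A compact sketch under those assumptions, with
illustrative names rather than driver code:

/* Sketch only: the two mempool construction styles contrasted. */
#include <stdbool.h>
#include <rte_lcore.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

static struct rte_mempool *
make_chunk_pool(bool hw_backed, unsigned int nb_chunks, unsigned int chunk_sz)
{
	struct rte_mempool *mp;

	if (!hw_backed)
		/* Software pool: default (ring) mempool ops. */
		return rte_mempool_create("tim_chunk_demo", nb_chunks,
					  chunk_sz, 0, 0, NULL, NULL, NULL,
					  NULL, rte_socket_id(), 0);

	/* Hardware pool: bind the platform (NPA) ops before populating. */
	mp = rte_mempool_create_empty("tim_chunk_demo", nb_chunks, chunk_sz,
				      0, 0, rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;
	if (rte_mempool_set_ops_byname(mp, rte_mbuf_platform_mempool_ops(),
				       NULL) != 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}
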
 
@@ -110,8 +123,17 @@ cnxk_tim_ring_create(struct rte_event_timer_adapter *adptr)
        tim_ring->nb_bkts = (tim_ring->max_tout / tim_ring->tck_nsec);
        tim_ring->nb_timers = rcfg->nb_timers;
        tim_ring->chunk_sz = dev->chunk_sz;
+       tim_ring->disable_npa = dev->disable_npa;
+
+       if (tim_ring->disable_npa) {
+               tim_ring->nb_chunks =
+                       tim_ring->nb_timers /
+                       CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
+               tim_ring->nb_chunks = tim_ring->nb_chunks * tim_ring->nb_bkts;
+       } else {
+               tim_ring->nb_chunks = tim_ring->nb_timers;
+       }
 
-       tim_ring->nb_chunks = tim_ring->nb_timers;
        tim_ring->nb_chunk_slots = CNXK_TIM_NB_CHUNK_SLOTS(tim_ring->chunk_sz);
        /* Create buckets. */
        tim_ring->bkt =
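
A rough sizing illustration for the branch above. The slots-per-chunk value
comes from CNXK_TIM_NB_CHUNK_SLOTS(); it is assumed here to be roughly
chunk_sz / 16 - 1, and all concrete numbers are placeholders:

/* Illustrative arithmetic only, not driver code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void
tim_chunk_sizing_example(void)
{
	uint32_t chunk_sz  = 4096;              /* bytes per chunk            */
	uint32_t slots     = chunk_sz / 16 - 1; /* ~255 slots (assumption)    */
	uint32_t nb_timers = 1 << 20;           /* requested timers           */
	uint32_t nb_bkts   = 1000;              /* max_tout / tck_nsec        */
	uint64_t nb_chunks_npa, nb_chunks_sw;

	/* NPA enabled: hardware recycles chunks while traversing the
	 * bucket list, so the pool is simply sized to nb_timers chunks. */
	nb_chunks_npa = nb_timers;                              /* ~1M chunks */

	/* NPA disabled: provision for the worst case of every bucket
	 * holding its own chunk chain, since chunks are only reclaimed
	 * in software. */
	nb_chunks_sw = (uint64_t)(nb_timers / slots) * nb_bkts; /* ~4.1M      */

	printf("npa=%" PRIu64 " sw=%" PRIu64 "\n", nb_chunks_npa, nb_chunks_sw);
}
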
@@ -199,6 +221,24 @@ cnxk_tim_caps_get(const struct rte_eventdev *evdev, uint64_t flags,
        return 0;
 }
 
+static void
+cnxk_tim_parse_devargs(struct rte_devargs *devargs, struct cnxk_tim_evdev *dev)
+{
+       struct rte_kvargs *kvlist;
+
+       if (devargs == NULL)
+               return;
+
+       kvlist = rte_kvargs_parse(devargs->args, NULL);
+       if (kvlist == NULL)
+               return;
+
+       rte_kvargs_process(kvlist, CNXK_TIM_DISABLE_NPA, &parse_kvargs_flag,
+                          &dev->disable_npa);
+
+       rte_kvargs_free(kvlist);
+}
+
 void
 cnxk_tim_init(struct roc_sso *sso)
 {
@@ -217,6 +257,8 @@ cnxk_tim_init(struct roc_sso *sso)
        }
        dev = mz->addr;
 
+       cnxk_tim_parse_devargs(sso->pci_dev->device.devargs, dev);
+
        dev->tim.roc_sso = sso;
        rc = roc_tim_init(&dev->tim);
        if (rc < 0) {
index 2335707cd43576a74d2d3b27912c29adf4a8824e..4896ed67a96c53284a12dc78e017a44c79708112 100644 (file)
 
 #define CN9K_TIM_MIN_TMO_TKS (256)
 
+#define CNXK_TIM_DISABLE_NPA "tim_disable_npa"
+
 struct cnxk_tim_evdev {
        struct roc_tim tim;
        struct rte_eventdev *event_dev;
        uint16_t nb_rings;
        uint32_t chunk_sz;
+       /* Dev args */
+       uint8_t disable_npa;
 };
 
 enum cnxk_tim_clk_src {
@@ -75,6 +79,7 @@ struct cnxk_tim_ring {
        struct rte_mempool *chunk_pool;
        uint64_t arm_cnt;
        uint8_t prod_type_sp;
+       uint8_t disable_npa;
        uint8_t ena_dfb;
        uint16_t ring_id;
        uint32_t aura;