event/cnxk: allocate event in-flight buffers
author    Pavan Nikhilesh <pbhagavatula@marvell.com>
          Tue, 4 May 2021 00:26:59 +0000 (05:56 +0530)
committer Jerin Jacob <jerinj@marvell.com>
          Tue, 4 May 2021 03:42:18 +0000 (05:42 +0200)
Allocate buffers in DRAM to hold in-flight events.

Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
drivers/event/cnxk/cn10k_eventdev.c
drivers/event/cnxk/cn9k_eventdev.c
drivers/event/cnxk/cnxk_eventdev.c
drivers/event/cnxk/cnxk_eventdev.h

drivers/event/cnxk/cn10k_eventdev.c
index 557f26b..9c5ddea 100644
@@ -55,6 +55,13 @@ cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
                return -ENODEV;
        }
 
+       rc = cnxk_sso_xaq_allocate(dev);
+       if (rc < 0)
+               goto cnxk_rsrc_fini;
+
+       return 0;
+cnxk_rsrc_fini:
+       roc_sso_rsrc_fini(&dev->sso);
        return rc;
 }
 
drivers/event/cnxk/cn9k_eventdev.c
index eba1bfb..954fea0 100644
@@ -63,6 +63,13 @@ cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
                return -ENODEV;
        }
 
+       rc = cnxk_sso_xaq_allocate(dev);
+       if (rc < 0)
+               goto cnxk_rsrc_fini;
+
+       return 0;
+cnxk_rsrc_fini:
+       roc_sso_rsrc_fini(&dev->sso);
        return rc;
 }
 
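For context, the following minimal sketch (illustrative only; the device id and the queue/port counts are assumptions, not part of this patch) shows the application-side rte_event_dev_configure() call that ends up invoking the cn9k/cn10k dev_configure hooks above, and hence cnxk_sso_xaq_allocate():

#include <stdint.h>
#include <string.h>
#include <rte_eventdev.h>

/* Illustrative helper, not part of this patch. */
static int
configure_sso_evdev(uint8_t dev_id)
{
	struct rte_event_dev_config conf;
	struct rte_event_dev_info info;
	int rc;

	rc = rte_event_dev_info_get(dev_id, &info);
	if (rc < 0)
		return rc;

	memset(&conf, 0, sizeof(conf));
	conf.nb_event_queues = 2;                  /* assumed value */
	conf.nb_event_ports = 2;                   /* assumed value */
	conf.nb_events_limit = info.max_num_events;
	conf.nb_event_queue_flows = info.max_event_queue_flows;
	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	/* Calls into the PMD's dev_configure op, where the driver now
	 * allocates the XAQ in-flight buffers. */
	return rte_event_dev_configure(dev_id, &conf);
}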
drivers/event/cnxk/cnxk_eventdev.c
index e22479a..34a8bce 100644
@@ -28,12 +28,107 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
 }
 
+int
+cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
+{
+       char pool_name[RTE_MEMZONE_NAMESIZE];
+       uint32_t xaq_cnt, npa_aura_id;
+       const struct rte_memzone *mz;
+       struct npa_aura_s *aura;
+       static int reconfig_cnt;
+       int rc;
+
+       if (dev->xaq_pool) {
+               rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
+               if (rc < 0) {
+                       plt_err("Failed to release XAQ %d", rc);
+                       return rc;
+               }
+               rte_mempool_free(dev->xaq_pool);
+               dev->xaq_pool = NULL;
+       }
+
+       /*
+        * Allocate memory for Add work backpressure.
+        */
+       mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
+       if (mz == NULL)
+               mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
+                                                sizeof(struct npa_aura_s) +
+                                                        RTE_CACHE_LINE_SIZE,
+                                                0, 0, RTE_CACHE_LINE_SIZE);
+       if (mz == NULL) {
+               plt_err("Failed to allocate mem for fcmem");
+               return -ENOMEM;
+       }
+
+       dev->fc_iova = mz->iova;
+       dev->fc_mem = mz->addr;
+
+       aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
+                                    RTE_CACHE_LINE_SIZE);
+       memset(aura, 0, sizeof(struct npa_aura_s));
+
+       aura->fc_ena = 1;
+       aura->fc_addr = dev->fc_iova;
+       aura->fc_hyst_bits = 0; /* Store count on all updates */
+
+       /* Taken from HRM 14.3.3(4) */
+       xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
+       xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
+                  (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
+
+       plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
+       /* Set up XAQ based on the number of event queues. */
+       snprintf(pool_name, 30, "cnxk_xaq_buf_pool_%d", reconfig_cnt);
+       dev->xaq_pool = (void *)rte_mempool_create_empty(
+               pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
+               rte_socket_id(), 0);
+
+       if (dev->xaq_pool == NULL) {
+               plt_err("Unable to create empty mempool.");
+               rte_memzone_free(mz);
+               return -ENOMEM;
+       }
+
+       rc = rte_mempool_set_ops_byname(dev->xaq_pool,
+                                       rte_mbuf_platform_mempool_ops(), aura);
+       if (rc != 0) {
+               plt_err("Unable to set xaqpool ops.");
+               goto alloc_fail;
+       }
+
+       rc = rte_mempool_populate_default(dev->xaq_pool);
+       if (rc < 0) {
+               plt_err("Unable to populate xaqpool.");
+               goto alloc_fail;
+       }
+       reconfig_cnt++;
+       /* When SW does addwork (enqueue) check if there is space in XAQ by
+        * comparing fc_addr above against the xaq_lmt calculated below.
+        * There should be a minimum headroom (CNXK_SSO_XAQ_SLACK / 2) for SSO
+        * to request XAQ to cache them even before enqueue is called.
+        */
+       dev->xaq_lmt =
+               xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
+       dev->nb_xaq_cfg = xaq_cnt;
+
+       npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
+       return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
+                                      dev->nb_event_queues);
+alloc_fail:
+       rte_mempool_free(dev->xaq_pool);
+       rte_memzone_free(mz);
+       return rc;
+}
+
 int
 cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
 {
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;
+       int rc;
 
        deq_tmo_ns = conf->dequeue_timeout_ns;
 
@@ -67,6 +162,16 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
                return -EINVAL;
        }
 
+       if (dev->xaq_pool) {
+               rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
+               if (rc < 0) {
+                       plt_err("Failed to release XAQ %d", rc);
+                       return rc;
+               }
+               rte_mempool_free(dev->xaq_pool);
+               dev->xaq_pool = NULL;
+       }
+
        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;
 
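The comment ahead of the xaq_lmt calculation above describes the enqueue-time flow control: before issuing add-work, software compares the in-flight count that the NPA aura maintains at fc_mem against xaq_lmt. A minimal sketch of such a check follows (illustrative only; the real enqueue path lives in the cn9k/cn10k worker code and is not part of this hunk):

/* Illustrative helper, not part of this patch: returns non-zero when there
 * is room to add work, per the fc_mem/xaq_lmt comparison described above. */
static inline int
sso_xaq_has_space(const struct cnxk_sso_evdev *dev)
{
	/* fc_mem is updated by hardware (aura->fc_addr points to it), so
	 * read it atomically rather than through a cached plain load. */
	uint64_t in_flight = __atomic_load_n(dev->fc_mem, __ATOMIC_RELAXED);

	return in_flight < dev->xaq_lmt;
}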
drivers/event/cnxk/cnxk_eventdev.h
index 426219c..4abe454 100644
@@ -5,6 +5,7 @@
 #ifndef __CNXK_EVENTDEV_H__
 #define __CNXK_EVENTDEV_H__
 
+#include <rte_mbuf_pool_ops.h>
 #include <rte_pci.h>
 
 #include <eventdev_pmd_pci.h>
 
 #define USEC2NSEC(__us) ((__us)*1E3)
 
-#define CNXK_SSO_MZ_NAME "cnxk_evdev_mz"
+#define CNXK_SSO_FC_NAME       "cnxk_evdev_xaq_fc"
+#define CNXK_SSO_MZ_NAME       "cnxk_evdev_mz"
+#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
+#define CNXK_SSO_XAQ_SLACK     (8)
 
 struct cnxk_sso_evdev {
        struct roc_sso sso;
@@ -26,6 +30,11 @@ struct cnxk_sso_evdev {
        uint32_t min_dequeue_timeout_ns;
        uint32_t max_dequeue_timeout_ns;
        int32_t max_num_events;
+       uint64_t *fc_mem;
+       uint64_t xaq_lmt;
+       uint64_t nb_xaq_cfg;
+       rte_iova_t fc_iova;
+       struct rte_mempool *xaq_pool;
        /* CN9K */
        uint8_t dual_ws;
 } __rte_cache_aligned;
@@ -36,6 +45,9 @@ cnxk_sso_pmd_priv(const struct rte_eventdev *event_dev)
        return event_dev->data->dev_private;
 }
 
+/* Configuration functions */
+int cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev);
+
 /* Common ops API. */
 int cnxk_sso_init(struct rte_eventdev *event_dev);
 int cnxk_sso_fini(struct rte_eventdev *event_dev);
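As a worked example of the sizing above (the hardware parameters here are assumptions for illustration, not values from this patch): with nb_event_queues = 2, sso.iue = 4096 and sso.xae_waes = 32, the buffer count is xaq_cnt = 2 * CNXK_SSO_XAQ_CACHE_CNT + 4096 / 32 + CNXK_SSO_XAQ_SLACK * 2 = 14 + 128 + 16 = 158, and the software add-work limit becomes xaq_lmt = 158 - (CNXK_SSO_XAQ_SLACK / 2) * 2 = 150, leaving the remaining 8 buffers as headroom for the SSO to cache XAQ entries before enqueue is called.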