event/cnxk: use common XAQ pool functions
author    Pavan Nikhilesh <pbhagavatula@marvell.com>
          Wed, 3 Nov 2021 00:52:10 +0000 (06:22 +0530)
committer Jerin Jacob <jerinj@marvell.com>
          Thu, 4 Nov 2021 07:41:25 +0000 (08:41 +0100)
Use the common ROC API to create and free the XAQ pool instead of
open-coding the memzone, NPA aura, and mempool setup in the event
device layer.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
drivers/event/cnxk/cn10k_eventdev.c
drivers/event/cnxk/cn9k_eventdev.c
drivers/event/cnxk/cnxk_eventdev.c
drivers/event/cnxk/cnxk_eventdev.h

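In short, cnxk_sso_xaq_allocate() collapses from roughly a hundred lines of
memzone/aura/mempool plumbing to a handful of ROC calls. The following is a
condensed sketch of the new allocation path, using the ROC SSO API exactly as
it appears in the hunks below; the helper name xaq_setup is hypothetical, and
error handling plus the xae_cnt/adptr_xae_cnt adjustments are trimmed:

/* Hypothetical condensed form of the new cnxk_sso_xaq_allocate(). */
static int
xaq_setup(struct cnxk_sso_evdev *dev)
{
	uint32_t xae_cnt = dev->sso.iue;
	int rc;

	/* One ROC call replaces the memzone reserve, NPA aura setup and
	 * rte_mempool create/populate dance that used to live here.
	 */
	rc = roc_sso_hwgrp_init_xaq_aura(&dev->sso, xae_cnt);
	if (rc < 0)
		return rc;

	/* The ROC layer now owns the enqueue limit and the flow-control
	 * memory; the driver just caches them.
	 */
	dev->xaq_lmt = dev->sso.xaq.xaq_lmt;
	dev->fc_iova = (uint64_t)dev->sso.xaq.fc;

	return roc_sso_hwgrp_alloc_xaq(
		&dev->sso,
		roc_npa_aura_handle_to_aura(dev->sso.xaq.aura_handle),
		dev->nb_event_queues);
}

Teardown is symmetric: roc_sso_hwgrp_free_xaq_aura() in dev_validate replaces
the old roc_sso_hwgrp_release_xaq() + rte_mempool_free() pair.
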
diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 2fb4ea878e442e9fd3297fec8f521c86f2e8d7d5..243187576605c558bd7879d906225ee64224580b 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -99,7 +99,7 @@ cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
 
        rte_memcpy(ws->grps_base, grps_base,
                   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
-       ws->fc_mem = dev->fc_mem;
+       ws->fc_mem = (uint64_t *)dev->fc_iova;
        ws->xaq_lmt = dev->xaq_lmt;
 
        /* Set get_work timeout for HWS */
@@ -470,8 +470,6 @@ cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
                return -EINVAL;
        }
 
-       roc_sso_rsrc_fini(&dev->sso);
-
        rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
                                 dev->nb_event_queues);
        if (rc < 0) {
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index d757da7c379a5090cfad0c196ca1e5c03d3356cd..c36433602318c77fba0416bd413139309255ae77 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -100,7 +100,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
                dws = hws;
                rte_memcpy(dws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
-               dws->fc_mem = dev->fc_mem;
+               dws->fc_mem = (uint64_t *)dev->fc_iova;
                dws->xaq_lmt = dev->xaq_lmt;
 
                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
@@ -109,7 +109,7 @@ cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
                ws = hws;
                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
-               ws->fc_mem = dev->fc_mem;
+               ws->fc_mem = (uint64_t *)dev->fc_iova;
                ws->xaq_lmt = dev->xaq_lmt;
 
                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
@@ -729,8 +729,6 @@ cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
                return -EINVAL;
        }
 
-       roc_sso_rsrc_fini(&dev->sso);
-
        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
diff --git a/drivers/event/cnxk/cnxk_eventdev.c b/drivers/event/cnxk/cnxk_eventdev.c
index 9deab0829a92568b89fd824461399cc53017c388..2b9235687a24cc0702a377400bda732e478b576e 100644
--- a/drivers/event/cnxk/cnxk_eventdev.c
+++ b/drivers/event/cnxk/cnxk_eventdev.c
@@ -125,101 +125,28 @@ cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
 int
 cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
 {
-       char pool_name[RTE_MEMZONE_NAMESIZE];
-       uint32_t xaq_cnt, npa_aura_id;
-       const struct rte_memzone *mz;
-       struct npa_aura_s *aura;
-       static int reconfig_cnt;
+       uint32_t xae_cnt;
        int rc;
 
-       if (dev->xaq_pool) {
-               rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
-               if (rc < 0) {
-                       plt_err("Failed to release XAQ %d", rc);
-                       return rc;
-               }
-               rte_mempool_free(dev->xaq_pool);
-               dev->xaq_pool = NULL;
-       }
-
-       /*
-        * Allocate memory for Add work backpressure.
-        */
-       mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
-       if (mz == NULL)
-               mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
-                                                sizeof(struct npa_aura_s) +
-                                                        RTE_CACHE_LINE_SIZE,
-                                                0, 0, RTE_CACHE_LINE_SIZE);
-       if (mz == NULL) {
-               plt_err("Failed to allocate mem for fcmem");
-               return -ENOMEM;
-       }
-
-       dev->fc_iova = mz->iova;
-       dev->fc_mem = mz->addr;
-
-       aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
-                                    RTE_CACHE_LINE_SIZE);
-       memset(aura, 0, sizeof(struct npa_aura_s));
-
-       aura->fc_ena = 1;
-       aura->fc_addr = dev->fc_iova;
-       aura->fc_hyst_bits = 0; /* Store count on all updates */
-
-       /* Taken from HRM 14.3.3(4) */
-       xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
+       xae_cnt = dev->sso.iue;
        if (dev->xae_cnt)
-               xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
-       else if (dev->adptr_xae_cnt)
-               xaq_cnt += (dev->adptr_xae_cnt / dev->sso.xae_waes) +
-                          (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
-       else
-               xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
-                          (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
-
-       plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
-       /* Setup XAQ based on number of nb queues. */
-       snprintf(pool_name, 30, "cnxk_xaq_buf_pool_%d", reconfig_cnt);
-       dev->xaq_pool = (void *)rte_mempool_create_empty(
-               pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
-               rte_socket_id(), 0);
-
-       if (dev->xaq_pool == NULL) {
-               plt_err("Unable to create empty mempool.");
-               rte_memzone_free(mz);
-               return -ENOMEM;
-       }
-
-       rc = rte_mempool_set_ops_byname(dev->xaq_pool,
-                                       rte_mbuf_platform_mempool_ops(), aura);
-       if (rc != 0) {
-               plt_err("Unable to set xaqpool ops.");
-               goto alloc_fail;
-       }
+               xae_cnt += dev->xae_cnt;
+       if (dev->adptr_xae_cnt)
+               xae_cnt += (dev->adptr_xae_cnt);
 
-       rc = rte_mempool_populate_default(dev->xaq_pool);
+       plt_sso_dbg("Configuring %d xae buffers", xae_cnt);
+       rc = roc_sso_hwgrp_init_xaq_aura(&dev->sso, xae_cnt);
        if (rc < 0) {
-               plt_err("Unable to set populate xaqpool.");
-               goto alloc_fail;
+               plt_err("Failed to configure XAQ aura");
+               return rc;
        }
-       reconfig_cnt++;
-       /* When SW does addwork (enqueue) check if there is space in XAQ by
-        * comparing fc_addr above against the xaq_lmt calculated below.
-        * There should be a minimum headroom (CNXK_SSO_XAQ_SLACK / 2) for SSO
-        * to request XAQ to cache them even before enqueue is called.
-        */
-       dev->xaq_lmt =
-               xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
-       dev->nb_xaq_cfg = xaq_cnt;
-
-       npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
-       return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
-                                      dev->nb_event_queues);
-alloc_fail:
-       rte_mempool_free(dev->xaq_pool);
-       rte_memzone_free(mz);
-       return rc;
+       dev->xaq_lmt = dev->sso.xaq.xaq_lmt;
+       dev->fc_iova = (uint64_t)dev->sso.xaq.fc;
+
+       return roc_sso_hwgrp_alloc_xaq(
+               &dev->sso,
+               roc_npa_aura_handle_to_aura(dev->sso.xaq.aura_handle),
+               dev->nb_event_queues);
 }
 
 int
@@ -231,14 +158,6 @@ cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
        if (event_dev->data->dev_started)
                event_dev->dev_ops->dev_stop(event_dev);
 
-       rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
-       if (rc < 0) {
-               plt_err("Failed to release XAQ %d", rc);
-               return rc;
-       }
-
-       rte_mempool_free(dev->xaq_pool);
-       dev->xaq_pool = NULL;
        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0) {
                plt_err("Failed to alloc XAQ %d", rc);
@@ -320,7 +239,6 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;
-       int rc;
 
        deq_tmo_ns = conf->dequeue_timeout_ns;
 
@@ -354,15 +272,8 @@ cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
                return -EINVAL;
        }
 
-       if (dev->xaq_pool) {
-               rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
-               if (rc < 0) {
-                       plt_err("Failed to release XAQ %d", rc);
-                       return rc;
-               }
-               rte_mempool_free(dev->xaq_pool);
-               dev->xaq_pool = NULL;
-       }
+       roc_sso_rsrc_fini(&dev->sso);
+       roc_sso_hwgrp_free_xaq_aura(&dev->sso, dev->sso.nb_hwgrp);
 
        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;
@@ -556,12 +467,8 @@ cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn)
        }
 
        roc_sso_rsrc_fini(&dev->sso);
-       rte_mempool_free(dev->xaq_pool);
-       rte_memzone_free(rte_memzone_lookup(CNXK_SSO_FC_NAME));
 
        dev->fc_iova = 0;
-       dev->fc_mem = NULL;
-       dev->xaq_pool = NULL;
        dev->configured = false;
        dev->is_timeout_deq = 0;
        dev->nb_event_ports = 0;
diff --git a/drivers/event/cnxk/cnxk_eventdev.h b/drivers/event/cnxk/cnxk_eventdev.h
index b57004c0dcf46e143f72ac34a5bfe5c109c8c9a9..957dcf04a46ddbb8197ec4a3cdf3a7f7daa00d17 100644
--- a/drivers/event/cnxk/cnxk_eventdev.h
+++ b/drivers/event/cnxk/cnxk_eventdev.h
@@ -91,11 +91,8 @@ struct cnxk_sso_evdev {
        uint32_t min_dequeue_timeout_ns;
        uint32_t max_dequeue_timeout_ns;
        int32_t max_num_events;
-       uint64_t *fc_mem;
        uint64_t xaq_lmt;
-       uint64_t nb_xaq_cfg;
        rte_iova_t fc_iova;
-       struct rte_mempool *xaq_pool;
        uint64_t rx_offloads;
        uint64_t tx_offloads;
        uint64_t adptr_xae_cnt;
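
The fc_mem/fc_iova rewiring in the cn9k/cn10k hws_setup hunks above feeds the
enqueue-side XAQ backpressure check described by the comment deleted from
cnxk_sso_xaq_allocate(): fc_mem mirrors the aura's buffer count in memory and
is compared against xaq_lmt on every new-event enqueue. A minimal sketch of
that check, assuming the worker struct fields shown above (the real test lives
in the cn9k/cn10k worker code and may differ in detail):

/* Sketch of the addwork backpressure test. ws->fc_mem points at the
 * memory the NPA aura updates with its count (dev->fc_iova), while
 * ws->xaq_lmt keeps headroom so SSO can cache XAQs before enqueue.
 */
static inline int
sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0; /* XAQ exhausted: report no space to the caller */
	/* ... format ev and add the work to the target HWGRP ... */
	return 1;
}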