xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
if (dev->xae_cnt)
xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
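+ /* Use the XAE count accumulated from event adapters, plus per-queue slack. */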
+ else if (dev->adptr_xae_cnt)
+ xaq_cnt += (dev->adptr_xae_cnt / dev->sso.xae_waes) +
+ (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
else
xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
(CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
return rc;
}
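+/* Resize the XAQ pool after the adapter XAE count changes; the event
+ * device is stopped and restarted around the reallocation if it was running.
+ */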
+int
+cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ int rc = 0;
+
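+ /* Stop the event device before releasing its XAQ resources. */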
+ if (event_dev->data->dev_started)
+ event_dev->dev_ops->dev_stop(event_dev);
+
+ rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
+ if (rc < 0) {
+ plt_err("Failed to release XAQ %d", rc);
+ return rc;
+ }
+
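+ /* Free the old XAQ pool and allocate one sized for the new XAE count. */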
+ rte_mempool_free(dev->xaq_pool);
+ dev->xaq_pool = NULL;
+ rc = cnxk_sso_xaq_allocate(dev);
+ if (rc < 0) {
+ plt_err("Failed to alloc XAQ %d", rc);
+ return rc;
+ }
+
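+ /* Ensure the new XAQ pool is visible before restarting the device. */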
+ rte_mb();
+ if (event_dev->data->dev_started)
+ event_dev->dev_ops->dev_start(event_dev);
+
+ return 0;
+}
+
int
cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
cnxk_sso_init_hws_mem_t init_hws_fn,
uint64_t nb_xaq_cfg;
rte_iova_t fc_iova;
struct rte_mempool *xaq_pool;
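+ /* Adapter XAE accounting */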
+ uint64_t adptr_xae_cnt;
+ uint16_t tim_adptr_ring_cnt;
+ uint16_t *timer_adptr_rings;
+ uint64_t *timer_adptr_sz;
/* Dev args */
uint32_t xae_cnt;
uint8_t qos_queue_cnt;
}
/* Configuration functions */
+int cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev);
int cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev);
+void cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
+ uint32_t event_type);
/* Common ops API. */
int cnxk_sso_init(struct rte_eventdev *event_dev);
--- /dev/null
+++ b/drivers/event/cnxk/cnxk_eventdev_adptr.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2021 Marvell.
+ */
+
+#include "cnxk_eventdev.h"
+
+void
+cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
+ uint32_t event_type)
+{
+ int i;
+
+ switch (event_type) {
+ case RTE_EVENT_TYPE_TIMER: {
+ struct cnxk_tim_ring *timr = data;
+ uint16_t *old_ring_ptr;
+ uint64_t *old_sz_ptr;
+
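+ /* If this timer ring is already tracked, account only for the
+  * change in its timer count.
+  */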
+ for (i = 0; i < dev->tim_adptr_ring_cnt; i++) {
+ if (timr->ring_id != dev->timer_adptr_rings[i])
+ continue;
+ if (timr->nb_timers == dev->timer_adptr_sz[i])
+ return;
+ dev->adptr_xae_cnt -= dev->timer_adptr_sz[i];
+ dev->adptr_xae_cnt += timr->nb_timers;
+ dev->timer_adptr_sz[i] = timr->nb_timers;
+
+ return;
+ }
+
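+ /* New ring: grow the tracking arrays, keeping the old pointers
+  * so they can be restored if realloc fails.
+  */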
+ dev->tim_adptr_ring_cnt++;
+ old_ring_ptr = dev->timer_adptr_rings;
+ old_sz_ptr = dev->timer_adptr_sz;
+
+ dev->timer_adptr_rings = rte_realloc(
+ dev->timer_adptr_rings,
+ sizeof(uint16_t) * dev->tim_adptr_ring_cnt, 0);
+ if (dev->timer_adptr_rings == NULL) {
+ dev->adptr_xae_cnt += timr->nb_timers;
+ dev->timer_adptr_rings = old_ring_ptr;
+ dev->tim_adptr_ring_cnt--;
+ return;
+ }
+
+ dev->timer_adptr_sz = rte_realloc(
+ dev->timer_adptr_sz,
+ sizeof(uint64_t) * dev->tim_adptr_ring_cnt, 0);
+
+ if (dev->timer_adptr_sz == NULL) {
+ dev->adptr_xae_cnt += timr->nb_timers;
+ dev->timer_adptr_sz = old_sz_ptr;
+ dev->tim_adptr_ring_cnt--;
+ return;
+ }
+
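+ /* Record the new ring and add its timers to the adapter XAE count. */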
+ dev->timer_adptr_rings[dev->tim_adptr_ring_cnt - 1] =
+ timr->ring_id;
+ dev->timer_adptr_sz[dev->tim_adptr_ring_cnt - 1] =
+ timr->nb_timers;
+
+ dev->adptr_xae_cnt += timr->nb_timers;
+ break;
+ }
+ default:
+ break;
+ }
+}
plt_write64((uint64_t)tim_ring->bkt, tim_ring->base + TIM_LF_RING_BASE);
plt_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
+ /* Update the SSO XAE count and reconfigure the XAQ pool. */
+ cnxk_sso_updt_xae_cnt(cnxk_sso_pmd_priv(dev->event_dev), tim_ring,
+ RTE_EVENT_TYPE_TIMER);
+ cnxk_sso_xae_reconfigure(dev->event_dev);
+
plt_tim_dbg(
"Total memory used %" PRIu64 "MB\n",
(uint64_t)(((tim_ring->nb_chunks * tim_ring->chunk_sz) +
'cn10k_eventdev.c',
'cn10k_worker.c',
'cnxk_eventdev.c',
+ 'cnxk_eventdev_adptr.c',
'cnxk_eventdev_selftest.c',
'cnxk_eventdev_stats.c',
'cnxk_tim_evdev.c',