SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_worker_dual.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_worker.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_adptr.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_tim_evdev.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_selftest.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_OCTEONTX2_EVENTDEV) += otx2_evdev_irq.c
sources = files('otx2_worker.c',
'otx2_worker_dual.c',
'otx2_evdev.c',
+ 'otx2_evdev_adptr.c',
'otx2_evdev_irq.c',
'otx2_evdev_selftest.c',
'otx2_tim_evdev.c',
xaq_cnt = dev->nb_event_queues * OTX2_SSO_XAQ_CACHE_CNT;
if (dev->xae_cnt)
xaq_cnt += dev->xae_cnt / dev->xae_waes;
+ else if (dev->adptr_xae_cnt)
+ xaq_cnt += (dev->adptr_xae_cnt / dev->xae_waes) +
+ (OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
else
xaq_cnt += (dev->iue / dev->xae_waes) +
(OTX2_SSO_XAQ_SLACK * dev->nb_event_queues);
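For illustration, a minimal standalone sketch of the sizing rule above using made-up numbers; in the driver the cache, slack and per-buffer event figures come from OTX2_SSO_XAQ_CACHE_CNT, OTX2_SSO_XAQ_SLACK and the hardware-reported xae_waes, not from the values below.

/* Illustrative only: the XAQ sizing rule with hypothetical constants. */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const uint32_t cache_cnt = 8;        /* stand-in for OTX2_SSO_XAQ_CACHE_CNT */
	const uint32_t slack = 16;           /* stand-in for OTX2_SSO_XAQ_SLACK */
	const uint32_t xae_waes = 13;        /* stand-in: events carried per XAQ buffer */
	const uint32_t nb_event_queues = 4;
	const uint32_t adptr_xae_cnt = 4096; /* in-flight events requested by adapters */

	/* Per-queue cache, plus adapter demand in buffers, plus per-queue slack. */
	uint32_t xaq_cnt = nb_event_queues * cache_cnt;
	xaq_cnt += (adptr_xae_cnt / xae_waes) + (slack * nb_event_queues);

	printf("XAQ buffers to allocate: %u\n", xaq_cnt);
	return 0;
}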
otx2_mbox_process(dev->mbox);
}
+int
+sso_xae_reconfigure(struct rte_eventdev *event_dev)
+{
+ struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+ struct rte_mempool *prev_xaq_pool;
+ int rc = 0;
+
+ if (event_dev->data->dev_started)
+ sso_cleanup(event_dev, 0);
+
+ prev_xaq_pool = dev->xaq_pool;
+ dev->xaq_pool = NULL;
+ sso_xaq_allocate(dev);
+ rc = sso_ggrp_alloc_xaq(dev);
+ if (rc < 0) {
+ otx2_err("Failed to alloc xaq to ggrp %d", rc);
+ rte_mempool_free(prev_xaq_pool);
+ return rc;
+ }
+
+ rte_mempool_free(prev_xaq_pool);
+ rte_mb();
+ if (event_dev->data->dev_started)
+ sso_cleanup(event_dev, 1);
+
+ return 0;
+}
+
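For context, a minimal sketch of how an adapter path is expected to use the two helpers together: publish the additional in-flight event demand, then rebuild the XAQ pool. The wrapper name and its nb_timers parameter are hypothetical; the otx2_tim_evdev.c hunk at the end of this patch performs the same sequence inline.

#include "otx2_evdev.h"

/* Hypothetical adapter-side helper; mirrors the TIM ring create hunk below. */
static int
adapter_grow_inflight(struct rte_eventdev *event_dev, uint64_t nb_timers)
{
	/* Record the extra events the adapter may keep in flight. */
	sso_updt_xae_cnt(sso_pmd_priv(event_dev), (void *)&nb_timers,
			 RTE_EVENT_TYPE_TIMER);

	/* Re-size the XAQ pool so the new demand is accounted for. */
	return sso_xae_reconfigure(event_dev);
}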
struct otx2_sso_evdev {
uint64_t nb_xaq_cfg;
rte_iova_t fc_iova;
struct rte_mempool *xaq_pool;
+ uint32_t adptr_xae_cnt;
/* Dev args */
uint8_t dual_ws;
uint8_t selftest;
uint16_t otx2_ssogws_dual_deq_timeout_burst(void *port, struct rte_event ev[],
uint16_t nb_events,
uint64_t timeout_ticks);
+
+void sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data,
+ uint32_t event_type);
+int sso_xae_reconfigure(struct rte_eventdev *event_dev);
void sso_fastpath_fns_set(struct rte_eventdev *event_dev);
/* Clean up APIs */
typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);
--- /dev/null
+++ b/drivers/event/octeontx2/otx2_evdev_adptr.c
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(C) 2019 Marvell International Ltd.
+ */
+
+#include "otx2_evdev.h"
+
+void
+sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data, uint32_t event_type)
+{
+ switch (event_type) {
+ case RTE_EVENT_TYPE_TIMER:
+ {
+ dev->adptr_xae_cnt += (*(uint64_t *)data);
+ break;
+ }
+ default:
+ break;
+ }
+}
tim_ring->base + TIM_LF_RING_BASE);
otx2_write64(tim_ring->aura, tim_ring->base + TIM_LF_RING_AURA);
+ /* Update SSO xae count. */
+ sso_updt_xae_cnt(sso_pmd_priv(dev->event_dev), (void *)&nb_timers,
+ RTE_EVENT_TYPE_TIMER);
+ sso_xae_reconfigure(dev->event_dev);
+
return rc;
chnk_mem_err: