RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}
+int
+cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
+{
+	char pool_name[RTE_MEMZONE_NAMESIZE];
+	uint32_t xaq_cnt, npa_aura_id;
+	const struct rte_memzone *mz;
+	struct npa_aura_s *aura;
+	static int reconfig_cnt;
+	int rc;
+
+	if (dev->xaq_pool) {
+		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
+		if (rc < 0) {
+			plt_err("Failed to release XAQ %d", rc);
+			return rc;
+		}
+		rte_mempool_free(dev->xaq_pool);
+		dev->xaq_pool = NULL;
+	}
+
+	/*
+	 * Allocate memory for Add work backpressure.
+	 */
+	mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
+	if (mz == NULL)
+		mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
+						 sizeof(struct npa_aura_s) +
+							 RTE_CACHE_LINE_SIZE,
+						 0, 0, RTE_CACHE_LINE_SIZE);
+	if (mz == NULL) {
+		plt_err("Failed to allocate mem for fcmem");
+		return -ENOMEM;
+	}
+
+	dev->fc_iova = mz->iova;
+	dev->fc_mem = mz->addr;
+
+	aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
+				     RTE_CACHE_LINE_SIZE);
+	memset(aura, 0, sizeof(struct npa_aura_s));
+
+	aura->fc_ena = 1;
+	aura->fc_addr = dev->fc_iova;
+	aura->fc_hyst_bits = 0; /* Store count on all updates */
+
+	/* Taken from HRM 14.3.3(4) */
+	xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
+	xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
+		   (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);
+
+	plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
+	/* Set up the XAQ pool based on the number of queues. */
+	snprintf(pool_name, 30, "cnxk_xaq_buf_pool_%d", reconfig_cnt);
+	dev->xaq_pool = (void *)rte_mempool_create_empty(
+		pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
+		rte_socket_id(), 0);
+
+	if (dev->xaq_pool == NULL) {
+		plt_err("Unable to create empty mempool.");
+		rte_memzone_free(mz);
+		return -ENOMEM;
+	}
+
+	rc = rte_mempool_set_ops_byname(dev->xaq_pool,
+					rte_mbuf_platform_mempool_ops(), aura);
+	if (rc != 0) {
+		plt_err("Unable to set xaqpool ops.");
+		goto alloc_fail;
+	}
+
+	rc = rte_mempool_populate_default(dev->xaq_pool);
+	if (rc < 0) {
+		plt_err("Unable to populate xaqpool.");
+		goto alloc_fail;
+	}
+	reconfig_cnt++;
+	/* When SW does addwork (enqueue), check if there is space in XAQ by
+	 * comparing fc_addr above against the xaq_lmt calculated below.
+	 * There should be a minimum headroom (CNXK_SSO_XAQ_SLACK / 2) for SSO
+	 * to request XAQs to cache even before enqueue is called.
+	 */
+	dev->xaq_lmt =
+		xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
+	dev->nb_xaq_cfg = xaq_cnt;
+
+	npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
+	return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
+				       dev->nb_event_queues);
+alloc_fail:
+	rte_mempool_free(dev->xaq_pool);
+	rte_memzone_free(mz);
+	return rc;
+}
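To get a feel for the sizing formula above (from HRM 14.3.3(4)), here is a worked example with assumed values; sso.iue and sso.xae_waes are hardware dependent, and the numbers below (two event queues, iue = 4096, xae_waes = 16) are purely illustrative, not taken from this patch:

    xaq_cnt = 2 * CNXK_SSO_XAQ_CACHE_CNT      /* 2 queues * 7    =  14 */
            + 4096 / 16                       /* iue / xae_waes  = 256 */
            + CNXK_SSO_XAQ_SLACK * 2          /* 8 * 2 queues    =  16 */
            = 286 XAQ buffers
    xaq_lmt = 286 - (CNXK_SSO_XAQ_SLACK / 2) * 2 = 278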
+
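The flow-control comment above is the contract between this setup code and the fast path: the NPA aura mirrors its buffer count to fc_mem (fc_ena/fc_addr set earlier), and software compares that count against xaq_lmt before adding work. A minimal sketch of such a check follows, assuming the mirrored count grows as XAQ buffers are consumed; the helper name is made up here and this is not the driver's actual per-SoC fast-path code:

static inline int
sso_xaq_has_space(const struct cnxk_sso_evdev *dev)
{
	/* The aura hardware keeps *dev->fc_mem up to date, so a plain
	 * load is enough; headroom remains while the count stays below
	 * the software limit computed in cnxk_sso_xaq_allocate().
	 */
	return *((volatile uint64_t *)dev->fc_mem) < dev->xaq_lmt;
}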
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
	struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint32_t deq_tmo_ns;
+	int rc;
	deq_tmo_ns = conf->dequeue_timeout_ns;
		return -EINVAL;
	}
+	if (dev->xaq_pool) {
+		rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
+		if (rc < 0) {
+			plt_err("Failed to release XAQ %d", rc);
+			return rc;
+		}
+		rte_mempool_free(dev->xaq_pool);
+		dev->xaq_pool = NULL;
+	}
+
	dev->nb_event_queues = conf->nb_event_queues;
	dev->nb_event_ports = conf->nb_event_ports;
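For context (not part of this hunk), the expected call order is validate first, then rebuild the XAQ pool for the new queue count. A rough sketch of a configure callback wiring the two together; the function name and its placement are assumptions for illustration, not code from this patch:

static int
sso_dev_configure_sketch(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	/* Validation releases any previously configured XAQ pool and
	 * records the new queue/port counts on the device.
	 */
	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0)
		return rc;

	/* Size and attach a fresh XAQ pool for dev->nb_event_queues. */
	return cnxk_sso_xaq_allocate(dev);
}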
#ifndef __CNXK_EVENTDEV_H__
#define __CNXK_EVENTDEV_H__
+#include <rte_mbuf_pool_ops.h>
#include <rte_pci.h>
#include <eventdev_pmd_pci.h>
#define USEC2NSEC(__us) ((__us)*1E3)
-#define CNXK_SSO_MZ_NAME "cnxk_evdev_mz"
+#define CNXK_SSO_FC_NAME "cnxk_evdev_xaq_fc"
+#define CNXK_SSO_MZ_NAME "cnxk_evdev_mz"
+#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
+#define CNXK_SSO_XAQ_SLACK (8)
struct cnxk_sso_evdev {
	struct roc_sso sso;
	uint32_t min_dequeue_timeout_ns;
	uint32_t max_dequeue_timeout_ns;
	int32_t max_num_events;
+	uint64_t *fc_mem;
+	uint64_t xaq_lmt;
+	uint64_t nb_xaq_cfg;
+	rte_iova_t fc_iova;
+	struct rte_mempool *xaq_pool;
	/* CN9K */
	uint8_t dual_ws;
} __rte_cache_aligned;
	return event_dev->data->dev_private;
}
+/* Configuration functions */
+int cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev);
+
/* Common ops API. */
int cnxk_sso_init(struct rte_eventdev *event_dev);
int cnxk_sso_fini(struct rte_eventdev *event_dev);