Add SSO HWS (a.k.a. event port) setup and release functions.
Signed-off-by: Shijith Thotton <sthotton@marvell.com>
Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
#include "cnxk_eventdev.h"
static void
cn10k_init_hws_ops(struct cn10k_sso_hws *ws, uintptr_t base)
{
	/* Precompute the SSOW LF GWS register addresses for this HWS so the
	 * fastpath can issue MMIO ops without re-deriving them per event.
	 * 'base' is the HWS LF base returned by roc_sso_hws_base_get().
	 */
	ws->tag_wqe_op = base + SSOW_LF_GWS_WQE0;
	ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
	ws->updt_wqe_op = base + SSOW_LF_GWS_OP_UPD_WQP_GRP1;
	ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
	ws->swtag_untag_op = base + SSOW_LF_GWS_OP_SWTAG_UNTAG;
	ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
	ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}
+
+static uint32_t
+cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
+{
+ uint32_t wdata = BIT(16) | 1;
+
+ switch (dev->gw_mode) {
+ case CN10K_GW_MODE_NONE:
+ default:
+ break;
+ case CN10K_GW_MODE_PREF:
+ wdata |= BIT(19);
+ break;
+ case CN10K_GW_MODE_PREF_WFE:
+ wdata |= BIT(20) | BIT(19);
+ break;
+ }
+
+ return wdata;
+}
+
+static void *
+cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
+{
+ struct cnxk_sso_evdev *dev = arg;
+ struct cn10k_sso_hws *ws;
+
+ /* Allocate event port memory */
+ ws = rte_zmalloc("cn10k_ws",
+ sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (ws == NULL) {
+ plt_err("Failed to alloc memory for port=%d", port_id);
+ return NULL;
+ }
+
+ /* First cache line is reserved for cookie */
+ ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
+ ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
+ cn10k_init_hws_ops(ws, ws->base);
+ ws->hws_id = port_id;
+ ws->swtag_req = 0;
+ ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
+ ws->lmt_base = dev->sso.lmt_base;
+
+ return ws;
+}
+
+static void
+cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
+{
+ struct cnxk_sso_evdev *dev = arg;
+ struct cn10k_sso_hws *ws = hws;
+ uint64_t val;
+
+ rte_memcpy(ws->grps_base, grps_base,
+ sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
+
+ /* Set get_work timeout for HWS */
+ val = NSEC2USEC(dev->deq_tmo_ns) - 1;
+ plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
+}
+
+static void
+cn10k_sso_hws_release(void *arg, void *hws)
+{
+ struct cn10k_sso_hws *ws = hws;
+
+ RTE_SET_USED(arg);
+ memset(ws, 0, sizeof(*ws));
+}
+
static void
cn10k_sso_set_rsrc(void *arg)
{
if (rc < 0)
goto cnxk_rsrc_fini;
+ rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
+ cn10k_sso_hws_setup);
+ if (rc < 0)
+ goto cnxk_rsrc_fini;
+
return 0;
cnxk_rsrc_fini:
roc_sso_rsrc_fini(&dev->sso);
+ dev->nb_event_ports = 0;
return rc;
}
+static int
+cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+
+ RTE_SET_USED(port_conf);
+ return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
+}
+
+static void
+cn10k_sso_port_release(void *port)
+{
+ struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
+ struct cnxk_sso_evdev *dev;
+
+ if (port == NULL)
+ return;
+
+ dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
+ if (!gws_cookie->configured)
+ goto free;
+
+ cn10k_sso_hws_release(dev, port);
+ memset(gws_cookie, 0, sizeof(*gws_cookie));
+free:
+ rte_free(gws_cookie);
+}
+
/* cn10k eventdev ops table; queue/port default-conf and queue ops are
 * shared cnxk helpers, port setup/release are cn10k-specific.
 */
static struct rte_eventdev_ops cn10k_sso_dev_ops = {
	.dev_infos_get = cn10k_sso_info_get,
	.dev_configure = cn10k_sso_dev_configure,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn10k_sso_port_setup,
	.port_release = cn10k_sso_port_release,
};
static int
/* In dual-workslot mode each cn9k event port is backed by two HWS LFs
 * (ping/pong); PAIR_ID maps (port, 0|1) to the underlying HWS index.
 */
#define CN9K_DUAL_WS_NB_WS 2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
static void
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
{
	/* Precompute the SSOW LF GWS register addresses for one cn9k HWS
	 * (or one half of a dual-HWS pair) from its LF base address.
	 */
	ws->tag_op = base + SSOW_LF_GWS_TAG;
	ws->wqp_op = base + SSOW_LF_GWS_WQP;
	ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
	ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
	ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
	ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}
+
+static void
+cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
+{
+ struct cnxk_sso_evdev *dev = arg;
+ struct cn9k_sso_hws_dual *dws;
+ struct cn9k_sso_hws *ws;
+ uint64_t val;
+
+ /* Set get_work tmo for HWS */
+ val = NSEC2USEC(dev->deq_tmo_ns) - 1;
+ if (dev->dual_ws) {
+ dws = hws;
+ rte_memcpy(dws->grps_base, grps_base,
+ sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+ dws->fc_mem = dev->fc_mem;
+ dws->xaq_lmt = dev->xaq_lmt;
+
+ plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
+ plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
+ } else {
+ ws = hws;
+ rte_memcpy(ws->grps_base, grps_base,
+ sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
+ ws->fc_mem = dev->fc_mem;
+ ws->xaq_lmt = dev->xaq_lmt;
+
+ plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
+ }
+}
+
+static void
+cn9k_sso_hws_release(void *arg, void *hws)
+{
+ struct cnxk_sso_evdev *dev = arg;
+ struct cn9k_sso_hws_dual *dws;
+ struct cn9k_sso_hws *ws;
+
+ if (dev->dual_ws) {
+ dws = hws;
+ memset(dws, 0, sizeof(*dws));
+ } else {
+ ws = hws;
+ memset(ws, 0, sizeof(*ws));
+ }
+}
+
static void
cn9k_sso_set_rsrc(void *arg)
{
return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}
+static void *
+cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
+{
+ struct cnxk_sso_evdev *dev = arg;
+ struct cn9k_sso_hws_dual *dws;
+ struct cn9k_sso_hws *ws;
+ void *data;
+
+ if (dev->dual_ws) {
+ dws = rte_zmalloc("cn9k_dual_ws",
+ sizeof(struct cn9k_sso_hws_dual) +
+ RTE_CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (dws == NULL) {
+ plt_err("Failed to alloc memory for port=%d", port_id);
+ return NULL;
+ }
+
+ dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
+ dws->base[0] = roc_sso_hws_base_get(
+ &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
+ dws->base[1] = roc_sso_hws_base_get(
+ &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
+ cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
+ cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
+ dws->hws_id = port_id;
+ dws->swtag_req = 0;
+ dws->vws = 0;
+
+ data = dws;
+ } else {
+ /* Allocate event port memory */
+ ws = rte_zmalloc("cn9k_ws",
+ sizeof(struct cn9k_sso_hws) +
+ RTE_CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE);
+ if (ws == NULL) {
+ plt_err("Failed to alloc memory for port=%d", port_id);
+ return NULL;
+ }
+
+ /* First cache line is reserved for cookie */
+ ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
+ ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
+ cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
+ ws->hws_id = port_id;
+ ws->swtag_req = 0;
+
+ data = ws;
+ }
+
+ return data;
+}
+
static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
struct rte_event_dev_info *dev_info)
if (rc < 0)
goto cnxk_rsrc_fini;
+ rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
+ cn9k_sso_hws_setup);
+ if (rc < 0)
+ goto cnxk_rsrc_fini;
+
return 0;
cnxk_rsrc_fini:
roc_sso_rsrc_fini(&dev->sso);
+ dev->nb_event_ports = 0;
return rc;
}
+static int
+cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
+ const struct rte_event_port_conf *port_conf)
+{
+
+ RTE_SET_USED(port_conf);
+ return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
+}
+
+static void
+cn9k_sso_port_release(void *port)
+{
+ struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
+ struct cnxk_sso_evdev *dev;
+
+ if (port == NULL)
+ return;
+
+ dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
+ if (!gws_cookie->configured)
+ goto free;
+
+ cn9k_sso_hws_release(dev, port);
+ memset(gws_cookie, 0, sizeof(*gws_cookie));
+free:
+ rte_free(gws_cookie);
+}
+
/* cn9k eventdev ops table; queue/port default-conf and queue ops are
 * shared cnxk helpers, port setup/release are cn9k-specific.
 */
static struct rte_eventdev_ops cn9k_sso_dev_ops = {
	.dev_infos_get = cn9k_sso_info_get,
	.dev_configure = cn9k_sso_dev_configure,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn9k_sso_port_setup,
	.port_release = cn9k_sso_port_release,
};
static int
return rc;
}
+int
+cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
+ cnxk_sso_init_hws_mem_t init_hws_fn,
+ cnxk_sso_hws_setup_t setup_hws_fn)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ int i;
+
+ for (i = 0; i < dev->nb_event_ports; i++) {
+ struct cnxk_sso_hws_cookie *ws_cookie;
+ void *ws;
+
+ /* Free memory prior to re-allocation if needed */
+ if (event_dev->data->ports[i] != NULL)
+ ws = event_dev->data->ports[i];
+ else
+ ws = init_hws_fn(dev, i);
+ if (ws == NULL)
+ goto hws_fini;
+ ws_cookie = cnxk_sso_hws_get_cookie(ws);
+ ws_cookie->event_dev = event_dev;
+ ws_cookie->configured = 1;
+ event_dev->data->ports[i] = ws;
+ cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
+ i, setup_hws_fn);
+ }
+
+ return 0;
+hws_fini:
+ for (i = i - 1; i >= 0; i--) {
+ event_dev->data->ports[i] = NULL;
+ rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
+ }
+ return -ENOMEM;
+}
+
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
port_conf->enqueue_depth = 1;
}
+int
+cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
+ cnxk_sso_hws_setup_t hws_setup_fn)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ uintptr_t grps_base[CNXK_SSO_MAX_HWGRP] = {0};
+ uint16_t q;
+
+ plt_sso_dbg("Port=%d", port_id);
+ if (event_dev->data->ports[port_id] == NULL) {
+ plt_err("Invalid port Id %d", port_id);
+ return -EINVAL;
+ }
+
+ for (q = 0; q < dev->nb_event_queues; q++) {
+ grps_base[q] = roc_sso_hwgrp_base_get(&dev->sso, q);
+ if (grps_base[q] == 0) {
+ plt_err("Failed to get grp[%d] base addr", q);
+ return -EINVAL;
+ }
+ }
+
+ hws_setup_fn(dev, event_dev->data->ports[port_id], grps_base);
+ plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
+ rte_mb();
+
+ return 0;
+}
+
static void
parse_queue_param(char *value, void *opaque)
{
#define CNXK_SSO_XAE_CNT "xae_cnt"
#define CNXK_SSO_GGRP_QOS "qos"
/* ns <-> us conversion; NOTE(review): 1E3 is a double, so both macros
 * yield floating-point results — callers assign to uint64_t. Follows the
 * existing USEC2NSEC convention in this file.
 */
#define NSEC2USEC(__ns) ((__ns) / 1E3)
#define USEC2NSEC(__us) ((__us)*1E3)
/* One slot per possible event queue plus one spare. */
#define CNXK_SSO_MAX_HWGRP (RTE_EVENT_MAX_QUEUES_PER_DEV + 1)
#define CNXK_SSO_FC_NAME "cnxk_evdev_xaq_fc"
#define CNXK_SSO_MZ_NAME "cnxk_evdev_mz"
#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
#define CNXK_SSO_XAQ_SLACK (8)
/* cn10k GET_WORK prefetch modes (see cn10k_sso_gw_mode_wdata). */
#define CN10K_GW_MODE_NONE 0
#define CN10K_GW_MODE_PREF 1
#define CN10K_GW_MODE_PREF_WFE 2

/* Per-SoC hooks used by the common port setup/release path. */
typedef void *(*cnxk_sso_init_hws_mem_t)(void *dev, uint8_t port_id);
typedef void (*cnxk_sso_hws_setup_t)(void *dev, void *ws, uintptr_t *grp_base);
typedef void (*cnxk_sso_hws_release_t)(void *dev, void *ws);

+
struct cnxk_sso_qos {
uint16_t queue;
uint8_t xaq_prcnt;
struct cnxk_sso_qos *qos_parse_data;
/* CN9K */
uint8_t dual_ws;
+ /* CN10K */
+ uint8_t gw_mode;
+} __rte_cache_aligned;
+
/* CN10K HWS ops: precomputed SSOW LF register addresses (filled by
 * cn10k_init_hws_ops). Kept as a macro so the layout can be shared;
 * field order is part of the fastpath layout — do not reorder.
 */
#define CN10K_SSO_HWS_OPS                                                      \
	uintptr_t swtag_desched_op;                                            \
	uintptr_t swtag_flush_op;                                              \
	uintptr_t swtag_untag_op;                                              \
	uintptr_t swtag_norm_op;                                               \
	uintptr_t updt_wqe_op;                                                 \
	uintptr_t tag_wqe_op;                                                  \
	uintptr_t getwrk_op

/* cn10k event port (HWS). Preceded in memory by a cnxk_sso_hws_cookie. */
struct cn10k_sso_hws {
	/* Get Work Fastpath data */
	CN10K_SSO_HWS_OPS;
	uint32_t gw_wdata;	/* GET_WORK write data (gw_mode dependent) */
	uint8_t swtag_req;	/* pending swtag completion flag */
	uint8_t hws_id;		/* event port id */
	/* Add Work Fastpath data */
	uint64_t xaq_lmt __rte_cache_aligned;	/* XAQ threshold */
	uint64_t *fc_mem;	/* flow-control counter memory */
	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];	/* HWGRP base addrs */
	uint64_t base;		/* HWS LF base address */
	uintptr_t lmt_base;	/* LMT line base for burst ops */
} __rte_cache_aligned;
+
/* CN9K HWS ops: precomputed SSOW LF register addresses (filled by
 * cn9k_init_hws_ops). Field order is part of the fastpath layout — do
 * not reorder.
 */
#define CN9K_SSO_HWS_OPS                                                       \
	uintptr_t swtag_desched_op;                                            \
	uintptr_t swtag_flush_op;                                              \
	uintptr_t swtag_norm_op;                                               \
	uintptr_t getwrk_op;                                                   \
	uintptr_t tag_op;                                                      \
	uintptr_t wqp_op

/* Event port a.k.a GWS */
struct cn9k_sso_hws {
	/* Get Work Fastpath data */
	CN9K_SSO_HWS_OPS;
	uint8_t swtag_req;	/* pending swtag completion flag */
	uint8_t hws_id;		/* event port id */
	/* Add Work Fastpath data */
	uint64_t xaq_lmt __rte_cache_aligned;	/* XAQ threshold */
	uint64_t *fc_mem;	/* flow-control counter memory */
	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];	/* HWGRP base addrs */
	uint64_t base;		/* HWS LF base address */
} __rte_cache_aligned;

/* Register-address view of one HWS; cn9k_sso_hws starts with the same
 * fields, so it can be cast to this type (see cn9k_sso_init_hws_mem).
 */
struct cn9k_sso_hws_state {
	CN9K_SSO_HWS_OPS;
};
+
/* cn9k dual-workslot event port: two HWS LFs used ping/pong fashion. */
struct cn9k_sso_hws_dual {
	/* Get Work Fastpath data */
	struct cn9k_sso_hws_state ws_state[2]; /* Ping and Pong */
	uint8_t swtag_req;	/* pending swtag completion flag */
	uint8_t vws; /* Ping pong bit */
	uint8_t hws_id;		/* event port id */
	/* Add Work Fastpath data */
	uint64_t xaq_lmt __rte_cache_aligned;	/* XAQ threshold */
	uint64_t *fc_mem;	/* flow-control counter memory */
	uintptr_t grps_base[CNXK_SSO_MAX_HWGRP];	/* HWGRP base addrs */
	uint64_t base[2];	/* LF base address of each HWS in the pair */
} __rte_cache_aligned;

/* Bookkeeping stored immediately BEFORE each port struct in memory;
 * recovered with cnxk_sso_hws_get_cookie().
 */
struct cnxk_sso_hws_cookie {
	const struct rte_eventdev *event_dev;	/* owning eventdev */
	bool configured;	/* set once the port has been set up */
} __rte_cache_aligned;
static inline int
return event_dev->data->dev_private;
}
/* Return the bookkeeping cookie stored immediately before the port
 * struct 'ws'. Valid only for pointers produced by the
 * *_sso_init_hws_mem() allocators, which advance past the cookie.
 */
static inline struct cnxk_sso_hws_cookie *
cnxk_sso_hws_get_cookie(void *ws)
{
	return RTE_PTR_SUB(ws, sizeof(struct cnxk_sso_hws_cookie));
}
+
/* Configuration functions */
int cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev);
void cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
struct rte_event_dev_info *dev_info);
int cnxk_sso_dev_validate(const struct rte_eventdev *event_dev);
+int cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
+ cnxk_sso_init_hws_mem_t init_hws_mem,
+ cnxk_sso_hws_setup_t hws_setup);
void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
struct rte_event_queue_conf *queue_conf);
int cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
void cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id);
void cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
struct rte_event_port_conf *port_conf);
+int cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
+ cnxk_sso_hws_setup_t hws_setup_fn);
#endif /* __CNXK_EVENTDEV_H__ */