memset(ws, 0, sizeof(*ws));
}
+/* Drain all events pending in HWGRP `queue_id` (whose LF MMIO base is
+ * `base`) by repeatedly pulling work through the HWS `hws`.  Each drained
+ * event is handed to `fn` (when non-NULL) before its tag is flushed.
+ * Loops until the group's AQ/CQ/DS occupancy counters all read zero.
+ */
+static void
+cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
+ cnxk_handle_event_t fn, void *arg)
+{
+ struct cn10k_sso_hws *ws = hws;
+ uint64_t cq_ds_cnt = 1;
+ uint64_t aq_cnt = 1;
+ uint64_t ds_cnt = 1;
+ struct rte_event ev;
+ uint64_t val, req;
+
+ /* NOTE(review): writing 0 to QCTL presumably stops further scheduling
+ * from this group while it drains (the caller re-enables it) — confirm
+ * against the SSO HRM.
+ */
+ plt_write64(0, base + SSO_LF_GGRP_QCTL);
+
+ req = queue_id; /* GGRP ID */
+ req |= BIT_ULL(18); /* Grouped */
+ req |= BIT_ULL(16); /* WAIT */
+
+ aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
+ ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
+ cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
+ /* Extract cq and ds count from the interrupt-count register. */
+ cq_ds_cnt &= 0x3FFF3FFF0000;
+
+ while (aq_cnt || cq_ds_cnt || ds_cnt) {
+ /* Request work from this specific group only. */
+ plt_write64(req, ws->getwrk_op);
+ cn10k_sso_hws_get_work_empty(ws, &ev);
+ if (fn != NULL && ev.u64 != 0)
+ fn(arg, ev);
+ if (ev.sched_type != SSO_TT_EMPTY)
+ cnxk_sso_hws_swtag_flush(ws->tag_wqe_op,
+ ws->swtag_flush_op);
+ /* Wait for pend-state bit 56 (presumably the pending swtag
+ * flush) to clear before re-reading the counters — confirm
+ * bit meaning against the HRM.
+ */
+ do {
+ val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+ } while (val & BIT_ULL(56));
+ aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
+ ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
+ cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
+ /* Extract cq and ds count */
+ cq_ds_cnt &= 0x3FFF3FFF0000;
+ }
+
+ /* Invalidate the GWC so stale cached work is not delivered later. */
+ plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
+ rte_mb();
+}
+
+/* Quiesce one CN10K HWS (event port): wait for in-flight operations to
+ * retire, release any held work (untag/desched), drain prefetched work
+ * according to the configured getwork mode, and invalidate the GWC.
+ * `arg` is the cnxk_sso_evdev, `hws` the cn10k_sso_hws to reset.
+ */
+static void
+cn10k_sso_hws_reset(void *arg, void *hws)
+{
+ struct cnxk_sso_evdev *dev = arg;
+ struct cn10k_sso_hws *ws = hws;
+ uintptr_t base = ws->base;
+ uint64_t pend_state;
+ union {
+ __uint128_t wdata;
+ uint64_t u64[2];
+ } gw;
+ uint8_t pend_tt;
+
+ /* Wait till getwork/swtp/waitw/desched completes. */
+ do {
+ pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
+ BIT_ULL(56) | BIT_ULL(54)));
+ pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
+ if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ /* Atomic/ordered work must be untagged before desched. */
+ if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
+ cnxk_sso_hws_swtag_untag(base +
+ SSOW_LF_GWS_OP_SWTAG_UNTAG);
+ plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
+ }
+
+ /* Wait for desched to complete. */
+ do {
+ pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ } while (pend_state & BIT_ULL(58));
+
+ /* Settle the prefetch machinery depending on the getwork mode. */
+ switch (dev->gw_mode) {
+ case CN10K_GW_MODE_PREF:
+ /* Wait for the outstanding prefetch to complete (bit 63). */
+ while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
+ ;
+ break;
+ case CN10K_GW_MODE_PREF_WFE:
+ while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
+ SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
+ continue;
+ plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
+ break;
+ case CN10K_GW_MODE_NONE:
+ default:
+ break;
+ }
+
+ /* If prefetched work is still held, pull it in and release it the
+ * same way as above (untag for atomic/ordered, then desched).
+ */
+ if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
+ SSO_TT_EMPTY) {
+ /* NOTE(review): BIT_ULL(16) | 1 looks like WAIT + GGRP 1 per
+ * the req encoding in the flush path — confirm against HRM.
+ */
+ plt_write64(BIT_ULL(16) | 1, ws->getwrk_op);
+ do {
+ roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
+ } while (gw.u64[0] & BIT_ULL(63));
+ pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
+ if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ if (pend_tt == SSO_TT_ATOMIC ||
+ pend_tt == SSO_TT_ORDERED)
+ cnxk_sso_hws_swtag_untag(
+ base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
+ plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
+ }
+ }
+
+ plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
+ rte_mb();
+}
+
static void
cn10k_sso_set_rsrc(void *arg)
{
return (int)nb_unlinks;
}
+/* eventdev dev_start handler for CN10K: run the shared start sequence
+ * (QoS config, HWS reset, HWGRP flush/enable) and, on success, install
+ * the CN10K fast-path function pointers.
+ */
+static int
+cn10k_sso_start(struct rte_eventdev *event_dev)
+{
+ int rc;
+
+ rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
+ cn10k_sso_hws_flush_events);
+ if (rc >= 0)
+ cn10k_sso_fp_fns_set(event_dev);
+
+ return rc;
+}
+
static struct rte_eventdev_ops cn10k_sso_dev_ops = {
.dev_infos_get = cn10k_sso_info_get,
.dev_configure = cn10k_sso_dev_configure,
.port_link = cn10k_sso_port_link,
.port_unlink = cn10k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
+
+ /* Reset each HWS, drain/enable each HWGRP, set fast-path fns. */
+ .dev_start = cn10k_sso_start,
};
static int
}
}
+/* CN9K counterpart of the HWGRP drain: pull all events left in HWGRP
+ * `queue_id` through the workslot `hws` and hand each one to `fn` (when
+ * non-NULL).  On dual-workslot devices only the first half (ws_state[0])
+ * is used for draining.  `arg` is the rte_eventdev (see cnxk_sso_cleanup).
+ */
+static void
+cn9k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
+ cnxk_handle_event_t fn, void *arg)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(arg);
+ struct cn9k_sso_hws_dual *dws;
+ struct cn9k_sso_hws_state *st;
+ struct cn9k_sso_hws *ws;
+ uint64_t cq_ds_cnt = 1;
+ uint64_t aq_cnt = 1;
+ uint64_t ds_cnt = 1;
+ struct rte_event ev;
+ uintptr_t ws_base;
+ uint64_t val, req;
+
+ /* Stop further scheduling from this group while draining. */
+ plt_write64(0, base + SSO_LF_GGRP_QCTL);
+
+ req = queue_id; /* GGRP ID */
+ req |= BIT_ULL(18); /* Grouped */
+ req |= BIT_ULL(16); /* WAIT */
+
+ aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
+ ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
+ cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
+ /* Extract cq and ds count from the interrupt-count register. */
+ cq_ds_cnt &= 0x3FFF3FFF0000;
+
+ /* Pick the workslot state/base to drain through. */
+ if (dev->dual_ws) {
+ dws = hws;
+ st = &dws->ws_state[0];
+ ws_base = dws->base[0];
+ } else {
+ ws = hws;
+ st = (struct cn9k_sso_hws_state *)ws;
+ ws_base = ws->base;
+ }
+
+ while (aq_cnt || cq_ds_cnt || ds_cnt) {
+ plt_write64(req, st->getwrk_op);
+ cn9k_sso_hws_get_work_empty(st, &ev);
+ if (fn != NULL && ev.u64 != 0)
+ fn(arg, ev);
+ if (ev.sched_type != SSO_TT_EMPTY)
+ cnxk_sso_hws_swtag_flush(st->tag_op,
+ st->swtag_flush_op);
+ /* Wait for pend-state bit 56 to clear before re-reading the
+ * counters (see the cn10k variant) — confirm bit vs HRM.
+ */
+ do {
+ val = plt_read64(ws_base + SSOW_LF_GWS_PENDSTATE);
+ } while (val & BIT_ULL(56));
+ aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
+ ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
+ cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
+ /* Extract cq and ds count */
+ cq_ds_cnt &= 0x3FFF3FFF0000;
+ }
+
+ /* NOTE(review): unlike the cn10k variant there is no rte_mb() after
+ * the GWC invalidate here — confirm whether that is intentional.
+ */
+ plt_write64(0, ws_base + SSOW_LF_GWS_OP_GWC_INVAL);
+}
+
+/* Quiesce one CN9K event port: for each underlying workslot (two when
+ * dual-workslot mode is enabled, otherwise one) wait for in-flight
+ * operations to retire and release any held work via untag + desched.
+ * `arg` is the cnxk_sso_evdev, `hws` the port's workslot structure.
+ */
+static void
+cn9k_sso_hws_reset(void *arg, void *hws)
+{
+ struct cnxk_sso_evdev *dev = arg;
+ struct cn9k_sso_hws_dual *dws;
+ struct cn9k_sso_hws *ws;
+ uint64_t pend_state;
+ uint8_t pend_tt;
+ uintptr_t base;
+ uint64_t tag;
+ uint8_t i;
+
+ /* `hws` is one of the two layouts; dev->dual_ws picks which. */
+ dws = hws;
+ ws = hws;
+ for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
+ base = dev->dual_ws ? dws->base[i] : ws->base;
+ /* Wait till getwork/swtp/waitw/desched completes. */
+ do {
+ pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
+ BIT_ULL(56)));
+
+ /* Tag-type field lives in bits [33:32] of the tag register. */
+ tag = plt_read64(base + SSOW_LF_GWS_TAG);
+ pend_tt = (tag >> 32) & 0x3;
+ if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+ /* Atomic/ordered work must be untagged first. */
+ if (pend_tt == SSO_TT_ATOMIC ||
+ pend_tt == SSO_TT_ORDERED)
+ cnxk_sso_hws_swtag_untag(
+ base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
+ plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
+ }
+
+ /* Wait for desched to complete. */
+ do {
+ pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+ } while (pend_state & BIT_ULL(58));
+ }
+}
+
static void
cn9k_sso_set_rsrc(void *arg)
{
return (int)nb_unlinks;
}
+/* eventdev dev_start handler for CN9K: run the shared start sequence
+ * (QoS config, HWS reset, HWGRP flush/enable) and, on success, install
+ * the CN9K fast-path function pointers.
+ */
+static int
+cn9k_sso_start(struct rte_eventdev *event_dev)
+{
+ int rc;
+
+ rc = cnxk_sso_start(event_dev, cn9k_sso_hws_reset,
+ cn9k_sso_hws_flush_events);
+ if (rc >= 0)
+ cn9k_sso_fp_fns_set(event_dev);
+
+ return rc;
+}
+
static struct rte_eventdev_ops cn9k_sso_dev_ops = {
.dev_infos_get = cn9k_sso_info_get,
.dev_configure = cn9k_sso_dev_configure,
.port_link = cn9k_sso_port_link,
.port_unlink = cn9k_sso_port_unlink,
.timeout_ticks = cnxk_sso_timeout_ticks,
+
+ /* Reset each HWS, drain/enable each HWGRP, set fast-path fns. */
+ .dev_start = cn9k_sso_start,
};
static int
return 0;
}
+/* Flush-callback adapter: forward an event drained during queue flush to
+ * the application's registered dev_stop_flush callback, if any.  `arg`
+ * is the rte_eventdev (matches cnxk_handle_event_t's void * signature).
+ */
+static void
+cnxk_handle_event(void *arg, struct rte_event event)
+{
+ struct rte_eventdev *event_dev = arg;
+
+ if (event_dev->dev_ops->dev_stop_flush == NULL)
+ return;
+
+ event_dev->dev_ops->dev_stop_flush(event_dev->data->dev_id, event,
+ event_dev->data->dev_stop_flush_arg);
+}
+
+/* Shared reset/flush sequence: first reset every HWS (event port), then
+ * drain every HWGRP (event queue) through HWS0 and write `enable` (0/1)
+ * to each group's QCTL.  Drained events are forwarded to the app's
+ * dev_stop_flush callback via cnxk_handle_event.
+ */
+static void
+cnxk_sso_cleanup(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
+ cnxk_sso_hws_flush_t flush_fn, uint8_t enable)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ uintptr_t hwgrp_base;
+ uint16_t i;
+ void *ws;
+
+ /* All ports must be quiesced before any group can be drained. */
+ for (i = 0; i < dev->nb_event_ports; i++) {
+ ws = event_dev->data->ports[i];
+ reset_fn(dev, ws);
+ }
+
+ rte_mb();
+ ws = event_dev->data->ports[0];
+
+ for (i = 0; i < dev->nb_event_queues; i++) {
+ /* Consume all the events through HWS0 */
+ hwgrp_base = roc_sso_hwgrp_base_get(&dev->sso, i);
+ flush_fn(ws, i, hwgrp_base, cnxk_handle_event, event_dev);
+ /* Enable/Disable SSO GGRP */
+ plt_write64(enable, hwgrp_base + SSO_LF_GGRP_QCTL);
+ }
+}
+
+/* Common device-start sequence shared by the cn9k and cn10k PMDs:
+ * program per-HWGRP QoS percentages from the parsed devargs, then reset
+ * every HWS and flush/enable every HWGRP via cnxk_sso_cleanup().
+ *
+ * @param event_dev  Event device being started.
+ * @param reset_fn   Per-HWS (port) reset callback.
+ * @param flush_fn   Per-HWGRP (queue) flush callback.
+ * @return 0 on success, -EINVAL if QoS configuration fails.
+ */
+int
+cnxk_sso_start(struct rte_eventdev *event_dev, cnxk_sso_hws_reset_t reset_fn,
+ cnxk_sso_hws_flush_t flush_fn)
+{
+ struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+ struct roc_sso_hwgrp_qos qos[dev->qos_queue_cnt];
+ int i, rc;
+
+ plt_sso_dbg();
+ for (i = 0; i < dev->qos_queue_cnt; i++) {
+ /* Fix: index qos[i]; the original used `qos->...`, which
+ * overwrote element 0 on every iteration and passed the
+ * remaining entries to the ROC layer uninitialized.
+ */
+ qos[i].hwgrp = dev->qos_parse_data[i].queue;
+ qos[i].iaq_prcnt = dev->qos_parse_data[i].iaq_prcnt;
+ qos[i].taq_prcnt = dev->qos_parse_data[i].taq_prcnt;
+ qos[i].xaq_prcnt = dev->qos_parse_data[i].xaq_prcnt;
+ }
+ rc = roc_sso_hwgrp_qos_config(&dev->sso, qos, dev->qos_queue_cnt,
+ dev->xae_cnt);
+ if (rc < 0) {
+ plt_sso_dbg("failed to configure HWGRP QoS rc = %d", rc);
+ return -EINVAL;
+ }
+ cnxk_sso_cleanup(event_dev, reset_fn, flush_fn, true);
+ rte_mb();
+
+ return 0;
+}
+
static void
parse_queue_param(char *value, void *opaque)
{
typedef void (*cnxk_sso_hws_release_t)(void *dev, void *ws);
typedef int (*cnxk_sso_link_t)(void *dev, void *ws, uint16_t *map,
uint16_t nb_link);
+/* Callback invoked for each event drained while flushing a queue. */
+typedef void (*cnxk_handle_event_t)(void *arg, struct rte_event ev);
+/* Resets one HWS (event port); `arg` is the cnxk_sso_evdev. */
+typedef void (*cnxk_sso_hws_reset_t)(void *arg, void *ws);
+/* Drains HWGRP `queue_id` through HWS `ws`; `base` is the HWGRP LF base,
+ * each drained event is passed to `fn(arg, ev)`.
+ */
+typedef void (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
+ cnxk_handle_event_t fn, void *arg);
struct cnxk_sso_qos {
uint16_t queue;
cnxk_sso_hws_setup_t hws_setup_fn);
int cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
uint64_t *tmo_ticks);
+/* Common start sequence: configure HWGRP QoS, reset all HWS, then flush
+ * and enable all HWGRPs.  Returns 0 on success, -EINVAL on QoS failure.
+ */
+int cnxk_sso_start(struct rte_eventdev *event_dev,
+ cnxk_sso_hws_reset_t reset_fn,
+ cnxk_sso_hws_flush_t flush_fn);
#endif /* __CNXK_EVENTDEV_H__ */