From: Pavan Nikhilesh
Date: Fri, 13 May 2022 17:58:41 +0000 (+0530)
Subject: event/cnxk: implement event port quiesce function
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=e8594de2731d5bfe823b8e41f44d157d1ff19e21;p=dpdk.git

event/cnxk: implement event port quiesce function

Implement event port quiesce function to clean up any lcore
resources used.

Signed-off-by: Pavan Nikhilesh
---

diff --git a/drivers/event/cnxk/cn10k_eventdev.c b/drivers/event/cnxk/cn10k_eventdev.c
index 94829e789c..776fb944e9 100644
--- a/drivers/event/cnxk/cn10k_eventdev.c
+++ b/drivers/event/cnxk/cn10k_eventdev.c
@@ -167,15 +167,23 @@ cn10k_sso_hws_reset(void *arg, void *hws)
 		uint64_t u64[2];
 	} gw;
 	uint8_t pend_tt;
+	bool is_pend;
 
 	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
 	/* Wait till getwork/swtp/waitw/desched completes. */
+	is_pend = false;
+	/* Work in WQE0 is always consumed, unless it's a SWTAG. */
+	pend_state = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+	if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+	    ws->swtag_req)
+		is_pend = true;
+
 	do {
 		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
 	} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
			       BIT_ULL(56) | BIT_ULL(54)));
 	pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
-	if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+	if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
 		if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
 			cnxk_sso_hws_swtag_untag(base +
 						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
@@ -189,15 +197,10 @@ cn10k_sso_hws_reset(void *arg, void *hws)
 
 	switch (dev->gw_mode) {
 	case CN10K_GW_MODE_PREF:
+	case CN10K_GW_MODE_PREF_WFE:
 		while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
 			;
 		break;
-	case CN10K_GW_MODE_PREF_WFE:
-		while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
-		       SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
-			continue;
-		plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
-		break;
 	case CN10K_GW_MODE_NONE:
 	default:
 		break;
@@ -533,6 +536,66 @@ free:
 		rte_free(gws_cookie);
 }
 
+static void
+cn10k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+		       rte_eventdev_port_flush_t flush_cb, void *args)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	struct cn10k_sso_hws *ws = port;
+	struct rte_event ev;
+	uint64_t ptag;
+	bool is_pend;
+
+	is_pend = false;
+	/* Work in WQE0 is always consumed, unless it's a SWTAG. */
+	ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+	if (ptag & (BIT_ULL(62) | BIT_ULL(54)) || ws->swtag_req)
+		is_pend = true;
+	do {
+		ptag = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
+	} while (ptag &
+		 (BIT_ULL(62) | BIT_ULL(58) | BIT_ULL(56) | BIT_ULL(54)));
+
+	cn10k_sso_hws_get_work_empty(ws, &ev,
+				     (NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+					     NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+	if (is_pend && ev.u64) {
+		if (flush_cb)
+			flush_cb(event_dev->data->dev_id, ev, args);
+		cnxk_sso_hws_swtag_flush(ws->base);
+	}
+
+	/* Check if we have work in PRF_WQE0, if so extract it. */
+	switch (dev->gw_mode) {
+	case CN10K_GW_MODE_PREF:
+	case CN10K_GW_MODE_PREF_WFE:
+		while (plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0) &
+		       BIT_ULL(63))
+			;
+		break;
+	case CN10K_GW_MODE_NONE:
+	default:
+		break;
+	}
+
+	if (CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_PRF_WQE0)) !=
+	    SSO_TT_EMPTY) {
+		plt_write64(BIT_ULL(16) | 1,
+			    ws->base + SSOW_LF_GWS_OP_GET_WORK0);
+		cn10k_sso_hws_get_work_empty(
+			ws, &ev,
+			(NIX_RX_OFFLOAD_MAX - 1) | NIX_RX_REAS_F |
+				NIX_RX_MULTI_SEG_F | CPT_RX_WQE_F);
+		if (ev.u64) {
+			if (flush_cb)
+				flush_cb(event_dev->data->dev_id, ev, args);
+			cnxk_sso_hws_swtag_flush(ws->base);
+		}
+	}
+	ws->swtag_req = 0;
+	plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
+}
+
 static int
 cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
 		    const uint8_t queues[], const uint8_t priorities[],
@@ -852,6 +915,7 @@ static struct eventdev_ops cn10k_sso_dev_ops = {
 	.port_def_conf = cnxk_sso_port_def_conf,
 	.port_setup = cn10k_sso_port_setup,
 	.port_release = cn10k_sso_port_release,
+	.port_quiesce = cn10k_sso_port_quiesce,
 	.port_link = cn10k_sso_port_link,
 	.port_unlink = cn10k_sso_port_unlink,
 	.timeout_ticks = cnxk_sso_timeout_ticks,
diff --git a/drivers/event/cnxk/cn9k_eventdev.c b/drivers/event/cnxk/cn9k_eventdev.c
index 987888d3db..43b6086298 100644
--- a/drivers/event/cnxk/cn9k_eventdev.c
+++ b/drivers/event/cnxk/cn9k_eventdev.c
@@ -186,6 +186,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
 	uint64_t pend_state;
 	uint8_t pend_tt;
 	uintptr_t base;
+	bool is_pend;
 	uint64_t tag;
 	uint8_t i;
 
@@ -193,6 +194,13 @@ cn9k_sso_hws_reset(void *arg, void *hws)
 	ws = hws;
 	for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
 		base = dev->dual_ws ? dws->base[i] : ws->base;
+		is_pend = false;
+		/* Work in WQE0 is always consumed, unless it's a SWTAG. */
+		pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+		if (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+		    (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+				    ws->swtag_req))
+			is_pend = true;
 		/* Wait till getwork/swtp/waitw/desched completes. */
 		do {
 			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
@@ -201,7 +209,7 @@ cn9k_sso_hws_reset(void *arg, void *hws)
 
 		tag = plt_read64(base + SSOW_LF_GWS_TAG);
 		pend_tt = (tag >> 32) & 0x3;
-		if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
+		if (is_pend && pend_tt != SSO_TT_EMPTY) { /* Work was pending */
 			if (pend_tt == SSO_TT_ATOMIC ||
 			    pend_tt == SSO_TT_ORDERED)
 				cnxk_sso_hws_swtag_untag(
@@ -213,7 +221,14 @@ cn9k_sso_hws_reset(void *arg, void *hws)
 		do {
 			pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
 		} while (pend_state & BIT_ULL(58));
+
+		plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
 	}
+
+	if (dev->dual_ws)
+		dws->swtag_req = 0;
+	else
+		ws->swtag_req = 0;
 }
 
 void
@@ -789,6 +804,48 @@ free:
 		rte_free(gws_cookie);
 }
 
+static void
+cn9k_sso_port_quiesce(struct rte_eventdev *event_dev, void *port,
+		      rte_eventdev_port_flush_t flush_cb, void *args)
+{
+	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
+	struct cn9k_sso_hws_dual *dws;
+	struct cn9k_sso_hws *ws;
+	struct rte_event ev;
+	uintptr_t base;
+	uint64_t ptag;
+	bool is_pend;
+	uint8_t i;
+
+	dws = port;
+	ws = port;
+	for (i = 0; i < (dev->dual_ws ? CN9K_DUAL_WS_NB_WS : 1); i++) {
+		base = dev->dual_ws ? dws->base[i] : ws->base;
+		is_pend = false;
+		/* Work in WQE0 is always consumed, unless it's a SWTAG. */
+		ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+		if (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(54)) ||
+		    (dev->dual_ws ? (dws->swtag_req && i == !dws->vws) :
+				    ws->swtag_req))
+			is_pend = true;
+		/* Wait till getwork/swtp/waitw/desched completes. */
+		do {
+			ptag = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
+		} while (ptag & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
+				 BIT_ULL(56)));
+
+		cn9k_sso_hws_get_work_empty(
+			base, &ev, dev->rx_offloads,
+			dev->dual_ws ? dws->lookup_mem : ws->lookup_mem,
+			dev->dual_ws ? dws->tstamp : ws->tstamp);
+		if (is_pend && ev.u64) {
+			if (flush_cb)
+				flush_cb(event_dev->data->dev_id, ev, args);
+			cnxk_sso_hws_swtag_flush(ws->base);
+		}
+	}
+}
+
 static int
 cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
 		   const uint8_t queues[], const uint8_t priorities[],
@@ -1090,6 +1147,7 @@ static struct eventdev_ops cn9k_sso_dev_ops = {
 	.port_def_conf = cnxk_sso_port_def_conf,
 	.port_setup = cn9k_sso_port_setup,
 	.port_release = cn9k_sso_port_release,
+	.port_quiesce = cn9k_sso_port_quiesce,
 	.port_link = cn9k_sso_port_link,
 	.port_unlink = cn9k_sso_port_unlink,
 	.timeout_ticks = cnxk_sso_timeout_ticks,
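
The new .port_quiesce ops are reached through the generic rte_event_port_quiesce()
API, which is meant to be called from the lcore that has been using the port so the
driver can drain pending/prefetched work and invalidate that core's cached GWS
state. A minimal usage sketch, not part of this patch: the callback and teardown
helper below are hypothetical; only rte_event_port_quiesce() and the
rte_eventdev_port_flush_t callback signature come from the eventdev API.

#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>

/* Hypothetical flush callback: invoked once per event still held by the
 * port when it is quiesced. Here a packet event's mbuf is simply freed;
 * an application could instead re-enqueue or otherwise finish the work.
 */
static void
example_port_flush(uint8_t dev_id, struct rte_event ev, void *args)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(args);

	if (ev.event_type == RTE_EVENT_TYPE_ETHDEV)
		rte_pktmbuf_free(ev.mbuf);
}

/* Hypothetical per-lcore teardown helper, run on the lcore that owns the
 * event port.
 */
static void
example_lcore_teardown(uint8_t dev_id, uint8_t port_id)
{
	rte_event_port_quiesce(dev_id, port_id, example_port_flush, NULL);
}

Each flushed event is handed to the callback exactly as the driver extracted it
(matching the flush_cb(event_dev->data->dev_id, ev, args) calls above), so the
application decides whether to drop, re-enqueue, or complete it.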