/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
5 #ifndef __CN10K_WORKER_H__
6 #define __CN10K_WORKER_H__
8 #include "cnxk_eventdev.h"
9 #include "cnxk_worker.h"
13 static __rte_always_inline uint8_t
14 cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
16 const uint32_t tag = (uint32_t)ev->event;
17 const uint8_t new_tt = ev->sched_type;
18 const uint64_t event_ptr = ev->u64;
19 const uint16_t grp = ev->queue_id;
21 rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
22 if (ws->xaq_lmt <= *ws->fc_mem)
25 cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
29 static __rte_always_inline void
30 cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)
32 const uint32_t tag = (uint32_t)ev->event;
33 const uint8_t new_tt = ev->sched_type;
34 const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(ws->tag_wqe_op));
37 * cur_tt/new_tt SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
39 * SSO_TT_ORDERED norm norm untag
40 * SSO_TT_ATOMIC norm norm untag
41 * SSO_TT_UNTAGGED norm norm NOOP
44 if (new_tt == SSO_TT_UNTAGGED) {
45 if (cur_tt != SSO_TT_UNTAGGED)
46 cnxk_sso_hws_swtag_untag(ws->swtag_untag_op);
48 cnxk_sso_hws_swtag_norm(tag, new_tt, ws->swtag_norm_op);
53 static __rte_always_inline void
54 cn10k_sso_hws_fwd_group(struct cn10k_sso_hws *ws, const struct rte_event *ev,
57 const uint32_t tag = (uint32_t)ev->event;
58 const uint8_t new_tt = ev->sched_type;
60 plt_write64(ev->u64, ws->updt_wqe_op);
61 cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
64 static __rte_always_inline void
65 cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
66 const struct rte_event *ev)
68 const uint8_t grp = ev->queue_id;
70 /* Group hasn't changed, Use SWTAG to forward the event */
71 if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_wqe_op)) == grp)
72 cn10k_sso_hws_fwd_swtag(ws, ev);
75 * Group has been changed for group based work pipelining,
76 * Use deschedule/add_work operation to transfer the event to
79 cn10k_sso_hws_fwd_group(ws, ev, grp);
82 static __rte_always_inline uint16_t
83 cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev)
90 gw.get_work = ws->gw_wdata;
91 #if defined(RTE_ARCH_ARM64) && !defined(__clang__)
93 PLT_CPU_FEATURE_PREAMBLE
94 "caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
95 : [wdata] "+r"(gw.get_work)
96 : [gw_loc] "r"(ws->getwrk_op)
99 plt_write64(gw.u64[0], ws->getwrk_op);
101 roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
102 } while (gw.u64[0] & BIT_ULL(63));
104 gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
105 (gw.u64[0] & (0x3FFull << 36)) << 4 |
106 (gw.u64[0] & 0xffffffff);
108 ev->event = gw.u64[0];
114 /* Used in cleaning up workslot. */
115 static __rte_always_inline uint16_t
116 cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
119 __uint128_t get_work;
123 #ifdef RTE_ARCH_ARM64
124 asm volatile(PLT_CPU_FEATURE_PREAMBLE
125 " ldp %[tag], %[wqp], [%[tag_loc]] \n"
126 " tbz %[tag], 63, done%= \n"
129 " ldp %[tag], %[wqp], [%[tag_loc]] \n"
130 " tbnz %[tag], 63, rty%= \n"
132 : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
133 : [tag_loc] "r"(ws->tag_wqe_op)
137 roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
138 } while (gw.u64[0] & BIT_ULL(63));
141 gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
142 (gw.u64[0] & (0x3FFull << 36)) << 4 |
143 (gw.u64[0] & 0xffffffff);
145 ev->event = gw.u64[0];
151 /* CN10K Fastpath functions. */
152 uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev);
153 uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port,
154 const struct rte_event ev[],
156 uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
157 const struct rte_event ev[],
159 uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
160 const struct rte_event ev[],
163 uint16_t __rte_hot cn10k_sso_hws_deq(void *port, struct rte_event *ev,
164 uint64_t timeout_ticks);
165 uint16_t __rte_hot cn10k_sso_hws_deq_burst(void *port, struct rte_event ev[],
167 uint64_t timeout_ticks);
168 uint16_t __rte_hot cn10k_sso_hws_tmo_deq(void *port, struct rte_event *ev,
169 uint64_t timeout_ticks);
170 uint16_t __rte_hot cn10k_sso_hws_tmo_deq_burst(void *port,
171 struct rte_event ev[],
173 uint64_t timeout_ticks);