/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__

#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

/* SSO Operations */
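
/*
 * Each ..._op field in the workslot state used below is a pre-computed
 * MMIO address of one SSOW LF work-slot register (tag/WQP status,
 * GETWORK, SWTAG variants), so the fast path reduces to plain loads and
 * stores on those addresses.
 */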

static __rte_always_inline uint8_t
cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	/* XAQ admission control: fail the enqueue once the in-flight
	 * event count has reached the XAQ pool limit.
	 */
	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
	return 1;
}
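
/*
 * Forward within the same group: SWTAG switches the tag and tag type of
 * the event currently held by the workslot without releasing it. A
 * switch to SSO_TT_UNTAGGED uses SWTAG_UNTAG instead, and untagged to
 * untagged is a no-op (see the table below).
 */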
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
		       const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));

	/* CNXK model
	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
	 *
	 * SSO_TT_ORDERED        norm           norm           untag
	 * SSO_TT_ATOMIC         norm           norm           untag
	 * SSO_TT_UNTAGGED       norm           norm           NOOP
	 */

	if (new_tt == SSO_TT_UNTAGGED) {
		if (cur_tt != SSO_TT_UNTAGGED)
			cnxk_sso_hws_swtag_untag(
				CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
				SSOW_LF_GWS_OP_SWTAG_UNTAG);
	} else {
		cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
	}
}
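
/*
 * Forward across groups: SWTAG cannot change the group, so program the
 * WQP and target group through UPD_WQP_GRP1, then deschedule so the SSO
 * re-queues the event to the new group.
 */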
static __rte_always_inline void
cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
		       const struct rte_event *ev, const uint16_t grp)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;

	plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
				     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
	cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
}

static __rte_always_inline void
cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
		cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
		ws->swtag_req = 1;
	} else {
		/*
		 * Group has been changed for group based work pipelining,
		 * use deschedule/add_work operation to transfer the event to
		 * the new group/core.
		 */
		cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
				       grp);
	}
}
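
/*
 * Dual ws ops: the dual variants ping-pong between a pair of hardware
 * workslots, consuming work from one slot while re-arming GETWORK on
 * its pair to hide the get-work latency.
 */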

static __rte_always_inline uint8_t
cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
			    const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (dws->xaq_lmt <= *dws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, dws->grps_base[grp]);
	return 1;
}

static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
				struct cn9k_sso_hws_state *vws,
				const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
		cn9k_sso_hws_fwd_swtag(vws, ev);
		dws->swtag_req = 1;
	} else {
		/*
		 * Group has been changed for group based work pipelining,
		 * use deschedule/add_work operation to transfer the event to
		 * the new group/core.
		 */
		cn9k_sso_hws_fwd_group(vws, ev, grp);
	}
}
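
/*
 * A GETWORK result is read back as two 64-bit words: the tag word and
 * the WQP. Bit 63 of the tag word stays set while the GETWORK is still
 * pending; once it clears, bits [31:0] hold the tag, bits [33:32] the
 * tag type and bits [45:36] the group. The shifts below move the tag
 * type and group into the rte_event sched_type/queue_id bit positions.
 */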
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
			   struct cn9k_sso_hws_state *ws_pair,
			   struct rte_event *ev)
{
	/* Wait for work, use mask set 0. */
	const uint64_t set_gw = BIT_ULL(16) | 1;
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;

#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "rty%=:					\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	str %[gw], [%[pong]]		\n"
		     "		dmb ld				\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
		       [gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
#else
	gw.u64[0] = plt_read64(ws->tag_op);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(ws->tag_op);
	gw.u64[1] = plt_read64(ws->wqp_op);
	plt_write64(set_gw, ws_pair->getwrk_op);
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}
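
/*
 * Single workslot get_work: arm GETWORK (wait for work, mask set 0),
 * then poll the tag word until the pending bit clears. A zero WQP means
 * no work was available, hence the !!gw.u64[1] return value.
 */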
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;

	plt_write64(BIT_ULL(16) | /* Wait for work. */
			    1,	  /* Use mask set 0. */
		    ws->getwrk_op);
#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbz %[tag], 63, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld				\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
	gw.u64[0] = plt_read64(ws->tag_op);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(ws->tag_op);

	gw.u64[1] = plt_read64(ws->wqp_op);
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

/* Used in cleaning up workslot: unlike cn9k_sso_hws_get_work() this
 * variant does not arm a new GETWORK, it only drains whatever the
 * workslot already holds.
 */
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;

#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbz %[tag], 63, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld				\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1])
		     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
	gw.u64[0] = plt_read64(ws->tag_op);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(ws->tag_op);

	gw.u64[1] = plt_read64(ws->wqp_op);
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

/* CN9K Fastpath functions. */
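/*
 * These prototypes are the enqueue/dequeue entry points registered with
 * the eventdev fastpath ops; the dual_* variants serve ports that own a
 * pair of workslots. Definitions live in the driver sources (presumably
 * cn9k_worker.c in this layout).
 */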
uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
					  const struct rte_event ev[],
					  uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
					      const struct rte_event ev[],
					      uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
					      const struct rte_event ev[],
					      uint16_t nb_events);

uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
					 const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
						   const struct rte_event ev[],
						   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
						   const struct rte_event ev[],
						   uint16_t nb_events);

uint16_t __rte_hot cn9k_sso_hws_deq(void *port, struct rte_event *ev,
				    uint64_t timeout_ticks);
uint16_t __rte_hot cn9k_sso_hws_deq_burst(void *port, struct rte_event ev[],
					  uint16_t nb_events,
					  uint64_t timeout_ticks);
uint16_t __rte_hot cn9k_sso_hws_tmo_deq(void *port, struct rte_event *ev,
					uint64_t timeout_ticks);
uint16_t __rte_hot cn9k_sso_hws_tmo_deq_burst(void *port, struct rte_event ev[],
					      uint16_t nb_events,
					      uint64_t timeout_ticks);

uint16_t __rte_hot cn9k_sso_hws_dual_deq(void *port, struct rte_event *ev,
					 uint64_t timeout_ticks);
uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst(void *port,
					       struct rte_event ev[],
					       uint16_t nb_events,
					       uint64_t timeout_ticks);
uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq(void *port, struct rte_event *ev,
					     uint64_t timeout_ticks);
uint16_t __rte_hot cn9k_sso_hws_dual_tmo_deq_burst(void *port,
						   struct rte_event ev[],
						   uint16_t nb_events,
						   uint64_t timeout_ticks);

#endif /* __CN9K_WORKER_H__ */