/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#include "cn9k_ethdev.h"
#include "cn9k_rx.h"

/* SSO Operations */

static __rte_always_inline uint8_t
cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
        if (ws->xaq_lmt <= *ws->fc_mem)
                return 0;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
        return 1;
}

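/* Note: a zero return means the XAQ flow-control threshold was hit
 * (*ws->fc_mem has reached ws->xaq_lmt) and the event was not queued;
 * the burst enqueue paths stop there and report the partial count.
 */
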
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
                       const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));

        /* CNXK model
         * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
         *
         * SSO_TT_ORDERED        norm           norm           untag
         * SSO_TT_ATOMIC         norm           norm           untag
         * SSO_TT_UNTAGGED       norm           norm           NOOP
         */
        if (new_tt == SSO_TT_UNTAGGED) {
                if (cur_tt != SSO_TT_UNTAGGED)
                        cnxk_sso_hws_swtag_untag(
                                CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
                                SSOW_LF_GWS_OP_SWTAG_UNTAG);
        } else {
                cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
        }
}

static __rte_always_inline void
cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
                       const struct rte_event *ev, const uint16_t grp)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;

        plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
                             SSOW_LF_GWS_OP_UPD_WQP_GRP1);
        cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
}

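/* The two steps above belong together: SSOW_LF_GWS_OP_UPD_WQP_GRP1 stages
 * the new work pointer, and the SWTAG_DESCHED that follows releases the
 * current tag and re-queues the event to group 'grp'.
 */
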
static __rte_always_inline void
cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed, use SWTAG to forward the event */
        if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
                cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
                ws->swtag_req = 1;
        } else {
                /*
                 * Group has been changed for group based work pipelining,
                 * use deschedule/add_work operation to transfer the event to
                 * the new group/core
                 */
                cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
                                       grp);
        }
}

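/* Illustrative sketch (not part of this header): the single-event enqueue
 * op in cn9k_worker.c dispatches on ev->op roughly as follows:
 *
 *      switch (ev->op) {
 *      case RTE_EVENT_OP_NEW:
 *              return cn9k_sso_hws_new_event(ws, ev);
 *      case RTE_EVENT_OP_FORWARD:
 *              cn9k_sso_hws_forward_event(ws, ev);
 *              break;
 *      case RTE_EVENT_OP_RELEASE:
 *              cnxk_sso_hws_swtag_flush(ws->tag_op, ws->swtag_flush_op);
 *              break;
 *      default:
 *              return 0;
 *      }
 *      return 1;
 */
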
static __rte_always_inline uint8_t
cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
                            const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
        if (dws->xaq_lmt <= *dws->fc_mem)
                return 0;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt, dws->grps_base[grp]);
        return 1;
}

static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
                                struct cn9k_sso_hws_state *vws,
                                const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed, use SWTAG to forward the event */
        if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
                cn9k_sso_hws_fwd_swtag(vws, ev);
                dws->swtag_req = 1;
        } else {
                /*
                 * Group has been changed for group based work pipelining,
                 * use deschedule/add_work operation to transfer the event to
                 * the new group/core
                 */
                cn9k_sso_hws_fwd_group(vws, ev, grp);
        }
}

static __rte_always_inline void
cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
                 const uint32_t tag, const uint32_t flags,
                 const void *const lookup_mem)
{
        const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
                                   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

        cn9k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
                             (struct rte_mbuf *)mbuf, lookup_mem,
                             mbuf_init | ((uint64_t)port_id) << 48, flags);
}

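/* mbuf_init is the precomputed rte_mbuf rearm word: data_off =
 * RTE_PKTMBUF_HEADROOM (plus 8 bytes when a timestamp is prepended),
 * refcnt = 1 (bit 16), nb_segs = 1 (bit 32), and the port folded in
 * at bits 63:48 from port_id.
 */
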
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
                           struct cn9k_sso_hws_state *ws_pair,
                           struct rte_event *ev, const uint32_t flags,
                           const void *const lookup_mem,
                           struct cnxk_timesync_info *const tstamp)
{
        const uint64_t set_gw = BIT_ULL(16) | 1; /* wait for work, mask set 0 */
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t tstamp_ptr;
        uint64_t mbuf;

        if (flags & NIX_RX_OFFLOAD_PTYPE_F)
                rte_prefetch_non_temporal(lookup_mem);
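        /*
         * Dual-workslot ping-pong: wait for the pending GETWORK on this
         * workslot to complete, then immediately start a new GETWORK on
         * the paired workslot so the two requests overlap.
         */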
#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "rty%=:                                    \n"
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbnz %[tag], 63, rty%=          \n"
                     "done%=:   str %[gw], [%[pong]]            \n"
                     "          dmb ld                          \n"
                     "          sub %[mbuf], %[wqp], #0x80      \n"
                     "          prfm pldl1keep, [%[mbuf]]       \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
                       [gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);
        gw.u64[1] = plt_read64(ws->wqp_op);
        plt_write64(set_gw, ws_pair->getwrk_op);
        /* The mbuf header sits one struct rte_mbuf (128B, the asm's #0x80)
         * before the WQE that the work-queue pointer references.
         */
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

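        /* Fold the GWS tag word into the rte_event layout: TT bits [33:32]
         * move to the sched_type position [39:38], group bits [45:36] move
         * to the queue_id position at bit 40, and the low 32 tag bits stay
         * as the flow/sub-event/event-type fields.
         */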
        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, flags,
                                         lookup_mem);
                        /* Extract the timestamp, if PTP is enabled */
                        tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
                                                    gw.u64[1]) +
                                                   CNXK_SSO_WQE_SG_PTR);
                        cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
                                                flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                                flags & NIX_RX_MULTI_SEG_F,
                                                (uint64_t *)tstamp_ptr);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

static __rte_always_inline uint16_t
cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
                      const uint32_t flags, const void *const lookup_mem)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t tstamp_ptr;
        uint64_t mbuf;

        plt_write64(BIT_ULL(16) | /* wait for work. */
                            1,    /* Use Mask set 0. */
                    ws->getwrk_op);

        if (flags & NIX_RX_OFFLOAD_PTYPE_F)
                rte_prefetch_non_temporal(lookup_mem);
232 #ifdef RTE_ARCH_ARM64
233 asm volatile(PLT_CPU_FEATURE_PREAMBLE
234 " ldr %[tag], [%[tag_loc]] \n"
235 " ldr %[wqp], [%[wqp_loc]] \n"
236 " tbz %[tag], 63, done%= \n"
239 " ldr %[tag], [%[tag_loc]] \n"
240 " ldr %[wqp], [%[wqp_loc]] \n"
241 " tbnz %[tag], 63, rty%= \n"
243 " sub %[mbuf], %[wqp], #0x80 \n"
244 " prfm pldl1keep, [%[mbuf]] \n"
245 : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
247 : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);

        gw.u64[1] = plt_read64(ws->wqp_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, flags,
                                         lookup_mem);
                        /* Extract the timestamp, if PTP is enabled */
                        tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
                                                    gw.u64[1]) +
                                                   CNXK_SSO_WQE_SG_PTR);
                        cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
                                                ws->tstamp,
                                                flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                                flags & NIX_RX_MULTI_SEG_F,
                                                (uint64_t *)tstamp_ptr);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbz %[tag], 63, done%=          \n"
                     "          sevl                            \n"
                     "rty%=:    wfe                             \n"
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbnz %[tag], 63, rty%=          \n"
                     "done%=:   dmb ld                          \n"
                     "          sub %[mbuf], %[wqp], #0x80      \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);

        gw.u64[1] = plt_read64(ws->wqp_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, 0, NULL);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

/* CN9K Fastpath functions. */
uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
                                          const struct rte_event ev[],
                                          uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
                                              const struct rte_event ev[],
                                              uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
                                              const struct rte_event ev[],
                                              uint16_t nb_events);

uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
                                         const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
                                               const struct rte_event ev[],
                                               uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
                                                   const struct rte_event ev[],
                                                   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
                                                   const struct rte_event ev[],
                                                   uint16_t nb_events);

#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name(                        \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                  \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name(                    \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name(              \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

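/* Illustrative only: for an entry such as R(no_offload, 0, 0, 0, 0, 0, 0,
 * NIX_RX_OFFLOAD_NONE) in NIX_RX_FASTPATH_MODES, the macro above declares,
 * among others:
 *
 *      uint16_t __rte_hot cn9k_sso_hws_deq_no_offload(
 *              void *port, struct rte_event *ev, uint64_t timeout_ticks);
 */
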
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name(                   \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(             \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name(               \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name(         \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

#endif /* __CN9K_WORKER_H__ */