/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#include "cn9k_ethdev.h"
#include "cn9k_rx.h"

/* SSO Operations */

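/* Enqueue ev as new work on its target group. The fence orders the read of
 * the flow-control counter against earlier stores; when the in-flight XAQ
 * count has reached the limit, the enqueue is refused (returns 0) and the
 * caller is expected to retry.
 */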
static __rte_always_inline uint8_t
cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
        if (ws->xaq_lmt <= *ws->fc_mem)
                return 0;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
        return 1;
}

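/* Forward an event within the same group by switching its tag. A SWTAG
 * changes the tag/tag-type of the work the workslot currently holds; the
 * table below lists which form (norm/untag/no-op) applies to each
 * current/new tag-type combination.
 */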
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
                       const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));

        /* CNXK model
         * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
         *
         * SSO_TT_ORDERED        norm           norm           untag
         * SSO_TT_ATOMIC         norm           norm           untag
         * SSO_TT_UNTAGGED       norm           norm           NOOP
         */

        if (new_tt == SSO_TT_UNTAGGED) {
                if (cur_tt != SSO_TT_UNTAGGED)
                        cnxk_sso_hws_swtag_untag(
                                CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
                                SSOW_LF_GWS_OP_SWTAG_UNTAG);
        } else {
                cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
        }
}

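/* Forward an event to a different group: update the work-queue pointer for
 * the held work, then issue a combined SWTAG + deschedule so the SSO
 * re-enqueues the event to the new group, where any core subscribed to that
 * group can pick it up.
 */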
static __rte_always_inline void
cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
                       const struct rte_event *ev, const uint16_t grp)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;

        plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
                                     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
        cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
}

static __rte_always_inline void
cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed; use SWTAG to forward the event. */
        if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
                cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
                ws->swtag_req = 1;
        } else {
                /*
                 * Group has changed for group-based work pipelining;
                 * use the deschedule/add_work operation to transfer
                 * the event to the new group/core.
                 */
                cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
                                       grp);
        }
}

/* Dual ws ops. */
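/* The dual-workslot variants ping-pong between two GWS contexts: while the
 * event from one workslot is being processed, a GET_WORK is already in
 * flight on the paired workslot, hiding scheduling latency.
 */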

static __rte_always_inline uint8_t
cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
                            const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
        if (dws->xaq_lmt <= *dws->fc_mem)
                return 0;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt, dws->grps_base[grp]);
        return 1;
}

static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
                                struct cn9k_sso_hws_state *vws,
                                const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed; use SWTAG to forward the event. */
        if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
                cn9k_sso_hws_fwd_swtag(vws, ev);
                dws->swtag_req = 1;
        } else {
                /*
                 * Group has changed for group-based work pipelining;
                 * use the deschedule/add_work operation to transfer
                 * the event to the new group/core.
                 */
                cn9k_sso_hws_fwd_group(vws, ev, grp);
        }
}

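/* Convert a received WQE into an mbuf. mbuf_init follows the little-endian
 * mbuf rearm_data layout: data_off in bits [15:0] (RTE_PKTMBUF_HEADROOM,
 * plus 8 bytes reserved for the timestamp when PTP is enabled), refcnt = 1
 * in bits [31:16] and nb_segs = 1 in bits [47:32]; the port id is folded
 * into bits [63:48] below.
 */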
static __rte_always_inline void
cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
                 const uint32_t tag, const uint32_t flags,
                 const void *const lookup_mem)
{
        const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
                                   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

        cn9k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
                             (struct rte_mbuf *)mbuf, lookup_mem,
                             mbuf_init | ((uint64_t)port_id) << 48, flags);
}

static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
                           struct cn9k_sso_hws_state *ws_pair,
                           struct rte_event *ev, const uint32_t flags,
                           const void *const lookup_mem,
                           struct cnxk_timesync_info *const tstamp)
{
        const uint64_t set_gw = BIT_ULL(16) | 1;
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t tstamp_ptr;
        uint64_t mbuf;

        if (flags & NIX_RX_OFFLOAD_PTYPE_F)
                rte_prefetch_non_temporal(lookup_mem);
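        /* Wait for the in-flight GET_WORK to complete: bit 63 of the tag
         * word stays set while the response is pending. Once it clears,
         * kick off the next GET_WORK on the paired workslot and prefetch
         * the mbuf, which sits sizeof(struct rte_mbuf) (0x80) bytes before
         * the WQE.
         */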
#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "rty%=:                                    \n"
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbnz %[tag], 63, rty%=          \n"
                     "done%=:   str %[gw], [%[pong]]            \n"
                     "          dmb ld                          \n"
                     "          sub %[mbuf], %[wqp], #0x80      \n"
                     "          prfm pldl1keep, [%[mbuf]]       \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
                       [gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);
        gw.u64[1] = plt_read64(ws->wqp_op);
        plt_write64(set_gw, ws_pair->getwrk_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

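        /* Repack the GWS tag word into rte_event format: the tag type in
         * bits [33:32] moves to the sched_type position (bits [39:38]), the
         * group in bits [45:36] moves to the queue_id position (bits
         * [47:40]), and the tag/flow bits [31:0] stay in place.
         */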
        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, flags,
                                         lookup_mem);
                        /* Extract the timestamp if PTP is enabled. */
                        tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
                                                            gw.u64[1]) +
                                                   CNXK_SSO_WQE_SG_PTR);
                        cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
                                                flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                                flags & NIX_RX_MULTI_SEG_F,
                                                (uint64_t *)tstamp_ptr);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

static __rte_always_inline uint16_t
cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
                      const uint32_t flags, const void *const lookup_mem)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t tstamp_ptr;
        uint64_t mbuf;

        plt_write64(BIT_ULL(16) | /* wait for work. */
                            1,    /* Use Mask set 0. */
                    ws->getwrk_op);

        if (flags & NIX_RX_OFFLOAD_PTYPE_F)
                rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbz %[tag], 63, done%=          \n"
                     "          sevl                            \n"
                     "rty%=:    wfe                             \n"
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbnz %[tag], 63, rty%=          \n"
                     "done%=:   dmb ld                          \n"
                     "          sub %[mbuf], %[wqp], #0x80      \n"
                     "          prfm pldl1keep, [%[mbuf]]       \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);

        gw.u64[1] = plt_read64(ws->wqp_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, flags,
                                         lookup_mem);
                        /* Extract the timestamp if PTP is enabled. */
                        tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
                                                            gw.u64[1]) +
                                                   CNXK_SSO_WQE_SG_PTR);
                        cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
                                                ws->tstamp,
                                                flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                                flags & NIX_RX_MULTI_SEG_F,
                                                (uint64_t *)tstamp_ptr);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

/* Used when cleaning up a workslot. */
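/* Same get-work flow as above, but with no Rx offload flags and no
 * lookup_mem: during teardown only the raw event needs to be recovered.
 */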
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbz %[tag], 63, done%=          \n"
                     "          sevl                            \n"
                     "rty%=:    wfe                             \n"
                     "          ldr %[tag], [%[tag_loc]]        \n"
                     "          ldr %[wqp], [%[wqp_loc]]        \n"
                     "          tbnz %[tag], 63, rty%=          \n"
                     "done%=:   dmb ld                          \n"
                     "          sub %[mbuf], %[wqp], #0x80      \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);

        gw.u64[1] = plt_read64(ws->wqp_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, 0, NULL);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

/* CN9K Fastpath functions. */
uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
                                          const struct rte_event ev[],
                                          uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
                                              const struct rte_event ev[],
                                              uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
                                              const struct rte_event ev[],
                                              uint16_t nb_events);

uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
                                         const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
                                               const struct rte_event ev[],
                                               uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
                                                   const struct rte_event ev[],
                                                   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
                                                   const struct rte_event ev[],
                                                   uint16_t nb_events);

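/* NIX_RX_FASTPATH_MODES expands R() once per supported combination of Rx
 * offload flags, declaring a specialised dequeue handler for each (plain,
 * burst, timeout and multi-segment variants). The definitions are generated
 * in the corresponding cn9k worker source files, and the event device picks
 * the matching handler at runtime based on the configured offloads.
 */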
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name(                        \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                  \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name(                    \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name(              \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name(                   \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(             \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);                                       \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name(               \
                void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
        uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name(         \
                void *port, struct rte_event ev[], uint16_t nb_events,         \
                uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

#endif /* __CN9K_WORKER_H__ */