/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__

#include <rte_eventdev.h>
#include <rte_vect.h>

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#include "cn9k_cryptodev_ops.h"

#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"

/* SSO Operations */

static __rte_always_inline uint8_t
cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
			      ws->grp_base + (grp << 12));
	return 1;
}

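/*
 * Usage sketch (illustrative only, not part of the driver): enqueue paths
 * retry RTE_EVENT_OP_NEW events until the XAQ flow-control check above
 * admits them, e.g.
 *
 *	while (!cn9k_sso_hws_new_event(ws, ev))
 *		rte_pause();
 *
 * A return of 1 means the event was added to the group at
 * ws->grp_base + (grp << 12); 0 means the XAQ pool is currently exhausted.
 */
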
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(uint64_t base, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt =
		CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG));

	/* CNXK model
	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
	 *
	 * SSO_TT_ORDERED        norm           norm          untag
	 * SSO_TT_ATOMIC         norm           norm          untag
	 * SSO_TT_UNTAGGED       norm           norm          NOOP
	 */
	if (new_tt == SSO_TT_UNTAGGED) {
		if (cur_tt != SSO_TT_UNTAGGED)
			cnxk_sso_hws_swtag_untag(base +
						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
	} else {
		cnxk_sso_hws_swtag_norm(tag, new_tt,
					base + SSOW_LF_GWS_OP_SWTAG_NORM);
	}
}

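/*
 * In the table above, "norm" corresponds to the SWTAG_NORM operation
 * (switch the work slot to the new tag and tag type) and "untag" to
 * SWTAG_UNTAG (release the current tag); forwarding untagged-to-untagged
 * needs no operation at all.
 */
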
static __rte_always_inline void
cn9k_sso_hws_new_event_wait(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	while (ws->xaq_lmt <= __atomic_load_n(ws->fc_mem, __ATOMIC_RELAXED))
		;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
			      ws->grp_base + (grp << 12));
}

static __rte_always_inline void
cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_TAG)) == grp) {
		cn9k_sso_hws_fwd_swtag(ws->base, ev);
		ws->swtag_req = 1;
	} else {
		/*
		 * Group has been changed for group based work pipelining,
		 * use add_work operation to transfer the event to the
		 * new group/core.
		 */
		rte_atomic_thread_fence(__ATOMIC_RELEASE);
		roc_sso_hws_head_wait(ws->base);
		cn9k_sso_hws_new_event_wait(ws, ev);
	}
}

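/*
 * Two forwarding paths exist: when the destination group matches the
 * current one, a SWTAG keeps the event on this work slot and preserves
 * ordering; otherwise the slot waits to reach the head of its tag chain
 * and re-injects the event into the new group via add_work.
 */
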
/* Dual ws ops. */

static __rte_always_inline uint8_t
cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
			    const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (dws->xaq_lmt <= *dws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
			      dws->grp_base + (grp << 12));
	return 1;
}

static __rte_always_inline void
cn9k_sso_hws_dual_new_event_wait(struct cn9k_sso_hws_dual *dws,
				 const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	while (dws->xaq_lmt <= __atomic_load_n(dws->fc_mem, __ATOMIC_RELAXED))
		;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt,
			      dws->grp_base + (grp << 12));
}

static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws, uint64_t base,
				const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(base + SSOW_LF_GWS_TAG)) == grp) {
		cn9k_sso_hws_fwd_swtag(base, ev);
		dws->swtag_req = 1;
	} else {
		/*
		 * Group has been changed for group based work pipelining,
		 * use add_work operation to transfer the event to the
		 * new group/core.
		 */
		rte_atomic_thread_fence(__ATOMIC_RELEASE);
		roc_sso_hws_head_wait(base);
		cn9k_sso_hws_dual_new_event_wait(dws, ev);
	}
}

static __rte_always_inline void
cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
		 const uint32_t tag, const uint32_t flags,
		 const void *const lookup_mem)
{
	const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
				   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

	cn9k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
			     (struct rte_mbuf *)mbuf, lookup_mem,
			     mbuf_init | ((uint64_t)port_id) << 48, flags);
}

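/*
 * mbuf_init above is the mbuf rearm word: 0x100010000ULL encodes
 * nb_segs = 1 (bits 47:32) and refcnt = 1 (bits 31:16), OR-ed with the
 * data offset in bits 15:0 (RTE_PKTMBUF_HEADROOM, plus 8 bytes when the
 * timestamp offload is enabled and the packet starts with a timestamp).
 * The port id is then packed into bits 63:48 before the CQE is parsed
 * into the mbuf.
 */
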
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(uint64_t base, uint64_t pair_base,
			   struct rte_event *ev, const uint32_t flags,
			   struct cn9k_sso_hws_dual *dws)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
		rte_prefetch_non_temporal(dws->lookup_mem);
#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "rty%=:					\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	str %[gw], [%[pong]]		\n"
		     "		dmb ld				\n"
		     "		sub %[mbuf], %[wqp], #0x80	\n"
		     "		prfm pldl1keep, [%[mbuf]]	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(base + SSOW_LF_GWS_TAG),
		       [wqp_loc] "r"(base + SSOW_LF_GWS_WQP), [gw] "r"(dws->gw_wdata),
		       [pong] "r"(pair_base + SSOW_LF_GWS_OP_GET_WORK0));
#else
	gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
	gw.u64[1] = plt_read64(base + SSOW_LF_GWS_WQP);
	plt_write64(dws->gw_wdata, pair_base + SSOW_LF_GWS_OP_GET_WORK0);
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if ((flags & CPT_RX_WQE_F) &&
		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		     RTE_EVENT_TYPE_CRYPTODEV)) {
			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					 gw.u64[0] & 0xFFFFF, flags,
					 dws->lookup_mem);
			/* Extracting tstamp, if PTP enabled */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
							   gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						dws->tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

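/*
 * Dual-workslot dequeue: while one work slot (base) delivers the current
 * event, a GETWORK is issued on the paired slot (pair_base) by storing
 * gw_wdata to its GET_WORK0 register, so the next call usually finds work
 * already scheduled. The tag word is then repacked from its hardware bit
 * positions into the rte_event layout before the WQE is translated.
 */
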
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
		      const uint32_t flags, const void *const lookup_mem)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	plt_write64(ws->gw_wdata, ws->base + SSOW_LF_GWS_OP_GET_WORK0);

	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
		rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbz %[tag], 63, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld				\n"
		     "		sub %[mbuf], %[wqp], #0x80	\n"
		     "		prfm pldl1keep, [%[mbuf]]	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_TAG),
		       [wqp_loc] "r"(ws->base + SSOW_LF_GWS_WQP));
#else
	gw.u64[0] = plt_read64(ws->base + SSOW_LF_GWS_TAG);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(ws->base + SSOW_LF_GWS_TAG);

	gw.u64[1] = plt_read64(ws->base + SSOW_LF_GWS_WQP);
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if ((flags & CPT_RX_WQE_F) &&
		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		     RTE_EVENT_TYPE_CRYPTODEV)) {
			gw.u64[1] = cn9k_cpt_crypto_adapter_dequeue(gw.u64[1]);
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					 gw.u64[0] & 0xFFFFF, flags,
					 lookup_mem);
			/* Extracting tstamp, if PTP enabled */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
							   gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						ws->tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

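/*
 * Single-workslot variant: the GETWORK request is issued up front and, on
 * ARM64, the loop sleeps on WFE until bit 63 of the tag register (GETWORK
 * pending, as polled explicitly in the generic fallback) clears, which is
 * cheaper than busy-polling.
 */
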
/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(uint64_t base, struct rte_event *ev)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbz %[tag], 63, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldr %[tag], [%[tag_loc]]	\n"
		     "		ldr %[wqp], [%[wqp_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld				\n"
		     "		sub %[mbuf], %[wqp], #0x80	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(base + SSOW_LF_GWS_TAG),
		       [wqp_loc] "r"(base + SSOW_LF_GWS_WQP));
#else
	gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);
	while ((BIT_ULL(63)) & gw.u64[0])
		gw.u64[0] = plt_read64(base + SSOW_LF_GWS_TAG);

	gw.u64[1] = plt_read64(base + SSOW_LF_GWS_WQP);
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		    RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					 gw.u64[0] & 0xFFFFF, 0, NULL);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

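/*
 * The drain variant above passes flags = 0 and a NULL lookup_mem to
 * cn9k_wqe_to_mbuf(): no Rx offload parsing is needed when the work slot
 * is only being emptied during port/queue cleanup.
 */
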
/* CN9K Fastpath functions. */
uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
					  const struct rte_event ev[],
					  uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
					      const struct rte_event ev[],
					      uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
					      const struct rte_event ev[],
					      uint16_t nb_events);

uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
					 const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
						   const struct rte_event ev[],
						   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
						   const struct rte_event ev[],
						   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_ca_enq(void *port, struct rte_event ev[],
				       uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_ca_enq(void *port, struct rte_event ev[],
					    uint16_t nb_events);

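/*
 * The R() macro below expands once per Rx offload combination listed in
 * NIX_RX_FASTPATH_MODES and declares the dequeue matrix for that mode:
 * plain, _burst, _tmo (timeout), _ca (crypto adapter) and _seg (multi-seg)
 * variants plus their combinations. For a hypothetical mode "foo" this
 * declares cn9k_sso_hws_deq_foo(), cn9k_sso_hws_deq_burst_foo(), and so on.
 */
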
#define R(name, flags)                                                         \
	uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name(                        \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_##name(                         \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_burst_##name(                   \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_ca_##name(                     \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_ca_burst_##name(               \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name(                    \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_##name(                     \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_ca_seg_burst_##name(               \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_ca_seg_##name(                 \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_ca_seg_burst_##name(           \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

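/*
 * The SSO_DEQ* macros below provide the function bodies matching the
 * declarations above; each expansion is a complete dequeue entry point
 * specialized at compile time for one offload-flag combination.
 */
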
#define SSO_DEQ(fn, flags)                                                     \
	uint16_t __rte_hot fn(void *port, struct rte_event *ev,                \
			      uint64_t timeout_ticks)                          \
	{                                                                      \
		struct cn9k_sso_hws *ws = port;                                \
		RTE_SET_USED(timeout_ticks);                                   \
		if (ws->swtag_req) {                                           \
			ws->swtag_req = 0;                                     \
			cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG);   \
		}                                                              \
									       \
		return cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);   \
	}

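/*
 * Illustrative instantiation (hypothetical function name, for exposition
 * only):
 *
 *	SSO_DEQ(cn9k_sso_hws_deq_foo, NIX_RX_OFFLOAD_PTYPE_F)
 *
 * defines a single-event dequeue that first completes any pending tag
 * switch and then polls the work slot once.
 */
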
#define SSO_DEQ_SEG(fn, flags)	  SSO_DEQ(fn, flags | NIX_RX_MULTI_SEG_F)
#define SSO_DEQ_CA(fn, flags)	  SSO_DEQ(fn, flags | CPT_RX_WQE_F)
#define SSO_DEQ_CA_SEG(fn, flags) SSO_DEQ_SEG(fn, flags | CPT_RX_WQE_F)

#define SSO_DEQ_TMO(fn, flags)                                                 \
	uint16_t __rte_hot fn(void *port, struct rte_event *ev,                \
			      uint64_t timeout_ticks)                          \
	{                                                                      \
		struct cn9k_sso_hws *ws = port;                                \
		uint16_t ret = 1;                                              \
		uint64_t iter;                                                 \
		if (ws->swtag_req) {                                           \
			ws->swtag_req = 0;                                     \
			cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_TAG);   \
		}                                                              \
									       \
		ret = cn9k_sso_hws_get_work(ws, ev, flags, ws->lookup_mem);    \
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)     \
			ret = cn9k_sso_hws_get_work(ws, ev, flags,             \
						    ws->lookup_mem);           \
		return ret;                                                    \
	}

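/*
 * Note that timeout_ticks is consumed as a retry count here, not a
 * wall-clock duration: the dequeue is retried until work arrives or
 * timeout_ticks iterations have elapsed.
 */
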
#define SSO_DEQ_TMO_SEG(fn, flags)    SSO_DEQ_TMO(fn, flags | NIX_RX_MULTI_SEG_F)
#define SSO_DEQ_TMO_CA(fn, flags)     SSO_DEQ_TMO(fn, flags | CPT_RX_WQE_F)
#define SSO_DEQ_TMO_CA_SEG(fn, flags) SSO_DEQ_TMO_SEG(fn, flags | CPT_RX_WQE_F)

#define R(name, flags)                                                         \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_##name(                    \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_burst_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_ca_##name(                \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_ca_burst_##name(          \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name(               \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name(         \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_##name(                \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_ca_seg_burst_##name(          \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_ca_seg_##name(            \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_ca_seg_burst_##name(      \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

#define SSO_DUAL_DEQ(fn, flags)                                                \
	uint16_t __rte_hot fn(void *port, struct rte_event *ev,                \
			      uint64_t timeout_ticks)                          \
	{                                                                      \
		struct cn9k_sso_hws_dual *dws = port;                          \
		uint16_t gw;                                                   \
		RTE_SET_USED(timeout_ticks);                                   \
		if (dws->swtag_req) {                                          \
			dws->swtag_req = 0;                                    \
			cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] +         \
						SSOW_LF_GWS_TAG);              \
		}                                                              \
									       \
		gw = cn9k_sso_hws_dual_get_work(dws->base[dws->vws],           \
						dws->base[!dws->vws], ev,      \
						flags, dws);                   \
		dws->vws = !dws->vws;                                          \
		return gw;                                                     \
	}

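/*
 * dws->vws selects which of the two paired work slots is "current"; it is
 * flipped after every dequeue so that the slot which just issued a GETWORK
 * is the one polled on the next call.
 */
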
#define SSO_DUAL_DEQ_SEG(fn, flags) SSO_DUAL_DEQ(fn, flags | NIX_RX_MULTI_SEG_F)
#define SSO_DUAL_DEQ_CA(fn, flags)  SSO_DUAL_DEQ(fn, flags | CPT_RX_WQE_F)
#define SSO_DUAL_DEQ_CA_SEG(fn, flags)                                         \
	SSO_DUAL_DEQ_SEG(fn, flags | CPT_RX_WQE_F)

#define SSO_DUAL_DEQ_TMO(fn, flags)                                            \
	uint16_t __rte_hot fn(void *port, struct rte_event *ev,                \
			      uint64_t timeout_ticks)                          \
	{                                                                      \
		struct cn9k_sso_hws_dual *dws = port;                          \
		uint16_t ret = 1;                                              \
		uint64_t iter;                                                 \
		if (dws->swtag_req) {                                          \
			dws->swtag_req = 0;                                    \
			cnxk_sso_hws_swtag_wait(dws->base[!dws->vws] +         \
						SSOW_LF_GWS_TAG);              \
		}                                                              \
									       \
		ret = cn9k_sso_hws_dual_get_work(dws->base[dws->vws],          \
						 dws->base[!dws->vws], ev,     \
						 flags, dws);                  \
		dws->vws = !dws->vws;                                          \
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) {   \
			ret = cn9k_sso_hws_dual_get_work(dws->base[dws->vws],  \
							 dws->base[!dws->vws], \
							 ev, flags, dws);      \
			dws->vws = !dws->vws;                                  \
		}                                                              \
		return ret;                                                    \
	}

#define SSO_DUAL_DEQ_TMO_SEG(fn, flags)                                        \
	SSO_DUAL_DEQ_TMO(fn, flags | NIX_RX_MULTI_SEG_F)
#define SSO_DUAL_DEQ_TMO_CA(fn, flags)                                         \
	SSO_DUAL_DEQ_TMO(fn, flags | CPT_RX_WQE_F)
#define SSO_DUAL_DEQ_TMO_CA_SEG(fn, flags)                                     \
	SSO_DUAL_DEQ_TMO_SEG(fn, flags | CPT_RX_WQE_F)

#define SSO_CMN_DEQ_BURST(fnb, fn, flags)                                      \
	uint16_t __rte_hot fnb(void *port, struct rte_event ev[],              \
			       uint16_t nb_events, uint64_t timeout_ticks)     \
	{                                                                      \
		RTE_SET_USED(nb_events);                                       \
		return fn(port, ev, timeout_ticks);                            \
	}

#define SSO_CMN_DEQ_SEG_BURST(fnb, fn, flags)                                  \
	uint16_t __rte_hot fnb(void *port, struct rte_event ev[],              \
			       uint16_t nb_events, uint64_t timeout_ticks)     \
	{                                                                      \
		RTE_SET_USED(nb_events);                                       \
		return fn(port, ev, timeout_ticks);                            \
	}

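/*
 * The burst wrappers deliberately ignore nb_events: the SSO hands out one
 * event per GETWORK, so a burst dequeue degenerates to a single-event
 * dequeue and callers receive at most one event per call.
 */
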
static __rte_always_inline void
cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
{
	while (!((txq->nb_sqb_bufs_adj -
		  __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
		 << (txq)->sqes_per_sqb_log2))
		;
}

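/*
 * SQ flow control: *fc_mem tracks SQBs consumed by hardware, so
 * (nb_sqb_bufs_adj - *fc_mem) << sqes_per_sqb_log2 approximates the number
 * of free SQEs; the loop spins only while the send queue is full.
 */
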
static __rte_always_inline struct cn9k_eth_txq *
cn9k_sso_hws_xtract_meta(struct rte_mbuf *m, uint64_t *txq_data)
{
	return (struct cn9k_eth_txq
			*)(txq_data[(txq_data[m->port] >> 48) +
				    rte_event_eth_tx_adapter_txq_get(m)] &
			   (BIT_ULL(48) - 1));
}

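/*
 * txq_data layout, as consumed above: entry [port] carries the base offset
 * of that port's queues in its upper 16 bits, and each per-queue entry
 * stores the cn9k_eth_txq pointer in its lower 48 bits; adding the event's
 * Tx queue id and masking with BIT_ULL(48) - 1 recovers the pointer.
 */
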
#if defined(RTE_ARCH_ARM64)

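/*
 * Inline outbound IPsec (ONF) transmit: the packet is extended in place to
 * make room for the ESP transform, a CPT instruction is built around the
 * already-prepared NIX send descriptor, and the instruction is submitted
 * over the LMT line; CPT then hands the encrypted packet to NIX for
 * transmission.
 */
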
static __rte_always_inline void
cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
			  struct rte_mbuf *m, uint64_t *cmd,
			  uint32_t flags)
{
	struct cn9k_outb_priv_data *outb_priv;
	rte_iova_t io_addr = txq->cpt_io_addr;
	uint64_t *lmt_addr = txq->lmt_addr;
	struct cn9k_sec_sess_priv mdata;
	struct nix_send_hdr_s *send_hdr;
	uint64_t sa_base = txq->sa_base;
	uint32_t pkt_len, dlen_adj, rlen;
	uint64x2_t cmd01, cmd23;
	uint64_t lmt_status, sa;
	union nix_send_sg_s *sg;
	uintptr_t dptr, nixtx;
	uint64_t ucode_cmd[4];
	uint64_t esn, *iv;
	uint8_t l2_len;

	mdata.u64 = *rte_security_dynfield(m);
	send_hdr = (struct nix_send_hdr_s *)cmd;
	if (flags & NIX_TX_NEED_EXT_HDR)
		sg = (union nix_send_sg_s *)&cmd[4];
	else
		sg = (union nix_send_sg_s *)&cmd[2];

	if (flags & NIX_TX_NEED_SEND_HDR_W1)
		l2_len = cmd[1] & 0xFF;
	else
		l2_len = m->l2_len;

	/* Retrieve DPTR */
	dptr = *(uint64_t *)(sg + 1);
	pkt_len = send_hdr->w0.total;

	/* Calculate rlen */
	rlen = pkt_len - l2_len;
	rlen = (rlen + mdata.roundup_len) + (mdata.roundup_byte - 1);
	rlen &= ~(uint64_t)(mdata.roundup_byte - 1);
	rlen += mdata.partial_len;
	dlen_adj = rlen - pkt_len + l2_len;

	/* Update send descriptors. Security is single segment only */
	send_hdr->w0.total = pkt_len + dlen_adj;
	sg->seg1_size = pkt_len + dlen_adj;

	/* Get area where NIX descriptor needs to be stored */
	nixtx = dptr + pkt_len + dlen_adj;
	nixtx += BIT_ULL(7);
	nixtx = (nixtx - 1) & ~(BIT_ULL(7) - 1);

	roc_lmt_mov_nv((void *)(nixtx + 16), cmd, cn9k_nix_tx_ext_subs(flags));

	/* Load opcode and cptr already prepared at pkt metadata set */
	pkt_len -= l2_len;
	pkt_len += sizeof(struct roc_onf_ipsec_outb_hdr) +
		   ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ;
	sa_base &= ~(ROC_NIX_INL_SA_BASE_ALIGN - 1);

	sa = (uintptr_t)roc_nix_inl_onf_ipsec_outb_sa(sa_base, mdata.sa_idx);
	ucode_cmd[3] = (ROC_CPT_DFLT_ENG_GRP_SE_IE << 61 | sa);
	ucode_cmd[0] = (ROC_IE_ONF_MAJOR_OP_PROCESS_OUTBOUND_IPSEC << 48 |
			0x40UL << 48 | pkt_len);

	/* CPT Word 0 and Word 1 */
	cmd01 = vdupq_n_u64((nixtx + 16) | (cn9k_nix_tx_ext_subs(flags) + 1));
	/* CPT_RES_S is 16B above NIXTX */
	cmd01 = vsetq_lane_u8(nixtx & BIT_ULL(7), cmd01, 8);

	/* CPT word 2 and 3 */
	cmd23 = vdupq_n_u64(0);
	cmd23 = vsetq_lane_u64((((uint64_t)RTE_EVENT_TYPE_CPU << 28) |
				CNXK_ETHDEV_SEC_OUTB_EV_SUB << 20), cmd23, 0);
	cmd23 = vsetq_lane_u64((uintptr_t)m | 1, cmd23, 1);

	dptr += l2_len - ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ -
		sizeof(struct roc_onf_ipsec_outb_hdr);
	ucode_cmd[1] = dptr;
	ucode_cmd[2] = dptr;

	/* Update IV to zero and l2 sz */
	*(uint16_t *)(dptr + sizeof(struct roc_onf_ipsec_outb_hdr)) =
		rte_cpu_to_be_16(ROC_ONF_IPSEC_OUTB_MAX_L2_INFO_SZ);
	iv = (uint64_t *)(dptr + 8);
	iv[0] = 0;
	iv[1] = 0;

	/* Head wait if needed */
	if (base)
		roc_sso_hws_head_wait(base);

	/* ESN */
	outb_priv = roc_nix_inl_onf_ipsec_outb_sa_sw_rsvd((void *)sa);
	esn = outb_priv->esn;
	outb_priv->esn = esn + 1;

	ucode_cmd[0] |= (esn >> 32) << 16;
	esn = rte_cpu_to_be_32(esn & (BIT_ULL(32) - 1));

	/* Update ESN and IPID and IV */
	*(uint64_t *)dptr = esn << 32 | esn;

	rte_io_wmb();
	cn9k_sso_txq_fc_wait(txq);

	/* Write CPT instruction to lmt line */
	vst1q_u64(lmt_addr, cmd01);
	vst1q_u64(lmt_addr + 2, cmd23);

	roc_lmt_mov_seg(lmt_addr + 4, ucode_cmd, 2);

	if (roc_lmt_submit_ldeor(io_addr) == 0) {
		do {
			vst1q_u64(lmt_addr, cmd01);
			vst1q_u64(lmt_addr + 2, cmd23);
			roc_lmt_mov_seg(lmt_addr + 4, ucode_cmd, 2);

			lmt_status = roc_lmt_submit_ldeor(io_addr);
		} while (lmt_status == 0);
	}
}

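/*
 * An LMTST can be discarded by the core (e.g. on an intervening
 * exception), in which case LDEOR returns 0; the do/while above rewrites
 * the LMT line and resubmits until the store is confirmed.
 */
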
#else

static __rte_always_inline void
cn9k_sso_hws_xmit_sec_one(const struct cn9k_eth_txq *txq, uint64_t base,
			  struct rte_mbuf *m, uint64_t *cmd,
			  uint32_t flags)
{
	RTE_SET_USED(txq);
	RTE_SET_USED(base);
	RTE_SET_USED(m);
	RTE_SET_USED(cmd);
	RTE_SET_USED(flags);
}
#endif

static __rte_always_inline uint16_t
cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
		      uint64_t *txq_data, const uint32_t flags)
{
	struct rte_mbuf *m = ev->mbuf;
	uint16_t ref_cnt = m->refcnt;
	struct cn9k_eth_txq *txq;

	/* Perform header writes before barrier for TSO */
	cn9k_nix_xmit_prepare_tso(m, flags);
	/* Let's commit any changes in the packet here in case fast free
	 * is set, as no further changes will be made to the mbuf.
	 * In case fast free is not set, both cn9k_nix_prepare_mseg()
	 * and cn9k_nix_xmit_prepare() have a barrier after refcnt update.
	 */
	if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) &&
	    !(flags & NIX_TX_OFFLOAD_SECURITY_F))
		rte_io_wmb();
	txq = cn9k_sso_hws_xtract_meta(m, txq_data);
	cn9k_nix_tx_skeleton(txq, cmd, flags, 0);
	cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);

	if (flags & NIX_TX_OFFLOAD_SECURITY_F) {
		uint64_t ol_flags = m->ol_flags;

		if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
			uintptr_t ssow_base = base;

			if (ev->sched_type)
				ssow_base = 0;

			cn9k_sso_hws_xmit_sec_one(txq, ssow_base, m, cmd,
						  flags);
			goto done;
		}

		if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
			rte_io_wmb();
	}

	if (flags & NIX_TX_MULTI_SEG_F) {
		const uint16_t segdw = cn9k_nix_prepare_mseg(m, cmd, flags);
		cn9k_nix_xmit_prepare_tstamp(txq, cmd, m->ol_flags, segdw,
					     flags);
		if (!CNXK_TT_FROM_EVENT(ev->event)) {
			cn9k_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
			roc_sso_hws_head_wait(base);
			cn9k_sso_txq_fc_wait(txq);
			if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
				cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr,
						       txq->io_addr, segdw);
		} else {
			cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr,
					       segdw);
		}
	} else {
		cn9k_nix_xmit_prepare_tstamp(txq, cmd, m->ol_flags, 4, flags);
		if (!CNXK_TT_FROM_EVENT(ev->event)) {
			cn9k_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
			roc_sso_hws_head_wait(base);
			cn9k_sso_txq_fc_wait(txq);
			if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
				cn9k_nix_xmit_one(cmd, txq->lmt_addr,
						  txq->io_addr, flags);
		} else {
			cn9k_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr,
					  flags);
		}
	}

done:
	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		if (ref_cnt > 1)
			return 1;
	}

	cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
				 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

	return 1;
}

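/*
 * Ordered events (tag type 0) defer the transmit until this work slot
 * reaches the head of its ordered chain, then fall back to the *_one()
 * submission if the LMT store aborts. The final SWTAG_FLUSH releases the
 * tag and completes the event; with MBUF_NOFF set, an mbuf still
 * referenced elsewhere (ref_cnt > 1) skips the flush.
 */
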
#define T(name, sz, flags)                                                     \
	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name(                   \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name(               \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_seg_##name(          \
		void *port, struct rte_event ev[], uint16_t nb_events);

NIX_TX_FASTPATH_MODES
#undef T

#define SSO_TX(fn, sz, flags)                                                  \
	uint16_t __rte_hot fn(void *port, struct rte_event ev[],               \
			      uint16_t nb_events)                              \
	{                                                                      \
		struct cn9k_sso_hws *ws = port;                                \
		uint64_t cmd[sz];                                              \
		RTE_SET_USED(nb_events);                                       \
		return cn9k_sso_hws_event_tx(ws->base, &ev[0], cmd,            \
					     (uint64_t *)ws->tx_adptr_data,    \
					     flags);                           \
	}

#define SSO_TX_SEG(fn, sz, flags)                                              \
	uint16_t __rte_hot fn(void *port, struct rte_event ev[],               \
			      uint16_t nb_events)                              \
	{                                                                      \
		uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2];           \
		struct cn9k_sso_hws *ws = port;                                \
		RTE_SET_USED(nb_events);                                       \
		return cn9k_sso_hws_event_tx(ws->base, &ev[0], cmd,            \
					     (uint64_t *)ws->tx_adptr_data,    \
					     (flags) | NIX_TX_MULTI_SEG_F);    \
	}

#define SSO_DUAL_TX(fn, sz, flags)                                             \
	uint16_t __rte_hot fn(void *port, struct rte_event ev[],               \
			      uint16_t nb_events)                              \
	{                                                                      \
		struct cn9k_sso_hws_dual *ws = port;                           \
		uint64_t cmd[sz];                                              \
		RTE_SET_USED(nb_events);                                       \
		return cn9k_sso_hws_event_tx(ws->base[!ws->vws], &ev[0], cmd,  \
					     (uint64_t *)ws->tx_adptr_data,    \
					     flags);                           \
	}

#define SSO_DUAL_TX_SEG(fn, sz, flags)                                         \
	uint16_t __rte_hot fn(void *port, struct rte_event ev[],               \
			      uint16_t nb_events)                              \
	{                                                                      \
		uint64_t cmd[(sz) + CNXK_NIX_TX_MSEG_SG_DWORDS - 2];           \
		struct cn9k_sso_hws_dual *ws = port;                           \
		RTE_SET_USED(nb_events);                                       \
		return cn9k_sso_hws_event_tx(ws->base[!ws->vws], &ev[0], cmd,  \
					     (uint64_t *)ws->tx_adptr_data,    \
					     (flags) | NIX_TX_MULTI_SEG_F);    \
	}

#endif /* __CN9K_WORKER_H__ */