/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN9K_WORKER_H__
#define __CN9K_WORKER_H__

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#include "cn9k_ethdev.h"
#include "cn9k_rx.h"
#include "cn9k_tx.h"

/* SSO Operations */

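/* Enqueue a NEW event by posting an ADDWORK for the destination event group.
 * Returns 0 when the XAQ flow-control threshold has been reached so the
 * caller can back off and retry.
 */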
static __rte_always_inline uint8_t
cn9k_sso_hws_new_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
        if (ws->xaq_lmt <= *ws->fc_mem)
                return 0;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
        return 1;
}

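/* Forward an event within the same group by switching the tag/tag type on
 * the current work slot (SWTAG).
 */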
static __rte_always_inline void
cn9k_sso_hws_fwd_swtag(struct cn9k_sso_hws_state *vws,
                       const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint8_t cur_tt = CNXK_TT_FROM_TAG(plt_read64(vws->tag_op));

        /* SWTAG operation, keyed by current and new tag types:
         *
         * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
         *
         * SSO_TT_ORDERED        norm           norm           untag
         * SSO_TT_ATOMIC         norm           norm           untag
         * SSO_TT_UNTAGGED       norm           norm           NOOP
         */
        if (new_tt == SSO_TT_UNTAGGED) {
                if (cur_tt != SSO_TT_UNTAGGED)
                        cnxk_sso_hws_swtag_untag(
                                CN9K_SSOW_GET_BASE_ADDR(vws->getwrk_op) +
                                SSOW_LF_GWS_OP_SWTAG_UNTAG);
        } else {
                cnxk_sso_hws_swtag_norm(tag, new_tt, vws->swtag_norm_op);
        }
}

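/* Forward an event to a different group: update the work-queue pointer for
 * the new group and deschedule with a tag switch (SWTAG_DESCHED).
 */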
static __rte_always_inline void
cn9k_sso_hws_fwd_group(struct cn9k_sso_hws_state *ws,
                       const struct rte_event *ev, const uint16_t grp)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;

        plt_write64(ev->u64, CN9K_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
                                     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
        cnxk_sso_hws_swtag_desched(tag, new_tt, grp, ws->swtag_desched_op);
}

static __rte_always_inline void
cn9k_sso_hws_forward_event(struct cn9k_sso_hws *ws, const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed, use SWTAG to forward the event */
        if (CNXK_GRP_FROM_TAG(plt_read64(ws->tag_op)) == grp) {
                cn9k_sso_hws_fwd_swtag((struct cn9k_sso_hws_state *)ws, ev);
                ws->swtag_req = 1;
        } else {
                /*
                 * Group has been changed for group based work pipelining,
                 * use deschedule/add_work operation to transfer the event to
                 * the new group/core.
                 */
                cn9k_sso_hws_fwd_group((struct cn9k_sso_hws_state *)ws, ev,
                                       grp);
        }
}

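/* Dual-workslot variants of the NEW/FORWARD enqueue helpers. */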
static __rte_always_inline uint8_t
cn9k_sso_hws_dual_new_event(struct cn9k_sso_hws_dual *dws,
                            const struct rte_event *ev)
{
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint64_t event_ptr = ev->u64;
        const uint16_t grp = ev->queue_id;

        rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
        if (dws->xaq_lmt <= *dws->fc_mem)
                return 0;

        cnxk_sso_hws_add_work(event_ptr, tag, new_tt, dws->grps_base[grp]);
        return 1;
}

static __rte_always_inline void
cn9k_sso_hws_dual_forward_event(struct cn9k_sso_hws_dual *dws,
                                struct cn9k_sso_hws_state *vws,
                                const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed, use SWTAG to forward the event */
        if (CNXK_GRP_FROM_TAG(plt_read64(vws->tag_op)) == grp) {
                cn9k_sso_hws_fwd_swtag(vws, ev);
                dws->swtag_req = 1;
        } else {
                /*
                 * Group has been changed for group based work pipelining,
                 * use deschedule/add_work operation to transfer the event to
                 * the new group/core.
                 */
                cn9k_sso_hws_fwd_group(vws, ev, grp);
        }
}

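/* Convert the NIX work-queue entry attached to the event into an rte_mbuf. */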
static __rte_always_inline void
cn9k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
                 const uint32_t tag, const uint32_t flags,
                 const void *const lookup_mem)
{
        const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
                                   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

        cn9k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
                             (struct rte_mbuf *)mbuf, lookup_mem,
                             mbuf_init | ((uint64_t)port_id) << 48, flags);
}

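/* Dual-workslot GETWORK: poll the current GWS for work while re-arming the
 * paired GWS, so the get-work latency of one slot is hidden behind the other.
 */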
static __rte_always_inline uint16_t
cn9k_sso_hws_dual_get_work(struct cn9k_sso_hws_state *ws,
                           struct cn9k_sso_hws_state *ws_pair,
                           struct rte_event *ev, const uint32_t flags,
                           const void *const lookup_mem,
                           struct cnxk_timesync_info *const tstamp)
{
        const uint64_t set_gw = BIT_ULL(16) | 1;
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t tstamp_ptr;
        uint64_t mbuf;

        if (flags & NIX_RX_OFFLOAD_PTYPE_F)
                rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "rty%=:                                 \n"
                     "        ldr %[tag], [%[tag_loc]]       \n"
                     "        ldr %[wqp], [%[wqp_loc]]       \n"
                     "        tbnz %[tag], 63, rty%=         \n"
                     "done%=: str %[gw], [%[pong]]           \n"
                     "        dmb ld                         \n"
                     "        sub %[mbuf], %[wqp], #0x80     \n"
                     "        prfm pldl1keep, [%[mbuf]]      \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op),
                       [gw] "r"(set_gw), [pong] "r"(ws_pair->getwrk_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);

        gw.u64[1] = plt_read64(ws->wqp_op);
        plt_write64(set_gw, ws_pair->getwrk_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, flags,
                                         lookup_mem);
                        /* Extract tstamp if PTP is enabled */
                        tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
                                                            gw.u64[1]) +
                                                   CNXK_SSO_WQE_SG_PTR);
                        cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
                                                flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                                flags & NIX_RX_MULTI_SEG_F,
                                                (uint64_t *)tstamp_ptr);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

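/* Single-workslot GETWORK: issue a get-work request and wait until the SSO
 * returns a valid tag/WQE pair.
 */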
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work(struct cn9k_sso_hws *ws, struct rte_event *ev,
                      const uint32_t flags, const void *const lookup_mem)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t tstamp_ptr;
        uint64_t mbuf;

        plt_write64(BIT_ULL(16) | /* wait for work. */
                            1,    /* Use Mask set 0. */
                    ws->getwrk_op);

        if (flags & NIX_RX_OFFLOAD_PTYPE_F)
                rte_prefetch_non_temporal(lookup_mem);
#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "        ldr %[tag], [%[tag_loc]]       \n"
                     "        ldr %[wqp], [%[wqp_loc]]       \n"
                     "        tbz %[tag], 63, done%=         \n"
                     "        sevl                           \n"
                     "rty%=:  wfe                            \n"
                     "        ldr %[tag], [%[tag_loc]]       \n"
                     "        ldr %[wqp], [%[wqp_loc]]       \n"
                     "        tbnz %[tag], 63, rty%=         \n"
                     "done%=: dmb ld                         \n"
                     "        sub %[mbuf], %[wqp], #0x80     \n"
                     "        prfm pldl1keep, [%[mbuf]]      \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);

        gw.u64[1] = plt_read64(ws->wqp_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, flags,
                                         lookup_mem);
                        /* Extract tstamp if PTP is enabled */
                        tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
                                                            gw.u64[1]) +
                                                   CNXK_SSO_WQE_SG_PTR);
                        cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
                                                ws->tstamp,
                                                flags & NIX_RX_OFFLOAD_TSTAMP_F,
                                                flags & NIX_RX_MULTI_SEG_F,
                                                (uint64_t *)tstamp_ptr);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
cn9k_sso_hws_get_work_empty(struct cn9k_sso_hws_state *ws, struct rte_event *ev)
{
        union {
                __uint128_t get_work;
                uint64_t u64[2];
        } gw;
        uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
        asm volatile(PLT_CPU_FEATURE_PREAMBLE
                     "        ldr %[tag], [%[tag_loc]]       \n"
                     "        ldr %[wqp], [%[wqp_loc]]       \n"
                     "        tbz %[tag], 63, done%=         \n"
                     "        sevl                           \n"
                     "rty%=:  wfe                            \n"
                     "        ldr %[tag], [%[tag_loc]]       \n"
                     "        ldr %[wqp], [%[wqp_loc]]       \n"
                     "        tbnz %[tag], 63, rty%=         \n"
                     "done%=: dmb ld                         \n"
                     "        sub %[mbuf], %[wqp], #0x80     \n"
                     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
                       [mbuf] "=&r"(mbuf)
                     : [tag_loc] "r"(ws->tag_op), [wqp_loc] "r"(ws->wqp_op));
#else
        gw.u64[0] = plt_read64(ws->tag_op);
        while ((BIT_ULL(63)) & gw.u64[0])
                gw.u64[0] = plt_read64(ws->tag_op);

        gw.u64[1] = plt_read64(ws->wqp_op);
        mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

        gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
                    (gw.u64[0] & (0x3FFull << 36)) << 4 |
                    (gw.u64[0] & 0xffffffff);

        if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
                if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
                    RTE_EVENT_TYPE_ETHDEV) {
                        uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

                        gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
                        cn9k_wqe_to_mbuf(gw.u64[1], mbuf, port,
                                         gw.u64[0] & 0xFFFFF, 0, NULL);
                        gw.u64[1] = mbuf;
                }
        }

        ev->event = gw.u64[0];
        ev->u64 = gw.u64[1];

        return !!gw.u64[1];
}

/* CN9K Fastpath functions. */
uint16_t __rte_hot cn9k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_enq_burst(void *port,
                                          const struct rte_event ev[],
                                          uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_new_burst(void *port,
                                              const struct rte_event ev[],
                                              uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_enq_fwd_burst(void *port,
                                              const struct rte_event ev[],
                                              uint16_t nb_events);

uint16_t __rte_hot cn9k_sso_hws_dual_enq(void *port,
                                         const struct rte_event *ev);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_burst(void *port,
                                               const struct rte_event ev[],
                                               uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_new_burst(void *port,
                                                   const struct rte_event ev[],
                                                   uint16_t nb_events);
uint16_t __rte_hot cn9k_sso_hws_dual_enq_fwd_burst(void *port,
                                                   const struct rte_event ev[],
                                                   uint16_t nb_events);

#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	uint16_t __rte_hot cn9k_sso_hws_deq_##name(                            \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_burst_##name(                      \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_##name(                        \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_burst_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_seg_##name(                        \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_seg_burst_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_##name(                    \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_deq_tmo_seg_burst_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_seg_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_##name(               \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn9k_sso_hws_dual_deq_tmo_seg_burst_##name(         \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

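/* Wait until the Tx queue has free SQEs, as indicated by the SQB
 * flow-control counter.
 */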
static __rte_always_inline void
cn9k_sso_txq_fc_wait(const struct cn9k_eth_txq *txq)
{
        while (!((txq->nb_sqb_bufs_adj -
                  __atomic_load_n(txq->fc_mem, __ATOMIC_RELAXED))
                 << (txq)->sqes_per_sqb_log2))
                ;
}

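/* Look up the Tx queue for an mbuf from the per-port table populated by the
 * Tx adapter.
 */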
static __rte_always_inline const struct cn9k_eth_txq *
cn9k_sso_hws_xtract_meta(struct rte_mbuf *m,
                         const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
        return (const struct cn9k_eth_txq *)
                txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
}

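/* Copy the queue's send descriptor template and prepare the Tx command for
 * this mbuf.
 */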
static __rte_always_inline void
cn9k_sso_hws_prepare_pkt(const struct cn9k_eth_txq *txq, struct rte_mbuf *m,
                         uint64_t *cmd, const uint32_t flags)
{
        roc_lmt_mov(cmd, txq->cmd, cn9k_nix_tx_ext_subs(flags));
        cn9k_nix_xmit_prepare(m, cmd, flags, txq->lso_tun_fmt);
}

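/* Transmit the mbuf carried by an event. For ordered events the LMT
 * submission is committed only after this work slot reaches the head of the
 * order list (roc_sso_hws_head_wait()), preserving packet order.
 */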
static __rte_always_inline uint16_t
cn9k_sso_hws_event_tx(uint64_t base, struct rte_event *ev, uint64_t *cmd,
                      const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
                      const uint32_t flags)
{
        struct rte_mbuf *m = ev->mbuf;
        const struct cn9k_eth_txq *txq;
        uint16_t ref_cnt = m->refcnt;

        /* Perform header writes before barrier for TSO */
        cn9k_nix_xmit_prepare_tso(m, flags);
        /* Commit any changes to the packet here, since no further changes
         * are made to the mbuf when fast free is set. When fast free is not
         * set, both cn9k_nix_prepare_mseg() and cn9k_nix_xmit_prepare() have
         * a barrier after the refcnt update.
         */
        if (!(flags & NIX_TX_OFFLOAD_MBUF_NOFF_F))
                rte_io_wmb();
        txq = cn9k_sso_hws_xtract_meta(m, txq_data);
        cn9k_sso_hws_prepare_pkt(txq, m, cmd, flags);

        if (flags & NIX_TX_MULTI_SEG_F) {
                const uint16_t segdw = cn9k_nix_prepare_mseg(m, cmd, flags);
                if (!CNXK_TT_FROM_EVENT(ev->event)) {
                        cn9k_nix_xmit_mseg_prep_lmt(cmd, txq->lmt_addr, segdw);
                        roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
                        cn9k_sso_txq_fc_wait(txq);
                        if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
                                cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr,
                                                       txq->io_addr, segdw);
                } else {
                        cn9k_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr,
                                               segdw);
                }
        } else {
                if (!CNXK_TT_FROM_EVENT(ev->event)) {
                        cn9k_nix_xmit_prep_lmt(cmd, txq->lmt_addr, flags);
                        roc_sso_hws_head_wait(base + SSOW_LF_GWS_TAG);
                        cn9k_sso_txq_fc_wait(txq);
                        if (cn9k_nix_xmit_submit_lmt(txq->io_addr) == 0)
                                cn9k_nix_xmit_one(cmd, txq->lmt_addr,
                                                  txq->io_addr, flags);
                } else {
                        cn9k_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr,
                                          flags);
                }
        }

        if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
                if (ref_cnt > 1)
                        return 1;
        }

        cnxk_sso_hws_swtag_flush(base + SSOW_LF_GWS_TAG,
                                 base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

        return 1;
}

#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_##name(                   \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn9k_sso_hws_tx_adptr_enq_seg_##name(               \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn9k_sso_hws_dual_tx_adptr_enq_seg_##name(          \
		void *port, struct rte_event ev[], uint16_t nb_events);

NIX_TX_FASTPATH_MODES
#undef T

#endif /* __CN9K_WORKER_H__ */