/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN10K_WORKER_H__
#define __CN10K_WORKER_H__

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"
#include "cn10k_cryptodev_ops.h"

#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
#include "cn10k_tx.h"

/* SSO Operations */

static __rte_always_inline uint8_t
cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
	return 1;
}
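
/*
 * Note on the helper above: the ACQ_REL fence orders the stores that built
 * the event against the read of *ws->fc_mem, so the XAQ flow-control check
 * sees a fresh in-flight count. Once that count reaches ws->xaq_lmt the
 * event is not queued and 0 is returned; callers (e.g. the OP_NEW enqueue
 * path) are expected to retry or drop.
 */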

static __rte_always_inline void
cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt =
		CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0));

	/* CNXK model
	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
	 *
	 * SSO_TT_ORDERED        norm           norm             untag
	 * SSO_TT_ATOMIC         norm           norm             untag
	 * SSO_TT_UNTAGGED       norm           norm             NOOP
	 */

	if (new_tt == SSO_TT_UNTAGGED) {
		if (cur_tt != SSO_TT_UNTAGGED)
			cnxk_sso_hws_swtag_untag(ws->base +
						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
	} else {
		cnxk_sso_hws_swtag_norm(tag, new_tt,
					ws->base + SSOW_LF_GWS_OP_SWTAG_NORM);
	}
	ws->swtag_req = 1;
}
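
/*
 * ws->swtag_req records that a tag switch is in flight so that a later
 * dequeue on this workslot can wait for the SWTAG to complete before
 * fetching new work.
 */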

static __rte_always_inline void
cn10k_sso_hws_fwd_group(struct cn10k_sso_hws *ws, const struct rte_event *ev,
			const uint16_t grp)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;

	plt_write64(ev->u64, ws->base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
	cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
				   ws->base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
}

static __rte_always_inline void
cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
			    const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0)) == grp)
		cn10k_sso_hws_fwd_swtag(ws, ev);
	else
		/*
		 * Group has been changed for group based work pipelining,
		 * use the deschedule/add_work operation to transfer the
		 * event to the new group/core.
		 */
		cn10k_sso_hws_fwd_group(ws, ev, grp);
}
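
/*
 * A minimal sketch (mirroring the dispatch in the cn10k worker enqueue
 * path) of how the helpers above are selected from rte_event::op:
 *
 *	switch (ev->op) {
 *	case RTE_EVENT_OP_NEW:
 *		return cn10k_sso_hws_new_event(ws, ev);
 *	case RTE_EVENT_OP_FORWARD:
 *		cn10k_sso_hws_forward_event(ws, ev);
 *		break;
 *	case RTE_EVENT_OP_RELEASE:
 *		cnxk_sso_hws_swtag_flush(ws->base + SSOW_LF_GWS_TAG,
 *					 ws->base +
 *					 SSOW_LF_GWS_OP_SWTAG_FLUSH);
 *		break;
 *	}
 */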

static __rte_always_inline void
cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
		  const uint32_t tag, const uint32_t flags,
		  const void *const lookup_mem)
{
	const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
				   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

	cn10k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
			      (struct rte_mbuf *)mbuf, lookup_mem,
			      mbuf_init | ((uint64_t)port_id) << 48, flags);
}
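
/*
 * mbuf_init above seeds rte_mbuf::rearm_data: bits [15:0] data_off
 * (RTE_PKTMBUF_HEADROOM, +8 when a timestamp is prepended), bits [31:16]
 * refcnt = 1, bits [47:32] nb_segs = 1 and bits [63:48] the port, i.e.
 * 0x100010000ULL encodes nb_segs = 1, refcnt = 1.
 */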

static __rte_always_inline void
cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
		   void *lookup_mem, void *tstamp)
{
	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
			     (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
	struct rte_event_vector *vec;
	uint16_t nb_mbufs, non_vec;
	uint64_t **wqe;

	mbuf_init |= ((uint64_t)port_id) << 48;
	vec = (struct rte_event_vector *)vwqe;
	wqe = vec->u64s;

	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, vec->mbufs, nb_mbufs,
					      flags | NIX_RX_VWQE_F, lookup_mem,
					      tstamp);
	wqe += nb_mbufs;
	non_vec = vec->nb_elem - nb_mbufs;

	while (non_vec) {
		struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
		struct rte_mbuf *mbuf;
		uint64_t tstamp_ptr;

		mbuf = (struct rte_mbuf *)((char *)cqe -
					   sizeof(struct rte_mbuf));
		cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
				      mbuf_init, flags);
		/* Extract the timestamp if PTP is enabled */
		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)cqe) +
					   CNXK_SSO_WQE_SG_PTR);
		cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
					flags & NIX_RX_OFFLOAD_TSTAMP_F,
					flags & NIX_RX_MULTI_SEG_F,
					(uint64_t *)tstamp_ptr);
		wqe[0] = (uint64_t *)mbuf;
		non_vec--;
		wqe++;
	}
}
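
/*
 * The loop above finishes the CQEs left over after the SIMD Rx routine
 * consumed a NIX_DESCS_PER_LOOP-aligned count, rewriting each vector slot
 * in place so that vec->mbufs[] holds rte_mbuf pointers rather than CQE
 * pointers by the time the function returns.
 */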

static __rte_always_inline uint16_t
cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
		       const uint32_t flags, void *lookup_mem)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	gw.get_work = ws->gw_wdata;
#if defined(RTE_ARCH_ARM64) && !defined(__clang__)
	asm volatile(
		PLT_CPU_FEATURE_PREAMBLE
		"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
		"sub %[mbuf], %H[wdata], #0x80			\n"
		: [wdata] "+r"(gw.get_work), [mbuf] "=&r"(mbuf)
		: [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
		: "memory");
#else
	plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
	do {
		roc_load_pair(gw.u64[0], gw.u64[1],
			      ws->base + SSOW_LF_GWS_WQE0);
	} while (gw.u64[0] & BIT_ULL(63));
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if ((flags & CPT_RX_WQE_F) &&
		    (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		     RTE_EVENT_TYPE_CRYPTODEV)) {
			gw.u64[1] = cn10k_cpt_crypto_adapter_dequeue(gw.u64[1]);
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					  gw.u64[0] & 0xFFFFF, flags,
					  lookup_mem);
			/* Extract the timestamp if PTP is enabled */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
							    gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						ws->tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
			__uint128_t vwqe_hdr = *(__uint128_t *)gw.u64[1];

			vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
				   ((vwqe_hdr & 0xFFFF) << 48) |
				   ((uint64_t)port << 32);
			*(uint64_t *)gw.u64[1] = (uint64_t)vwqe_hdr;
			cn10k_process_vwqe(gw.u64[1], port, flags, lookup_mem,
					   ws->tstamp);
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}
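
/*
 * The gw.u64[0] rewrite above remaps the SSO GWS tag word onto the
 * rte_event layout: the tag type in bits [33:32] moves to sched_type at
 * bits [39:38] (hence << 6) and the group in bits [45:36] moves to
 * queue_id at bits [49:40] (<< 4), while the low 32 tag bits stay put.
 */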

/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
		     "		tbz %[tag], 63, done%=		\n"
		     "		sevl				\n"
		     "rty%=:	wfe				\n"
		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=		\n"
		     "done%=:	dmb ld				\n"
		     "		sub %[mbuf], %[wqp], #0x80	\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
		     : "memory");
#else
	do {
		roc_load_pair(gw.u64[0], gw.u64[1],
			      ws->base + SSOW_LF_GWS_WQE0);
	} while (gw.u64[0] & BIT_ULL(63));
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		    RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					  gw.u64[0] & 0xFFFFF, 0, NULL);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}
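
/*
 * Unlike cn10k_sso_hws_get_work(), the teardown variant above only polls
 * work already attached to the workslot (SSOW_LF_GWS_WQE0) and never
 * issues a fresh GET_WORK request, so it takes no Rx offload flags or
 * lookup memory.
 */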

/* CN10K Fastpath functions. */
uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port,
					   const struct rte_event ev[],
					   uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_ca_enq(void *port, struct rte_event ev[],
					uint16_t nb_events);

#define R(name, f5, f4, f3, f2, f1, f0, flags)                                \
	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                          \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                    \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                      \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name(                      \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name(                \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                      \
	uint16_t __rte_hot cn10k_sso_hws_deq_ca_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_ca_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                      \
	uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name(                      \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                      \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name(                  \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name(            \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                      \
	uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_ca_seg_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R
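
/*
 * NIX_RX_FASTPATH_MODES expands R() once per Rx offload combination; for
 * a hypothetical mode named `foo` the macro above declares, among others:
 *
 *	uint16_t __rte_hot cn10k_sso_hws_deq_foo(void *port,
 *						 struct rte_event *ev,
 *						 uint64_t timeout_ticks);
 *
 * The matching definitions are generated from the same table in the
 * driver's worker sources.
 */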

static __rte_always_inline struct cn10k_eth_txq *
cn10k_sso_hws_xtract_meta(struct rte_mbuf *m,
			  const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
	return (struct cn10k_eth_txq *)
		txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
}

static __rte_always_inline void
cn10k_sso_vwqe_split_tx(struct rte_mbuf **mbufs, uint16_t nb_mbufs,
			uint64_t *cmd, uint16_t lmt_id, uintptr_t lmt_addr,
			uint8_t sched_type, uintptr_t base,
			const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
			const uint32_t flags)
{
	uint16_t port[4], queue[4];
	struct cn10k_eth_txq *txq;
	uint16_t i, j;
	uintptr_t pa;

	for (i = 0; i < nb_mbufs; i += 4) {
		port[0] = mbufs[i]->port;
		port[1] = mbufs[i + 1]->port;
		port[2] = mbufs[i + 2]->port;
		port[3] = mbufs[i + 3]->port;

		queue[0] = rte_event_eth_tx_adapter_txq_get(mbufs[i]);
		queue[1] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 1]);
		queue[2] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 2]);
		queue[3] = rte_event_eth_tx_adapter_txq_get(mbufs[i + 3]);

		/* Take the slow path unless all four mbufs share a single
		 * (port, queue) pair.
		 */
		if (((port[0] ^ port[1]) | (port[1] ^ port[2]) |
		     (port[2] ^ port[3])) ||
		    ((queue[0] ^ queue[1]) | (queue[1] ^ queue[2]) |
		     (queue[2] ^ queue[3]))) {
			for (j = 0; j < 4; j++) {
				struct rte_mbuf *m = mbufs[i + j];

				txq = (struct cn10k_eth_txq *)
					txq_data[port[j]][queue[j]];
				cn10k_nix_tx_skeleton(txq, cmd, flags);
				/* Perform header writes before barrier
				 * for TSO
				 */
				if (flags & NIX_TX_OFFLOAD_TSO_F)
					cn10k_nix_xmit_prepare_tso(m, flags);

				cn10k_nix_xmit_prepare(m, cmd, lmt_addr, flags,
						       txq->lso_tun_fmt);
				if (flags & NIX_TX_MULTI_SEG_F) {
					const uint16_t segdw =
						cn10k_nix_prepare_mseg(
							m, (uint64_t *)lmt_addr,
							flags);
					pa = txq->io_addr | ((segdw - 1) << 4);
				} else {
					pa = txq->io_addr |
					     (cn10k_nix_tx_ext_subs(flags) + 1)
						     << 4;
				}
				if (!sched_type)
					roc_sso_hws_head_wait(
						base + SSOW_LF_GWS_TAG);

				roc_lmt_submit_steorl(lmt_id, pa);
			}
		} else {
			txq = (struct cn10k_eth_txq *)
				txq_data[port[0]][queue[0]];
			cn10k_nix_xmit_pkts_vector(txq, &mbufs[i], 4, cmd,
						   base + SSOW_LF_GWS_TAG,
						   flags | NIX_TX_VWQE_F);
		}
	}
}
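
/*
 * Design note for the split above: mbufs are scanned four at a time; a
 * chunk whose four mbufs share one (port, queue) pair is handed to the
 * vector Tx routine in a single call, otherwise each mbuf is prepared and
 * submitted individually via LMTST. The head wait for ordered events
 * (sched_type == 0) keeps hardware submission in scheduling order.
 */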

static __rte_always_inline uint16_t
cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
		       uint64_t *cmd,
		       const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
		       const uint32_t flags)
{
	struct cn10k_eth_txq *txq;
	struct rte_mbuf *m;
	uintptr_t lmt_addr;
	uint16_t ref_cnt;
	uint16_t lmt_id;
	uintptr_t pa;

	lmt_addr = ws->lmt_base;
	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);

	if (ev->event_type & RTE_EVENT_TYPE_VECTOR) {
		struct rte_mbuf **mbufs = ev->vec->mbufs;
		uint64_t meta = *(uint64_t *)ev->vec;

		if (meta & BIT(31)) {
			/* attr word: bits [47:32] port, bits [63:48] queue */
			txq = (struct cn10k_eth_txq *)
				txq_data[(meta >> 32) & 0xFFFF][meta >> 48];

			cn10k_nix_xmit_pkts_vector(
				txq, mbufs, meta & 0xFFFF, cmd,
				ws->tx_base + SSOW_LF_GWS_TAG,
				flags | NIX_TX_VWQE_F);
		} else {
			cn10k_sso_vwqe_split_tx(
				mbufs, meta & 0xFFFF, cmd, lmt_id, lmt_addr,
				ev->sched_type, ws->tx_base, txq_data, flags);
		}
		rte_mempool_put(rte_mempool_from_obj(ev->vec), ev->vec);
		return (meta & 0xFFFF);
	}

	m = ev->mbuf;
	ref_cnt = m->refcnt;
	txq = cn10k_sso_hws_xtract_meta(m, txq_data);
	cn10k_nix_tx_skeleton(txq, cmd, flags);
	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F)
		cn10k_nix_xmit_prepare_tso(m, flags);

	cn10k_nix_xmit_prepare(m, cmd, lmt_addr, flags, txq->lso_tun_fmt);
	if (flags & NIX_TX_MULTI_SEG_F) {
		const uint16_t segdw =
			cn10k_nix_prepare_mseg(m, (uint64_t *)lmt_addr, flags);
		pa = txq->io_addr | ((segdw - 1) << 4);
	} else {
		pa = txq->io_addr | (cn10k_nix_tx_ext_subs(flags) + 1) << 4;
	}
	if (!ev->sched_type)
		roc_sso_hws_head_wait(ws->tx_base + SSOW_LF_GWS_TAG);

	roc_lmt_submit_steorl(lmt_id, pa);

	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		if (ref_cnt > 1)
			return 1;
	}

	cnxk_sso_hws_swtag_flush(ws->tx_base + SSOW_LF_GWS_TAG,
				 ws->tx_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

	return 1;
}
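
/*
 * A minimal sketch (hypothetical name) of a Tx adapter enqueue built on
 * the helper above, in the spirit of the T() templates declared below;
 * the real templates size cmd[] from the per-mode `sz` parameter and pass
 * the mode's offload `flags`:
 *
 *	static uint16_t
 *	my_tx_adptr_enq(void *port, struct rte_event ev[],
 *			uint16_t nb_events)
 *	{
 *		struct cn10k_sso_hws *ws = port;
 *		uint64_t cmd[sz];
 *
 *		RTE_SET_USED(nb_events);
 *		return cn10k_sso_hws_event_tx(ws, &ev[0], cmd, txq_data,
 *					      flags);
 *	}
 */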

#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                            \
	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events);       \
	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events);       \
	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_##name(            \
		void *port, struct rte_event ev[], uint16_t nb_events);       \
	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_seg_##name(        \
		void *port, struct rte_event ev[], uint16_t nb_events);

NIX_TX_FASTPATH_MODES
#undef T

#endif /* __CN10K_WORKER_H__ */