/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN10K_WORKER_H__
#define __CN10K_WORKER_H__

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
#include "cn10k_tx.h"
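/* SSO Operations.
 *
 * The helpers below drive a single SSO work slot (GWS). Each work slot
 * exposes a small MMIO register window (SSOW_LF_GWS_*) through which
 * events are added, forwarded via tag switches, descheduled and dequeued.
 * The exact register semantics are defined by the CN10K SSO hardware;
 * the comments here only summarise how this driver uses them.
 */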
static __rte_always_inline uint8_t
cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
	return 1;
}
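/*
 * Usage sketch (hypothetical caller, not part of this header): a worker
 * injecting a RTE_EVENT_OP_NEW event could retry while the XAQ buffer
 * pool is exhausted, e.g.:
 *
 *	while (!cn10k_sso_hws_new_event(ws, &ev))
 *		rte_pause();
 */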
static __rte_always_inline void
cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt =
		CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0));

	/* CNXK model
	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
	 *
	 * SSO_TT_ORDERED        norm           norm            untag
	 * SSO_TT_ATOMIC         norm           norm            untag
	 * SSO_TT_UNTAGGED       norm           norm            NOOP
	 */

	if (new_tt == SSO_TT_UNTAGGED) {
		if (cur_tt != SSO_TT_UNTAGGED)
			cnxk_sso_hws_swtag_untag(ws->base +
						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
	} else {
		cnxk_sso_hws_swtag_norm(tag, new_tt,
					ws->base + SSOW_LF_GWS_OP_SWTAG_NORM);
	}
	ws->swtag_req = 1;
}
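/*
 * Example reading of the table above: forwarding an event whose current
 * tag type is SSO_TT_ATOMIC to SSO_TT_ORDERED issues a SWTAG_NORM
 * ("norm"), while forwarding it to SSO_TT_UNTAGGED issues a SWTAG_UNTAG
 * ("untag"). An untagged-to-untagged switch is a no-op.
 */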
static __rte_always_inline void
cn10k_sso_hws_fwd_group(struct cn10k_sso_hws *ws, const struct rte_event *ev,
			const uint16_t grp)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;

	plt_write64(ev->u64, ws->base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
	cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
				   ws->base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
}
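/*
 * Forward an event already held by this work slot. The cheap path is a
 * tag switch within the current group; crossing groups requires a
 * deschedule so that a core servicing the destination group can pick
 * the event up.
 */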
static __rte_always_inline void
cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
			    const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed; use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0)) == grp)
		cn10k_sso_hws_fwd_swtag(ws, ev);
	else
		/*
		 * Group has been changed for group based work pipelining;
		 * use deschedule/add_work operation to transfer the event to
		 * the new group/core.
		 */
		cn10k_sso_hws_fwd_group(ws, ev, grp);
}
static __rte_always_inline void
cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
		  const uint32_t tag, const uint32_t flags,
		  const void *const lookup_mem)
{
	/* Pre-built mbuf rearm data: data_off = RTE_PKTMBUF_HEADROOM (plus
	 * 8B for the timestamp when PTP is on), refcnt = 1, nb_segs = 1;
	 * the port is OR-ed into the top 16 bits below.
	 */
	const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
				   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

	cn10k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
			      (struct rte_mbuf *)mbuf, lookup_mem,
			      mbuf_init | ((uint64_t)port_id) << 48, flags);
}
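/*
 * Convert a vector WQE (a burst of CQEs delivered as one SSO event) into
 * an rte_event_vector of mbufs: the multiple-of-NIX_DESCS_PER_LOOP head
 * of the vector goes through the vectorized Rx routine, the remainder is
 * converted CQE by CQE.
 */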
static __rte_always_inline void
cn10k_process_vwqe(uintptr_t vwqe, uint16_t port_id, const uint32_t flags,
		   void *lookup_mem, void *tstamp)
{
	uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
			     (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);
	struct rte_event_vector *vec;
	uint16_t nb_mbufs, non_vec;
	uint64_t **wqe;

	mbuf_init |= ((uint64_t)port_id) << 48;
	vec = (struct rte_event_vector *)vwqe;
	wqe = vec->u64s;

	nb_mbufs = RTE_ALIGN_FLOOR(vec->nb_elem, NIX_DESCS_PER_LOOP);
	nb_mbufs = cn10k_nix_recv_pkts_vector(&mbuf_init, vec->mbufs, nb_mbufs,
					      flags | NIX_RX_VWQE_F, lookup_mem,
					      tstamp);
	wqe += nb_mbufs;
	non_vec = vec->nb_elem - nb_mbufs;

	while (non_vec) {
		struct nix_cqe_hdr_s *cqe = (struct nix_cqe_hdr_s *)wqe[0];
		struct rte_mbuf *mbuf;
		uint64_t tstamp_ptr;

		mbuf = (struct rte_mbuf *)((char *)cqe -
					   sizeof(struct rte_mbuf));
		cn10k_nix_cqe_to_mbuf(cqe, cqe->tag, mbuf, lookup_mem,
				      mbuf_init, flags);
		/* Extract the timestamp if PTP is enabled. */
		tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)cqe) +
					   CNXK_SSO_WQE_SG_PTR);
		cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, tstamp,
					flags & NIX_RX_OFFLOAD_TSTAMP_F,
					flags & NIX_RX_MULTI_SEG_F,
					(uint64_t *)tstamp_ptr);
		wqe[0] = (uint64_t *)mbuf;
		non_vec--;
		wqe++;
	}
}
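/*
 * Poll the work slot for a new event. On arm64 non-clang builds a CASP
 * pair exchange both requests and fetches the work in one shot; the
 * fallback path writes GET_WORK0 and spins on the WQE registers until
 * the hardware clears the pend bit (bit 63).
 */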
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
		       const uint32_t flags, void *lookup_mem)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	gw.get_work = ws->gw_wdata;
#if defined(RTE_ARCH_ARM64) && !defined(__clang__)
	asm volatile(
		PLT_CPU_FEATURE_PREAMBLE
		"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
		"sub %[mbuf], %H[wdata], #0x80				\n"
		: [wdata] "+r"(gw.get_work), [mbuf] "=&r"(mbuf)
		: [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
		: "memory");
#else
	plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
	do {
		roc_load_pair(gw.u64[0], gw.u64[1],
			      ws->base + SSOW_LF_GWS_WQE0);
	} while (gw.u64[0] & BIT_ULL(63));
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
	/* Rearrange the SSO tag word into rte_event layout: tt lands in
	 * the sched_type bits [39:38], grp in the queue_id bits starting
	 * at bit 40, and the 32-bit tag stays in the low word.
	 */
	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		    RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					  gw.u64[0] & 0xFFFFF, flags,
					  lookup_mem);
			/* Extract the timestamp if PTP is enabled. */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
							    gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						ws->tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		} else if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
			   RTE_EVENT_TYPE_ETHDEV_VECTOR) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);
			__uint128_t vwqe_hdr = *(__uint128_t *)gw.u64[1];

			vwqe_hdr = ((vwqe_hdr >> 64) & 0xFFF) | BIT_ULL(31) |
				   ((vwqe_hdr & 0xFFFF) << 48) |
				   ((uint64_t)port << 32);
			*(uint64_t *)gw.u64[1] = (uint64_t)vwqe_hdr;
			cn10k_process_vwqe(gw.u64[1], port, flags, lookup_mem,
					   ws->tstamp);
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}
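/*
 * Dequeue-side usage sketch (the generated deq_* functions below follow
 * this shape, modulo flag-specific handling): complete any pending tag
 * switch before pulling new work.
 *
 *	if (ws->swtag_req) {
 *		ws->swtag_req = 0;
 *		cnxk_sso_hws_swtag_wait(ws->base + SSOW_LF_GWS_WQE0);
 *		return 1;
 *	}
 *	return cn10k_sso_hws_get_work(ws, ev, flags, lookup_mem);
 */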
/* Used in cleaning up workslot. */
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
		     "		tbz %[tag], 63, done%=			\n"
		     "		sevl					\n"
		     "rty%=:	wfe					\n"
		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=			\n"
		     "done%=:	dmb ld					\n"
		     "		sub %[mbuf], %[wqp], #0x80		\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
		     : "memory");
#else
	do {
		roc_load_pair(gw.u64[0], gw.u64[1],
			      ws->base + SSOW_LF_GWS_WQE0);
	} while (gw.u64[0] & BIT_ULL(63));
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		    RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					  gw.u64[0] & 0xFFFFF, 0, NULL);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}
/* CN10K Fastpath functions. */
uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port,
					   const struct rte_event ev[],
					   uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
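/*
 * Declare one dequeue variant per Rx offload flag combination. The R()
 * entries come from NIX_RX_FASTPATH_MODES; each expansion emits plain,
 * timeout (tmo), multi-segment (seg) and burst forms of the dequeue
 * function so that offload checks resolve at compile time.
 */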
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                 \
	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                           \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                     \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name(                       \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);                                       \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name(                   \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);     \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events,         \
		uint64_t timeout_ticks);
NIX_RX_FASTPATH_MODES
#undef R
static __rte_always_inline const struct cn10k_eth_txq *
cn10k_sso_hws_xtract_meta(struct rte_mbuf *m,
			  const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
	return (const struct cn10k_eth_txq *)
		txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
}
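/*
 * Tx adapter enqueue worker: look up the destination txq from the mbuf,
 * build the NIX send descriptor in the LMT line, wait until this event
 * reaches the head of its context, then kick the descriptor with an
 * LMTST/STEORL pair and flush the tag.
 */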
static __rte_always_inline uint16_t
cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
		       uint64_t *cmd,
		       const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
		       const uint32_t flags)
{
	const struct cn10k_eth_txq *txq;
	struct rte_mbuf *m = ev->mbuf;
	uint16_t ref_cnt = m->refcnt;
	uintptr_t lmt_addr;
	uint16_t lmt_id;
	uintptr_t pa;

	lmt_addr = ws->lmt_base;
	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
	txq = cn10k_sso_hws_xtract_meta(m, txq_data);
	cn10k_nix_tx_skeleton(txq, cmd, flags);
	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F)
		cn10k_nix_xmit_prepare_tso(m, flags);

	cn10k_nix_xmit_prepare(m, cmd, lmt_addr, flags, txq->lso_tun_fmt);
	if (flags & NIX_TX_MULTI_SEG_F) {
		const uint16_t segdw =
			cn10k_nix_prepare_mseg(m, (uint64_t *)lmt_addr, flags);
		pa = txq->io_addr | ((segdw - 1) << 4);
	} else {
		pa = txq->io_addr | (cn10k_nix_tx_ext_subs(flags) + 1) << 4;
	}
	/* Ordered events must reach the head of their context before the
	 * packet can be submitted.
	 */
	if (!ev->sched_type)
		cnxk_sso_hws_head_wait(ws->tx_base + SSOW_LF_GWS_TAG);

	roc_lmt_submit_steorl(lmt_id, pa);

	/* If no-fast-free is requested and the mbuf is still referenced
	 * elsewhere, return without flushing the tag.
	 */
	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		if (ref_cnt > 1)
			return 1;
	}

	cnxk_sso_hws_swtag_flush(ws->tx_base + SSOW_LF_GWS_TAG,
				 ws->tx_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

	return 1;
}
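/*
 * As with R() above, declare one Tx adapter enqueue variant per Tx
 * offload flag combination listed in NIX_TX_FASTPATH_MODES, in both
 * single-segment and multi-segment (seg) forms.
 */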
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                             \
	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name(                  \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name(              \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events);        \
	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_seg_##name(         \
		void *port, struct rte_event ev[], uint16_t nb_events);
NIX_TX_FASTPATH_MODES
#undef T

#endif /* __CN10K_WORKER_H__ */