/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#ifndef __CN10K_WORKER_H__
#define __CN10K_WORKER_H__

#include "cnxk_ethdev.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

#include "cn10k_ethdev.h"
#include "cn10k_rx.h"
#include "cn10k_tx.h"

/* SSO Operations */

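/*
 * Inject a new event into the SSO. Returns 0 when the in-flight XAQ
 * limit has been reached (caller may retry), 1 on successful add-work.
 */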
static __rte_always_inline uint8_t
cn10k_sso_hws_new_event(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	rte_atomic_thread_fence(__ATOMIC_ACQ_REL);
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	cnxk_sso_hws_add_work(event_ptr, tag, new_tt, ws->grps_base[grp]);
	return 1;
}

static __rte_always_inline void
cn10k_sso_hws_fwd_swtag(struct cn10k_sso_hws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt =
		CNXK_TT_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0));

	/* CNXK model
	 * cur_tt/new_tt     SSO_TT_ORDERED SSO_TT_ATOMIC SSO_TT_UNTAGGED
	 *
	 * SSO_TT_ORDERED        norm           norm           untag
	 * SSO_TT_ATOMIC         norm           norm           untag
	 * SSO_TT_UNTAGGED       norm           norm           NOOP
	 */
	if (new_tt == SSO_TT_UNTAGGED) {
		if (cur_tt != SSO_TT_UNTAGGED)
			cnxk_sso_hws_swtag_untag(ws->base +
						 SSOW_LF_GWS_OP_SWTAG_UNTAG);
	} else {
		cnxk_sso_hws_swtag_norm(tag, new_tt,
					ws->base + SSOW_LF_GWS_OP_SWTAG_NORM);
	}
	ws->swtag_req = 1;
}

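/*
 * Forward an event to a different group: update the work-queue pointer
 * for the destination group, then switch tag and deschedule so any
 * workslot serving that group can pick the event up.
 */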
static __rte_always_inline void
cn10k_sso_hws_fwd_group(struct cn10k_sso_hws *ws, const struct rte_event *ev,
			const uint16_t grp)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;

	plt_write64(ev->u64, ws->base + SSOW_LF_GWS_OP_UPD_WQP_GRP1);
	cnxk_sso_hws_swtag_desched(tag, new_tt, grp,
				   ws->base + SSOW_LF_GWS_OP_SWTAG_DESCHED);
}

static __rte_always_inline void
cn10k_sso_hws_forward_event(struct cn10k_sso_hws *ws,
			    const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (CNXK_GRP_FROM_TAG(plt_read64(ws->base + SSOW_LF_GWS_WQE0)) == grp)
		cn10k_sso_hws_fwd_swtag(ws, ev);
	else
		/*
		 * Group has been changed for group based work pipelining,
		 * use deschedule/add_work operation to transfer the event to
		 * the new group/core.
		 */
		cn10k_sso_hws_fwd_group(ws, ev, grp);
}

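/*
 * Convert a received NIX WQE into an rte_mbuf. The 0x100010000ULL
 * constant seeds mbuf rearm_data: nb_segs = 1, refcnt = 1, data_off =
 * RTE_PKTMBUF_HEADROOM (plus 8 bytes kept for the timestamp when PTP is
 * enabled); the receive port is folded into bits [63:48].
 */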
static __rte_always_inline void
cn10k_wqe_to_mbuf(uint64_t wqe, const uint64_t mbuf, uint8_t port_id,
		  const uint32_t tag, const uint32_t flags,
		  const void *const lookup_mem)
{
	const uint64_t mbuf_init = 0x100010000ULL | RTE_PKTMBUF_HEADROOM |
				   (flags & NIX_RX_OFFLOAD_TSTAMP_F ? 8 : 0);

	cn10k_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
			      (struct rte_mbuf *)mbuf, lookup_mem,
			      mbuf_init | ((uint64_t)port_id) << 48, flags);
}

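/*
 * Fetch the next work item from the SSO. On arm64 GCC builds, a caspl
 * pair-exchange posts the GETWORK request and reads back the {tag, WQE}
 * pair in one shot; otherwise the request is written to GET_WORK0 and
 * WQE0 is polled until the pend bit (63) clears. The mbuf header sits
 * sizeof(struct rte_mbuf) (0x80 bytes) before the WQE, hence the sub.
 */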
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work(struct cn10k_sso_hws *ws, struct rte_event *ev,
		       const uint32_t flags, void *lookup_mem)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t tstamp_ptr;
	uint64_t mbuf;

	gw.get_work = ws->gw_wdata;
#if defined(RTE_ARCH_ARM64) && !defined(__clang__)
	asm volatile(
		PLT_CPU_FEATURE_PREAMBLE
		"caspl %[wdata], %H[wdata], %[wdata], %H[wdata], [%[gw_loc]]\n"
		"sub %[mbuf], %H[wdata], #0x80			\n"
		: [wdata] "+r"(gw.get_work), [mbuf] "=&r"(mbuf)
		: [gw_loc] "r"(ws->base + SSOW_LF_GWS_OP_GET_WORK0)
		: "memory");
#else
	plt_write64(gw.u64[0], ws->base + SSOW_LF_GWS_OP_GET_WORK0);
	do {
		roc_load_pair(gw.u64[0], gw.u64[1],
			      ws->base + SSOW_LF_GWS_WQE0);
	} while (gw.u64[0] & BIT_ULL(63));
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif
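	/*
	 * Remap the SSO response word into rte_event layout: tt bits
	 * [33:32] shift to sched_type [39:38], the group field [45:36]
	 * shifts to the queue_id position [47:40], and the low 32 tag
	 * bits stay in place.
	 */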
	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		    RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					  gw.u64[0] & 0xFFFFF, flags,
					  lookup_mem);
			/* Extract the timestamp, if PTP is enabled. */
			tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)
						    gw.u64[1]) +
						   CNXK_SSO_WQE_SG_PTR);
			cnxk_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf,
						ws->tstamp,
						flags & NIX_RX_OFFLOAD_TSTAMP_F,
						flags & NIX_RX_MULTI_SEG_F,
						(uint64_t *)tstamp_ptr);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

/* Used in cleaning up workslot. */
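/*
 * Unlike the regular get_work path, no new GETWORK request is issued
 * here; WQE0 is only peeked, using sevl/wfe on arm64 to sleep until a
 * pending response lands instead of spinning.
 */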
static __rte_always_inline uint16_t
cn10k_sso_hws_get_work_empty(struct cn10k_sso_hws *ws, struct rte_event *ev)
{
	union {
		__uint128_t get_work;
		uint64_t u64[2];
	} gw;
	uint64_t mbuf;

#ifdef RTE_ARCH_ARM64
	asm volatile(PLT_CPU_FEATURE_PREAMBLE
		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
		     "		tbz %[tag], 63, done%=			\n"
		     "		sevl					\n"
		     "rty%=:	wfe					\n"
		     "		ldp %[tag], %[wqp], [%[tag_loc]]	\n"
		     "		tbnz %[tag], 63, rty%=			\n"
		     "done%=:	dmb ld					\n"
		     "		sub %[mbuf], %[wqp], #0x80		\n"
		     : [tag] "=&r"(gw.u64[0]), [wqp] "=&r"(gw.u64[1]),
		       [mbuf] "=&r"(mbuf)
		     : [tag_loc] "r"(ws->base + SSOW_LF_GWS_WQE0)
		     : "memory");
#else
	do {
		roc_load_pair(gw.u64[0], gw.u64[1],
			      ws->base + SSOW_LF_GWS_WQE0);
	} while (gw.u64[0] & BIT_ULL(63));
	mbuf = (uint64_t)((char *)gw.u64[1] - sizeof(struct rte_mbuf));
#endif

	gw.u64[0] = (gw.u64[0] & (0x3ull << 32)) << 6 |
		    (gw.u64[0] & (0x3FFull << 36)) << 4 |
		    (gw.u64[0] & 0xffffffff);

	if (CNXK_TT_FROM_EVENT(gw.u64[0]) != SSO_TT_EMPTY) {
		if (CNXK_EVENT_TYPE_FROM_TAG(gw.u64[0]) ==
		    RTE_EVENT_TYPE_ETHDEV) {
			uint8_t port = CNXK_SUB_EVENT_FROM_TAG(gw.u64[0]);

			gw.u64[0] = CNXK_CLR_SUB_EVENT(gw.u64[0]);
			cn10k_wqe_to_mbuf(gw.u64[1], mbuf, port,
					  gw.u64[0] & 0xFFFFF, 0, NULL);
			gw.u64[1] = mbuf;
		}
	}

	ev->event = gw.u64[0];
	ev->u64 = gw.u64[1];

	return !!gw.u64[1];
}

/* CN10K Fastpath functions. */
uint16_t __rte_hot cn10k_sso_hws_enq(void *port, const struct rte_event *ev);
uint16_t __rte_hot cn10k_sso_hws_enq_burst(void *port,
					   const struct rte_event ev[],
					   uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_new_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);
uint16_t __rte_hot cn10k_sso_hws_enq_fwd_burst(void *port,
					       const struct rte_event ev[],
					       uint16_t nb_events);

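/*
 * Declare specialized dequeue handlers (plain, burst, timeout and
 * multi-segment variants) for every Rx offload flag combination listed
 * in NIX_RX_FASTPATH_MODES.
 */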
#define R(name, f5, f4, f3, f2, f1, f0, flags)                                \
	uint16_t __rte_hot cn10k_sso_hws_deq_##name(                          \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_burst_##name(                    \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                      \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_##name(                      \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_burst_##name(                \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                      \
	uint16_t __rte_hot cn10k_sso_hws_deq_seg_##name(                      \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_seg_burst_##name(                \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);                                      \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_##name(                  \
		void *port, struct rte_event *ev, uint64_t timeout_ticks);    \
	uint16_t __rte_hot cn10k_sso_hws_deq_tmo_seg_burst_##name(            \
		void *port, struct rte_event ev[], uint16_t nb_events,        \
		uint64_t timeout_ticks);

NIX_RX_FASTPATH_MODES
#undef R

static __rte_always_inline const struct cn10k_eth_txq *
cn10k_sso_hws_xtract_meta(struct rte_mbuf *m,
			  const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
{
	return (const struct cn10k_eth_txq *)
		txq_data[m->port][rte_event_eth_tx_adapter_txq_get(m)];
}

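/*
 * Transmit the event's mbuf on behalf of the Tx adapter: build the NIX
 * send descriptor in this workslot's LMT line and submit it to the
 * queue's I/O address with a steorl store.
 */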
static __rte_always_inline uint16_t
cn10k_sso_hws_event_tx(struct cn10k_sso_hws *ws, struct rte_event *ev,
		       uint64_t *cmd,
		       const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT],
		       const uint32_t flags)
{
	const struct cn10k_eth_txq *txq;
	struct rte_mbuf *m = ev->mbuf;
	uint16_t ref_cnt = m->refcnt;
	uintptr_t lmt_addr;
	uint16_t lmt_id;
	uintptr_t pa;

	lmt_addr = ws->lmt_base;
	ROC_LMT_BASE_ID_GET(lmt_addr, lmt_id);
	txq = cn10k_sso_hws_xtract_meta(m, txq_data);
	cn10k_nix_tx_skeleton(txq, cmd, flags);
	/* Perform header writes before barrier for TSO */
	if (flags & NIX_TX_OFFLOAD_TSO_F)
		cn10k_nix_xmit_prepare_tso(m, flags);

	cn10k_nix_xmit_prepare(m, cmd, lmt_addr, flags, txq->lso_tun_fmt);
	if (flags & NIX_TX_MULTI_SEG_F) {
		const uint16_t segdw =
			cn10k_nix_prepare_mseg(m, (uint64_t *)lmt_addr, flags);
		pa = txq->io_addr | ((segdw - 1) << 4);
	} else {
		pa = txq->io_addr | (cn10k_nix_tx_ext_subs(flags) + 1) << 4;
	}
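	/*
	 * For ordered events (sched_type RTE_SCHED_TYPE_ORDERED == 0),
	 * wait for this workslot to reach the head of the order chain
	 * before submitting so packets are sent in order.
	 */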
	if (!ev->sched_type)
		cnxk_sso_hws_head_wait(ws->tx_base + SSOW_LF_GWS_TAG);

	roc_lmt_submit_steorl(lmt_id, pa);

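	/*
	 * With NIX_TX_OFFLOAD_MBUF_NOFF_F, an mbuf that is still
	 * referenced elsewhere (refcnt > 1) skips the SWTAG flush below.
	 */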
	if (flags & NIX_TX_OFFLOAD_MBUF_NOFF_F) {
		if (ref_cnt > 1)
			return 1;
	}

	cnxk_sso_hws_swtag_flush(ws->tx_base + SSOW_LF_GWS_TAG,
				 ws->tx_base + SSOW_LF_GWS_OP_SWTAG_FLUSH);

	return 1;
}

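/*
 * Declare specialized Tx adapter enqueue handlers (single and dual
 * workslot, scalar and multi-segment variants) for every Tx offload
 * flag combination listed in NIX_TX_FASTPATH_MODES.
 */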
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags)                            \
	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_##name(                 \
		void *port, struct rte_event ev[], uint16_t nb_events);       \
	uint16_t __rte_hot cn10k_sso_hws_tx_adptr_enq_seg_##name(             \
		void *port, struct rte_event ev[], uint16_t nb_events);       \
	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_##name(            \
		void *port, struct rte_event ev[], uint16_t nb_events);       \
	uint16_t __rte_hot cn10k_sso_hws_dual_tx_adptr_enq_seg_##name(        \
		void *port, struct rte_event ev[], uint16_t nb_events);

NIX_TX_FASTPATH_MODES
#undef T

#endif /* __CN10K_WORKER_H__ */