1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #ifndef __OTX2_WORKER_H__
6 #define __OTX2_WORKER_H__
8 #include <rte_common.h>
9 #include <rte_branch_prediction.h>
11 #include <otx2_common.h>
12 #include "otx2_evdev.h"
16 static __rte_always_inline uint16_t
17 otx2_ssogws_get_work(struct otx2_ssogws *ws, struct rte_event *ev,
18 const uint32_t flags, const void * const lookup_mem)
/* Poll the SSO workslot (GWS) for the next work item and translate it
 * into an rte_event.  Issues a GET_WORK request with the "wait" bit set,
 * busy-polls until hardware clears the pending bit (bit 63 of the tag
 * word), then rearranges the hardware tag word into rte_event layout.
 * For ETHDEV-type events the NIX WQE is additionally converted into an
 * rte_mbuf, with PTP timestamp extraction when enabled via flags.
 * NOTE(review): several declarations and the return statement fall
 * outside this view — the visible lines are documented as-is.
 */
20 union otx2_sso_event event;
/* Kick off a GET_WORK on the workslot; hardware completion is then
 * detected by polling the tag word below.
 */
25 otx2_write64(BIT_ULL(16) | /* wait for work. */
26 1, /* Use Mask set 0. */
/* Warm the ptype lookup table cache line while hardware locates work. */
29 if (flags & NIX_RX_OFFLOAD_PTYPE_F)
30 rte_prefetch_non_temporal(lookup_mem);
/* AArch64 fast path: spin re-reading tag/wqp until bit 63 of the tag
 * clears (work delivered), then prefetch the WQE payload and the mbuf
 * header.  The "sub #0x80" mirrors the C path's
 * (wqe - sizeof(struct rte_mbuf)) mbuf recovery — TODO confirm 0x80
 * matches sizeof(struct rte_mbuf) on this target.
 */
33 " ldr %[tag], [%[tag_loc]] \n"
34 " ldr %[wqp], [%[wqp_loc]] \n"
35 " tbz %[tag], 63, done%= \n"
38 " ldr %[tag], [%[tag_loc]] \n"
39 " ldr %[wqp], [%[wqp_loc]] \n"
40 " tbnz %[tag], 63, rty%= \n"
42 " prfm pldl1keep, [%[wqp], #8] \n"
43 " sub %[mbuf], %[wqp], #0x80 \n"
44 " prfm pldl1keep, [%[mbuf]] \n"
45 : [tag] "=&r" (event.get_work0),
46 [wqp] "=&r" (get_work1),
48 : [tag_loc] "r" (ws->tag_op),
49 [wqp_loc] "r" (ws->wqp_op)
/* Portable fallback: poll the tag word until the pending bit (63)
 * clears, then read the WQE pointer and prefetch the mbuf that
 * immediately precedes the WQE in memory.
 */
52 event.get_work0 = otx2_read64(ws->tag_op);
53 while ((BIT_ULL(63)) & event.get_work0)
54 event.get_work0 = otx2_read64(ws->tag_op);
56 get_work1 = otx2_read64(ws->wqp_op);
57 rte_prefetch0((const void *)get_work1);
58 mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
59 rte_prefetch0((const void *)mbuf);
/* Repack the hardware tag word into rte_event bit layout:
 * 2-bit field at bit 32 shifted up by 6, 10-bit field at bit 36
 * shifted up by 4, low 32 bits (tag) kept in place.
 */
62 event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
63 (event.get_work0 & (0x3FFull << 36)) << 4 |
64 (event.get_work0 & 0xffffffff);
/* Cache the current tag type and group on the workslot for later
 * enqueue/release decisions.
 */
65 ws->cur_tt = event.sched_type;
66 ws->cur_grp = event.queue_id;
/* Ethdev Rx event: convert the NIX WQE into an mbuf and, when PTP is
 * enabled, pull the timestamp out of the WQE's SG area.
 */
68 if (event.sched_type != SSO_TT_EMPTY &&
69 event.event_type == RTE_EVENT_TYPE_ETHDEV) {
70 otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
71 (uint32_t) event.get_work0, flags, lookup_mem);
72 /* Extracting tstamp, if PTP enabled*/
73 tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
74 + OTX2_SSO_WQE_SG_PTR);
75 otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
76 flags, (uint64_t *)tstamp_ptr);
/* Publish the translated event word to the caller. */
80 ev->event = event.get_work0;
86 /* Used in cleaning up workslot. */
87 static __rte_always_inline uint16_t
88 otx2_ssogws_get_work_empty(struct otx2_ssogws *ws, struct rte_event *ev,
/* Drain variant of otx2_ssogws_get_work() used during workslot teardown.
 * Same poll-and-translate sequence, but prefetches are non-temporal
 * (the drained data is not expected to be reused) and the ptype lookup
 * table is not supplied (NULL) to the WQE-to-mbuf conversion.
 * NOTE(review): the GET_WORK request, some declarations and the return
 * statement fall outside this view — visible lines documented as-is.
 */
91 union otx2_sso_event event;
/* AArch64 fast path: spin until the pending bit (63) of the tag word
 * clears, then prefetch the WQE payload and the preceding mbuf header.
 */
98 " ldr %[tag], [%[tag_loc]] \n"
99 " ldr %[wqp], [%[wqp_loc]] \n"
100 " tbz %[tag], 63, done%= \n"
103 " ldr %[tag], [%[tag_loc]] \n"
104 " ldr %[wqp], [%[wqp_loc]] \n"
105 " tbnz %[tag], 63, rty%= \n"
107 " prfm pldl1keep, [%[wqp], #8] \n"
108 " sub %[mbuf], %[wqp], #0x80 \n"
109 " prfm pldl1keep, [%[mbuf]] \n"
110 : [tag] "=&r" (event.get_work0),
111 [wqp] "=&r" (get_work1),
113 : [tag_loc] "r" (ws->tag_op),
114 [wqp_loc] "r" (ws->wqp_op)
/* Portable fallback: identical poll loop, non-temporal prefetches. */
117 event.get_work0 = otx2_read64(ws->tag_op);
118 while ((BIT_ULL(63)) & event.get_work0)
119 event.get_work0 = otx2_read64(ws->tag_op);
121 get_work1 = otx2_read64(ws->wqp_op);
122 rte_prefetch_non_temporal((const void *)get_work1);
123 mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
124 rte_prefetch_non_temporal((const void *)mbuf);
/* Repack the hardware tag word into rte_event bit layout (see
 * otx2_ssogws_get_work()).
 */
127 event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
128 (event.get_work0 & (0x3FFull << 36)) << 4 |
129 (event.get_work0 & 0xffffffff);
/* Track current tag type and group on the workslot. */
130 ws->cur_tt = event.sched_type;
131 ws->cur_grp = event.queue_id;
/* Ethdev Rx event: convert WQE to mbuf without a ptype lookup table. */
133 if (event.sched_type != SSO_TT_EMPTY &&
134 event.event_type == RTE_EVENT_TYPE_ETHDEV) {
135 otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
136 (uint32_t) event.get_work0, flags, NULL);
137 /* Extracting tstamp, if PTP enabled*/
138 tstamp_ptr = *(uint64_t *)(((struct nix_wqe_hdr_s *)get_work1)
139 + OTX2_SSO_WQE_SG_PTR);
140 otx2_nix_mbuf_to_tstamp((struct rte_mbuf *)mbuf, ws->tstamp,
141 flags, (uint64_t *)tstamp_ptr);
/* Publish the translated event word to the caller. */
145 ev->event = event.get_work0;
151 static __rte_always_inline void
152 otx2_ssogws_add_work(struct otx2_ssogws *ws, const uint64_t event_ptr,
153 const uint32_t tag, const uint8_t new_tt,
/* Enqueue a new work item to an SSO group: the tag and tag type are
 * packed into one 64-bit word and stored, paired with the event
 * pointer, to the group's base register (presumably the group's
 * ADD_WORK doorbell — confirm against the SSO register spec).
 * NOTE(review): the grp parameter and local declaration lines fall
 * outside this view.
 */
158 add_work0 = tag | ((uint64_t)(new_tt) << 32);
159 otx2_store_pair(add_work0, event_ptr, ws->grps_base[grp]);
162 static __rte_always_inline void
163 otx2_ssogws_swtag_desched(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt,
/* Switch tag and deschedule in one operation: pack tag (low 32 bits),
 * tag type (2 bits at bit 32) and group (at bit 34) into one word and
 * write it to the workslot's SWTAG_DESCHED op register.
 * NOTE(review): the grp parameter line falls outside this view.
 */
168 val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
169 otx2_write64(val, ws->swtag_desched_op);
172 static __rte_always_inline void
173 otx2_ssogws_swtag_norm(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt)
/* Normal tag switch: pack tag (low 32 bits) and tag type (2 bits at
 * bit 32) and write to the workslot's SWTAG_NORM op register.
 */
177 val = tag | ((uint64_t)(new_tt & 0x3) << 32);
178 otx2_write64(val, ws->swtag_norm_op);
181 static __rte_always_inline void
182 otx2_ssogws_swtag_untag(struct otx2_ssogws *ws)
/* Release the current tag without descheduling: write to the workslot's
 * SWTAG_UNTAG register and record the UNTAGGED state locally.
 */
184 otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
185 SSOW_LF_GWS_OP_SWTAG_UNTAG);
186 ws->cur_tt = SSO_SYNC_UNTAGGED;
189 static __rte_always_inline void
190 otx2_ssogws_swtag_flush(struct otx2_ssogws *ws)
/* Flush (complete/release) the current tag: write to the workslot's
 * SWTAG_FLUSH register and record the EMPTY state locally.
 */
192 otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
193 SSOW_LF_GWS_OP_SWTAG_FLUSH);
194 ws->cur_tt = SSO_SYNC_EMPTY;
197 static __rte_always_inline void
198 otx2_ssogws_desched(struct otx2_ssogws *ws)
/* Deschedule the current work item back to the SSO via the workslot's
 * DESCHED op register.
 */
200 otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
201 SSOW_LF_GWS_OP_DESCHED);
204 static __rte_always_inline void
205 otx2_ssogws_swtag_wait(struct otx2_ssogws *ws)
/* Block until any in-flight SWTAG/SWTAG_FULL operation on this workslot
 * completes, i.e. until the swtag-pending word (swtp_op) reads zero.
 */
207 #ifdef RTE_ARCH_ARM64
/* AArch64 fast path: spin on swtp_op until it reads zero. */
211 " ldr %[swtb], [%[swtp_loc]] \n"
212 " cbz %[swtb], done%= \n"
215 " ldr %[swtb], [%[swtp_loc]] \n"
216 " cbnz %[swtb], rty%= \n"
218 : [swtb] "=&r" (swtp)
219 : [swtp_loc] "r" (ws->swtp_op)
222 /* Wait for the SWTAG/SWTAG_FULL operation */
223 while (otx2_read64(ws->swtp_op))
228 static __rte_always_inline void
229 otx2_ssogws_head_wait(struct otx2_ssogws *ws, const uint8_t wait_flag)
/* When wait_flag is set, spin until bit 35 of the workslot tag word is
 * set (presumably the HEAD-of-flow indicator, meaning this workslot may
 * now perform ordered operations — confirm against the SSO spec).
 * No-op when wait_flag is zero.
 */
231 while (wait_flag && !(otx2_read64(ws->tag_op) & BIT_ULL(35)))
237 static __rte_always_inline const struct otx2_eth_txq *
238 otx2_ssogws_xtract_meta(struct rte_mbuf *m)
/* Resolve the Tx queue for an mbuf: look up the ethdev by the mbuf's
 * port and index its tx_queues[] with the queue stashed by the event
 * eth Tx adapter.
 */
240 return rte_eth_devices[m->port].data->tx_queues[
241 rte_event_eth_tx_adapter_txq_get(m)];
244 static __rte_always_inline void
245 otx2_ssogws_prepare_pkt(const struct otx2_eth_txq *txq, struct rte_mbuf *m,
246 uint64_t *cmd, const uint32_t flags)
/* Build the Tx command for one mbuf: copy the queue's command template
 * into cmd via an LMT move (extension sub-descriptor count derived from
 * flags), then fill in the per-packet send descriptor fields.
 */
248 otx2_lmt_mov(cmd, txq->cmd, otx2_nix_tx_ext_subs(flags));
249 otx2_nix_xmit_prepare(m, cmd, flags);
252 static __rte_always_inline uint16_t
253 otx2_ssogws_event_tx(struct otx2_ssogws *ws, struct rte_event ev[],
254 uint64_t *cmd, const uint32_t flags)
/* Transmit the mbuf carried by ev[0] on its NIX Tx queue.  Prepares TSO
 * headers first, waits for head-of-flow only for ordered/atomic events
 * (!sched_type is true for RTE_SCHED_TYPE_ORDERED == 0), then builds
 * and submits either a multi-segment or single-segment send descriptor.
 * NOTE(review): the function's tail (return statement, closing brace)
 * falls outside this view — visible lines documented as-is.
 */
256 struct rte_mbuf *m = ev[0].mbuf;
257 const struct otx2_eth_txq *txq = otx2_ssogws_xtract_meta(m);
259 /* Perform header writes before barrier for TSO */
260 otx2_nix_xmit_prepare_tso(m, flags);
261 otx2_ssogws_head_wait(ws, !ev->sched_type);
262 otx2_ssogws_prepare_pkt(txq, m, cmd, flags);
/* Multi-segment path: compute the sub-descriptor count from the mbuf
 * chain before stamping the timestamp descriptor and submitting.
 */
264 if (flags & NIX_TX_MULTI_SEG_F) {
265 const uint16_t segdw = otx2_nix_prepare_mseg(m, cmd, flags);
266 otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
267 m->ol_flags, segdw, flags);
268 otx2_nix_xmit_mseg_one(cmd, txq->lmt_addr, txq->io_addr, segdw);
270 /* Passing no of segdw as 4: HDR + EXT + SG + SMEM */
271 otx2_nix_xmit_prepare_tstamp(cmd, &txq->cmd[0],
272 m->ol_flags, 4, flags);
273 otx2_nix_xmit_one(cmd, txq->lmt_addr, txq->io_addr, flags);