1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #ifndef __OTX2_WORKER_H__
6 #define __OTX2_WORKER_H__
8 #include <rte_common.h>
9 #include <rte_branch_prediction.h>
11 #include <otx2_common.h>
12 #include "otx2_evdev.h"
/* Request and receive one work item from the SSO group-work-scheduler (GWS)
 * slot bound to @ws, translating the raw hardware event word into @ev.
 *
 * NOTE(review): sampled view — the opening brace, the `uint64_t get_work1`
 * declaration, the `asm volatile(` opener, the RTE_ARCH_ARM64 #ifdef/#else/
 * #endif pair and the trailing return fall outside the visible lines.
 */
static __rte_always_inline uint16_t
otx2_ssogws_get_work(struct otx2_ssogws *ws, struct rte_event *ev)
	union otx2_sso_event event;

	/* Trigger a GETWORK operation on the workslot MMIO op register. */
	otx2_write64(BIT_ULL(16) | /* wait for work. */
		     1, /* Use Mask set 0. */
	/* ARM64 path: read the tag and WQE-pointer registers; if tag bit 63
	 * (the in-progress flag — see the scalar poll loop below) is already
	 * clear, the work is ready and we skip the retry loop. */
	"		ldr %[tag], [%[tag_loc]]	\n"
	"		ldr %[wqp], [%[wqp_loc]]	\n"
	"		tbz %[tag], 63, done%=		\n"
	/* Retry loop (rty%=): re-read both registers until bit 63 clears. */
	"		ldr %[tag], [%[tag_loc]]	\n"
	"		ldr %[wqp], [%[wqp_loc]]	\n"
	"		tbnz %[tag], 63, rty%=		\n"
	/* Warm the cache with the WQE the scheduler just handed us. */
	"		prfm pldl1keep, [%[wqp]]	\n"
	: [tag] "=&r" (event.get_work0),
	  [wqp] "=&r" (get_work1)
	: [tag_loc] "r" (ws->tag_op),
	  [wqp_loc] "r" (ws->wqp_op)
	/* Portable (non-ARM64) path: spin on the tag register until the
	 * hardware clears the bit-63 "get-work pending" flag. */
	event.get_work0 = otx2_read64(ws->tag_op);
	while ((BIT_ULL(63)) & event.get_work0)
		event.get_work0 = otx2_read64(ws->tag_op);

	get_work1 = otx2_read64(ws->wqp_op);
	rte_prefetch0((const void *)get_work1);
	/* Repack the HW tag word into rte_event layout: keep the low 32 tag
	 * bits, move the 2-bit sched_type (bits 33:32) and the 10-bit group
	 * (bits 45:36) into their rte_event positions. */
	event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
		(event.get_work0 & (0x3FFull << 36)) << 4 |
		(event.get_work0 & 0xffffffff);
	/* Cache current tag type and group for later SWTAG/DESCHED ops. */
	ws->cur_tt = event.sched_type;
	ws->cur_grp = event.queue_id;

	ev->event = event.get_work0;
/* Used in cleaning up workslot. */
/* Same receive/translate sequence as otx2_ssogws_get_work() but with no
 * visible GETWORK trigger write — it only drains whatever the workslot
 * already holds, hence its use on the cleanup path.
 *
 * NOTE(review): sampled view — opening brace, local declarations, the
 * `asm volatile(` opener, the RTE_ARCH_ARM64 #ifdef/#else/#endif pair and
 * the trailing return fall outside the visible lines.
 */
static __rte_always_inline uint16_t
otx2_ssogws_get_work_empty(struct otx2_ssogws *ws, struct rte_event *ev)
	union otx2_sso_event event;

	/* ARM64 path: load tag/WQE-pointer; bit 63 set means the operation is
	 * still pending (see scalar loop below), so retry until it clears. */
	"		ldr %[tag], [%[tag_loc]]	\n"
	"		ldr %[wqp], [%[wqp_loc]]	\n"
	"		tbz %[tag], 63, done%=		\n"
	"		ldr %[tag], [%[tag_loc]]	\n"
	"		ldr %[wqp], [%[wqp_loc]]	\n"
	"		tbnz %[tag], 63, rty%=		\n"
	/* Prefetch the received WQE for the caller. */
	"		prfm pldl1keep, [%[wqp]]	\n"
	: [tag] "=&r" (event.get_work0),
	  [wqp] "=&r" (get_work1)
	: [tag_loc] "r" (ws->tag_op),
	  [wqp_loc] "r" (ws->wqp_op)
	/* Portable path: poll the tag register until bit 63 clears. */
	event.get_work0 = otx2_read64(ws->tag_op);
	while ((BIT_ULL(63)) & event.get_work0)
		event.get_work0 = otx2_read64(ws->tag_op);

	get_work1 = otx2_read64(ws->wqp_op);
	rte_prefetch0((const void *)get_work1);
	/* Repack HW tag word into rte_event layout (tag | sched_type | group),
	 * identical to the transform in otx2_ssogws_get_work(). */
	event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
		(event.get_work0 & (0x3FFull << 36)) << 4 |
		(event.get_work0 & 0xffffffff);
	ws->cur_tt = event.sched_type;
	ws->cur_grp = event.queue_id;

	ev->event = event.get_work0;
/* Enqueue (ADDWORK) a new event to an SSO group: build the 64-bit work
 * descriptor word from @tag and @new_tt and store it, paired with the
 * event pointer, to the target group's MMIO base.
 *
 * NOTE(review): sampled view — the final `grp` parameter, the opening
 * brace and the `add_work0` local declaration are outside the visible
 * lines; `otx2_store_pair` presumably issues an atomic 128-bit store —
 * confirm against otx2_common.h.
 */
static __rte_always_inline void
otx2_ssogws_add_work(struct otx2_ssogws *ws, const uint64_t event_ptr,
		     const uint32_t tag, const uint8_t new_tt,
	/* Word0: tag in bits 31:0, tag type in bits 39:32. */
	add_work0 = tag | ((uint64_t)(new_tt) << 32);
	otx2_store_pair(add_work0, event_ptr, ws->grps_base[grp]);
/* SWTAG_DESCHED: atomically switch the current event to a new tag/tag-type
 * and deschedule it to group @grp in a single MMIO write.
 *
 * NOTE(review): sampled view — the trailing `grp` parameter, opening brace
 * and `val` declaration are outside the visible lines.
 */
static __rte_always_inline void
otx2_ssogws_swtag_desched(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt,
	/* tag -> bits 31:0, 2-bit tag type -> bits 33:32, group -> bits 34+. */
	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
	otx2_write64(val, ws->swtag_desched_op);
/* SWTAG (normal): switch the event currently held by this workslot to a new
 * tag and tag type via a single write to the SWTAG_NORM op register.
 *
 * NOTE(review): sampled view — opening brace and `val` declaration are
 * outside the visible lines.
 */
static __rte_always_inline void
otx2_ssogws_swtag_norm(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt)
	/* tag -> bits 31:0, 2-bit tag type -> bits 33:32. */
	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
	otx2_write64(val, ws->swtag_norm_op);
/* SWTAG_UNTAG: drop the tag on the currently-held event, moving the
 * workslot to the UNTAGGED state, and mirror that state in ws->cur_tt.
 *
 * NOTE(review): sampled view — opening/closing braces fall outside the
 * visible lines.
 */
static __rte_always_inline void
otx2_ssogws_swtag_untag(struct otx2_ssogws *ws)
	/* The op register address is derived from the workslot LF base. */
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
/* SWTAG_FLUSH: complete/release the current tag, leaving the workslot
 * empty, and mirror that state in ws->cur_tt.
 *
 * NOTE(review): sampled view — opening/closing braces fall outside the
 * visible lines.
 */
static __rte_always_inline void
otx2_ssogws_swtag_flush(struct otx2_ssogws *ws)
	/* The op register address is derived from the workslot LF base. */
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_SWTAG_FLUSH);
	ws->cur_tt = SSO_SYNC_EMPTY;
/* DESCHED: hand the currently-held event back to the SSO scheduler so it
 * can be redelivered later; unlike the swtag helpers above this does not
 * update ws->cur_tt.
 *
 * NOTE(review): sampled view — opening/closing braces fall outside the
 * visible lines.
 */
static __rte_always_inline void
otx2_ssogws_desched(struct otx2_ssogws *ws)
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_DESCHED);
/* Busy-wait until any pending tag-switch on this workslot completes, i.e.
 * until the SWTP (switch-pending) register reads zero.
 *
 * NOTE(review): sampled view — the opening brace, the `swtp` local, the
 * `asm volatile(` opener, the #else/#endif pair and the function's closing
 * lines fall outside the visible range.
 */
static __rte_always_inline void
otx2_ssogws_swtag_wait(struct otx2_ssogws *ws)
#ifdef RTE_ARCH_ARM64
	/* ARM64 path: read SWTP; zero means no switch pending, done. */
	"		ldr %[swtb], [%[swtp_loc]]	\n"
	"		cbz %[swtb], done%=		\n"
	/* Retry loop (rty%=): re-read until SWTP drops to zero. */
	"		ldr %[swtb], [%[swtp_loc]]	\n"
	"		cbnz %[swtb], rty%=		\n"
	: [swtb] "=&r" (swtp)
	: [swtp_loc] "r" (ws->swtp_op)
	/* Wait for the SWTAG/SWTAG_FULL operation */
	while (otx2_read64(ws->swtp_op))