1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #ifndef __OTX2_WORKER_H__
6 #define __OTX2_WORKER_H__
8 #include <rte_common.h>
9 #include <rte_branch_prediction.h>
11 #include <otx2_common.h>
12 #include "otx2_evdev.h"
/*
 * Dequeue one work item from this SSO workslot (GWS).
 *
 * Issues a GET_WORK request to the workslot, spins until the hardware
 * clears the "pend" bit (bit 63 of the tag word), then repacks the raw
 * tag word into the rte_event bit layout and, for ethdev events, converts
 * the WQE into an mbuf.
 *
 * @param ws
 *   Workslot whose tag_op/wqp_op register addresses are polled.
 * @param ev
 *   Output event, filled from the repacked tag word.
 * @param flags
 *   NIX_RX_OFFLOAD_* flags forwarded to the WQE-to-mbuf conversion.
 * @param lookup_mem
 *   Ptype lookup table; prefetched when the PTYPE offload flag is set.
 *
 * NOTE(review): the chunk in view is missing interior lines (locals,
 * the RTE_ARCH_ARM64 asm wrapper/#else/#endif and the return) — the
 * comments below describe only what the visible code shows.
 */
static __rte_always_inline uint16_t
otx2_ssogws_get_work(struct otx2_ssogws *ws, struct rte_event *ev,
		     const uint32_t flags, const void * const lookup_mem)
	union otx2_sso_event event;

	/* Kick off GET_WORK: wait-for-work semantics, mask set 0. */
	otx2_write64(BIT_ULL(16) | /* wait for work. */
		     1, /* Use Mask set 0. */
	/* Warm the ptype table while the hardware fetches work. */
	if (flags & NIX_RX_OFFLOAD_PTYPE_F)
		rte_prefetch_non_temporal(lookup_mem);
	/*
	 * ARM64 fast path: load tag and WQE pointer; if the tag's
	 * bit 63 (GET_WORK pending) is clear we are done, otherwise
	 * re-load until it clears.
	 */
	"		ldr %[tag], [%[tag_loc]]	\n"
	"		ldr %[wqp], [%[wqp_loc]]	\n"
	"		tbz %[tag], 63, done%=		\n"
	"		ldr %[tag], [%[tag_loc]]	\n"
	"		ldr %[wqp], [%[wqp_loc]]	\n"
	"		tbnz %[tag], 63, rty%=		\n"
	/* Prefetch the WQE and the mbuf that sits 0x80 bytes before it. */
	"		prfm pldl1keep, [%[wqp], #8]	\n"
	"		sub %[mbuf], %[wqp], #0x80	\n"
	"		prfm pldl1keep, [%[mbuf]]	\n"
	: [tag] "=&r" (event.get_work0),
	  [wqp] "=&r" (get_work1),
	: [tag_loc] "r" (ws->tag_op),
	  [wqp_loc] "r" (ws->wqp_op)
	/* Portable fallback: poll the tag word until bit 63 clears. */
	event.get_work0 = otx2_read64(ws->tag_op);
	while ((BIT_ULL(63)) & event.get_work0)
		event.get_work0 = otx2_read64(ws->tag_op);

	get_work1 = otx2_read64(ws->wqp_op);
	rte_prefetch0((const void *)get_work1);
	/* mbuf header precedes the WQE by sizeof(struct rte_mbuf). */
	mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
	rte_prefetch0((const void *)mbuf);

	/*
	 * Repack the raw SSO tag word into the rte_event layout:
	 * move bits [33:32] up by 6 and bits [45:36] up by 4, keep the
	 * low 32 tag bits in place.
	 */
	event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
		(event.get_work0 & (0x3FFull << 36)) << 4 |
		(event.get_work0 & 0xffffffff);
	/* Remember the schedule type/group now held by this workslot. */
	ws->cur_tt = event.sched_type;
	ws->cur_grp = event.queue_id;

	/* Ethdev-sourced work carries a WQE: convert it to an mbuf. */
	if (event.sched_type != SSO_TT_EMPTY &&
	    event.event_type == RTE_EVENT_TYPE_ETHDEV) {
		otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
				 (uint32_t) event.get_work0, flags, lookup_mem);
	ev->event = event.get_work0;
/* Used in cleaning up workslot. */
/*
 * Drain one work item from the workslot during cleanup.
 *
 * Same polling/repacking flow as otx2_ssogws_get_work(), but uses
 * non-temporal prefetches (the drained data will not be reused) and
 * passes a NULL lookup_mem to the WQE-to-mbuf conversion.
 *
 * NOTE(review): interior lines (locals, asm wrapper, return) are not
 * visible in this chunk; comments cover only the visible code.
 */
static __rte_always_inline uint16_t
otx2_ssogws_get_work_empty(struct otx2_ssogws *ws, struct rte_event *ev,
	union otx2_sso_event event;

	/*
	 * ARM64 fast path: load tag and WQE pointer, retry while the
	 * tag's bit 63 (GET_WORK pending) is still set.
	 */
	"		ldr %[tag], [%[tag_loc]]	\n"
	"		ldr %[wqp], [%[wqp_loc]]	\n"
	"		tbz %[tag], 63, done%=		\n"
	"		ldr %[tag], [%[tag_loc]]	\n"
	"		ldr %[wqp], [%[wqp_loc]]	\n"
	"		tbnz %[tag], 63, rty%=		\n"
	/* Prefetch the WQE and the mbuf located 0x80 bytes before it. */
	"		prfm pldl1keep, [%[wqp], #8]	\n"
	"		sub %[mbuf], %[wqp], #0x80	\n"
	"		prfm pldl1keep, [%[mbuf]]	\n"
	: [tag] "=&r" (event.get_work0),
	  [wqp] "=&r" (get_work1),
	: [tag_loc] "r" (ws->tag_op),
	  [wqp_loc] "r" (ws->wqp_op)
	/* Portable fallback: poll the tag word until bit 63 clears. */
	event.get_work0 = otx2_read64(ws->tag_op);
	while ((BIT_ULL(63)) & event.get_work0)
		event.get_work0 = otx2_read64(ws->tag_op);

	get_work1 = otx2_read64(ws->wqp_op);
	/* Non-temporal: cleanup path, data will not be touched again. */
	rte_prefetch_non_temporal((const void *)get_work1);
	mbuf = (uint64_t)((char *)get_work1 - sizeof(struct rte_mbuf));
	rte_prefetch_non_temporal((const void *)mbuf);

	/* Repack the raw SSO tag word into the rte_event bit layout. */
	event.get_work0 = (event.get_work0 & (0x3ull << 32)) << 6 |
		(event.get_work0 & (0x3FFull << 36)) << 4 |
		(event.get_work0 & 0xffffffff);
	/* Track the schedule type/group now held by this workslot. */
	ws->cur_tt = event.sched_type;
	ws->cur_grp = event.queue_id;

	/* No ptype lookup table on the cleanup path (NULL lookup_mem). */
	if (event.sched_type != SSO_TT_EMPTY &&
	    event.event_type == RTE_EVENT_TYPE_ETHDEV) {
		otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
				 (uint32_t) event.get_work0, flags, NULL);
	ev->event = event.get_work0;
/*
 * Enqueue a new work item to an SSO group.
 *
 * Builds the first ADDWORK doubleword from the tag and the new tag
 * type (tt in bits [33:32]) and issues a paired 128-bit store of
 * (tag word, event pointer) to the target group's base register.
 * NOTE(review): the group index parameter and locals are outside the
 * visible lines of this chunk.
 */
static __rte_always_inline void
otx2_ssogws_add_work(struct otx2_ssogws *ws, const uint64_t event_ptr,
		     const uint32_t tag, const uint8_t new_tt,
	add_work0 = tag | ((uint64_t)(new_tt) << 32);
	otx2_store_pair(add_work0, event_ptr, ws->grps_base[grp]);
/*
 * Switch tag and deschedule in a single operation.
 *
 * Encodes tag in the low 32 bits, the new tag type (masked to 2 bits)
 * at bits [33:32] and the destination group from bit 34 upward, then
 * writes the word to the workslot's SWTAG_DESCHED op register.
 */
static __rte_always_inline void
otx2_ssogws_swtag_desched(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt,
	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
	otx2_write64(val, ws->swtag_desched_op);
/*
 * Normal tag switch: request a switch to (tag, new_tt) on the current
 * workslot.  The tag occupies the low 32 bits and the new tag type
 * (masked to 2 bits) sits at bits [33:32] of the op word.
 */
static __rte_always_inline void
otx2_ssogws_swtag_norm(struct otx2_ssogws *ws, uint32_t tag, uint8_t new_tt)
	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
	otx2_write64(val, ws->swtag_norm_op);
/*
 * Untag the current work: write to the workslot's SWTAG_UNTAG op
 * (derived from the GET_WORK op base address) and record that the
 * workslot now holds an untagged item.
 */
static __rte_always_inline void
otx2_ssogws_swtag_untag(struct otx2_ssogws *ws)
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
/*
 * Flush the tag held by this workslot: write to the SWTAG_FLUSH op
 * (derived from the GET_WORK op base address) and mark the workslot
 * as holding no tag.
 */
static __rte_always_inline void
otx2_ssogws_swtag_flush(struct otx2_ssogws *ws)
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_SWTAG_FLUSH);
	ws->cur_tt = SSO_SYNC_EMPTY;
/*
 * Deschedule the current work item: write to the workslot's DESCHED
 * op register (derived from the GET_WORK op base address) so the SSO
 * can re-dispatch the item later.
 */
static __rte_always_inline void
otx2_ssogws_desched(struct otx2_ssogws *ws)
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_DESCHED);
192 static __rte_always_inline void
193 otx2_ssogws_swtag_wait(struct otx2_ssogws *ws)
195 #ifdef RTE_ARCH_ARM64
199 " ldr %[swtb], [%[swtp_loc]] \n"
200 " cbz %[swtb], done%= \n"
203 " ldr %[swtb], [%[swtp_loc]] \n"
204 " cbnz %[swtb], rty%= \n"
206 : [swtb] "=&r" (swtp)
207 : [swtp_loc] "r" (ws->swtp_op)
210 /* Wait for the SWTAG/SWTAG_FULL operation */
211 while (otx2_read64(ws->swtp_op))