1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #include "otx2_worker.h"
/*
 * Inject a RTE_EVENT_OP_NEW event into the SSO via the add-work op.
 * NOTE(review): this extract is elided — the return statements and braces
 * between the numbered lines are missing from view.
 */
7 static __rte_noinline uint8_t
8 otx2_ssogws_new_event(struct otx2_ssogws *ws, const struct rte_event *ev)
/* Decompose the rte_event into the fields otx2_ssogws_add_work() takes. */
10 const uint32_t tag = (uint32_t)ev->event;
11 const uint8_t new_tt = ev->sched_type;
12 const uint64_t event_ptr = ev->u64;
13 const uint16_t grp = ev->queue_id;
/*
 * Flow-control check against the XAQ limit: when the in-flight counter
 * (*ws->fc_mem) has reached ws->xaq_lmt, the (elided) branch presumably
 * rejects the event instead of adding work — TODO confirm against the
 * unelided file.
 */
15 if (ws->xaq_lmt <= *ws->fc_mem)
18 otx2_ssogws_add_work(ws, event_ptr, tag, new_tt, grp);
/*
 * Forward an event that stays in the current SSO group by switching the
 * tag/tag-type of the work currently held by this GWS (no deschedule).
 */
23 static __rte_always_inline void
24 otx2_ssogws_fwd_swtag(struct otx2_ssogws *ws, const struct rte_event *ev)
26 const uint32_t tag = (uint32_t)ev->event;
27 const uint8_t new_tt = ev->sched_type;
/* Current tag type is read back from the hardware tag register. */
28 const uint8_t cur_tt = OTX2_SSOW_TT_FROM_TAG(otx2_read64(ws->tag_op));
31 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
33 * SSO_SYNC_ORDERED        norm           norm             untag
34 * SSO_SYNC_ATOMIC         norm           norm             untag
35 * SSO_SYNC_UNTAGGED       norm           norm             NOOP
/*
 * Per the table above: moving to UNTAGGED uses the untag op (and is a
 * no-op when already untagged); every other transition is a normal
 * tag switch.
 */
38 if (new_tt == SSO_SYNC_UNTAGGED) {
39 if (cur_tt != SSO_SYNC_UNTAGGED)
40 otx2_ssogws_swtag_untag(ws);
42 otx2_ssogws_swtag_norm(ws, tag, new_tt);
/*
 * Forward an event to a DIFFERENT SSO group: update the work-queue pointer
 * for the new group, then switch tag and deschedule so the SSO can
 * re-arbitrate the event to the target group.
 * NOTE(review): the trailing parameter line of the signature (presumably
 * the target group id, `grp`) is elided in this extract — confirm.
 */
48 static __rte_always_inline void
49 otx2_ssogws_fwd_group(struct otx2_ssogws *ws, const struct rte_event *ev,
52 const uint32_t tag = (uint32_t)ev->event;
53 const uint8_t new_tt = ev->sched_type;
/* Publish the new work-queue pointer for the destination group. */
55 otx2_write64(ev->u64, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
56 SSOW_LF_GWS_OP_UPD_WQP_GRP1);
/* Combined switch-tag + deschedule hands the event to the new group. */
58 otx2_ssogws_swtag_desched(ws, tag, new_tt, grp);
/*
 * Dispatch a RTE_EVENT_OP_FORWARD: pick the cheap in-group tag switch when
 * the destination group is unchanged, otherwise take the deschedule path.
 */
61 static __rte_always_inline void
62 otx2_ssogws_forward_event(struct otx2_ssogws *ws, const struct rte_event *ev)
64 const uint8_t grp = ev->queue_id;
66 /* Group hasn't changed, Use SWTAG to forward the event */
67 if (OTX2_SSOW_GRP_FROM_TAG(otx2_read64(ws->tag_op)) == grp)
68 otx2_ssogws_fwd_swtag(ws, ev);
71 * Group has been changed for group based work pipelining,
72 * Use deschedule/add_work operation to transfer the event to
75 otx2_ssogws_fwd_group(ws, ev, grp);
/*
 * R() stamps out, per Rx-offload flag combination `name`, the fast-path
 * dequeue handlers: single-event and burst variants of plain, _timeout,
 * _seg (multi-segment mbuf, flags | NIX_RX_MULTI_SEG_F) and _seg_timeout
 * dequeue.  Each single-event handler first waits out a pending tag
 * switch (ws->swtag_req) before polling the GWS with
 * otx2_ssogws_get_work(); the _timeout variants retry the poll up to
 * `timeout_ticks` times while no work is returned.  The burst variants
 * simply forward to the single-event handler (nb_events is unused).
 * SSO_RX_ADPTR_ENQ_FASTPATH_FUNC at the end instantiates R() for every
 * supported flag combination.
 * NOTE(review): many macro continuation lines (return types, braces,
 * local declarations) are elided in this extract — do not edit the macro
 * body without the unelided file.
 */
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			     \
80 otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev, \
81 uint64_t timeout_ticks) \
83 struct otx2_ssogws *ws = port; \
85 RTE_SET_USED(timeout_ticks); \
87 if (ws->swtag_req) { \
89 otx2_ssogws_swtag_wait(ws); \
93 return otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem); \
97 otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[], \
99 uint64_t timeout_ticks) \
101 RTE_SET_USED(nb_events); \
103 return otx2_ssogws_deq_ ##name(port, ev, timeout_ticks); \
107 otx2_ssogws_deq_timeout_ ##name(void *port, struct rte_event *ev, \
108 uint64_t timeout_ticks) \
110 struct otx2_ssogws *ws = port; \
114 if (ws->swtag_req) { \
116 otx2_ssogws_swtag_wait(ws); \
120 ret = otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem); \
121 for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
122 ret = otx2_ssogws_get_work(ws, ev, flags, \
129 otx2_ssogws_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],\
130 uint16_t nb_events, \
131 uint64_t timeout_ticks) \
133 RTE_SET_USED(nb_events); \
135 return otx2_ssogws_deq_timeout_ ##name(port, ev, timeout_ticks);\
139 otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev, \
140 uint64_t timeout_ticks) \
142 struct otx2_ssogws *ws = port; \
144 RTE_SET_USED(timeout_ticks); \
146 if (ws->swtag_req) { \
148 otx2_ssogws_swtag_wait(ws); \
152 return otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F, \
157 otx2_ssogws_deq_seg_burst_ ##name(void *port, struct rte_event ev[], \
158 uint16_t nb_events, \
159 uint64_t timeout_ticks) \
161 RTE_SET_USED(nb_events); \
163 return otx2_ssogws_deq_seg_ ##name(port, ev, timeout_ticks); \
167 otx2_ssogws_deq_seg_timeout_ ##name(void *port, struct rte_event *ev, \
168 uint64_t timeout_ticks) \
170 struct otx2_ssogws *ws = port; \
174 if (ws->swtag_req) { \
176 otx2_ssogws_swtag_wait(ws); \
180 ret = otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F, \
182 for (iter = 1; iter < timeout_ticks && (ret == 0); iter++) \
183 ret = otx2_ssogws_get_work(ws, ev, \
184 flags | NIX_RX_MULTI_SEG_F, \
191 otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port, \
192 struct rte_event ev[], \
193 uint16_t nb_events, \
194 uint64_t timeout_ticks) \
196 RTE_SET_USED(nb_events); \
198 return otx2_ssogws_deq_seg_timeout_ ##name(port, ev, \
202 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
/*
 * Single-event enqueue entry point: dispatch on ev->op.
 * NEW events go through flow-controlled add-work, FORWARD through the
 * swtag/deschedule path, RELEASE flushes the current tag.
 * NOTE(review): the `switch (ev->op)` line and the per-case break/return
 * lines are elided in this extract.
 */
206 otx2_ssogws_enq(void *port, const struct rte_event *ev)
208 struct otx2_ssogws *ws = port;
211 case RTE_EVENT_OP_NEW:
/* NEW: propagate the add-work result (0 on backpressure). */
213 return otx2_ssogws_new_event(ws, ev);
214 case RTE_EVENT_OP_FORWARD:
215 otx2_ssogws_forward_event(ws, ev);
217 case RTE_EVENT_OP_RELEASE:
/* RELEASE: drop the held tag via the swtag flush op. */
218 otx2_ssogws_swtag_flush(ws->tag_op, ws->swtag_flush_op);
/*
 * Burst enqueue wrapper: this port processes one event at a time, so only
 * ev[0] is submitted and the single-event result is returned; nb_events
 * is intentionally unused.
 */
228 otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
231 RTE_SET_USED(nb_events);
232 return otx2_ssogws_enq(port, ev);
/*
 * Burst enqueue of RTE_EVENT_OP_NEW events.  Events are added one by one;
 * the loop stops early once otx2_ssogws_new_event() returns 0 (XAQ
 * backpressure).  NOTE(review): the declarations/initialisation of `i`
 * and `rc` (presumably rc = 1) and the return of the enqueued count are
 * elided in this extract — confirm against the unelided file.
 */
236 otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
239 struct otx2_ssogws *ws = port;
/* Up-front flow-control check before attempting the burst. */
243 if (ws->xaq_lmt <= *ws->fc_mem)
246 for (i = 0; i < nb_events && rc; i++)
247 rc = otx2_ssogws_new_event(ws, &ev[i]);
/*
 * Burst forward wrapper: a GWS can only hold one event, so only ev[0] is
 * forwarded; nb_events is intentionally unused.
 */
253 otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
256 struct otx2_ssogws *ws = port;
258 RTE_SET_USED(nb_events);
259 otx2_ssogws_forward_event(ws, ev);
/*
 * T() stamps out, per Tx-offload flag combination `name`, the event Tx
 * adapter enqueue handler.  `sz` sizes the on-stack command buffer `cmd`
 * (declaration elided in this extract) handed to otx2_ssogws_event_tx()
 * along with the per-port Tx adapter data; only ev[0] is transmitted
 * (nb_events unused).  SSO_TX_ADPTR_ENQ_FASTPATH_FUNC instantiates T()
 * for every supported flag combination.
 * NOTE(review): several macro continuation lines are elided — do not edit
 * the macro body without the unelided file.
 */
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			     \
266 otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[], \
267 uint16_t nb_events) \
269 struct otx2_ssogws *ws = port; \
272 RTE_SET_USED(nb_events); \
273 return otx2_ssogws_event_tx(ws->base, &ev[0], cmd, \
275 (*)[RTE_MAX_QUEUES_PER_PORT]) \
276 &ws->tx_adptr_data, \
279 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
/*
 * Multi-segment variant of the Tx adapter enqueue handlers: same shape as
 * the single-segment T() above, but the command buffer is enlarged by the
 * scatter/gather descriptor words (NIX_TX_MSEG_SG_DWORDS - 2) and
 * NIX_TX_MULTI_SEG_F is OR-ed into the flags passed to
 * otx2_ssogws_event_tx().  Only ev[0] is transmitted (nb_events unused).
 * NOTE(review): several macro continuation lines are elided in this
 * extract.
 */
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			     \
284 otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[],\
285 uint16_t nb_events) \
287 uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2]; \
288 struct otx2_ssogws *ws = port; \
290 RTE_SET_USED(nb_events); \
291 return otx2_ssogws_event_tx(ws->base, &ev[0], cmd, \
293 (*)[RTE_MAX_QUEUES_PER_PORT]) \
294 &ws->tx_adptr_data, \
295 (flags) | NIX_TX_MULTI_SEG_F); \
297 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
/*
 * Drain every pending event from GGRP `queue_id`: repeatedly issue a
 * grouped, waiting GETWORK against that group and hand each retrieved
 * event to `fn(arg, ...)` (elided call site) until the group's admission,
 * conservative and deschedule counters all read zero.  Finally the GWC is
 * invalidated so the workslot holds no stale prefetched work.
 * NOTE(review): braces, the fn() invocation line and some declarations
 * (ev, aq_cnt, ds_cnt, enable, val) are elided in this extract.
 */
301 ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id, uintptr_t base,
302 otx2_handle_event_t fn, void *arg)
/* Non-zero so the drain loop below is entered at least once. */
304 uint64_t cq_ds_cnt = 1;
311 enable = otx2_read64(base + SSO_LF_GGRP_QCTL);
/* Build the GETWORK operand: target group, grouped mode, blocking wait. */
315 val = queue_id; /* GGRP ID */
316 val |= BIT_ULL(18); /* Grouped */
317 val |= BIT_ULL(16); /* WAIT */
/* Snapshot the group's outstanding-work counters before draining. */
319 aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
320 ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
321 cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
/* Keep only the CQ and DS count fields of the interrupt count register. */
322 cq_ds_cnt &= 0x3FFF3FFF0000;
324 while (aq_cnt || cq_ds_cnt || ds_cnt) {
/* Pull one event from the target group into `ev`. */
325 otx2_write64(val, ws->getwrk_op);
326 otx2_ssogws_get_work_empty(ws, &ev, 0);
/* Hand real events to the caller's handler (call line elided). */
327 if (fn != NULL && ev.u64 != 0)
/* Release the tag we now hold so the event is fully consumed. */
329 if (ev.sched_type != SSO_TT_EMPTY)
330 otx2_ssogws_swtag_flush(ws->tag_op, ws->swtag_flush_op);
/* Re-read the counters to decide whether the group is empty yet. */
332 aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
333 ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
334 cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
335 /* Extract cq and ds count */
336 cq_ds_cnt &= 0x3FFF3FFF0000;
/* Invalidate the GWS cache so no prefetched work outlives the flush. */
339 otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
340 SSOW_LF_GWS_OP_GWC_INVAL);
345 ssogws_reset(struct otx2_ssogws *ws)
347 uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
352 /* Wait till getwork/swtp/waitw/desched completes. */
354 pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
356 } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58)));
358 tag = otx2_read64(base + SSOW_LF_GWS_TAG);
359 pend_tt = (tag >> 32) & 0x3;
360 if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
361 if (pend_tt == SSO_SYNC_ATOMIC || pend_tt == SSO_SYNC_ORDERED)
362 otx2_ssogws_swtag_untag(ws);
363 otx2_ssogws_desched(ws);
367 /* Wait for desched to complete. */
369 pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
371 } while (pend_state & BIT_ULL(58));