/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_worker.h"
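/*
 * Fastpath for a single OCTEON TX2 SSO workslot (GWS): event
 * enqueue/dequeue handlers, the Rx/Tx event adapter fastpath variants
 * generated by the R()/T() macros below, and the flush/reset helpers
 * used during queue/port teardown.
 */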
static __rte_noinline uint8_t
otx2_ssogws_new_event(struct otx2_ssogws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	/* Don't add work when the in-flight event (XAQ) limit is hit. */
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	otx2_ssogws_add_work(ws, event_ptr, tag, new_tt, grp);

	return 1;
}
static __rte_always_inline void
otx2_ssogws_fwd_swtag(struct otx2_ssogws *ws, const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt = ws->cur_tt;

	/* SWTAG operation selected per current/new tag type:
	 *
	 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm           norm            untag
	 * SSO_SYNC_ATOMIC         norm           norm            untag
	 * SSO_SYNC_UNTAGGED       norm           norm            NOOP
	 */
	if (new_tt == SSO_SYNC_UNTAGGED) {
		if (cur_tt != SSO_SYNC_UNTAGGED)
			otx2_ssogws_swtag_untag(ws);
	} else {
		otx2_ssogws_swtag_norm(ws, tag, new_tt);
	}

	ws->swtag_req = 1;
}
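/*
 * Forwarding to a different group cannot be done with SWTAG alone: the
 * work-queue pointer is first latched via SSOW_LF_GWS_OP_UPD_WQP_GRP1,
 * then the event is descheduled and re-added to the target group.
 */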
static __rte_always_inline void
otx2_ssogws_fwd_group(struct otx2_ssogws *ws, const struct rte_event *ev,
		      const uint16_t grp)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;

	otx2_write64(ev->u64, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
	/* Ensure the WQP update is visible before descheduling. */
	rte_smp_wmb();
	otx2_ssogws_swtag_desched(ws, tag, new_tt, grp);
}
static __rte_always_inline void
otx2_ssogws_forward_event(struct otx2_ssogws *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (ws->cur_grp == grp)
		otx2_ssogws_fwd_swtag(ws, ev);
	else
		/*
		 * Group has been changed for group-based work pipelining,
		 * use deschedule/add_work operation to transfer the event to
		 * the new group/core.
		 */
		otx2_ssogws_fwd_group(ws, ev, grp);
}
static __rte_always_inline void
otx2_ssogws_release_event(struct otx2_ssogws *ws)
{
	otx2_ssogws_swtag_flush(ws);
}
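/*
 * The R() macro stamps out one set of dequeue handlers (single event, burst,
 * with timeout, plus multi-segment variants) per combination of NIX Rx
 * offload flags. SSO_RX_ADPTR_ENQ_FASTPATH_FUNC expands it for every
 * supported flag combination, e.g. (illustrative, see the Rx fastpath mode
 * table in the driver headers) R(no_offload, 0, 0, 0, 0, 0, 0, 0, ...)
 * yielding otx2_ssogws_deq_no_offload() and friends.
 */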
#define R(name, f6, f5, f4, f3, f2, f1, f0, flags)			\
uint16_t __rte_hot							\
otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev,		\
			uint64_t timeout_ticks)				\
{									\
	struct otx2_ssogws *ws = port;					\
									\
	RTE_SET_USED(timeout_ticks);					\
									\
	if (ws->swtag_req) {						\
		ws->swtag_req = 0;					\
		otx2_ssogws_swtag_wait(ws);				\
		return 1;						\
	}								\
									\
	return otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem);	\
}									\
									\
uint16_t __rte_hot							\
otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[],	\
			      uint16_t nb_events,			\
			      uint64_t timeout_ticks)			\
{									\
	RTE_SET_USED(nb_events);					\
									\
	return otx2_ssogws_deq_ ##name(port, ev, timeout_ticks);	\
}									\
									\
uint16_t __rte_hot							\
otx2_ssogws_deq_timeout_ ##name(void *port, struct rte_event *ev,	\
				uint64_t timeout_ticks)			\
{									\
	struct otx2_ssogws *ws = port;					\
	uint16_t ret = 1;						\
	uint64_t iter;							\
									\
	if (ws->swtag_req) {						\
		ws->swtag_req = 0;					\
		otx2_ssogws_swtag_wait(ws);				\
		return ret;						\
	}								\
									\
	ret = otx2_ssogws_get_work(ws, ev, flags, ws->lookup_mem);	\
	for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)	\
		ret = otx2_ssogws_get_work(ws, ev, flags,		\
					   ws->lookup_mem);		\
									\
	return ret;							\
}									\
									\
uint16_t __rte_hot							\
otx2_ssogws_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],\
				      uint16_t nb_events,		\
				      uint64_t timeout_ticks)		\
{									\
	RTE_SET_USED(nb_events);					\
									\
	return otx2_ssogws_deq_timeout_ ##name(port, ev, timeout_ticks);\
}									\
									\
uint16_t __rte_hot							\
otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev,		\
			    uint64_t timeout_ticks)			\
{									\
	struct otx2_ssogws *ws = port;					\
									\
	RTE_SET_USED(timeout_ticks);					\
									\
	if (ws->swtag_req) {						\
		ws->swtag_req = 0;					\
		otx2_ssogws_swtag_wait(ws);				\
		return 1;						\
	}								\
									\
	return otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F,	\
				    ws->lookup_mem);			\
}									\
									\
uint16_t __rte_hot							\
otx2_ssogws_deq_seg_burst_ ##name(void *port, struct rte_event ev[],	\
				  uint16_t nb_events,			\
				  uint64_t timeout_ticks)		\
{									\
	RTE_SET_USED(nb_events);					\
									\
	return otx2_ssogws_deq_seg_ ##name(port, ev, timeout_ticks);	\
}									\
									\
uint16_t __rte_hot							\
otx2_ssogws_deq_seg_timeout_ ##name(void *port, struct rte_event *ev,	\
				    uint64_t timeout_ticks)		\
{									\
	struct otx2_ssogws *ws = port;					\
	uint16_t ret = 1;						\
	uint64_t iter;							\
									\
	if (ws->swtag_req) {						\
		ws->swtag_req = 0;					\
		otx2_ssogws_swtag_wait(ws);				\
		return ret;						\
	}								\
									\
	ret = otx2_ssogws_get_work(ws, ev, flags | NIX_RX_MULTI_SEG_F,	\
				   ws->lookup_mem);			\
	for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)	\
		ret = otx2_ssogws_get_work(ws, ev,			\
					   flags | NIX_RX_MULTI_SEG_F,	\
					   ws->lookup_mem);		\
									\
	return ret;							\
}									\
									\
uint16_t __rte_hot							\
otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port,			\
					  struct rte_event ev[],	\
					  uint16_t nb_events,		\
					  uint64_t timeout_ticks)	\
{									\
	RTE_SET_USED(nb_events);					\
									\
	return otx2_ssogws_deq_seg_timeout_ ##name(port, ev,		\
						   timeout_ticks);	\
}

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
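/*
 * Generic enqueue entry points: each rte_event carries its operation type
 * (NEW/FORWARD/RELEASE) and is dispatched to the matching SSO primitive.
 * Note that otx2_ssogws_enq_burst() and otx2_ssogws_enq_fwd_burst() process
 * only the first event per call; only the NEW-only burst iterates.
 */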
uint16_t __rte_hot
otx2_ssogws_enq(void *port, const struct rte_event *ev)
{
	struct otx2_ssogws *ws = port;

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		rte_smp_mb();
		return otx2_ssogws_new_event(ws, ev);
	case RTE_EVENT_OP_FORWARD:
		otx2_ssogws_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		otx2_ssogws_release_event(ws);
		break;
	default:
		return 0;
	}

	return 1;
}

uint16_t __rte_hot
otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
		      uint16_t nb_events)
{
	RTE_SET_USED(nb_events);
	return otx2_ssogws_enq(port, ev);
}

uint16_t __rte_hot
otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
			  uint16_t nb_events)
{
	struct otx2_ssogws *ws = port;
	uint16_t i, rc = 1;

	rte_smp_mb();
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	for (i = 0; i < nb_events && rc; i++)
		rc = otx2_ssogws_new_event(ws, &ev[i]);

	return nb_events;
}

uint16_t __rte_hot
otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
			  uint16_t nb_events)
{
	struct otx2_ssogws *ws = port;

	RTE_SET_USED(nb_events);
	otx2_ssogws_forward_event(ws, ev);

	return 1;
}
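/*
 * The T() macro generates one Tx adapter enqueue handler per combination of
 * NIX Tx offload flags, with sz sizing the command buffer for that
 * combination. SSO_TX_ADPTR_ENQ_FASTPATH_FUNC expands it for every supported
 * flag set: once for single-segment and once for multi-segment
 * (NIX_TX_MULTI_SEG_F) transmission.
 */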
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			  \
uint16_t __rte_hot							  \
otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[],	  \
				  uint16_t nb_events)			  \
{									  \
	struct otx2_ssogws *ws = port;					  \
	uint64_t cmd[sz];						  \
									  \
	RTE_SET_USED(nb_events);					  \
	return otx2_ssogws_event_tx(ws, ev, cmd, (const uint64_t	  \
				    (*)[RTE_MAX_QUEUES_PER_PORT])	  \
				    &ws->tx_adptr_data,			  \
				    flags);				  \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
#define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			  \
uint16_t __rte_hot							  \
otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[], \
				      uint16_t nb_events)		  \
{									  \
	struct otx2_ssogws *ws = port;					  \
	uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2];			  \
									  \
	RTE_SET_USED(nb_events);					  \
	return otx2_ssogws_event_tx(ws, ev, cmd, (const uint64_t	  \
				    (*)[RTE_MAX_QUEUES_PER_PORT])	  \
				    &ws->tx_adptr_data,			  \
				    (flags) | NIX_TX_MULTI_SEG_F);	  \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
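/*
 * Drain every event still queued on a GGRP: repeatedly issue GETWORK against
 * the group (val selects the GGRP with the Grouped and WAIT bits set) until
 * the group's queue counters all read zero, handing each drained event to
 * the caller's callback when one is provided.
 */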
void
ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id, uintptr_t base,
		    otx2_handle_event_t fn, void *arg)
{
	uint64_t cq_ds_cnt = 1;
	uint64_t aq_cnt = 1;
	uint64_t ds_cnt = 1;
	struct rte_event ev;
	uint64_t enable;
	uint64_t val;

	enable = otx2_read64(base + SSO_LF_GGRP_QCTL);
	if (!enable)
		return;

	val  = queue_id;	/* GGRP ID */
	val |= BIT_ULL(18);	/* Grouped */
	val |= BIT_ULL(16);	/* WAIT */

	aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
	ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
	cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
	cq_ds_cnt &= 0x3FFF3FFF0000;

	while (aq_cnt || cq_ds_cnt || ds_cnt) {
		otx2_write64(val, ws->getwrk_op);
		otx2_ssogws_get_work_empty(ws, &ev, 0);
		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
		if (ev.sched_type != SSO_TT_EMPTY)
			otx2_ssogws_swtag_flush(ws);
		rte_mb();
		aq_cnt = otx2_read64(base + SSO_LF_GGRP_AQ_CNT);
		ds_cnt = otx2_read64(base + SSO_LF_GGRP_MISC_CNT);
		cq_ds_cnt = otx2_read64(base + SSO_LF_GGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x3FFF3FFF0000;
	}

	/* Invalidate the get-work cache for this slot. */
	otx2_write64(0, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_GWC_INVAL);
	rte_mb();
}
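/*
 * Quiesce a workslot: wait for any outstanding GETWORK/SWTAG/desched to
 * retire, release or deschedule whatever work the slot still holds, then
 * wait for that deschedule to complete so the GWS is left empty.
 */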
void
ssogws_reset(struct otx2_ssogws *ws)
{
	uintptr_t base = OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op);
	uint64_t pend_state;
	uint8_t pend_tt;
	uint64_t tag;

	/* Wait till getwork/swtp/waitw/desched completes. */
	do {
		pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
		rte_mb();
	} while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58)));

	tag = otx2_read64(base + SSOW_LF_GWS_TAG);
	pend_tt = (tag >> 32) & 0x3;
	if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
		if (pend_tt == SSO_SYNC_ATOMIC || pend_tt == SSO_SYNC_ORDERED)
			otx2_ssogws_swtag_untag(ws);
		otx2_ssogws_desched(ws);
	}
	rte_mb();

	/* Wait for desched to complete. */
	do {
		pend_state = otx2_read64(base + SSOW_LF_GWS_PENDSTATE);
		rte_mb();
	} while (pend_state & BIT_ULL(58));
}