/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "ssovf_worker.h"

static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t grp = ev->queue_id;

	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
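
/*
 * A NEW event carries no scheduling context to release on this work
 * slot, so injection is a single add_work to the target group; no tag
 * switch or wait is needed here.
 */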

static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;
	const uint32_t tag = (uint32_t)ev->event;
	/*
	 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm           norm             untag
	 * SSO_SYNC_ATOMIC         norm           norm             untag
	 * SSO_SYNC_UNTAGGED       full           full             NOOP
	 */
	if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
		if (new_tt != SSO_SYNC_UNTAGGED) {
			ssows_swtag_full(ws, ev->u64, tag,
				new_tt, grp);
		}
	} else {
		if (likely(new_tt != SSO_SYNC_UNTAGGED))
			ssows_swtag_norm(ws, tag, new_tt);
		else
			ssows_swtag_untag(ws);
	}
	ws->swtag_req = 1;
}
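
/*
 * The tag switch issued above completes asynchronously; setting
 * ws->swtag_req makes the next dequeue call ssows_swtag_wait() before
 * returning (see ssows_deq() and friends below).
 */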

#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)

static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;

	if (cur_tt == SSO_SYNC_ORDERED) {
		/* Create unique tag based on custom event type and new grp */
		uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

		newtag |= grp << 20;
		newtag |= tag;
		ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
		rte_smp_wmb();
		ssows_swtag_wait(ws);
	} else {
		rte_smp_wmb();
	}
	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}
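
/*
 * For an ORDERED context the event is first switched to a unique ATOMIC
 * tag and the switch is awaited, so the ordered context is released in
 * order before the event is re-injected into the new group via add_work.
 */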

static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (ws->cur_grp == grp)
		ssows_fwd_swtag(ws, ev, grp);
	else
	/*
	 * Group has been changed for group based work pipelining,
	 * use deschedule/add_work operation to transfer the event to
	 * the new group/core
	 */
		ssows_fwd_group(ws, ev, grp);
}

static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
	if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
		ssows_swtag_untag(ws);
}

__rte_always_inline uint16_t __rte_hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;

	RTE_SET_USED(timeout_ticks);

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
		return 1;
	} else {
		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
	}
}
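
/*
 * Minimal usage sketch (illustrative, compiled out): how an application
 * reaches ssows_deq()/ssows_enq() through the public eventdev API,
 * assuming the eventdev headers already pulled in via ssovf_worker.h.
 * The dev_id/port_id values, the stop flag and the SSOVF_USAGE_EXAMPLE
 * guard are hypothetical, not part of this driver.
 */
#ifdef SSOVF_USAGE_EXAMPLE
static volatile int ssovf_example_stop;

static int
ssovf_example_worker(void *arg)
{
	const uint8_t dev_id = 0;	/* hypothetical device id */
	const uint8_t port_id = 0;	/* hypothetical port of this lcore */
	struct rte_event ev;

	RTE_SET_USED(arg);
	while (!ssovf_example_stop) {
		/* Resolves to ssows_deq_burst() when timeouts are disabled */
		if (!rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0))
			continue;
		/* ... process the event, then hand it back ... */
		ev.op = RTE_EVENT_OP_FORWARD;
		/* Resolves to ssows_enq_burst() */
		rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
	}
	return 0;
}
#endif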

__rte_always_inline uint16_t __rte_hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;
	uint64_t iter;
	uint16_t ret = 1;

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
	} else {
		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE);
	}
	return ret;
}
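
/*
 * Note that timeout_ticks bounds the number of GET_WORK retries rather
 * than measuring wall-clock time: each loop iteration above is one
 * ssows_get_work() attempt.
 */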

uint16_t __rte_hot
ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
		uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq(port, ev, timeout_ticks);
}

uint16_t __rte_hot
ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
		uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq_timeout(port, ev, timeout_ticks);
}

__rte_always_inline uint16_t __rte_hot
ssows_deq_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;

	RTE_SET_USED(timeout_ticks);

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
		return 1;
	} else {
		return ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
				OCCTX_RX_MULTI_SEG_F);
	}
}

__rte_always_inline uint16_t __rte_hot
ssows_deq_timeout_mseg(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;
	uint64_t iter;
	uint16_t ret = 1;

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
	} else {
		ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
				OCCTX_RX_MULTI_SEG_F);
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
			ret = ssows_get_work(ws, ev, OCCTX_RX_OFFLOAD_NONE |
					OCCTX_RX_MULTI_SEG_F);
	}
	return ret;
}

uint16_t __rte_hot
ssows_deq_burst_mseg(void *port, struct rte_event ev[], uint16_t nb_events,
		uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq_mseg(port, ev, timeout_ticks);
}

uint16_t __rte_hot
ssows_deq_timeout_burst_mseg(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq_timeout_mseg(port, ev, timeout_ticks);
}

__rte_always_inline uint16_t __rte_hot
ssows_enq(void *port, const struct rte_event *ev)
{
	struct ssows *ws = port;
	uint16_t ret = 1;

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		rte_smp_wmb();
		ssows_new_event(ws, ev);
		break;
	case RTE_EVENT_OP_FORWARD:
		ssows_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		ssows_release_event(ws);
		break;
	default:
		ret = 0;
	}
	return ret;
}
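
/*
 * Return semantics: 1 means the event was accepted (NEW/FORWARD/RELEASE
 * issued); 0 rejects an unrecognized op so callers can detect the error.
 */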

uint16_t __rte_hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	RTE_SET_USED(nb_events);
	return ssows_enq(port, ev);
}

uint16_t __rte_hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	uint16_t i;
	struct ssows *ws = port;

	rte_smp_wmb();
	for (i = 0; i < nb_events; i++)
		ssows_new_event(ws, &ev[i]);

	return nb_events;
}
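
/*
 * A single write barrier is amortized over the whole burst: the
 * application's event payload stores are made visible once, before the
 * stream of add_work operations.
 */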

uint16_t __rte_hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	struct ssows *ws = port;
	RTE_SET_USED(nb_events);

	ssows_forward_event(ws, ev);

	return 1;
}

void
ssows_flush_events(struct ssows *ws, uint8_t queue_id,
		ssows_handle_event_t fn, void *arg)
{
	uint32_t reg_off;
	struct rte_event ev;
	uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;
	uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

	enable = ssovf_read64(base + SSO_VHGRP_QCTL);
	if (!enable)
		return;

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 17; /* Grouped */
	reg_off |= 1 << 16; /* WAIT */
	reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
	while (aq_cnt || cq_ds_cnt) {
		aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
		cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x1FFF1FFF0000;

		ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);

		sched_type_queue = (get_work0 >> 32) & 0xfff;
		ws->cur_tt = sched_type_queue & 0x3;
		ws->cur_grp = sched_type_queue >> 2;
		sched_type_queue = sched_type_queue << 38;
		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
					(ev.event >> 20) & 0x7F,
					OCCTX_RX_OFFLOAD_NONE |
					OCCTX_RX_MULTI_SEG_F);
		else
			ev.u64 = get_work1;

		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
	}
}
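
/*
 * The drain loop above exits only once both the admission queue count
 * (AQ_CNT) and the CQ/DS counts masked out of INT_CNT reach zero, i.e.
 * the group holds no more work to hand out.
 */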

void
ssows_reset(struct ssows *ws)
{
	uint64_t tag;
	uint64_t pend_tag;
	uint8_t pend_tt;
	uint8_t tt;

	tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
	pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

	if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
		pend_tt = (pend_tag >> 32) & 0x3;
		if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
			ssows_desched(ws);
	} else {
		tt = (tag >> 32) & 0x3;
		if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
			ssows_swtag_untag(ws);
	}
}
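
/*
 * Two distinct cleanup paths: if a switch to an ORDERED/ATOMIC tag is
 * still pending, the work slot is descheduled; otherwise an already
 * held ORDERED/ATOMIC tag is released with an untag.
 */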

static __rte_always_inline uint16_t
__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
		uint16_t nb_events, const uint16_t flag)
{
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mbuf *m;
	struct rte_eth_dev *ethdev;
	struct ssows *ws = port;
	struct octeontx_txq *txq;
	uint64_t cmd[4];

	RTE_SET_USED(nb_events);
	switch (ev->sched_type) {
	case SSO_SYNC_ORDERED:
		ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
		rte_io_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_UNTAGGED:
		ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
				ev->queue_id);
		rte_io_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_ATOMIC:
		rte_io_wmb();
		break;
	}

	m = ev[0].mbuf;
	port_id = m->port;
	queue_id = rte_event_eth_tx_adapter_txq_get(m);
	ethdev = &rte_eth_devices[port_id];
	txq = ethdev->data->tx_queues[queue_id];

	return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
}
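
/*
 * Switching to an ATOMIC context (and awaiting the switch) before the
 * transmit ensures only one worker holds the flow's tag while it sends,
 * preserving per-flow packet order on the wire.
 */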

uint16_t __rte_hot
sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
		uint16_t nb_events)
{
	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
			OCCTX_TX_OFFLOAD_NONE);
}

uint16_t __rte_hot
sso_event_tx_adapter_enqueue_mseg(void *port, struct rte_event ev[],
		uint16_t nb_events)
{
	return __sso_event_tx_adapter_enqueue(port, ev, nb_events,
			OCCTX_TX_OFFLOAD_NONE |
			OCCTX_TX_MULTI_SEG_F);
}