/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "ssovf_worker.h"

static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t grp = ev->queue_id;

	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

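/* Forward an event within the same group by switching the tag on the
 * current work slot; the switch type depends on the current and new
 * scheduling types (see the table below).
 */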
static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;
	const uint32_t tag = (uint32_t)ev->event;
	/*
	 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm           norm             untag
	 * SSO_SYNC_ATOMIC         norm           norm             untag
	 * SSO_SYNC_UNTAGGED       full           full             NOOP
	 */
	if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
		if (new_tt != SSO_SYNC_UNTAGGED) {
			ssows_swtag_full(ws, ev->u64, tag,
				new_tt, grp);
		}
	} else {
		if (likely(new_tt != SSO_SYNC_UNTAGGED))
			ssows_swtag_norm(ws, tag, new_tt);
		else
			ssows_swtag_untag(ws);
	}
	ws->swtag_req = 1;
}

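/* Custom event type used to build a unique tag when an event is forwarded
 * to a different group while holding an ORDERED tag.
 */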
#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)

static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
	const uint64_t event_ptr = ev->u64;
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t cur_tt = ws->cur_tt;
	const uint8_t new_tt = ev->sched_type;

	if (cur_tt == SSO_SYNC_ORDERED) {
		/* Create unique tag based on custom event type and new grp */
		uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

		newtag |= grp << 20;
		newtag |= tag;
		ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
		rte_smp_wmb();
		ssows_swtag_wait(ws);
	} else {
		rte_smp_wmb();
	}
	ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed; use SWTAG to forward the event */
	if (ws->cur_grp == grp)
		ssows_fwd_swtag(ws, ev, grp);
	else
	/*
	 * Group has changed for group-based work pipelining; use the
	 * deschedule/add_work operation to transfer the event to the
	 * new group/core.
	 */
		ssows_fwd_group(ws, ev, grp);
}

static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
	if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
		ssows_swtag_untag(ws);
}

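/* Dequeue a single event from the work slot. If a tag switch was requested
 * by a previous forward, wait for it to complete instead of pulling new work.
 */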
__rte_always_inline uint16_t __hot
ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;

	RTE_SET_USED(timeout_ticks);

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
		return 1;
	} else {
		return ssows_get_work(ws, ev);
	}
}

__rte_always_inline uint16_t __hot
ssows_deq_timeout(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct ssows *ws = port;
	uint64_t iter;
	uint16_t ret = 1;

	if (ws->swtag_req) {
		ws->swtag_req = 0;
		ssows_swtag_wait(ws);
	} else {
		ret = ssows_get_work(ws, ev);
		for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)
			ret = ssows_get_work(ws, ev);
	}
	return ret;
}

uint16_t __hot
ssows_deq_burst(void *port, struct rte_event ev[], uint16_t nb_events,
		uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq(port, ev, timeout_ticks);
}

uint16_t __hot
ssows_deq_timeout_burst(void *port, struct rte_event ev[], uint16_t nb_events,
			uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return ssows_deq_timeout(port, ev, timeout_ticks);
}

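/* Enqueue a single event: dispatch on the event op to add new work,
 * forward the current event, or release the currently held tag.
 */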
__rte_always_inline uint16_t __hot
ssows_enq(void *port, const struct rte_event *ev)
{
	struct ssows *ws = port;
	uint16_t ret = 1;

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		rte_smp_wmb();
		ssows_new_event(ws, ev);
		break;
	case RTE_EVENT_OP_FORWARD:
		ssows_forward_event(ws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		ssows_release_event(ws);
		break;
	default:
		ret = 0;
	}
	return ret;
}

uint16_t __hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	RTE_SET_USED(nb_events);
	return ssows_enq(port, ev);
}

uint16_t __hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	uint16_t i;
	struct ssows *ws = port;

	rte_smp_wmb();
	for (i = 0; i < nb_events; i++)
		ssows_new_event(ws, &ev[i]);

	return nb_events;
}

uint16_t __hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
	struct ssows *ws = port;
	RTE_SET_USED(nb_events);

	ssows_forward_event(ws, ev);

	return 1;
}

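/* Drain all pending work from the given event queue (SSO group) and hand
 * each flushed event to the supplied callback.
 */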
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id,
		ssows_handle_event_t fn, void *arg)
{
	uint32_t reg_off;
	struct rte_event ev;
	uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;
	uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

	enable = ssovf_read64(base + SSO_VHGRP_QCTL);
	if (!enable)
		return;

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 17; /* Grouped */
	reg_off |= 1 << 16; /* WAIT */
	reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
	while (aq_cnt || cq_ds_cnt) {
		aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
		cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
		/* Extract cq and ds count */
		cq_ds_cnt &= 0x1FFF1FFF0000;

		ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);

		sched_type_queue = (get_work0 >> 32) & 0xfff;
		ws->cur_tt = sched_type_queue & 0x3;
		ws->cur_grp = sched_type_queue >> 2;
		sched_type_queue = sched_type_queue << 38;
		ev.event = sched_type_queue | (get_work0 & 0xffffffff);
		if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
			ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
					(ev.event >> 20) & 0x7F);
		else
			ev.u64 = get_work1;

		if (fn != NULL && ev.u64 != 0)
			fn(arg, ev);
	}
}

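/* Release any held tag or pending tag switch on this work slot so the
 * port can be safely stopped or reconfigured.
 */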
void
ssows_reset(struct ssows *ws)
{
	uint64_t tag;
	uint64_t pend_tag;
	uint8_t pend_tt;
	uint8_t tt;

	tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
	pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

	if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
		pend_tt = (pend_tag >> 32) & 0x3;
		if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
			ssows_desched(ws);
	} else {
		tt = (tag >> 32) & 0x3;
		if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
			ssows_swtag_untag(ws);
	}
}

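/* Single-event Tx adapter enqueue: move the event to an ATOMIC tag before
 * transmit, then send the mbuf on the octeontx PKO queue mapped to its
 * Tx queue.
 */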
uint16_t __hot
sso_event_tx_adapter_enqueue(void *port,
		struct rte_event ev[], uint16_t nb_events)
{
	uint16_t port_id;
	uint16_t queue_id;
	struct rte_mbuf *m;
	struct rte_eth_dev *ethdev;
	struct ssows *ws = port;
	struct octeontx_txq *txq;
	octeontx_dq_t *dq;

	RTE_SET_USED(nb_events);
	switch (ev->sched_type) {
	case SSO_SYNC_ORDERED:
		ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
		rte_cio_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_UNTAGGED:
		ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
				ev->queue_id);
		rte_cio_wmb();
		ssows_swtag_wait(ws);
		break;
	case SSO_SYNC_ATOMIC:
		rte_cio_wmb();
		break;
	}

	m = ev[0].mbuf;
	port_id = m->port;
	queue_id = rte_event_eth_tx_adapter_txq_get(m);
	ethdev = &rte_eth_devices[port_id];
	txq = ethdev->data->tx_queues[queue_id];
	dq = &txq->dq;

	if (__octeontx_xmit_pkts(dq->lmtline_va, dq->ioreg_va, dq->fc_status_va,
				m) < 0)
		return 0;

	return 1;
}