dpdk.git: drivers/event/octeontx2/otx2_worker_dual.c (commit 58fd588f6d046d27c8a00f25a91cb5d6d4779818)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include "otx2_worker_dual.h"
#include "otx2_worker.h"

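/* Add an RTE_EVENT_OP_NEW event to its destination group via add_work,
 * unless the XAQ flow-control limit (xaq_lmt vs. *fc_mem) has been hit.
 */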
static __rte_noinline uint8_t
otx2_ssogws_dual_new_event(struct otx2_ssogws_dual *ws,
			   const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint64_t event_ptr = ev->u64;
	const uint16_t grp = ev->queue_id;

	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	otx2_ssogws_dual_add_work(ws, event_ptr, tag, new_tt, grp);

	return 1;
}

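/* Forward within the current group: switch the tag/tag-type in place,
 * or untag when the new schedule type is SSO_SYNC_UNTAGGED (see the
 * table below).
 */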
static __rte_always_inline void
otx2_ssogws_dual_fwd_swtag(struct otx2_ssogws_state *ws,
			   const struct rte_event *ev)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;
	const uint8_t cur_tt = ws->cur_tt;

	/* 96XX model
	 * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
	 *
	 * SSO_SYNC_ORDERED        norm           norm             untag
	 * SSO_SYNC_ATOMIC         norm           norm             untag
	 * SSO_SYNC_UNTAGGED       norm           norm             NOOP
	 */
	if (new_tt == SSO_SYNC_UNTAGGED) {
		if (cur_tt != SSO_SYNC_UNTAGGED)
			otx2_ssogws_swtag_untag((struct otx2_ssogws *)ws);
	} else {
		otx2_ssogws_swtag_norm((struct otx2_ssogws *)ws, tag, new_tt);
	}
}

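/* Forward to a different group: update the work-queue pointer for the
 * new group, then switch the tag and deschedule so the SSO reschedules
 * the event on the target group.
 */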
static __rte_always_inline void
otx2_ssogws_dual_fwd_group(struct otx2_ssogws_state *ws,
			   const struct rte_event *ev, const uint16_t grp)
{
	const uint32_t tag = (uint32_t)ev->event;
	const uint8_t new_tt = ev->sched_type;

	otx2_write64(ev->u64, OTX2_SSOW_GET_BASE_ADDR(ws->getwrk_op) +
		     SSOW_LF_GWS_OP_UPD_WQP_GRP1);
	rte_smp_wmb();
	otx2_ssogws_swtag_desched((struct otx2_ssogws *)ws, tag, new_tt, grp);
}

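/* RTE_EVENT_OP_FORWARD: pick an in-place SWTAG or a deschedule/add_work
 * transfer depending on whether the destination group changed.
 */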
static __rte_always_inline void
otx2_ssogws_dual_forward_event(struct otx2_ssogws_dual *ws,
			       struct otx2_ssogws_state *vws,
			       const struct rte_event *ev)
{
	const uint8_t grp = ev->queue_id;

	/* Group hasn't changed, use SWTAG to forward the event */
	if (vws->cur_grp == grp) {
		otx2_ssogws_dual_fwd_swtag(vws, ev);
		ws->swtag_req = 1;
	} else {
		/*
		 * Group has changed for group-based work pipelining;
		 * use the deschedule/add_work operation to transfer the
		 * event to the new group/core.
		 */
		otx2_ssogws_dual_fwd_group(vws, ev, grp);
	}
}

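/* Enqueue a single event on a dual workslot port. FORWARD and RELEASE
 * act on ws_state[!ws->vws], the workslot associated with the event
 * currently held by the application.
 */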
uint16_t __hot
otx2_ssogws_dual_enq(void *port, const struct rte_event *ev)
{
	struct otx2_ssogws_dual *ws = port;
	struct otx2_ssogws_state *vws = &ws->ws_state[!ws->vws];

	switch (ev->op) {
	case RTE_EVENT_OP_NEW:
		rte_smp_mb();
		return otx2_ssogws_dual_new_event(ws, ev);
	case RTE_EVENT_OP_FORWARD:
		otx2_ssogws_dual_forward_event(ws, vws, ev);
		break;
	case RTE_EVENT_OP_RELEASE:
		otx2_ssogws_swtag_flush((struct otx2_ssogws *)vws);
		break;
	default:
		return 0;
	}

	return 1;
}

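/* Burst enqueue degenerates to a single-event enqueue; nb_events is
 * ignored.
 */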
uint16_t __hot
otx2_ssogws_dual_enq_burst(void *port, const struct rte_event ev[],
			   uint16_t nb_events)
{
	RTE_SET_USED(nb_events);
	return otx2_ssogws_dual_enq(port, ev);
}

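/* Enqueue a burst of RTE_EVENT_OP_NEW events; stop adding work once
 * the XAQ flow-control limit is reached.
 */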
uint16_t __hot
otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
			       uint16_t nb_events)
{
	struct otx2_ssogws_dual *ws = port;
	uint16_t i, rc = 1;

	rte_smp_mb();
	if (ws->xaq_lmt <= *ws->fc_mem)
		return 0;

	for (i = 0; i < nb_events && rc; i++)
		rc = otx2_ssogws_dual_new_event(ws, &ev[i]);

	return nb_events;
}

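/* Only a single FORWARD event is handled per call; nb_events is
 * ignored.
 */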
uint16_t __hot
otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
			       uint16_t nb_events)
{
	struct otx2_ssogws_dual *ws = port;
	struct otx2_ssogws_state *vws = &ws->ws_state[!ws->vws];

	RTE_SET_USED(nb_events);
	otx2_ssogws_dual_forward_event(ws, vws, ev);

	return 1;
}

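/* Dequeue a single event, alternating between the two workslots of the
 * pair. A SWTAG pending from a previous FORWARD is completed first.
 */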
uint16_t __hot
otx2_ssogws_dual_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	struct otx2_ssogws_dual *ws = port;
	uint8_t gw;

	RTE_SET_USED(timeout_ticks);
	if (ws->swtag_req) {
		otx2_ssogws_swtag_wait((struct otx2_ssogws *)
				       &ws->ws_state[!ws->vws]);
		ws->swtag_req = 0;
		return 1;
	}

	gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws],
				       &ws->ws_state[!ws->vws], ev);
	ws->vws = !ws->vws;

	return gw;
}

uint16_t __hot
otx2_ssogws_dual_deq_burst(void *port, struct rte_event ev[],
			   uint16_t nb_events, uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return otx2_ssogws_dual_deq(port, ev, timeout_ticks);
}

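/* Same as otx2_ssogws_dual_deq() but retries get_work for up to
 * timeout_ticks iterations while no work is available.
 */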
uint16_t __hot
otx2_ssogws_dual_deq_timeout(void *port, struct rte_event *ev,
			     uint64_t timeout_ticks)
{
	struct otx2_ssogws_dual *ws = port;
	uint64_t iter;
	uint8_t gw;

	if (ws->swtag_req) {
		otx2_ssogws_swtag_wait((struct otx2_ssogws *)
				       &ws->ws_state[!ws->vws]);
		ws->swtag_req = 0;
		return 1;
	}

	gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws],
				       &ws->ws_state[!ws->vws], ev);
	ws->vws = !ws->vws;
	for (iter = 1; iter < timeout_ticks && (gw == 0); iter++) {
		gw = otx2_ssogws_dual_get_work(&ws->ws_state[ws->vws],
					       &ws->ws_state[!ws->vws], ev);
		ws->vws = !ws->vws;
	}

	return gw;
}

uint16_t __hot
otx2_ssogws_dual_deq_timeout_burst(void *port, struct rte_event ev[],
				   uint16_t nb_events, uint64_t timeout_ticks)
{
	RTE_SET_USED(nb_events);

	return otx2_ssogws_dual_deq_timeout(port, ev, timeout_ticks);
}