mbuf: add rte prefix to offload flags
dpdk.git / drivers/event/octeontx/ssovf_worker.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "ssovf_worker.h"

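/* Inject a new event: push the event payload, tag, schedule type and
 * target group to the SSO with an add_work operation.
 */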
static __rte_always_inline void
ssows_new_event(struct ssows *ws, const struct rte_event *ev)
{
        const uint64_t event_ptr = ev->u64;
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t new_tt = ev->sched_type;
        const uint8_t grp = ev->queue_id;

        ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

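/* Forward an event within the same group using a tag switch. The flavour of
 * switch (full/normal/untag) depends on the current and new tag types, as
 * summarised in the table below; swtag_req is set so the next dequeue waits
 * for the switch to complete.
 */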
static __rte_always_inline void
ssows_fwd_swtag(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
        const uint8_t cur_tt = ws->cur_tt;
        const uint8_t new_tt = ev->sched_type;
        const uint32_t tag = (uint32_t)ev->event;
        /*
         * cur_tt/new_tt     SSO_SYNC_ORDERED SSO_SYNC_ATOMIC SSO_SYNC_UNTAGGED
         *
         * SSO_SYNC_ORDERED        norm           norm             untag
         * SSO_SYNC_ATOMIC         norm           norm             untag
         * SSO_SYNC_UNTAGGED       full           full             NOOP
         */
        if (unlikely(cur_tt == SSO_SYNC_UNTAGGED)) {
                if (new_tt != SSO_SYNC_UNTAGGED) {
                        ssows_swtag_full(ws, ev->u64, tag,
                                new_tt, grp);
                }
        } else {
                if (likely(new_tt != SSO_SYNC_UNTAGGED))
                        ssows_swtag_norm(ws, tag, new_tt);
                else
                        ssows_swtag_untag(ws);
        }
        ws->swtag_req = 1;
}

#define OCT_EVENT_TYPE_GRP_FWD (RTE_EVENT_TYPE_MAX - 1)

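/* Forward an event to a different group. From an ORDERED context, first
 * tag-switch to ATOMIC on a unique tag (built from a custom event type and
 * the new group) and wait for the switch, then re-inject the event into the
 * new group with add_work.
 */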
static __rte_always_inline void
ssows_fwd_group(struct ssows *ws, const struct rte_event *ev, const uint8_t grp)
{
        const uint64_t event_ptr = ev->u64;
        const uint32_t tag = (uint32_t)ev->event;
        const uint8_t cur_tt = ws->cur_tt;
        const uint8_t new_tt = ev->sched_type;

        if (cur_tt == SSO_SYNC_ORDERED) {
                /* Create unique tag based on custom event type and new grp */
                uint32_t newtag = OCT_EVENT_TYPE_GRP_FWD << 28;

                newtag |= grp << 20;
                newtag |= tag;
                ssows_swtag_norm(ws, newtag, SSO_SYNC_ATOMIC);
                rte_smp_wmb();
                ssows_swtag_wait(ws);
        } else {
                rte_smp_wmb();
        }
        ssows_add_work(ws, event_ptr, tag, new_tt, grp);
}

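/* RTE_EVENT_OP_FORWARD handler: a tag switch keeps the event in the same
 * group, a group change goes through the deschedule/add_work path.
 */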
static __rte_always_inline void
ssows_forward_event(struct ssows *ws, const struct rte_event *ev)
{
        const uint8_t grp = ev->queue_id;

        /* Group hasn't changed, use SWTAG to forward the event */
        if (ws->cur_grp == grp)
                ssows_fwd_swtag(ws, ev, grp);
        else
        /*
         * Group has changed for group-based work pipelining; use the
         * deschedule/add_work operation to transfer the event to the
         * new group/core.
         */
                ssows_fwd_group(ws, ev, grp);
}

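/* RTE_EVENT_OP_RELEASE handler: drop the tag currently held by the work
 * slot unless it is already untagged.
 */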
static __rte_always_inline void
ssows_release_event(struct ssows *ws)
{
        if (likely(ws->cur_tt != SSO_SYNC_UNTAGGED))
                ssows_swtag_untag(ws);
}

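/* Template for the dequeue fast-path handlers. For every Rx offload flag
 * combination listed in SSO_RX_ADPTR_ENQ_FASTPATH_FUNC, R() expands into a
 * single-event dequeue, a burst wrapper and their timeout variants. A
 * pending tag switch (swtag_req) is completed before new work is pulled.
 */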
#define R(name, f2, f1, f0, flags)                                           \
static uint16_t __rte_noinline __rte_hot                                     \
ssows_deq_ ##name(void *port, struct rte_event *ev, uint64_t timeout_ticks)  \
{                                                                            \
        struct ssows *ws = port;                                             \
                                                                             \
        RTE_SET_USED(timeout_ticks);                                         \
                                                                             \
        if (ws->swtag_req) {                                                 \
                ws->swtag_req = 0;                                           \
                ssows_swtag_wait(ws);                                        \
                return 1;                                                    \
        } else {                                                             \
                return ssows_get_work(ws, ev, flags);                        \
        }                                                                    \
}                                                                            \
                                                                             \
static uint16_t __rte_hot                                                    \
ssows_deq_burst_ ##name(void *port, struct rte_event ev[],                   \
                         uint16_t nb_events, uint64_t timeout_ticks)         \
{                                                                            \
        RTE_SET_USED(nb_events);                                             \
                                                                             \
        return ssows_deq_ ##name(port, ev, timeout_ticks);                   \
}                                                                            \
                                                                             \
static uint16_t __rte_hot                                                    \
ssows_deq_timeout_ ##name(void *port, struct rte_event *ev,                  \
                          uint64_t timeout_ticks)                            \
{                                                                            \
        struct ssows *ws = port;                                             \
        uint64_t iter;                                                       \
        uint16_t ret = 1;                                                    \
                                                                             \
        if (ws->swtag_req) {                                                 \
                ws->swtag_req = 0;                                           \
                ssows_swtag_wait(ws);                                        \
        } else {                                                             \
                ret = ssows_get_work(ws, ev, flags);                         \
                for (iter = 1; iter < timeout_ticks && (ret == 0); iter++)   \
                        ret = ssows_get_work(ws, ev, flags);                 \
        }                                                                    \
        return ret;                                                          \
}                                                                            \
                                                                             \
static uint16_t __rte_hot                                                    \
ssows_deq_timeout_burst_ ##name(void *port, struct rte_event ev[],           \
                                uint16_t nb_events, uint64_t timeout_ticks)  \
{                                                                            \
        RTE_SET_USED(nb_events);                                             \
                                                                             \
        return ssows_deq_timeout_ ##name(port, ev, timeout_ticks);           \
}

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R

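/* Single-event enqueue: dispatch on the event operation type. Returns 1 on
 * success and 0 for an unknown op.
 */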
__rte_always_inline uint16_t __rte_hot
ssows_enq(void *port, const struct rte_event *ev)
{
        struct ssows *ws = port;
        uint16_t ret = 1;

        switch (ev->op) {
        case RTE_EVENT_OP_NEW:
                rte_smp_wmb();
                ssows_new_event(ws, ev);
                break;
        case RTE_EVENT_OP_FORWARD:
                ssows_forward_event(ws, ev);
                break;
        case RTE_EVENT_OP_RELEASE:
                ssows_release_event(ws);
                break;
        default:
                ret = 0;
        }
        return ret;
}

uint16_t __rte_hot
ssows_enq_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
        RTE_SET_USED(nb_events);
        return ssows_enq(port, ev);
}

uint16_t __rte_hot
ssows_enq_new_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
        uint16_t i;
        struct ssows *ws = port;

        rte_smp_wmb();
        for (i = 0; i < nb_events; i++)
                ssows_new_event(ws, &ev[i]);

        return nb_events;
}

uint16_t __rte_hot
ssows_enq_fwd_burst(void *port, const struct rte_event ev[], uint16_t nb_events)
{
        struct ssows *ws = port;
        RTE_SET_USED(nb_events);

        ssows_forward_event(ws, ev);

        return 1;
}

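/* Drain all outstanding work from the given event queue (SSO group): poll
 * GET_WORK in grouped mode until both the admission queue and CQ/DS counts
 * drop to zero, convert each entry to an rte_event and hand it to fn.
 */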
void
ssows_flush_events(struct ssows *ws, uint8_t queue_id,
                                ssows_handle_event_t fn, void *arg)
{
        uint32_t reg_off;
        struct rte_event ev;
        uint64_t enable, aq_cnt = 1, cq_ds_cnt = 1;
        uint64_t get_work0, get_work1;
        uint64_t sched_type_queue;
        uint8_t *base = ssovf_bar(OCTEONTX_SSO_GROUP, queue_id, 0);

        enable = ssovf_read64(base + SSO_VHGRP_QCTL);
        if (!enable)
                return;

        reg_off = SSOW_VHWS_OP_GET_WORK0;
        reg_off |= 1 << 17; /* Grouped */
        reg_off |= 1 << 16; /* WAIT */
        reg_off |= queue_id << 4; /* INDEX_GGRP_MASK(group number) */
        while (aq_cnt || cq_ds_cnt) {
                aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
                cq_ds_cnt = ssovf_read64(base + SSO_VHGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x1FFF1FFF0000;

                ssovf_load_pair(get_work0, get_work1, ws->base + reg_off);

                sched_type_queue = (get_work0 >> 32) & 0xfff;
                ws->cur_tt = sched_type_queue & 0x3;
                ws->cur_grp = sched_type_queue >> 2;
                sched_type_queue = sched_type_queue << 38;
                ev.event = sched_type_queue | (get_work0 & 0xffffffff);
                if (get_work1 && ev.event_type == RTE_EVENT_TYPE_ETHDEV)
                        ev.mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
                                        (ev.event >> 20) & 0x7F,
                                        OCCTX_RX_OFFLOAD_NONE |
                                        OCCTX_RX_MULTI_SEG_F,
                                        ws->lookup_mem);
                else
                        ev.u64 = get_work1;

                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
        }
}

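/* Clear any tag state held by the work slot: deschedule when an ORDERED or
 * ATOMIC tag switch is still pending, otherwise untag a held ORDERED or
 * ATOMIC tag.
 */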
void
ssows_reset(struct ssows *ws)
{
        uint64_t tag;
        uint64_t pend_tag;
        uint8_t pend_tt;
        uint8_t tt;

        tag = ssovf_read64(ws->base + SSOW_VHWS_TAG);
        pend_tag = ssovf_read64(ws->base + SSOW_VHWS_PENDTAG);

        if (pend_tag & (1ULL << 63)) { /* Tagswitch pending */
                pend_tt = (pend_tag >> 32) & 0x3;
                if (pend_tt == SSO_SYNC_ORDERED || pend_tt == SSO_SYNC_ATOMIC)
                        ssows_desched(ws);
        } else {
                tt = (tag >> 32) & 0x3;
                if (tt == SSO_SYNC_ORDERED || tt == SSO_SYNC_ATOMIC)
                        ssows_swtag_untag(ws);
        }
}

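/* Common Tx adapter enqueue: move the work slot into an ATOMIC context
 * (switching from ORDERED or UNTAGGED and waiting for the switch), then
 * transmit the single mbuf on the ethdev queue recorded in the mbuf.
 */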
static __rte_always_inline uint16_t
__sso_event_tx_adapter_enqueue(void *port, struct rte_event ev[],
                               uint16_t nb_events, uint64_t *cmd,
                               const uint16_t flag)
{
        uint16_t port_id;
        uint16_t queue_id;
        struct rte_mbuf *m;
        struct rte_eth_dev *ethdev;
        struct ssows *ws = port;
        struct octeontx_txq *txq;

        RTE_SET_USED(nb_events);
        switch (ev->sched_type) {
        case SSO_SYNC_ORDERED:
                ssows_swtag_norm(ws, ev->event, SSO_SYNC_ATOMIC);
                rte_io_wmb();
                ssows_swtag_wait(ws);
                break;
        case SSO_SYNC_UNTAGGED:
                ssows_swtag_full(ws, ev->u64, ev->event, SSO_SYNC_ATOMIC,
                                ev->queue_id);
                rte_io_wmb();
                ssows_swtag_wait(ws);
                break;
        case SSO_SYNC_ATOMIC:
                rte_io_wmb();
                break;
        }

        m = ev[0].mbuf;
        port_id = m->port;
        queue_id = rte_event_eth_tx_adapter_txq_get(m);
        ethdev = &rte_eth_devices[port_id];
        txq = ethdev->data->tx_queues[queue_id];

        return __octeontx_xmit_pkts(txq, &m, 1, cmd, flag);
}

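/* Template for the Tx adapter enqueue handlers: one function per Tx offload
 * flag combination in SSO_TX_ADPTR_ENQ_FASTPATH_FUNC, each with a command
 * buffer sized for that combination.
 */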
#define T(name, f3, f2, f1, f0, sz, flags)                                   \
static uint16_t __rte_noinline __rte_hot                                     \
sso_event_tx_adapter_enqueue_ ## name(void *port, struct rte_event ev[],     \
                                  uint16_t nb_events)                        \
{                                                                            \
        uint64_t cmd[sz];                                                    \
        return __sso_event_tx_adapter_enqueue(port, ev, nb_events, cmd,      \
                                              flags);                        \
}

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T

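/* Crypto adapter enqueue: pass the event's crypto operation through to the
 * OCTEON TX crypto adapter enqueue routine.
 */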
static uint16_t __rte_hot
ssow_crypto_adapter_enqueue(void *port, struct rte_event ev[],
                            uint16_t nb_events)
{
        RTE_SET_USED(nb_events);

        return otx_crypto_adapter_enqueue(port, ev->event_ptr);
}

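/* Wire up the eventdev fast-path function pointers, picking the dequeue and
 * Tx adapter enqueue variants that match the configured Rx/Tx offload flags.
 */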
void
ssovf_fastpath_fns_set(struct rte_eventdev *dev)
{
        struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

        dev->enqueue       = ssows_enq;
        dev->enqueue_burst = ssows_enq_burst;
        dev->enqueue_new_burst = ssows_enq_new_burst;
        dev->enqueue_forward_burst = ssows_enq_fwd_burst;

        dev->ca_enqueue = ssow_crypto_adapter_enqueue;

        const event_tx_adapter_enqueue ssow_txa_enqueue[2][2][2][2] = {
#define T(name, f3, f2, f1, f0, sz, flags)                              \
        [f3][f2][f1][f0] = sso_event_tx_adapter_enqueue_ ##name,

SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
#undef T
        };

        dev->txa_enqueue = ssow_txa_enqueue
                [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_MBUF_NOFF_F)]
                [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_OL3_OL4_CSUM_F)]
                [!!(edev->tx_offload_flags & OCCTX_TX_OFFLOAD_L3_L4_CSUM_F)]
                [!!(edev->tx_offload_flags & OCCTX_TX_MULTI_SEG_F)];

        dev->txa_enqueue_same_dest = dev->txa_enqueue;

        /* Assign dequeue function pointers */
        const event_dequeue_t ssow_deq[2][2][2] = {
#define R(name, f2, f1, f0, flags)                                      \
        [f2][f1][f0] = ssows_deq_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        dev->dequeue = ssow_deq
                [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
                [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
                [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];

        const event_dequeue_burst_t ssow_deq_burst[2][2][2] = {
#define R(name, f2, f1, f0, flags)                                      \
        [f2][f1][f0] = ssows_deq_burst_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
        };

        dev->dequeue_burst = ssow_deq_burst
                [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
                [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
                [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];

        if (edev->is_timeout_deq) {
                const event_dequeue_t ssow_deq_timeout[2][2][2] = {
#define R(name, f2, f1, f0, flags)                                      \
        [f2][f1][f0] = ssows_deq_timeout_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
                };

                dev->dequeue = ssow_deq_timeout
                        [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
                        [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
                        [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];

                const event_dequeue_burst_t ssow_deq_timeout_burst[2][2][2] = {
#define R(name, f2, f1, f0, flags)                                      \
        [f2][f1][f0] = ssows_deq_timeout_burst_ ##name,

SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
#undef R
                };

                dev->dequeue_burst = ssow_deq_timeout_burst
                        [!!(edev->rx_offload_flags & OCCTX_RX_VLAN_FLTR_F)]
                        [!!(edev->rx_offload_flags & OCCTX_RX_OFFLOAD_CSUM_F)]
                        [!!(edev->rx_offload_flags & OCCTX_RX_MULTI_SEG_F)];
        }
}

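/* Build the Rx ol_flags lookup table: for every HW error level/error code
 * combination, precompute the corresponding mbuf checksum flags so the Rx
 * path can translate hardware status with a single table lookup.
 */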
static void
octeontx_create_rx_ol_flags_array(void *mem)
{
        uint16_t idx, errcode, errlev;
        uint32_t val, *ol_flags;

        /* Skip ptype array memory */
        ol_flags = (uint32_t *)mem;

        for (idx = 0; idx < BIT(ERRCODE_ERRLEN_WIDTH); idx++) {
                errcode = idx & 0xff;
                errlev = (idx & 0x700) >> 8;

                val = RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
                val |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
                val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_UNKNOWN;

                switch (errlev) {
                case OCCTX_ERRLEV_RE:
                        if (errcode) {
                                val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                                val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                        } else {
                                val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                                val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                        }
                        break;
                case OCCTX_ERRLEV_LC:
                        if (errcode == OCCTX_EC_IP4_CSUM) {
                                val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                                val |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
                        } else {
                                val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                        }
                        break;
                case OCCTX_ERRLEV_LD:
                        /* Check if parsed packet is neither IPv4 nor IPv6 */
                        if (errcode == OCCTX_EC_IP4_NOT)
                                break;
                        val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                        if (errcode == OCCTX_EC_L4_CSUM)
                                val |= RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
                        else
                                val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                        break;
                case OCCTX_ERRLEV_LE:
                        if (errcode == OCCTX_EC_IP4_CSUM)
                                val |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
                        else
                                val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                        break;
                case OCCTX_ERRLEV_LF:
                        /* Check if parsed packet is neither IPv4 nor IPv6 */
                        if (errcode == OCCTX_EC_IP4_NOT)
                                break;
                        val |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
                        if (errcode == OCCTX_EC_L4_CSUM)
                                val |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
                        else
                                val |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
                        break;
                }

                ol_flags[idx] = val;
        }
}

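/* Return the shared fast-path lookup memzone, reserving and populating it
 * with the Rx ol_flags table on first use. Returns NULL if the memzone
 * reservation fails.
 */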
void *
octeontx_fastpath_lookup_mem_get(void)
{
        const char name[] = OCCTX_FASTPATH_LOOKUP_MEM;
        const struct rte_memzone *mz;
        void *mem;

        mz = rte_memzone_lookup(name);
        if (mz != NULL)
                return mz->addr;

        /* Request for the first time */
        mz = rte_memzone_reserve_aligned(name, LOOKUP_ARRAY_SZ,
                                         SOCKET_ID_ANY, 0, OCCTX_ALIGN);
        if (mz != NULL) {
                mem = mz->addr;
                /* Form the rx ol_flags based on errcode */
                octeontx_create_rx_ol_flags_array(mem);
                return mem;
        }
        return NULL;
}