/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <arpa/inet.h>

#ifndef _SSOVF_WORKER_H_
#define _SSOVF_WORKER_H_

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include <octeontx_mbox.h>

#include "ssovf_evdev.h"
#include "octeontx_rxtx.h"
#include "otx_cryptodev_ops.h"

/* Alignment */
#define OCCTX_ALIGN  128

/* Fastpath lookup */
#define OCCTX_FASTPATH_LOOKUP_MEM       "octeontx_fastpath_lookup_mem"

/* WQE's ERRCODE + ERRLEV (11 bits) */
#define ERRCODE_ERRLEN_WIDTH            11
#define ERR_ARRAY_SZ                    ((BIT(ERRCODE_ERRLEN_WIDTH)) *\
                                        sizeof(uint32_t))

#define LOOKUP_ARRAY_SZ                 (ERR_ARRAY_SZ)

#define OCCTX_EC_IP4_NOT                0x41
#define OCCTX_EC_IP4_CSUM               0x42
#define OCCTX_EC_L4_CSUM                0x62

enum OCCTX_ERRLEV_E {
        OCCTX_ERRLEV_RE = 0,
        OCCTX_ERRLEV_LA = 1,
        OCCTX_ERRLEV_LB = 2,
        OCCTX_ERRLEV_LC = 3,
        OCCTX_ERRLEV_LD = 4,
        OCCTX_ERRLEV_LE = 5,
        OCCTX_ERRLEV_LF = 6,
        OCCTX_ERRLEV_LG = 7,
};

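/* SSO synchronization (tag) types; the values match the 2-bit tag-type
 * field decoded from a getwork response (see ssows_get_work()).
 */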
enum {
        SSO_SYNC_ORDERED,
        SSO_SYNC_ATOMIC,
        SSO_SYNC_UNTAGGED,
        SSO_SYNC_EMPTY
};

/* SSO Operations */

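/* Translate the 11-bit ERRLEV + ERRCODE field of a WQE into mbuf Rx
 * offload flags using the precomputed fastpath lookup table.
 */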
static __rte_always_inline uint32_t
ssovf_octeontx_rx_olflags_get(const void * const lookup_mem, const uint64_t in)
{
        const uint32_t * const ol_flags = (const uint32_t *)lookup_mem;

        return ol_flags[(in & 0x7ff)];
}

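/* Walk the PKI buffer-link list attached to the WQE and chain the
 * remaining segments onto the head mbuf, fixing up data_off and
 * data_len for each segment.
 */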
static __rte_always_inline void
ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
                               struct rte_mbuf *mbuf)
{
        octtx_pki_buflink_t *buflink;
        rte_iova_t *iova_list;
        uint8_t nb_segs;
        uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;

        nb_segs = wqe->s.w0.bufs;

        buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
                                          sizeof(octtx_pki_buflink_t));

        while (--nb_segs) {
                iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
                mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
                              - (OCTTX_PACKET_LATER_SKIP / 128);
                mbuf = mbuf->next;

                mbuf->data_off = sizeof(octtx_pki_buflink_t);

                RTE_MEMPOOL_CHECK_COOKIES(mbuf->pool, (void **)&mbuf, 1, 1);
                if (nb_segs == 1)
                        mbuf->data_len = bytes_left;
                else
                        mbuf->data_len = buflink->w0.s.size;

                bytes_left = bytes_left - buflink->w0.s.size;
                buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);

        }
}

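/* Convert a received WQE into an rte_mbuf: recover the mbuf address that
 * precedes the WQE by OCTTX_PACKET_WQE_SKIP bytes, then fill in packet
 * type, lengths, offload flags, VLAN and port based on the compile-time
 * 'flag' bits.
 */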
static __rte_always_inline struct rte_mbuf *
ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
                          const uint16_t flag, const void *lookup_mem)
{
        struct rte_mbuf *mbuf;
        octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;

        /* Get mbuf from wqe */
        mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
        rte_prefetch_non_temporal(mbuf);
        mbuf->packet_type =
                ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
        mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
        mbuf->ol_flags = 0;
        mbuf->pkt_len = wqe->s.w1.len;

        if (!!(flag & OCCTX_RX_OFFLOAD_CSUM_F))
                mbuf->ol_flags = ssovf_octeontx_rx_olflags_get(lookup_mem,
                                                               wqe->w[2]);

        if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
                mbuf->nb_segs = wqe->s.w0.bufs;
                mbuf->data_len = wqe->s.w5.size;
                ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
        } else {
                mbuf->nb_segs = 1;
                mbuf->data_len = mbuf->pkt_len;
        }

        if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
                if (likely(wqe->s.w2.vv)) {
                        mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
                        mbuf->vlan_tci =
                                ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
                                        mbuf->data_off + wqe->s.w4.vlptr + 2)));
                }
        }

        mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
        rte_mbuf_refcnt_set(mbuf, 1);

        return mbuf;
}

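/* Free every mbuf segment linked to a WQE without converting it to a
 * packet, used when the work must be dropped rather than delivered.
 */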
static __rte_always_inline void
ssovf_octeontx_wqe_free(uint64_t work)
{
        octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
        uint8_t nb_segs = wqe->s.w0.bufs;
        octtx_pki_buflink_t *buflink;
        struct rte_mbuf *mbuf, *head;
        rte_iova_t *iova_list;

        mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
        buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
                                          sizeof(octtx_pki_buflink_t));
        head = mbuf;
        while (--nb_segs) {
                iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
                mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
                        - (OCTTX_PACKET_LATER_SKIP / 128);

                mbuf->next = NULL;
                rte_pktmbuf_free(mbuf);
                buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
        }
        rte_pktmbuf_free(head);
}

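/* Issue a GET_WORK request via a load-pair from the getwork address and
 * decode the response: get_work0 carries tag, tag type and group (folded
 * into ev->event), get_work1 the work pointer, which is translated into
 * an mbuf or a crypto op depending on the event type.
 * Returns 1 if work was scheduled to this work-slot, 0 otherwise.
 */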
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
{
        uint64_t get_work0, get_work1;
        uint64_t sched_type_queue;

        ssovf_load_pair(get_work0, get_work1, ws->getwork);

        sched_type_queue = (get_work0 >> 32) & 0xfff;
        ws->cur_tt = sched_type_queue & 0x3;
        ws->cur_grp = sched_type_queue >> 2;
        sched_type_queue = sched_type_queue << 38;
        ev->event = sched_type_queue | (get_work0 & 0xffffffff);

        if (get_work1) {
                if (ev->event_type == RTE_EVENT_TYPE_ETHDEV)
                        get_work1 = (uintptr_t)ssovf_octeontx_wqe_to_pkt(
                                get_work1, (ev->event >> 20) & 0x7F, flag,
                                ws->lookup_mem);
                else if (ev->event_type == RTE_EVENT_TYPE_CRYPTODEV)
                        get_work1 = otx_crypto_adapter_dequeue(get_work1);
                ev->u64 = get_work1;
        } else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
                ssovf_octeontx_wqe_free(get_work1);
                return 0;
        }

        return !!get_work1;
}

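/* Enqueue new work to group 'grp' with an atomic store-pair of
 * (tag | tag type) and the event pointer to the group's add-work address.
 */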
static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
                        const uint8_t new_tt, const uint8_t grp)
{
        uint64_t add_work0;

        add_work0 = tag | ((uint64_t)(new_tt) << 32);
        ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}

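/* SWTAG_FULL: switch the current tag and tag type, update the work
 * pointer and move the event to group 'grp' in one store-pair.
 */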
static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
                        const uint8_t new_tt, const uint8_t grp)
{
        uint64_t swtag_full0;

        swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
                                ((uint64_t)grp << 34);
        ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
                                SSOW_VHWS_OP_SWTAG_FULL0));
}

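/* SWTAG_DESCHED: switch the tag and tag type, then deschedule the event
 * to group 'grp' for later rescheduling by the SSO.
 */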
static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
        uint64_t val;

        val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
        ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}

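/* SWTAG_NORM: switch the current tag and tag type without changing the
 * group or the work pointer.
 */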
static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
        uint64_t val;

        val = tag | ((uint64_t)(new_tt & 0x3) << 32);
        ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}

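/* SWTAG_UNTAG: drop the current tag and mark the work-slot untagged. */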
static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
        ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
        ws->cur_tt = SSO_SYNC_UNTAGGED;
}

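/* UPD_WQP_GRP: update the work-queue pointer and group of the event
 * currently held by this work-slot.
 */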
static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
        ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
                                SSOW_VHWS_OP_UPD_WQP_GRP0));
}

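/* DESCHED: deschedule the event currently held by this work-slot. */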
static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
        ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}

static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
        /* Wait for the SWTAG/SWTAG_FULL operation */
        while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
                ;
}

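/* Busy-wait until this work-slot reports head-of-schedule status
 * (TAG status register bit 35), after which the results of an ordered
 * flow can be committed.
 */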
static __rte_always_inline void
ssows_head_wait(struct ssows *ws)
{
        while (!(ssovf_read64(ws->base + SSOW_VHWS_TAG) & (1ULL << 35)))
                ;
}
#endif /* _SSOVF_WORKER_H_ */