/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <arpa/inet.h>

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include <octeontx_mbox.h>

#include "ssovf_evdev.h"
#include "octeontx_rxtx.h"

/* Alignment */
#define OCCTX_ALIGN  128

/* Fastpath lookup */
#define OCCTX_FASTPATH_LOOKUP_MEM       "octeontx_fastpath_lookup_mem"

/* WQE's ERRCODE + ERRLEV (11 bits) */
#define ERRCODE_ERRLEN_WIDTH            11
#define ERR_ARRAY_SZ                    ((BIT(ERRCODE_ERRLEN_WIDTH)) *\
                                        sizeof(uint32_t))

#define LOOKUP_ARRAY_SZ                 (ERR_ARRAY_SZ)

#define OCCTX_EC_IP4_NOT                0x41
#define OCCTX_EC_IP4_CSUM               0x42
#define OCCTX_EC_L4_CSUM                0x62

enum OCCTX_ERRLEV_E {
        OCCTX_ERRLEV_RE = 0,
        OCCTX_ERRLEV_LA = 1,
        OCCTX_ERRLEV_LB = 2,
        OCCTX_ERRLEV_LC = 3,
        OCCTX_ERRLEV_LD = 4,
        OCCTX_ERRLEV_LE = 5,
        OCCTX_ERRLEV_LF = 6,
        OCCTX_ERRLEV_LG = 7,
};

enum {
        SSO_SYNC_ORDERED,
        SSO_SYNC_ATOMIC,
        SSO_SYNC_UNTAGGED,
        SSO_SYNC_EMPTY
};

/* SSO Operations */

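/*
 * Translate the WQE's 11-bit ERRLEV+ERRCODE field (low bits of 'in') into
 * Rx ol_flags using the precomputed fastpath lookup table in lookup_mem.
 */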
static __rte_always_inline uint32_t
ssovf_octeontx_rx_olflags_get(const void * const lookup_mem, const uint64_t in)
{
        const uint32_t * const ol_flags = (const uint32_t *)lookup_mem;

        return ol_flags[(in & 0x7ff)];
}

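/*
 * Attach the trailing segments of a multi-segment packet to the head mbuf.
 * Each PKI buffer-link entry yields the address of the next buffer; the
 * owning mbuf is recovered from that address, and its data_off/data_len are
 * set per segment, with the final segment taking whatever bytes remain.
 */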
static __rte_always_inline void
ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
                               struct rte_mbuf *mbuf)
{
        octtx_pki_buflink_t *buflink;
        rte_iova_t *iova_list;
        uint8_t nb_segs;
        uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;

        nb_segs = wqe->s.w0.bufs;

        buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
                                          sizeof(octtx_pki_buflink_t));

        while (--nb_segs) {
                iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
                mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
                              - (OCTTX_PACKET_LATER_SKIP / 128);
                mbuf = mbuf->next;

                mbuf->data_off = sizeof(octtx_pki_buflink_t);

                __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
                if (nb_segs == 1)
                        mbuf->data_len = bytes_left;
                else
                        mbuf->data_len = buflink->w0.s.size;

                bytes_left = bytes_left - buflink->w0.s.size;
                buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
        }
}

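/*
 * Translate a received WQE into its rte_mbuf. The mbuf sits
 * OCTTX_PACKET_WQE_SKIP bytes before the WQE, so its address is recovered
 * arithmetically; packet type, data offset, lengths and, depending on the
 * compile-time 'flag' template, checksum ol_flags, the segment chain and
 * the VLAN TCI are then filled in from the WQE words.
 */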
static __rte_always_inline struct rte_mbuf *
ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
                          const uint16_t flag, const void *lookup_mem)
{
        struct rte_mbuf *mbuf;
        octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;

        /* Get mbuf from wqe */
        mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
        rte_prefetch_non_temporal(mbuf);
        mbuf->packet_type =
                ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
        mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
        mbuf->ol_flags = 0;
        mbuf->pkt_len = wqe->s.w1.len;

        if (!!(flag & OCCTX_RX_OFFLOAD_CSUM_F))
                mbuf->ol_flags = ssovf_octeontx_rx_olflags_get(lookup_mem,
                                                               wqe->w[2]);

        if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
                mbuf->nb_segs = wqe->s.w0.bufs;
                mbuf->data_len = wqe->s.w5.size;
                ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
        } else {
                mbuf->nb_segs = 1;
                mbuf->data_len = mbuf->pkt_len;
        }

        if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
                if (likely(wqe->s.w2.vv)) {
                        mbuf->ol_flags |= PKT_RX_VLAN;
                        mbuf->vlan_tci =
                                ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
                                        mbuf->data_off + wqe->s.w4.vlptr + 2)));
                }
        }

        mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
        rte_mbuf_refcnt_set(mbuf, 1);

        return mbuf;
}

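/*
 * Free an unconsumed WQE's packet: release each trailing segment of the
 * buffer-link chain first, then the head mbuf.
 */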
static __rte_always_inline void
ssovf_octeontx_wqe_free(uint64_t work)
{
        octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
        uint8_t nb_segs = wqe->s.w0.bufs;
        octtx_pki_buflink_t *buflink;
        struct rte_mbuf *mbuf, *head;
        rte_iova_t *iova_list;

        mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
        buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
                                          sizeof(octtx_pki_buflink_t));
        head = mbuf;
        while (--nb_segs) {
                iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
                mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
                        - (OCTTX_PACKET_LATER_SKIP / 128);

                mbuf->next = NULL;
                rte_pktmbuf_free(mbuf);
                buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
        }
        rte_pktmbuf_free(head);
}

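/*
 * Fetch one work item from the workslot's GET_WORK address pair and
 * translate it into an rte_event:
 *   - bits [43:32] of get_work0 carry the schedule type (2 bits) and
 *     group, which are cached in ws->cur_tt/ws->cur_grp and shifted into
 *     the event word;
 *   - ethdev work is converted to an mbuf via ssovf_octeontx_wqe_to_pkt();
 *   - a low 32-bit pattern of all ones marks work whose WQE is only to be
 *     freed, with no event delivered.
 * Returns 1 when an event was returned, 0 otherwise.
 */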
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
{
        uint64_t get_work0, get_work1;
        uint64_t sched_type_queue;

        ssovf_load_pair(get_work0, get_work1, ws->getwork);

        sched_type_queue = (get_work0 >> 32) & 0xfff;
        ws->cur_tt = sched_type_queue & 0x3;
        ws->cur_grp = sched_type_queue >> 2;
        sched_type_queue = sched_type_queue << 38;
        ev->event = sched_type_queue | (get_work0 & 0xffffffff);

        if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
                ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
                                (ev->event >> 20) & 0x7F, flag, ws->lookup_mem);
        } else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
                ssovf_octeontx_wqe_free(get_work1);
                return 0;
        } else {
                ev->u64 = get_work1;
        }

        return !!get_work1;
}

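/*
 * Inject a new event: the tag and schedule type go in the first word, the
 * event pointer in the second, stored as a pair to the target group's
 * add-work address (ws->grps[grp]).
 */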
static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
                        const uint8_t new_tt, const uint8_t grp)
{
        uint64_t add_work0;

        add_work0 = tag | ((uint64_t)(new_tt) << 32);
        ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}

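/*
 * SWTAG_FULL: switch the current tag, schedule type and group, and update
 * the work pointer, in a single store-pair to the workslot.
 */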
static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
                        const uint8_t new_tt, const uint8_t grp)
{
        uint64_t swtag_full0;

        swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
                                ((uint64_t)grp << 34);
        ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
                                SSOW_VHWS_OP_SWTAG_FULL0));
}

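/*
 * SWTAG_DESCHED: switch the tag and deschedule the current work to the
 * given group in one operation.
 */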
static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
        uint64_t val;

        val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
        ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}

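/* SWTAG_NORM: switch the current tag and schedule type within the group. */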
static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
        uint64_t val;

        val = tag | ((uint64_t)(new_tt & 0x3) << 32);
        ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}

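/* SWTAG_UNTAG: drop the current tag, leaving the workslot untagged. */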
static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
        ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
        ws->cur_tt = SSO_SYNC_UNTAGGED;
}

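/*
 * UPD_WQP: update the group and work-queue pointer for the work held by
 * this workslot.
 */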
static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
        ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
                                SSOW_VHWS_OP_UPD_WQP_GRP0));
}

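/* Deschedule the work currently held by the workslot. */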
static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
        ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}

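/* Spin until the pending tag switch completes (SWTP reads as zero). */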
static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
        /* Wait for the SWTAG/SWTAG_FULL operation */
        while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
                ;
}