04c831514dffc1a1e6db2f04c9e5d9574d5c1a71
[dpdk.git] / drivers / event / octeontx / ssovf_worker.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4
5 #include <rte_common.h>
6 #include <rte_branch_prediction.h>
7
8 #include <octeontx_mbox.h>
9
10 #include "ssovf_evdev.h"
11 #include "octeontx_rxtx.h"
12
/* SSO tag (synchronization) types as encoded in the work word's
 * 2-bit tag-type field (see ssows_get_work: cur_tt = sched_type_queue & 0x3).
 */
enum {
        SSO_SYNC_ORDERED,
        SSO_SYNC_ATOMIC,
        SSO_SYNC_UNTAGGED,
        SSO_SYNC_EMPTY
};
19
20 /* SSO Operations */
21
/* Chain the 2nd..Nth receive segments onto the head mbuf.
 *
 * wqe:  PKI work-queue entry describing the received packet.
 * mbuf: head mbuf (first segment); caller has already set pkt_len to
 *       w1.len and data_len to the first segment's size (w5.size).
 *
 * Walks the hardware buffer-link list: each buflink's w1.s.addr holds the
 * IOVA list of the next segment, from which the next mbuf's address and
 * next buflink are recovered by fixed offsets.
 */
static __rte_always_inline void
ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
                               struct rte_mbuf *mbuf)
{
        octtx_pki_buflink_t *buflink;
        rte_iova_t *iova_list;
        uint8_t nb_segs;
        /* Bytes carried by segments 2..N (total minus first segment) */
        uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;

        nb_segs = wqe->s.w0.bufs;

        /* The buflink header sits immediately before the first segment data */
        buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
                                          sizeof(octtx_pki_buflink_t));

        while (--nb_segs) {
                iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
                /* NOTE(review): next mbuf recovered by stepping back two IOVA
                 * words, then OCTTX_PACKET_LATER_SKIP bytes expressed in
                 * 128-byte (sizeof rte_mbuf) units -- assumes the mbuf header
                 * is 128 bytes; confirm against octeontx_rxtx.h.
                 */
                mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
                              - (OCTTX_PACKET_LATER_SKIP / 128);
                mbuf = mbuf->next;

                mbuf->data_off = sizeof(octtx_pki_buflink_t);

                __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
                if (nb_segs == 1)
                        /* Last segment holds only the remaining bytes */
                        mbuf->data_len = bytes_left;
                else
                        mbuf->data_len = buflink->w0.s.size;

                bytes_left = bytes_left - buflink->w0.s.size;
                buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);

        }
}
55
56 static __rte_always_inline struct rte_mbuf *
57 ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
58                           const uint16_t flag)
59 {
60         struct rte_mbuf *mbuf;
61         octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
62
63         /* Get mbuf from wqe */
64         mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
65         rte_prefetch_non_temporal(mbuf);
66         mbuf->packet_type =
67                 ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
68         mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
69         mbuf->ol_flags = 0;
70         mbuf->pkt_len = wqe->s.w1.len;
71
72         if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
73                 mbuf->nb_segs = wqe->s.w0.bufs;
74                 mbuf->data_len = wqe->s.w5.size;
75                 ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
76         } else {
77                 mbuf->nb_segs = 1;
78                 mbuf->data_len = mbuf->pkt_len;
79         }
80
81         if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
82                 if (likely(wqe->s.w2.vv)) {
83                         mbuf->ol_flags |= PKT_RX_VLAN;
84                         mbuf->vlan_tci =
85                                 ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
86                                         mbuf->data_off + wqe->s.w4.vlptr + 2)));
87                 }
88         }
89
90         mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
91         rte_mbuf_refcnt_set(mbuf, 1);
92
93         return mbuf;
94 }
95
96 static __rte_always_inline void
97 ssovf_octeontx_wqe_free(uint64_t work)
98 {
99         octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
100         uint8_t nb_segs = wqe->s.w0.bufs;
101         octtx_pki_buflink_t *buflink;
102         struct rte_mbuf *mbuf, *head;
103         rte_iova_t *iova_list;
104
105         mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
106         buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
107                                           sizeof(octtx_pki_buflink_t));
108         head = mbuf;
109         while (--nb_segs) {
110                 iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
111                 mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
112                         - (OCTTX_PACKET_LATER_SKIP / 128);
113
114                 mbuf->next = NULL;
115                 rte_pktmbuf_free(mbuf);
116                 buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
117         }
118         rte_pktmbuf_free(head);
119 }
120
/* Poll the SSO get-work register pair for one event.
 *
 * ws:   per-worker state (getwork register address, cached tag/group).
 * ev:   output event; filled from the two get-work words.
 * flag: compile-time Rx offload flags forwarded to WQE-to-mbuf conversion.
 *
 * Returns 1 when an event was received, 0 when there was no work
 * (or the work was an internal drop marker and has been freed).
 */
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
{
        uint64_t get_work0, get_work1;
        uint64_t sched_type_queue;

        /* Paired load: word0 = tag/type/group, word1 = event or WQE pointer */
        ssovf_load_pair(get_work0, get_work1, ws->getwork);

        /* Bits [43:32] of word0: 2-bit tag type + group id */
        sched_type_queue = (get_work0 >> 32) & 0xfff;
        ws->cur_tt = sched_type_queue & 0x3;
        ws->cur_grp = sched_type_queue >> 2;
        /* Reposition type/group into the rte_event word (from bit 38 up);
         * low 32 bits of word0 carry the event's tag/flow id.
         */
        sched_type_queue = sched_type_queue << 38;
        ev->event = sched_type_queue | (get_work0 & 0xffffffff);

        if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
                /* Ethdev Rx: convert WQE to mbuf; bits [26:20] hold port info */
                ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
                                (ev->event >> 20) & 0x7F, flag);
        } else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
                /* All-ones tag marks work to be dropped: free its buffers */
                ssovf_octeontx_wqe_free(get_work1);
                return 0;
        } else {
                ev->u64 = get_work1;
        }

        return !!get_work1;
}
147
148 static __rte_always_inline void
149 ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
150                         const uint8_t new_tt, const uint8_t grp)
151 {
152         uint64_t add_work0;
153
154         add_work0 = tag | ((uint64_t)(new_tt) << 32);
155         ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
156 }
157
158 static __rte_always_inline void
159 ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
160                         const uint8_t new_tt, const uint8_t grp)
161 {
162         uint64_t swtag_full0;
163
164         swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
165                                 ((uint64_t)grp << 34);
166         ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
167                                 SSOW_VHWS_OP_SWTAG_FULL0));
168 }
169
170 static __rte_always_inline void
171 ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
172 {
173         uint64_t val;
174
175         val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
176         ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
177 }
178
179 static __rte_always_inline void
180 ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
181 {
182         uint64_t val;
183
184         val = tag | ((uint64_t)(new_tt & 0x3) << 32);
185         ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
186 }
187
188 static __rte_always_inline void
189 ssows_swtag_untag(struct ssows *ws)
190 {
191         ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
192         ws->cur_tt = SSO_SYNC_UNTAGGED;
193 }
194
195 static __rte_always_inline void
196 ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
197 {
198         ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
199                                 SSOW_VHWS_OP_UPD_WQP_GRP0));
200 }
201
202 static __rte_always_inline void
203 ssows_desched(struct ssows *ws)
204 {
205         ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
206 }
207
208 static __rte_always_inline void
209 ssows_swtag_wait(struct ssows *ws)
210 {
211         /* Wait for the SWTAG/SWTAG_FULL operation */
212         while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
213         ;
214 }