event/octeontx: fix partial Rx packet handling
[dpdk.git] / drivers / event / octeontx / ssovf_worker.h
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4
5 #include <rte_common.h>
6 #include <rte_branch_prediction.h>
7
8 #include <octeontx_mbox.h>
9
10 #include "ssovf_evdev.h"
11 #include "octeontx_rxtx.h"
12
/* SSO scheduling synchronization (tag) types. ws->cur_tt holds one of
 * these values: it is decoded from the getwork response in
 * ssows_get_work() and set to SSO_SYNC_UNTAGGED by ssows_swtag_untag().
 * The numeric values are a hardware encoding — do not reorder.
 */
enum {
	SSO_SYNC_ORDERED,
	SSO_SYNC_ATOMIC,
	SSO_SYNC_UNTAGGED,
	SSO_SYNC_EMPTY
};
19
#ifndef __hot
/* Hint the compiler that functions marked __hot are on the fast path
 * (GCC/Clang "hot" function attribute).
 */
#define __hot	__attribute__((hot))
#endif
23
24 /* SSO Operations */
25
26 static __rte_always_inline struct rte_mbuf *
27 ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
28 {
29         struct rte_mbuf *mbuf;
30         octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
31
32         /* Get mbuf from wqe */
33         mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
34         rte_prefetch_non_temporal(mbuf);
35         mbuf->packet_type =
36                 ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
37         mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
38         mbuf->pkt_len = wqe->s.w1.len;
39         mbuf->data_len = mbuf->pkt_len;
40         mbuf->nb_segs = 1;
41         mbuf->ol_flags = 0;
42         mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
43         rte_mbuf_refcnt_set(mbuf, 1);
44
45         return mbuf;
46 }
47
48 static __rte_always_inline void
49 ssovf_octeontx_wqe_free(uint64_t work)
50 {
51         octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
52         struct rte_mbuf *mbuf;
53
54         mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
55         rte_pktmbuf_free(mbuf);
56 }
57
/*
 * Poll the worker's GETWORK port and translate the response into an
 * rte_event.
 *
 * Returns 1 when an event was delivered in *ev, 0 when no work was
 * available (or the work was dropped as a partial packet).
 */
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev)
{
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;

	/* Paired 128-bit load: word0 carries the event metadata,
	 * word1 the event/WQE pointer.
	 */
	ssovf_load_pair(get_work0, get_work1, ws->getwork);

	/* word0 bits [43:32]: tag type in the low 2 bits, group above. */
	sched_type_queue = (get_work0 >> 32) & 0xfff;
	ws->cur_tt = sched_type_queue & 0x3;
	ws->cur_grp = sched_type_queue >> 2;
	/* Relocate sched_type/queue into rte_event bits [49:38]; the low
	 * 32 bits of word0 provide the remaining event fields.
	 */
	sched_type_queue = sched_type_queue << 38;
	ev->event = sched_type_queue | (get_work0 & 0xffffffff);

	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
		/* Rx event: convert the WQE into an mbuf. Bits [26:20] of
		 * the event word carry the packed port info used for the
		 * pchan lookup.
		 */
		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
				(ev->event >> 20) & 0x7F);
	} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
		/* All-ones tag marks a partially received packet (see the
		 * "fix partial Rx packet handling" change): free its mbuf
		 * and report no work.
		 */
		ssovf_octeontx_wqe_free(get_work1);
		return 0;
	} else {
		/* Non-ethdev event: pass the raw 64-bit payload through. */
		ev->u64 = get_work1;
	}

	return !!get_work1;
}
84
85 static __rte_always_inline void
86 ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
87                         const uint8_t new_tt, const uint8_t grp)
88 {
89         uint64_t add_work0;
90
91         add_work0 = tag | ((uint64_t)(new_tt) << 32);
92         ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
93 }
94
95 static __rte_always_inline void
96 ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
97                         const uint8_t new_tt, const uint8_t grp)
98 {
99         uint64_t swtag_full0;
100
101         swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
102                                 ((uint64_t)grp << 34);
103         ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
104                                 SSOW_VHWS_OP_SWTAG_FULL0));
105 }
106
107 static __rte_always_inline void
108 ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
109 {
110         uint64_t val;
111
112         val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
113         ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
114 }
115
116 static __rte_always_inline void
117 ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
118 {
119         uint64_t val;
120
121         val = tag | ((uint64_t)(new_tt & 0x3) << 32);
122         ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
123 }
124
/* Release the current tag via SWTAG_UNTAG and mirror the new state in
 * the worker's software-tracked tag type.
 */
static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
}
131
132 static __rte_always_inline void
133 ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
134 {
135         ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
136                                 SSOW_VHWS_OP_UPD_WQP_GRP0));
137 }
138
/* Deschedule the event currently held by this worker (any write to the
 * DESCHED op address triggers it; the value is ignored).
 */
static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}
144
145 static __rte_always_inline void
146 ssows_swtag_wait(struct ssows *ws)
147 {
148         /* Wait for the SWTAG/SWTAG_FULL operation */
149         while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
150         ;
151 }