/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
#include <arpa/inet.h>

#include <rte_common.h>
#include <rte_branch_prediction.h>

#include <octeontx_mbox.h>

#include "ssovf_evdev.h"
#include "octeontx_rxtx.h"
/* Alignment */
#define OCCTX_ALIGN  128

/* Fastpath lookup */
#define OCCTX_FASTPATH_LOOKUP_MEM	"octeontx_fastpath_lookup_mem"

/* WQE's ERRCODE + ERRLEV (11 bits) */
#define ERRCODE_ERRLEN_WIDTH		11
#define ERR_ARRAY_SZ			((BIT(ERRCODE_ERRLEN_WIDTH)) *\
					sizeof(uint32_t))

#define LOOKUP_ARRAY_SZ			(ERR_ARRAY_SZ)

#define OCCTX_EC_IP4_NOT		0x41
#define OCCTX_EC_IP4_CSUM		0x42
#define OCCTX_EC_L4_CSUM		0x62
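
/* Tag sync types as encoded in the 2-bit tag type field of a work item;
 * SSO_SYNC_UNTAGGED is referenced by ssows_swtag_untag() below.
 */
enum {
	SSO_SYNC_ORDERED,
	SSO_SYNC_ATOMIC,
	SSO_SYNC_UNTAGGED,
	SSO_SYNC_EMPTY
};

/* SSO Operations */

/* Fetch the receive offload flags for a packet from the errcode/errlev
 * lookup table (see OCCTX_FASTPATH_LOOKUP_MEM); the low
 * ERRCODE_ERRLEN_WIDTH bits of the WQE word index the table.
 */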
static __rte_always_inline uint32_t
ssovf_octeontx_rx_olflags_get(const void * const lookup_mem, const uint64_t in)
{
	const uint32_t * const ol_flags = (const uint32_t *)lookup_mem;

	return ol_flags[(in & 0x7ff)];
}
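
/* Chain the remaining segments of a multi-segment packet onto the head
 * mbuf by walking the PKI buffer-link list, fixing up data_off and
 * data_len of every segment.
 */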
static __rte_always_inline void
ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
			       struct rte_mbuf *mbuf)
{
	octtx_pki_buflink_t *buflink;
	rte_iova_t *iova_list;
	uint8_t nb_segs;
	uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;

	nb_segs = wqe->s.w0.bufs;

	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
					  sizeof(octtx_pki_buflink_t));

	while (--nb_segs) {
		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
		mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
			      - (OCTTX_PACKET_LATER_SKIP / 128);
		mbuf = mbuf->next;

		mbuf->data_off = sizeof(octtx_pki_buflink_t);

		__mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);

		/* The last segment carries whatever bytes are left */
		if (nb_segs == 1)
			mbuf->data_len = bytes_left;
		else
			mbuf->data_len = buflink->w0.s.size;

		bytes_left = bytes_left - buflink->w0.s.size;
		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
	}
}
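
/* Convert a receive work-queue entry (WQE) into an rte_mbuf. 'flag' is a
 * compile-time constant selecting which fixups run: checksum offload flag
 * lookup, multi-segment extraction and VLAN tag filtering.
 */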
static __rte_always_inline struct rte_mbuf *
ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
			  const uint16_t flag, const void *lookup_mem)
{
	struct rte_mbuf *mbuf;
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;

	/* Get mbuf from wqe */
	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
	rte_prefetch_non_temporal(mbuf);

	mbuf->packet_type =
	ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
	mbuf->ol_flags = 0;
	mbuf->pkt_len = wqe->s.w1.len;

	if (!!(flag & OCCTX_RX_OFFLOAD_CSUM_F))
		mbuf->ol_flags = ssovf_octeontx_rx_olflags_get(lookup_mem,
							       wqe->w[2]);

	if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
		mbuf->nb_segs = wqe->s.w0.bufs;
		mbuf->data_len = wqe->s.w5.size;
		ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
	} else {
		mbuf->nb_segs = 1;
		mbuf->data_len = mbuf->pkt_len;
	}

	if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
		if (likely(wqe->s.w2.vv)) {
			mbuf->ol_flags |= PKT_RX_VLAN;
			mbuf->vlan_tci =
				ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
					mbuf->data_off + wqe->s.w4.vlptr + 2)));
		}
	}

	mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
	rte_mbuf_refcnt_set(mbuf, 1);

	return mbuf;
}
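
/* Free the head mbuf and all chained segments attached to a WQE that is
 * being dropped instead of delivered as an event.
 */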
static __rte_always_inline void
ssovf_octeontx_wqe_free(uint64_t work)
{
	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
	uint8_t nb_segs = wqe->s.w0.bufs;
	octtx_pki_buflink_t *buflink;
	struct rte_mbuf *mbuf, *head;
	rte_iova_t *iova_list;

	mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
	buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
					  sizeof(octtx_pki_buflink_t));

	/* Free the chained segments first, the head mbuf last */
	head = mbuf;
	while (--nb_segs) {
		iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
		mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
			- (OCTTX_PACKET_LATER_SKIP / 128);

		mbuf->next = NULL;
		rte_pktmbuf_free(mbuf);
		buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
	}
	rte_pktmbuf_free(head);
}
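
/* Poll the workslot's GETWORK address for the next event. The first word
 * returned carries tag, tag type and group, which are folded into
 * ev->event; the second word is the event pointer (the WQE for ethdev
 * events). Returns 1 if valid work was received, 0 otherwise.
 */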
static __rte_always_inline uint16_t
ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
{
	uint64_t get_work0, get_work1;
	uint64_t sched_type_queue;

	ssovf_load_pair(get_work0, get_work1, ws->getwork);

	sched_type_queue = (get_work0 >> 32) & 0xfff;
	ws->cur_tt = sched_type_queue & 0x3;
	ws->cur_grp = sched_type_queue >> 2;
	sched_type_queue = sched_type_queue << 38;
	ev->event = sched_type_queue | (get_work0 & 0xffffffff);

	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
				(ev->event >> 20) & 0x7F, flag, ws->lookup_mem);
	} else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
		ssovf_octeontx_wqe_free(get_work1);
		get_work1 = 0;
	}

	ev->u64 = get_work1;
	return !!get_work1;
}
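
/* Add a new event to group 'grp': a paired store of tag/tag-type and the
 * event pointer to the group's add-work address.
 */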
static __rte_always_inline void
ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t add_work0;

	add_work0 = tag | ((uint64_t)(new_tt) << 32);
	ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
}
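
/* SWTAG_FULL: switch the tag, tag type and group of the current work and
 * update its work-queue pointer in a single operation.
 */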
static __rte_always_inline void
ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
			const uint8_t new_tt, const uint8_t grp)
{
	uint64_t swtag_full0;

	swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
				((uint64_t)grp << 34);
	ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
				SSOW_VHWS_OP_SWTAG_FULL0));
}
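
/* SWTAG_DESCHED: switch the tag and deschedule the current work to the
 * given group.
 */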
static __rte_always_inline void
ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
}
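
/* SWTAG_NORM: switch to a new tag and tag type within the current group. */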
static __rte_always_inline void
ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
{
	uint64_t val;

	val = tag | ((uint64_t)(new_tt & 0x3) << 32);
	ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
}
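
/* SWTAG_UNTAG: move the current work to the UNTAGGED state, releasing any
 * ordering/atomicity the tag provided.
 */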
static __rte_always_inline void
ssows_swtag_untag(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
	ws->cur_tt = SSO_SYNC_UNTAGGED;
}
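
/* Update the work-queue pointer of the current work and move it to group
 * 'grp'.
 */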
static __rte_always_inline void
ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
{
	ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
				SSOW_VHWS_OP_UPD_WQP_GRP0));
}
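
/* Deschedule the current work; the SSO may reschedule it later. */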
static __rte_always_inline void
ssows_desched(struct ssows *ws)
{
	ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
}
static __rte_always_inline void
ssows_swtag_wait(struct ssows *ws)
{
	/* Wait for the SWTAG/SWTAG_FULL operation to complete (SWTP clears) */
	while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))
		;
}