/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
5 #include <rte_common.h>
6 #include <rte_branch_prediction.h>
8 #include <octeontx_mbox.h>
10 #include "ssovf_evdev.h"
11 #include "octeontx_rxtx.h"
22 static __rte_always_inline void
23 ssovf_octeontx_wqe_xtract_mseg(octtx_wqe_t *wqe,
24 struct rte_mbuf *mbuf)
26 octtx_pki_buflink_t *buflink;
27 rte_iova_t *iova_list;
29 uint64_t bytes_left = wqe->s.w1.len - wqe->s.w5.size;
31 nb_segs = wqe->s.w0.bufs;
33 buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
34 sizeof(octtx_pki_buflink_t));
37 iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
38 mbuf->next = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
39 - (OCTTX_PACKET_LATER_SKIP / 128);
42 mbuf->data_off = sizeof(octtx_pki_buflink_t);
44 __mempool_check_cookies(mbuf->pool, (void **)&mbuf, 1, 1);
46 mbuf->data_len = bytes_left;
48 mbuf->data_len = buflink->w0.s.size;
50 bytes_left = bytes_left - buflink->w0.s.size;
51 buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
56 static __rte_always_inline struct rte_mbuf *
57 ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info,
60 struct rte_mbuf *mbuf;
61 octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
63 /* Get mbuf from wqe */
64 mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
65 rte_prefetch_non_temporal(mbuf);
67 ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
68 mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
70 mbuf->pkt_len = wqe->s.w1.len;
72 if (!!(flag & OCCTX_RX_MULTI_SEG_F)) {
73 mbuf->nb_segs = wqe->s.w0.bufs;
74 mbuf->data_len = wqe->s.w5.size;
75 ssovf_octeontx_wqe_xtract_mseg(wqe, mbuf);
78 mbuf->data_len = mbuf->pkt_len;
81 if (!!(flag & OCCTX_RX_VLAN_FLTR_F)) {
82 if (likely(wqe->s.w2.vv)) {
83 mbuf->ol_flags |= PKT_RX_VLAN;
85 ntohs(*((uint16_t *)((char *)mbuf->buf_addr +
86 mbuf->data_off + wqe->s.w4.vlptr + 2)));
90 mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
91 rte_mbuf_refcnt_set(mbuf, 1);
96 static __rte_always_inline void
97 ssovf_octeontx_wqe_free(uint64_t work)
99 octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
100 uint8_t nb_segs = wqe->s.w0.bufs;
101 octtx_pki_buflink_t *buflink;
102 struct rte_mbuf *mbuf, *head;
103 rte_iova_t *iova_list;
105 mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
106 buflink = (octtx_pki_buflink_t *)((uintptr_t)wqe->s.w3.addr -
107 sizeof(octtx_pki_buflink_t));
110 iova_list = (rte_iova_t *)(uintptr_t)(buflink->w1.s.addr);
111 mbuf = (struct rte_mbuf *)(rte_iova_t *)(iova_list - 2)
112 - (OCTTX_PACKET_LATER_SKIP / 128);
115 rte_pktmbuf_free(mbuf);
116 buflink = (octtx_pki_buflink_t *)(rte_iova_t *)(iova_list - 2);
118 rte_pktmbuf_free(head);
121 static __rte_always_inline uint16_t
122 ssows_get_work(struct ssows *ws, struct rte_event *ev, const uint16_t flag)
124 uint64_t get_work0, get_work1;
125 uint64_t sched_type_queue;
127 ssovf_load_pair(get_work0, get_work1, ws->getwork);
129 sched_type_queue = (get_work0 >> 32) & 0xfff;
130 ws->cur_tt = sched_type_queue & 0x3;
131 ws->cur_grp = sched_type_queue >> 2;
132 sched_type_queue = sched_type_queue << 38;
133 ev->event = sched_type_queue | (get_work0 & 0xffffffff);
135 if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
136 ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
137 (ev->event >> 20) & 0x7F, flag);
138 } else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
139 ssovf_octeontx_wqe_free(get_work1);
148 static __rte_always_inline void
149 ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
150 const uint8_t new_tt, const uint8_t grp)
154 add_work0 = tag | ((uint64_t)(new_tt) << 32);
155 ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
158 static __rte_always_inline void
159 ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
160 const uint8_t new_tt, const uint8_t grp)
162 uint64_t swtag_full0;
164 swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
165 ((uint64_t)grp << 34);
166 ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
167 SSOW_VHWS_OP_SWTAG_FULL0));
170 static __rte_always_inline void
171 ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
175 val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
176 ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
179 static __rte_always_inline void
180 ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
184 val = tag | ((uint64_t)(new_tt & 0x3) << 32);
185 ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
188 static __rte_always_inline void
189 ssows_swtag_untag(struct ssows *ws)
191 ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
192 ws->cur_tt = SSO_SYNC_UNTAGGED;
195 static __rte_always_inline void
196 ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
198 ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
199 SSOW_VHWS_OP_UPD_WQP_GRP0));
202 static __rte_always_inline void
203 ssows_desched(struct ssows *ws)
205 ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
208 static __rte_always_inline void
209 ssows_swtag_wait(struct ssows *ws)
211 /* Wait for the SWTAG/SWTAG_FULL operation */
212 while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))