/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */
5 #include <rte_common.h>
6 #include <rte_branch_prediction.h>
8 #include <octeontx_mbox.h>
10 #include "ssovf_evdev.h"
11 #include "octeontx_rxtx.h"
22 static __rte_always_inline struct rte_mbuf *
23 ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_info)
25 struct rte_mbuf *mbuf;
26 octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
28 /* Get mbuf from wqe */
29 mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
30 rte_prefetch_non_temporal(mbuf);
32 ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
33 mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
34 mbuf->pkt_len = wqe->s.w1.len;
35 mbuf->data_len = mbuf->pkt_len;
38 mbuf->port = rte_octeontx_pchan_map[port_info >> 4][port_info & 0xF];
39 rte_mbuf_refcnt_set(mbuf, 1);
44 static __rte_always_inline void
45 ssovf_octeontx_wqe_free(uint64_t work)
47 octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
48 struct rte_mbuf *mbuf;
50 mbuf = (struct rte_mbuf *)((uintptr_t)wqe - OCTTX_PACKET_WQE_SKIP);
51 rte_pktmbuf_free(mbuf);
54 static __rte_always_inline uint16_t
55 ssows_get_work(struct ssows *ws, struct rte_event *ev)
57 uint64_t get_work0, get_work1;
58 uint64_t sched_type_queue;
60 ssovf_load_pair(get_work0, get_work1, ws->getwork);
62 sched_type_queue = (get_work0 >> 32) & 0xfff;
63 ws->cur_tt = sched_type_queue & 0x3;
64 ws->cur_grp = sched_type_queue >> 2;
65 sched_type_queue = sched_type_queue << 38;
66 ev->event = sched_type_queue | (get_work0 & 0xffffffff);
68 if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
69 ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
70 (ev->event >> 20) & 0x7F);
71 } else if (unlikely((get_work0 & 0xFFFFFFFF) == 0xFFFFFFFF)) {
72 ssovf_octeontx_wqe_free(get_work1);
81 static __rte_always_inline void
82 ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
83 const uint8_t new_tt, const uint8_t grp)
87 add_work0 = tag | ((uint64_t)(new_tt) << 32);
88 ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
91 static __rte_always_inline void
92 ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
93 const uint8_t new_tt, const uint8_t grp)
97 swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
98 ((uint64_t)grp << 34);
99 ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
100 SSOW_VHWS_OP_SWTAG_FULL0));
103 static __rte_always_inline void
104 ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
108 val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
109 ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
112 static __rte_always_inline void
113 ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
117 val = tag | ((uint64_t)(new_tt & 0x3) << 32);
118 ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
121 static __rte_always_inline void
122 ssows_swtag_untag(struct ssows *ws)
124 ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
125 ws->cur_tt = SSO_SYNC_UNTAGGED;
128 static __rte_always_inline void
129 ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
131 ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
132 SSOW_VHWS_OP_UPD_WQP_GRP0));
135 static __rte_always_inline void
136 ssows_desched(struct ssows *ws)
138 ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
141 static __rte_always_inline void
142 ssows_swtag_wait(struct ssows *ws)
144 /* Wait for the SWTAG/SWTAG_FULL operation */
145 while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))