/*
 * Copyright (C) Cavium, Inc. 2017.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Cavium, Inc nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #include <rte_common.h>
35 #include <rte_branch_prediction.h>
37 #include <octeontx_mbox.h>
39 #include "ssovf_evdev.h"
40 #include "octeontx_rxtx.h"
50 #define __hot __attribute__((hot))
55 static __rte_always_inline struct rte_mbuf *
56 ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
58 struct rte_mbuf *mbuf;
59 octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
60 rte_prefetch_non_temporal(wqe);
62 /* Get mbuf from wqe */
63 mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
64 OCTTX_PACKET_WQE_SKIP);
66 ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
67 mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
68 mbuf->pkt_len = wqe->s.w1.len;
69 mbuf->data_len = mbuf->pkt_len;
73 rte_mbuf_refcnt_set(mbuf, 1);
77 static __rte_always_inline uint16_t
78 ssows_get_work(struct ssows *ws, struct rte_event *ev)
80 uint64_t get_work0, get_work1;
81 uint64_t sched_type_queue;
83 ssovf_load_pair(get_work0, get_work1, ws->getwork);
85 sched_type_queue = (get_work0 >> 32) & 0xfff;
86 ws->cur_tt = sched_type_queue & 0x3;
87 ws->cur_grp = sched_type_queue >> 2;
88 sched_type_queue = sched_type_queue << 38;
89 ev->event = sched_type_queue | (get_work0 & 0xffffffff);
90 if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
91 ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
92 (ev->event >> 20) & 0xF);
100 static __rte_always_inline void
101 ssows_add_work(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
102 const uint8_t new_tt, const uint8_t grp)
106 add_work0 = tag | ((uint64_t)(new_tt) << 32);
107 ssovf_store_pair(add_work0, event_ptr, ws->grps[grp]);
110 static __rte_always_inline void
111 ssows_swtag_full(struct ssows *ws, const uint64_t event_ptr, const uint32_t tag,
112 const uint8_t new_tt, const uint8_t grp)
114 uint64_t swtag_full0;
116 swtag_full0 = tag | ((uint64_t)(new_tt & 0x3) << 32) |
117 ((uint64_t)grp << 34);
118 ssovf_store_pair(swtag_full0, event_ptr, (ws->base +
119 SSOW_VHWS_OP_SWTAG_FULL0));
122 static __rte_always_inline void
123 ssows_swtag_desched(struct ssows *ws, uint32_t tag, uint8_t new_tt, uint8_t grp)
127 val = tag | ((uint64_t)(new_tt & 0x3) << 32) | ((uint64_t)grp << 34);
128 ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_DESCHED);
131 static __rte_always_inline void
132 ssows_swtag_norm(struct ssows *ws, uint32_t tag, uint8_t new_tt)
136 val = tag | ((uint64_t)(new_tt & 0x3) << 32);
137 ssovf_write64(val, ws->base + SSOW_VHWS_OP_SWTAG_NORM);
140 static __rte_always_inline void
141 ssows_swtag_untag(struct ssows *ws)
143 ssovf_write64(0, ws->base + SSOW_VHWS_OP_SWTAG_UNTAG);
144 ws->cur_tt = SSO_SYNC_UNTAGGED;
147 static __rte_always_inline void
148 ssows_upd_wqp(struct ssows *ws, uint8_t grp, uint64_t event_ptr)
150 ssovf_store_pair((uint64_t)grp << 34, event_ptr, (ws->base +
151 SSOW_VHWS_OP_UPD_WQP_GRP0));
154 static __rte_always_inline void
155 ssows_desched(struct ssows *ws)
157 ssovf_write64(0, ws->base + SSOW_VHWS_OP_DESCHED);
160 static __rte_always_inline void
161 ssows_swtag_wait(struct ssows *ws)
163 /* Wait for the SWTAG/SWTAG_FULL operation */
164 while (ssovf_read64(ws->base + SSOW_VHWS_SWTP))