/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2017.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __SSOVF_EVDEV_H__
#define __SSOVF_EVDEV_H__

#include <rte_config.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_io.h>

#include <octeontx_mbox.h>

#define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
#ifdef RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG
#define ssovf_log_info(fmt, args...) \
	RTE_LOG(INFO, EVENTDEV, "[%s] %s() " fmt "\n", \
		RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
#define ssovf_log_dbg(fmt, args...) \
	RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() " fmt "\n", \
		RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
#else
#define ssovf_log_info(fmt, args...)
#define ssovf_log_dbg(fmt, args...)
#endif

#define ssovf_func_trace ssovf_log_dbg
#define ssovf_log_err(fmt, args...) \
	RTE_LOG(ERR, EVENTDEV, "[%s] %s() " fmt "\n", \
		RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
#define SSO_MAX_VHGRP                     (64)
#define SSO_MAX_VHWS                      (32)
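
/*
 * In this PMD each VHGRP (SSO group virtual function) backs one event
 * queue and each VHWS (SSO work-slot virtual function) backs one event
 * port, so these limits bound the max_event_queues and max_event_ports
 * values advertised to the eventdev layer.
 */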
/* SSO VF register offsets (BAR0) */
#define SSO_VHGRP_QCTL                    (0x010ULL)
#define SSO_VHGRP_INT                     (0x100ULL)
#define SSO_VHGRP_INT_W1S                 (0x108ULL)
#define SSO_VHGRP_INT_ENA_W1S             (0x110ULL)
#define SSO_VHGRP_INT_ENA_W1C             (0x118ULL)
#define SSO_VHGRP_INT_THR                 (0x140ULL)
#define SSO_VHGRP_INT_CNT                 (0x180ULL)
#define SSO_VHGRP_XAQ_CNT                 (0x1B0ULL)
#define SSO_VHGRP_AQ_CNT                  (0x1C0ULL)
#define SSO_VHGRP_AQ_THR                  (0x1E0ULL)
/* SSO VF register offsets (BAR2) */
#define SSO_VHGRP_OP_ADD_WORK0            (0x00ULL)
#define SSO_VHGRP_OP_ADD_WORK1            (0x08ULL)
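
/*
 * Work is submitted to a group by writing a 128-bit descriptor to the
 * ADD_WORK pair. Illustrative sketch only; add_work0, ev and grp_base2
 * (a pointer to the group's BAR2 mapping) are hypothetical names not
 * defined in this header:
 *
 *	ssovf_store_pair(add_work0, ev->u64,
 *			 grp_base2 + SSO_VHGRP_OP_ADD_WORK0);
 */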
/* SSOW VF register offsets (BAR0) */
#define SSOW_VHWS_GRPMSK_CHGX(x)          (0x080ULL | ((x) << 3))
#define SSOW_VHWS_TAG                     (0x300ULL)
#define SSOW_VHWS_WQP                     (0x308ULL)
#define SSOW_VHWS_LINKS                   (0x310ULL)
#define SSOW_VHWS_PENDTAG                 (0x340ULL)
#define SSOW_VHWS_PENDWQP                 (0x348ULL)
#define SSOW_VHWS_SWTP                    (0x400ULL)
#define SSOW_VHWS_OP_ALLOC_WE             (0x410ULL)
#define SSOW_VHWS_OP_UPD_WQP_GRP0         (0x440ULL)
#define SSOW_VHWS_OP_UPD_WQP_GRP1         (0x448ULL)
#define SSOW_VHWS_OP_SWTAG_UNTAG          (0x490ULL)
#define SSOW_VHWS_OP_SWTAG_CLR            (0x820ULL)
#define SSOW_VHWS_OP_DESCHED              (0x860ULL)
#define SSOW_VHWS_OP_DESCHED_NOSCH        (0x870ULL)
#define SSOW_VHWS_OP_SWTAG_DESCHED        (0x8C0ULL)
#define SSOW_VHWS_OP_SWTAG_NOSCHED        (0x8D0ULL)
#define SSOW_VHWS_OP_SWTP_SET             (0xC20ULL)
#define SSOW_VHWS_OP_SWTAG_NORM           (0xC80ULL)
#define SSOW_VHWS_OP_SWTAG_FULL0          (0xCA0ULL)
#define SSOW_VHWS_OP_SWTAG_FULL1          (0xCA8ULL)
#define SSOW_VHWS_OP_CLR_NSCHED           (0x10000ULL)
#define SSOW_VHWS_OP_GET_WORK0            (0x80000ULL)
#define SSOW_VHWS_OP_GET_WORK1            (0x80008ULL)
/* Mailbox message constants */
#define SSO_COPROC                        0x2

#define SSO_GETDOMAINCFG                  0x1
#define SSO_IDENTIFY                      0x2
#define SSO_GET_DEV_INFO                  0x3
#define SSO_GET_GETWORK_WAIT              0x4
#define SSO_SET_GETWORK_WAIT              0x5
#define SSO_CONVERT_NS_GETWORK_ITER       0x6
#define SSO_GRP_GET_PRIORITY              0x7
#define SSO_GRP_SET_PRIORITY              0x8
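
/*
 * These values populate the request header sent over the octeontx
 * mailbox: SSO_COPROC selects the co-processor and the remaining codes
 * select the operation. A minimal sketch, assuming the mailbox send
 * helper exported by octeontx_mbox.h takes a header plus tx/rx buffers
 * (the exact helper name and signature may differ between DPDK
 * releases):
 *
 *	struct octeontx_mbox_hdr hdr = {0};
 *	uint64_t wait_ns;
 *
 *	hdr.coproc = SSO_COPROC;
 *	hdr.msg = SSO_GET_GETWORK_WAIT;
 *	hdr.vfid = 0;
 *	octeontx_mbox_send(&hdr, NULL, 0, &wait_ns, sizeof(wait_ns));
 */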
/*
 * In the Cavium OcteonTX SoC, all accesses to the device registers are
 * implicitly strongly ordered, so the relaxed versions of the IO
 * operations are safe to use without any IO memory barriers.
 */
#define ssovf_read64 rte_read64_relaxed
#define ssovf_write64 rte_write64_relaxed
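
/*
 * Illustrative usage only; base is a hypothetical uint8_t pointer to a
 * group's BAR0 mapping, not defined in this header: read the admission
 * queue count and update its threshold without barriers.
 *
 *	uint64_t aq_cnt = ssovf_read64(base + SSO_VHGRP_AQ_CNT);
 *
 *	ssovf_write64(new_thr, base + SSO_VHGRP_AQ_THR);
 */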
/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
#define ssovf_load_pair(val0, val1, addr) ({		\
			asm volatile(			\
			"ldp %x[x0], %x[x1], [%x[p1]]"	\
			:[x0]"=r"(val0), [x1]"=r"(val1) \
			:[p1]"r"(addr)			\
			); })

#define ssovf_store_pair(val0, val1, addr) ({		\
			asm volatile(			\
			"stp %x[x0], %x[x1], [%x[p1]]"	\
			::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
			); })
#else /* Unoptimized fallbacks for building on non-arm64 architectures */

#define ssovf_load_pair(val0, val1, addr)		\
do {							\
	val0 = rte_read64(addr);			\
	val1 = rte_read64(((uint8_t *)addr) + 8);	\
} while (0)

#define ssovf_store_pair(val0, val1, addr)		\
do {							\
	rte_write64(val0, addr);			\
	rte_write64(val1, (((uint8_t *)addr) + 8));	\
} while (0)
#endif
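
/*
 * Sketch of a GET_WORK access, assuming getwork is a pointer to the work
 * slot's SSOW_VHWS_OP_GET_WORK0 register (both names here are
 * hypothetical, not defined in this header): the paired load fetches the
 * tag word and the work queue pointer in a single 128-bit access.
 *
 *	uint64_t get_work0, get_work1;
 *
 *	ssovf_load_pair(get_work0, get_work1, getwork);
 */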
struct ssovf_evdev {
	uint8_t max_event_queues;
	uint8_t max_event_ports;
	uint8_t is_timeout_deq;
	uint8_t nb_event_queues;
	uint8_t nb_event_ports;
	uint32_t min_deq_timeout_ns;
	uint32_t max_deq_timeout_ns;
	int32_t max_num_events;
} __rte_cache_aligned;
/* Event port aka HWS */
struct ssows {
	uint8_t cur_tt;
	uint8_t cur_grp;
	uint8_t swtag_req;
	uint8_t *base;
	uint8_t *getwork;
	uint8_t *grps[SSO_MAX_VHGRP];
	uint8_t port;
} __rte_cache_aligned;
static inline struct ssovf_evdev *
ssovf_pmd_priv(const struct rte_eventdev *eventdev)
{
	return eventdev->data->dev_private;
}
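
/*
 * Typical usage inside a PMD callback; ssovf_example_op is a
 * hypothetical function shown for illustration only:
 *
 *	static int
 *	ssovf_example_op(struct rte_eventdev *dev)
 *	{
 *		struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
 *
 *		return edev->nb_event_queues;
 *	}
 */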
uint16_t ssows_enq(void *port, const struct rte_event *ev);
uint16_t ssows_enq_burst(void *port,
		const struct rte_event ev[], uint16_t nb_events);
uint16_t ssows_enq_new_burst(void *port,
		const struct rte_event ev[], uint16_t nb_events);
uint16_t ssows_enq_fwd_burst(void *port,
		const struct rte_event ev[], uint16_t nb_events);
uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks);
uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
		uint64_t timeout_ticks);
uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
		uint16_t nb_events, uint64_t timeout_ticks);
void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
void ssows_reset(struct ssows *ws);
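
/*
 * These workers are wired into the eventdev fast path at device setup.
 * A minimal sketch of how they map onto the standard rte_eventdev
 * function pointers; when per-dequeue timeouts are in use (see
 * is_timeout_deq above), the deq_timeout variants are expected to
 * replace the plain dequeue handlers:
 *
 *	dev->enqueue = ssows_enq;
 *	dev->enqueue_burst = ssows_enq_burst;
 *	dev->enqueue_new_burst = ssows_enq_new_burst;
 *	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
 *	dev->dequeue = ssows_deq;
 *	dev->dequeue_burst = ssows_deq_burst;
 */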
#endif /* __SSOVF_EVDEV_H__ */