1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Cavium, Inc
5 #ifndef __SSOVF_EVDEV_H__
6 #define __SSOVF_EVDEV_H__
8 #include <rte_eventdev_pmd_vdev.h>
11 #include <octeontx_mbox.h>
12 #include <octeontx_ethdev.h>
14 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
16 #define SSOVF_LOG(level, fmt, args...) \
17 rte_log(RTE_LOG_ ## level, otx_logtype_ssovf, \
18 "[%s] %s() " fmt "\n", \
19 RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
21 #define ssovf_log_info(fmt, ...) SSOVF_LOG(INFO, fmt, ##__VA_ARGS__)
22 #define ssovf_log_dbg(fmt, ...) SSOVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
23 #define ssovf_log_err(fmt, ...) SSOVF_LOG(ERR, fmt, ##__VA_ARGS__)
24 #define ssovf_func_trace ssovf_log_dbg
25 #define ssovf_log_selftest ssovf_log_info
27 #define SSO_MAX_VHGRP (64)
28 #define SSO_MAX_VHWS (32)
30 /* SSO VF register offsets */
31 #define SSO_VHGRP_QCTL (0x010ULL)
32 #define SSO_VHGRP_INT (0x100ULL)
33 #define SSO_VHGRP_INT_W1S (0x108ULL)
34 #define SSO_VHGRP_INT_ENA_W1S (0x110ULL)
35 #define SSO_VHGRP_INT_ENA_W1C (0x118ULL)
36 #define SSO_VHGRP_INT_THR (0x140ULL)
37 #define SSO_VHGRP_INT_CNT (0x180ULL)
38 #define SSO_VHGRP_XAQ_CNT (0x1B0ULL)
39 #define SSO_VHGRP_AQ_CNT (0x1C0ULL)
40 #define SSO_VHGRP_AQ_THR (0x1E0ULL)
43 #define SSO_VHGRP_OP_ADD_WORK0 (0x00ULL)
44 #define SSO_VHGRP_OP_ADD_WORK1 (0x08ULL)
46 /* SSOW VF register offsets (BAR0) */
47 #define SSOW_VHWS_GRPMSK_CHGX(x) (0x080ULL | ((x) << 3))
48 #define SSOW_VHWS_TAG (0x300ULL)
49 #define SSOW_VHWS_WQP (0x308ULL)
50 #define SSOW_VHWS_LINKS (0x310ULL)
51 #define SSOW_VHWS_PENDTAG (0x340ULL)
52 #define SSOW_VHWS_PENDWQP (0x348ULL)
53 #define SSOW_VHWS_SWTP (0x400ULL)
54 #define SSOW_VHWS_OP_ALLOC_WE (0x410ULL)
55 #define SSOW_VHWS_OP_UPD_WQP_GRP0 (0x440ULL)
56 #define SSOW_VHWS_OP_UPD_WQP_GRP1 (0x448ULL)
57 #define SSOW_VHWS_OP_SWTAG_UNTAG (0x490ULL)
58 #define SSOW_VHWS_OP_SWTAG_CLR (0x820ULL)
59 #define SSOW_VHWS_OP_DESCHED (0x860ULL)
60 #define SSOW_VHWS_OP_DESCHED_NOSCH (0x870ULL)
61 #define SSOW_VHWS_OP_SWTAG_DESCHED (0x8C0ULL)
62 #define SSOW_VHWS_OP_SWTAG_NOSCHED (0x8D0ULL)
63 #define SSOW_VHWS_OP_SWTP_SET (0xC20ULL)
64 #define SSOW_VHWS_OP_SWTAG_NORM (0xC80ULL)
#define SSOW_VHWS_OP_SWTAG_FULL0 (0xCA0ULL)
66 #define SSOW_VHWS_OP_SWTAG_FULL1 (0xCA8ULL)
67 #define SSOW_VHWS_OP_CLR_NSCHED (0x10000ULL)
68 #define SSOW_VHWS_OP_GET_WORK0 (0x80000ULL)
69 #define SSOW_VHWS_OP_GET_WORK1 (0x80008ULL)
71 /* Mailbox message constants */
72 #define SSO_COPROC 0x2
74 #define SSO_GETDOMAINCFG 0x1
75 #define SSO_IDENTIFY 0x2
76 #define SSO_GET_DEV_INFO 0x3
77 #define SSO_GET_GETWORK_WAIT 0x4
78 #define SSO_SET_GETWORK_WAIT 0x5
79 #define SSO_CONVERT_NS_GETWORK_ITER 0x6
80 #define SSO_GRP_GET_PRIORITY 0x7
81 #define SSO_GRP_SET_PRIORITY 0x8
83 #define SSOVF_SELFTEST_ARG ("selftest")
86 * In Cavium OcteonTX SoC, all accesses to the device registers are
87 * implicitly strongly ordered. So, the relaxed version of IO operation is
88 * safe to use without any IO memory barriers.
90 #define ssovf_read64 rte_read64_relaxed
91 #define ssovf_write64 rte_write64_relaxed
93 /* ARM64 specific functions */
94 #if defined(RTE_ARCH_ARM64)
95 #define ssovf_load_pair(val0, val1, addr) ({ \
97 "ldp %x[x0], %x[x1], [%x[p1]]" \
98 :[x0]"=r"(val0), [x1]"=r"(val1) \
102 #define ssovf_store_pair(val0, val1, addr) ({ \
104 "stp %x[x0], %x[x1], [%x[p1]]" \
105 ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
107 #else /* Unoptimized functions for building on non-arm64 arch */
109 #define ssovf_load_pair(val0, val1, addr) \
111 val0 = rte_read64(addr); \
112 val1 = rte_read64(((uint8_t *)addr) + 8); \
115 #define ssovf_store_pair(val0, val1, addr) \
117 rte_write64(val0, addr); \
118 rte_write64(val1, (((uint8_t *)addr) + 8)); \
123 uint16_t domain; /* Domain id */
124 uint8_t total_ssovfs; /* Total sso groups available in domain */
125 uint8_t total_ssowvfs;/* Total sso hws available in domain */
129 OCTEONTX_SSO_GROUP, /* SSO group vf */
130 OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
134 uint8_t max_event_queues;
135 uint8_t max_event_ports;
136 uint8_t is_timeout_deq;
137 uint8_t nb_event_queues;
138 uint8_t nb_event_ports;
139 uint32_t min_deq_timeout_ns;
140 uint32_t max_deq_timeout_ns;
141 int32_t max_num_events;
142 } __rte_cache_aligned;
144 /* Event port aka HWS */
151 uint8_t *grps[SSO_MAX_VHGRP];
153 } __rte_cache_aligned;
155 static inline struct ssovf_evdev *
156 ssovf_pmd_priv(const struct rte_eventdev *eventdev)
158 return eventdev->data->dev_private;
161 extern int otx_logtype_ssovf;
163 uint16_t ssows_enq(void *port, const struct rte_event *ev);
164 uint16_t ssows_enq_burst(void *port,
165 const struct rte_event ev[], uint16_t nb_events);
166 uint16_t ssows_enq_new_burst(void *port,
167 const struct rte_event ev[], uint16_t nb_events);
168 uint16_t ssows_enq_fwd_burst(void *port,
169 const struct rte_event ev[], uint16_t nb_events);
170 uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
171 uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
172 uint16_t nb_events, uint64_t timeout_ticks);
173 uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
174 uint64_t timeout_ticks);
175 uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
176 uint16_t nb_events, uint64_t timeout_ticks);
178 typedef void (*ssows_handle_event_t)(void *arg, struct rte_event ev);
179 void ssows_flush_events(struct ssows *ws, uint8_t queue_id,
180 ssows_handle_event_t fn, void *arg);
181 void ssows_reset(struct ssows *ws);
182 int ssovf_info(struct ssovf_info *info);
183 void *ssovf_bar(enum ssovf_type, uint8_t id, uint8_t bar);
184 int test_eventdev_octeontx(void);
186 #endif /* __SSOVF_EVDEV_H__ */