4 * Copyright (C) Cavium networks Ltd. 2017.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Cavium networks nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #ifndef __SSOVF_EVDEV_H__
34 #define __SSOVF_EVDEV_H__
36 #include <rte_config.h>
37 #include <rte_eventdev_pmd.h>
40 #include "rte_pmd_octeontx_ssovf.h"
42 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
/*
 * Log helpers. The info/debug variants expand to nothing unless the
 * driver's debug option is enabled at build time; errors are always
 * logged. The missing #else/#endif of the original conditional is
 * restored here — without it both branches were defined unconditionally,
 * which is a macro redefinition error.
 */
#ifdef RTE_LIBRTE_PMD_OCTEONTX_SSOVF_DEBUG
#define ssovf_log_info(fmt, args...) \
	RTE_LOG(INFO, EVENTDEV, "[%s] %s() " fmt "\n", \
		RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
#define ssovf_log_dbg(fmt, args...) \
	RTE_LOG(DEBUG, EVENTDEV, "[%s] %s() " fmt "\n", \
		RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
#else
#define ssovf_log_info(fmt, args...)
#define ssovf_log_dbg(fmt, args...)
#endif

#define ssovf_func_trace ssovf_log_dbg
#define ssovf_log_err(fmt, args...) \
	RTE_LOG(ERR, EVENTDEV, "[%s] %s() " fmt "\n", \
		RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD), __func__, ## args)
/* PCI IDs used to probe the SSO group and work-slot VF devices */
61 #define PCI_VENDOR_ID_CAVIUM 0x177D
62 #define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
63 #define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
/* Hardware limits: virtual hardware groups (VHGRP) and work slots (VHWS) */
65 #define SSO_MAX_VHGRP (64)
66 #define SSO_MAX_VHWS (32)
68 /* SSO VF register offsets */
69 #define SSO_VHGRP_QCTL (0x010ULL)
70 #define SSO_VHGRP_INT (0x100ULL)
71 #define SSO_VHGRP_INT_W1S (0x108ULL)
72 #define SSO_VHGRP_INT_ENA_W1S (0x110ULL)
73 #define SSO_VHGRP_INT_ENA_W1C (0x118ULL)
74 #define SSO_VHGRP_INT_THR (0x140ULL)
75 #define SSO_VHGRP_INT_CNT (0x180ULL)
76 #define SSO_VHGRP_XAQ_CNT (0x1B0ULL)
77 #define SSO_VHGRP_AQ_CNT (0x1C0ULL)
78 #define SSO_VHGRP_AQ_THR (0x1E0ULL)
/* (x) indexes one of the 64-bit PF<->VF mailbox registers */
79 #define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
/* Enqueue (add-work) operation offsets */
82 #define SSO_VHGRP_OP_ADD_WORK0 (0x00ULL)
83 #define SSO_VHGRP_OP_ADD_WORK1 (0x08ULL)
85 /* SSOW VF register offsets (BAR0) */
/* (x) selects a 64-bit group-mask register; presumably one bit per VHGRP
 * (SSO_MAX_VHGRP == 64 fits one register) — TODO confirm against the HRM */
86 #define SSOW_VHWS_GRPMSK_CHGX(x) (0x080ULL | ((x) << 3))
87 #define SSOW_VHWS_TAG (0x300ULL)
88 #define SSOW_VHWS_WQP (0x308ULL)
89 #define SSOW_VHWS_LINKS (0x310ULL)
90 #define SSOW_VHWS_PENDTAG (0x340ULL)
91 #define SSOW_VHWS_PENDWQP (0x348ULL)
92 #define SSOW_VHWS_SWTP (0x400ULL)
/* OP_* offsets: accesses at these addresses trigger scheduler operations
 * (tag switch, deschedule, get-work) — NOTE(review): semantics per names;
 * verify against the OcteonTX SSO hardware manual */
93 #define SSOW_VHWS_OP_ALLOC_WE (0x410ULL)
94 #define SSOW_VHWS_OP_UPD_WQP_GRP0 (0x440ULL)
95 #define SSOW_VHWS_OP_UPD_WQP_GRP1 (0x448ULL)
96 #define SSOW_VHWS_OP_SWTAG_UNTAG (0x490ULL)
97 #define SSOW_VHWS_OP_SWTAG_CLR (0x820ULL)
98 #define SSOW_VHWS_OP_DESCHED (0x860ULL)
99 #define SSOW_VHWS_OP_DESCHED_NOSCH (0x870ULL)
100 #define SSOW_VHWS_OP_SWTAG_DESCHED (0x8C0ULL)
101 #define SSOW_VHWS_OP_SWTAG_NOSCHED (0x8D0ULL)
102 #define SSOW_VHWS_OP_SWTP_SET (0xC20ULL)
103 #define SSOW_VHWS_OP_SWTAG_NORM (0xC80ULL)
/* Fixed integer-constant suffix: was 0xCA0UL; every sibling offset uses ULL,
 * and UL is only 32-bit on ILP32 targets. */
#define SSOW_VHWS_OP_SWTAG_FULL0 (0xCA0ULL)
105 #define SSOW_VHWS_OP_SWTAG_FULL1 (0xCA8ULL)
106 #define SSOW_VHWS_OP_CLR_NSCHED (0x10000ULL)
107 #define SSOW_VHWS_OP_GET_WORK0 (0x80000ULL)
108 #define SSOW_VHWS_OP_GET_WORK1 (0x80008ULL)
/* Size of the work-slot VF BAR4 region (64 KiB) */
110 #define SSOW_BAR4_LEN (64 * 1024)
112 /* Mailbox message constants */
/* Coprocessor identifier carried in mailbox messages to the PF driver */
113 #define SSO_COPROC 0x2
/* Message identifiers — presumably matched by the PF-side mailbox handler;
 * confirm against the PF driver sources */
115 #define SSO_GETDOMAINCFG 0x1
116 #define SSO_IDENTIFY 0x2
117 #define SSO_GET_DEV_INFO 0x3
118 #define SSO_GET_GETWORK_WAIT 0x4
119 #define SSO_SET_GETWORK_WAIT 0x5
120 #define SSO_CONVERT_NS_GETWORK_ITER 0x6
121 #define SSO_GRP_GET_PRIORITY 0x7
122 #define SSO_GRP_SET_PRIORITY 0x8
125 * In the Cavium OcteonTX SoC, all accesses to the device registers are
126 * implicitly strongly ordered. So, the relaxed versions of the IO operations
127 * are safe to use without any IO memory barriers.
/* Relaxed MMIO accessors — safe here per the ordering note above */
129 #define ssovf_read64 rte_read64_relaxed
130 #define ssovf_write64 rte_write64_relaxed
132 /* ARM64 specific functions */
133 #if defined(RTE_ARCH_ARM64)
134 #define ssovf_load_pair(val0, val1, addr) ({ \
136 "ldp %x[x0], %x[x1], [%x[p1]]" \
137 :[x0]"=r"(val0), [x1]"=r"(val1) \
141 #define ssovf_store_pair(val0, val1, addr) ({ \
143 "stp %x[x0], %x[x1], [%x[p1]]" \
144 ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
146 #else /* Un optimized functions for building on non arm64 arch */
148 #define ssovf_load_pair(val0, val1, addr) \
150 val0 = rte_read64(addr); \
151 val1 = rte_read64(((uint8_t *)addr) + 8); \
154 #define ssovf_store_pair(val0, val1, addr) \
156 rte_write64(val0, addr); \
157 rte_write64(val1, (((uint8_t *)addr) + 8)); \
163 uint8_t max_event_queues;
164 uint8_t max_event_ports;
165 uint8_t is_timeout_deq;
166 uint8_t nb_event_queues;
167 uint8_t nb_event_ports;
168 uint32_t min_deq_timeout_ns;
169 uint32_t max_deq_timeout_ns;
170 int32_t max_num_events;
171 } __rte_cache_aligned;
173 /* Event port aka HWS */
180 uint8_t *grps[SSO_MAX_VHGRP];
182 } __rte_cache_aligned;
184 static inline struct ssovf_evdev *
185 ssovf_pmd_priv(const struct rte_eventdev *eventdev)
187 return eventdev->data->dev_private;
/* Fast-path handlers for an event port (HWS); 'port' is a struct ssows *. */
/* Enqueue a single event / a burst of events */
190 uint16_t ssows_enq(void *port, const struct rte_event *ev);
191 uint16_t ssows_enq_burst(void *port,
192 const struct rte_event ev[], uint16_t nb_events);
/* Dequeue a single event / a burst; the _timeout variants presumably honor
 * timeout_ticks — confirm in the worker implementation */
193 uint16_t ssows_deq(void *port, struct rte_event *ev, uint64_t timeout_ticks);
194 uint16_t ssows_deq_burst(void *port, struct rte_event ev[],
195 uint16_t nb_events, uint64_t timeout_ticks);
196 uint16_t ssows_deq_timeout(void *port, struct rte_event *ev,
197 uint64_t timeout_ticks);
198 uint16_t ssows_deq_timeout_burst(void *port, struct rte_event ev[],
199 uint16_t nb_events, uint64_t timeout_ticks);
/* Teardown helpers: drain events for one queue / reset a work slot —
 * NOTE(review): call sites not visible in this chunk */
200 void ssows_flush_events(struct ssows *ws, uint8_t queue_id);
201 void ssows_reset(struct ssows *ws);
203 #endif /* __SSOVF_EVDEV_H__ */