1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2019 Marvell International Ltd.
5 #ifndef __OTX2_EVDEV_H__
6 #define __OTX2_EVDEV_H__
8 #include <rte_eventdev.h>
9 #include <rte_eventdev_pmd.h>
10 #include <rte_event_eth_rx_adapter.h>
11 #include <rte_event_eth_tx_adapter.h>
13 #include "otx2_common.h"
15 #include "otx2_ethdev.h"
16 #include "otx2_mempool.h"
#define EVENTDEV_NAME_OCTEONTX2_PMD otx2_eventdev

/* Route per-function trace messages through the SSO debug logger. */
#define sso_func_trace otx2_sso_dbg

/* Group (GGRP) count is sized by the eventdev queue limit — presumably one
 * hardware group per event queue.
 */
#define OTX2_SSO_MAX_VHGRP RTE_EVENT_MAX_QUEUES_PER_DEV
/* Work slots (GWS, i.e. event ports) are identified by an 8-bit id. */
#define OTX2_SSO_MAX_VHWS (UINT8_MAX)
/* Name of the shared XAQ flow-control memory area. */
#define OTX2_SSO_FC_NAME "otx2_evdev_xaq_fc"
/* NOTE(review): the constants below are hardware sizing tunables taken
 * verbatim from the driver; exact semantics (SQB limit, XAQ slack and
 * cache counts, WQE SG word index) should be confirmed against the HRM.
 */
#define OTX2_SSO_SQB_LIMIT (0x180)
#define OTX2_SSO_XAQ_SLACK (8)
#define OTX2_SSO_XAQ_CACHE_CNT (0x7)
#define OTX2_SSO_WQE_SG_PTR (9)
/* SSO group (GGRP) LF register offsets, relative to the LF's BAR2 base. */
#define SSO_LF_GGRP_OP_ADD_WORK0 (0x0ull)
#define SSO_LF_GGRP_OP_ADD_WORK1 (0x8ull)
#define SSO_LF_GGRP_QCTL (0x20ull)
#define SSO_LF_GGRP_EXE_DIS (0x80ull)
#define SSO_LF_GGRP_INT (0x100ull)
#define SSO_LF_GGRP_INT_W1S (0x108ull)
#define SSO_LF_GGRP_INT_ENA_W1S (0x110ull)
#define SSO_LF_GGRP_INT_ENA_W1C (0x118ull)
#define SSO_LF_GGRP_INT_THR (0x140ull)
#define SSO_LF_GGRP_INT_CNT (0x180ull)
#define SSO_LF_GGRP_XAQ_CNT (0x1b0ull)
#define SSO_LF_GGRP_AQ_CNT (0x1c0ull)
#define SSO_LF_GGRP_AQ_THR (0x1e0ull)
#define SSO_LF_GGRP_MISC_CNT (0x200ull)
/* SSO work-slot (GWS) LF register offsets, relative to the LF's BAR2 base.
 * The OP_* entries are operation addresses written/read to drive the
 * scheduler (get work, switch tag, desched, ...).
 */
#define SSOW_LF_GWS_LINKS (0x10ull)
#define SSOW_LF_GWS_PENDWQP (0x40ull)
#define SSOW_LF_GWS_PENDSTATE (0x50ull)
#define SSOW_LF_GWS_NW_TIM (0x70ull)
#define SSOW_LF_GWS_GRPMSK_CHG (0x80ull)
#define SSOW_LF_GWS_INT (0x100ull)
#define SSOW_LF_GWS_INT_W1S (0x108ull)
#define SSOW_LF_GWS_INT_ENA_W1S (0x110ull)
#define SSOW_LF_GWS_INT_ENA_W1C (0x118ull)
#define SSOW_LF_GWS_TAG (0x200ull)
#define SSOW_LF_GWS_WQP (0x210ull)
#define SSOW_LF_GWS_SWTP (0x220ull)
#define SSOW_LF_GWS_PENDTAG (0x230ull)
#define SSOW_LF_GWS_OP_ALLOC_WE (0x400ull)
#define SSOW_LF_GWS_OP_GET_WORK (0x600ull)
#define SSOW_LF_GWS_OP_SWTAG_FLUSH (0x800ull)
#define SSOW_LF_GWS_OP_SWTAG_UNTAG (0x810ull)
#define SSOW_LF_GWS_OP_SWTP_CLR (0x820ull)
#define SSOW_LF_GWS_OP_UPD_WQP_GRP0 (0x830ull)
#define SSOW_LF_GWS_OP_UPD_WQP_GRP1 (0x838ull)
#define SSOW_LF_GWS_OP_DESCHED (0x880ull)
#define SSOW_LF_GWS_OP_DESCHED_NOSCH (0x8c0ull)
#define SSOW_LF_GWS_OP_SWTAG_DESCHED (0x980ull)
#define SSOW_LF_GWS_OP_SWTAG_NOSCHED (0x9c0ull)
#define SSOW_LF_GWS_OP_CLR_NSCHED0 (0xa00ull)
#define SSOW_LF_GWS_OP_CLR_NSCHED1 (0xa08ull)
#define SSOW_LF_GWS_OP_SWTP_SET (0xc00ull)
#define SSOW_LF_GWS_OP_SWTAG_NORM (0xc10ull)
#define SSOW_LF_GWS_OP_SWTAG_FULL0 (0xc20ull)
#define SSOW_LF_GWS_OP_SWTAG_FULL1 (0xc28ull)
#define SSOW_LF_GWS_OP_GWC_INVAL (0xe00ull)
/* The GET_WORK op address is at a fixed offset from a GWS LF base, so the
 * base address can be recovered by subtracting that offset.
 */
#define OTX2_SSOW_GET_BASE_ADDR(_GW) ((_GW) - SSOW_LF_GWS_OP_GET_WORK)

/* Time-unit conversions. 1E3/1E9 are double constants, so these expand to
 * floating-point expressions; integer callers truncate the fraction on
 * assignment.
 */
#define NSEC2USEC(__ns) ((__ns) / 1E3)
#define USEC2NSEC(__us) ((__us) * 1E3)
#define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
#define TICK2NSEC(__tck, __freq) (((__tck) * 1E9) / (__freq))
87 enum otx2_sso_lf_type {
92 union otx2_sso_event {
96 uint32_t sub_event_type:8;
97 uint32_t event_type:4;
100 uint8_t sched_type:2;
114 struct otx2_sso_qos {
/* Per-device SSO state, stored as the eventdev's private data (see
 * sso_pmd_priv()).
 * NOTE(review): field comments below are inferred from names and usage
 * conventions elsewhere in the driver — confirm against the .c files.
 */
struct otx2_sso_evdev {
	OTX2_DEV; /* Base class */
	uint8_t max_event_queues;	/* hardware capability limits */
	uint8_t max_event_ports;
	uint8_t is_timeout_deq;		/* presumably: timeout dequeue selected */
	uint8_t nb_event_queues;	/* currently configured counts */
	uint8_t nb_event_ports;
	uint32_t min_dequeue_timeout_ns;
	uint32_t max_dequeue_timeout_ns;
	int32_t max_num_events;
	struct rte_mempool *xaq_pool;	/* pool backing XAQ buffers */
	uint64_t rx_offloads;		/* Rx/Tx adapter offload flag sets */
	uint64_t tx_offloads;
	uint16_t rx_adptr_pool_cnt;
	uint32_t adptr_xae_cnt;
	uint64_t *rx_adptr_pools;	/* pools registered via the Rx adapter */
	uint8_t qos_queue_cnt;
	struct otx2_sso_qos *qos_parse_data; /* per-queue QoS from devargs */
	uint32_t xaq_buf_size;
	/* MSI-X vector offsets, one per group (SSO) / work-slot (SSOW) LF */
	uint16_t sso_msixoff[OTX2_SSO_MAX_VHGRP];
	uint16_t ssow_msixoff[OTX2_SSO_MAX_VHWS];
	struct otx2_timesync_info *tstamp; /* PTP timestamp info, if enabled */
} __rte_cache_aligned;
160 #define OTX2_SSOGWS_OPS \
162 uintptr_t getwrk_op; \
166 uintptr_t swtag_norm_op; \
167 uintptr_t swtag_desched_op; \
171 /* Event port aka GWS */
173 /* Get Work Fastpath data */
178 /* Add Work Fastpath data */
179 uint64_t xaq_lmt __rte_cache_aligned;
181 uintptr_t grps_base[OTX2_SSO_MAX_VHGRP];
183 struct otx2_timesync_info *tstamp;
184 } __rte_cache_aligned;
186 struct otx2_ssogws_state {
/* Dual-workslot event port: two GWS contexts used alternately so one can
 * prefetch work while the other is being processed.
 * NOTE(review): "ping pong" role inferred from ws_state[2]/vws naming —
 * confirm against the dequeue fast path.
 */
struct otx2_ssogws_dual {
	/* Get Work Fastpath data */
	struct otx2_ssogws_state ws_state[2]; /* Ping and Pong */
	uint8_t vws; /* Ping pong bit */
	/* Add Work Fastpath data */
	uint64_t xaq_lmt __rte_cache_aligned; /* XAQ flow-control limit */
	uintptr_t grps_base[OTX2_SSO_MAX_VHGRP]; /* per-group base addresses */
	struct otx2_timesync_info *tstamp; /* PTP timestamp info, if enabled */
} __rte_cache_aligned;
205 static inline struct otx2_sso_evdev *
206 sso_pmd_priv(const struct rte_eventdev *event_dev)
208 return event_dev->data->dev_private;
211 static const union mbuf_initializer mbuf_init = {
213 .data_off = RTE_PKTMBUF_HEADROOM,
/* Convert a received SSO work-queue entry (WQE) into an rte_mbuf in place.
 * NOTE(review): the tail of this function (remaining arguments of the
 * otx2_nix_cqe_to_mbuf() call) is not visible in this listing.
 */
static __rte_always_inline void
otx2_wqe_to_mbuf(uint64_t get_work1, const uint64_t mbuf, uint8_t port_id,
		 const uint32_t tag, const uint32_t flags,
		 const void * const lookup_mem)
	/* get_work1 carries the WQE pointer returned by the GET_WORK op */
	struct nix_wqe_hdr_s *wqe = (struct nix_wqe_hdr_s *)get_work1;
	/* pre-built mbuf init word with the event port id in bits 63:48 —
	 * presumably the rearm-data port field; confirm against mbuf layout */
	uint64_t val = mbuf_init.value | (uint64_t)port_id << 48;

	if (flags & NIX_RX_OFFLOAD_TSTAMP_F)
		val |= NIX_TIMESYNC_RX_OFFSET; /* reserve room for timestamp */

	otx2_nix_cqe_to_mbuf((struct nix_cqe_hdr_s *)wqe, tag,
			     (struct rte_mbuf *)mbuf, lookup_mem,
/*
 * devargs kvargs handler: interpret @value as a boolean flag and store
 * it, normalized to 0 or 1, into the uint8_t pointed to by @opaque.
 *
 * @param key     Matched kvargs key (unused; implied by the match).
 * @param value   Textual value; any non-zero integer prefix reads as true.
 * @param opaque  Destination uint8_t.
 * @return 0 always, so kvargs processing continues.
 */
static int
parse_kvargs_flag(const char *key, const char *value, void *opaque)
{
	(void)key;

	*(uint8_t *)opaque = !!atoi(value);

	return 0;
}
/*
 * devargs kvargs handler: parse @value as a decimal integer and store it
 * into the uint32_t pointed to by @opaque.
 *
 * NOTE(review): atoi() offers no error detection — malformed input yields
 * 0 silently. Kept for behavioral compatibility with existing devargs.
 *
 * @param key     Matched kvargs key (unused; implied by the match).
 * @param value   Textual integer value.
 * @param opaque  Destination uint32_t.
 * @return 0 always, so kvargs processing continues.
 */
static int
parse_kvargs_value(const char *key, const char *value, void *opaque)
{
	(void)key;

	*(uint32_t *)opaque = (uint32_t)atoi(value);

	return 0;
}
255 #define SSO_RX_ADPTR_ENQ_FASTPATH_FUNC NIX_RX_FASTPATH_MODES
256 #define SSO_TX_ADPTR_ENQ_FASTPATH_FUNC NIX_TX_FASTPATH_MODES
/* Single WS APIs */
259 uint16_t otx2_ssogws_enq(void *port, const struct rte_event *ev);
260 uint16_t otx2_ssogws_enq_burst(void *port, const struct rte_event ev[],
262 uint16_t otx2_ssogws_enq_new_burst(void *port, const struct rte_event ev[],
264 uint16_t otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
268 uint16_t otx2_ssogws_dual_enq(void *port, const struct rte_event *ev);
269 uint16_t otx2_ssogws_dual_enq_burst(void *port, const struct rte_event ev[],
271 uint16_t otx2_ssogws_dual_enq_new_burst(void *port, const struct rte_event ev[],
273 uint16_t otx2_ssogws_dual_enq_fwd_burst(void *port, const struct rte_event ev[],
/* Auto-generated APIs */
/* Declare every dequeue variant (plain/burst, timeout, segmented, single
 * and dual workslot) for one Rx fastpath mode; expanded below with
 * NIX_RX_FASTPATH_MODES. Definitions are generated in the .c files.
 */
#define R(name, f5, f4, f3, f2, f1, f0, flags) \
uint16_t otx2_ssogws_deq_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_burst_ ##name(void *port, struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_seg_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_seg_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_seg_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_deq_seg_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_seg_ ##name(void *port, struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_seg_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_seg_timeout_ ##name(void *port, \
struct rte_event *ev, \
uint64_t timeout_ticks); \
uint16_t otx2_ssogws_dual_deq_seg_timeout_burst_ ##name(void *port, \
struct rte_event ev[], \
uint16_t nb_events, \
uint64_t timeout_ticks);\
331 SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
/* Declare the Tx adapter enqueue variants (plain/segmented, single and
 * dual workslot) for one Tx fastpath mode; expanded below with
 * NIX_TX_FASTPATH_MODES. Definitions are generated in the .c files.
 */
#define T(name, f5, f4, f3, f2, f1, f0, sz, flags) \
uint16_t otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[],\
uint16_t nb_events); \
uint16_t otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events); \
uint16_t otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events); \
uint16_t otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port, \
struct rte_event ev[], \
uint16_t nb_events); \
347 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
350 void sso_updt_xae_cnt(struct otx2_sso_evdev *dev, void *data,
351 uint32_t event_type);
352 int sso_xae_reconfigure(struct rte_eventdev *event_dev);
353 void sso_fastpath_fns_set(struct rte_eventdev *event_dev);
355 int otx2_sso_rx_adapter_caps_get(const struct rte_eventdev *event_dev,
356 const struct rte_eth_dev *eth_dev,
358 int otx2_sso_rx_adapter_queue_add(const struct rte_eventdev *event_dev,
359 const struct rte_eth_dev *eth_dev,
361 const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
362 int otx2_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
363 const struct rte_eth_dev *eth_dev,
364 int32_t rx_queue_id);
365 int otx2_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
366 const struct rte_eth_dev *eth_dev);
367 int otx2_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
368 const struct rte_eth_dev *eth_dev);
369 int otx2_sso_tx_adapter_caps_get(const struct rte_eventdev *dev,
370 const struct rte_eth_dev *eth_dev,
372 int otx2_sso_tx_adapter_queue_add(uint8_t id,
373 const struct rte_eventdev *event_dev,
374 const struct rte_eth_dev *eth_dev,
375 int32_t tx_queue_id);
377 int otx2_sso_tx_adapter_queue_del(uint8_t id,
378 const struct rte_eventdev *event_dev,
379 const struct rte_eth_dev *eth_dev,
380 int32_t tx_queue_id);
383 typedef void (*otx2_handle_event_t)(void *arg, struct rte_event ev);
384 void ssogws_flush_events(struct otx2_ssogws *ws, uint8_t queue_id,
385 uintptr_t base, otx2_handle_event_t fn, void *arg);
386 void ssogws_reset(struct otx2_ssogws *ws);
388 int otx2_sso_selftest(void);
/* Init and Fini APIs */
390 int otx2_sso_init(struct rte_eventdev *event_dev);
391 int otx2_sso_fini(struct rte_eventdev *event_dev);
393 int sso_register_irqs(const struct rte_eventdev *event_dev);
394 void sso_unregister_irqs(const struct rte_eventdev *event_dev);
396 #endif /* __OTX2_EVDEV_H__ */