1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #ifndef __CNXK_EVENTDEV_H__
6 #define __CNXK_EVENTDEV_H__
10 #include <cryptodev_pmd.h>
11 #include <rte_devargs.h>
12 #include <rte_ethdev.h>
13 #include <rte_event_eth_rx_adapter.h>
14 #include <rte_event_eth_tx_adapter.h>
15 #include <rte_kvargs.h>
16 #include <rte_mbuf_pool_ops.h>
19 #include <eventdev_pmd_pci.h>
23 #include "cnxk_tim_evdev.h"
/* Devargs keys accepted by the cnxk event device (parsed with rte_kvargs). */
#define CNXK_SSO_XAE_CNT "xae_cnt"	/* Override in-flight event (XAE) count. */
#define CNXK_SSO_GGRP_QOS "qos"		/* Per-HWGRP QoS configuration string. */
#define CNXK_SSO_FORCE_BP "force_rx_bp" /* Force Rx back-pressure enable. */
#define CN9K_SSO_SINGLE_WS "single_ws"	/* cn9k: use single workslot ports. */
#define CN10K_SSO_GW_MODE "gw_mode"	/* cn10k: GETWORK prefetch mode. */
/* Time-unit conversion helpers.
 * NOTE(review): 1E3/1E9 are double literals, so these evaluate in floating
 * point; the result is only truncated when the caller assigns/casts it.
 */
#define NSEC2USEC(__ns) ((__ns) / 1E3)
#define USEC2NSEC(__us) ((__us)*1E3)
#define NSEC2TICK(__ns, __freq) (((__ns) * (__freq)) / 1E9)
/* SSO resource limits and shared-object names. */
#define CNXK_SSO_MAX_HWGRP (RTE_EVENT_MAX_QUEUES_PER_DEV + 1)
#define CNXK_SSO_FC_NAME "cnxk_evdev_xaq_fc"	/* XAQ flow-control memzone. */
#define CNXK_SSO_MZ_NAME "cnxk_evdev_mz"	/* Driver-state memzone. */
#define CNXK_SSO_XAQ_CACHE_CNT (0x7)
#define CNXK_SSO_XAQ_SLACK (8)		/* Extra XAQ entries kept in reserve. */
#define CNXK_SSO_WQE_SG_PTR (9)
/* Queue attribute ranges: 8 priority levels, weights in
 * [CNXK_SSO_WEIGHT_MIN, CNXK_SSO_WEIGHT_MAX], 16 affinity levels.
 */
#define CNXK_SSO_PRIORITY_CNT (0x8)
#define CNXK_SSO_WEIGHT_MAX (0x3f)
#define CNXK_SSO_WEIGHT_MIN (0x3)
#define CNXK_SSO_WEIGHT_CNT (CNXK_SSO_WEIGHT_MAX - CNXK_SSO_WEIGHT_MIN + 1)
#define CNXK_SSO_AFFINITY_CNT (0x10)
/* Field extractors for the 64-bit SSO GWS tag/event words.
 * Fix: the argument of CNXK_CLR_SUB_EVENT/CNXK_SWTAG_PEND/CNXK_TAG_IS_HEAD
 * was previously unparenthesized, so a compound argument such as (a | b)
 * bound incorrectly ('&' binds tighter than '|'). All arguments are now
 * fully parenthesized; single-token callers are unaffected.
 */
#define CNXK_TT_FROM_TAG(x)	    (((x) >> 32) & SSO_TT_EMPTY)
#define CNXK_TT_FROM_EVENT(x)	    (((x) >> 38) & SSO_TT_EMPTY)
#define CNXK_EVENT_TYPE_FROM_TAG(x) (((x) >> 28) & 0xf)
#define CNXK_SUB_EVENT_FROM_TAG(x)  (((x) >> 20) & 0xff)
#define CNXK_CLR_SUB_EVENT(x)	    (~(0xffull << 20) & (x))
#define CNXK_GRP_FROM_TAG(x)	    (((x) >> 36) & 0x3ff)
#define CNXK_SWTAG_PEND(x)	    (BIT_ULL(62) & (x))
#define CNXK_TAG_IS_HEAD(x)	    (BIT_ULL(35) & (x))
/* Recover the SSOW LF base address from a cached GET_WORK0 register
 * address (the driver stores the op register, not the LF base).
 */
#define CN9K_SSOW_GET_BASE_ADDR(_GW) ((_GW)-SSOW_LF_GWS_OP_GET_WORK0)

/* cn10k GETWORK modes selected via the CN10K_SSO_GW_MODE devarg. */
#define CN10K_GW_MODE_NONE 0
#define CN10K_GW_MODE_PREF 1
#define CN10K_GW_MODE_PREF_WFE 2
/* Map VAL from the range [0, MAX] onto CNT discrete levels starting at MIN
 * (integer bucket size rounded up).
 * Fix: every macro argument is now parenthesized; previously a compound
 * argument such as (a + b) for VAL or MIN changed the meaning because '/'
 * binds tighter than '+'.
 */
#define CNXK_QOS_NORMALIZE(val, min, max, cnt)                                 \
	((min) + (val) / (((max) + (cnt)-1) / (cnt)))

/* Upper bound on retries while draining work during stop/close. */
#define CNXK_SSO_FLUSH_RETRY_MAX 0xfff
/* Guard: verify DEV was probed by the driver named DRV_NAME (prefix
 * compare) before servicing an op; otherwise bail out with an error.
 * NOTE(review): the macro body's remaining continuation lines (error
 * return) are outside this view — confirm against the full file.
 */
#define CNXK_VALID_DEV_OR_ERR_RET(dev, drv_name) \
if (strncmp(dev->driver->name, drv_name, strlen(drv_name))) \
/* Callback types the common SSO layer uses to drive the SoC-specific
 * (cn9k/cn10k) event-port (HWS) implementations.
 */
/* Allocate per-port HWS memory; returns the workslot context pointer. */
typedef void *(*cnxk_sso_init_hws_mem_t)(void *dev, uint8_t port_id);
/* Bind an allocated HWS to its HWGRP base address. */
typedef void (*cnxk_sso_hws_setup_t)(void *dev, void *ws, uintptr_t grp_base);
typedef void (*cnxk_sso_hws_release_t)(void *dev, void *ws);
/* Link/unlink queues to a port; NOTE(review): trailing parameters of the
 * next two prototypes are outside this view. */
typedef int (*cnxk_sso_link_t)(void *dev, void *ws, uint16_t *map,
typedef int (*cnxk_sso_unlink_t)(void *dev, void *ws, uint16_t *map,
/* Invoked for each event handed back while flushing (see
 * cnxk_sso_hws_flush_t). */
typedef void (*cnxk_handle_event_t)(void *arg, struct rte_event ev);
typedef void (*cnxk_sso_hws_reset_t)(void *arg, void *ws);
typedef int (*cnxk_sso_hws_flush_t)(void *ws, uint8_t queue_id, uintptr_t base,
				    cnxk_handle_event_t fn, void *arg);
91 struct cnxk_sso_mlt_prio {
/* Driver-private event device state shared by the cn9k and cn10k SSO
 * drivers; retrieved via cnxk_sso_pmd_priv().
 */
struct cnxk_sso_evdev {
	uint8_t max_event_queues;
	uint8_t max_event_ports;
	uint8_t is_timeout_deq;
	uint8_t nb_event_queues;
	uint8_t nb_event_ports;
	uint32_t min_dequeue_timeout_ns;
	uint32_t max_dequeue_timeout_ns;
	int32_t max_num_events;
	/* Rx/Tx/timer adapter state. */
	uint64_t rx_offloads;
	uint64_t tx_offloads;
	uint64_t adptr_xae_cnt; /* XAE count contributed by adapters. */
	uint16_t rx_adptr_pool_cnt;
	uint64_t *rx_adptr_pools;
	uint64_t *tx_adptr_data;
	size_t tx_adptr_data_sz;
	uint16_t max_port_id;
	uint16_t max_queue_id[RTE_MAX_ETHPORTS];
	uint8_t tx_adptr_configured;
	uint16_t tim_adptr_ring_cnt;
	uint16_t *timer_adptr_rings;
	uint64_t *timer_adptr_sz;
	uint16_t vec_pool_cnt;
	struct cnxk_sso_mlt_prio mlt_prio[RTE_EVENT_MAX_QUEUES_PER_DEV];
	/* Values parsed from devargs ("qos", "force_rx_bp"). */
	uint8_t qos_queue_cnt;
	struct cnxk_sso_qos *qos_parse_data;
	uint8_t force_ena_bp;
	/* Crypto adapter. */
	uint8_t is_ca_internal_port;
} __rte_cache_aligned;
/* cn10k event port (GWS) context; cache-line split into scheduling,
 * add-work and Tx fast-path sections.
 */
struct cn10k_sso_hws {
	struct cnxk_timesync_info *tstamp; /* PTP timestamping info. */
	/* Add Work Fastpath data */
	uint64_t xaq_lmt __rte_cache_aligned;
	/* Tx Fastpath data */
	uintptr_t lmt_base __rte_cache_aligned; /* LMTST region base. */
	uint64_t lso_tun_fmt;
	/* Flexible array: per-ethdev Tx adapter data copied at setup. */
	uint8_t tx_adptr_data[];
} __rte_cache_aligned;
/* Event port a.k.a GWS (cn9k, single-workslot variant). */
struct cn9k_sso_hws {
	struct cnxk_timesync_info *tstamp; /* PTP timestamping info. */
	/* Add Work Fastpath data */
	uint64_t xaq_lmt __rte_cache_aligned;
	/* Tx Fastpath data */
	uint64_t lso_tun_fmt __rte_cache_aligned;
	/* Flexible array: per-ethdev Tx adapter data copied at setup. */
	uint8_t tx_adptr_data[];
} __rte_cache_aligned;
/* cn9k dual-workslot event port: alternates GETWORK between two GWS
 * ("ping" and "pong") selected by the vws bit.
 */
struct cn9k_sso_hws_dual {
	uint64_t base[2]; /* Ping and Pong */
	struct cnxk_timesync_info *tstamp; /* PTP timestamping info. */
	uint8_t vws; /* Ping pong bit */
	/* Add Work Fastpath data */
	uint64_t xaq_lmt __rte_cache_aligned;
	/* Tx Fastpath data */
	uint64_t lso_tun_fmt __rte_cache_aligned;
	/* Flexible array: per-ethdev Tx adapter data copied at setup. */
	uint8_t tx_adptr_data[];
} __rte_cache_aligned;
/* Bookkeeping stored immediately before each HWS allocation; located via
 * cnxk_sso_hws_get_cookie().
 */
struct cnxk_sso_hws_cookie {
	const struct rte_eventdev *event_dev; /* Owning event device. */
} __rte_cache_aligned;
/* rte_kvargs handler: store VALUE as a boolean flag (non-zero -> 1) into
 * the uint8_t pointed to by OPAQUE.
 */
parse_kvargs_flag(const char *key, const char *value, void *opaque)
	*(uint8_t *)opaque = !!atoi(value);
/* rte_kvargs handler: store VALUE as a uint32_t into OPAQUE.
 * NOTE(review): atoi() gives no error/range reporting — malformed input
 * silently parses as 0.
 */
parse_kvargs_value(const char *key, const char *value, void *opaque)
	*(uint32_t *)opaque = (uint32_t)atoi(value);
/* Return the driver-private cnxk_sso_evdev behind a generic eventdev. */
static inline struct cnxk_sso_evdev *
cnxk_sso_pmd_priv(const struct rte_eventdev *event_dev)
	return event_dev->data->dev_private;
/* The HWS cookie lives directly in front of the workslot memory, so step
 * back one sizeof(struct cnxk_sso_hws_cookie) from WS.
 */
static inline struct cnxk_sso_hws_cookie *
cnxk_sso_hws_get_cookie(void *ws)
	return RTE_PTR_SUB(ws, sizeof(struct cnxk_sso_hws_cookie));
/* Configuration functions */
/* Re-size the XAQ pool after adapters raise adptr_xae_cnt. */
int cnxk_sso_xae_reconfigure(struct rte_eventdev *event_dev);
int cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev);
/* Account extra XAEs needed by an adapter object of EVENT_TYPE. */
void cnxk_sso_updt_xae_cnt(struct cnxk_sso_evdev *dev, void *data,
			   uint32_t event_type);
/* Common ops API: probe/remove, capability reporting, queue/port setup and
 * device lifecycle shared by cn9k/cn10k; SoC-specific behavior is injected
 * through the cnxk_sso_*_t callbacks.
 */
int cnxk_sso_init(struct rte_eventdev *event_dev);
int cnxk_sso_fini(struct rte_eventdev *event_dev);
int cnxk_sso_remove(struct rte_pci_device *pci_dev);
void cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
		       struct rte_event_dev_info *dev_info);
int cnxk_sso_dev_validate(const struct rte_eventdev *event_dev);
/* Allocate and set up every event port using the SoC-specific hooks. */
int cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
			   cnxk_sso_init_hws_mem_t init_hws_mem,
			   cnxk_sso_hws_setup_t hws_setup);
void cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
			    cnxk_sso_link_t link_fn);
void cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
			     struct rte_event_queue_conf *queue_conf);
int cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
			 const struct rte_event_queue_conf *queue_conf);
void cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id);
int cnxk_sso_queue_attribute_get(struct rte_eventdev *event_dev,
				 uint8_t queue_id, uint32_t attr_id,
				 uint32_t *attr_value);
int cnxk_sso_queue_attribute_set(struct rte_eventdev *event_dev,
				 uint8_t queue_id, uint32_t attr_id,
				 uint64_t attr_value);
void cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
			    struct rte_event_port_conf *port_conf);
int cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
			cnxk_sso_hws_setup_t hws_setup_fn);
int cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
			   uint64_t *tmo_ticks);
/* Start/stop reset each port and flush pending work via the callbacks. */
int cnxk_sso_start(struct rte_eventdev *event_dev,
		   cnxk_sso_hws_reset_t reset_fn,
		   cnxk_sso_hws_flush_t flush_fn);
void cnxk_sso_stop(struct rte_eventdev *event_dev,
		   cnxk_sso_hws_reset_t reset_fn,
		   cnxk_sso_hws_flush_t flush_fn);
int cnxk_sso_close(struct rte_eventdev *event_dev, cnxk_sso_unlink_t unlink_fn);
int cnxk_sso_selftest(const char *dev_name);
void cnxk_sso_dump(struct rte_eventdev *event_dev, FILE *f);
/* Extended statistics (xstats) API.
 * NOTE(review): the trailing parameter of cnxk_sso_xstats_reset() is
 * outside this view.
 */
int cnxk_sso_xstats_get_names(const struct rte_eventdev *event_dev,
			      enum rte_event_dev_xstats_mode mode,
			      uint8_t queue_port_id,
			      struct rte_event_dev_xstats_name *xstats_names,
			      unsigned int *ids, unsigned int size);
int cnxk_sso_xstats_get(const struct rte_eventdev *event_dev,
			enum rte_event_dev_xstats_mode mode,
			uint8_t queue_port_id, const unsigned int ids[],
			uint64_t values[], unsigned int n);
int cnxk_sso_xstats_reset(struct rte_eventdev *event_dev,
			  enum rte_event_dev_xstats_mode mode,
			  int16_t queue_port_id, const uint32_t ids[],
/* Crypto adapter APIs. */
int cnxk_crypto_adapter_qp_add(const struct rte_eventdev *event_dev,
			       const struct rte_cryptodev *cdev,
			       int32_t queue_pair_id);
int cnxk_crypto_adapter_qp_del(const struct rte_cryptodev *cdev,
			       int32_t queue_pair_id);

/* CN9K-specific resource configuration hook. */
void cn9k_sso_set_rsrc(void *arg);
/* Common adapter ops: Rx/Tx event-ethdev adapter queue management shared
 * by cn9k/cn10k.
 * NOTE(review): the rx_queue_id parameter line of the first prototype is
 * outside this view.
 */
int cnxk_sso_rx_adapter_queue_add(
	const struct rte_eventdev *event_dev, const struct rte_eth_dev *eth_dev,
	const struct rte_event_eth_rx_adapter_queue_conf *queue_conf);
int cnxk_sso_rx_adapter_queue_del(const struct rte_eventdev *event_dev,
				  const struct rte_eth_dev *eth_dev,
				  int32_t rx_queue_id);
int cnxk_sso_rx_adapter_start(const struct rte_eventdev *event_dev,
			      const struct rte_eth_dev *eth_dev);
int cnxk_sso_rx_adapter_stop(const struct rte_eventdev *event_dev,
			     const struct rte_eth_dev *eth_dev);
int cnxk_sso_tx_adapter_queue_add(const struct rte_eventdev *event_dev,
				  const struct rte_eth_dev *eth_dev,
				  int32_t tx_queue_id);
int cnxk_sso_tx_adapter_queue_del(const struct rte_eventdev *event_dev,
				  const struct rte_eth_dev *eth_dev,
				  int32_t tx_queue_id);
318 #endif /* __CNXK_EVENTDEV_H__ */