/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */
5 #include "cn9k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
/* Number of HW workslots backing one dual-workslot event port. */
#define CN9K_DUAL_WS_NB_WS	    2
/* Map a (dual-port id, slot id 0/1) pair to the underlying HW workslot
 * index. Both arguments are fully parenthesized so an expression such as
 * `y << 1` passed as `id` expands with the intended precedence.
 */
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))
/* Populate the per-workslot op addresses from the SSOW LF MMIO base;
 * each op is a fixed register offset used by the fast-path handlers.
 * NOTE(review): the return-type line and braces are elided in this
 * extraction; visible code lines are kept verbatim.
 */
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
	ws->tag_op = base + SSOW_LF_GWS_TAG;
	ws->wqp_op = base + SSOW_LF_GWS_WQP;
	ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
	ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
	ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
	ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
/* Link event queues (HWGRPs in `map`) to an event port. A dual-workslot
 * port links both paired HW workslots; a single port links just its own.
 * NOTE(review): the dual-vs-single selector, `rc` declaration, trailing
 * call arguments and return are elided in this extraction.
 */
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	/* Dual-workslot path: link both workslots of the pair. */
	rc = roc_sso_hws_link(&dev->sso,
			      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
	rc |= roc_sso_hws_link(&dev->sso,
			       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
	/* Single-workslot path. */
	rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
/* Unlink event queues (HWGRPs in `map`) from an event port — mirror of
 * cn9k_sso_hws_link(); dual-workslot ports unlink both paired workslots.
 * NOTE(review): selector branches, `rc` declaration, trailing call
 * arguments and return are elided in this extraction.
 */
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	/* Dual-workslot path: unlink both workslots of the pair. */
	rc = roc_sso_hws_unlink(&dev->sso,
				CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
	rc |= roc_sso_hws_unlink(&dev->sso,
				 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
	/* Single-workslot path. */
	rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
/* Per-port setup hook: copy the HWGRP base-address table into the port,
 * cache the device's flow-control memory and XAQ limit, and program the
 * get_work timeout (NW_TIM) on each workslot.
 * NOTE(review): `val` declaration and the dual/single branch lines are
 * elided in this extraction.
 */
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	/* Set get_work tmo for HWS */
	val = NSEC2USEC(dev->deq_tmo_ns) - 1;
	/* Dual-workslot port: program both paired workslots. */
	rte_memcpy(dws->grps_base, grps_base,
		   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
	dws->fc_mem = dev->fc_mem;
	dws->xaq_lmt = dev->xaq_lmt;
	plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
	plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
	/* Single-workslot port. */
	rte_memcpy(ws->grps_base, grps_base,
		   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
	ws->fc_mem = dev->fc_mem;
	ws->xaq_lmt = dev->xaq_lmt;
	plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
/* Release an event port: unlink every configured event queue from the
 * port's workslot(s), then zero the port state so it can be re-setup.
 * NOTE(review): loop-index declaration, dual/single selector, single-map
 * arguments and closing braces are elided in this extraction.
 */
cn9k_sso_hws_release(void *arg, void *hws)
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	/* Dual-workslot port: unlink each queue from both workslots. */
	for (i = 0; i < dev->nb_event_queues; i++) {
		roc_sso_hws_unlink(&dev->sso,
				   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
		roc_sso_hws_unlink(&dev->sso,
				   CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
	memset(dws, 0, sizeof(*dws));
	/* Single-workslot port. */
	for (i = 0; i < dev->nb_event_queues; i++)
		roc_sso_hws_unlink(&dev->sso, ws->hws_id,
	memset(ws, 0, sizeof(*ws));
/* Derive eventdev resource limits from the probed SSO hardware.
 * NOTE(review): the dual-ws selector between the two max_event_ports
 * assignments and the final operand of the ternary are elided here.
 */
cn9k_sso_set_rsrc(void *arg)
	struct cnxk_sso_evdev *dev = arg;
	/* Dual-workslot mode: two HW workslots back one event port. */
	dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
	dev->max_event_ports = dev->sso.max_hws;
	/* Clamp queue count to the eventdev API maximum. */
	dev->max_event_queues =
		dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
			      RTE_EVENT_MAX_QUEUES_PER_DEV :
/* Allocate SSO workslots and work groups from the ROC layer.
 * NOTE(review): the doubling below presumably applies only when
 * dev->dual_ws (selector line elided) — confirm against full source.
 */
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
	struct cnxk_sso_evdev *dev = arg;
	/* Each dual-ws event port consumes two HW workslots. */
	hws = hws * CN9K_DUAL_WS_NB_WS;
	return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
/* Install the fast-path enqueue handlers on the eventdev. Both the
 * single- and dual-workslot handler sets are visible below; the
 * selector that picks one (presumably dev->dual_ws) is elided in this
 * extraction.
 */
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	/* Single-workslot enqueue handlers. */
	event_dev->enqueue = cn9k_sso_hws_enq;
	event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
	event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
	event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
	/* Dual-workslot enqueue handlers. */
	event_dev->enqueue = cn9k_sso_hws_dual_enq;
	event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
	event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
	event_dev->enqueue_forward_burst =
		cn9k_sso_hws_dual_enq_fwd_burst;
/* Allocate and initialize the per-port workslot structure (dual or
 * single variant), prefixed by one cache line reserved for the
 * cnxk_sso_hws_cookie. NOTE(review): the dual/single selector, the
 * cookie-size term inside the allocation sizes, NULL checks and return
 * statements are elided in this extraction.
 */
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	/* Dual-workslot port allocation. */
	dws = rte_zmalloc("cn9k_dual_ws",
			  sizeof(struct cn9k_sso_hws_dual) +
			  RTE_CACHE_LINE_SIZE);
	plt_err("Failed to alloc memory for port=%d", port_id);
	/* Step past the leading cookie to the usable workslot state. */
	dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
	dws->base[0] = roc_sso_hws_base_get(
		&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
	dws->base[1] = roc_sso_hws_base_get(
		&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
	cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
	cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
	dws->hws_id = port_id;
	/* Allocate event port memory */
	ws = rte_zmalloc("cn9k_ws",
			 sizeof(struct cn9k_sso_hws) +
			 RTE_CACHE_LINE_SIZE);
	plt_err("Failed to alloc memory for port=%d", port_id);
	/* First cache line is reserved for cookie */
	ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
	ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
	/* Presumes the hws state is the leading member of cn9k_sso_hws
	 * so the cast is valid — confirm against the struct definition.
	 */
	cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
	ws->hws_id = port_id;
/* dev_infos_get callback: report the CN9K driver name, then fill the
 * remaining capabilities via the common cnxk helper.
 */
cn9k_sso_info_get(struct rte_eventdev *event_dev,
		  struct rte_event_dev_info *dev_info)
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
	cnxk_sso_info_get(dev, dev_info);
/* dev_configure callback: validate the requested configuration,
 * re-initialize SSO resources sized to it, allocate XAQ pools, create
 * the event ports and restore any previously established queue links.
 * NOTE(review): `rc` declaration, error-branch `if`s / `goto`s, the
 * final hws-setup argument and return statements are elided in this
 * extraction; the trailing two lines are the error-cleanup path.
 */
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	rc = cnxk_sso_dev_validate(event_dev);
	plt_err("Invalid event device configuration");
	/* Drop any resources from a prior configure before re-acquiring. */
	roc_sso_rsrc_fini(&dev->sso);
	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
	plt_err("Failed to initialize SSO resources");
	rc = cnxk_sso_xaq_allocate(dev);
	rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);
	/* Error path: tear down HW resources and report zero ports. */
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
/* port_setup callback: delegate to the common cnxk helper, passing the
 * CN9K per-port setup hook; port_conf is unused by this PMD.
 */
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
		    const struct rte_event_port_conf *port_conf)
	RTE_SET_USED(port_conf);
	return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
/* port_release callback: recover the cookie that prefixes the port
 * allocation, undo HW links/state if the port was configured, then free
 * the original (cookie-based) allocation.
 * NOTE(review): NULL-cookie guard and early returns are elided here.
 */
cn9k_sso_port_release(void *port)
	struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
	struct cnxk_sso_evdev *dev;
	dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
	/* Nothing to undo for a port that was never configured. */
	if (!gws_cookie->configured)
	cn9k_sso_hws_release(dev, port);
	memset(gws_cookie, 0, sizeof(*gws_cookie));
	/* Free from the cookie, i.e. the start of the allocation. */
	rte_free(gws_cookie);
/* port_link callback: widen the u8 queue ids to the u16 HWGRP ids the
 * ROC layer expects, then perform the links; priorities are ignored by
 * this PMD. Returns the number of links established.
 * NOTE(review): the nb_links parameter line and `link` declaration are
 * elided in this extraction.
 */
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
		   const uint8_t queues[], const uint8_t priorities[],
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_links];
	RTE_SET_USED(priorities);
	for (link = 0; link < nb_links; link++)
		hwgrp_ids[link] = queues[link];
	nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
	return (int)nb_links;
/* port_unlink callback: mirror of cn9k_sso_port_link() — widen queue
 * ids to u16 HWGRP ids and unlink them. Returns the number unlinked.
 * NOTE(review): the `unlink` loop-variable declaration is elided here.
 */
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		     uint8_t queues[], uint16_t nb_unlinks)
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_unlinks];
	for (unlink = 0; unlink < nb_unlinks; unlink++)
		hwgrp_ids[unlink] = queues[unlink];
	nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
	return (int)nb_unlinks;
/* Eventdev ops table: CN9K-specific callbacks where dual-workslot
 * handling matters, common cnxk implementations elsewhere.
 * NOTE(review): the closing brace/terminator is elided in this view.
 */
static struct rte_eventdev_ops cn9k_sso_dev_ops = {
	.dev_infos_get = cn9k_sso_info_get,
	.dev_configure = cn9k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn9k_sso_port_setup,
	.port_release = cn9k_sso_port_release,
	.port_link = cn9k_sso_port_link,
	.port_unlink = cn9k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,
/* Device init hook (called from PCI probe): sanity-check the build,
 * install the ops table, run common cnxk init, size the resource limits
 * and reject devices with no usable queues/ports.
 * NOTE(review): `rc` declaration, the platform-model init call, several
 * error-branch returns and the final return are elided in this
 * extraction.
 */
cn9k_sso_init(struct rte_eventdev *event_dev)
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	/* CN9K has 128-byte cache lines; refuse a mismatched build. */
	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
	plt_err("Failed to initialize platform model");
	event_dev->dev_ops = &cn9k_sso_dev_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		cn9k_sso_fp_fns_set(event_dev);
	rc = cnxk_sso_init(event_dev);
	cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
	if (!dev->max_event_ports || !dev->max_event_queues) {
		plt_err("Not enough eventdev resource queues=%d ports=%d",
			dev->max_event_queues, dev->max_event_ports);
		cnxk_sso_fini(event_dev);
	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		    event_dev->data->name, dev->max_event_queues,
		    dev->max_event_ports);
/* PCI probe callback: create the eventdev with the CN9K private data
 * size and per-device init routine.
 */
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
	return rte_event_pmd_pci_probe(
		pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
/* PCI ID table of supported CN9K SSO devices.
 * NOTE(review): the table entries and terminator are elided in this
 * extraction.
 */
static const struct rte_pci_id cn9k_pci_sso_map[] = {
/* PCI driver descriptor: requires BAR mapping and IOVA-as-VA. */
static struct rte_pci_driver cn9k_pci_sso = {
	.id_table = cn9k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn9k_sso_probe,
	.remove = cnxk_sso_remove,
/* Register the PMD with EAL: driver name, PCI table, kernel-module
 * dependency, and the devargs this driver accepts.
 */
RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CN9K_SSO_SINGLE_WS "=1");