1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(C) 2021 Marvell.
5 #include "cn9k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
9 #define CN9K_DUAL_WS_NB_WS 2
10 #define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)
/*
 * Populate the per-workslot (HWS) fast-path operation addresses.
 * Each "op" is simply the workslot LF base address plus a fixed
 * SSOW_LF_GWS_* register offset; the worker fast path later issues
 * loads/stores directly to these precomputed addresses.
 * NOTE(review): this listing appears elided — the return-type line and
 * the function braces are missing from this chunk.
 */
13 cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
15 ws->tag_op = base + SSOW_LF_GWS_TAG;
16 ws->wqp_op = base + SSOW_LF_GWS_WQP;
17 ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
18 ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
19 ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
20 ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
/*
 * Link the HW groups listed in @map (nb_link entries) to the event
 * port @port.  In dual-workslot mode both workslots of the pair
 * (CN9K_DUAL_WS_PAIR_ID id 0 and 1) must be linked so either slot can
 * receive work from the linked groups; the two results are OR-ed.
 * NOTE(review): the branch selecting dual vs. single workslot (likely
 * `if (dev->dual_ws)`) and the final return are elided in this chunk.
 */
24 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
26 struct cnxk_sso_evdev *dev = arg;
27 struct cn9k_sso_hws_dual *dws;
28 struct cn9k_sso_hws *ws;
/* Dual-workslot path: link both paired workslots. */
33 rc = roc_sso_hws_link(&dev->sso,
34 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
36 rc |= roc_sso_hws_link(&dev->sso,
37 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
/* Single-workslot path: one link call on the port's own HWS id. */
41 rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
/*
 * Mirror of cn9k_sso_hws_link(): unlink the HW groups in @map from the
 * event port.  Dual-workslot ports unlink both workslots of the pair
 * and OR the results; single-workslot ports issue one unlink call.
 * NOTE(review): the dual/single selection branch and the return
 * statement are elided in this chunk.
 */
48 cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
50 struct cnxk_sso_evdev *dev = arg;
51 struct cn9k_sso_hws_dual *dws;
52 struct cn9k_sso_hws *ws;
/* Dual-workslot path: unlink both paired workslots. */
57 rc = roc_sso_hws_unlink(&dev->sso,
58 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
60 rc |= roc_sso_hws_unlink(&dev->sso,
61 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
/* Single-workslot path. */
65 rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
/*
 * Per-port setup callback: copy the HW-group base-address table into
 * the port structure, cache the XAQ flow-control pointer/limit, and
 * program the get_work timeout (NW_TIM) register(s).
 * NOTE(review): the dual/single selection branch is elided; the dual
 * path writes NW_TIM on both workslot bases, the single path on one.
 */
72 cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
74 struct cnxk_sso_evdev *dev = arg;
75 struct cn9k_sso_hws_dual *dws;
76 struct cn9k_sso_hws *ws;
/* Set get_work tmo for HWS */
/* NW_TIM is programmed in units derived from the configured dequeue
 * timeout; the -1 adjusts to the register's n-1 encoding —
 * TODO(review): confirm encoding against the SSOW register spec. */
80 val = NSEC2USEC(dev->deq_tmo_ns) - 1;
/* Dual-workslot port: copy group bases and FC state, then program
 * the timeout on both paired workslots. */
83 rte_memcpy(dws->grps_base, grps_base,
84 sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
85 dws->fc_mem = dev->fc_mem;
86 dws->xaq_lmt = dev->xaq_lmt;
88 plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
89 plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
/* Single-workslot port: same state, one NW_TIM write. */
92 rte_memcpy(ws->grps_base, grps_base,
93 sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
94 ws->fc_mem = dev->fc_mem;
95 ws->xaq_lmt = dev->xaq_lmt;
97 plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
/*
 * Per-port teardown: unlink the port from every event queue and zero
 * the port structure so a subsequent setup starts clean.
 * NOTE(review): the dual/single selection branch and loop bodies'
 * trailing arguments are elided in this chunk.
 */
102 cn9k_sso_hws_release(void *arg, void *hws)
104 struct cnxk_sso_evdev *dev = arg;
105 struct cn9k_sso_hws_dual *dws;
106 struct cn9k_sso_hws *ws;
/* Dual-workslot port: unlink each queue from both paired workslots. */
111 for (i = 0; i < dev->nb_event_queues; i++) {
112 roc_sso_hws_unlink(&dev->sso,
113 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
115 roc_sso_hws_unlink(&dev->sso,
116 CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
119 memset(dws, 0, sizeof(*dws));
/* Single-workslot port: one unlink per queue, then zero the port. */
122 for (i = 0; i < dev->nb_event_queues; i++)
123 roc_sso_hws_unlink(&dev->sso, ws->hws_id,
125 memset(ws, 0, sizeof(*ws));
/*
 * Derive the eventdev resource limits from the probed SSO hardware.
 * In dual-workslot mode each event port consumes two HWS, so the
 * port count is halved; queue count is capped at the rte_eventdev
 * maximum.  NOTE(review): the dual/single branch and the final
 * alternative of the ternary are elided in this chunk.
 */
130 cn9k_sso_set_rsrc(void *arg)
132 struct cnxk_sso_evdev *dev = arg;
/* Dual-workslot: two HWS per event port. */
135 dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
/* Single-workslot: one HWS per event port. */
137 dev->max_event_ports = dev->sso.max_hws;
138 dev->max_event_queues =
139 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
140 RTE_EVENT_MAX_QUEUES_PER_DEV :
/*
 * Reserve @hws workslots and @hwgrp groups from the SSO.  In
 * dual-workslot mode the requested port count is scaled by the
 * number of workslots per port before handing off to the ROC layer.
 * NOTE(review): the `if (dev->dual_ws)` guard around the scaling
 * appears elided in this chunk.
 */
145 cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
147 struct cnxk_sso_evdev *dev = arg;
/* Two hardware workslots back every dual-mode event port. */
150 hws = hws * CN9K_DUAL_WS_NB_WS;
152 return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
/*
 * Select the fast-path enqueue/dequeue function pointers for this
 * device.  Defaults are the single-workslot handlers; when a dequeue
 * timeout is configured the tmo variants (which honor NW_TIM) are
 * substituted, and dual-workslot devices get the dual variants.
 * NOTE(review): the `if (dev->dual_ws)` guard and closing braces are
 * elided in this chunk.
 */
156 cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
158 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
/* Single-workslot enqueue handlers. */
160 event_dev->enqueue = cn9k_sso_hws_enq;
161 event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
162 event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
163 event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
/* Single-workslot dequeue handlers; timeout variants when a
 * dequeue timeout was configured. */
165 event_dev->dequeue = cn9k_sso_hws_deq;
166 event_dev->dequeue_burst = cn9k_sso_hws_deq_burst;
167 if (dev->deq_tmo_ns) {
168 event_dev->dequeue = cn9k_sso_hws_tmo_deq;
169 event_dev->dequeue_burst = cn9k_sso_hws_tmo_deq_burst;
/* Dual-workslot overrides (guard elided in this listing). */
173 event_dev->enqueue = cn9k_sso_hws_dual_enq;
174 event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
175 event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
176 event_dev->enqueue_forward_burst =
177 cn9k_sso_hws_dual_enq_fwd_burst;
179 event_dev->dequeue = cn9k_sso_hws_dual_deq;
180 event_dev->dequeue_burst = cn9k_sso_hws_dual_deq_burst;
181 if (dev->deq_tmo_ns) {
182 event_dev->dequeue = cn9k_sso_hws_dual_tmo_deq;
183 event_dev->dequeue_burst =
184 cn9k_sso_hws_dual_tmo_deq_burst;
/*
 * Allocate and initialize the per-port workslot structure for
 * @port_id.  The allocation is oversized by one cookie header
 * (struct cnxk_sso_hws_cookie) which occupies the first cache line;
 * the returned pointer is advanced past it.  Dual-workslot ports get
 * a cn9k_sso_hws_dual with two base addresses and two op-state
 * blocks; single-workslot ports get a cn9k_sso_hws.
 * NOTE(review): the dual/single branch, allocation-failure returns,
 * and final return are elided in this chunk.
 */
190 cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
192 struct cnxk_sso_evdev *dev = arg;
193 struct cn9k_sso_hws_dual *dws;
194 struct cn9k_sso_hws *ws;
/* Dual-workslot port allocation (zeroed, cache-line aligned,
 * cookie prepended). */
198 dws = rte_zmalloc("cn9k_dual_ws",
199 sizeof(struct cn9k_sso_hws_dual) +
201 RTE_CACHE_LINE_SIZE);
203 plt_err("Failed to alloc memory for port=%d", port_id);
/* Skip past the cookie header reserved at the front. */
207 dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
/* Resolve LF bases for both workslots of the pair and precompute
 * their fast-path op addresses. */
208 dws->base[0] = roc_sso_hws_base_get(
209 &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
210 dws->base[1] = roc_sso_hws_base_get(
211 &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
212 cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
213 cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
214 dws->hws_id = port_id;
/* Allocate event port memory */
221 ws = rte_zmalloc("cn9k_ws",
222 sizeof(struct cn9k_sso_hws) +
224 RTE_CACHE_LINE_SIZE);
226 plt_err("Failed to alloc memory for port=%d", port_id);
/* First cache line is reserved for cookie */
231 ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
232 ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
/* cn9k_sso_hws begins with a cn9k_sso_hws_state, so the cast is
 * valid for initializing the op addresses in place. */
233 cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
234 ws->hws_id = port_id;
/*
 * rte_eventdev dev_infos_get callback: report the CN9K driver name
 * and delegate the capability/limit fields to the common cnxk layer.
 */
244 cn9k_sso_info_get(struct rte_eventdev *event_dev,
245 struct rte_event_dev_info *dev_info)
247 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
249 dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
250 cnxk_sso_info_get(dev, dev_info);
/*
 * rte_eventdev dev_configure callback.  Validates the requested
 * configuration, re-initializes SSO resources for the new port/queue
 * counts, allocates the XAQ pool, creates the event ports, and
 * restores any links that existed before a reconfigure.  On the
 * (elided) error path the SSO resources are released and the port
 * count reset to zero.
 * NOTE(review): the `if (rc < 0)` guards, gotos/returns, and the
 * error label are elided in this chunk.
 */
254 cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
256 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
259 rc = cnxk_sso_dev_validate(event_dev);
261 plt_err("Invalid event device configuration");
/* Drop any previous resource allocation before re-sizing. */
265 roc_sso_rsrc_fini(&dev->sso);
267 rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
269 plt_err("Failed to initialize SSO resources");
273 rc = cnxk_sso_xaq_allocate(dev);
277 rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
/* Restore any prior port-queue mapping. */
283 cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);
/* Error cleanup: release SSO resources and mark no ports. */
290 roc_sso_rsrc_fini(&dev->sso);
291 dev->nb_event_ports = 0;
/*
 * rte_eventdev port_setup callback: thin wrapper that delegates to
 * the common cnxk setup with the CN9K-specific HWS-setup hook.
 * port_conf is unused here; the common layer applies defaults.
 */
296 cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
297 const struct rte_event_port_conf *port_conf)
300 RTE_SET_USED(port_conf);
301 return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
/*
 * rte_eventdev port_release callback.  Recovers the cookie header
 * that precedes the port structure, and — only if the port was
 * actually configured — unlinks/zeroes it via cn9k_sso_hws_release
 * before freeing the whole allocation (cookie is the true malloc'd
 * base pointer).  NOTE(review): the NULL-port guard and the early
 * exit after the `configured` check are elided in this chunk.
 */
305 cn9k_sso_port_release(void *port)
307 struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
308 struct cnxk_sso_evdev *dev;
313 dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
314 if (!gws_cookie->configured)
317 cn9k_sso_hws_release(dev, port);
318 memset(gws_cookie, 0, sizeof(*gws_cookie));
/* The cookie address is the original allocation base. */
320 rte_free(gws_cookie);
/*
 * rte_eventdev port_link callback.  Widens the uint8_t queue ids to
 * the uint16_t HW-group ids the ROC layer expects, then links them.
 * Priorities are not supported by this PMD and are ignored.
 * NOTE(review): hwgrp_ids is a VLA sized by the caller-supplied
 * nb_links — the eventdev layer bounds this by the queue count, but
 * worth confirming; large values would risk stack overflow.
 */
324 cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
325 const uint8_t queues[], const uint8_t priorities[],
328 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
329 uint16_t hwgrp_ids[nb_links];
332 RTE_SET_USED(priorities);
333 for (link = 0; link < nb_links; link++)
334 hwgrp_ids[link] = queues[link];
/* Returns the number of queues successfully linked. */
335 nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
337 return (int)nb_links;
/*
 * rte_eventdev port_unlink callback: mirror of cn9k_sso_port_link.
 * Widens the queue ids to uint16_t HW-group ids and unlinks them,
 * returning the number of queues unlinked.
 * NOTE(review): same VLA-sized-by-caller-count pattern as port_link.
 */
341 cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
342 uint8_t queues[], uint16_t nb_unlinks)
344 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
345 uint16_t hwgrp_ids[nb_unlinks];
348 for (unlink = 0; unlink < nb_unlinks; unlink++)
349 hwgrp_ids[unlink] = queues[unlink];
350 nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
352 return (int)nb_unlinks;
/*
 * Eventdev ops table: CN9K-specific callbacks where the silicon
 * differs (configure, port setup/release, link/unlink, info), common
 * cnxk implementations for the rest.
 */
355 static struct rte_eventdev_ops cn9k_sso_dev_ops = {
356 .dev_infos_get = cn9k_sso_info_get,
357 .dev_configure = cn9k_sso_dev_configure,
358 .queue_def_conf = cnxk_sso_queue_def_conf,
359 .queue_setup = cnxk_sso_queue_setup,
360 .queue_release = cnxk_sso_queue_release,
361 .port_def_conf = cnxk_sso_port_def_conf,
362 .port_setup = cn9k_sso_port_setup,
363 .port_release = cn9k_sso_port_release,
364 .port_link = cn9k_sso_port_link,
365 .port_unlink = cn9k_sso_port_unlink,
366 .timeout_ticks = cnxk_sso_timeout_ticks,
/*
 * Device init hook passed to the PCI probe helper.  Rejects builds
 * whose cache line size does not match CN9K hardware (128B), wires
 * up the ops table, and for secondary processes only re-installs the
 * fast-path function pointers.  The primary process additionally runs
 * the common cnxk init and derives resource limits, failing if the
 * hardware exposes no usable ports/queues.
 * NOTE(review): several guards/returns (platform-model init check,
 * cnxk_sso_init rc check, final return) are elided in this chunk.
 */
370 cn9k_sso_init(struct rte_eventdev *event_dev)
372 struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
/* CN9K requires a 128-byte cache line; refuse mismatched builds. */
375 if (RTE_CACHE_LINE_SIZE != 128) {
376 plt_err("Driver not compiled for CN9K");
382 plt_err("Failed to initialize platform model");
386 event_dev->dev_ops = &cn9k_sso_dev_ops;
/* For secondary processes, the primary has done all the work */
388 if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
389 cn9k_sso_fp_fns_set(event_dev);
393 rc = cnxk_sso_init(event_dev);
397 cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
398 if (!dev->max_event_ports || !dev->max_event_queues) {
399 plt_err("Not enough eventdev resource queues=%d ports=%d",
400 dev->max_event_queues, dev->max_event_ports);
/* Roll back the common init before failing. */
401 cnxk_sso_fini(event_dev);
405 plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
406 event_dev->data->name, dev->max_event_queues,
407 dev->max_event_ports);
/*
 * PCI probe callback: delegate to the generic eventdev PCI probe
 * helper, supplying the private-data size and the CN9K init hook.
 */
413 cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
415 return rte_event_pmd_pci_probe(
416 pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
/* PCI id table for the CN9K SSO device (entries elided in this chunk). */
419 static const struct rte_pci_id cn9k_pci_sso_map[] = {
/*
 * PCI driver definition: needs BAR mapping and IOVA-as-VA; removal is
 * handled by the common cnxk teardown.
 */
425 static struct rte_pci_driver cn9k_pci_sso = {
426 .id_table = cn9k_pci_sso_map,
427 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
428 .probe = cn9k_sso_probe,
429 .remove = cnxk_sso_remove,
/* Register the PMD, its id table, kernel-module dependency, and the
 * devargs it accepts (xae_cnt, qos, single_ws). */
432 RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
433 RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
434 RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
435 RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
436 CNXK_SSO_GGRP_QOS "=<string>"
437 CN9K_SSO_SINGLE_WS "=1");