event/cnxk: add SSO GWS dequeue fast path
[dpdk.git] / drivers / event / cnxk / cn9k_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "cn9k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
8
#define CN9K_DUAL_WS_NB_WS          2
/* Map a logical dual-workslot port id to one of its two physical HWS ids.
 * Both macro arguments are fully parenthesized so that expression
 * arguments (e.g. a ternary) expand correctly.
 */
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x) * CN9K_DUAL_WS_NB_WS) + (id))
11
12 static void
13 cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
14 {
15         ws->tag_op = base + SSOW_LF_GWS_TAG;
16         ws->wqp_op = base + SSOW_LF_GWS_WQP;
17         ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
18         ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
19         ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
20         ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
21 }
22
23 static int
24 cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
25 {
26         struct cnxk_sso_evdev *dev = arg;
27         struct cn9k_sso_hws_dual *dws;
28         struct cn9k_sso_hws *ws;
29         int rc;
30
31         if (dev->dual_ws) {
32                 dws = port;
33                 rc = roc_sso_hws_link(&dev->sso,
34                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
35                                       nb_link);
36                 rc |= roc_sso_hws_link(&dev->sso,
37                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
38                                        map, nb_link);
39         } else {
40                 ws = port;
41                 rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
42         }
43
44         return rc;
45 }
46
47 static int
48 cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
49 {
50         struct cnxk_sso_evdev *dev = arg;
51         struct cn9k_sso_hws_dual *dws;
52         struct cn9k_sso_hws *ws;
53         int rc;
54
55         if (dev->dual_ws) {
56                 dws = port;
57                 rc = roc_sso_hws_unlink(&dev->sso,
58                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
59                                         map, nb_link);
60                 rc |= roc_sso_hws_unlink(&dev->sso,
61                                          CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
62                                          map, nb_link);
63         } else {
64                 ws = port;
65                 rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
66         }
67
68         return rc;
69 }
70
/* Populate per-port software state from the device (HWGRP base addresses,
 * XAQ flow-control memory and limit) and arm the GWS new-work timer used
 * by the timeout dequeue path.
 */
static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	uint64_t val;

	/* Set get_work tmo for HWS.
	 * NOTE(review): NSEC2USEC(deq_tmo_ns) - 1 wraps to a huge value if
	 * deq_tmo_ns is below 1 us; presumably the common config path clamps
	 * it to a minimum — confirm against cnxk_sso_dev_validate().
	 */
	val = NSEC2USEC(dev->deq_tmo_ns) - 1;
	if (dev->dual_ws) {
		/* Dual-workslot port: mirror state into the pair structure
		 * and arm the timer on both underlying GWS.
		 */
		dws = hws;
		rte_memcpy(dws->grps_base, grps_base,
			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
		dws->fc_mem = dev->fc_mem;
		dws->xaq_lmt = dev->xaq_lmt;

		plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
		plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
	} else {
		ws = hws;
		rte_memcpy(ws->grps_base, grps_base,
			   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
		ws->fc_mem = dev->fc_mem;
		ws->xaq_lmt = dev->xaq_lmt;

		plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
	}
}
100
101 static void
102 cn9k_sso_hws_release(void *arg, void *hws)
103 {
104         struct cnxk_sso_evdev *dev = arg;
105         struct cn9k_sso_hws_dual *dws;
106         struct cn9k_sso_hws *ws;
107         int i;
108
109         if (dev->dual_ws) {
110                 dws = hws;
111                 for (i = 0; i < dev->nb_event_queues; i++) {
112                         roc_sso_hws_unlink(&dev->sso,
113                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
114                                            (uint16_t *)&i, 1);
115                         roc_sso_hws_unlink(&dev->sso,
116                                            CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
117                                            (uint16_t *)&i, 1);
118                 }
119                 memset(dws, 0, sizeof(*dws));
120         } else {
121                 ws = hws;
122                 for (i = 0; i < dev->nb_event_queues; i++)
123                         roc_sso_hws_unlink(&dev->sso, ws->hws_id,
124                                            (uint16_t *)&i, 1);
125                 memset(ws, 0, sizeof(*ws));
126         }
127 }
128
129 static void
130 cn9k_sso_set_rsrc(void *arg)
131 {
132         struct cnxk_sso_evdev *dev = arg;
133
134         if (dev->dual_ws)
135                 dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
136         else
137                 dev->max_event_ports = dev->sso.max_hws;
138         dev->max_event_queues =
139                 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
140                               RTE_EVENT_MAX_QUEUES_PER_DEV :
141                               dev->sso.max_hwgrp;
142 }
143
144 static int
145 cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
146 {
147         struct cnxk_sso_evdev *dev = arg;
148
149         if (dev->dual_ws)
150                 hws = hws * CN9K_DUAL_WS_NB_WS;
151
152         return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
153 }
154
155 static void
156 cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
157 {
158         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
159
160         event_dev->enqueue = cn9k_sso_hws_enq;
161         event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
162         event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
163         event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;
164
165         event_dev->dequeue = cn9k_sso_hws_deq;
166         event_dev->dequeue_burst = cn9k_sso_hws_deq_burst;
167         if (dev->deq_tmo_ns) {
168                 event_dev->dequeue = cn9k_sso_hws_tmo_deq;
169                 event_dev->dequeue_burst = cn9k_sso_hws_tmo_deq_burst;
170         }
171
172         if (dev->dual_ws) {
173                 event_dev->enqueue = cn9k_sso_hws_dual_enq;
174                 event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
175                 event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
176                 event_dev->enqueue_forward_burst =
177                         cn9k_sso_hws_dual_enq_fwd_burst;
178
179                 event_dev->dequeue = cn9k_sso_hws_dual_deq;
180                 event_dev->dequeue_burst = cn9k_sso_hws_dual_deq_burst;
181                 if (dev->deq_tmo_ns) {
182                         event_dev->dequeue = cn9k_sso_hws_dual_tmo_deq;
183                         event_dev->dequeue_burst =
184                                 cn9k_sso_hws_dual_tmo_deq_burst;
185                 }
186         }
187 }
188
/* Allocate and initialize the per-port HWS structure (single or dual
 * workslot). The allocation is over-sized by one cache line; that first
 * region holds the cnxk_sso_hws_cookie and the returned pointer is
 * advanced past it, so callers get the working structure while
 * cnxk_sso_hws_get_cookie() can recover the cookie by subtraction.
 * Returns NULL on allocation failure.
 */
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
	struct cnxk_sso_evdev *dev = arg;
	struct cn9k_sso_hws_dual *dws;
	struct cn9k_sso_hws *ws;
	void *data;

	if (dev->dual_ws) {
		dws = rte_zmalloc("cn9k_dual_ws",
				  sizeof(struct cn9k_sso_hws_dual) +
					  RTE_CACHE_LINE_SIZE,
				  RTE_CACHE_LINE_SIZE);
		if (dws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		/* Skip the cookie region (NOTE(review): assumes the cookie
		 * fits in the extra RTE_CACHE_LINE_SIZE bytes — confirm
		 * sizeof(struct cnxk_sso_hws_cookie) <= RTE_CACHE_LINE_SIZE).
		 */
		dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
		/* Each half of the pair gets its own GWS base and
		 * precomputed fast-path register addresses.
		 */
		dws->base[0] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
		dws->base[1] = roc_sso_hws_base_get(
			&dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
		cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
		cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
		dws->hws_id = port_id;
		dws->swtag_req = 0;
		dws->vws = 0;

		data = dws;
	} else {
		/* Allocate event port memory */
		ws = rte_zmalloc("cn9k_ws",
				 sizeof(struct cn9k_sso_hws) +
					 RTE_CACHE_LINE_SIZE,
				 RTE_CACHE_LINE_SIZE);
		if (ws == NULL) {
			plt_err("Failed to alloc memory for port=%d", port_id);
			return NULL;
		}

		/* First cache line is reserved for cookie */
		ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
		ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
		cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
		ws->hws_id = port_id;
		ws->swtag_req = 0;

		data = ws;
	}

	return data;
}
242
243 static void
244 cn9k_sso_info_get(struct rte_eventdev *event_dev,
245                   struct rte_event_dev_info *dev_info)
246 {
247         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
248
249         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
250         cnxk_sso_info_get(dev, dev_info);
251 }
252
/* eventdev dev_configure: validate the requested configuration, rebuild
 * SSO resources to match the new port/queue counts, allocate XAQ pools,
 * set up the event ports, and restore any queue links that existed
 * before a reconfigure. On failure past resource init, tears the
 * resources back down via the cnxk_rsrc_fini label.
 */
static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	rc = cnxk_sso_dev_validate(event_dev);
	if (rc < 0) {
		plt_err("Invalid event device configuration");
		return -EINVAL;
	}

	/* Free any resources from a previous configure before re-init. */
	roc_sso_rsrc_fini(&dev->sso);

	rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
	if (rc < 0) {
		plt_err("Failed to initialize SSO resources");
		return -ENODEV;
	}

	rc = cnxk_sso_xaq_allocate(dev);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
				    cn9k_sso_hws_setup);
	if (rc < 0)
		goto cnxk_rsrc_fini;

	/* Restore any prior port-queue mapping. */
	cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

	dev->configured = 1;
	/* Ensure all prior stores are visible before reporting configured. */
	rte_mb();

	return 0;
cnxk_rsrc_fini:
	roc_sso_rsrc_fini(&dev->sso);
	dev->nb_event_ports = 0;
	return rc;
}
294
295 static int
296 cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
297                     const struct rte_event_port_conf *port_conf)
298 {
299
300         RTE_SET_USED(port_conf);
301         return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
302 }
303
/* eventdev port_release: unlink and clear a configured port, then free
 * the original allocation. The cookie lives one header before the port
 * pointer (see cn9k_sso_init_hws_mem), so rte_free() must be given the
 * cookie address, not `port`. Computing the cookie address before the
 * NULL check is pointer arithmetic only — no dereference happens until
 * after the check.
 */
static void
cn9k_sso_port_release(void *port)
{
	struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
	struct cnxk_sso_evdev *dev;

	if (port == NULL)
		return;

	dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
	/* A port that was never configured has nothing to unlink. */
	if (!gws_cookie->configured)
		goto free;

	cn9k_sso_hws_release(dev, port);
	memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
	rte_free(gws_cookie);
}
322
/* eventdev port_link: widen the 8-bit queue ids to the 16-bit HWGRP ids
 * the ROC layer expects, then delegate to cn9k_sso_hws_link. Priorities
 * are not supported by this PMD and are ignored.
 */
static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
		   const uint8_t queues[], const uint8_t priorities[],
		   uint16_t nb_links)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_links];
	uint16_t i;

	RTE_SET_USED(priorities);
	for (i = 0; i < nb_links; i++)
		hwgrp_ids[i] = queues[i];

	return (int)cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
}
339
/* eventdev port_unlink: widen the 8-bit queue ids to 16-bit HWGRP ids and
 * delegate to cn9k_sso_hws_unlink.
 */
static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
		     uint8_t queues[], uint16_t nb_unlinks)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	uint16_t hwgrp_ids[nb_unlinks];
	uint16_t i;

	for (i = 0; i < nb_unlinks; i++)
		hwgrp_ids[i] = queues[i];

	return (int)cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);
}
354
/* CN9K eventdev operation table. CN9K-specific callbacks are the ones
 * prefixed cn9k_; the rest are shared with CN10K via the common cnxk
 * layer. Fast-path handlers are installed separately by
 * cn9k_sso_fp_fns_set().
 */
static struct rte_eventdev_ops cn9k_sso_dev_ops = {
	.dev_infos_get = cn9k_sso_info_get,
	.dev_configure = cn9k_sso_dev_configure,
	.queue_def_conf = cnxk_sso_queue_def_conf,
	.queue_setup = cnxk_sso_queue_setup,
	.queue_release = cnxk_sso_queue_release,
	.port_def_conf = cnxk_sso_port_def_conf,
	.port_setup = cn9k_sso_port_setup,
	.port_release = cn9k_sso_port_release,
	.port_link = cn9k_sso_port_link,
	.port_unlink = cn9k_sso_port_unlink,
	.timeout_ticks = cnxk_sso_timeout_ticks,
};
368
/* PCI-probe-time device init. Primary process: initialize the platform
 * layer and common SSO state, then derive and sanity-check the port and
 * queue limits. Secondary processes only install the fast-path handlers
 * and reuse the primary's state. Returns 0 on success or a negative
 * errno.
 */
static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
	struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
	int rc;

	/* CN9K data-path structures are laid out for 128-byte cache lines;
	 * a build with any other line size would mis-place them.
	 */
	if (RTE_CACHE_LINE_SIZE != 128) {
		plt_err("Driver not compiled for CN9K");
		return -EFAULT;
	}

	rc = roc_plt_init();
	if (rc < 0) {
		plt_err("Failed to initialize platform model");
		return rc;
	}

	event_dev->dev_ops = &cn9k_sso_dev_ops;
	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		cn9k_sso_fp_fns_set(event_dev);
		return 0;
	}

	rc = cnxk_sso_init(event_dev);
	if (rc < 0)
		return rc;

	cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
	if (!dev->max_event_ports || !dev->max_event_queues) {
		plt_err("Not enough eventdev resource queues=%d ports=%d",
			dev->max_event_queues, dev->max_event_ports);
		/* Undo cnxk_sso_init() before failing the probe. */
		cnxk_sso_fini(event_dev);
		return -ENODEV;
	}

	plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
		    event_dev->data->name, dev->max_event_queues,
		    dev->max_event_ports);

	return 0;
}
411
412 static int
413 cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
414 {
415         return rte_event_pmd_pci_probe(
416                 pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
417 }
418
/* PCI id match table, terminated by the all-zero sentinel entry.
 * NOTE(review): only the sentinel is visible here — the CN9K SSO
 * device-id entries appear to be missing from this chunk; confirm they
 * are present in the full file.
 */
static const struct rte_pci_id cn9k_pci_sso_map[] = {
	{
		.vendor_id = 0,
	},
};
424
/* PCI driver definition: BARs must be mapped and IOVA must equal VA for
 * the SSO hardware; removal is handled by the common cnxk teardown.
 */
static struct rte_pci_driver cn9k_pci_sso = {
	.id_table = cn9k_pci_sso_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
	.probe = cn9k_sso_probe,
	.remove = cnxk_sso_remove,
};
431
/* Register the driver with the PCI bus under the name "event_cn9k". */
RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
/* The device must be bound to vfio-pci for userspace access. */
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
/* Advertised devargs: xae_cnt, per-group QoS string, and the flag that
 * forces single-workslot mode.
 */
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
			      CNXK_SSO_GGRP_QOS "=<string>"
			      CN9K_SSO_SINGLE_WS "=1");