event/cnxk: add SSO GWS enqueue fast path
drivers/event/cnxk/cn9k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn9k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

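/*
 * In dual workslot mode each event port is backed by a pair of SSO
 * hardware workslots (GWS). CN9K_DUAL_WS_PAIR_ID() maps a port id and
 * pair index (0/1) to the underlying HWS id.
 */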
#define CN9K_DUAL_WS_NB_WS          2
#define CN9K_DUAL_WS_PAIR_ID(x, id) (((x)*CN9K_DUAL_WS_NB_WS) + id)

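/* Cache the per-workslot GWS register addresses used by the fast path. */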
static void
cn9k_init_hws_ops(struct cn9k_sso_hws_state *ws, uintptr_t base)
{
        ws->tag_op = base + SSOW_LF_GWS_TAG;
        ws->wqp_op = base + SSOW_LF_GWS_WQP;
        ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
        ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
        ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
        ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}

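/*
 * Link HWGRPs (event queues) to a workslot. In dual workslot mode both
 * workslots of the pair are linked to the same set of HWGRPs.
 */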
static int
cn9k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_link(&dev->sso,
                                      CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0), map,
                                      nb_link);
                rc |= roc_sso_hws_link(&dev->sso,
                                       CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                       map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

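/* Mirror of cn9k_sso_hws_link(): unlink HWGRPs from one or both workslots. */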
static int
cn9k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int rc;

        if (dev->dual_ws) {
                dws = port;
                rc = roc_sso_hws_unlink(&dev->sso,
                                        CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                        map, nb_link);
                rc |= roc_sso_hws_unlink(&dev->sso,
                                         CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                         map, nb_link);
        } else {
                ws = port;
                rc = roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
        }

        return rc;
}

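/*
 * Per-port setup: copy the HWGRP base addresses and XAQ flow-control
 * limits, and program the get_work timeout (NW_TIM) on each workslot.
 */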
static void
cn9k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        uint64_t val;

        /* Set get_work tmo for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;
        if (dev->dual_ws) {
                dws = hws;
                rte_memcpy(dws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                dws->fc_mem = dev->fc_mem;
                dws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, dws->base[0] + SSOW_LF_GWS_NW_TIM);
                plt_write64(val, dws->base[1] + SSOW_LF_GWS_NW_TIM);
        } else {
                ws = hws;
                rte_memcpy(ws->grps_base, grps_base,
                           sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
                ws->fc_mem = dev->fc_mem;
                ws->xaq_lmt = dev->xaq_lmt;

                plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
        }
}

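/* Unlink every event queue from the workslot(s) and clear the port state. */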
static void
cn9k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        int i;

        if (dev->dual_ws) {
                dws = hws;
                for (i = 0; i < dev->nb_event_queues; i++) {
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 0),
                                           (uint16_t *)&i, 1);
                        roc_sso_hws_unlink(&dev->sso,
                                           CN9K_DUAL_WS_PAIR_ID(dws->hws_id, 1),
                                           (uint16_t *)&i, 1);
                }
                memset(dws, 0, sizeof(*dws));
        } else {
                ws = hws;
                for (i = 0; i < dev->nb_event_queues; i++)
                        roc_sso_hws_unlink(&dev->sso, ws->hws_id,
                                           (uint16_t *)&i, 1);
                memset(ws, 0, sizeof(*ws));
        }
}

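/*
 * Dual workslot mode consumes two HWS per event port, halving the number
 * of ports that can be exposed.
 */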
static void
cn9k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                dev->max_event_ports = dev->sso.max_hws / CN9K_DUAL_WS_NB_WS;
        else
                dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                              RTE_EVENT_MAX_QUEUES_PER_DEV :
                              dev->sso.max_hwgrp;
}

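/* Request twice the HWS LFs when ports are backed by workslot pairs. */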
static int
cn9k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        if (dev->dual_ws)
                hws = hws * CN9K_DUAL_WS_NB_WS;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

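/*
 * Select the SSO GWS enqueue fast-path handlers; the dual workslot
 * variants replace the defaults when dual_ws is enabled.
 */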
static void
cn9k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        event_dev->enqueue = cn9k_sso_hws_enq;
        event_dev->enqueue_burst = cn9k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn9k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn9k_sso_hws_enq_fwd_burst;

        if (dev->dual_ws) {
                event_dev->enqueue = cn9k_sso_hws_dual_enq;
                event_dev->enqueue_burst = cn9k_sso_hws_dual_enq_burst;
                event_dev->enqueue_new_burst = cn9k_sso_hws_dual_enq_new_burst;
                event_dev->enqueue_forward_burst =
                        cn9k_sso_hws_dual_enq_fwd_burst;
        }
}

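/*
 * Allocate the per-port workslot structure; an extra cache line is
 * reserved up front for the cnxk_sso_hws_cookie.
 */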
static void *
cn9k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn9k_sso_hws_dual *dws;
        struct cn9k_sso_hws *ws;
        void *data;

        if (dev->dual_ws) {
                dws = rte_zmalloc("cn9k_dual_ws",
                                  sizeof(struct cn9k_sso_hws_dual) +
                                          RTE_CACHE_LINE_SIZE,
                                  RTE_CACHE_LINE_SIZE);
                if (dws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

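                /* First cache line is reserved for cookie */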
                dws = RTE_PTR_ADD(dws, sizeof(struct cnxk_sso_hws_cookie));
                dws->base[0] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 0));
                dws->base[1] = roc_sso_hws_base_get(
                        &dev->sso, CN9K_DUAL_WS_PAIR_ID(port_id, 1));
                cn9k_init_hws_ops(&dws->ws_state[0], dws->base[0]);
                cn9k_init_hws_ops(&dws->ws_state[1], dws->base[1]);
                dws->hws_id = port_id;
                dws->swtag_req = 0;
                dws->vws = 0;

                data = dws;
        } else {
                /* Allocate event port memory */
                ws = rte_zmalloc("cn9k_ws",
                                 sizeof(struct cn9k_sso_hws) +
                                         RTE_CACHE_LINE_SIZE,
                                 RTE_CACHE_LINE_SIZE);
                if (ws == NULL) {
                        plt_err("Failed to alloc memory for port=%d", port_id);
                        return NULL;
                }

                /* First cache line is reserved for cookie */
                ws = RTE_PTR_ADD(ws, sizeof(struct cnxk_sso_hws_cookie));
                ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
                cn9k_init_hws_ops((struct cn9k_sso_hws_state *)ws, ws->base);
                ws->hws_id = port_id;
                ws->swtag_req = 0;

                data = ws;
        }

        return data;
}

static void
cn9k_sso_info_get(struct rte_eventdev *event_dev,
                  struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN9K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

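/*
 * Validate the requested configuration, reinitialize the SSO LFs,
 * allocate XAQ memory and set up the event ports.
 */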
static int
cn9k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        roc_sso_rsrc_fini(&dev->sso);

        rc = cn9k_sso_rsrc_init(dev, dev->nb_event_ports, dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn9k_sso_init_hws_mem,
                                    cn9k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn9k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn9k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn9k_sso_hws_setup);
}

static void
cn9k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn9k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

static int
cn9k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                   const uint8_t queues[], const uint8_t priorities[],
                   uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
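        /* Per-link priorities are ignored; widen queue ids to 16-bit HWGRP ids. */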
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn9k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn9k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                     uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn9k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static struct rte_eventdev_ops cn9k_sso_dev_ops = {
        .dev_infos_get = cn9k_sso_info_get,
        .dev_configure = cn9k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn9k_sso_port_setup,
        .port_release = cn9k_sso_port_release,
        .port_link = cn9k_sso_port_link,
        .port_unlink = cn9k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,
};

static int
cn9k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

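        /* CN9K has 128-byte cache lines; reject builds compiled for another size. */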
        if (RTE_CACHE_LINE_SIZE != 128) {
                plt_err("Driver not compiled for CN9K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn9k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn9k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn9k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn9k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(
                pci_drv, pci_dev, sizeof(struct cnxk_sso_evdev), cn9k_sso_init);
}

static const struct rte_pci_id cn9k_pci_sso_map[] = {
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn9k_pci_sso = {
        .id_table = cn9k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn9k_sso_probe,
        .remove = cnxk_sso_remove,
};

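/* PCI registration and the runtime devargs exposed by the event_cn9k PMD. */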
RTE_PMD_REGISTER_PCI(event_cn9k, cn9k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn9k, cn9k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn9k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn9k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CN9K_SSO_SINGLE_WS "=1");