drivers/event/cnxk/cn10k_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cn10k_worker.h"
#include "cnxk_eventdev.h"
#include "cnxk_worker.h"

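/*
 * Pre-compute the per-HWS MMIO operation addresses (LF base plus fixed
 * register offsets) so the fast-path enqueue/dequeue handlers can issue
 * loads and stores directly without recomputing base + offset each time.
 */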
static void
cn10k_init_hws_ops(struct cn10k_sso_hws *ws, uintptr_t base)
{
        ws->tag_wqe_op = base + SSOW_LF_GWS_WQE0;
        ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
        ws->updt_wqe_op = base + SSOW_LF_GWS_OP_UPD_WQP_GRP1;
        ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
        ws->swtag_untag_op = base + SSOW_LF_GWS_OP_SWTAG_UNTAG;
        ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
        ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
}

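/*
 * Build the GET_WORK request word used by the fast path for the
 * configured getwork mode. BIT(16) is the WAIT bit (the same bit appears
 * in the grouped request built by cn10k_sso_hws_flush_events() below);
 * BIT(19)/BIT(20) appear to select the prefetch and prefetch-with-WFE
 * behaviours respectively, inferred here from the gw_mode cases rather
 * than from the hardware manual.
 */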
static uint32_t
cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
{
        uint32_t wdata = BIT(16) | 1;

        switch (dev->gw_mode) {
        case CN10K_GW_MODE_NONE:
        default:
                break;
        case CN10K_GW_MODE_PREF:
                wdata |= BIT(19);
                break;
        case CN10K_GW_MODE_PREF_WFE:
                wdata |= BIT(20) | BIT(19);
                break;
        }

        return wdata;
}

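/*
 * Allocate and initialize a HWS (event port). One extra cache line is
 * allocated in front of the returned structure for the driver cookie;
 * see the matching cnxk_sso_hws_get_cookie() lookup in
 * cn10k_sso_port_release().
 */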
static void *
cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws;

        /* Allocate event port memory */
        ws = rte_zmalloc("cn10k_ws",
                         sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
                         RTE_CACHE_LINE_SIZE);
        if (ws == NULL) {
                plt_err("Failed to alloc memory for port=%d", port_id);
                return NULL;
        }

        /* First cache line is reserved for cookie */
        ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
        ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
        cn10k_init_hws_ops(ws, ws->base);
        ws->hws_id = port_id;
        ws->swtag_req = 0;
        ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
        ws->lmt_base = dev->sso.lmt_base;

        return ws;
}

static int
cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = port;

        return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
}

static int
cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = port;

        return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
}

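/*
 * Per-port setup run at configure time: copy the HWGRP base addresses
 * and XAQ flow-control limits into the HWS and program the getwork
 * timeout; the NSEC2USEC conversion suggests NW_TIM ticks in
 * microseconds.
 */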
static void
cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        uint64_t val;

        rte_memcpy(ws->grps_base, grps_base,
                   sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
        ws->fc_mem = dev->fc_mem;
        ws->xaq_lmt = dev->xaq_lmt;

        /* Set get_work timeout for HWS */
        val = NSEC2USEC(dev->deq_tmo_ns) - 1;
        plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
}

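/* Unlink the HWS from every event queue and wipe its software state. */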
static void
cn10k_sso_hws_release(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        int i;

        for (i = 0; i < dev->nb_event_queues; i++)
                roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
        memset(ws, 0, sizeof(*ws));
}

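/*
 * Drain one event queue (HWGRP) through this HWS: stop further event
 * admission via QCTL, then issue grouped GET_WORK requests against the
 * queue until its AQ, CQ and DS counts read zero, handing every flushed
 * event to the caller-supplied handler.
 */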
static void
cn10k_sso_hws_flush_events(void *hws, uint8_t queue_id, uintptr_t base,
                           cnxk_handle_event_t fn, void *arg)
{
        struct cn10k_sso_hws *ws = hws;
        uint64_t cq_ds_cnt = 1;
        uint64_t aq_cnt = 1;
        uint64_t ds_cnt = 1;
        struct rte_event ev;
        uint64_t val, req;

        plt_write64(0, base + SSO_LF_GGRP_QCTL);

        req = queue_id;     /* GGRP ID */
        req |= BIT_ULL(18); /* Grouped */
        req |= BIT_ULL(16); /* WAIT */

        aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
        ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
        cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
        cq_ds_cnt &= 0x3FFF3FFF0000;

        while (aq_cnt || cq_ds_cnt || ds_cnt) {
                plt_write64(req, ws->getwrk_op);
                cn10k_sso_hws_get_work_empty(ws, &ev);
                if (fn != NULL && ev.u64 != 0)
                        fn(arg, ev);
                if (ev.sched_type != SSO_TT_EMPTY)
                        cnxk_sso_hws_swtag_flush(ws->tag_wqe_op,
                                                 ws->swtag_flush_op);
                do {
                        val = plt_read64(ws->base + SSOW_LF_GWS_PENDSTATE);
                } while (val & BIT_ULL(56));
                aq_cnt = plt_read64(base + SSO_LF_GGRP_AQ_CNT);
                ds_cnt = plt_read64(base + SSO_LF_GGRP_MISC_CNT);
                cq_ds_cnt = plt_read64(base + SSO_LF_GGRP_INT_CNT);
                /* Extract cq and ds count */
                cq_ds_cnt &= 0x3FFF3FFF0000;
        }

        plt_write64(0, ws->base + SSOW_LF_GWS_OP_GWC_INVAL);
        rte_mb();
}

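/*
 * Quiesce a HWS: wait for outstanding getwork/swtag/desched operations
 * to retire, untag and deschedule any held work, drain work that the
 * prefetch modes may already have latched in PRF_WQE0, and finally
 * invalidate the getwork cache.
 */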
static void
cn10k_sso_hws_reset(void *arg, void *hws)
{
        struct cnxk_sso_evdev *dev = arg;
        struct cn10k_sso_hws *ws = hws;
        uintptr_t base = ws->base;
        uint64_t pend_state;
        union {
                __uint128_t wdata;
                uint64_t u64[2];
        } gw;
        uint8_t pend_tt;

        /* Wait till getwork/swtp/waitw/desched completes. */
        do {
                pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
        } while (pend_state & (BIT_ULL(63) | BIT_ULL(62) | BIT_ULL(58) |
                               BIT_ULL(56) | BIT_ULL(54)));
        pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
        if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                if (pend_tt == SSO_TT_ATOMIC || pend_tt == SSO_TT_ORDERED)
                        cnxk_sso_hws_swtag_untag(base +
                                                 SSOW_LF_GWS_OP_SWTAG_UNTAG);
                plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
        }

        /* Wait for desched to complete. */
        do {
                pend_state = plt_read64(base + SSOW_LF_GWS_PENDSTATE);
        } while (pend_state & BIT_ULL(58));

        switch (dev->gw_mode) {
        case CN10K_GW_MODE_PREF:
                while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) & BIT_ULL(63))
                        ;
                break;
        case CN10K_GW_MODE_PREF_WFE:
                while (plt_read64(base + SSOW_LF_GWS_PRF_WQE0) &
                       SSOW_LF_GWS_TAG_PEND_GET_WORK_BIT)
                        continue;
                plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
                break;
        case CN10K_GW_MODE_NONE:
        default:
                break;
        }

        if (CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_PRF_WQE0)) !=
            SSO_TT_EMPTY) {
                plt_write64(BIT_ULL(16) | 1, ws->getwrk_op);
                do {
                        roc_load_pair(gw.u64[0], gw.u64[1], ws->tag_wqe_op);
                } while (gw.u64[0] & BIT_ULL(63));
                pend_tt = CNXK_TT_FROM_TAG(plt_read64(base + SSOW_LF_GWS_WQE0));
                if (pend_tt != SSO_TT_EMPTY) { /* Work was pending */
                        if (pend_tt == SSO_TT_ATOMIC ||
                            pend_tt == SSO_TT_ORDERED)
                                cnxk_sso_hws_swtag_untag(
                                        base + SSOW_LF_GWS_OP_SWTAG_UNTAG);
                        plt_write64(0, base + SSOW_LF_GWS_OP_DESCHED);
                }
        }

        plt_write64(0, base + SSOW_LF_GWS_OP_GWC_INVAL);
        rte_mb();
}

static void
cn10k_sso_set_rsrc(void *arg)
{
        struct cnxk_sso_evdev *dev = arg;

        dev->max_event_ports = dev->sso.max_hws;
        dev->max_event_queues =
                dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
                              RTE_EVENT_MAX_QUEUES_PER_DEV :
                              dev->sso.max_hwgrp;
}

static int
cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
{
        struct cnxk_sso_evdev *dev = arg;

        return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
}

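/*
 * Install the CN10K fast-path handlers; the timeout variants replace
 * the plain dequeue handlers when a dequeue timeout is configured.
 */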
static void
cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        event_dev->enqueue = cn10k_sso_hws_enq;
        event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
        event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
        event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;

        event_dev->dequeue = cn10k_sso_hws_deq;
        event_dev->dequeue_burst = cn10k_sso_hws_deq_burst;
        if (dev->is_timeout_deq) {
                event_dev->dequeue = cn10k_sso_hws_tmo_deq;
                event_dev->dequeue_burst = cn10k_sso_hws_tmo_deq_burst;
        }
}

static void
cn10k_sso_info_get(struct rte_eventdev *event_dev,
                   struct rte_event_dev_info *dev_info)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
        cnxk_sso_info_get(dev, dev_info);
}

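/*
 * Configure: validate the requested geometry, re-create the SSO LFs to
 * match it, allocate the XAQ pool, set up the event ports and replay
 * any queue links that existed before the reconfigure.
 */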
static int
cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        rc = cnxk_sso_dev_validate(event_dev);
        if (rc < 0) {
                plt_err("Invalid event device configuration");
                return -EINVAL;
        }

        roc_sso_rsrc_fini(&dev->sso);

        rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
                                 dev->nb_event_queues);
        if (rc < 0) {
                plt_err("Failed to initialize SSO resources");
                return -ENODEV;
        }

        rc = cnxk_sso_xaq_allocate(dev);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
                                    cn10k_sso_hws_setup);
        if (rc < 0)
                goto cnxk_rsrc_fini;

        /* Restore any prior port-queue mapping. */
        cnxk_sso_restore_links(event_dev, cn10k_sso_hws_link);

        dev->configured = 1;
        rte_mb();

        return 0;
cnxk_rsrc_fini:
        roc_sso_rsrc_fini(&dev->sso);
        dev->nb_event_ports = 0;
        return rc;
}

static int
cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                     const struct rte_event_port_conf *port_conf)
{
        RTE_SET_USED(port_conf);
        return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
}

static void
cn10k_sso_port_release(void *port)
{
        struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
        struct cnxk_sso_evdev *dev;

        if (port == NULL)
                return;

        dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
        if (!gws_cookie->configured)
                goto free;

        cn10k_sso_hws_release(dev, port);
        memset(gws_cookie, 0, sizeof(*gws_cookie));
free:
        rte_free(gws_cookie);
}

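/*
 * Eventdev queue ids map 1:1 onto SSO HWGRP ids; the u8 queue lists are
 * widened into the u16 arrays that the ROC link/unlink API expects.
 */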
static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                    const uint8_t queues[], const uint8_t priorities[],
                    uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_links];
        uint16_t link;

        RTE_SET_USED(priorities);
        for (link = 0; link < nb_links; link++)
                hwgrp_ids[link] = queues[link];
        nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links);

        return (int)nb_links;
}

static int
cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                      uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t hwgrp_ids[nb_unlinks];
        uint16_t unlink;

        for (unlink = 0; unlink < nb_unlinks; unlink++)
                hwgrp_ids[unlink] = queues[unlink];
        nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids, nb_unlinks);

        return (int)nb_unlinks;
}

static int
cn10k_sso_start(struct rte_eventdev *event_dev)
{
        int rc;

        rc = cnxk_sso_start(event_dev, cn10k_sso_hws_reset,
                            cn10k_sso_hws_flush_events);
        if (rc < 0)
                return rc;
        cn10k_sso_fp_fns_set(event_dev);

        return rc;
}

static void
cn10k_sso_stop(struct rte_eventdev *event_dev)
{
        cnxk_sso_stop(event_dev, cn10k_sso_hws_reset,
                      cn10k_sso_hws_flush_events);
}

static int
cn10k_sso_close(struct rte_eventdev *event_dev)
{
        return cnxk_sso_close(event_dev, cn10k_sso_hws_unlink);
}

static int
cn10k_sso_selftest(void)
{
        return cnxk_sso_selftest(RTE_STR(event_cn10k));
}

static struct rte_eventdev_ops cn10k_sso_dev_ops = {
        .dev_infos_get = cn10k_sso_info_get,
        .dev_configure = cn10k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn10k_sso_port_setup,
        .port_release = cn10k_sso_port_release,
        .port_link = cn10k_sso_port_link,
        .port_unlink = cn10k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,

        .timer_adapter_caps_get = cnxk_tim_caps_get,

        .dump = cnxk_sso_dump,
        .dev_start = cn10k_sso_start,
        .dev_stop = cn10k_sso_stop,
        .dev_close = cn10k_sso_close,
        .dev_selftest = cn10k_sso_selftest,
};

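/*
 * Device init, invoked for primary and secondary processes alike; a
 * secondary process only wires up the fast-path handlers since the
 * primary has already probed the hardware resources.
 */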
static int
cn10k_sso_init(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int rc;

        if (RTE_CACHE_LINE_SIZE != 64) {
                plt_err("Driver not compiled for CN10K");
                return -EFAULT;
        }

        rc = roc_plt_init();
        if (rc < 0) {
                plt_err("Failed to initialize platform model");
                return rc;
        }

        event_dev->dev_ops = &cn10k_sso_dev_ops;
        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
                cn10k_sso_fp_fns_set(event_dev);
                return 0;
        }

        rc = cnxk_sso_init(event_dev);
        if (rc < 0)
                return rc;

        cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
        if (!dev->max_event_ports || !dev->max_event_queues) {
                plt_err("Not enough eventdev resource queues=%d ports=%d",
                        dev->max_event_queues, dev->max_event_ports);
                cnxk_sso_fini(event_dev);
                return -ENODEV;
        }

        plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
                    event_dev->data->name, dev->max_event_queues,
                    dev->max_event_ports);

        return 0;
}

static int
cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_probe(pci_drv, pci_dev,
                                       sizeof(struct cnxk_sso_evdev),
                                       cn10k_sso_init);
}

static const struct rte_pci_id cn10k_pci_sso_map[] = {
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        {
                .vendor_id = 0,
        },
};

static struct rte_pci_driver cn10k_pci_sso = {
        .id_table = cn10k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn10k_sso_probe,
        .remove = cnxk_sso_remove,
};

RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CN10K_SSO_GW_MODE "=<int>"
                              CNXK_TIM_DISABLE_NPA "=1"
                              CNXK_TIM_CHNK_SLOTS "=<int>"
                              CNXK_TIM_RINGS_LMT "=<int>"
                              CNXK_TIM_STATS_ENA "=1");