event/cnxk: add SSO GWS enqueue fast path
[dpdk.git] / drivers / event / cnxk / cn10k_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include "cn10k_worker.h"
6 #include "cnxk_eventdev.h"
7 #include "cnxk_worker.h"
8
9 static void
10 cn10k_init_hws_ops(struct cn10k_sso_hws *ws, uintptr_t base)
11 {
12         ws->tag_wqe_op = base + SSOW_LF_GWS_WQE0;
13         ws->getwrk_op = base + SSOW_LF_GWS_OP_GET_WORK0;
14         ws->updt_wqe_op = base + SSOW_LF_GWS_OP_UPD_WQP_GRP1;
15         ws->swtag_norm_op = base + SSOW_LF_GWS_OP_SWTAG_NORM;
16         ws->swtag_untag_op = base + SSOW_LF_GWS_OP_SWTAG_UNTAG;
17         ws->swtag_flush_op = base + SSOW_LF_GWS_OP_SWTAG_FLUSH;
18         ws->swtag_desched_op = base + SSOW_LF_GWS_OP_SWTAG_DESCHED;
19 }
20
21 static uint32_t
22 cn10k_sso_gw_mode_wdata(struct cnxk_sso_evdev *dev)
23 {
24         uint32_t wdata = BIT(16) | 1;
25
26         switch (dev->gw_mode) {
27         case CN10K_GW_MODE_NONE:
28         default:
29                 break;
30         case CN10K_GW_MODE_PREF:
31                 wdata |= BIT(19);
32                 break;
33         case CN10K_GW_MODE_PREF_WFE:
34                 wdata |= BIT(20) | BIT(19);
35                 break;
36         }
37
38         return wdata;
39 }
40
41 static void *
42 cn10k_sso_init_hws_mem(void *arg, uint8_t port_id)
43 {
44         struct cnxk_sso_evdev *dev = arg;
45         struct cn10k_sso_hws *ws;
46
47         /* Allocate event port memory */
48         ws = rte_zmalloc("cn10k_ws",
49                          sizeof(struct cn10k_sso_hws) + RTE_CACHE_LINE_SIZE,
50                          RTE_CACHE_LINE_SIZE);
51         if (ws == NULL) {
52                 plt_err("Failed to alloc memory for port=%d", port_id);
53                 return NULL;
54         }
55
56         /* First cache line is reserved for cookie */
57         ws = (struct cn10k_sso_hws *)((uint8_t *)ws + RTE_CACHE_LINE_SIZE);
58         ws->base = roc_sso_hws_base_get(&dev->sso, port_id);
59         cn10k_init_hws_ops(ws, ws->base);
60         ws->hws_id = port_id;
61         ws->swtag_req = 0;
62         ws->gw_wdata = cn10k_sso_gw_mode_wdata(dev);
63         ws->lmt_base = dev->sso.lmt_base;
64
65         return ws;
66 }
67
68 static int
69 cn10k_sso_hws_link(void *arg, void *port, uint16_t *map, uint16_t nb_link)
70 {
71         struct cnxk_sso_evdev *dev = arg;
72         struct cn10k_sso_hws *ws = port;
73
74         return roc_sso_hws_link(&dev->sso, ws->hws_id, map, nb_link);
75 }
76
77 static int
78 cn10k_sso_hws_unlink(void *arg, void *port, uint16_t *map, uint16_t nb_link)
79 {
80         struct cnxk_sso_evdev *dev = arg;
81         struct cn10k_sso_hws *ws = port;
82
83         return roc_sso_hws_unlink(&dev->sso, ws->hws_id, map, nb_link);
84 }
85
86 static void
87 cn10k_sso_hws_setup(void *arg, void *hws, uintptr_t *grps_base)
88 {
89         struct cnxk_sso_evdev *dev = arg;
90         struct cn10k_sso_hws *ws = hws;
91         uint64_t val;
92
93         rte_memcpy(ws->grps_base, grps_base,
94                    sizeof(uintptr_t) * CNXK_SSO_MAX_HWGRP);
95         ws->fc_mem = dev->fc_mem;
96         ws->xaq_lmt = dev->xaq_lmt;
97
98         /* Set get_work timeout for HWS */
99         val = NSEC2USEC(dev->deq_tmo_ns) - 1;
100         plt_write64(val, ws->base + SSOW_LF_GWS_NW_TIM);
101 }
102
103 static void
104 cn10k_sso_hws_release(void *arg, void *hws)
105 {
106         struct cnxk_sso_evdev *dev = arg;
107         struct cn10k_sso_hws *ws = hws;
108         int i;
109
110         for (i = 0; i < dev->nb_event_queues; i++)
111                 roc_sso_hws_unlink(&dev->sso, ws->hws_id, (uint16_t *)&i, 1);
112         memset(ws, 0, sizeof(*ws));
113 }
114
115 static void
116 cn10k_sso_set_rsrc(void *arg)
117 {
118         struct cnxk_sso_evdev *dev = arg;
119
120         dev->max_event_ports = dev->sso.max_hws;
121         dev->max_event_queues =
122                 dev->sso.max_hwgrp > RTE_EVENT_MAX_QUEUES_PER_DEV ?
123                               RTE_EVENT_MAX_QUEUES_PER_DEV :
124                               dev->sso.max_hwgrp;
125 }
126
127 static int
128 cn10k_sso_rsrc_init(void *arg, uint8_t hws, uint8_t hwgrp)
129 {
130         struct cnxk_sso_evdev *dev = arg;
131
132         return roc_sso_rsrc_init(&dev->sso, hws, hwgrp);
133 }
134
135 static void
136 cn10k_sso_fp_fns_set(struct rte_eventdev *event_dev)
137 {
138         PLT_SET_USED(event_dev);
139         event_dev->enqueue = cn10k_sso_hws_enq;
140         event_dev->enqueue_burst = cn10k_sso_hws_enq_burst;
141         event_dev->enqueue_new_burst = cn10k_sso_hws_enq_new_burst;
142         event_dev->enqueue_forward_burst = cn10k_sso_hws_enq_fwd_burst;
143 }
144
145 static void
146 cn10k_sso_info_get(struct rte_eventdev *event_dev,
147                    struct rte_event_dev_info *dev_info)
148 {
149         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
150
151         dev_info->driver_name = RTE_STR(EVENTDEV_NAME_CN10K_PMD);
152         cnxk_sso_info_get(dev, dev_info);
153 }
154
155 static int
156 cn10k_sso_dev_configure(const struct rte_eventdev *event_dev)
157 {
158         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
159         int rc;
160
161         rc = cnxk_sso_dev_validate(event_dev);
162         if (rc < 0) {
163                 plt_err("Invalid event device configuration");
164                 return -EINVAL;
165         }
166
167         roc_sso_rsrc_fini(&dev->sso);
168
169         rc = cn10k_sso_rsrc_init(dev, dev->nb_event_ports,
170                                  dev->nb_event_queues);
171         if (rc < 0) {
172                 plt_err("Failed to initialize SSO resources");
173                 return -ENODEV;
174         }
175
176         rc = cnxk_sso_xaq_allocate(dev);
177         if (rc < 0)
178                 goto cnxk_rsrc_fini;
179
180         rc = cnxk_setup_event_ports(event_dev, cn10k_sso_init_hws_mem,
181                                     cn10k_sso_hws_setup);
182         if (rc < 0)
183                 goto cnxk_rsrc_fini;
184
185         /* Restore any prior port-queue mapping. */
186         cnxk_sso_restore_links(event_dev, cn10k_sso_hws_link);
187
188         dev->configured = 1;
189         rte_mb();
190
191         return 0;
192 cnxk_rsrc_fini:
193         roc_sso_rsrc_fini(&dev->sso);
194         dev->nb_event_ports = 0;
195         return rc;
196 }
197
198 static int
199 cn10k_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
200                      const struct rte_event_port_conf *port_conf)
201 {
202
203         RTE_SET_USED(port_conf);
204         return cnxk_sso_port_setup(event_dev, port_id, cn10k_sso_hws_setup);
205 }
206
207 static void
208 cn10k_sso_port_release(void *port)
209 {
210         struct cnxk_sso_hws_cookie *gws_cookie = cnxk_sso_hws_get_cookie(port);
211         struct cnxk_sso_evdev *dev;
212
213         if (port == NULL)
214                 return;
215
216         dev = cnxk_sso_pmd_priv(gws_cookie->event_dev);
217         if (!gws_cookie->configured)
218                 goto free;
219
220         cn10k_sso_hws_release(dev, port);
221         memset(gws_cookie, 0, sizeof(*gws_cookie));
222 free:
223         rte_free(gws_cookie);
224 }
225
/* eventdev port_link: widen the uint8_t queue ids to the uint16_t HWGRP
 * ids the ROC layer expects and program the links.
 *
 * Fix: guard against nb_links == 0 — a zero-length VLA is undefined
 * behavior in C; linking nothing trivially succeeds with 0 links made.
 */
static int
cn10k_sso_port_link(struct rte_eventdev *event_dev, void *port,
                    const uint8_t queues[], const uint8_t priorities[],
                    uint16_t nb_links)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t link;

        RTE_SET_USED(priorities);

        if (nb_links == 0)
                return 0;

        {
                uint16_t hwgrp_ids[nb_links];

                for (link = 0; link < nb_links; link++)
                        hwgrp_ids[link] = queues[link];
                nb_links = cn10k_sso_hws_link(dev, port, hwgrp_ids, nb_links);
        }

        return (int)nb_links;
}
242
/* eventdev port_unlink: widen the uint8_t queue ids to uint16_t HWGRP ids
 * and remove the links.
 *
 * Fix: guard against nb_unlinks == 0 — a zero-length VLA is undefined
 * behavior in C; unlinking nothing trivially succeeds with 0 unlinks.
 */
static int
cn10k_sso_port_unlink(struct rte_eventdev *event_dev, void *port,
                      uint8_t queues[], uint16_t nb_unlinks)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t unlink;

        if (nb_unlinks == 0)
                return 0;

        {
                uint16_t hwgrp_ids[nb_unlinks];

                for (unlink = 0; unlink < nb_unlinks; unlink++)
                        hwgrp_ids[unlink] = queues[unlink];
                nb_unlinks = cn10k_sso_hws_unlink(dev, port, hwgrp_ids,
                                                  nb_unlinks);
        }

        return (int)nb_unlinks;
}
257
/* Eventdev control-path operation table for the CN10K SSO PMD.
 * Device and queue ops come from the common cnxk layer; port ops need
 * CN10K-specific HWS handling and are implemented in this file.
 */
static struct rte_eventdev_ops cn10k_sso_dev_ops = {
        .dev_infos_get = cn10k_sso_info_get,
        .dev_configure = cn10k_sso_dev_configure,
        .queue_def_conf = cnxk_sso_queue_def_conf,
        .queue_setup = cnxk_sso_queue_setup,
        .queue_release = cnxk_sso_queue_release,
        .port_def_conf = cnxk_sso_port_def_conf,
        .port_setup = cn10k_sso_port_setup,
        .port_release = cn10k_sso_port_release,
        .port_link = cn10k_sso_port_link,
        .port_unlink = cn10k_sso_port_unlink,
        .timeout_ticks = cnxk_sso_timeout_ticks,
};
271
272 static int
273 cn10k_sso_init(struct rte_eventdev *event_dev)
274 {
275         struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
276         int rc;
277
278         if (RTE_CACHE_LINE_SIZE != 64) {
279                 plt_err("Driver not compiled for CN9K");
280                 return -EFAULT;
281         }
282
283         rc = roc_plt_init();
284         if (rc < 0) {
285                 plt_err("Failed to initialize platform model");
286                 return rc;
287         }
288
289         event_dev->dev_ops = &cn10k_sso_dev_ops;
290         /* For secondary processes, the primary has done all the work */
291         if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
292                 cn10k_sso_fp_fns_set(event_dev);
293                 return 0;
294         }
295
296         rc = cnxk_sso_init(event_dev);
297         if (rc < 0)
298                 return rc;
299
300         cn10k_sso_set_rsrc(cnxk_sso_pmd_priv(event_dev));
301         if (!dev->max_event_ports || !dev->max_event_queues) {
302                 plt_err("Not enough eventdev resource queues=%d ports=%d",
303                         dev->max_event_queues, dev->max_event_ports);
304                 cnxk_sso_fini(event_dev);
305                 return -ENODEV;
306         }
307
308         plt_sso_dbg("Initializing %s max_queues=%d max_ports=%d",
309                     event_dev->data->name, dev->max_event_queues,
310                     dev->max_event_ports);
311
312         return 0;
313 }
314
315 static int
316 cn10k_sso_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
317 {
318         return rte_event_pmd_pci_probe(pci_drv, pci_dev,
319                                        sizeof(struct cnxk_sso_evdev),
320                                        cn10k_sso_init);
321 }
322
/* PCI IDs matched by this driver: SSO/TIM PF and VF functions on the
 * CN10KA and CN10KAS subsystems. Zero vendor_id terminates the table.
 */
static const struct rte_pci_id cn10k_pci_sso_map[] = {
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_PF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KA, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        CNXK_PCI_ID(PCI_SUBSYSTEM_DEVID_CN10KAS, PCI_DEVID_CNXK_RVU_SSO_TIM_VF),
        {
                .vendor_id = 0,
        },
};
332
/* PCI driver descriptor; removal is handled by the common cnxk layer. */
static struct rte_pci_driver cn10k_pci_sso = {
        .id_table = cn10k_pci_sso_map,
        .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_NEED_IOVA_AS_VA,
        .probe = cn10k_sso_probe,
        .remove = cnxk_sso_remove,
};
339
/* Register the PMD with EAL: PCI driver, match table, kernel module
 * dependency and the supported devargs parameters.
 */
RTE_PMD_REGISTER_PCI(event_cn10k, cn10k_pci_sso);
RTE_PMD_REGISTER_PCI_TABLE(event_cn10k, cn10k_pci_sso_map);
RTE_PMD_REGISTER_KMOD_DEP(event_cn10k, "vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(event_cn10k, CNXK_SSO_XAE_CNT "=<int>"
                              CNXK_SSO_GGRP_QOS "=<string>"
                              CN10K_SSO_GW_MODE "=<int>");