event/cnxk: add event port link and unlink
[dpdk.git] drivers/event/cnxk/cnxk_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2021 Marvell.
 */

#include "cnxk_eventdev.h"

void
cnxk_sso_info_get(struct cnxk_sso_evdev *dev,
                  struct rte_event_dev_info *dev_info)
{
        dev_info->min_dequeue_timeout_ns = dev->min_dequeue_timeout_ns;
        dev_info->max_dequeue_timeout_ns = dev->max_dequeue_timeout_ns;
        dev_info->max_event_queues = dev->max_event_queues;
        dev_info->max_event_queue_flows = (1ULL << 20);
        dev_info->max_event_queue_priority_levels = 8;
        dev_info->max_event_priority_levels = 1;
        dev_info->max_event_ports = dev->max_event_ports;
        dev_info->max_event_port_dequeue_depth = 1;
        dev_info->max_event_port_enqueue_depth = 1;
        dev_info->max_num_events = dev->max_num_events;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
                                  RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                                  RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                                  RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                                  RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                                  RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                                  RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;
}

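/*
 * (Re)build the XAQ buffer pool shared by the SSO HWGRPs (event queues):
 * release any XAQs held from a previous configure, set up the add-work
 * flow-control memzone and its NPA aura, size the pool from the queue
 * count (or the xae_cnt devarg when given) and attach the aura to the
 * HWGRPs.
 */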
int
cnxk_sso_xaq_allocate(struct cnxk_sso_evdev *dev)
{
        char pool_name[RTE_MEMZONE_NAMESIZE];
        uint32_t xaq_cnt, npa_aura_id;
        const struct rte_memzone *mz;
        struct npa_aura_s *aura;
        static int reconfig_cnt;
        int rc;

        if (dev->xaq_pool) {
                rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
                if (rc < 0) {
                        plt_err("Failed to release XAQ %d", rc);
                        return rc;
                }
                rte_mempool_free(dev->xaq_pool);
                dev->xaq_pool = NULL;
        }

        /*
         * Allocate memory for add-work backpressure.
         */
        mz = rte_memzone_lookup(CNXK_SSO_FC_NAME);
        if (mz == NULL)
                mz = rte_memzone_reserve_aligned(CNXK_SSO_FC_NAME,
                                                 sizeof(struct npa_aura_s) +
                                                         RTE_CACHE_LINE_SIZE,
                                                 0, 0, RTE_CACHE_LINE_SIZE);
        if (mz == NULL) {
                plt_err("Failed to allocate mem for fcmem");
                return -ENOMEM;
        }

        dev->fc_iova = mz->iova;
        dev->fc_mem = mz->addr;

        aura = (struct npa_aura_s *)((uintptr_t)dev->fc_mem +
                                     RTE_CACHE_LINE_SIZE);
        memset(aura, 0, sizeof(struct npa_aura_s));

        aura->fc_ena = 1;
        aura->fc_addr = dev->fc_iova;
        aura->fc_hyst_bits = 0; /* Store count on all updates */

        /* Taken from HRM 14.3.3(4) */
        xaq_cnt = dev->nb_event_queues * CNXK_SSO_XAQ_CACHE_CNT;
        if (dev->xae_cnt)
                xaq_cnt += dev->xae_cnt / dev->sso.xae_waes;
        else
                xaq_cnt += (dev->sso.iue / dev->sso.xae_waes) +
                           (CNXK_SSO_XAQ_SLACK * dev->nb_event_queues);

        plt_sso_dbg("Configuring %d xaq buffers", xaq_cnt);
        /* Set up XAQ based on the number of event queues. */
        snprintf(pool_name, sizeof(pool_name), "cnxk_xaq_buf_pool_%d",
                 reconfig_cnt);
        dev->xaq_pool = (void *)rte_mempool_create_empty(
                pool_name, xaq_cnt, dev->sso.xaq_buf_size, 0, 0,
                rte_socket_id(), 0);

        if (dev->xaq_pool == NULL) {
                plt_err("Unable to create empty mempool.");
                rte_memzone_free(mz);
                return -ENOMEM;
        }

        rc = rte_mempool_set_ops_byname(dev->xaq_pool,
                                        rte_mbuf_platform_mempool_ops(), aura);
        if (rc != 0) {
                plt_err("Unable to set xaqpool ops.");
                goto alloc_fail;
        }

        rc = rte_mempool_populate_default(dev->xaq_pool);
        if (rc < 0) {
                plt_err("Unable to populate xaqpool.");
                goto alloc_fail;
        }
        reconfig_cnt++;
        /* When SW does addwork (enqueue), check whether there is space in XAQ
         * by comparing fc_addr above against the xaq_lmt calculated below.
         * There should be a minimum headroom (CNXK_SSO_XAQ_SLACK / 2) for SSO
         * to request XAQs to cache them even before enqueue is called.
         */
        dev->xaq_lmt =
                xaq_cnt - (CNXK_SSO_XAQ_SLACK / 2 * dev->nb_event_queues);
        dev->nb_xaq_cfg = xaq_cnt;

        npa_aura_id = roc_npa_aura_handle_to_aura(dev->xaq_pool->pool_id);
        return roc_sso_hwgrp_alloc_xaq(&dev->sso, npa_aura_id,
                                       dev->nb_event_queues);
alloc_fail:
        rte_mempool_free(dev->xaq_pool);
        rte_memzone_free(mz);
        return rc;
}

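/*
 * Allocate (or reuse) the per-port HWS structures and run the PMD-specific
 * setup callback on each of them; on failure, tear down the ports that were
 * already set up.
 */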
int
cnxk_setup_event_ports(const struct rte_eventdev *event_dev,
                       cnxk_sso_init_hws_mem_t init_hws_fn,
                       cnxk_sso_hws_setup_t setup_hws_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        int i;

        for (i = 0; i < dev->nb_event_ports; i++) {
                struct cnxk_sso_hws_cookie *ws_cookie;
                void *ws;

                /* Reuse the HWS memory if the port was already allocated */
                if (event_dev->data->ports[i] != NULL)
                        ws = event_dev->data->ports[i];
                else
                        ws = init_hws_fn(dev, i);
                if (ws == NULL)
                        goto hws_fini;
                ws_cookie = cnxk_sso_hws_get_cookie(ws);
                ws_cookie->event_dev = event_dev;
                ws_cookie->configured = 1;
                event_dev->data->ports[i] = ws;
                cnxk_sso_port_setup((struct rte_eventdev *)(uintptr_t)event_dev,
                                    i, setup_hws_fn);
        }

        return 0;
hws_fini:
        for (i = i - 1; i >= 0; i--) {
                /* Free the HWS cookie before clearing the port pointer */
                rte_free(cnxk_sso_hws_get_cookie(event_dev->data->ports[i]));
                event_dev->data->ports[i] = NULL;
        }
        return -ENOMEM;
}

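/*
 * Re-apply the queue-to-port links recorded in the eventdev layer's
 * links_map after a device reconfigure; entries marked 0xdead are unlinked
 * slots and are skipped.
 */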
void
cnxk_sso_restore_links(const struct rte_eventdev *event_dev,
                       cnxk_sso_link_t link_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint16_t *links_map, hwgrp[CNXK_SSO_MAX_HWGRP];
        int i, j;

        for (i = 0; i < dev->nb_event_ports; i++) {
                uint16_t nb_hwgrp = 0;

                links_map = event_dev->data->links_map;
                /* Point links_map to this port specific area */
                links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);

                for (j = 0; j < dev->nb_event_queues; j++) {
                        if (links_map[j] == 0xdead)
                                continue;
                        hwgrp[nb_hwgrp] = j;
                        nb_hwgrp++;
                }

                link_fn(dev, event_dev->data->ports[i], hwgrp, nb_hwgrp);
        }
}

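/*
 * Validate the requested device configuration against the SSO limits
 * (dequeue timeout range, queue/port counts, unit enqueue/dequeue depths)
 * and drop any XAQ state left over from a previous configuration.
 */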
int
cnxk_sso_dev_validate(const struct rte_eventdev *event_dev)
{
        struct rte_event_dev_config *conf = &event_dev->data->dev_conf;
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uint32_t deq_tmo_ns;
        int rc;

        deq_tmo_ns = conf->dequeue_timeout_ns;

        if (deq_tmo_ns == 0)
                deq_tmo_ns = dev->min_dequeue_timeout_ns;
        if (deq_tmo_ns < dev->min_dequeue_timeout_ns ||
            deq_tmo_ns > dev->max_dequeue_timeout_ns) {
                plt_err("Unsupported dequeue timeout requested");
                return -EINVAL;
        }

        if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
                dev->is_timeout_deq = 1;

        dev->deq_tmo_ns = deq_tmo_ns;

        if (!conf->nb_event_queues || !conf->nb_event_ports ||
            conf->nb_event_ports > dev->max_event_ports ||
            conf->nb_event_queues > dev->max_event_queues) {
                plt_err("Unsupported event queues/ports requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_dequeue_depth > 1) {
                plt_err("Unsupported event port deq depth requested");
                return -EINVAL;
        }

        if (conf->nb_event_port_enqueue_depth > 1) {
                plt_err("Unsupported event port enq depth requested");
                return -EINVAL;
        }

        if (dev->xaq_pool) {
                rc = roc_sso_hwgrp_release_xaq(&dev->sso, dev->nb_event_queues);
                if (rc < 0) {
                        plt_err("Failed to release XAQ %d", rc);
                        return rc;
                }
                rte_mempool_free(dev->xaq_pool);
                dev->xaq_pool = NULL;
        }

        dev->nb_event_queues = conf->nb_event_queues;
        dev->nb_event_ports = conf->nb_event_ports;

        return 0;
}

void
cnxk_sso_queue_def_conf(struct rte_eventdev *event_dev, uint8_t queue_id,
                        struct rte_event_queue_conf *queue_conf)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = (1ULL << 20);
        queue_conf->nb_atomic_order_sequences = (1ULL << 20);
        queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

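/*
 * Map the 0-255 eventdev queue priority onto the eight SSO HWGRP priority
 * levels by dividing by 32, e.g. RTE_EVENT_DEV_PRIORITY_NORMAL (128)
 * becomes level 4.
 */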
int
cnxk_sso_queue_setup(struct rte_eventdev *event_dev, uint8_t queue_id,
                     const struct rte_event_queue_conf *queue_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        plt_sso_dbg("Queue=%d prio=%d", queue_id, queue_conf->priority);
        /* Normalize <0-255> to <0-7> */
        return roc_sso_hwgrp_set_priority(&dev->sso, queue_id, 0xFF, 0xFF,
                                          queue_conf->priority / 32);
}

void
cnxk_sso_queue_release(struct rte_eventdev *event_dev, uint8_t queue_id)
{
        RTE_SET_USED(event_dev);
        RTE_SET_USED(queue_id);
}

void
cnxk_sso_port_def_conf(struct rte_eventdev *event_dev, uint8_t port_id,
                       struct rte_event_port_conf *port_conf)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        RTE_SET_USED(port_id);
        port_conf->new_event_threshold = dev->max_num_events;
        port_conf->dequeue_depth = 1;
        port_conf->enqueue_depth = 1;
}

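/*
 * Bind a port (HWS) to the device: collect the base address of every
 * configured HWGRP and hand them to the PMD-specific HWS setup callback.
 */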
int
cnxk_sso_port_setup(struct rte_eventdev *event_dev, uint8_t port_id,
                    cnxk_sso_hws_setup_t hws_setup_fn)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);
        uintptr_t grps_base[CNXK_SSO_MAX_HWGRP] = {0};
        uint16_t q;

        plt_sso_dbg("Port=%d", port_id);
        if (event_dev->data->ports[port_id] == NULL) {
                plt_err("Invalid port Id %d", port_id);
                return -EINVAL;
        }

        for (q = 0; q < dev->nb_event_queues; q++) {
                grps_base[q] = roc_sso_hwgrp_base_get(&dev->sso, q);
                if (grps_base[q] == 0) {
                        plt_err("Failed to get grp[%d] base addr", q);
                        return -EINVAL;
                }
        }

        hws_setup_fn(dev, event_dev->data->ports[port_id], grps_base);
        plt_sso_dbg("Port=%d ws=%p", port_id, event_dev->data->ports[port_id]);
        rte_mb();

        return 0;
}

int
cnxk_sso_timeout_ticks(struct rte_eventdev *event_dev, uint64_t ns,
                       uint64_t *tmo_ticks)
{
        RTE_SET_USED(event_dev);
        *tmo_ticks = NSEC2TICK(ns, rte_get_timer_hz());

        return 0;
}

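/*
 * Parse one "Qx-XAQ-TAQ-IAQ" tuple from the CNXK_SSO_GGRP_QOS devargs dict,
 * e.g. "0-50-50-50" assigns queue 0 a 50% share of the XAQ/TAQ/IAQ
 * thresholds; the dash-separated values are written field by field into a
 * cnxk_sso_qos entry.
 */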
static void
parse_queue_param(char *value, void *opaque)
{
        struct cnxk_sso_qos queue_qos = {0};
        uint8_t *val = (uint8_t *)&queue_qos;
        struct cnxk_sso_evdev *dev = opaque;
        char *tok = strtok(value, "-");
        struct cnxk_sso_qos *old_ptr;

        if (!strlen(value))
                return;

        while (tok != NULL) {
                *val = atoi(tok);
                tok = strtok(NULL, "-");
                val++;
        }

        if (val != (&queue_qos.iaq_prcnt + 1)) {
                plt_err("Invalid QoS parameter, expected [Qx-XAQ-TAQ-IAQ]");
                return;
        }

        dev->qos_queue_cnt++;
        old_ptr = dev->qos_parse_data;
        dev->qos_parse_data = rte_realloc(
                dev->qos_parse_data,
                sizeof(struct cnxk_sso_qos) * dev->qos_queue_cnt, 0);
        if (dev->qos_parse_data == NULL) {
                dev->qos_parse_data = old_ptr;
                dev->qos_queue_cnt--;
                return;
        }
        dev->qos_parse_data[dev->qos_queue_cnt - 1] = queue_qos;
}

static void
parse_qos_list(const char *value, void *opaque)
{
        char *s = strdup(value);
        char *start = NULL;
        char *end = NULL;
        char *f = s;

        if (s == NULL)
                return;

        while (*s) {
                if (*s == '[')
                        start = s;
                else if (*s == ']')
                        end = s;

                if (start && start < end) {
                        *end = 0;
                        parse_queue_param(start + 1, opaque);
                        s = end;
                        start = end;
                }
                s++;
        }

        free(f);
}

static int
parse_sso_kvargs_dict(const char *key, const char *value, void *opaque)
{
        RTE_SET_USED(key);

        /* Dict format [Qx-XAQ-TAQ-IAQ][Qz-XAQ-TAQ-IAQ]; use '-' since ','
         * isn't allowed. Everything is expressed in percentages, 0 represents
         * default.
         */
        parse_qos_list(value, opaque);

        return 0;
}

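/*
 * Parse the devargs recognized by the driver: CNXK_SSO_XAE_CNT sets
 * dev->xae_cnt (used when sizing the XAQ pool) and CNXK_SSO_GGRP_QOS
 * carries the per-queue QoS dict handled above.
 */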
static void
cnxk_sso_parse_devargs(struct cnxk_sso_evdev *dev, struct rte_devargs *devargs)
{
        struct rte_kvargs *kvlist;

        if (devargs == NULL)
                return;
        kvlist = rte_kvargs_parse(devargs->args, NULL);
        if (kvlist == NULL)
                return;

        rte_kvargs_process(kvlist, CNXK_SSO_XAE_CNT, &parse_kvargs_value,
                           &dev->xae_cnt);
        rte_kvargs_process(kvlist, CNXK_SSO_GGRP_QOS, &parse_sso_kvargs_dict,
                           dev);
        rte_kvargs_free(kvlist);
}

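/*
 * Common probe-time initialization: reserve the CNXK_SSO_MZ_NAME memzone
 * holding the device pointer, parse the devargs and bring up the RoC SSO
 * layer before applying the default timeout/queue/port state.
 */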
int
cnxk_sso_init(struct rte_eventdev *event_dev)
{
        const struct rte_memzone *mz = NULL;
        struct rte_pci_device *pci_dev;
        struct cnxk_sso_evdev *dev;
        int rc;

        mz = rte_memzone_reserve(CNXK_SSO_MZ_NAME, sizeof(uint64_t),
                                 SOCKET_ID_ANY, 0);
        if (mz == NULL) {
                plt_err("Failed to create eventdev memzone");
                return -ENOMEM;
        }

        dev = cnxk_sso_pmd_priv(event_dev);
        pci_dev = container_of(event_dev->dev, struct rte_pci_device, device);
        dev->sso.pci_dev = pci_dev;

        *(uint64_t *)mz->addr = (uint64_t)dev;
        cnxk_sso_parse_devargs(dev, pci_dev->device.devargs);

        /* Initialize the base cnxk_dev object */
        rc = roc_sso_dev_init(&dev->sso);
        if (rc < 0) {
                plt_err("Failed to initialize RoC SSO rc=%d", rc);
                goto error;
        }

        dev->is_timeout_deq = 0;
        dev->min_dequeue_timeout_ns = USEC2NSEC(1);
        dev->max_dequeue_timeout_ns = USEC2NSEC(0x3FF);
        dev->max_num_events = -1;
        dev->nb_event_queues = 0;
        dev->nb_event_ports = 0;

        return 0;

error:
        rte_memzone_free(mz);
        return rc;
}

int
cnxk_sso_fini(struct rte_eventdev *event_dev)
{
        struct cnxk_sso_evdev *dev = cnxk_sso_pmd_priv(event_dev);

        /* For secondary processes, nothing to be done */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        roc_sso_rsrc_fini(&dev->sso);
        roc_sso_dev_fini(&dev->sso);

        return 0;
}

int
cnxk_sso_remove(struct rte_pci_device *pci_dev)
{
        return rte_event_pmd_pci_remove(pci_dev, cnxk_sso_fini);
}