event/sw: use dynamically-sized IQs
[dpdk.git] lib/librte_eventdev/rte_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15
16 #include <rte_byteorder.h>
17 #include <rte_log.h>
18 #include <rte_debug.h>
19 #include <rte_dev.h>
20 #include <rte_memory.h>
21 #include <rte_memcpy.h>
22 #include <rte_memzone.h>
23 #include <rte_eal.h>
24 #include <rte_per_lcore.h>
25 #include <rte_lcore.h>
26 #include <rte_atomic.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_malloc.h>
30 #include <rte_errno.h>
31 #include <rte_ethdev.h>
32
33 #include "rte_eventdev.h"
34 #include "rte_eventdev_pmd.h"
35
36 struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
37
38 struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
39
40 static struct rte_eventdev_global eventdev_globals = {
41         .nb_devs                = 0
42 };
43
44 struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
45
46 /* Event dev north bound API implementation */
47
48 uint8_t
49 rte_event_dev_count(void)
50 {
51         return rte_eventdev_globals->nb_devs;
52 }
53
54 int
55 rte_event_dev_get_dev_id(const char *name)
56 {
57         int i;
58
59         if (!name)
60                 return -EINVAL;
61
62         for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
63                 if ((strcmp(rte_event_devices[i].data->name, name)
64                                 == 0) &&
65                                 (rte_event_devices[i].attached ==
66                                                 RTE_EVENTDEV_ATTACHED))
67                         return i;
68         return -ENODEV;
69 }
70
71 int
72 rte_event_dev_socket_id(uint8_t dev_id)
73 {
74         struct rte_eventdev *dev;
75
76         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
77         dev = &rte_eventdevs[dev_id];
78
79         return dev->data->socket_id;
80 }
81
82 int
83 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
84 {
85         struct rte_eventdev *dev;
86
87         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
88         dev = &rte_eventdevs[dev_id];
89
90         if (dev_info == NULL)
91                 return -EINVAL;
92
93         memset(dev_info, 0, sizeof(struct rte_event_dev_info));
94
95         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
96         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
97
98         dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
99
100         dev_info->dev = dev->dev;
101         return 0;
102 }
103
104 int
105 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
106                                 uint32_t *caps)
107 {
108         struct rte_eventdev *dev;
109
110         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
111         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
112
113         dev = &rte_eventdevs[dev_id];
114
115         if (caps == NULL)
116                 return -EINVAL;
117         *caps = 0;
118
119         return dev->dev_ops->eth_rx_adapter_caps_get ?
120                                 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
121                                                 &rte_eth_devices[eth_port_id],
122                                                 caps)
123                                 : 0;
124 }
125
126 static inline int
127 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
128 {
129         uint8_t old_nb_queues = dev->data->nb_queues;
130         struct rte_event_queue_conf *queues_cfg;
131         unsigned int i;
132
133         RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
134                          dev->data->dev_id);
135
136         /* First time configuration */
137         if (dev->data->queues_cfg == NULL && nb_queues != 0) {
138                 /* Allocate memory to store queue configuration */
139                 dev->data->queues_cfg = rte_zmalloc_socket(
140                                 "eventdev->data->queues_cfg",
141                                 sizeof(dev->data->queues_cfg[0]) * nb_queues,
142                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
143                 if (dev->data->queues_cfg == NULL) {
144                         dev->data->nb_queues = 0;
145                         RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
146                                         " nb_queues %u", nb_queues);
147                         return -(ENOMEM);
148                 }
149         /* Re-configure */
150         } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
151                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
152
153                 for (i = nb_queues; i < old_nb_queues; i++)
154                         (*dev->dev_ops->queue_release)(dev, i);
155
156                 /* Reallocate memory to store queue configuration */
157                 queues_cfg = dev->data->queues_cfg;
158                 queues_cfg = rte_realloc(queues_cfg,
159                                 sizeof(queues_cfg[0]) * nb_queues,
160                                 RTE_CACHE_LINE_SIZE);
161                 if (queues_cfg == NULL) {
162                         RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
163                                                 " nb_queues %u", nb_queues);
164                         return -(ENOMEM);
165                 }
166                 dev->data->queues_cfg = queues_cfg;
167
168                 if (nb_queues > old_nb_queues) {
169                         uint8_t new_qs = nb_queues - old_nb_queues;
170
171                         memset(queues_cfg + old_nb_queues, 0,
172                                 sizeof(queues_cfg[0]) * new_qs);
173                 }
174         } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
175                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
176
177                 for (i = nb_queues; i < old_nb_queues; i++)
178                         (*dev->dev_ops->queue_release)(dev, i);
179         }
180
181         dev->data->nb_queues = nb_queues;
182         return 0;
183 }
184
185 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
186
187 static inline int
188 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
189 {
190         uint8_t old_nb_ports = dev->data->nb_ports;
191         void **ports;
192         uint16_t *links_map;
193         struct rte_event_port_conf *ports_cfg;
194         unsigned int i;
195
196         RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
197                          dev->data->dev_id);
198
199         /* First time configuration */
200         if (dev->data->ports == NULL && nb_ports != 0) {
201                 dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
202                                 sizeof(dev->data->ports[0]) * nb_ports,
203                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
204                 if (dev->data->ports == NULL) {
205                         dev->data->nb_ports = 0;
206                         RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
207                                         " nb_ports %u", nb_ports);
208                         return -(ENOMEM);
209                 }
210
211                 /* Allocate memory to store port configurations */
212                 dev->data->ports_cfg =
213                         rte_zmalloc_socket("eventdev->ports_cfg",
214                         sizeof(dev->data->ports_cfg[0]) * nb_ports,
215                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
216                 if (dev->data->ports_cfg == NULL) {
217                         dev->data->nb_ports = 0;
218                         RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
219                                         " nb_ports %u", nb_ports);
220                         return -(ENOMEM);
221                 }
222
223                 /* Allocate memory to store queue to port link connection */
224                 dev->data->links_map =
225                         rte_zmalloc_socket("eventdev->links_map",
226                         sizeof(dev->data->links_map[0]) * nb_ports *
227                         RTE_EVENT_MAX_QUEUES_PER_DEV,
228                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
229                 if (dev->data->links_map == NULL) {
230                         dev->data->nb_ports = 0;
231                         RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
232                                         " nb_ports %u", nb_ports);
233                         return -(ENOMEM);
234                 }
235                 for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
236                         dev->data->links_map[i] =
237                                 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
238         } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
239                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
240
241                 ports = dev->data->ports;
242                 ports_cfg = dev->data->ports_cfg;
243                 links_map = dev->data->links_map;
244
245                 for (i = nb_ports; i < old_nb_ports; i++)
246                         (*dev->dev_ops->port_release)(ports[i]);
247
248                 /* Realloc memory for ports */
249                 ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
250                                 RTE_CACHE_LINE_SIZE);
251                 if (ports == NULL) {
252                         RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
253                                                 " nb_ports %u", nb_ports);
254                         return -(ENOMEM);
255                 }
256
257                 /* Realloc memory for ports_cfg */
258                 ports_cfg = rte_realloc(ports_cfg,
259                         sizeof(ports_cfg[0]) * nb_ports,
260                         RTE_CACHE_LINE_SIZE);
261                 if (ports_cfg == NULL) {
262                         RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
263                                                 " nb_ports %u", nb_ports);
264                         return -(ENOMEM);
265                 }
266
267                 /* Realloc memory to store queue to port link connection */
268                 links_map = rte_realloc(links_map,
269                         sizeof(dev->data->links_map[0]) * nb_ports *
270                         RTE_EVENT_MAX_QUEUES_PER_DEV,
271                         RTE_CACHE_LINE_SIZE);
272                 if (links_map == NULL) {
273                         dev->data->nb_ports = 0;
274                         RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
275                                         " nb_ports %u", nb_ports);
276                         return -(ENOMEM);
277                 }
278
279                 if (nb_ports > old_nb_ports) {
280                         uint8_t new_ps = nb_ports - old_nb_ports;
281                         unsigned int old_links_map_end =
282                                 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
283                         unsigned int links_map_end =
284                                 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
285
286                         memset(ports + old_nb_ports, 0,
287                                 sizeof(ports[0]) * new_ps);
288                         memset(ports_cfg + old_nb_ports, 0,
289                                 sizeof(ports_cfg[0]) * new_ps);
290                         for (i = old_links_map_end; i < links_map_end; i++)
291                                 links_map[i] =
292                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
293                 }
294
295                 dev->data->ports = ports;
296                 dev->data->ports_cfg = ports_cfg;
297                 dev->data->links_map = links_map;
298         } else if (dev->data->ports != NULL && nb_ports == 0) {
299                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
300
301                 ports = dev->data->ports;
302                 for (i = nb_ports; i < old_nb_ports; i++)
303                         (*dev->dev_ops->port_release)(ports[i]);
304         }
305
306         dev->data->nb_ports = nb_ports;
307         return 0;
308 }
309
310 int
311 rte_event_dev_configure(uint8_t dev_id,
312                         const struct rte_event_dev_config *dev_conf)
313 {
314         struct rte_eventdev *dev;
315         struct rte_event_dev_info info;
316         int diag;
317
318         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
319         dev = &rte_eventdevs[dev_id];
320
321         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
322         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
323
324         if (dev->data->dev_started) {
325                 RTE_EDEV_LOG_ERR(
326                     "device %d must be stopped to allow configuration", dev_id);
327                 return -EBUSY;
328         }
329
330         if (dev_conf == NULL)
331                 return -EINVAL;
332
333         (*dev->dev_ops->dev_infos_get)(dev, &info);
334
335         /* Check dequeue_timeout_ns value is in limit */
336         if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
337                 if (dev_conf->dequeue_timeout_ns &&
338                     (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
339                         || dev_conf->dequeue_timeout_ns >
340                                  info.max_dequeue_timeout_ns)) {
341                         RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
342                         " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
343                         dev_id, dev_conf->dequeue_timeout_ns,
344                         info.min_dequeue_timeout_ns,
345                         info.max_dequeue_timeout_ns);
346                         return -EINVAL;
347                 }
348         }
349
350         /* Check nb_events_limit is in limit */
351         if (dev_conf->nb_events_limit > info.max_num_events) {
352                 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
353                 dev_id, dev_conf->nb_events_limit, info.max_num_events);
354                 return -EINVAL;
355         }
356
357         /* Check nb_event_queues is in limit */
358         if (!dev_conf->nb_event_queues) {
359                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
360                                         dev_id);
361                 return -EINVAL;
362         }
363         if (dev_conf->nb_event_queues > info.max_event_queues) {
364                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
365                 dev_id, dev_conf->nb_event_queues, info.max_event_queues);
366                 return -EINVAL;
367         }
368
369         /* Check nb_event_ports is in limit */
370         if (!dev_conf->nb_event_ports) {
371                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
372                 return -EINVAL;
373         }
374         if (dev_conf->nb_event_ports > info.max_event_ports) {
375                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
376                 dev_id, dev_conf->nb_event_ports, info.max_event_ports);
377                 return -EINVAL;
378         }
379
380         /* Check nb_event_queue_flows is in limit */
381         if (!dev_conf->nb_event_queue_flows) {
382                 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
383                 return -EINVAL;
384         }
385         if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
386                 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
387                 dev_id, dev_conf->nb_event_queue_flows,
388                 info.max_event_queue_flows);
389                 return -EINVAL;
390         }
391
392         /* Check nb_event_port_dequeue_depth is in limit */
393         if (!dev_conf->nb_event_port_dequeue_depth) {
394                 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
395                                         dev_id);
396                 return -EINVAL;
397         }
398         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
399                  (dev_conf->nb_event_port_dequeue_depth >
400                          info.max_event_port_dequeue_depth)) {
401                 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
402                 dev_id, dev_conf->nb_event_port_dequeue_depth,
403                 info.max_event_port_dequeue_depth);
404                 return -EINVAL;
405         }
406
407         /* Check nb_event_port_enqueue_depth is in limit */
408         if (!dev_conf->nb_event_port_enqueue_depth) {
409                 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
410                                         dev_id);
411                 return -EINVAL;
412         }
413         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
414                 (dev_conf->nb_event_port_enqueue_depth >
415                          info.max_event_port_enqueue_depth)) {
416                 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
417                 dev_id, dev_conf->nb_event_port_enqueue_depth,
418                 info.max_event_port_enqueue_depth);
419                 return -EINVAL;
420         }
421
422         /* Copy the dev_conf parameter into the dev structure */
423         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
424
425         /* Setup new number of queues and reconfigure device. */
426         diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
427         if (diag != 0) {
428                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
429                                 dev_id, diag);
430                 return diag;
431         }
432
433         /* Setup new number of ports and reconfigure device. */
434         diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
435         if (diag != 0) {
436                 rte_event_dev_queue_config(dev, 0);
437                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
438                                 dev_id, diag);
439                 return diag;
440         }
441
442         /* Configure the device */
443         diag = (*dev->dev_ops->dev_configure)(dev);
444         if (diag != 0) {
445                 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
446                 rte_event_dev_queue_config(dev, 0);
447                 rte_event_dev_port_config(dev, 0);
448         }
449
450         dev->data->event_dev_cap = info.event_dev_cap;
451         return diag;
452 }
453
454 static inline int
455 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
456 {
457         if (queue_id < dev->data->nb_queues && queue_id <
458                                 RTE_EVENT_MAX_QUEUES_PER_DEV)
459                 return 1;
460         else
461                 return 0;
462 }
463
464 int
465 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
466                                  struct rte_event_queue_conf *queue_conf)
467 {
468         struct rte_eventdev *dev;
469
470         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
471         dev = &rte_eventdevs[dev_id];
472
473         if (queue_conf == NULL)
474                 return -EINVAL;
475
476         if (!is_valid_queue(dev, queue_id)) {
477                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
478                 return -EINVAL;
479         }
480
481         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
482         memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
483         (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
484         return 0;
485 }
486
487 static inline int
488 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
489 {
490         if (queue_conf &&
491                 !(queue_conf->event_queue_cfg &
492                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
493                 ((queue_conf->event_queue_cfg &
494                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
495                 (queue_conf->schedule_type
496                         == RTE_SCHED_TYPE_ATOMIC)
497                 ))
498                 return 1;
499         else
500                 return 0;
501 }
502
503 static inline int
504 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
505 {
506         if (queue_conf &&
507                 !(queue_conf->event_queue_cfg &
508                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
509                 ((queue_conf->event_queue_cfg &
510                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
511                 (queue_conf->schedule_type
512                         == RTE_SCHED_TYPE_ORDERED)
513                 ))
514                 return 1;
515         else
516                 return 0;
517 }
518
519
520 int
521 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
522                       const struct rte_event_queue_conf *queue_conf)
523 {
524         struct rte_eventdev *dev;
525         struct rte_event_queue_conf def_conf;
526
527         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
528         dev = &rte_eventdevs[dev_id];
529
530         if (!is_valid_queue(dev, queue_id)) {
531                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
532                 return -EINVAL;
533         }
534
535         /* Check nb_atomic_flows limit */
536         if (is_valid_atomic_queue_conf(queue_conf)) {
537                 if (queue_conf->nb_atomic_flows == 0 ||
538                     queue_conf->nb_atomic_flows >
539                         dev->data->dev_conf.nb_event_queue_flows) {
540                         RTE_EDEV_LOG_ERR(
541                 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
542                         dev_id, queue_id, queue_conf->nb_atomic_flows,
543                         dev->data->dev_conf.nb_event_queue_flows);
544                         return -EINVAL;
545                 }
546         }
547
548         /* Check nb_atomic_order_sequences limit */
549         if (is_valid_ordered_queue_conf(queue_conf)) {
550                 if (queue_conf->nb_atomic_order_sequences == 0 ||
551                     queue_conf->nb_atomic_order_sequences >
552                         dev->data->dev_conf.nb_event_queue_flows) {
553                         RTE_EDEV_LOG_ERR(
554                 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
555                         dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
556                         dev->data->dev_conf.nb_event_queue_flows);
557                         return -EINVAL;
558                 }
559         }
560
561         if (dev->data->dev_started) {
562                 RTE_EDEV_LOG_ERR(
563                     "device %d must be stopped to allow queue setup", dev_id);
564                 return -EBUSY;
565         }
566
567         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
568
569         if (queue_conf == NULL) {
570                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
571                                         -ENOTSUP);
572                 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
573                 queue_conf = &def_conf;
574         }
575
576         dev->data->queues_cfg[queue_id] = *queue_conf;
577         return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
578 }
579
580 static inline int
581 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
582 {
583         if (port_id < dev->data->nb_ports)
584                 return 1;
585         else
586                 return 0;
587 }
588
589 int
590 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
591                                  struct rte_event_port_conf *port_conf)
592 {
593         struct rte_eventdev *dev;
594
595         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
596         dev = &rte_eventdevs[dev_id];
597
598         if (port_conf == NULL)
599                 return -EINVAL;
600
601         if (!is_valid_port(dev, port_id)) {
602                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
603                 return -EINVAL;
604         }
605
606         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
607         memset(port_conf, 0, sizeof(struct rte_event_port_conf));
608         (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
609         return 0;
610 }
611
612 int
613 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
614                      const struct rte_event_port_conf *port_conf)
615 {
616         struct rte_eventdev *dev;
617         struct rte_event_port_conf def_conf;
618         int diag;
619
620         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
621         dev = &rte_eventdevs[dev_id];
622
623         if (!is_valid_port(dev, port_id)) {
624                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
625                 return -EINVAL;
626         }
627
628         /* Check new_event_threshold limit */
629         if ((port_conf && !port_conf->new_event_threshold) ||
630                         (port_conf && port_conf->new_event_threshold >
631                                  dev->data->dev_conf.nb_events_limit)) {
632                 RTE_EDEV_LOG_ERR(
633                    "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
634                         dev_id, port_id, port_conf->new_event_threshold,
635                         dev->data->dev_conf.nb_events_limit);
636                 return -EINVAL;
637         }
638
639         /* Check dequeue_depth limit */
640         if ((port_conf && !port_conf->dequeue_depth) ||
641                         (port_conf && port_conf->dequeue_depth >
642                 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
643                 RTE_EDEV_LOG_ERR(
644                    "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
645                         dev_id, port_id, port_conf->dequeue_depth,
646                         dev->data->dev_conf.nb_event_port_dequeue_depth);
647                 return -EINVAL;
648         }
649
650         /* Check enqueue_depth limit */
651         if ((port_conf && !port_conf->enqueue_depth) ||
652                         (port_conf && port_conf->enqueue_depth >
653                 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
654                 RTE_EDEV_LOG_ERR(
655                    "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
656                         dev_id, port_id, port_conf->enqueue_depth,
657                         dev->data->dev_conf.nb_event_port_enqueue_depth);
658                 return -EINVAL;
659         }
660
661         if (dev->data->dev_started) {
662                 RTE_EDEV_LOG_ERR(
663                     "device %d must be stopped to allow port setup", dev_id);
664                 return -EBUSY;
665         }
666
667         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
668
669         if (port_conf == NULL) {
670                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
671                                         -ENOTSUP);
672                 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
673                 port_conf = &def_conf;
674         }
675
676         dev->data->ports_cfg[port_id] = *port_conf;
677
678         diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
679
680         /* Unlink all the queues from this port (default state after setup) */
681         if (!diag)
682                 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
683
684         if (diag < 0)
685                 return diag;
686
687         return 0;
688 }
689
690 int
691 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
692                        uint32_t *attr_value)
693 {
694         struct rte_eventdev *dev;
695
696         if (!attr_value)
697                 return -EINVAL;
698         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
699         dev = &rte_eventdevs[dev_id];
700
701         switch (attr_id) {
702         case RTE_EVENT_DEV_ATTR_PORT_COUNT:
703                 *attr_value = dev->data->nb_ports;
704                 break;
705         case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
706                 *attr_value = dev->data->nb_queues;
707                 break;
708         case RTE_EVENT_DEV_ATTR_STARTED:
709                 *attr_value = dev->data->dev_started;
710                 break;
711         default:
712                 return -EINVAL;
713         }
714
715         return 0;
716 }
717
718 int
719 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
720                         uint32_t *attr_value)
721 {
722         struct rte_eventdev *dev;
723
724         if (!attr_value)
725                 return -EINVAL;
726
727         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
728         dev = &rte_eventdevs[dev_id];
729         if (!is_valid_port(dev, port_id)) {
730                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
731                 return -EINVAL;
732         }
733
734         switch (attr_id) {
735         case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
736                 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
737                 break;
738         case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
739                 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
740                 break;
741         case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
742                 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
743                 break;
744         default:
745                 return -EINVAL;
746         };
747         return 0;
748 }
749
750 int
751 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
752                         uint32_t *attr_value)
753 {
754         struct rte_event_queue_conf *conf;
755         struct rte_eventdev *dev;
756
757         if (!attr_value)
758                 return -EINVAL;
759
760         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
761         dev = &rte_eventdevs[dev_id];
762         if (!is_valid_queue(dev, queue_id)) {
763                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
764                 return -EINVAL;
765         }
766
767         conf = &dev->data->queues_cfg[queue_id];
768
769         switch (attr_id) {
770         case RTE_EVENT_QUEUE_ATTR_PRIORITY:
771                 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
772                 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
773                         *attr_value = conf->priority;
774                 break;
775         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
776                 *attr_value = conf->nb_atomic_flows;
777                 break;
778         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
779                 *attr_value = conf->nb_atomic_order_sequences;
780                 break;
781         case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
782                 *attr_value = conf->event_queue_cfg;
783                 break;
784         case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
785                 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
786                         return -EOVERFLOW;
787
788                 *attr_value = conf->schedule_type;
789                 break;
790         default:
791                 return -EINVAL;
792         };
793         return 0;
794 }
795
796 int
797 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
798                     const uint8_t queues[], const uint8_t priorities[],
799                     uint16_t nb_links)
800 {
801         struct rte_eventdev *dev;
802         uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
803         uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
804         uint16_t *links_map;
805         int i, diag;
806
807         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
808         dev = &rte_eventdevs[dev_id];
809
810         if (*dev->dev_ops->port_link == NULL) {
811                 RTE_PMD_DEBUG_TRACE("Function not supported\n");
812                 rte_errno = -ENOTSUP;
813                 return 0;
814         }
815
816         if (!is_valid_port(dev, port_id)) {
817                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
818                 rte_errno = -EINVAL;
819                 return 0;
820         }
821
822         if (queues == NULL) {
823                 for (i = 0; i < dev->data->nb_queues; i++)
824                         queues_list[i] = i;
825
826                 queues = queues_list;
827                 nb_links = dev->data->nb_queues;
828         }
829
830         if (priorities == NULL) {
831                 for (i = 0; i < nb_links; i++)
832                         priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
833
834                 priorities = priorities_list;
835         }
836
837         for (i = 0; i < nb_links; i++)
838                 if (queues[i] >= dev->data->nb_queues) {
839                         rte_errno = -EINVAL;
840                         return 0;
841                 }
842
843         diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
844                                                 queues, priorities, nb_links);
845         if (diag < 0)
846                 return diag;
847
848         links_map = dev->data->links_map;
849         /* Point links_map to this port specific area */
850         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
851         for (i = 0; i < diag; i++)
852                 links_map[queues[i]] = (uint8_t)priorities[i];
853
854         return diag;
855 }
856
857 int
858 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
859                       uint8_t queues[], uint16_t nb_unlinks)
860 {
861         struct rte_eventdev *dev;
862         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
863         int i, diag;
864         uint16_t *links_map;
865
866         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
867         dev = &rte_eventdevs[dev_id];
868
869         if (*dev->dev_ops->port_unlink == NULL) {
870                 RTE_PMD_DEBUG_TRACE("Function not supported\n");
871                 rte_errno = -ENOTSUP;
872                 return 0;
873         }
874
875         if (!is_valid_port(dev, port_id)) {
876                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
877                 rte_errno = -EINVAL;
878                 return 0;
879         }
880
881         if (queues == NULL) {
882                 for (i = 0; i < dev->data->nb_queues; i++)
883                         all_queues[i] = i;
884                 queues = all_queues;
885                 nb_unlinks = dev->data->nb_queues;
886         }
887
888         for (i = 0; i < nb_unlinks; i++)
889                 if (queues[i] >= dev->data->nb_queues) {
890                         rte_errno = -EINVAL;
891                         return 0;
892                 }
893
894         diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
895                                         queues, nb_unlinks);
896
897         if (diag < 0)
898                 return diag;
899
900         links_map = dev->data->links_map;
901         /* Point links_map to this port specific area */
902         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
903         for (i = 0; i < diag; i++)
904                 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
905
906         return diag;
907 }
908
909 int
910 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
911                          uint8_t queues[], uint8_t priorities[])
912 {
913         struct rte_eventdev *dev;
914         uint16_t *links_map;
915         int i, count = 0;
916
917         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
918         dev = &rte_eventdevs[dev_id];
919         if (!is_valid_port(dev, port_id)) {
920                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
921                 return -EINVAL;
922         }
923
924         links_map = dev->data->links_map;
925         /* Point links_map to this port specific area */
926         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
927         for (i = 0; i < dev->data->nb_queues; i++) {
928                 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
929                         queues[count] = i;
930                         priorities[count] = (uint8_t)links_map[i];
931                         ++count;
932                 }
933         }
934         return count;
935 }
936
937 int
938 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
939                                  uint64_t *timeout_ticks)
940 {
941         struct rte_eventdev *dev;
942
943         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
944         dev = &rte_eventdevs[dev_id];
945         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
946
947         if (timeout_ticks == NULL)
948                 return -EINVAL;
949
950         return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
951 }
952
953 int
954 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
955 {
956         struct rte_eventdev *dev;
957
958         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
959         dev = &rte_eventdevs[dev_id];
960
961         if (service_id == NULL)
962                 return -EINVAL;
963
964         if (dev->data->service_inited)
965                 *service_id = dev->data->service_id;
966
967         return dev->data->service_inited ? 0 : -ESRCH;
968 }
969
970 int
971 rte_event_dev_dump(uint8_t dev_id, FILE *f)
972 {
973         struct rte_eventdev *dev;
974
975         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
976         dev = &rte_eventdevs[dev_id];
977         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
978
979         (*dev->dev_ops->dump)(dev, f);
980         return 0;
981
982 }
983
984 static int
985 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
986                 uint8_t queue_port_id)
987 {
988         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
989         if (dev->dev_ops->xstats_get_names != NULL)
990                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
991                                                         queue_port_id,
992                                                         NULL, NULL, 0);
993         return 0;
994 }
995
996 int
997 rte_event_dev_xstats_names_get(uint8_t dev_id,
998                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
999                 struct rte_event_dev_xstats_name *xstats_names,
1000                 unsigned int *ids, unsigned int size)
1001 {
1002         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1003         const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1004                                                           queue_port_id);
1005         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1006                         (int)size < cnt_expected_entries)
1007                 return cnt_expected_entries;
1008
1009         /* dev_id checked above */
1010         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1011
1012         if (dev->dev_ops->xstats_get_names != NULL)
1013                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1014                                 queue_port_id, xstats_names, ids, size);
1015
1016         return -ENOTSUP;
1017 }
1018
1019 /* retrieve eventdev extended statistics */
1020 int
1021 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1022                 uint8_t queue_port_id, const unsigned int ids[],
1023                 uint64_t values[], unsigned int n)
1024 {
1025         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1026         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1027
1028         /* implemented by the driver */
1029         if (dev->dev_ops->xstats_get != NULL)
1030                 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1031                                 ids, values, n);
1032         return -ENOTSUP;
1033 }
1034
1035 uint64_t
1036 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1037                 unsigned int *id)
1038 {
1039         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1040         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1041         unsigned int temp = -1;
1042
1043         if (id != NULL)
1044                 *id = (unsigned int)-1;
1045         else
1046                 id = &temp; /* ensure driver never gets a NULL value */
1047
1048         /* implemented by driver */
1049         if (dev->dev_ops->xstats_get_by_name != NULL)
1050                 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1051         return -ENOTSUP;
1052 }
1053
1054 int rte_event_dev_xstats_reset(uint8_t dev_id,
1055                 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1056                 const uint32_t ids[], uint32_t nb_ids)
1057 {
1058         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1059         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1060
1061         if (dev->dev_ops->xstats_reset != NULL)
1062                 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1063                                                         ids, nb_ids);
1064         return -ENOTSUP;
1065 }
1066
1067 int
1068 rte_event_dev_start(uint8_t dev_id)
1069 {
1070         struct rte_eventdev *dev;
1071         int diag;
1072
1073         RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1074
1075         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1076         dev = &rte_eventdevs[dev_id];
1077         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1078
1079         if (dev->data->dev_started != 0) {
1080                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1081                         dev_id);
1082                 return 0;
1083         }
1084
1085         diag = (*dev->dev_ops->dev_start)(dev);
1086         if (diag == 0)
1087                 dev->data->dev_started = 1;
1088         else
1089                 return diag;
1090
1091         return 0;
1092 }
1093
1094 void
1095 rte_event_dev_stop(uint8_t dev_id)
1096 {
1097         struct rte_eventdev *dev;
1098
1099         RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1100
1101         RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1102         dev = &rte_eventdevs[dev_id];
1103         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1104
1105         if (dev->data->dev_started == 0) {
1106                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1107                         dev_id);
1108                 return;
1109         }
1110
1111         dev->data->dev_started = 0;
1112         (*dev->dev_ops->dev_stop)(dev);
1113 }
1114
1115 int
1116 rte_event_dev_close(uint8_t dev_id)
1117 {
1118         struct rte_eventdev *dev;
1119
1120         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1121         dev = &rte_eventdevs[dev_id];
1122         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1123
1124         /* Device must be stopped before it can be closed */
1125         if (dev->data->dev_started == 1) {
1126                 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1127                                 dev_id);
1128                 return -EBUSY;
1129         }
1130
1131         return (*dev->dev_ops->dev_close)(dev);
1132 }
1133
1134 static inline int
1135 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1136                 int socket_id)
1137 {
1138         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1139         const struct rte_memzone *mz;
1140         int n;
1141
1142         /* Generate memzone name */
1143         n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1144         if (n >= (int)sizeof(mz_name))
1145                 return -EINVAL;
1146
1147         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1148                 mz = rte_memzone_reserve(mz_name,
1149                                 sizeof(struct rte_eventdev_data),
1150                                 socket_id, 0);
1151         } else
1152                 mz = rte_memzone_lookup(mz_name);
1153
1154         if (mz == NULL)
1155                 return -ENOMEM;
1156
1157         *data = mz->addr;
1158         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1159                 memset(*data, 0, sizeof(struct rte_eventdev_data));
1160
1161         return 0;
1162 }
1163
1164 static inline uint8_t
1165 rte_eventdev_find_free_device_index(void)
1166 {
1167         uint8_t dev_id;
1168
1169         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1170                 if (rte_eventdevs[dev_id].attached ==
1171                                 RTE_EVENTDEV_DETACHED)
1172                         return dev_id;
1173         }
1174         return RTE_EVENT_MAX_DEVS;
1175 }
1176
1177 struct rte_eventdev *
1178 rte_event_pmd_allocate(const char *name, int socket_id)
1179 {
1180         struct rte_eventdev *eventdev;
1181         uint8_t dev_id;
1182
1183         if (rte_event_pmd_get_named_dev(name) != NULL) {
1184                 RTE_EDEV_LOG_ERR("Event device with name %s already "
1185                                 "allocated!", name);
1186                 return NULL;
1187         }
1188
1189         dev_id = rte_eventdev_find_free_device_index();
1190         if (dev_id == RTE_EVENT_MAX_DEVS) {
1191                 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1192                 return NULL;
1193         }
1194
1195         eventdev = &rte_eventdevs[dev_id];
1196
1197         if (eventdev->data == NULL) {
1198                 struct rte_eventdev_data *eventdev_data = NULL;
1199
1200                 int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1201                                 socket_id);
1202
1203                 if (retval < 0 || eventdev_data == NULL)
1204                         return NULL;
1205
1206                 eventdev->data = eventdev_data;
1207
1208                 snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
1209                                 "%s", name);
1210
1211                 eventdev->data->dev_id = dev_id;
1212                 eventdev->data->socket_id = socket_id;
1213                 eventdev->data->dev_started = 0;
1214
1215                 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1216
1217                 eventdev_globals.nb_devs++;
1218         }
1219
1220         return eventdev;
1221 }
1222
1223 int
1224 rte_event_pmd_release(struct rte_eventdev *eventdev)
1225 {
1226         int ret;
1227         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1228         const struct rte_memzone *mz;
1229
1230         if (eventdev == NULL)
1231                 return -EINVAL;
1232
1233         eventdev->attached = RTE_EVENTDEV_DETACHED;
1234         eventdev_globals.nb_devs--;
1235
1236         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1237                 rte_free(eventdev->data->dev_private);
1238
1239                 /* Generate memzone name */
1240                 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1241                                 eventdev->data->dev_id);
1242                 if (ret >= (int)sizeof(mz_name))
1243                         return -EINVAL;
1244
1245                 mz = rte_memzone_lookup(mz_name);
1246                 if (mz == NULL)
1247                         return -ENOMEM;
1248
1249                 ret = rte_memzone_free(mz);
1250                 if (ret)
1251                         return ret;
1252         }
1253
1254         eventdev->data = NULL;
1255         return 0;
1256 }
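
As a rough, illustrative sketch only (not part of the file above): the usual application-side call order for this northbound API is info query, device configure, queue and port setup, queue-to-port link, then start. The device id, the single-queue/single-port sizing and the reliance on driver default configurations below are assumptions made for the example, not values taken from dpdk.git.

#include <rte_eventdev.h>

/* Illustrative bring-up of event device `dev_id` with one queue and one port.
 * Sizing values come from the device's reported limits; passing NULL configs
 * selects the driver defaults, as handled in rte_event_queue_setup() and
 * rte_event_port_setup() above.
 */
static int
eventdev_bringup_sketch(uint8_t dev_id)
{
        struct rte_event_dev_info info;
        struct rte_event_dev_config cfg = {0};
        int ret;

        ret = rte_event_dev_info_get(dev_id, &info);
        if (ret < 0)
                return ret;

        cfg.nb_event_queues = 1;
        cfg.nb_event_ports = 1;
        cfg.nb_events_limit = info.max_num_events;
        cfg.nb_event_queue_flows = info.max_event_queue_flows;
        cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
        cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
        cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

        ret = rte_event_dev_configure(dev_id, &cfg);
        if (ret < 0)
                return ret;

        ret = rte_event_queue_setup(dev_id, 0, NULL); /* driver default conf */
        if (ret < 0)
                return ret;

        ret = rte_event_port_setup(dev_id, 0, NULL); /* driver default conf */
        if (ret < 0)
                return ret;

        /* Link every configured queue to port 0 at normal priority. Note that
         * parameter errors are reported through rte_errno with a 0 return, as
         * implemented in rte_event_port_link() above.
         */
        ret = rte_event_port_link(dev_id, 0, NULL, NULL, 0);
        if (ret < 0)
                return ret;

        return rte_event_dev_start(dev_id);
}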