[dpdk.git] lib/librte_eventdev/rte_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15
16 #include <rte_byteorder.h>
17 #include <rte_log.h>
18 #include <rte_debug.h>
19 #include <rte_dev.h>
20 #include <rte_memory.h>
21 #include <rte_memcpy.h>
22 #include <rte_memzone.h>
23 #include <rte_eal.h>
24 #include <rte_per_lcore.h>
25 #include <rte_lcore.h>
26 #include <rte_atomic.h>
27 #include <rte_branch_prediction.h>
28 #include <rte_common.h>
29 #include <rte_malloc.h>
30 #include <rte_errno.h>
31 #include <rte_ethdev.h>
32
33 #include "rte_eventdev.h"
34 #include "rte_eventdev_pmd.h"
35
36 struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
37
38 struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];
39
40 static struct rte_eventdev_global eventdev_globals = {
41         .nb_devs                = 0
42 };
43
44 struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;
45
46 /* Event dev north bound API implementation */
47
48 uint8_t
49 rte_event_dev_count(void)
50 {
51         return rte_eventdev_globals->nb_devs;
52 }
53
54 int
55 rte_event_dev_get_dev_id(const char *name)
56 {
57         int i;
58
59         if (!name)
60                 return -EINVAL;
61
62         for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
63                 if ((strcmp(rte_event_devices[i].data->name, name)
64                                 == 0) &&
65                                 (rte_event_devices[i].attached ==
66                                                 RTE_EVENTDEV_ATTACHED))
67                         return i;
68         return -ENODEV;
69 }
70
71 int
72 rte_event_dev_socket_id(uint8_t dev_id)
73 {
74         struct rte_eventdev *dev;
75
76         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
77         dev = &rte_eventdevs[dev_id];
78
79         return dev->data->socket_id;
80 }
81
82 int
83 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
84 {
85         struct rte_eventdev *dev;
86
87         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
88         dev = &rte_eventdevs[dev_id];
89
90         if (dev_info == NULL)
91                 return -EINVAL;
92
93         memset(dev_info, 0, sizeof(struct rte_event_dev_info));
94
95         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
96         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
97
98         dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
99
100         dev_info->dev = dev->dev;
101         return 0;
102 }
103
104 int
105 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
106                                 uint32_t *caps)
107 {
108         struct rte_eventdev *dev;
109
110         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
111         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
112
113         dev = &rte_eventdevs[dev_id];
114
115         if (caps == NULL)
116                 return -EINVAL;
117         *caps = 0;
118
119         return dev->dev_ops->eth_rx_adapter_caps_get ?
120                                 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
121                                                 &rte_eth_devices[eth_port_id],
122                                                 caps)
123                                 : 0;
124 }
125
126 int __rte_experimental
127 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
128 {
129         struct rte_eventdev *dev;
130         const struct rte_event_timer_adapter_ops *ops;
131
132         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
133
134         dev = &rte_eventdevs[dev_id];
135
136         if (caps == NULL)
137                 return -EINVAL;
138         *caps = 0;
139
140         return dev->dev_ops->timer_adapter_caps_get ?
141                                 (*dev->dev_ops->timer_adapter_caps_get)(dev,
142                                                                         0,
143                                                                         caps,
144                                                                         &ops)
145                                 : 0;
146 }
147
148 static inline int
149 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
150 {
151         uint8_t old_nb_queues = dev->data->nb_queues;
152         struct rte_event_queue_conf *queues_cfg;
153         unsigned int i;
154
155         RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
156                          dev->data->dev_id);
157
158         /* First time configuration */
159         if (dev->data->queues_cfg == NULL && nb_queues != 0) {
160                 /* Allocate memory to store queue configuration */
161                 dev->data->queues_cfg = rte_zmalloc_socket(
162                                 "eventdev->data->queues_cfg",
163                                 sizeof(dev->data->queues_cfg[0]) * nb_queues,
164                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
165                 if (dev->data->queues_cfg == NULL) {
166                         dev->data->nb_queues = 0;
167                         RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
168                                         " nb_queues %u", nb_queues);
169                         return -(ENOMEM);
170                 }
171         /* Re-configure */
172         } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
173                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
174
175                 for (i = nb_queues; i < old_nb_queues; i++)
176                         (*dev->dev_ops->queue_release)(dev, i);
177
178                 /* Re-allocate memory to store queue configuration */
179                 queues_cfg = dev->data->queues_cfg;
180                 queues_cfg = rte_realloc(queues_cfg,
181                                 sizeof(queues_cfg[0]) * nb_queues,
182                                 RTE_CACHE_LINE_SIZE);
183                 if (queues_cfg == NULL) {
184                         RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
185                                                 " nb_queues %u", nb_queues);
186                         return -(ENOMEM);
187                 }
188                 dev->data->queues_cfg = queues_cfg;
189
190                 if (nb_queues > old_nb_queues) {
191                         uint8_t new_qs = nb_queues - old_nb_queues;
192
193                         memset(queues_cfg + old_nb_queues, 0,
194                                 sizeof(queues_cfg[0]) * new_qs);
195                 }
196         } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
197                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
198
199                 for (i = nb_queues; i < old_nb_queues; i++)
200                         (*dev->dev_ops->queue_release)(dev, i);
201         }
202
203         dev->data->nb_queues = nb_queues;
204         return 0;
205 }
206
207 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
208
209 static inline int
210 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
211 {
212         uint8_t old_nb_ports = dev->data->nb_ports;
213         void **ports;
214         uint16_t *links_map;
215         struct rte_event_port_conf *ports_cfg;
216         unsigned int i;
217
218         RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
219                          dev->data->dev_id);
220
221         /* First time configuration */
222         if (dev->data->ports == NULL && nb_ports != 0) {
223                 dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
224                                 sizeof(dev->data->ports[0]) * nb_ports,
225                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
226                 if (dev->data->ports == NULL) {
227                         dev->data->nb_ports = 0;
228                         RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
229                                         " nb_ports %u", nb_ports);
230                         return -(ENOMEM);
231                 }
232
233                 /* Allocate memory to store port configurations */
234                 dev->data->ports_cfg =
235                         rte_zmalloc_socket("eventdev->ports_cfg",
236                         sizeof(dev->data->ports_cfg[0]) * nb_ports,
237                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
238                 if (dev->data->ports_cfg == NULL) {
239                         dev->data->nb_ports = 0;
240                         RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
241                                         " nb_ports %u", nb_ports);
242                         return -(ENOMEM);
243                 }
244
245                 /* Allocate memory to store queue to port link connection */
246                 dev->data->links_map =
247                         rte_zmalloc_socket("eventdev->links_map",
248                         sizeof(dev->data->links_map[0]) * nb_ports *
249                         RTE_EVENT_MAX_QUEUES_PER_DEV,
250                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
251                 if (dev->data->links_map == NULL) {
252                         dev->data->nb_ports = 0;
253                         RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
254                                         " nb_ports %u", nb_ports);
255                         return -(ENOMEM);
256                 }
257                 for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
258                         dev->data->links_map[i] =
259                                 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
260         } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
261                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
262
263                 ports = dev->data->ports;
264                 ports_cfg = dev->data->ports_cfg;
265                 links_map = dev->data->links_map;
266
267                 for (i = nb_ports; i < old_nb_ports; i++)
268                         (*dev->dev_ops->port_release)(ports[i]);
269
270                 /* Realloc memory for ports */
271                 ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
272                                 RTE_CACHE_LINE_SIZE);
273                 if (ports == NULL) {
274                         RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
275                                                 " nb_ports %u", nb_ports);
276                         return -(ENOMEM);
277                 }
278
279                 /* Realloc memory for ports_cfg */
280                 ports_cfg = rte_realloc(ports_cfg,
281                         sizeof(ports_cfg[0]) * nb_ports,
282                         RTE_CACHE_LINE_SIZE);
283                 if (ports_cfg == NULL) {
284                         RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
285                                                 " nb_ports %u", nb_ports);
286                         return -(ENOMEM);
287                 }
288
289                 /* Realloc memory to store queue to port link connection */
290                 links_map = rte_realloc(links_map,
291                         sizeof(dev->data->links_map[0]) * nb_ports *
292                         RTE_EVENT_MAX_QUEUES_PER_DEV,
293                         RTE_CACHE_LINE_SIZE);
294                 if (links_map == NULL) {
295                         dev->data->nb_ports = 0;
296                         RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
297                                         " nb_ports %u", nb_ports);
298                         return -(ENOMEM);
299                 }
300
301                 if (nb_ports > old_nb_ports) {
302                         uint8_t new_ps = nb_ports - old_nb_ports;
303                         unsigned int old_links_map_end =
304                                 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
305                         unsigned int links_map_end =
306                                 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
307
308                         memset(ports + old_nb_ports, 0,
309                                 sizeof(ports[0]) * new_ps);
310                         memset(ports_cfg + old_nb_ports, 0,
311                                 sizeof(ports_cfg[0]) * new_ps);
312                         for (i = old_links_map_end; i < links_map_end; i++)
313                                 links_map[i] =
314                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
315                 }
316
317                 dev->data->ports = ports;
318                 dev->data->ports_cfg = ports_cfg;
319                 dev->data->links_map = links_map;
320         } else if (dev->data->ports != NULL && nb_ports == 0) {
321                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
322
323                 ports = dev->data->ports;
324                 for (i = nb_ports; i < old_nb_ports; i++)
325                         (*dev->dev_ops->port_release)(ports[i]);
326         }
327
328         dev->data->nb_ports = nb_ports;
329         return 0;
330 }
331
332 int
333 rte_event_dev_configure(uint8_t dev_id,
334                         const struct rte_event_dev_config *dev_conf)
335 {
336         struct rte_eventdev *dev;
337         struct rte_event_dev_info info;
338         int diag;
339
340         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
341         dev = &rte_eventdevs[dev_id];
342
343         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
344         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
345
346         if (dev->data->dev_started) {
347                 RTE_EDEV_LOG_ERR(
348                     "device %d must be stopped to allow configuration", dev_id);
349                 return -EBUSY;
350         }
351
352         if (dev_conf == NULL)
353                 return -EINVAL;
354
355         (*dev->dev_ops->dev_infos_get)(dev, &info);
356
357         /* Check dequeue_timeout_ns value is in limit */
358         if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
359                 if (dev_conf->dequeue_timeout_ns &&
360                     (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
361                         || dev_conf->dequeue_timeout_ns >
362                                  info.max_dequeue_timeout_ns)) {
363                         RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
364                         " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
365                         dev_id, dev_conf->dequeue_timeout_ns,
366                         info.min_dequeue_timeout_ns,
367                         info.max_dequeue_timeout_ns);
368                         return -EINVAL;
369                 }
370         }
371
372         /* Check nb_events_limit is in limit */
373         if (dev_conf->nb_events_limit > info.max_num_events) {
374                 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
375                 dev_id, dev_conf->nb_events_limit, info.max_num_events);
376                 return -EINVAL;
377         }
378
379         /* Check nb_event_queues is in limit */
380         if (!dev_conf->nb_event_queues) {
381                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
382                                         dev_id);
383                 return -EINVAL;
384         }
385         if (dev_conf->nb_event_queues > info.max_event_queues) {
386                 RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
387                 dev_id, dev_conf->nb_event_queues, info.max_event_queues);
388                 return -EINVAL;
389         }
390
391         /* Check nb_event_ports is in limit */
392         if (!dev_conf->nb_event_ports) {
393                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
394                 return -EINVAL;
395         }
396         if (dev_conf->nb_event_ports > info.max_event_ports) {
397                 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
398                 dev_id, dev_conf->nb_event_ports, info.max_event_ports);
399                 return -EINVAL;
400         }
401
402         /* Check nb_event_queue_flows is in limit */
403         if (!dev_conf->nb_event_queue_flows) {
404                 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
405                 return -EINVAL;
406         }
407         if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
408                 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
409                 dev_id, dev_conf->nb_event_queue_flows,
410                 info.max_event_queue_flows);
411                 return -EINVAL;
412         }
413
414         /* Check nb_event_port_dequeue_depth is in limit */
415         if (!dev_conf->nb_event_port_dequeue_depth) {
416                 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
417                                         dev_id);
418                 return -EINVAL;
419         }
420         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
421                  (dev_conf->nb_event_port_dequeue_depth >
422                          info.max_event_port_dequeue_depth)) {
423                 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
424                 dev_id, dev_conf->nb_event_port_dequeue_depth,
425                 info.max_event_port_dequeue_depth);
426                 return -EINVAL;
427         }
428
429         /* Check nb_event_port_enqueue_depth is in limit */
430         if (!dev_conf->nb_event_port_enqueue_depth) {
431                 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
432                                         dev_id);
433                 return -EINVAL;
434         }
435         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
436                 (dev_conf->nb_event_port_enqueue_depth >
437                          info.max_event_port_enqueue_depth)) {
438                 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
439                 dev_id, dev_conf->nb_event_port_enqueue_depth,
440                 info.max_event_port_enqueue_depth);
441                 return -EINVAL;
442         }
443
444         /* Copy the dev_conf parameter into the dev structure */
445         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
446
447         /* Setup new number of queues and reconfigure device. */
448         diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
449         if (diag != 0) {
450                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
451                                 dev_id, diag);
452                 return diag;
453         }
454
455         /* Setup new number of ports and reconfigure device. */
456         diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
457         if (diag != 0) {
458                 rte_event_dev_queue_config(dev, 0);
459                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
460                                 dev_id, diag);
461                 return diag;
462         }
463
464         /* Configure the device */
465         diag = (*dev->dev_ops->dev_configure)(dev);
466         if (diag != 0) {
467                 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
468                 rte_event_dev_queue_config(dev, 0);
469                 rte_event_dev_port_config(dev, 0);
470         }
471
472         dev->data->event_dev_cap = info.event_dev_cap;
473         return diag;
474 }
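/*
 * Illustrative caller-side sketch (not part of this file): a minimal
 * rte_event_dev_configure() sequence that simply adopts the limits
 * reported by rte_event_dev_info_get(). Device id 0 is assumed to
 * exist; error handling is left to the application.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(0, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	if (rte_event_dev_configure(0, &cfg) < 0)
 *		(handle error);
 */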
475
476 static inline int
477 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
478 {
479         if (queue_id < dev->data->nb_queues && queue_id <
480                                 RTE_EVENT_MAX_QUEUES_PER_DEV)
481                 return 1;
482         else
483                 return 0;
484 }
485
486 int
487 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
488                                  struct rte_event_queue_conf *queue_conf)
489 {
490         struct rte_eventdev *dev;
491
492         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
493         dev = &rte_eventdevs[dev_id];
494
495         if (queue_conf == NULL)
496                 return -EINVAL;
497
498         if (!is_valid_queue(dev, queue_id)) {
499                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
500                 return -EINVAL;
501         }
502
503         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
504         memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
505         (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
506         return 0;
507 }
508
509 static inline int
510 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
511 {
512         if (queue_conf &&
513                 !(queue_conf->event_queue_cfg &
514                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
515                 ((queue_conf->event_queue_cfg &
516                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
517                 (queue_conf->schedule_type
518                         == RTE_SCHED_TYPE_ATOMIC)
519                 ))
520                 return 1;
521         else
522                 return 0;
523 }
524
525 static inline int
526 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
527 {
528         if (queue_conf &&
529                 !(queue_conf->event_queue_cfg &
530                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
531                 ((queue_conf->event_queue_cfg &
532                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
533                 (queue_conf->schedule_type
534                         == RTE_SCHED_TYPE_ORDERED)
535                 ))
536                 return 1;
537         else
538                 return 0;
539 }
540
541
542 int
543 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
544                       const struct rte_event_queue_conf *queue_conf)
545 {
546         struct rte_eventdev *dev;
547         struct rte_event_queue_conf def_conf;
548
549         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
550         dev = &rte_eventdevs[dev_id];
551
552         if (!is_valid_queue(dev, queue_id)) {
553                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
554                 return -EINVAL;
555         }
556
557         /* Check nb_atomic_flows limit */
558         if (is_valid_atomic_queue_conf(queue_conf)) {
559                 if (queue_conf->nb_atomic_flows == 0 ||
560                     queue_conf->nb_atomic_flows >
561                         dev->data->dev_conf.nb_event_queue_flows) {
562                         RTE_EDEV_LOG_ERR(
563                 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
564                         dev_id, queue_id, queue_conf->nb_atomic_flows,
565                         dev->data->dev_conf.nb_event_queue_flows);
566                         return -EINVAL;
567                 }
568         }
569
570         /* Check nb_atomic_order_sequences limit */
571         if (is_valid_ordered_queue_conf(queue_conf)) {
572                 if (queue_conf->nb_atomic_order_sequences == 0 ||
573                     queue_conf->nb_atomic_order_sequences >
574                         dev->data->dev_conf.nb_event_queue_flows) {
575                         RTE_EDEV_LOG_ERR(
576                 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
577                         dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
578                         dev->data->dev_conf.nb_event_queue_flows);
579                         return -EINVAL;
580                 }
581         }
582
583         if (dev->data->dev_started) {
584                 RTE_EDEV_LOG_ERR(
585                     "device %d must be stopped to allow queue setup", dev_id);
586                 return -EBUSY;
587         }
588
589         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
590
591         if (queue_conf == NULL) {
592                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
593                                         -ENOTSUP);
594                 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
595                 queue_conf = &def_conf;
596         }
597
598         dev->data->queues_cfg[queue_id] = *queue_conf;
599         return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
600 }
601
602 static inline int
603 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
604 {
605         if (port_id < dev->data->nb_ports)
606                 return 1;
607         else
608                 return 0;
609 }
610
611 int
612 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
613                                  struct rte_event_port_conf *port_conf)
614 {
615         struct rte_eventdev *dev;
616
617         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
618         dev = &rte_eventdevs[dev_id];
619
620         if (port_conf == NULL)
621                 return -EINVAL;
622
623         if (!is_valid_port(dev, port_id)) {
624                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
625                 return -EINVAL;
626         }
627
628         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
629         memset(port_conf, 0, sizeof(struct rte_event_port_conf));
630         (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
631         return 0;
632 }
633
634 int
635 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
636                      const struct rte_event_port_conf *port_conf)
637 {
638         struct rte_eventdev *dev;
639         struct rte_event_port_conf def_conf;
640         int diag;
641
642         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
643         dev = &rte_eventdevs[dev_id];
644
645         if (!is_valid_port(dev, port_id)) {
646                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
647                 return -EINVAL;
648         }
649
650         /* Check new_event_threshold limit */
651         if ((port_conf && !port_conf->new_event_threshold) ||
652                         (port_conf && port_conf->new_event_threshold >
653                                  dev->data->dev_conf.nb_events_limit)) {
654                 RTE_EDEV_LOG_ERR(
655                    "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
656                         dev_id, port_id, port_conf->new_event_threshold,
657                         dev->data->dev_conf.nb_events_limit);
658                 return -EINVAL;
659         }
660
661         /* Check dequeue_depth limit */
662         if ((port_conf && !port_conf->dequeue_depth) ||
663                         (port_conf && port_conf->dequeue_depth >
664                 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
665                 RTE_EDEV_LOG_ERR(
666                    "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
667                         dev_id, port_id, port_conf->dequeue_depth,
668                         dev->data->dev_conf.nb_event_port_dequeue_depth);
669                 return -EINVAL;
670         }
671
672         /* Check enqueue_depth limit */
673         if ((port_conf && !port_conf->enqueue_depth) ||
674                         (port_conf && port_conf->enqueue_depth >
675                 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
676                 RTE_EDEV_LOG_ERR(
677                    "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
678                         dev_id, port_id, port_conf->enqueue_depth,
679                         dev->data->dev_conf.nb_event_port_enqueue_depth);
680                 return -EINVAL;
681         }
682
683         if (port_conf && port_conf->disable_implicit_release &&
684             !(dev->data->event_dev_cap &
685               RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
686                 RTE_EDEV_LOG_ERR(
687                    "dev%d port%d Implicit release disable not supported",
688                         dev_id, port_id);
689                 return -EINVAL;
690         }
691
692         if (dev->data->dev_started) {
693                 RTE_EDEV_LOG_ERR(
694                     "device %d must be stopped to allow port setup", dev_id);
695                 return -EBUSY;
696         }
697
698         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
699
700         if (port_conf == NULL) {
701                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
702                                         -ENOTSUP);
703                 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
704                 port_conf = &def_conf;
705         }
706
707         dev->data->ports_cfg[port_id] = *port_conf;
708
709         diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
710
711         /* Unlink all the queues from this port (default state after setup) */
712         if (!diag)
713                 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
714
715         if (diag < 0)
716                 return diag;
717
718         return 0;
719 }
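/*
 * Illustrative caller-side sketch (not part of this file): after
 * rte_event_dev_configure(), each queue and port must be set up before
 * the device is started. Passing NULL uses the driver's default
 * configuration (queue_def_conf/port_def_conf above). A device 0
 * configured with one queue and one port is assumed.
 *
 *	if (rte_event_queue_setup(0, 0, NULL) < 0)
 *		(handle error);
 *	if (rte_event_port_setup(0, 0, NULL) < 0)
 *		(handle error);
 *
 * Note that a freshly set up port has all queues unlinked; see
 * rte_event_port_link() below.
 */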
720
721 int
722 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
723                        uint32_t *attr_value)
724 {
725         struct rte_eventdev *dev;
726
727         if (!attr_value)
728                 return -EINVAL;
729         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
730         dev = &rte_eventdevs[dev_id];
731
732         switch (attr_id) {
733         case RTE_EVENT_DEV_ATTR_PORT_COUNT:
734                 *attr_value = dev->data->nb_ports;
735                 break;
736         case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
737                 *attr_value = dev->data->nb_queues;
738                 break;
739         case RTE_EVENT_DEV_ATTR_STARTED:
740                 *attr_value = dev->data->dev_started;
741                 break;
742         default:
743                 return -EINVAL;
744         }
745
746         return 0;
747 }
748
749 int
750 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
751                         uint32_t *attr_value)
752 {
753         struct rte_eventdev *dev;
754
755         if (!attr_value)
756                 return -EINVAL;
757
758         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
759         dev = &rte_eventdevs[dev_id];
760         if (!is_valid_port(dev, port_id)) {
761                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
762                 return -EINVAL;
763         }
764
765         switch (attr_id) {
766         case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
767                 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
768                 break;
769         case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
770                 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
771                 break;
772         case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
773                 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
774                 break;
775         default:
776                 return -EINVAL;
777         }
778         return 0;
779 }
780
781 int
782 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
783                         uint32_t *attr_value)
784 {
785         struct rte_event_queue_conf *conf;
786         struct rte_eventdev *dev;
787
788         if (!attr_value)
789                 return -EINVAL;
790
791         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
792         dev = &rte_eventdevs[dev_id];
793         if (!is_valid_queue(dev, queue_id)) {
794                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
795                 return -EINVAL;
796         }
797
798         conf = &dev->data->queues_cfg[queue_id];
799
800         switch (attr_id) {
801         case RTE_EVENT_QUEUE_ATTR_PRIORITY:
802                 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
803                 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
804                         *attr_value = conf->priority;
805                 break;
806         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
807                 *attr_value = conf->nb_atomic_flows;
808                 break;
809         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
810                 *attr_value = conf->nb_atomic_order_sequences;
811                 break;
812         case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
813                 *attr_value = conf->event_queue_cfg;
814                 break;
815         case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
816                 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
817                         return -EOVERFLOW;
818
819                 *attr_value = conf->schedule_type;
820                 break;
821         default:
822                 return -EINVAL;
823         }
824         return 0;
825 }
826
827 int
828 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
829                     const uint8_t queues[], const uint8_t priorities[],
830                     uint16_t nb_links)
831 {
832         struct rte_eventdev *dev;
833         uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
834         uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
835         uint16_t *links_map;
836         int i, diag;
837
838         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
839         dev = &rte_eventdevs[dev_id];
840
841         if (*dev->dev_ops->port_link == NULL) {
842                 RTE_PMD_DEBUG_TRACE("Function not supported\n");
843                 rte_errno = -ENOTSUP;
844                 return 0;
845         }
846
847         if (!is_valid_port(dev, port_id)) {
848                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
849                 rte_errno = -EINVAL;
850                 return 0;
851         }
852
853         if (queues == NULL) {
854                 for (i = 0; i < dev->data->nb_queues; i++)
855                         queues_list[i] = i;
856
857                 queues = queues_list;
858                 nb_links = dev->data->nb_queues;
859         }
860
861         if (priorities == NULL) {
862                 for (i = 0; i < nb_links; i++)
863                         priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
864
865                 priorities = priorities_list;
866         }
867
868         for (i = 0; i < nb_links; i++)
869                 if (queues[i] >= dev->data->nb_queues) {
870                         rte_errno = -EINVAL;
871                         return 0;
872                 }
873
874         diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
875                                                 queues, priorities, nb_links);
876         if (diag < 0)
877                 return diag;
878
879         links_map = dev->data->links_map;
880         /* Point links_map to this port specific area */
881         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
882         for (i = 0; i < diag; i++)
883                 links_map[queues[i]] = (uint8_t)priorities[i];
884
885         return diag;
886 }
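/*
 * Illustrative caller-side sketch (not part of this file): link port 0
 * of device 0 to all of its configured queues at normal priority by
 * passing NULL for both the queue and priority arrays. The return value
 * is the number of links actually established, so a count lower than
 * the number of configured queues indicates a partial link.
 *
 *	int nb_links = rte_event_port_link(0, 0, NULL, NULL, 0);
 *	if (nb_links < 0)
 *		(handle error);
 */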
887
888 int
889 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
890                       uint8_t queues[], uint16_t nb_unlinks)
891 {
892         struct rte_eventdev *dev;
893         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
894         int i, diag, j;
895         uint16_t *links_map;
896
897         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0);
898         dev = &rte_eventdevs[dev_id];
899
900         if (*dev->dev_ops->port_unlink == NULL) {
901                 RTE_PMD_DEBUG_TRACE("Function not supported\n");
902                 rte_errno = -ENOTSUP;
903                 return 0;
904         }
905
906         if (!is_valid_port(dev, port_id)) {
907                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
908                 rte_errno = -EINVAL;
909                 return 0;
910         }
911
912         links_map = dev->data->links_map;
913         /* Point links_map to this port specific area */
914         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
915
916         if (queues == NULL) {
917                 j = 0;
918                 for (i = 0; i < dev->data->nb_queues; i++) {
919                         if (links_map[i] !=
920                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
921                                 all_queues[j] = i;
922                                 j++;
923                         }
924                 }
925                 queues = all_queues;
926         } else {
927                 for (j = 0; j < nb_unlinks; j++) {
928                         if (links_map[queues[j]] ==
929                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
930                                 break;
931                 }
932         }
933
934         nb_unlinks = j;
935         for (i = 0; i < nb_unlinks; i++)
936                 if (queues[i] >= dev->data->nb_queues) {
937                         rte_errno = -EINVAL;
938                         return 0;
939                 }
940
941         diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
942                                         queues, nb_unlinks);
943
944         if (diag < 0)
945                 return diag;
946
947         for (i = 0; i < diag; i++)
948                 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
949
950         return diag;
951 }
952
953 int
954 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
955                          uint8_t queues[], uint8_t priorities[])
956 {
957         struct rte_eventdev *dev;
958         uint16_t *links_map;
959         int i, count = 0;
960
961         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
962         dev = &rte_eventdevs[dev_id];
963         if (!is_valid_port(dev, port_id)) {
964                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
965                 return -EINVAL;
966         }
967
968         links_map = dev->data->links_map;
969         /* Point links_map to this port specific area */
970         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
971         for (i = 0; i < dev->data->nb_queues; i++) {
972                 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
973                         queues[count] = i;
974                         priorities[count] = (uint8_t)links_map[i];
975                         ++count;
976                 }
977         }
978         return count;
979 }
980
981 int
982 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
983                                  uint64_t *timeout_ticks)
984 {
985         struct rte_eventdev *dev;
986
987         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
988         dev = &rte_eventdevs[dev_id];
989         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
990
991         if (timeout_ticks == NULL)
992                 return -EINVAL;
993
994         return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
995 }
996
997 int
998 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
999 {
1000         struct rte_eventdev *dev;
1001
1002         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1003         dev = &rte_eventdevs[dev_id];
1004
1005         if (service_id == NULL)
1006                 return -EINVAL;
1007
1008         if (dev->data->service_inited)
1009                 *service_id = dev->data->service_id;
1010
1011         return dev->data->service_inited ? 0 : -ESRCH;
1012 }
1013
1014 int
1015 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1016 {
1017         struct rte_eventdev *dev;
1018
1019         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1020         dev = &rte_eventdevs[dev_id];
1021         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1022
1023         (*dev->dev_ops->dump)(dev, f);
1024         return 0;
1025
1026 }
1027
1028 static int
1029 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1030                 uint8_t queue_port_id)
1031 {
1032         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1033         if (dev->dev_ops->xstats_get_names != NULL)
1034                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1035                                                         queue_port_id,
1036                                                         NULL, NULL, 0);
1037         return 0;
1038 }
1039
1040 int
1041 rte_event_dev_xstats_names_get(uint8_t dev_id,
1042                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1043                 struct rte_event_dev_xstats_name *xstats_names,
1044                 unsigned int *ids, unsigned int size)
1045 {
1046         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1047         const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1048                                                           queue_port_id);
1049         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1050                         (int)size < cnt_expected_entries)
1051                 return cnt_expected_entries;
1052
1053         /* dev_id checked above */
1054         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1055
1056         if (dev->dev_ops->xstats_get_names != NULL)
1057                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1058                                 queue_port_id, xstats_names, ids, size);
1059
1060         return -ENOTSUP;
1061 }
1062
1063 /* retrieve eventdev extended statistics */
1064 int
1065 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1066                 uint8_t queue_port_id, const unsigned int ids[],
1067                 uint64_t values[], unsigned int n)
1068 {
1069         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1070         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1071
1072         /* implemented by the driver */
1073         if (dev->dev_ops->xstats_get != NULL)
1074                 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1075                                 ids, values, n);
1076         return -ENOTSUP;
1077 }
1078
1079 uint64_t
1080 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1081                 unsigned int *id)
1082 {
1083         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1084         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1085         unsigned int temp = -1;
1086
1087         if (id != NULL)
1088                 *id = (unsigned int)-1;
1089         else
1090                 id = &temp; /* ensure driver never gets a NULL value */
1091
1092         /* implemented by driver */
1093         if (dev->dev_ops->xstats_get_by_name != NULL)
1094                 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1095         return -ENOTSUP;
1096 }
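/*
 * Illustrative caller-side sketch (not part of this file): resolve a
 * driver-specific xstat by name once, then poll it by id. The stat name
 * "dev_rx_ok" is purely a placeholder; real names depend on the PMD and
 * can be enumerated with rte_event_dev_xstats_names_get().
 *
 *	unsigned int id;
 *	uint64_t value;
 *
 *	value = rte_event_dev_xstats_by_name_get(0, "dev_rx_ok", &id);
 *	(later, re-read the same counter by id:)
 *	rte_event_dev_xstats_get(0, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				&id, &value, 1);
 */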
1097
1098 int rte_event_dev_xstats_reset(uint8_t dev_id,
1099                 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1100                 const uint32_t ids[], uint32_t nb_ids)
1101 {
1102         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1103         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1104
1105         if (dev->dev_ops->xstats_reset != NULL)
1106                 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1107                                                         ids, nb_ids);
1108         return -ENOTSUP;
1109 }
1110
1111 int rte_event_dev_selftest(uint8_t dev_id)
1112 {
1113         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1114         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1115
1116         if (dev->dev_ops->dev_selftest != NULL)
1117                 return (*dev->dev_ops->dev_selftest)();
1118         return -ENOTSUP;
1119 }
1120
1121 int
1122 rte_event_dev_start(uint8_t dev_id)
1123 {
1124         struct rte_eventdev *dev;
1125         int diag;
1126
1127         RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1128
1129         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1130         dev = &rte_eventdevs[dev_id];
1131         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1132
1133         if (dev->data->dev_started != 0) {
1134                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1135                         dev_id);
1136                 return 0;
1137         }
1138
1139         diag = (*dev->dev_ops->dev_start)(dev);
1140         if (diag == 0)
1141                 dev->data->dev_started = 1;
1142         else
1143                 return diag;
1144
1145         return 0;
1146 }
1147
1148 int
1149 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1150                 eventdev_stop_flush_t callback, void *userdata)
1151 {
1152         struct rte_eventdev *dev;
1153
1154         RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1155
1156         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1157         dev = &rte_eventdevs[dev_id];
1158
1159         dev->dev_ops->dev_stop_flush = callback;
1160         dev->data->dev_stop_flush_arg = userdata;
1161
1162         return 0;
1163 }
1164
1165 void
1166 rte_event_dev_stop(uint8_t dev_id)
1167 {
1168         struct rte_eventdev *dev;
1169
1170         RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1171
1172         RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1173         dev = &rte_eventdevs[dev_id];
1174         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1175
1176         if (dev->data->dev_started == 0) {
1177                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1178                         dev_id);
1179                 return;
1180         }
1181
1182         dev->data->dev_started = 0;
1183         (*dev->dev_ops->dev_stop)(dev);
1184 }
1185
1186 int
1187 rte_event_dev_close(uint8_t dev_id)
1188 {
1189         struct rte_eventdev *dev;
1190
1191         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1192         dev = &rte_eventdevs[dev_id];
1193         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1194
1195         /* Device must be stopped before it can be closed */
1196         if (dev->data->dev_started == 1) {
1197                 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1198                                 dev_id);
1199                 return -EBUSY;
1200         }
1201
1202         return (*dev->dev_ops->dev_close)(dev);
1203 }
1204
1205 static inline int
1206 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1207                 int socket_id)
1208 {
1209         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1210         const struct rte_memzone *mz;
1211         int n;
1212
1213         /* Generate memzone name */
1214         n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1215         if (n >= (int)sizeof(mz_name))
1216                 return -EINVAL;
1217
1218         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1219                 mz = rte_memzone_reserve(mz_name,
1220                                 sizeof(struct rte_eventdev_data),
1221                                 socket_id, 0);
1222         } else
1223                 mz = rte_memzone_lookup(mz_name);
1224
1225         if (mz == NULL)
1226                 return -ENOMEM;
1227
1228         *data = mz->addr;
1229         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1230                 memset(*data, 0, sizeof(struct rte_eventdev_data));
1231
1232         return 0;
1233 }
1234
1235 static inline uint8_t
1236 rte_eventdev_find_free_device_index(void)
1237 {
1238         uint8_t dev_id;
1239
1240         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1241                 if (rte_eventdevs[dev_id].attached ==
1242                                 RTE_EVENTDEV_DETACHED)
1243                         return dev_id;
1244         }
1245         return RTE_EVENT_MAX_DEVS;
1246 }
1247
1248 struct rte_eventdev *
1249 rte_event_pmd_allocate(const char *name, int socket_id)
1250 {
1251         struct rte_eventdev *eventdev;
1252         uint8_t dev_id;
1253
1254         if (rte_event_pmd_get_named_dev(name) != NULL) {
1255                 RTE_EDEV_LOG_ERR("Event device with name %s already "
1256                                 "allocated!", name);
1257                 return NULL;
1258         }
1259
1260         dev_id = rte_eventdev_find_free_device_index();
1261         if (dev_id == RTE_EVENT_MAX_DEVS) {
1262                 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1263                 return NULL;
1264         }
1265
1266         eventdev = &rte_eventdevs[dev_id];
1267
1268         if (eventdev->data == NULL) {
1269                 struct rte_eventdev_data *eventdev_data = NULL;
1270
1271                 int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1272                                 socket_id);
1273
1274                 if (retval < 0 || eventdev_data == NULL)
1275                         return NULL;
1276
1277                 eventdev->data = eventdev_data;
1278
1279                 snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
1280                                 "%s", name);
1281
1282                 eventdev->data->dev_id = dev_id;
1283                 eventdev->data->socket_id = socket_id;
1284                 eventdev->data->dev_started = 0;
1285
1286                 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1287
1288                 eventdev_globals.nb_devs++;
1289         }
1290
1291         return eventdev;
1292 }
1293
1294 int
1295 rte_event_pmd_release(struct rte_eventdev *eventdev)
1296 {
1297         int ret;
1298         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1299         const struct rte_memzone *mz;
1300
1301         if (eventdev == NULL)
1302                 return -EINVAL;
1303
1304         eventdev->attached = RTE_EVENTDEV_DETACHED;
1305         eventdev_globals.nb_devs--;
1306
1307         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1308                 rte_free(eventdev->data->dev_private);
1309
1310                 /* Generate memzone name */
1311                 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1312                                 eventdev->data->dev_id);
1313                 if (ret >= (int)sizeof(mz_name))
1314                         return -EINVAL;
1315
1316                 mz = rte_memzone_lookup(mz_name);
1317                 if (mz == NULL)
1318                         return -ENOMEM;
1319
1320                 ret = rte_memzone_free(mz);
1321                 if (ret)
1322                         return ret;
1323         }
1324
1325         eventdev->data = NULL;
1326         return 0;
1327 }