/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2016 Cavium, Inc. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"

struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
        .nb_devs                = 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
        return rte_eventdev_globals->nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
        int i;

        if (!name)
                return -EINVAL;

        for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
                if ((strcmp(rte_event_devices[i].data->name, name)
                                == 0) &&
                                (rte_event_devices[i].attached ==
                                                RTE_EVENTDEV_ATTACHED))
                        return i;
        return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (dev_info == NULL)
                return -EINVAL;

        memset(dev_info, 0, sizeof(struct rte_event_dev_info));

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        (*dev->dev_ops->dev_infos_get)(dev, dev_info);

        dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

        dev_info->dev = dev->dev;
        return 0;
}

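/*
 * Internal helper for rte_event_dev_configure(): size the per-queue
 * configuration array. Covers first-time allocation, reallocation on
 * reconfigure (releasing queues beyond the new count first), and release
 * of all queues when nb_queues is 0.
 */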
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
        uint8_t old_nb_queues = dev->data->nb_queues;
        struct rte_event_queue_conf *queues_cfg;
        unsigned int i;

        RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
                         dev->data->dev_id);

        /* First time configuration */
        if (dev->data->queues_cfg == NULL && nb_queues != 0) {
                /* Allocate memory to store queue configuration */
                dev->data->queues_cfg = rte_zmalloc_socket(
                                "eventdev->data->queues_cfg",
                                sizeof(dev->data->queues_cfg[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE, dev->data->socket_id);
                if (dev->data->queues_cfg == NULL) {
                        dev->data->nb_queues = 0;
                        RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
                                        " nb_queues %u", nb_queues);
                        return -(ENOMEM);
                }
        /* Re-configure */
        } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->queue_release)(dev, i);

                /* Re-allocate memory to store queue configuration */
                queues_cfg = dev->data->queues_cfg;
                queues_cfg = rte_realloc(queues_cfg,
                                sizeof(queues_cfg[0]) * nb_queues,
                                RTE_CACHE_LINE_SIZE);
                if (queues_cfg == NULL) {
                        RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
                                                " nb_queues %u", nb_queues);
                        return -(ENOMEM);
                }
                dev->data->queues_cfg = queues_cfg;

                if (nb_queues > old_nb_queues) {
                        uint8_t new_qs = nb_queues - old_nb_queues;

                        memset(queues_cfg + old_nb_queues, 0,
                                sizeof(queues_cfg[0]) * new_qs);
                }
        } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

                for (i = nb_queues; i < old_nb_queues; i++)
                        (*dev->dev_ops->queue_release)(dev, i);
        }

        dev->data->nb_queues = nb_queues;
        return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

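/*
 * Internal helper for rte_event_dev_configure(): size the per-port arrays
 * (driver port pointers, port configurations and the queue-to-port
 * links_map). Like rte_event_dev_queue_config(), it handles first-time
 * allocation, reallocation on reconfigure, and release when nb_ports is 0.
 */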
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
        uint8_t old_nb_ports = dev->data->nb_ports;
        void **ports;
        uint16_t *links_map;
        struct rte_event_port_conf *ports_cfg;
        unsigned int i;

        RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
                         dev->data->dev_id);

        /* First time configuration */
        if (dev->data->ports == NULL && nb_ports != 0) {
                dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
                                sizeof(dev->data->ports[0]) * nb_ports,
                                RTE_CACHE_LINE_SIZE, dev->data->socket_id);
                if (dev->data->ports == NULL) {
                        dev->data->nb_ports = 0;
                        RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
                                        " nb_ports %u", nb_ports);
                        return -(ENOMEM);
                }

                /* Allocate memory to store port configurations */
                dev->data->ports_cfg =
                        rte_zmalloc_socket("eventdev->ports_cfg",
                        sizeof(dev->data->ports_cfg[0]) * nb_ports,
                        RTE_CACHE_LINE_SIZE, dev->data->socket_id);
                if (dev->data->ports_cfg == NULL) {
                        dev->data->nb_ports = 0;
                        RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
                                        " nb_ports %u", nb_ports);
                        return -(ENOMEM);
                }

                /* Allocate memory to store queue to port link connection */
                dev->data->links_map =
                        rte_zmalloc_socket("eventdev->links_map",
                        sizeof(dev->data->links_map[0]) * nb_ports *
                        RTE_EVENT_MAX_QUEUES_PER_DEV,
                        RTE_CACHE_LINE_SIZE, dev->data->socket_id);
                if (dev->data->links_map == NULL) {
                        dev->data->nb_ports = 0;
                        RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
                                        " nb_ports %u", nb_ports);
                        return -(ENOMEM);
                }
                for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
                        dev->data->links_map[i] =
                                EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
        } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

                ports = dev->data->ports;
                ports_cfg = dev->data->ports_cfg;
                links_map = dev->data->links_map;

                for (i = nb_ports; i < old_nb_ports; i++)
                        (*dev->dev_ops->port_release)(ports[i]);

                /* Realloc memory for ports */
                ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
                                RTE_CACHE_LINE_SIZE);
                if (ports == NULL) {
                        RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
                                                " nb_ports %u", nb_ports);
                        return -(ENOMEM);
                }

                /* Realloc memory for ports_cfg */
                ports_cfg = rte_realloc(ports_cfg,
                        sizeof(ports_cfg[0]) * nb_ports,
                        RTE_CACHE_LINE_SIZE);
                if (ports_cfg == NULL) {
                        RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
                                                " nb_ports %u", nb_ports);
                        return -(ENOMEM);
                }

                /* Realloc memory to store queue to port link connection */
                links_map = rte_realloc(links_map,
                        sizeof(dev->data->links_map[0]) * nb_ports *
                        RTE_EVENT_MAX_QUEUES_PER_DEV,
                        RTE_CACHE_LINE_SIZE);
                if (links_map == NULL) {
                        dev->data->nb_ports = 0;
                        RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
                                        " nb_ports %u", nb_ports);
                        return -(ENOMEM);
                }

                if (nb_ports > old_nb_ports) {
                        uint8_t new_ps = nb_ports - old_nb_ports;
                        unsigned int old_links_map_end =
                                old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
                        unsigned int links_map_end =
                                nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

                        memset(ports + old_nb_ports, 0,
                                sizeof(ports[0]) * new_ps);
                        memset(ports_cfg + old_nb_ports, 0,
                                sizeof(ports_cfg[0]) * new_ps);
                        for (i = old_links_map_end; i < links_map_end; i++)
                                links_map[i] =
                                        EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
                }

                dev->data->ports = ports;
                dev->data->ports_cfg = ports_cfg;
                dev->data->links_map = links_map;
        } else if (dev->data->ports != NULL && nb_ports == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

                ports = dev->data->ports;
                for (i = nb_ports; i < old_nb_ports; i++)
                        (*dev->dev_ops->port_release)(ports[i]);
        }

        dev->data->nb_ports = nb_ports;
        return 0;
}

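/*
 * Validate the application-supplied configuration against the limits
 * reported by the driver, resize the queue/port bookkeeping, then hand the
 * configuration to the driver through dev_ops->dev_configure().
 *
 * Rough usage sketch from an application's point of view (illustrative
 * only, values are made up):
 *
 *   struct rte_event_dev_config cfg = { .nb_event_queues = 2, ... };
 *   rte_event_dev_configure(dev_id, &cfg);
 *   rte_event_queue_setup(dev_id, 0, NULL);   -- NULL => driver defaults
 *   rte_event_port_setup(dev_id, 0, NULL);
 *   rte_event_port_link(dev_id, 0, NULL, NULL, 0); -- link all queues
 *   rte_event_dev_start(dev_id);
 */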
int
rte_event_dev_configure(uint8_t dev_id,
                        const struct rte_event_dev_config *dev_conf)
{
        struct rte_eventdev *dev;
        struct rte_event_dev_info info;
        int diag;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

        if (dev->data->dev_started) {
                RTE_EDEV_LOG_ERR(
                    "device %d must be stopped to allow configuration", dev_id);
                return -EBUSY;
        }

        if (dev_conf == NULL)
                return -EINVAL;

        (*dev->dev_ops->dev_infos_get)(dev, &info);

        /* Check dequeue_timeout_ns value is in limit */
        if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
                if (dev_conf->dequeue_timeout_ns &&
                    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
                        || dev_conf->dequeue_timeout_ns >
                                 info.max_dequeue_timeout_ns)) {
                        RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
                        " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
                        dev_id, dev_conf->dequeue_timeout_ns,
                        info.min_dequeue_timeout_ns,
                        info.max_dequeue_timeout_ns);
                        return -EINVAL;
                }
        }

        /* Check nb_events_limit is in limit */
        if (dev_conf->nb_events_limit > info.max_num_events) {
                RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
                dev_id, dev_conf->nb_events_limit, info.max_num_events);
                return -EINVAL;
        }

        /* Check nb_event_queues is in limit */
        if (!dev_conf->nb_event_queues) {
                RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
                                        dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_event_queues > info.max_event_queues) {
                RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
                dev_id, dev_conf->nb_event_queues, info.max_event_queues);
                return -EINVAL;
        }

        /* Check nb_event_ports is in limit */
        if (!dev_conf->nb_event_ports) {
                RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_event_ports > info.max_event_ports) {
                RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
                dev_id, dev_conf->nb_event_ports, info.max_event_ports);
                return -EINVAL;
        }

        /* Check nb_event_queue_flows is in limit */
        if (!dev_conf->nb_event_queue_flows) {
                RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
                return -EINVAL;
        }
        if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
                RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
                dev_id, dev_conf->nb_event_queue_flows,
                info.max_event_queue_flows);
                return -EINVAL;
        }

        /* Check nb_event_port_dequeue_depth is in limit */
        if (!dev_conf->nb_event_port_dequeue_depth) {
                RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
                                        dev_id);
                return -EINVAL;
        }
        if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
                 (dev_conf->nb_event_port_dequeue_depth >
                         info.max_event_port_dequeue_depth)) {
                RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
                dev_id, dev_conf->nb_event_port_dequeue_depth,
                info.max_event_port_dequeue_depth);
                return -EINVAL;
        }

        /* Check nb_event_port_enqueue_depth is in limit */
        if (!dev_conf->nb_event_port_enqueue_depth) {
                RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
                                        dev_id);
                return -EINVAL;
        }
        if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
                (dev_conf->nb_event_port_enqueue_depth >
                         info.max_event_port_enqueue_depth)) {
                RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
                dev_id, dev_conf->nb_event_port_enqueue_depth,
                info.max_event_port_enqueue_depth);
                return -EINVAL;
        }

        /* Copy the dev_conf parameter into the dev structure */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

        /* Setup new number of queues and reconfigure device. */
        diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
        if (diag != 0) {
                RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
                                dev_id, diag);
                return diag;
        }

        /* Setup new number of ports and reconfigure device. */
        diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
        if (diag != 0) {
                rte_event_dev_queue_config(dev, 0);
                RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
                                dev_id, diag);
                return diag;
        }

        /* Configure the device */
        diag = (*dev->dev_ops->dev_configure)(dev);
        if (diag != 0) {
                RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
                rte_event_dev_queue_config(dev, 0);
                rte_event_dev_port_config(dev, 0);
        }

        dev->data->event_dev_cap = info.event_dev_cap;
        return diag;
}

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
        if (queue_id < dev->data->nb_queues && queue_id <
                                RTE_EVENT_MAX_QUEUES_PER_DEV)
                return 1;
        else
                return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
                                 struct rte_event_queue_conf *queue_conf)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (queue_conf == NULL)
                return -EINVAL;

        if (!is_valid_queue(dev, queue_id)) {
                RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
        memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
        (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
        return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
        if (queue_conf &&
                !(queue_conf->event_queue_cfg &
                  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
                ((queue_conf->event_queue_cfg &
                        RTE_EVENT_QUEUE_CFG_TYPE_MASK)
                        == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
                ((queue_conf->event_queue_cfg &
                        RTE_EVENT_QUEUE_CFG_TYPE_MASK)
                        == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
                ))
                return 1;
        else
                return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
        if (queue_conf &&
                !(queue_conf->event_queue_cfg &
                  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) && (
                ((queue_conf->event_queue_cfg &
                        RTE_EVENT_QUEUE_CFG_TYPE_MASK)
                        == RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
                ((queue_conf->event_queue_cfg &
                        RTE_EVENT_QUEUE_CFG_TYPE_MASK)
                        == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
                ))
                return 1;
        else
                return 0;
}

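/*
 * Validate the atomic/ordered flow limits against the device configuration;
 * a NULL queue_conf falls back to the driver's default queue configuration.
 */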
int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
                      const struct rte_event_queue_conf *queue_conf)
{
        struct rte_eventdev *dev;
        struct rte_event_queue_conf def_conf;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (!is_valid_queue(dev, queue_id)) {
                RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
                return -EINVAL;
        }

        /* Check nb_atomic_flows limit */
        if (is_valid_atomic_queue_conf(queue_conf)) {
                if (queue_conf->nb_atomic_flows == 0 ||
                    queue_conf->nb_atomic_flows >
                        dev->data->dev_conf.nb_event_queue_flows) {
                        RTE_EDEV_LOG_ERR(
                "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
                        dev_id, queue_id, queue_conf->nb_atomic_flows,
                        dev->data->dev_conf.nb_event_queue_flows);
                        return -EINVAL;
                }
        }

        /* Check nb_atomic_order_sequences limit */
        if (is_valid_ordered_queue_conf(queue_conf)) {
                if (queue_conf->nb_atomic_order_sequences == 0 ||
                    queue_conf->nb_atomic_order_sequences >
                        dev->data->dev_conf.nb_event_queue_flows) {
                        RTE_EDEV_LOG_ERR(
                "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
                        dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
                        dev->data->dev_conf.nb_event_queue_flows);
                        return -EINVAL;
                }
        }

        if (dev->data->dev_started) {
                RTE_EDEV_LOG_ERR(
                    "device %d must be stopped to allow queue setup", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

        if (queue_conf == NULL) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
                                        -ENOTSUP);
                (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
                queue_conf = &def_conf;
        }

        dev->data->queues_cfg[queue_id] = *queue_conf;
        return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
        if (port_id < dev->data->nb_ports)
                return 1;
        else
                return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
                                 struct rte_event_port_conf *port_conf)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (port_conf == NULL)
                return -EINVAL;

        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
        memset(port_conf, 0, sizeof(struct rte_event_port_conf));
        (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
        return 0;
}

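/*
 * Validate the port thresholds/depths against the device configuration;
 * a NULL port_conf falls back to the driver's default port configuration.
 * After a successful setup the port is left with no queues linked.
 */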
int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
                     const struct rte_event_port_conf *port_conf)
{
        struct rte_eventdev *dev;
        struct rte_event_port_conf def_conf;
        int diag;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        /* Check new_event_threshold limit */
        if ((port_conf && !port_conf->new_event_threshold) ||
                        (port_conf && port_conf->new_event_threshold >
                                 dev->data->dev_conf.nb_events_limit)) {
                RTE_EDEV_LOG_ERR(
                   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
                        dev_id, port_id, port_conf->new_event_threshold,
                        dev->data->dev_conf.nb_events_limit);
                return -EINVAL;
        }

        /* Check dequeue_depth limit */
        if ((port_conf && !port_conf->dequeue_depth) ||
                        (port_conf && port_conf->dequeue_depth >
                dev->data->dev_conf.nb_event_port_dequeue_depth)) {
                RTE_EDEV_LOG_ERR(
                   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
                        dev_id, port_id, port_conf->dequeue_depth,
                        dev->data->dev_conf.nb_event_port_dequeue_depth);
                return -EINVAL;
        }

        /* Check enqueue_depth limit */
        if ((port_conf && !port_conf->enqueue_depth) ||
                        (port_conf && port_conf->enqueue_depth >
                dev->data->dev_conf.nb_event_port_enqueue_depth)) {
                RTE_EDEV_LOG_ERR(
                   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
                        dev_id, port_id, port_conf->enqueue_depth,
                        dev->data->dev_conf.nb_event_port_enqueue_depth);
                return -EINVAL;
        }

        if (dev->data->dev_started) {
                RTE_EDEV_LOG_ERR(
                    "device %d must be stopped to allow port setup", dev_id);
                return -EBUSY;
        }

        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

        if (port_conf == NULL) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
                                        -ENOTSUP);
                (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
                port_conf = &def_conf;
        }

        dev->data->ports_cfg[port_id] = *port_conf;

        diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

        /* Unlink all the queues from this port (default state after setup) */
        if (!diag)
                diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

        if (diag < 0)
                return diag;

        return 0;
}

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
                       uint32_t *attr_value)
{
        struct rte_eventdev *dev;

        if (!attr_value)
                return -EINVAL;
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];

        switch (attr_id) {
        case RTE_EVENT_DEV_ATTR_PORT_COUNT:
                *attr_value = dev->data->nb_ports;
                break;
        case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
                *attr_value = dev->data->nb_queues;
                break;
        case RTE_EVENT_DEV_ATTR_STARTED:
                *attr_value = dev->data->dev_started;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
                        uint32_t *attr_value)
{
        struct rte_eventdev *dev;

        if (!attr_value)
                return -EINVAL;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        switch (attr_id) {
        case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
                *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
                break;
        case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
                *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
                break;
        case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
                *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
                        uint32_t *attr_value)
{
        struct rte_event_queue_conf *conf;
        struct rte_eventdev *dev;

        if (!attr_value)
                return -EINVAL;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        if (!is_valid_queue(dev, queue_id)) {
                RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
                return -EINVAL;
        }

        conf = &dev->data->queues_cfg[queue_id];

        switch (attr_id) {
        case RTE_EVENT_QUEUE_ATTR_PRIORITY:
                *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
                if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
                        *attr_value = conf->priority;
                break;
        case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
                *attr_value = conf->nb_atomic_flows;
                break;
        case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
                *attr_value = conf->nb_atomic_order_sequences;
                break;
        case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
                *attr_value = conf->event_queue_cfg;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

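/*
 * NULL queues means "link every configured queue"; NULL priorities means
 * RTE_EVENT_DEV_PRIORITY_NORMAL for each link. The shared links_map keeps
 * RTE_EVENT_MAX_QUEUES_PER_DEV entries per port and is updated with the
 * priority of each successfully linked queue.
 */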
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
                    const uint8_t queues[], const uint8_t priorities[],
                    uint16_t nb_links)
{
        struct rte_eventdev *dev;
        uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
        uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
        uint16_t *links_map;
        int i, diag;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);

        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        if (queues == NULL) {
                for (i = 0; i < dev->data->nb_queues; i++)
                        queues_list[i] = i;

                queues = queues_list;
                nb_links = dev->data->nb_queues;
        }

        if (priorities == NULL) {
                for (i = 0; i < nb_links; i++)
                        priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

                priorities = priorities_list;
        }

        for (i = 0; i < nb_links; i++)
                if (queues[i] >= dev->data->nb_queues)
                        return -EINVAL;

        diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
                                                queues, priorities, nb_links);
        if (diag < 0)
                return diag;

        links_map = dev->data->links_map;
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
        for (i = 0; i < diag; i++)
                links_map[queues[i]] = (uint8_t)priorities[i];

        return diag;
}

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
                      uint8_t queues[], uint16_t nb_unlinks)
{
        struct rte_eventdev *dev;
        uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
        int i, diag;
        uint16_t *links_map;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);

        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        if (queues == NULL) {
                for (i = 0; i < dev->data->nb_queues; i++)
                        all_queues[i] = i;
                queues = all_queues;
                nb_unlinks = dev->data->nb_queues;
        }

        for (i = 0; i < nb_unlinks; i++)
                if (queues[i] >= dev->data->nb_queues)
                        return -EINVAL;

        diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
                                        queues, nb_unlinks);

        if (diag < 0)
                return diag;

        links_map = dev->data->links_map;
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
        for (i = 0; i < diag; i++)
                links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

        return diag;
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
                         uint8_t queues[], uint8_t priorities[])
{
        struct rte_eventdev *dev;
        uint16_t *links_map;
        int i, count = 0;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        if (!is_valid_port(dev, port_id)) {
                RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
                return -EINVAL;
        }

        links_map = dev->data->links_map;
        /* Point links_map to this port specific area */
        links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
        for (i = 0; i < dev->data->nb_queues; i++) {
                if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
                        queues[count] = i;
                        priorities[count] = (uint8_t)links_map[i];
                        ++count;
                }
        }
        return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
                                 uint64_t *timeout_ticks)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

        if (timeout_ticks == NULL)
                return -EINVAL;

        return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

        (*dev->dev_ops->dump)(dev, f);
        return 0;
}

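/*
 * Number of xstats names for the given mode/id, obtained by calling the
 * driver's xstats_get_names() with NULL output buffers.
 */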
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
                uint8_t queue_port_id)
{
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];
        if (dev->dev_ops->xstats_get_names != NULL)
                return (*dev->dev_ops->xstats_get_names)(dev, mode,
                                                        queue_port_id,
                                                        NULL, NULL, 0);
        return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
                struct rte_event_dev_xstats_name *xstats_names,
                unsigned int *ids, unsigned int size)
{
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
        const int cnt_expected_entries = xstats_get_count(dev_id, mode,
                                                          queue_port_id);
        if (xstats_names == NULL || cnt_expected_entries < 0 ||
                        (int)size < cnt_expected_entries)
                return cnt_expected_entries;

        /* dev_id checked above */
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        if (dev->dev_ops->xstats_get_names != NULL)
                return (*dev->dev_ops->xstats_get_names)(dev, mode,
                                queue_port_id, xstats_names, ids, size);

        return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
                uint8_t queue_port_id, const unsigned int ids[],
                uint64_t values[], unsigned int n)
{
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        /* implemented by the driver */
        if (dev->dev_ops->xstats_get != NULL)
                return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
                                ids, values, n);
        return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
                unsigned int *id)
{
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
        const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
        unsigned int temp = -1;

        if (id != NULL)
                *id = (unsigned int)-1;
        else
                id = &temp; /* ensure driver never gets a NULL value */

        /* implemented by driver */
        if (dev->dev_ops->xstats_get_by_name != NULL)
                return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
        return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
                const uint32_t ids[], uint32_t nb_ids)
{
        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        struct rte_eventdev *dev = &rte_eventdevs[dev_id];

        if (dev->dev_ops->xstats_reset != NULL)
                return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
                                                        ids, nb_ids);
        return -ENOTSUP;
}

int
rte_event_dev_start(uint8_t dev_id)
{
        struct rte_eventdev *dev;
        int diag;

        RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

        if (dev->data->dev_started != 0) {
                RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
                        dev_id);
                return 0;
        }

        diag = (*dev->dev_ops->dev_start)(dev);
        if (diag == 0)
                dev->data->dev_started = 1;
        else
                return diag;

        return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
        struct rte_eventdev *dev;

        RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

        RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

        if (dev->data->dev_started == 0) {
                RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
                        dev_id);
                return;
        }

        dev->data->dev_started = 0;
        (*dev->dev_ops->dev_stop)(dev);
}

int
rte_event_dev_close(uint8_t dev_id)
{
        struct rte_eventdev *dev;

        RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
        dev = &rte_eventdevs[dev_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

        /* Device must be stopped before it can be closed */
        if (dev->data->dev_started == 1) {
                RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
                                dev_id);
                return -EBUSY;
        }

        return (*dev->dev_ops->dev_close)(dev);
}

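/*
 * Allocate (primary process) or look up (secondary process) the memzone
 * that backs the shared rte_eventdev_data for this device id.
 */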
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
                int socket_id)
{
        char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
        const struct rte_memzone *mz;
        int n;

        /* Generate memzone name */
        n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
        if (n >= (int)sizeof(mz_name))
                return -EINVAL;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                mz = rte_memzone_reserve(mz_name,
                                sizeof(struct rte_eventdev_data),
                                socket_id, 0);
        } else
                mz = rte_memzone_lookup(mz_name);

        if (mz == NULL)
                return -ENOMEM;

        *data = mz->addr;
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                memset(*data, 0, sizeof(struct rte_eventdev_data));

        return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
        uint8_t dev_id;

        for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
                if (rte_eventdevs[dev_id].attached ==
                                RTE_EVENTDEV_DETACHED)
                        return dev_id;
        }
        return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
        struct rte_eventdev *eventdev;
        uint8_t dev_id;

        if (rte_event_pmd_get_named_dev(name) != NULL) {
                RTE_EDEV_LOG_ERR("Event device with name %s already "
                                "allocated!", name);
                return NULL;
        }

        dev_id = rte_eventdev_find_free_device_index();
        if (dev_id == RTE_EVENT_MAX_DEVS) {
                RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
                return NULL;
        }

        eventdev = &rte_eventdevs[dev_id];

        if (eventdev->data == NULL) {
                struct rte_eventdev_data *eventdev_data = NULL;

                int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
                                socket_id);

                if (retval < 0 || eventdev_data == NULL)
                        return NULL;

                eventdev->data = eventdev_data;

                snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
                                "%s", name);

                eventdev->data->dev_id = dev_id;
                eventdev->data->socket_id = socket_id;
                eventdev->data->dev_started = 0;

                eventdev->attached = RTE_EVENTDEV_ATTACHED;

                eventdev_globals.nb_devs++;
        }

        return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
        int ret;
        char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
        const struct rte_memzone *mz;

        if (eventdev == NULL)
                return -EINVAL;

        eventdev->attached = RTE_EVENTDEV_DETACHED;
        eventdev_globals.nb_devs--;

        if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
                rte_free(eventdev->data->dev_private);

                /* Generate memzone name */
                ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
                                eventdev->data->dev_id);
                if (ret >= (int)sizeof(mz_name))
                        return -EINVAL;

                mz = rte_memzone_lookup(mz_name);
                if (mz == NULL)
                        return -ENOMEM;

                ret = rte_memzone_free(mz);
                if (ret)
                        return ret;
        }

        eventdev->data = NULL;
        return 0;
}