lib/librte_eventdev/rte_eventdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

#include "rte_eventdev.h"
#include "rte_eventdev_pmd.h"

struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];

struct rte_eventdev *rte_eventdevs = &rte_event_devices[0];

static struct rte_eventdev_global eventdev_globals = {
	.nb_devs		= 0
};

struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals;

/* Event dev north bound API implementation */

uint8_t
rte_event_dev_count(void)
{
	return rte_eventdev_globals->nb_devs;
}

int
rte_event_dev_get_dev_id(const char *name)
{
	int i;

	if (!name)
		return -EINVAL;

	for (i = 0; i < rte_eventdev_globals->nb_devs; i++)
		if ((strcmp(rte_event_devices[i].data->name, name)
				== 0) &&
				(rte_event_devices[i].attached ==
						RTE_EVENTDEV_ATTACHED))
			return i;
	return -ENODEV;
}

int
rte_event_dev_socket_id(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	return dev->data->socket_id;
}

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (dev_info == NULL)
		return -EINVAL;

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	(*dev->dev_ops->dev_infos_get)(dev, dev_info);

	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;

	dev_info->dev = dev->dev;
	return 0;
}

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id,
				uint32_t *caps)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);

	dev = &rte_eventdevs[dev_id];

	if (caps == NULL)
		return -EINVAL;
	*caps = 0;

	return dev->dev_ops->eth_rx_adapter_caps_get ?
				(*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
						&rte_eth_devices[eth_port_id],
						caps)
				: 0;
}

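/*
 * Internal helper: size or resize the per-device queue configuration
 * array. Handles first-time allocation, reallocation on reconfigure and
 * release of all queues when nb_queues is zero.
 */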
static inline int
rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
{
	uint8_t old_nb_queues = dev->data->nb_queues;
	struct rte_event_queue_conf *queues_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->queues_cfg == NULL && nb_queues != 0) {
		/* Allocate memory to store queue configuration */
		dev->data->queues_cfg = rte_zmalloc_socket(
				"eventdev->data->queues_cfg",
				sizeof(dev->data->queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->queues_cfg == NULL) {
			dev->data->nb_queues = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
					"nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
	/* Re-configure */
	} else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);

		/* Re allocate memory to store queue configuration */
		queues_cfg = dev->data->queues_cfg;
		queues_cfg = rte_realloc(queues_cfg,
				sizeof(queues_cfg[0]) * nb_queues,
				RTE_CACHE_LINE_SIZE);
		if (queues_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
						" nb_queues %u", nb_queues);
			return -(ENOMEM);
		}
		dev->data->queues_cfg = queues_cfg;

		if (nb_queues > old_nb_queues) {
			uint8_t new_qs = nb_queues - old_nb_queues;

			memset(queues_cfg + old_nb_queues, 0,
				sizeof(queues_cfg[0]) * new_qs);
		}
	} else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);

		for (i = nb_queues; i < old_nb_queues; i++)
			(*dev->dev_ops->queue_release)(dev, i);
	}

	dev->data->nb_queues = nb_queues;
	return 0;
}

#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)

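/*
 * Internal helper: size or resize the port pointer array, the per-port
 * configuration array and the queue-to-port links map. Mirrors
 * rte_event_dev_queue_config() for ports.
 */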
static inline int
rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
{
	uint8_t old_nb_ports = dev->data->nb_ports;
	void **ports;
	uint16_t *links_map;
	struct rte_event_port_conf *ports_cfg;
	unsigned int i;

	RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
			 dev->data->dev_id);

	/* First time configuration */
	if (dev->data->ports == NULL && nb_ports != 0) {
		dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
				sizeof(dev->data->ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store port configurations */
		dev->data->ports_cfg =
			rte_zmalloc_socket("eventdev->ports_cfg",
			sizeof(dev->data->ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->ports_cfg == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Allocate memory to store queue to port link connection */
		dev->data->links_map =
			rte_zmalloc_socket("eventdev->links_map",
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE, dev->data->socket_id);
		if (dev->data->links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}
		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
			dev->data->links_map[i] =
				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		ports_cfg = dev->data->ports_cfg;
		links_map = dev->data->links_map;

		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);

		/* Realloc memory for ports */
		ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
				RTE_CACHE_LINE_SIZE);
		if (ports == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory for ports_cfg */
		ports_cfg = rte_realloc(ports_cfg,
			sizeof(ports_cfg[0]) * nb_ports,
			RTE_CACHE_LINE_SIZE);
		if (ports_cfg == NULL) {
			RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
						" nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		/* Realloc memory to store queue to port link connection */
		links_map = rte_realloc(links_map,
			sizeof(dev->data->links_map[0]) * nb_ports *
			RTE_EVENT_MAX_QUEUES_PER_DEV,
			RTE_CACHE_LINE_SIZE);
		if (links_map == NULL) {
			dev->data->nb_ports = 0;
			RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
					"nb_ports %u", nb_ports);
			return -(ENOMEM);
		}

		if (nb_ports > old_nb_ports) {
			uint8_t new_ps = nb_ports - old_nb_ports;
			unsigned int old_links_map_end =
				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
			unsigned int links_map_end =
				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;

			memset(ports + old_nb_ports, 0,
				sizeof(ports[0]) * new_ps);
			memset(ports_cfg + old_nb_ports, 0,
				sizeof(ports_cfg[0]) * new_ps);
			for (i = old_links_map_end; i < links_map_end; i++)
				links_map[i] =
					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
		}

		dev->data->ports = ports;
		dev->data->ports_cfg = ports_cfg;
		dev->data->links_map = links_map;
	} else if (dev->data->ports != NULL && nb_ports == 0) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);

		ports = dev->data->ports;
		for (i = nb_ports; i < old_nb_ports; i++)
			(*dev->dev_ops->port_release)(ports[i]);
	}

	dev->data->nb_ports = nb_ports;
	return 0;
}

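/*
 * Validate the requested configuration against the limits reported by the
 * PMD, size the queue and port arrays accordingly, then hand control to
 * the driver's dev_configure callback.
 */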
int
rte_event_dev_configure(uint8_t dev_id,
			const struct rte_event_dev_config *dev_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_dev_info info;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow configuration", dev_id);
		return -EBUSY;
	}

	if (dev_conf == NULL)
		return -EINVAL;

	(*dev->dev_ops->dev_infos_get)(dev, &info);

	/* Check dequeue_timeout_ns value is in limit */
	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
		if (dev_conf->dequeue_timeout_ns &&
		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
			|| dev_conf->dequeue_timeout_ns >
				 info.max_dequeue_timeout_ns)) {
			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
			dev_id, dev_conf->dequeue_timeout_ns,
			info.min_dequeue_timeout_ns,
			info.max_dequeue_timeout_ns);
			return -EINVAL;
		}
	}

	/* Check nb_events_limit is in limit */
	if (dev_conf->nb_events_limit > info.max_num_events) {
		RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
		dev_id, dev_conf->nb_events_limit, info.max_num_events);
		return -EINVAL;
	}

	/* Check nb_event_queues is in limit */
	if (!dev_conf->nb_event_queues) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queues > info.max_event_queues) {
		RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d",
		dev_id, dev_conf->nb_event_queues, info.max_event_queues);
		return -EINVAL;
	}

	/* Check nb_event_ports is in limit */
	if (!dev_conf->nb_event_ports) {
		RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_ports > info.max_event_ports) {
		RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports= %d",
		dev_id, dev_conf->nb_event_ports, info.max_event_ports);
		return -EINVAL;
	}

	/* Check nb_event_queue_flows is in limit */
	if (!dev_conf->nb_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
		return -EINVAL;
	}
	if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
		RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
		dev_id, dev_conf->nb_event_queue_flows,
		info.max_event_queue_flows);
		return -EINVAL;
	}

	/* Check nb_event_port_dequeue_depth is in limit */
	if (!dev_conf->nb_event_port_dequeue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		 (dev_conf->nb_event_port_dequeue_depth >
			 info.max_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
		dev_id, dev_conf->nb_event_port_dequeue_depth,
		info.max_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check nb_event_port_enqueue_depth is in limit */
	if (!dev_conf->nb_event_port_enqueue_depth) {
		RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
					dev_id);
		return -EINVAL;
	}
	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
		(dev_conf->nb_event_port_enqueue_depth >
			 info.max_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
		dev_id, dev_conf->nb_event_port_enqueue_depth,
		info.max_event_port_enqueue_depth);
		return -EINVAL;
	}

	/* Copy the dev_conf parameter into the dev structure */
	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

	/* Setup new number of queues and reconfigure device. */
	diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Setup new number of ports and reconfigure device. */
	diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
	if (diag != 0) {
		rte_event_dev_queue_config(dev, 0);
		RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
				dev_id, diag);
		return diag;
	}

	/* Configure the device */
	diag = (*dev->dev_ops->dev_configure)(dev);
	if (diag != 0) {
		RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
		rte_event_dev_queue_config(dev, 0);
		rte_event_dev_port_config(dev, 0);
	}

	dev->data->event_dev_cap = info.event_dev_cap;
	return diag;
}

static inline int
is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
{
	if (queue_id < dev->data->nb_queues && queue_id <
				RTE_EVENT_MAX_QUEUES_PER_DEV)
		return 1;
	else
		return 0;
}

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (queue_conf == NULL)
		return -EINVAL;

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	(*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
	return 0;
}

static inline int
is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ATOMIC)
		))
		return 1;
	else
		return 0;
}

static inline int
is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
{
	if (queue_conf &&
		!(queue_conf->event_queue_cfg &
		  RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
		((queue_conf->event_queue_cfg &
			 RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
		(queue_conf->schedule_type
			== RTE_SCHED_TYPE_ORDERED)
		))
		return 1;
	else
		return 0;
}


int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
		      const struct rte_event_queue_conf *queue_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_queue_conf def_conf;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	/* Check nb_atomic_flows limit */
	if (is_valid_atomic_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_flows == 0 ||
		    queue_conf->nb_atomic_flows >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_flows,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	/* Check nb_atomic_order_sequences limit */
	if (is_valid_ordered_queue_conf(queue_conf)) {
		if (queue_conf->nb_atomic_order_sequences == 0 ||
		    queue_conf->nb_atomic_order_sequences >
			dev->data->dev_conf.nb_event_queue_flows) {
			RTE_EDEV_LOG_ERR(
		"dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
			dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
			dev->data->dev_conf.nb_event_queue_flows);
			return -EINVAL;
		}
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow queue setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);

	if (queue_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
		queue_conf = &def_conf;
	}

	dev->data->queues_cfg[queue_id] = *queue_conf;
	return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
}

static inline int
is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
{
	if (port_id < dev->data->nb_ports)
		return 1;
	else
		return 0;
}

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (port_conf == NULL)
		return -EINVAL;

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
	memset(port_conf, 0, sizeof(struct rte_event_port_conf));
	(*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
	return 0;
}

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
		     const struct rte_event_port_conf *port_conf)
{
	struct rte_eventdev *dev;
	struct rte_event_port_conf def_conf;
	int diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	/* Check new_event_threshold limit */
	if ((port_conf && !port_conf->new_event_threshold) ||
			(port_conf && port_conf->new_event_threshold >
				 dev->data->dev_conf.nb_events_limit)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
			dev_id, port_id, port_conf->new_event_threshold,
			dev->data->dev_conf.nb_events_limit);
		return -EINVAL;
	}

	/* Check dequeue_depth limit */
	if ((port_conf && !port_conf->dequeue_depth) ||
			(port_conf && port_conf->dequeue_depth >
		dev->data->dev_conf.nb_event_port_dequeue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
			dev_id, port_id, port_conf->dequeue_depth,
			dev->data->dev_conf.nb_event_port_dequeue_depth);
		return -EINVAL;
	}

	/* Check enqueue_depth limit */
	if ((port_conf && !port_conf->enqueue_depth) ||
			(port_conf && port_conf->enqueue_depth >
		dev->data->dev_conf.nb_event_port_enqueue_depth)) {
		RTE_EDEV_LOG_ERR(
		   "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
			dev_id, port_id, port_conf->enqueue_depth,
			dev->data->dev_conf.nb_event_port_enqueue_depth);
		return -EINVAL;
	}

	if (dev->data->dev_started) {
		RTE_EDEV_LOG_ERR(
		    "device %d must be stopped to allow port setup", dev_id);
		return -EBUSY;
	}

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);

	if (port_conf == NULL) {
		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
					-ENOTSUP);
		(*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
		port_conf = &def_conf;
	}

	dev->data->ports_cfg[port_id] = *port_conf;

	diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);

	/* Unlink all the queues from this port(default state after setup) */
	if (!diag)
		diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);

	if (diag < 0)
		return diag;

	return 0;
}

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
		       uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	switch (attr_id) {
	case RTE_EVENT_DEV_ATTR_PORT_COUNT:
		*attr_value = dev->data->nb_ports;
		break;
	case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
		*attr_value = dev->data->nb_queues;
		break;
	case RTE_EVENT_DEV_ATTR_STARTED:
		*attr_value = dev->data->dev_started;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	switch (attr_id) {
	case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
		*attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
		break;
	case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
		*attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
			uint32_t *attr_value)
{
	struct rte_event_queue_conf *conf;
	struct rte_eventdev *dev;

	if (!attr_value)
		return -EINVAL;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_queue(dev, queue_id)) {
		RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
		return -EINVAL;
	}

	conf = &dev->data->queues_cfg[queue_id];

	switch (attr_id) {
	case RTE_EVENT_QUEUE_ATTR_PRIORITY:
		*attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
		if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
			*attr_value = conf->priority;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
		*attr_value = conf->nb_atomic_flows;
		break;
	case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
		*attr_value = conf->nb_atomic_order_sequences;
		break;
	case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
		*attr_value = conf->event_queue_cfg;
		break;
	case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
		if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
			return -EOVERFLOW;

		*attr_value = conf->schedule_type;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

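/*
 * Queue to port link management. Links installed through the PMD are
 * mirrored in dev->data->links_map so rte_event_port_links_get() can be
 * answered without a driver callback.
 */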
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
		    const uint8_t queues[], const uint8_t priorities[],
		    uint16_t nb_links)
{
	struct rte_eventdev *dev;
	uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint16_t *links_map;
	int i, diag;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_link, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			queues_list[i] = i;

		queues = queues_list;
		nb_links = dev->data->nb_queues;
	}

	if (priorities == NULL) {
		for (i = 0; i < nb_links; i++)
			priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;

		priorities = priorities_list;
	}

	for (i = 0; i < nb_links; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
						queues, priorities, nb_links);
	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = (uint8_t)priorities[i];

	return diag;
}

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
		      uint8_t queues[], uint16_t nb_unlinks)
{
	struct rte_eventdev *dev;
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	int i, diag;
	uint16_t *links_map;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlink, -ENOTSUP);

	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	if (queues == NULL) {
		for (i = 0; i < dev->data->nb_queues; i++)
			all_queues[i] = i;
		queues = all_queues;
		nb_unlinks = dev->data->nb_queues;
	}

	for (i = 0; i < nb_unlinks; i++)
		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;

	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
					queues, nb_unlinks);

	if (diag < 0)
		return diag;

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < diag; i++)
		links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;

	return diag;
}

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
			 uint8_t queues[], uint8_t priorities[])
{
	struct rte_eventdev *dev;
	uint16_t *links_map;
	int i, count = 0;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	if (!is_valid_port(dev, port_id)) {
		RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
		return -EINVAL;
	}

	links_map = dev->data->links_map;
	/* Point links_map to this port specific area */
	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
	for (i = 0; i < dev->data->nb_queues; i++) {
		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
			queues[count] = i;
			priorities[count] = (uint8_t)links_map[i];
			++count;
		}
	}
	return count;
}

int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);

	if (timeout_ticks == NULL)
		return -EINVAL;

	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
}

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];

	if (service_id == NULL)
		return -EINVAL;

	if (dev->data->service_inited)
		*service_id = dev->data->service_id;

	return dev->data->service_inited ? 0 : -ESRCH;
}

int
rte_event_dev_dump(uint8_t dev_id, FILE *f)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);

	(*dev->dev_ops->dump)(dev, f);
	return 0;
}

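/*
 * Calling the driver's xstats_get_names callback with NULL buffers only
 * returns the number of entries for the given mode and queue/port id;
 * the wrappers below use this count to validate caller-provided sizes.
 */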
static int
xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id)
{
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
							queue_port_id,
							NULL, NULL, 0);
	return 0;
}

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
		struct rte_event_dev_xstats_name *xstats_names,
		unsigned int *ids, unsigned int size)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
							  queue_port_id);
	if (xstats_names == NULL || cnt_expected_entries < 0 ||
			(int)size < cnt_expected_entries)
		return cnt_expected_entries;

	/* dev_id checked above */
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_get_names != NULL)
		return (*dev->dev_ops->xstats_get_names)(dev, mode,
				queue_port_id, xstats_names, ids, size);

	return -ENOTSUP;
}

/* retrieve eventdev extended statistics */
int
rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
		uint8_t queue_port_id, const unsigned int ids[],
		uint64_t values[], unsigned int n)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	/* implemented by the driver */
	if (dev->dev_ops->xstats_get != NULL)
		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
				ids, values, n);
	return -ENOTSUP;
}

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
		unsigned int *id)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
	unsigned int temp = -1;

	if (id != NULL)
		*id = (unsigned int)-1;
	else
		id = &temp; /* ensure driver never gets a NULL value */

	/* implemented by driver */
	if (dev->dev_ops->xstats_get_by_name != NULL)
		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
	return -ENOTSUP;
}

int rte_event_dev_xstats_reset(uint8_t dev_id,
		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
		const uint32_t ids[], uint32_t nb_ids)
{
	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	struct rte_eventdev *dev = &rte_eventdevs[dev_id];

	if (dev->dev_ops->xstats_reset != NULL)
		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
							ids, nb_ids);
	return -ENOTSUP;
}

int
rte_event_dev_start(uint8_t dev_id)
{
	struct rte_eventdev *dev;
	int diag;

	RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);

	if (dev->data->dev_started != 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
			dev_id);
		return 0;
	}

	diag = (*dev->dev_ops->dev_start)(dev);
	if (diag == 0)
		dev->data->dev_started = 1;
	else
		return diag;

	return 0;
}

void
rte_event_dev_stop(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);

	RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);

	if (dev->data->dev_started == 0) {
		RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
			dev_id);
		return;
	}

	dev->data->dev_started = 0;
	(*dev->dev_ops->dev_stop)(dev);
}

int
rte_event_dev_close(uint8_t dev_id)
{
	struct rte_eventdev *dev;

	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
	dev = &rte_eventdevs[dev_id];
	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);

	/* Device must be stopped before it can be closed */
	if (dev->data->dev_started == 1) {
		RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
				dev_id);
		return -EBUSY;
	}

	return (*dev->dev_ops->dev_close)(dev);
}

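/*
 * Device registration helpers used by event PMDs. Per-device data lives
 * in a named memzone so that secondary processes can look it up and share
 * it with the primary process.
 */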
static inline int
rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
		int socket_id)
{
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;
	int n;

	/* Generate memzone name */
	n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
	if (n >= (int)sizeof(mz_name))
		return -EINVAL;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		mz = rte_memzone_reserve(mz_name,
				sizeof(struct rte_eventdev_data),
				socket_id, 0);
	} else
		mz = rte_memzone_lookup(mz_name);

	if (mz == NULL)
		return -ENOMEM;

	*data = mz->addr;
	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
		memset(*data, 0, sizeof(struct rte_eventdev_data));

	return 0;
}

static inline uint8_t
rte_eventdev_find_free_device_index(void)
{
	uint8_t dev_id;

	for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
		if (rte_eventdevs[dev_id].attached ==
				RTE_EVENTDEV_DETACHED)
			return dev_id;
	}
	return RTE_EVENT_MAX_DEVS;
}

struct rte_eventdev *
rte_event_pmd_allocate(const char *name, int socket_id)
{
	struct rte_eventdev *eventdev;
	uint8_t dev_id;

	if (rte_event_pmd_get_named_dev(name) != NULL) {
		RTE_EDEV_LOG_ERR("Event device with name %s already "
				"allocated!", name);
		return NULL;
	}

	dev_id = rte_eventdev_find_free_device_index();
	if (dev_id == RTE_EVENT_MAX_DEVS) {
		RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
		return NULL;
	}

	eventdev = &rte_eventdevs[dev_id];

	if (eventdev->data == NULL) {
		struct rte_eventdev_data *eventdev_data = NULL;

		int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
				socket_id);

		if (retval < 0 || eventdev_data == NULL)
			return NULL;

		eventdev->data = eventdev_data;

		snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN,
				"%s", name);

		eventdev->data->dev_id = dev_id;
		eventdev->data->socket_id = socket_id;
		eventdev->data->dev_started = 0;

		eventdev->attached = RTE_EVENTDEV_ATTACHED;

		eventdev_globals.nb_devs++;
	}

	return eventdev;
}

int
rte_event_pmd_release(struct rte_eventdev *eventdev)
{
	int ret;
	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
	const struct rte_memzone *mz;

	if (eventdev == NULL)
		return -EINVAL;

	eventdev->attached = RTE_EVENTDEV_DETACHED;
	eventdev_globals.nb_devs--;

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		rte_free(eventdev->data->dev_private);

		/* Generate memzone name */
		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				eventdev->data->dev_id);
		if (ret >= (int)sizeof(mz_name))
			return -EINVAL;

		mz = rte_memzone_lookup(mz_name);
		if (mz == NULL)
			return -ENOMEM;

		ret = rte_memzone_free(mz);
		if (ret)
			return ret;
	}

	eventdev->data = NULL;
	return 0;
}