dpdk.git: lib/librte_eventdev/rte_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15
16 #include <rte_string_fns.h>
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_eal.h>
25 #include <rte_per_lcore.h>
26 #include <rte_lcore.h>
27 #include <rte_atomic.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_common.h>
30 #include <rte_malloc.h>
31 #include <rte_errno.h>
32 #include <rte_ethdev.h>
33 #include <rte_cryptodev.h>
34 #include <rte_cryptodev_pmd.h>
35
36 #include "rte_eventdev.h"
37 #include "rte_eventdev_pmd.h"
38 #include "rte_eventdev_trace.h"
39
40 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
41
42 struct rte_eventdev *rte_eventdevs = rte_event_devices;
43
44 static struct rte_eventdev_global eventdev_globals = {
45         .nb_devs                = 0
46 };
47
48 /* Event dev north bound API implementation */
49
50 uint8_t
51 rte_event_dev_count(void)
52 {
53         return eventdev_globals.nb_devs;
54 }
55
56 int
57 rte_event_dev_get_dev_id(const char *name)
58 {
59         int i;
60         uint8_t cmp;
61
62         if (!name)
63                 return -EINVAL;
64
65         for (i = 0; i < eventdev_globals.nb_devs; i++) {
66                 cmp = (strncmp(rte_event_devices[i].data->name, name,
67                                 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
68                         (rte_event_devices[i].dev ? (strncmp(
69                                 rte_event_devices[i].dev->driver->name, name,
70                                          RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
71                 if (cmp && (rte_event_devices[i].attached ==
72                                         RTE_EVENTDEV_ATTACHED))
73                         return i;
74         }
75         return -ENODEV;
76 }
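
/*
 * Usage sketch: looking up an event device from an application. The vdev
 * name "event_sw0" is only an example; either a probed eventdev name or its
 * driver name can be passed to rte_event_dev_get_dev_id(), as implemented
 * above.
 *
 *	uint8_t nb_devs = rte_event_dev_count();
 *	int dev_id = rte_event_dev_get_dev_id("event_sw0");
 *
 *	if (nb_devs == 0 || dev_id < 0)
 *		rte_exit(EXIT_FAILURE, "no usable event device\n");
 *
 *	int socket_id = rte_event_dev_socket_id(dev_id);
 */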
77
78 int
79 rte_event_dev_socket_id(uint8_t dev_id)
80 {
81         struct rte_eventdev *dev;
82
83         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
84         dev = &rte_eventdevs[dev_id];
85
86         return dev->data->socket_id;
87 }
88
89 int
90 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
91 {
92         struct rte_eventdev *dev;
93
94         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
95         dev = &rte_eventdevs[dev_id];
96
97         if (dev_info == NULL)
98                 return -EINVAL;
99
100         memset(dev_info, 0, sizeof(struct rte_event_dev_info));
101
102         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
103         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
104
105         dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
106
107         dev_info->dev = dev->dev;
108         return 0;
109 }
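
/*
 * Usage sketch: reading the capabilities and limits that the configuration
 * checks further below are made against. dev_id is assumed to be a valid
 * device index obtained earlier.
 *
 *	struct rte_event_dev_info info;
 *
 *	if (rte_event_dev_info_get(dev_id, &info) < 0)
 *		return -1;
 *
 *	printf("driver=%s queues<=%u ports<=%u inflight<=%d burst=%d\n",
 *		info.driver_name, info.max_event_queues, info.max_event_ports,
 *		info.max_num_events,
 *		!!(info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE));
 */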
110
111 int
112 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
113                                 uint32_t *caps)
114 {
115         struct rte_eventdev *dev;
116
117         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
118         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
119
120         dev = &rte_eventdevs[dev_id];
121
122         if (caps == NULL)
123                 return -EINVAL;
124         *caps = 0;
125
126         return dev->dev_ops->eth_rx_adapter_caps_get ?
127                                 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
128                                                 &rte_eth_devices[eth_port_id],
129                                                 caps)
130                                 : 0;
131 }
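
/*
 * Usage sketch: checking whether an ethdev/eventdev pair has an internal
 * (hardware) event port. When the flag is not set, the Rx adapter runs as a
 * software service and needs a service core. eth_port_id is assumed to be a
 * configured ethdev port.
 *
 *	uint32_t caps = 0;
 *	int internal_port;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps))
 *		return -1;
 *
 *	internal_port = !!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT);
 */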
132
133 int
134 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
135 {
136         struct rte_eventdev *dev;
137         const struct rte_event_timer_adapter_ops *ops;
138
139         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
140
141         dev = &rte_eventdevs[dev_id];
142
143         if (caps == NULL)
144                 return -EINVAL;
145         *caps = 0;
146
147         return dev->dev_ops->timer_adapter_caps_get ?
148                                 (*dev->dev_ops->timer_adapter_caps_get)(dev,
149                                                                         0,
150                                                                         caps,
151                                                                         &ops)
152                                 : 0;
153 }
154
155 int
156 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
157                                   uint32_t *caps)
158 {
159         struct rte_eventdev *dev;
160         struct rte_cryptodev *cdev;
161
162         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
163         if (!rte_cryptodev_pmd_is_valid_dev(cdev_id))
164                 return -EINVAL;
165
166         dev = &rte_eventdevs[dev_id];
167         cdev = rte_cryptodev_pmd_get_dev(cdev_id);
168
169         if (caps == NULL)
170                 return -EINVAL;
171         *caps = 0;
172
173         return dev->dev_ops->crypto_adapter_caps_get ?
174                 (*dev->dev_ops->crypto_adapter_caps_get)
175                 (dev, cdev, caps) : -ENOTSUP;
176 }
177
178 int
179 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
180                                 uint32_t *caps)
181 {
182         struct rte_eventdev *dev;
183         struct rte_eth_dev *eth_dev;
184
185         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
186         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
187
188         dev = &rte_eventdevs[dev_id];
189         eth_dev = &rte_eth_devices[eth_port_id];
190
191         if (caps == NULL)
192                 return -EINVAL;
193
194         *caps = 0;
195
196         return dev->dev_ops->eth_tx_adapter_caps_get ?
197                         (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
198                                                                 eth_dev,
199                                                                 caps)
200                         : 0;
201 }
202
203 static inline int
204 rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
205 {
206         uint8_t old_nb_queues = dev->data->nb_queues;
207         struct rte_event_queue_conf *queues_cfg;
208         unsigned int i;
209
210         RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
211                          dev->data->dev_id);
212
213         /* First time configuration */
214         if (dev->data->queues_cfg == NULL && nb_queues != 0) {
215                 /* Allocate memory to store queue configuration */
216                 dev->data->queues_cfg = rte_zmalloc_socket(
217                                 "eventdev->data->queues_cfg",
218                                 sizeof(dev->data->queues_cfg[0]) * nb_queues,
219                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
220                 if (dev->data->queues_cfg == NULL) {
221                         dev->data->nb_queues = 0;
222                         RTE_EDEV_LOG_ERR("failed to get mem for queue cfg,"
223                                         " nb_queues %u", nb_queues);
224                         return -(ENOMEM);
225                 }
226         /* Re-configure */
227         } else if (dev->data->queues_cfg != NULL && nb_queues != 0) {
228                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
229
230                 for (i = nb_queues; i < old_nb_queues; i++)
231                         (*dev->dev_ops->queue_release)(dev, i);
232
233                 /* Re allocate memory to store queue configuration */
234                 queues_cfg = dev->data->queues_cfg;
235                 queues_cfg = rte_realloc(queues_cfg,
236                                 sizeof(queues_cfg[0]) * nb_queues,
237                                 RTE_CACHE_LINE_SIZE);
238                 if (queues_cfg == NULL) {
239                         RTE_EDEV_LOG_ERR("failed to realloc queue cfg memory,"
240                                                 " nb_queues %u", nb_queues);
241                         return -(ENOMEM);
242                 }
243                 dev->data->queues_cfg = queues_cfg;
244
245                 if (nb_queues > old_nb_queues) {
246                         uint8_t new_qs = nb_queues - old_nb_queues;
247
248                         memset(queues_cfg + old_nb_queues, 0,
249                                 sizeof(queues_cfg[0]) * new_qs);
250                 }
251         } else if (dev->data->queues_cfg != NULL && nb_queues == 0) {
252                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
253
254                 for (i = nb_queues; i < old_nb_queues; i++)
255                         (*dev->dev_ops->queue_release)(dev, i);
256         }
257
258         dev->data->nb_queues = nb_queues;
259         return 0;
260 }
261
262 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
263
264 static inline int
265 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
266 {
267         uint8_t old_nb_ports = dev->data->nb_ports;
268         void **ports;
269         uint16_t *links_map;
270         struct rte_event_port_conf *ports_cfg;
271         unsigned int i;
272
273         RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
274                          dev->data->dev_id);
275
276         /* First time configuration */
277         if (dev->data->ports == NULL && nb_ports != 0) {
278                 dev->data->ports = rte_zmalloc_socket("eventdev->data->ports",
279                                 sizeof(dev->data->ports[0]) * nb_ports,
280                                 RTE_CACHE_LINE_SIZE, dev->data->socket_id);
281                 if (dev->data->ports == NULL) {
282                         dev->data->nb_ports = 0;
283                         RTE_EDEV_LOG_ERR("failed to get mem for port meta data,"
284                                         " nb_ports %u", nb_ports);
285                         return -(ENOMEM);
286                 }
287
288                 /* Allocate memory to store port configurations */
289                 dev->data->ports_cfg =
290                         rte_zmalloc_socket("eventdev->ports_cfg",
291                         sizeof(dev->data->ports_cfg[0]) * nb_ports,
292                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
293                 if (dev->data->ports_cfg == NULL) {
294                         dev->data->nb_ports = 0;
295                         RTE_EDEV_LOG_ERR("failed to get mem for port cfg,"
296                                         " nb_ports %u", nb_ports);
297                         return -(ENOMEM);
298                 }
299
300                 /* Allocate memory to store queue to port link connection */
301                 dev->data->links_map =
302                         rte_zmalloc_socket("eventdev->links_map",
303                         sizeof(dev->data->links_map[0]) * nb_ports *
304                         RTE_EVENT_MAX_QUEUES_PER_DEV,
305                         RTE_CACHE_LINE_SIZE, dev->data->socket_id);
306                 if (dev->data->links_map == NULL) {
307                         dev->data->nb_ports = 0;
308                         RTE_EDEV_LOG_ERR("failed to get mem for port_map area,"
309                                         " nb_ports %u", nb_ports);
310                         return -(ENOMEM);
311                 }
312                 for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
313                         dev->data->links_map[i] =
314                                 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
315         } else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
316                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
317
318                 ports = dev->data->ports;
319                 ports_cfg = dev->data->ports_cfg;
320                 links_map = dev->data->links_map;
321
322                 for (i = nb_ports; i < old_nb_ports; i++)
323                         (*dev->dev_ops->port_release)(ports[i]);
324
325                 /* Realloc memory for ports */
326                 ports = rte_realloc(ports, sizeof(ports[0]) * nb_ports,
327                                 RTE_CACHE_LINE_SIZE);
328                 if (ports == NULL) {
329                         RTE_EDEV_LOG_ERR("failed to realloc port meta data,"
330                                                 " nb_ports %u", nb_ports);
331                         return -(ENOMEM);
332                 }
333
334                 /* Realloc memory for ports_cfg */
335                 ports_cfg = rte_realloc(ports_cfg,
336                         sizeof(ports_cfg[0]) * nb_ports,
337                         RTE_CACHE_LINE_SIZE);
338                 if (ports_cfg == NULL) {
339                         RTE_EDEV_LOG_ERR("failed to realloc port cfg mem,"
340                                                 " nb_ports %u", nb_ports);
341                         return -(ENOMEM);
342                 }
343
344                 /* Realloc memory to store queue to port link connection */
345                 links_map = rte_realloc(links_map,
346                         sizeof(dev->data->links_map[0]) * nb_ports *
347                         RTE_EVENT_MAX_QUEUES_PER_DEV,
348                         RTE_CACHE_LINE_SIZE);
349                 if (links_map == NULL) {
350                         dev->data->nb_ports = 0;
351                         RTE_EDEV_LOG_ERR("failed to realloc mem for port_map,"
352                                         " nb_ports %u", nb_ports);
353                         return -(ENOMEM);
354                 }
355
356                 if (nb_ports > old_nb_ports) {
357                         uint8_t new_ps = nb_ports - old_nb_ports;
358                         unsigned int old_links_map_end =
359                                 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
360                         unsigned int links_map_end =
361                                 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
362
363                         memset(ports + old_nb_ports, 0,
364                                 sizeof(ports[0]) * new_ps);
365                         memset(ports_cfg + old_nb_ports, 0,
366                                 sizeof(ports_cfg[0]) * new_ps);
367                         for (i = old_links_map_end; i < links_map_end; i++)
368                                 links_map[i] =
369                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
370                 }
371
372                 dev->data->ports = ports;
373                 dev->data->ports_cfg = ports_cfg;
374                 dev->data->links_map = links_map;
375         } else if (dev->data->ports != NULL && nb_ports == 0) {
376                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
377
378                 ports = dev->data->ports;
379                 for (i = nb_ports; i < old_nb_ports; i++)
380                         (*dev->dev_ops->port_release)(ports[i]);
381         }
382
383         dev->data->nb_ports = nb_ports;
384         return 0;
385 }
386
387 int
388 rte_event_dev_configure(uint8_t dev_id,
389                         const struct rte_event_dev_config *dev_conf)
390 {
391         struct rte_eventdev *dev;
392         struct rte_event_dev_info info;
393         int diag;
394
395         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
396         dev = &rte_eventdevs[dev_id];
397
398         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
399         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
400
401         if (dev->data->dev_started) {
402                 RTE_EDEV_LOG_ERR(
403                     "device %d must be stopped to allow configuration", dev_id);
404                 return -EBUSY;
405         }
406
407         if (dev_conf == NULL)
408                 return -EINVAL;
409
410         (*dev->dev_ops->dev_infos_get)(dev, &info);
411
412         /* Check dequeue_timeout_ns value is in limit */
413         if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
414                 if (dev_conf->dequeue_timeout_ns &&
415                     (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
416                         || dev_conf->dequeue_timeout_ns >
417                                  info.max_dequeue_timeout_ns)) {
418                         RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
419                         " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
420                         dev_id, dev_conf->dequeue_timeout_ns,
421                         info.min_dequeue_timeout_ns,
422                         info.max_dequeue_timeout_ns);
423                         return -EINVAL;
424                 }
425         }
426
427         /* Check nb_events_limit is in limit */
428         if (dev_conf->nb_events_limit > info.max_num_events) {
429                 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
430                 dev_id, dev_conf->nb_events_limit, info.max_num_events);
431                 return -EINVAL;
432         }
433
434         /* Check nb_event_queues is in limit */
435         if (!dev_conf->nb_event_queues) {
436                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
437                                         dev_id);
438                 return -EINVAL;
439         }
440         if (dev_conf->nb_event_queues > info.max_event_queues) {
441                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d",
442                 dev_id, dev_conf->nb_event_queues, info.max_event_queues);
443                 return -EINVAL;
444         }
445
446         /* Check nb_event_ports is in limit */
447         if (!dev_conf->nb_event_ports) {
448                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
449                 return -EINVAL;
450         }
451         if (dev_conf->nb_event_ports > info.max_event_ports) {
452                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d",
453                 dev_id, dev_conf->nb_event_ports, info.max_event_ports);
454                 return -EINVAL;
455         }
456
457         /* Check nb_event_queue_flows is in limit */
458         if (!dev_conf->nb_event_queue_flows) {
459                 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
460                 return -EINVAL;
461         }
462         if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
463                 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
464                 dev_id, dev_conf->nb_event_queue_flows,
465                 info.max_event_queue_flows);
466                 return -EINVAL;
467         }
468
469         /* Check nb_event_port_dequeue_depth is in limit */
470         if (!dev_conf->nb_event_port_dequeue_depth) {
471                 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
472                                         dev_id);
473                 return -EINVAL;
474         }
475         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
476                  (dev_conf->nb_event_port_dequeue_depth >
477                          info.max_event_port_dequeue_depth)) {
478                 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
479                 dev_id, dev_conf->nb_event_port_dequeue_depth,
480                 info.max_event_port_dequeue_depth);
481                 return -EINVAL;
482         }
483
484         /* Check nb_event_port_enqueue_depth is in limit */
485         if (!dev_conf->nb_event_port_enqueue_depth) {
486                 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
487                                         dev_id);
488                 return -EINVAL;
489         }
490         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
491                 (dev_conf->nb_event_port_enqueue_depth >
492                          info.max_event_port_enqueue_depth)) {
493                 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
494                 dev_id, dev_conf->nb_event_port_enqueue_depth,
495                 info.max_event_port_enqueue_depth);
496                 return -EINVAL;
497         }
498
499         /* Copy the dev_conf parameter into the dev structure */
500         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
501
502         /* Setup new number of queues and reconfigure device. */
503         diag = rte_event_dev_queue_config(dev, dev_conf->nb_event_queues);
504         if (diag != 0) {
505                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_queue_config = %d",
506                                 dev_id, diag);
507                 return diag;
508         }
509
510         /* Setup new number of ports and reconfigure device. */
511         diag = rte_event_dev_port_config(dev, dev_conf->nb_event_ports);
512         if (diag != 0) {
513                 rte_event_dev_queue_config(dev, 0);
514                 RTE_EDEV_LOG_ERR("dev%d rte_event_dev_port_config = %d",
515                                 dev_id, diag);
516                 return diag;
517         }
518
519         /* Configure the device */
520         diag = (*dev->dev_ops->dev_configure)(dev);
521         if (diag != 0) {
522                 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
523                 rte_event_dev_queue_config(dev, 0);
524                 rte_event_dev_port_config(dev, 0);
525         }
526
527         dev->data->event_dev_cap = info.event_dev_cap;
528         rte_eventdev_trace_configure(dev_id, dev_conf, diag);
529         return diag;
530 }
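
/*
 * Usage sketch: a minimal device configuration derived from the limits
 * returned by rte_event_dev_info_get(), mirroring the checks above. The
 * queue and port counts are illustrative.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *
 *	cfg.nb_event_queues = 2;
 *	cfg.nb_event_ports = 2;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *
 *	if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *		rte_exit(EXIT_FAILURE, "eventdev %u config failed\n", dev_id);
 */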
531
532 static inline int
533 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
534 {
535         if (queue_id < dev->data->nb_queues && queue_id <
536                                 RTE_EVENT_MAX_QUEUES_PER_DEV)
537                 return 1;
538         else
539                 return 0;
540 }
541
542 int
543 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
544                                  struct rte_event_queue_conf *queue_conf)
545 {
546         struct rte_eventdev *dev;
547
548         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
549         dev = &rte_eventdevs[dev_id];
550
551         if (queue_conf == NULL)
552                 return -EINVAL;
553
554         if (!is_valid_queue(dev, queue_id)) {
555                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
556                 return -EINVAL;
557         }
558
559         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
560         memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
561         (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
562         return 0;
563 }
564
565 static inline int
566 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
567 {
568         if (queue_conf &&
569                 !(queue_conf->event_queue_cfg &
570                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
571                 ((queue_conf->event_queue_cfg &
572                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
573                 (queue_conf->schedule_type
574                         == RTE_SCHED_TYPE_ATOMIC)
575                 ))
576                 return 1;
577         else
578                 return 0;
579 }
580
581 static inline int
582 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
583 {
584         if (queue_conf &&
585                 !(queue_conf->event_queue_cfg &
586                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
587                 ((queue_conf->event_queue_cfg &
588                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
589                 (queue_conf->schedule_type
590                         == RTE_SCHED_TYPE_ORDERED)
591                 ))
592                 return 1;
593         else
594                 return 0;
595 }
596
597
598 int
599 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
600                       const struct rte_event_queue_conf *queue_conf)
601 {
602         struct rte_eventdev *dev;
603         struct rte_event_queue_conf def_conf;
604
605         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
606         dev = &rte_eventdevs[dev_id];
607
608         if (!is_valid_queue(dev, queue_id)) {
609                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
610                 return -EINVAL;
611         }
612
613         /* Check nb_atomic_flows limit */
614         if (is_valid_atomic_queue_conf(queue_conf)) {
615                 if (queue_conf->nb_atomic_flows == 0 ||
616                     queue_conf->nb_atomic_flows >
617                         dev->data->dev_conf.nb_event_queue_flows) {
618                         RTE_EDEV_LOG_ERR(
619                 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
620                         dev_id, queue_id, queue_conf->nb_atomic_flows,
621                         dev->data->dev_conf.nb_event_queue_flows);
622                         return -EINVAL;
623                 }
624         }
625
626         /* Check nb_atomic_order_sequences limit */
627         if (is_valid_ordered_queue_conf(queue_conf)) {
628                 if (queue_conf->nb_atomic_order_sequences == 0 ||
629                     queue_conf->nb_atomic_order_sequences >
630                         dev->data->dev_conf.nb_event_queue_flows) {
631                         RTE_EDEV_LOG_ERR(
632                 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
633                         dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
634                         dev->data->dev_conf.nb_event_queue_flows);
635                         return -EINVAL;
636                 }
637         }
638
639         if (dev->data->dev_started) {
640                 RTE_EDEV_LOG_ERR(
641                     "device %d must be stopped to allow queue setup", dev_id);
642                 return -EBUSY;
643         }
644
645         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
646
647         if (queue_conf == NULL) {
648                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
649                                         -ENOTSUP);
650                 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
651                 queue_conf = &def_conf;
652         }
653
654         dev->data->queues_cfg[queue_id] = *queue_conf;
655         rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
656         return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
657 }
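
/*
 * Usage sketch: setting up every configured queue from the PMD default
 * configuration; forcing atomic scheduling is only an example override.
 * cfg refers to the rte_event_dev_config used to configure the device.
 *
 *	uint8_t q;
 *	struct rte_event_queue_conf qconf;
 *
 *	for (q = 0; q < cfg.nb_event_queues; q++) {
 *		rte_event_queue_default_conf_get(dev_id, q, &qconf);
 *		qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *		if (rte_event_queue_setup(dev_id, q, &qconf) < 0)
 *			return -1;
 *	}
 */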
658
659 static inline int
660 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
661 {
662         if (port_id < dev->data->nb_ports)
663                 return 1;
664         else
665                 return 0;
666 }
667
668 int
669 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
670                                  struct rte_event_port_conf *port_conf)
671 {
672         struct rte_eventdev *dev;
673
674         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
675         dev = &rte_eventdevs[dev_id];
676
677         if (port_conf == NULL)
678                 return -EINVAL;
679
680         if (!is_valid_port(dev, port_id)) {
681                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
682                 return -EINVAL;
683         }
684
685         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
686         memset(port_conf, 0, sizeof(struct rte_event_port_conf));
687         (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
688         return 0;
689 }
690
691 int
692 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
693                      const struct rte_event_port_conf *port_conf)
694 {
695         struct rte_eventdev *dev;
696         struct rte_event_port_conf def_conf;
697         int diag;
698
699         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
700         dev = &rte_eventdevs[dev_id];
701
702         if (!is_valid_port(dev, port_id)) {
703                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
704                 return -EINVAL;
705         }
706
707         /* Check new_event_threshold limit */
708         if ((port_conf && !port_conf->new_event_threshold) ||
709                         (port_conf && port_conf->new_event_threshold >
710                                  dev->data->dev_conf.nb_events_limit)) {
711                 RTE_EDEV_LOG_ERR(
712                    "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
713                         dev_id, port_id, port_conf->new_event_threshold,
714                         dev->data->dev_conf.nb_events_limit);
715                 return -EINVAL;
716         }
717
718         /* Check dequeue_depth limit */
719         if ((port_conf && !port_conf->dequeue_depth) ||
720                         (port_conf && port_conf->dequeue_depth >
721                 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
722                 RTE_EDEV_LOG_ERR(
723                    "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
724                         dev_id, port_id, port_conf->dequeue_depth,
725                         dev->data->dev_conf.nb_event_port_dequeue_depth);
726                 return -EINVAL;
727         }
728
729         /* Check enqueue_depth limit */
730         if ((port_conf && !port_conf->enqueue_depth) ||
731                         (port_conf && port_conf->enqueue_depth >
732                 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
733                 RTE_EDEV_LOG_ERR(
734                    "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
735                         dev_id, port_id, port_conf->enqueue_depth,
736                         dev->data->dev_conf.nb_event_port_enqueue_depth);
737                 return -EINVAL;
738         }
739
740         if (port_conf && port_conf->disable_implicit_release &&
741             !(dev->data->event_dev_cap &
742               RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
743                 RTE_EDEV_LOG_ERR(
744                    "dev%d port%d Implicit release disable not supported",
745                         dev_id, port_id);
746                 return -EINVAL;
747         }
748
749         if (dev->data->dev_started) {
750                 RTE_EDEV_LOG_ERR(
751                     "device %d must be stopped to allow port setup", dev_id);
752                 return -EBUSY;
753         }
754
755         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
756
757         if (port_conf == NULL) {
758                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
759                                         -ENOTSUP);
760                 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
761                 port_conf = &def_conf;
762         }
763
764         dev->data->ports_cfg[port_id] = *port_conf;
765
766         diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
767
768         /* Unlink all the queues from this port (default state after setup) */
769         if (!diag)
770                 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
771
772         rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
773         if (diag < 0)
774                 return diag;
775
776         return 0;
777 }
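
/*
 * Usage sketch: setting up one event port per worker, using the PMD default
 * configuration (passing NULL for the configuration has the same effect, as
 * handled above).
 *
 *	uint8_t p;
 *	struct rte_event_port_conf pconf;
 *
 *	for (p = 0; p < cfg.nb_event_ports; p++) {
 *		rte_event_port_default_conf_get(dev_id, p, &pconf);
 *		if (rte_event_port_setup(dev_id, p, &pconf) < 0)
 *			return -1;
 *	}
 */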
778
779 int
780 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
781                        uint32_t *attr_value)
782 {
783         struct rte_eventdev *dev;
784
785         if (!attr_value)
786                 return -EINVAL;
787         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
788         dev = &rte_eventdevs[dev_id];
789
790         switch (attr_id) {
791         case RTE_EVENT_DEV_ATTR_PORT_COUNT:
792                 *attr_value = dev->data->nb_ports;
793                 break;
794         case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
795                 *attr_value = dev->data->nb_queues;
796                 break;
797         case RTE_EVENT_DEV_ATTR_STARTED:
798                 *attr_value = dev->data->dev_started;
799                 break;
800         default:
801                 return -EINVAL;
802         }
803
804         return 0;
805 }
806
807 int
808 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
809                         uint32_t *attr_value)
810 {
811         struct rte_eventdev *dev;
812
813         if (!attr_value)
814                 return -EINVAL;
815
816         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
817         dev = &rte_eventdevs[dev_id];
818         if (!is_valid_port(dev, port_id)) {
819                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
820                 return -EINVAL;
821         }
822
823         switch (attr_id) {
824         case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
825                 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
826                 break;
827         case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
828                 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
829                 break;
830         case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
831                 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
832                 break;
833         default:
834                 return -EINVAL;
835         }
836         return 0;
837 }
838
839 int
840 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
841                         uint32_t *attr_value)
842 {
843         struct rte_event_queue_conf *conf;
844         struct rte_eventdev *dev;
845
846         if (!attr_value)
847                 return -EINVAL;
848
849         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
850         dev = &rte_eventdevs[dev_id];
851         if (!is_valid_queue(dev, queue_id)) {
852                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
853                 return -EINVAL;
854         }
855
856         conf = &dev->data->queues_cfg[queue_id];
857
858         switch (attr_id) {
859         case RTE_EVENT_QUEUE_ATTR_PRIORITY:
860                 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
861                 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
862                         *attr_value = conf->priority;
863                 break;
864         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
865                 *attr_value = conf->nb_atomic_flows;
866                 break;
867         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
868                 *attr_value = conf->nb_atomic_order_sequences;
869                 break;
870         case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
871                 *attr_value = conf->event_queue_cfg;
872                 break;
873         case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
874                 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
875                         return -EOVERFLOW;
876
877                 *attr_value = conf->schedule_type;
878                 break;
879         default:
880                 return -EINVAL;
881         }
882         return 0;
883 }
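
/*
 * Usage sketch: the attribute getters allow runtime inspection without
 * keeping the original configuration around. Queue 0 is an example; the
 * -EOVERFLOW case handled above occurs for RTE_EVENT_QUEUE_CFG_ALL_TYPES
 * queues, where the schedule type is carried per event.
 *
 *	uint32_t nb_queues, sched_type;
 *	int ret;
 *
 *	rte_event_dev_attr_get(dev_id, RTE_EVENT_DEV_ATTR_QUEUE_COUNT,
 *			       &nb_queues);
 *	ret = rte_event_queue_attr_get(dev_id, 0,
 *			RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE, &sched_type);
 *	if (ret == -EOVERFLOW)
 *		printf("queue 0 is ALL_TYPES; schedule type is per event\n");
 */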
884
885 int
886 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
887                     const uint8_t queues[], const uint8_t priorities[],
888                     uint16_t nb_links)
889 {
890         struct rte_eventdev *dev;
891         uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
892         uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
893         uint16_t *links_map;
894         int i, diag;
895
896         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
897         dev = &rte_eventdevs[dev_id];
898
899         if (*dev->dev_ops->port_link == NULL) {
900                 RTE_EDEV_LOG_ERR("Function not supported");
901                 rte_errno = ENOTSUP;
902                 return 0;
903         }
904
905         if (!is_valid_port(dev, port_id)) {
906                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
907                 rte_errno = EINVAL;
908                 return 0;
909         }
910
911         if (queues == NULL) {
912                 for (i = 0; i < dev->data->nb_queues; i++)
913                         queues_list[i] = i;
914
915                 queues = queues_list;
916                 nb_links = dev->data->nb_queues;
917         }
918
919         if (priorities == NULL) {
920                 for (i = 0; i < nb_links; i++)
921                         priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
922
923                 priorities = priorities_list;
924         }
925
926         for (i = 0; i < nb_links; i++)
927                 if (queues[i] >= dev->data->nb_queues) {
928                         rte_errno = EINVAL;
929                         return 0;
930                 }
931
932         diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
933                                                 queues, priorities, nb_links);
934         if (diag < 0)
935                 return diag;
936
937         links_map = dev->data->links_map;
938         /* Point links_map to this port specific area */
939         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
940         for (i = 0; i < diag; i++)
941                 links_map[queues[i]] = (uint8_t)priorities[i];
942
943         rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
944         return diag;
945 }
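
/*
 * Usage sketch: linking queues to a port. Passing NULL/NULL links every
 * configured queue at normal priority, as implemented above; the explicit
 * form below links two example queues with different priorities.
 *
 *	uint8_t queues[] = { 0, 1 };
 *	uint8_t prios[] = { RTE_EVENT_DEV_PRIORITY_HIGHEST,
 *			    RTE_EVENT_DEV_PRIORITY_NORMAL };
 *	int n = rte_event_port_link(dev_id, port_id, queues, prios, 2);
 *
 *	if (n != 2)
 *		rte_exit(EXIT_FAILURE, "link failed: %d (rte_errno %d)\n",
 *			 n, rte_errno);
 */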
946
947 int
948 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
949                       uint8_t queues[], uint16_t nb_unlinks)
950 {
951         struct rte_eventdev *dev;
952         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
953         int i, diag, j;
954         uint16_t *links_map;
955
956         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
957         dev = &rte_eventdevs[dev_id];
958
959         if (*dev->dev_ops->port_unlink == NULL) {
960                 RTE_EDEV_LOG_ERR("Function not supported");
961                 rte_errno = ENOTSUP;
962                 return 0;
963         }
964
965         if (!is_valid_port(dev, port_id)) {
966                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
967                 rte_errno = EINVAL;
968                 return 0;
969         }
970
971         links_map = dev->data->links_map;
972         /* Point links_map to this port specific area */
973         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
974
975         if (queues == NULL) {
976                 j = 0;
977                 for (i = 0; i < dev->data->nb_queues; i++) {
978                         if (links_map[i] !=
979                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
980                                 all_queues[j] = i;
981                                 j++;
982                         }
983                 }
984                 queues = all_queues;
985         } else {
986                 for (j = 0; j < nb_unlinks; j++) {
987                         if (links_map[queues[j]] ==
988                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
989                                 break;
990                 }
991         }
992
993         nb_unlinks = j;
994         for (i = 0; i < nb_unlinks; i++)
995                 if (queues[i] >= dev->data->nb_queues) {
996                         rte_errno = EINVAL;
997                         return 0;
998                 }
999
1000         diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
1001                                         queues, nb_unlinks);
1002
1003         if (diag < 0)
1004                 return diag;
1005
1006         for (i = 0; i < diag; i++)
1007                 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1008
1009         rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
1010         return diag;
1011 }
1012
1013 int
1014 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
1015 {
1016         struct rte_eventdev *dev;
1017
1018         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1019         dev = &rte_eventdevs[dev_id];
1020         if (!is_valid_port(dev, port_id)) {
1021                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1022                 return -EINVAL;
1023         }
1024
1025         /* Return 0 if the PMD does not implement unlinks in progress.
1026          * This allows PMDs which handle unlink synchronously to not implement
1027          * this function at all.
1028          */
1029         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
1030
1031         return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1032                         dev->data->ports[port_id]);
1033 }
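
/*
 * Usage sketch: unlinking all queues from a port and waiting for the PMD to
 * finish before reusing the port. PMDs that unlink synchronously need not
 * implement the in-progress query, in which case it returns 0 immediately,
 * as noted above.
 *
 *	int n = rte_event_port_unlink(dev_id, port_id, NULL, 0);
 *
 *	if (n < 0)
 *		return n;
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */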
1034
1035 int
1036 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1037                          uint8_t queues[], uint8_t priorities[])
1038 {
1039         struct rte_eventdev *dev;
1040         uint16_t *links_map;
1041         int i, count = 0;
1042
1043         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1044         dev = &rte_eventdevs[dev_id];
1045         if (!is_valid_port(dev, port_id)) {
1046                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1047                 return -EINVAL;
1048         }
1049
1050         links_map = dev->data->links_map;
1051         /* Point links_map to this port specific area */
1052         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1053         for (i = 0; i < dev->data->nb_queues; i++) {
1054                 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1055                         queues[count] = i;
1056                         priorities[count] = (uint8_t)links_map[i];
1057                         ++count;
1058                 }
1059         }
1060         return count;
1061 }
1062
1063 int
1064 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1065                                  uint64_t *timeout_ticks)
1066 {
1067         struct rte_eventdev *dev;
1068
1069         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1070         dev = &rte_eventdevs[dev_id];
1071         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1072
1073         if (timeout_ticks == NULL)
1074                 return -EINVAL;
1075
1076         return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1077 }
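
/*
 * Usage sketch: converting a wall-clock timeout into the opaque tick value
 * expected by rte_event_dequeue_burst(); 100 microseconds is an arbitrary
 * example, and port_id an already set up event port.
 *
 *	uint64_t ticks = 0;
 *	struct rte_event ev[32];
 *
 *	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
 *		ticks = 0;
 *
 *	uint16_t nb = rte_event_dequeue_burst(dev_id, port_id, ev,
 *					      RTE_DIM(ev), ticks);
 */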
1078
1079 int
1080 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1081 {
1082         struct rte_eventdev *dev;
1083
1084         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1085         dev = &rte_eventdevs[dev_id];
1086
1087         if (service_id == NULL)
1088                 return -EINVAL;
1089
1090         if (dev->data->service_inited)
1091                 *service_id = dev->data->service_id;
1092
1093         return dev->data->service_inited ? 0 : -ESRCH;
1094 }
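
/*
 * Usage sketch: some PMDs (for example the software eventdev, which lacks
 * RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) expose their scheduler as a service
 * that must be mapped to a service lcore. Lcore 1 is assumed to have been
 * registered as a service core elsewhere via rte_service_lcore_add().
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, 1, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */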
1095
1096 int
1097 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1098 {
1099         struct rte_eventdev *dev;
1100
1101         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1102         dev = &rte_eventdevs[dev_id];
1103         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1104
1105         (*dev->dev_ops->dump)(dev, f);
1106         return 0;
1107
1108 }
1109
1110 static int
1111 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1112                 uint8_t queue_port_id)
1113 {
1114         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1115         if (dev->dev_ops->xstats_get_names != NULL)
1116                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1117                                                         queue_port_id,
1118                                                         NULL, NULL, 0);
1119         return 0;
1120 }
1121
1122 int
1123 rte_event_dev_xstats_names_get(uint8_t dev_id,
1124                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1125                 struct rte_event_dev_xstats_name *xstats_names,
1126                 unsigned int *ids, unsigned int size)
1127 {
1128         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1129         const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1130                                                           queue_port_id);
1131         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1132                         (int)size < cnt_expected_entries)
1133                 return cnt_expected_entries;
1134
1135         /* dev_id checked above */
1136         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1137
1138         if (dev->dev_ops->xstats_get_names != NULL)
1139                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1140                                 queue_port_id, xstats_names, ids, size);
1141
1142         return -ENOTSUP;
1143 }
1144
1145 /* retrieve eventdev extended statistics */
1146 int
1147 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1148                 uint8_t queue_port_id, const unsigned int ids[],
1149                 uint64_t values[], unsigned int n)
1150 {
1151         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1152         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1153
1154         /* implemented by the driver */
1155         if (dev->dev_ops->xstats_get != NULL)
1156                 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1157                                 ids, values, n);
1158         return -ENOTSUP;
1159 }
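
/*
 * Usage sketch: dumping all device-level extended stats. The first call with
 * a NULL array returns the entry count, as implemented above; names and
 * values are then fetched with matching ids.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *	if (n <= 0)
 *		return n;
 *
 *	struct rte_event_dev_xstats_name names[n];
 *	unsigned int ids[n];
 *	uint64_t values[n];
 *
 *	rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				       names, ids, n);
 *	rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE, 0,
 *				 ids, values, n);
 *
 *	for (int i = 0; i < n; i++)
 *		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
 */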
1160
1161 uint64_t
1162 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1163                 unsigned int *id)
1164 {
1165         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1166         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1167         unsigned int temp = -1;
1168
1169         if (id != NULL)
1170                 *id = (unsigned int)-1;
1171         else
1172                 id = &temp; /* ensure driver never gets a NULL value */
1173
1174         /* implemented by driver */
1175         if (dev->dev_ops->xstats_get_by_name != NULL)
1176                 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1177         return -ENOTSUP;
1178 }
1179
1180 int rte_event_dev_xstats_reset(uint8_t dev_id,
1181                 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1182                 const uint32_t ids[], uint32_t nb_ids)
1183 {
1184         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1185         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1186
1187         if (dev->dev_ops->xstats_reset != NULL)
1188                 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1189                                                         ids, nb_ids);
1190         return -ENOTSUP;
1191 }
1192
1193 int rte_event_dev_selftest(uint8_t dev_id)
1194 {
1195         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1196         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1197
1198         if (dev->dev_ops->dev_selftest != NULL)
1199                 return (*dev->dev_ops->dev_selftest)();
1200         return -ENOTSUP;
1201 }
1202
1203 int
1204 rte_event_dev_start(uint8_t dev_id)
1205 {
1206         struct rte_eventdev *dev;
1207         int diag;
1208
1209         RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1210
1211         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1212         dev = &rte_eventdevs[dev_id];
1213         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1214
1215         if (dev->data->dev_started != 0) {
1216                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1217                         dev_id);
1218                 return 0;
1219         }
1220
1221         diag = (*dev->dev_ops->dev_start)(dev);
1222         rte_eventdev_trace_start(dev_id, diag);
1223         if (diag == 0)
1224                 dev->data->dev_started = 1;
1225         else
1226                 return diag;
1227
1228         return 0;
1229 }
1230
1231 int
1232 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1233                 eventdev_stop_flush_t callback, void *userdata)
1234 {
1235         struct rte_eventdev *dev;
1236
1237         RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1238
1239         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1240         dev = &rte_eventdevs[dev_id];
1241
1242         dev->dev_ops->dev_stop_flush = callback;
1243         dev->data->dev_stop_flush_arg = userdata;
1244
1245         return 0;
1246 }
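
/*
 * Usage sketch: a stop-flush callback that releases mbufs still held in
 * events drained by rte_event_dev_stop(). It assumes every flushed event
 * carries an mbuf; adapt the cleanup to the event payload actually used.
 *
 *	static void
 *	flush_cb(uint8_t id __rte_unused, struct rte_event ev,
 *		 void *arg __rte_unused)
 *	{
 *		rte_pktmbuf_free(ev.mbuf);
 *	}
 *
 *	rte_event_dev_stop_flush_callback_register(dev_id, flush_cb, NULL);
 */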
1247
1248 void
1249 rte_event_dev_stop(uint8_t dev_id)
1250 {
1251         struct rte_eventdev *dev;
1252
1253         RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1254
1255         RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1256         dev = &rte_eventdevs[dev_id];
1257         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1258
1259         if (dev->data->dev_started == 0) {
1260                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1261                         dev_id);
1262                 return;
1263         }
1264
1265         dev->data->dev_started = 0;
1266         (*dev->dev_ops->dev_stop)(dev);
1267         rte_eventdev_trace_stop(dev_id);
1268 }
1269
1270 int
1271 rte_event_dev_close(uint8_t dev_id)
1272 {
1273         struct rte_eventdev *dev;
1274
1275         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1276         dev = &rte_eventdevs[dev_id];
1277         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1278
1279         /* Device must be stopped before it can be closed */
1280         if (dev->data->dev_started == 1) {
1281                 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1282                                 dev_id);
1283                 return -EBUSY;
1284         }
1285
1286         rte_eventdev_trace_close(dev_id);
1287         return (*dev->dev_ops->dev_close)(dev);
1288 }
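
/*
 * Usage sketch: teardown order. The device must be stopped before
 * rte_event_dev_close() can succeed, otherwise -EBUSY is returned as above.
 *
 *	rte_event_dev_stop(dev_id);
 *	if (rte_event_dev_close(dev_id) < 0)
 *		printf("eventdev %u close failed\n", dev_id);
 */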
1289
1290 static inline int
1291 rte_eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1292                 int socket_id)
1293 {
1294         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1295         const struct rte_memzone *mz;
1296         int n;
1297
1298         /* Generate memzone name */
1299         n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1300         if (n >= (int)sizeof(mz_name))
1301                 return -EINVAL;
1302
1303         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1304                 mz = rte_memzone_reserve(mz_name,
1305                                 sizeof(struct rte_eventdev_data),
1306                                 socket_id, 0);
1307         } else
1308                 mz = rte_memzone_lookup(mz_name);
1309
1310         if (mz == NULL)
1311                 return -ENOMEM;
1312
1313         *data = mz->addr;
1314         if (rte_eal_process_type() == RTE_PROC_PRIMARY)
1315                 memset(*data, 0, sizeof(struct rte_eventdev_data));
1316
1317         return 0;
1318 }
1319
1320 static inline uint8_t
1321 rte_eventdev_find_free_device_index(void)
1322 {
1323         uint8_t dev_id;
1324
1325         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1326                 if (rte_eventdevs[dev_id].attached ==
1327                                 RTE_EVENTDEV_DETACHED)
1328                         return dev_id;
1329         }
1330         return RTE_EVENT_MAX_DEVS;
1331 }
1332
1333 static uint16_t
1334 rte_event_tx_adapter_enqueue(__rte_unused void *port,
1335                         __rte_unused struct rte_event ev[],
1336                         __rte_unused uint16_t nb_events)
1337 {
1338         rte_errno = ENOTSUP;
1339         return 0;
1340 }
1341
1342 struct rte_eventdev *
1343 rte_event_pmd_allocate(const char *name, int socket_id)
1344 {
1345         struct rte_eventdev *eventdev;
1346         uint8_t dev_id;
1347
1348         if (rte_event_pmd_get_named_dev(name) != NULL) {
1349                 RTE_EDEV_LOG_ERR("Event device with name %s already "
1350                                 "allocated!", name);
1351                 return NULL;
1352         }
1353
1354         dev_id = rte_eventdev_find_free_device_index();
1355         if (dev_id == RTE_EVENT_MAX_DEVS) {
1356                 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1357                 return NULL;
1358         }
1359
1360         eventdev = &rte_eventdevs[dev_id];
1361
1362         eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
1363         eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
1364
1365         if (eventdev->data == NULL) {
1366                 struct rte_eventdev_data *eventdev_data = NULL;
1367
1368                 int retval = rte_eventdev_data_alloc(dev_id, &eventdev_data,
1369                                 socket_id);
1370
1371                 if (retval < 0 || eventdev_data == NULL)
1372                         return NULL;
1373
1374                 eventdev->data = eventdev_data;
1375
1376                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1377
1378                         strlcpy(eventdev->data->name, name,
1379                                 RTE_EVENTDEV_NAME_MAX_LEN);
1380
1381                         eventdev->data->dev_id = dev_id;
1382                         eventdev->data->socket_id = socket_id;
1383                         eventdev->data->dev_started = 0;
1384                 }
1385
1386                 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1387                 eventdev_globals.nb_devs++;
1388         }
1389
1390         return eventdev;
1391 }
1392
1393 int
1394 rte_event_pmd_release(struct rte_eventdev *eventdev)
1395 {
1396         int ret;
1397         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1398         const struct rte_memzone *mz;
1399
1400         if (eventdev == NULL)
1401                 return -EINVAL;
1402
1403         eventdev->attached = RTE_EVENTDEV_DETACHED;
1404         eventdev_globals.nb_devs--;
1405
1406         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1407                 rte_free(eventdev->data->dev_private);
1408
1409                 /* Generate memzone name */
1410                 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1411                                 eventdev->data->dev_id);
1412                 if (ret >= (int)sizeof(mz_name))
1413                         return -EINVAL;
1414
1415                 mz = rte_memzone_lookup(mz_name);
1416                 if (mz == NULL)
1417                         return -ENOMEM;
1418
1419                 ret = rte_memzone_free(mz);
1420                 if (ret)
1421                         return ret;
1422         }
1423
1424         eventdev->data = NULL;
1425         return 0;
1426 }