eventdev: add missing C++ guards
lib/eventdev/rte_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <stdint.h>
11 #include <inttypes.h>
12
13 #include <rte_string_fns.h>
14 #include <rte_log.h>
15 #include <rte_dev.h>
16 #include <rte_memzone.h>
17 #include <rte_eal.h>
18 #include <rte_common.h>
19 #include <rte_malloc.h>
20 #include <rte_errno.h>
21 #include <ethdev_driver.h>
22 #include <rte_cryptodev.h>
23 #include <cryptodev_pmd.h>
24 #include <rte_telemetry.h>
25
26 #include "rte_eventdev.h"
27 #include "eventdev_pmd.h"
28 #include "eventdev_trace.h"
29
30 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
31
32 struct rte_eventdev *rte_eventdevs = rte_event_devices;
33
34 static struct rte_eventdev_global eventdev_globals = {
35         .nb_devs                = 0
36 };
37
38 /* Public fastpath APIs. */
39 struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
40
41 /* Event dev north bound API implementation */
42
43 uint8_t
44 rte_event_dev_count(void)
45 {
46         return eventdev_globals.nb_devs;
47 }
48
49 int
50 rte_event_dev_get_dev_id(const char *name)
51 {
52         int i;
53         uint8_t cmp;
54
55         if (!name)
56                 return -EINVAL;
57
58         for (i = 0; i < eventdev_globals.nb_devs; i++) {
59                 cmp = (strncmp(rte_event_devices[i].data->name, name,
60                                 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
61                         (rte_event_devices[i].dev ? (strncmp(
62                                 rte_event_devices[i].dev->driver->name, name,
63                                          RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
64                 if (cmp && (rte_event_devices[i].attached ==
65                                         RTE_EVENTDEV_ATTACHED))
66                         return i;
67         }
68         return -ENODEV;
69 }
70
71 int
72 rte_event_dev_socket_id(uint8_t dev_id)
73 {
74         struct rte_eventdev *dev;
75
76         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
77         dev = &rte_eventdevs[dev_id];
78
79         return dev->data->socket_id;
80 }
81
82 int
83 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
84 {
85         struct rte_eventdev *dev;
86
87         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
88         dev = &rte_eventdevs[dev_id];
89
90         if (dev_info == NULL)
91                 return -EINVAL;
92
93         memset(dev_info, 0, sizeof(struct rte_event_dev_info));
94
95         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
96         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
97
98         dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
99
100         dev_info->dev = dev->dev;
101         return 0;
102 }
103
104 int
105 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
106                                 uint32_t *caps)
107 {
108         struct rte_eventdev *dev;
109
110         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
111         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
112
113         dev = &rte_eventdevs[dev_id];
114
115         if (caps == NULL)
116                 return -EINVAL;
117
118         if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
119                 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
120         else
121                 *caps = 0;
122
123         return dev->dev_ops->eth_rx_adapter_caps_get ?
124                                 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
125                                                 &rte_eth_devices[eth_port_id],
126                                                 caps)
127                                 : 0;
128 }
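
/*
 * Illustrative sketch, not part of this file: before using the ethernet Rx
 * adapter, an application typically queries these capabilities to decide
 * whether a software service core is needed. dev_id and eth_port_id are
 * assumed to be valid identifiers obtained elsewhere.
 *
 *	uint32_t caps;
 *
 *	if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) == 0 &&
 *	    !(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
 *		// No internal port: packets are injected by a software
 *		// service, so the adapter's service must be mapped to a core.
 *	}
 */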
129
130 int
131 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
132 {
133         struct rte_eventdev *dev;
134         const struct event_timer_adapter_ops *ops;
135
136         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
137
138         dev = &rte_eventdevs[dev_id];
139
140         if (caps == NULL)
141                 return -EINVAL;
142         *caps = 0;
143
144         return dev->dev_ops->timer_adapter_caps_get ?
145                                 (*dev->dev_ops->timer_adapter_caps_get)(dev,
146                                                                         0,
147                                                                         caps,
148                                                                         &ops)
149                                 : 0;
150 }
151
152 int
153 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
154                                   uint32_t *caps)
155 {
156         struct rte_eventdev *dev;
157         struct rte_cryptodev *cdev;
158
159         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
160         if (!rte_cryptodev_is_valid_dev(cdev_id))
161                 return -EINVAL;
162
163         dev = &rte_eventdevs[dev_id];
164         cdev = rte_cryptodev_pmd_get_dev(cdev_id);
165
166         if (caps == NULL)
167                 return -EINVAL;
168
169         if (dev->dev_ops->crypto_adapter_caps_get == NULL)
170                 *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
171         else
172                 *caps = 0;
173
174         return dev->dev_ops->crypto_adapter_caps_get ?
175                 (*dev->dev_ops->crypto_adapter_caps_get)
176                 (dev, cdev, caps) : 0;
177 }
178
179 int
180 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
181                                 uint32_t *caps)
182 {
183         struct rte_eventdev *dev;
184         struct rte_eth_dev *eth_dev;
185
186         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
187         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
188
189         dev = &rte_eventdevs[dev_id];
190         eth_dev = &rte_eth_devices[eth_port_id];
191
192         if (caps == NULL)
193                 return -EINVAL;
194
195         if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
196                 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
197         else
198                 *caps = 0;
199
200         return dev->dev_ops->eth_tx_adapter_caps_get ?
201                         (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
202                                                                 eth_dev,
203                                                                 caps)
204                         : 0;
205 }
206
207 static inline int
208 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
209 {
210         uint8_t old_nb_queues = dev->data->nb_queues;
211         struct rte_event_queue_conf *queues_cfg;
212         unsigned int i;
213
214         RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
215                          dev->data->dev_id);
216
217         if (nb_queues != 0) {
218                 queues_cfg = dev->data->queues_cfg;
219                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
220
221                 for (i = nb_queues; i < old_nb_queues; i++)
222                         (*dev->dev_ops->queue_release)(dev, i);
223
224
225                 if (nb_queues > old_nb_queues) {
226                         uint8_t new_qs = nb_queues - old_nb_queues;
227
228                         memset(queues_cfg + old_nb_queues, 0,
229                                 sizeof(queues_cfg[0]) * new_qs);
230                 }
231         } else {
232                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
233
234                 for (i = nb_queues; i < old_nb_queues; i++)
235                         (*dev->dev_ops->queue_release)(dev, i);
236         }
237
238         dev->data->nb_queues = nb_queues;
239         return 0;
240 }
241
242 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
243
244 static inline int
245 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
246 {
247         uint8_t old_nb_ports = dev->data->nb_ports;
248         void **ports;
249         uint16_t *links_map;
250         struct rte_event_port_conf *ports_cfg;
251         unsigned int i;
252
253         RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
254                          dev->data->dev_id);
255
256         if (nb_ports != 0) { /* re-config */
257                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
258
259                 ports = dev->data->ports;
260                 ports_cfg = dev->data->ports_cfg;
261                 links_map = dev->data->links_map;
262
263                 for (i = nb_ports; i < old_nb_ports; i++)
264                         (*dev->dev_ops->port_release)(ports[i]);
265
266                 if (nb_ports > old_nb_ports) {
267                         uint8_t new_ps = nb_ports - old_nb_ports;
268                         unsigned int old_links_map_end =
269                                 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
270                         unsigned int links_map_end =
271                                 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
272
273                         memset(ports + old_nb_ports, 0,
274                                 sizeof(ports[0]) * new_ps);
275                         memset(ports_cfg + old_nb_ports, 0,
276                                 sizeof(ports_cfg[0]) * new_ps);
277                         for (i = old_links_map_end; i < links_map_end; i++)
278                                 links_map[i] =
279                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
280                 }
281         } else {
282                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
283
284                 ports = dev->data->ports;
285                 for (i = nb_ports; i < old_nb_ports; i++) {
286                         (*dev->dev_ops->port_release)(ports[i]);
287                         ports[i] = NULL;
288                 }
289         }
290
291         dev->data->nb_ports = nb_ports;
292         return 0;
293 }
294
295 int
296 rte_event_dev_configure(uint8_t dev_id,
297                         const struct rte_event_dev_config *dev_conf)
298 {
299         struct rte_event_dev_info info;
300         struct rte_eventdev *dev;
301         int diag;
302
303         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
304         dev = &rte_eventdevs[dev_id];
305
306         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
307         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
308
309         if (dev->data->dev_started) {
310                 RTE_EDEV_LOG_ERR(
311                     "device %d must be stopped to allow configuration", dev_id);
312                 return -EBUSY;
313         }
314
315         if (dev_conf == NULL)
316                 return -EINVAL;
317
318         (*dev->dev_ops->dev_infos_get)(dev, &info);
319
320         /* Check dequeue_timeout_ns value is in limit */
321         if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
322                 if (dev_conf->dequeue_timeout_ns &&
323                     (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
324                         || dev_conf->dequeue_timeout_ns >
325                                  info.max_dequeue_timeout_ns)) {
326                         RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
327                         " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
328                         dev_id, dev_conf->dequeue_timeout_ns,
329                         info.min_dequeue_timeout_ns,
330                         info.max_dequeue_timeout_ns);
331                         return -EINVAL;
332                 }
333         }
334
335         /* Check nb_events_limit is in limit */
336         if (dev_conf->nb_events_limit > info.max_num_events) {
337                 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
338                 dev_id, dev_conf->nb_events_limit, info.max_num_events);
339                 return -EINVAL;
340         }
341
342         /* Check nb_event_queues is in limit */
343         if (!dev_conf->nb_event_queues) {
344                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
345                                         dev_id);
346                 return -EINVAL;
347         }
348         if (dev_conf->nb_event_queues > info.max_event_queues +
349                         info.max_single_link_event_port_queue_pairs) {
350                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
351                                  dev_id, dev_conf->nb_event_queues,
352                                  info.max_event_queues,
353                                  info.max_single_link_event_port_queue_pairs);
354                 return -EINVAL;
355         }
356         if (dev_conf->nb_event_queues -
357                         dev_conf->nb_single_link_event_port_queues >
358                         info.max_event_queues) {
359                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
360                                  dev_id, dev_conf->nb_event_queues,
361                                  dev_conf->nb_single_link_event_port_queues,
362                                  info.max_event_queues);
363                 return -EINVAL;
364         }
365         if (dev_conf->nb_single_link_event_port_queues >
366                         dev_conf->nb_event_queues) {
367                 RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
368                                  dev_id,
369                                  dev_conf->nb_single_link_event_port_queues,
370                                  dev_conf->nb_event_queues);
371                 return -EINVAL;
372         }
373
374         /* Check nb_event_ports is in limit */
375         if (!dev_conf->nb_event_ports) {
376                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
377                 return -EINVAL;
378         }
379         if (dev_conf->nb_event_ports > info.max_event_ports +
380                         info.max_single_link_event_port_queue_pairs) {
381                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
382                                  dev_id, dev_conf->nb_event_ports,
383                                  info.max_event_ports,
384                                  info.max_single_link_event_port_queue_pairs);
385                 return -EINVAL;
386         }
387         if (dev_conf->nb_event_ports -
388                         dev_conf->nb_single_link_event_port_queues
389                         > info.max_event_ports) {
390                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
391                                  dev_id, dev_conf->nb_event_ports,
392                                  dev_conf->nb_single_link_event_port_queues,
393                                  info.max_event_ports);
394                 return -EINVAL;
395         }
396
397         if (dev_conf->nb_single_link_event_port_queues >
398             dev_conf->nb_event_ports) {
399                 RTE_EDEV_LOG_ERR(
400                                  "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
401                                  dev_id,
402                                  dev_conf->nb_single_link_event_port_queues,
403                                  dev_conf->nb_event_ports);
404                 return -EINVAL;
405         }
406
407         /* Check nb_event_queue_flows is in limit */
408         if (!dev_conf->nb_event_queue_flows) {
409                 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
410                 return -EINVAL;
411         }
412         if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
413                 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
414                 dev_id, dev_conf->nb_event_queue_flows,
415                 info.max_event_queue_flows);
416                 return -EINVAL;
417         }
418
419         /* Check nb_event_port_dequeue_depth is in limit */
420         if (!dev_conf->nb_event_port_dequeue_depth) {
421                 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
422                                         dev_id);
423                 return -EINVAL;
424         }
425         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
426                  (dev_conf->nb_event_port_dequeue_depth >
427                          info.max_event_port_dequeue_depth)) {
428                 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
429                 dev_id, dev_conf->nb_event_port_dequeue_depth,
430                 info.max_event_port_dequeue_depth);
431                 return -EINVAL;
432         }
433
434         /* Check nb_event_port_enqueue_depth is in limit */
435         if (!dev_conf->nb_event_port_enqueue_depth) {
436                 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
437                                         dev_id);
438                 return -EINVAL;
439         }
440         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
441                 (dev_conf->nb_event_port_enqueue_depth >
442                          info.max_event_port_enqueue_depth)) {
443                 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
444                 dev_id, dev_conf->nb_event_port_enqueue_depth,
445                 info.max_event_port_enqueue_depth);
446                 return -EINVAL;
447         }
448
449         /* Copy the dev_conf parameter into the dev structure */
450         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
451
452         /* Setup new number of queues and reconfigure device. */
453         diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
454         if (diag != 0) {
455                 RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
456                                  diag);
457                 return diag;
458         }
459
460         /* Setup new number of ports and reconfigure device. */
461         diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
462         if (diag != 0) {
463                 event_dev_queue_config(dev, 0);
464                 RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
465                                  diag);
466                 return diag;
467         }
468
469         event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
470
471         /* Configure the device */
472         diag = (*dev->dev_ops->dev_configure)(dev);
473         if (diag != 0) {
474                 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
475                 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
476                 event_dev_queue_config(dev, 0);
477                 event_dev_port_config(dev, 0);
478         }
479
480         dev->data->event_dev_cap = info.event_dev_cap;
481         rte_eventdev_trace_configure(dev_id, dev_conf, diag);
482         return diag;
483 }
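
/*
 * Illustrative usage sketch, not part of the library: the configuration
 * values passed here are normally derived from the limits reported by
 * rte_event_dev_info_get(), which the checks above enforce. dev_id is an
 * assumed valid device ID and handle_error() is a hypothetical error handler.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config conf = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	conf.nb_event_queues = 2;	// example counts, bounded by info.max_*
 *	conf.nb_event_ports = 2;
 *	conf.nb_events_limit = info.max_num_events;
 *	conf.nb_event_queue_flows = info.max_event_queue_flows;
 *	conf.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	conf.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	conf.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	if (rte_event_dev_configure(dev_id, &conf) < 0)
 *		handle_error();
 */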
484
485 static inline int
486 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
487 {
488         if (queue_id < dev->data->nb_queues && queue_id <
489                                 RTE_EVENT_MAX_QUEUES_PER_DEV)
490                 return 1;
491         else
492                 return 0;
493 }
494
495 int
496 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
497                                  struct rte_event_queue_conf *queue_conf)
498 {
499         struct rte_eventdev *dev;
500
501         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
502         dev = &rte_eventdevs[dev_id];
503
504         if (queue_conf == NULL)
505                 return -EINVAL;
506
507         if (!is_valid_queue(dev, queue_id)) {
508                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
509                 return -EINVAL;
510         }
511
512         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
513         memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
514         (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
515         return 0;
516 }
517
518 static inline int
519 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
520 {
521         if (queue_conf &&
522                 !(queue_conf->event_queue_cfg &
523                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
524                 ((queue_conf->event_queue_cfg &
525                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
526                 (queue_conf->schedule_type
527                         == RTE_SCHED_TYPE_ATOMIC)
528                 ))
529                 return 1;
530         else
531                 return 0;
532 }
533
534 static inline int
535 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
536 {
537         if (queue_conf &&
538                 !(queue_conf->event_queue_cfg &
539                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
540                 ((queue_conf->event_queue_cfg &
541                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
542                 (queue_conf->schedule_type
543                         == RTE_SCHED_TYPE_ORDERED)
544                 ))
545                 return 1;
546         else
547                 return 0;
548 }
549
550
551 int
552 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
553                       const struct rte_event_queue_conf *queue_conf)
554 {
555         struct rte_eventdev *dev;
556         struct rte_event_queue_conf def_conf;
557
558         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
559         dev = &rte_eventdevs[dev_id];
560
561         if (!is_valid_queue(dev, queue_id)) {
562                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
563                 return -EINVAL;
564         }
565
566         /* Check nb_atomic_flows limit */
567         if (is_valid_atomic_queue_conf(queue_conf)) {
568                 if (queue_conf->nb_atomic_flows == 0 ||
569                     queue_conf->nb_atomic_flows >
570                         dev->data->dev_conf.nb_event_queue_flows) {
571                         RTE_EDEV_LOG_ERR(
572                 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
573                         dev_id, queue_id, queue_conf->nb_atomic_flows,
574                         dev->data->dev_conf.nb_event_queue_flows);
575                         return -EINVAL;
576                 }
577         }
578
579         /* Check nb_atomic_order_sequences limit */
580         if (is_valid_ordered_queue_conf(queue_conf)) {
581                 if (queue_conf->nb_atomic_order_sequences == 0 ||
582                     queue_conf->nb_atomic_order_sequences >
583                         dev->data->dev_conf.nb_event_queue_flows) {
584                         RTE_EDEV_LOG_ERR(
585                 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
586                         dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
587                         dev->data->dev_conf.nb_event_queue_flows);
588                         return -EINVAL;
589                 }
590         }
591
592         if (dev->data->dev_started) {
593                 RTE_EDEV_LOG_ERR(
594                     "device %d must be stopped to allow queue setup", dev_id);
595                 return -EBUSY;
596         }
597
598         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
599
600         if (queue_conf == NULL) {
601                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
602                                         -ENOTSUP);
603                 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
604                 queue_conf = &def_conf;
605         }
606
607         dev->data->queues_cfg[queue_id] = *queue_conf;
608         rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
609         return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
610 }
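
/*
 * Illustrative sketch, with assumptions: setting up queue 0 as an atomic
 * queue. Passing a NULL queue_conf would request the driver defaults, as the
 * code above shows; here the defaults are fetched and adjusted instead.
 * handle_error() is a hypothetical error handler.
 *
 *	struct rte_event_queue_conf qconf;
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &qconf);
 *	qconf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qconf.nb_atomic_flows = 1024;	// must not exceed nb_event_queue_flows
 *	if (rte_event_queue_setup(dev_id, 0, &qconf) < 0)
 *		handle_error();
 */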
611
612 static inline int
613 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
614 {
615         if (port_id < dev->data->nb_ports)
616                 return 1;
617         else
618                 return 0;
619 }
620
621 int
622 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
623                                  struct rte_event_port_conf *port_conf)
624 {
625         struct rte_eventdev *dev;
626
627         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
628         dev = &rte_eventdevs[dev_id];
629
630         if (port_conf == NULL)
631                 return -EINVAL;
632
633         if (!is_valid_port(dev, port_id)) {
634                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
635                 return -EINVAL;
636         }
637
638         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
639         memset(port_conf, 0, sizeof(struct rte_event_port_conf));
640         (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
641         return 0;
642 }
643
644 int
645 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
646                      const struct rte_event_port_conf *port_conf)
647 {
648         struct rte_eventdev *dev;
649         struct rte_event_port_conf def_conf;
650         int diag;
651
652         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
653         dev = &rte_eventdevs[dev_id];
654
655         if (!is_valid_port(dev, port_id)) {
656                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
657                 return -EINVAL;
658         }
659
660         /* Check new_event_threshold limit */
661         if ((port_conf && !port_conf->new_event_threshold) ||
662                         (port_conf && port_conf->new_event_threshold >
663                                  dev->data->dev_conf.nb_events_limit)) {
664                 RTE_EDEV_LOG_ERR(
665                    "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
666                         dev_id, port_id, port_conf->new_event_threshold,
667                         dev->data->dev_conf.nb_events_limit);
668                 return -EINVAL;
669         }
670
671         /* Check dequeue_depth limit */
672         if ((port_conf && !port_conf->dequeue_depth) ||
673                         (port_conf && port_conf->dequeue_depth >
674                 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
675                 RTE_EDEV_LOG_ERR(
676                    "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
677                         dev_id, port_id, port_conf->dequeue_depth,
678                         dev->data->dev_conf.nb_event_port_dequeue_depth);
679                 return -EINVAL;
680         }
681
682         /* Check enqueue_depth limit */
683         if ((port_conf && !port_conf->enqueue_depth) ||
684                         (port_conf && port_conf->enqueue_depth >
685                 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
686                 RTE_EDEV_LOG_ERR(
687                    "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
688                         dev_id, port_id, port_conf->enqueue_depth,
689                         dev->data->dev_conf.nb_event_port_enqueue_depth);
690                 return -EINVAL;
691         }
692
693         if (port_conf &&
694             (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
695             !(dev->data->event_dev_cap &
696               RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
697                 RTE_EDEV_LOG_ERR(
698                    "dev%d port%d Implicit release disable not supported",
699                         dev_id, port_id);
700                 return -EINVAL;
701         }
702
703         if (dev->data->dev_started) {
704                 RTE_EDEV_LOG_ERR(
705                     "device %d must be stopped to allow port setup", dev_id);
706                 return -EBUSY;
707         }
708
709         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
710
711         if (port_conf == NULL) {
712                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
713                                         -ENOTSUP);
714                 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
715                 port_conf = &def_conf;
716         }
717
718         dev->data->ports_cfg[port_id] = *port_conf;
719
720         diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
721
722         /* Unlink all the queues from this port (default state after setup) */
723         if (!diag)
724                 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
725
726         rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
727         if (diag < 0)
728                 return diag;
729
730         return 0;
731 }
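
/*
 * Illustrative sketch: ports are commonly set up with the driver defaults,
 * which already satisfy the limits validated above; NULL selects the default
 * configuration exactly as this function does internally. nb_ports is the
 * assumed value from the device configuration, handle_error() is hypothetical.
 *
 *	uint8_t p;
 *
 *	for (p = 0; p < nb_ports; p++)
 *		if (rte_event_port_setup(dev_id, p, NULL) < 0)
 *			handle_error();
 */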
732
733 int
734 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
735                        uint32_t *attr_value)
736 {
737         struct rte_eventdev *dev;
738
739         if (!attr_value)
740                 return -EINVAL;
741         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
742         dev = &rte_eventdevs[dev_id];
743
744         switch (attr_id) {
745         case RTE_EVENT_DEV_ATTR_PORT_COUNT:
746                 *attr_value = dev->data->nb_ports;
747                 break;
748         case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
749                 *attr_value = dev->data->nb_queues;
750                 break;
751         case RTE_EVENT_DEV_ATTR_STARTED:
752                 *attr_value = dev->data->dev_started;
753                 break;
754         default:
755                 return -EINVAL;
756         }
757
758         return 0;
759 }
760
761 int
762 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
763                         uint32_t *attr_value)
764 {
765         struct rte_eventdev *dev;
766
767         if (!attr_value)
768                 return -EINVAL;
769
770         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
771         dev = &rte_eventdevs[dev_id];
772         if (!is_valid_port(dev, port_id)) {
773                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
774                 return -EINVAL;
775         }
776
777         switch (attr_id) {
778         case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
779                 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
780                 break;
781         case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
782                 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
783                 break;
784         case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
785                 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
786                 break;
787         case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
788         {
789                 uint32_t config;
790
791                 config = dev->data->ports_cfg[port_id].event_port_cfg;
792                 *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
793                 break;
794         }
795         default:
796                 return -EINVAL;
797         }
798         return 0;
799 }
800
801 int
802 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
803                         uint32_t *attr_value)
804 {
805         struct rte_event_queue_conf *conf;
806         struct rte_eventdev *dev;
807
808         if (!attr_value)
809                 return -EINVAL;
810
811         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
812         dev = &rte_eventdevs[dev_id];
813         if (!is_valid_queue(dev, queue_id)) {
814                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
815                 return -EINVAL;
816         }
817
818         conf = &dev->data->queues_cfg[queue_id];
819
820         switch (attr_id) {
821         case RTE_EVENT_QUEUE_ATTR_PRIORITY:
822                 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
823                 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
824                         *attr_value = conf->priority;
825                 break;
826         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
827                 *attr_value = conf->nb_atomic_flows;
828                 break;
829         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
830                 *attr_value = conf->nb_atomic_order_sequences;
831                 break;
832         case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
833                 *attr_value = conf->event_queue_cfg;
834                 break;
835         case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
836                 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
837                         return -EOVERFLOW;
838
839                 *attr_value = conf->schedule_type;
840                 break;
841         default:
842                 return -EINVAL;
843         }
844         return 0;
845 }
846
847 int
848 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
849                     const uint8_t queues[], const uint8_t priorities[],
850                     uint16_t nb_links)
851 {
852         struct rte_eventdev *dev;
853         uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
854         uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
855         uint16_t *links_map;
856         int i, diag;
857
858         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
859         dev = &rte_eventdevs[dev_id];
860
861         if (*dev->dev_ops->port_link == NULL) {
862                 RTE_EDEV_LOG_ERR("Function not supported");
863                 rte_errno = ENOTSUP;
864                 return 0;
865         }
866
867         if (!is_valid_port(dev, port_id)) {
868                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
869                 rte_errno = EINVAL;
870                 return 0;
871         }
872
873         if (queues == NULL) {
874                 for (i = 0; i < dev->data->nb_queues; i++)
875                         queues_list[i] = i;
876
877                 queues = queues_list;
878                 nb_links = dev->data->nb_queues;
879         }
880
881         if (priorities == NULL) {
882                 for (i = 0; i < nb_links; i++)
883                         priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
884
885                 priorities = priorities_list;
886         }
887
888         for (i = 0; i < nb_links; i++)
889                 if (queues[i] >= dev->data->nb_queues) {
890                         rte_errno = EINVAL;
891                         return 0;
892                 }
893
894         diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
895                                                 queues, priorities, nb_links);
896         if (diag < 0)
897                 return diag;
898
899         links_map = dev->data->links_map;
900         /* Point links_map to this port-specific area */
901         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
902         for (i = 0; i < diag; i++)
903                 links_map[queues[i]] = (uint8_t)priorities[i];
904
905         rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
906         return diag;
907 }
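
/*
 * Illustrative sketch: NULL queue and priority arrays link the port to every
 * configured queue at normal priority, as the defaults filled in above show.
 * The return value is the number of links actually established. nb_queues is
 * the assumed configured queue count and handle_error() is hypothetical.
 *
 *	int nb_linked = rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *
 *	if (nb_linked != nb_queues)
 *		handle_error();
 */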
908
909 int
910 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
911                       uint8_t queues[], uint16_t nb_unlinks)
912 {
913         struct rte_eventdev *dev;
914         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
915         int i, diag, j;
916         uint16_t *links_map;
917
918         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
919         dev = &rte_eventdevs[dev_id];
920
921         if (*dev->dev_ops->port_unlink == NULL) {
922                 RTE_EDEV_LOG_ERR("Function not supported");
923                 rte_errno = ENOTSUP;
924                 return 0;
925         }
926
927         if (!is_valid_port(dev, port_id)) {
928                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
929                 rte_errno = EINVAL;
930                 return 0;
931         }
932
933         links_map = dev->data->links_map;
934         /* Point links_map to this port-specific area */
935         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
936
937         if (queues == NULL) {
938                 j = 0;
939                 for (i = 0; i < dev->data->nb_queues; i++) {
940                         if (links_map[i] !=
941                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
942                                 all_queues[j] = i;
943                                 j++;
944                         }
945                 }
946                 queues = all_queues;
947         } else {
948                 for (j = 0; j < nb_unlinks; j++) {
949                         if (links_map[queues[j]] ==
950                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
951                                 break;
952                 }
953         }
954
955         nb_unlinks = j;
956         for (i = 0; i < nb_unlinks; i++)
957                 if (queues[i] >= dev->data->nb_queues) {
958                         rte_errno = EINVAL;
959                         return 0;
960                 }
961
962         diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
963                                         queues, nb_unlinks);
964
965         if (diag < 0)
966                 return diag;
967
968         for (i = 0; i < diag; i++)
969                 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
970
971         rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
972         return diag;
973 }
974
975 int
976 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
977 {
978         struct rte_eventdev *dev;
979
980         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
981         dev = &rte_eventdevs[dev_id];
982         if (!is_valid_port(dev, port_id)) {
983                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
984                 return -EINVAL;
985         }
986
987         /* Return 0 if the PMD does not implement unlinks in progress.
988          * This allows PMDs which handle unlink synchronously to not implement
989          * this function at all.
990          */
991         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
992
993         return (*dev->dev_ops->port_unlinks_in_progress)(dev,
994                         dev->data->ports[port_id]);
995 }
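
/*
 * Illustrative sketch: after requesting unlinks, an application may poll this
 * function until they have completed, e.g. before releasing per-flow state.
 * rte_pause() is used here only as a busy-wait hint.
 *
 *	rte_event_port_unlink(dev_id, port_id, NULL, 0);
 *	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
 *		rte_pause();
 */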
996
997 int
998 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
999                          uint8_t queues[], uint8_t priorities[])
1000 {
1001         struct rte_eventdev *dev;
1002         uint16_t *links_map;
1003         int i, count = 0;
1004
1005         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1006         dev = &rte_eventdevs[dev_id];
1007         if (!is_valid_port(dev, port_id)) {
1008                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1009                 return -EINVAL;
1010         }
1011
1012         links_map = dev->data->links_map;
1013         /* Point links_map to this port-specific area */
1014         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1015         for (i = 0; i < dev->data->nb_queues; i++) {
1016                 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1017                         queues[count] = i;
1018                         priorities[count] = (uint8_t)links_map[i];
1019                         ++count;
1020                 }
1021         }
1022         return count;
1023 }
1024
1025 int
1026 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1027                                  uint64_t *timeout_ticks)
1028 {
1029         struct rte_eventdev *dev;
1030
1031         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1032         dev = &rte_eventdevs[dev_id];
1033         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1034
1035         if (timeout_ticks == NULL)
1036                 return -EINVAL;
1037
1038         return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1039 }
1040
1041 int
1042 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1043 {
1044         struct rte_eventdev *dev;
1045
1046         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1047         dev = &rte_eventdevs[dev_id];
1048
1049         if (service_id == NULL)
1050                 return -EINVAL;
1051
1052         if (dev->data->service_inited)
1053                 *service_id = dev->data->service_id;
1054
1055         return dev->data->service_inited ? 0 : -ESRCH;
1056 }
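
/*
 * Illustrative sketch, with assumptions: software event devices expose their
 * scheduler as a service. The application maps that service to a service
 * lcore and enables it; lcore_id is an assumed, application-chosen core that
 * has already been added as a service lcore.
 *
 *	uint32_t service_id;
 *
 *	if (rte_event_dev_service_id_get(dev_id, &service_id) == 0) {
 *		rte_service_map_lcore_set(service_id, lcore_id, 1);
 *		rte_service_runstate_set(service_id, 1);
 *	}
 */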
1057
1058 int
1059 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1060 {
1061         struct rte_eventdev *dev;
1062
1063         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1064         dev = &rte_eventdevs[dev_id];
1065         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1066         if (f == NULL)
1067                 return -EINVAL;
1068
1069         (*dev->dev_ops->dump)(dev, f);
1070         return 0;
1071
1072 }
1073
1074 static int
1075 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1076                 uint8_t queue_port_id)
1077 {
1078         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1079         if (dev->dev_ops->xstats_get_names != NULL)
1080                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1081                                                         queue_port_id,
1082                                                         NULL, NULL, 0);
1083         return 0;
1084 }
1085
1086 int
1087 rte_event_dev_xstats_names_get(uint8_t dev_id,
1088                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1089                 struct rte_event_dev_xstats_name *xstats_names,
1090                 unsigned int *ids, unsigned int size)
1091 {
1092         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1093         const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1094                                                           queue_port_id);
1095         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1096                         (int)size < cnt_expected_entries)
1097                 return cnt_expected_entries;
1098
1099         /* dev_id checked above */
1100         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1101
1102         if (dev->dev_ops->xstats_get_names != NULL)
1103                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1104                                 queue_port_id, xstats_names, ids, size);
1105
1106         return -ENOTSUP;
1107 }
1108
1109 /* retrieve eventdev extended statistics */
1110 int
1111 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1112                 uint8_t queue_port_id, const unsigned int ids[],
1113                 uint64_t values[], unsigned int n)
1114 {
1115         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1116         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1117
1118         /* implemented by the driver */
1119         if (dev->dev_ops->xstats_get != NULL)
1120                 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1121                                 ids, values, n);
1122         return -ENOTSUP;
1123 }
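
/*
 * Illustrative sketch of the usual two-step xstats retrieval: a size query
 * with NULL names first, then the actual fetch. Allocation failure checks and
 * freeing of the arrays are omitted for brevity.
 *
 *	int n = rte_event_dev_xstats_names_get(dev_id,
 *			RTE_EVENT_DEV_XSTATS_DEVICE, 0, NULL, NULL, 0);
 *
 *	if (n > 0) {
 *		struct rte_event_dev_xstats_name *names = calloc(n, sizeof(*names));
 *		unsigned int *ids = calloc(n, sizeof(*ids));
 *		uint64_t *vals = calloc(n, sizeof(*vals));
 *
 *		rte_event_dev_xstats_names_get(dev_id,
 *				RTE_EVENT_DEV_XSTATS_DEVICE, 0, names, ids, n);
 *		rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
 *					 0, ids, vals, n);
 *	}
 */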
1124
1125 uint64_t
1126 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1127                 unsigned int *id)
1128 {
1129         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1130         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1131         unsigned int temp = -1;
1132
1133         if (id != NULL)
1134                 *id = (unsigned int)-1;
1135         else
1136                 id = &temp; /* ensure driver never gets a NULL value */
1137
1138         /* implemented by driver */
1139         if (dev->dev_ops->xstats_get_by_name != NULL)
1140                 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1141         return -ENOTSUP;
1142 }
1143
1144 int rte_event_dev_xstats_reset(uint8_t dev_id,
1145                 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1146                 const uint32_t ids[], uint32_t nb_ids)
1147 {
1148         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1149         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1150
1151         if (dev->dev_ops->xstats_reset != NULL)
1152                 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1153                                                         ids, nb_ids);
1154         return -ENOTSUP;
1155 }
1156
1157 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1158
1159 int rte_event_dev_selftest(uint8_t dev_id)
1160 {
1161         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1162         static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1163                 .name = "rte_event_pmd_selftest_seqn_dynfield",
1164                 .size = sizeof(rte_event_pmd_selftest_seqn_t),
1165                 .align = __alignof__(rte_event_pmd_selftest_seqn_t),
1166         };
1167         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1168
1169         if (dev->dev_ops->dev_selftest != NULL) {
1170                 rte_event_pmd_selftest_seqn_dynfield_offset =
1171                         rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1172                 if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1173                         return -ENOMEM;
1174                 return (*dev->dev_ops->dev_selftest)();
1175         }
1176         return -ENOTSUP;
1177 }
1178
1179 struct rte_mempool *
1180 rte_event_vector_pool_create(const char *name, unsigned int n,
1181                              unsigned int cache_size, uint16_t nb_elem,
1182                              int socket_id)
1183 {
1184         const char *mp_ops_name;
1185         struct rte_mempool *mp;
1186         unsigned int elt_sz;
1187         int ret;
1188
1189         if (!nb_elem) {
1190                 RTE_LOG(ERR, EVENTDEV,
1191                         "Invalid number of elements=%d requested\n", nb_elem);
1192                 rte_errno = EINVAL;
1193                 return NULL;
1194         }
1195
1196         elt_sz =
1197                 sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1198         mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1199                                       0);
1200         if (mp == NULL)
1201                 return NULL;
1202
1203         mp_ops_name = rte_mbuf_best_mempool_ops();
1204         ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1205         if (ret != 0) {
1206                 RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
1207                 goto err;
1208         }
1209
1210         ret = rte_mempool_populate_default(mp);
1211         if (ret < 0)
1212                 goto err;
1213
1214         return mp;
1215 err:
1216         rte_mempool_free(mp);
1217         rte_errno = -ret;
1218         return NULL;
1219 }
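
/*
 * Illustrative sketch: creating a pool of event vectors that can each carry
 * up to 32 pointers. The pool name and sizing values are arbitrary example
 * numbers, not recommendations; handle_error() is hypothetical.
 *
 *	struct rte_mempool *vec_pool;
 *
 *	vec_pool = rte_event_vector_pool_create("evt_vec_pool", 16384, 256,
 *						32, rte_socket_id());
 *	if (vec_pool == NULL)
 *		handle_error();
 */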
1220
1221 int
1222 rte_event_dev_start(uint8_t dev_id)
1223 {
1224         struct rte_eventdev *dev;
1225         int diag;
1226
1227         RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1228
1229         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1230         dev = &rte_eventdevs[dev_id];
1231         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1232
1233         if (dev->data->dev_started != 0) {
1234                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1235                         dev_id);
1236                 return 0;
1237         }
1238
1239         diag = (*dev->dev_ops->dev_start)(dev);
1240         rte_eventdev_trace_start(dev_id, diag);
1241         if (diag == 0)
1242                 dev->data->dev_started = 1;
1243         else
1244                 return diag;
1245
1246         event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
1247
1248         return 0;
1249 }
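
/*
 * Illustrative sketch of the surrounding lifecycle: configure the device and
 * set up its queues and ports first, then start it; stop and close it on
 * shutdown. handle_error() is a hypothetical error handler.
 *
 *	if (rte_event_dev_start(dev_id) < 0)
 *		handle_error();
 *	// ... enqueue/dequeue events on worker lcores ...
 *	rte_event_dev_stop(dev_id);
 *	rte_event_dev_close(dev_id);
 */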
1250
1251 int
1252 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1253                 eventdev_stop_flush_t callback, void *userdata)
1254 {
1255         struct rte_eventdev *dev;
1256
1257         RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1258
1259         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1260         dev = &rte_eventdevs[dev_id];
1261
1262         dev->dev_ops->dev_stop_flush = callback;
1263         dev->data->dev_stop_flush_arg = userdata;
1264
1265         return 0;
1266 }
1267
1268 void
1269 rte_event_dev_stop(uint8_t dev_id)
1270 {
1271         struct rte_eventdev *dev;
1272
1273         RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1274
1275         RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1276         dev = &rte_eventdevs[dev_id];
1277         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1278
1279         if (dev->data->dev_started == 0) {
1280                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1281                         dev_id);
1282                 return;
1283         }
1284
1285         dev->data->dev_started = 0;
1286         (*dev->dev_ops->dev_stop)(dev);
1287         rte_eventdev_trace_stop(dev_id);
1288         event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1289 }
1290
1291 int
1292 rte_event_dev_close(uint8_t dev_id)
1293 {
1294         struct rte_eventdev *dev;
1295
1296         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1297         dev = &rte_eventdevs[dev_id];
1298         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1299
1300         /* Device must be stopped before it can be closed */
1301         if (dev->data->dev_started == 1) {
1302                 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1303                                 dev_id);
1304                 return -EBUSY;
1305         }
1306
1307         event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1308         rte_eventdev_trace_close(dev_id);
1309         return (*dev->dev_ops->dev_close)(dev);
1310 }
1311
1312 static inline int
1313 eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1314                     int socket_id)
1315 {
1316         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1317         const struct rte_memzone *mz;
1318         int n;
1319
1320         /* Generate memzone name */
1321         n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1322         if (n >= (int)sizeof(mz_name))
1323                 return -EINVAL;
1324
1325         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1326                 mz = rte_memzone_reserve(mz_name,
1327                                 sizeof(struct rte_eventdev_data),
1328                                 socket_id, 0);
1329         } else
1330                 mz = rte_memzone_lookup(mz_name);
1331
1332         if (mz == NULL)
1333                 return -ENOMEM;
1334
1335         *data = mz->addr;
1336         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1337                 memset(*data, 0, sizeof(struct rte_eventdev_data));
1338                 for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
1339                                         RTE_EVENT_MAX_QUEUES_PER_DEV;
1340                      n++)
1341                         (*data)->links_map[n] =
1342                                 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1343         }
1344
1345         return 0;
1346 }
1347
1348 static inline uint8_t
1349 eventdev_find_free_device_index(void)
1350 {
1351         uint8_t dev_id;
1352
1353         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1354                 if (rte_eventdevs[dev_id].attached ==
1355                                 RTE_EVENTDEV_DETACHED)
1356                         return dev_id;
1357         }
1358         return RTE_EVENT_MAX_DEVS;
1359 }
1360
1361 struct rte_eventdev *
1362 rte_event_pmd_allocate(const char *name, int socket_id)
1363 {
1364         struct rte_eventdev *eventdev;
1365         uint8_t dev_id;
1366
1367         if (rte_event_pmd_get_named_dev(name) != NULL) {
1368                 RTE_EDEV_LOG_ERR("Event device with name %s already "
1369                                 "allocated!", name);
1370                 return NULL;
1371         }
1372
1373         dev_id = eventdev_find_free_device_index();
1374         if (dev_id == RTE_EVENT_MAX_DEVS) {
1375                 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1376                 return NULL;
1377         }
1378
1379         eventdev = &rte_eventdevs[dev_id];
1380
1381         if (eventdev->data == NULL) {
1382                 struct rte_eventdev_data *eventdev_data = NULL;
1383
1384                 int retval =
1385                         eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1386
1387                 if (retval < 0 || eventdev_data == NULL)
1388                         return NULL;
1389
1390                 eventdev->data = eventdev_data;
1391
1392                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1393
1394                         strlcpy(eventdev->data->name, name,
1395                                 RTE_EVENTDEV_NAME_MAX_LEN);
1396
1397                         eventdev->data->dev_id = dev_id;
1398                         eventdev->data->socket_id = socket_id;
1399                         eventdev->data->dev_started = 0;
1400                 }
1401
1402                 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1403                 eventdev_globals.nb_devs++;
1404         }
1405
1406         return eventdev;
1407 }
1408
1409 int
1410 rte_event_pmd_release(struct rte_eventdev *eventdev)
1411 {
1412         int ret;
1413         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1414         const struct rte_memzone *mz;
1415
1416         if (eventdev == NULL)
1417                 return -EINVAL;
1418
1419         event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
1420         eventdev->attached = RTE_EVENTDEV_DETACHED;
1421         eventdev_globals.nb_devs--;
1422
1423         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1424                 rte_free(eventdev->data->dev_private);
1425
1426                 /* Generate memzone name */
1427                 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1428                                 eventdev->data->dev_id);
1429                 if (ret >= (int)sizeof(mz_name))
1430                         return -EINVAL;
1431
1432                 mz = rte_memzone_lookup(mz_name);
1433                 if (mz == NULL)
1434                         return -ENOMEM;
1435
1436                 ret = rte_memzone_free(mz);
1437                 if (ret)
1438                         return ret;
1439         }
1440
1441         eventdev->data = NULL;
1442         return 0;
1443 }
1444
1445 void
1446 event_dev_probing_finish(struct rte_eventdev *eventdev)
1447 {
1448         if (eventdev == NULL)
1449                 return;
1450
1451         event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
1452                              eventdev);
1453 }
1454
1455 static int
1456 handle_dev_list(const char *cmd __rte_unused,
1457                 const char *params __rte_unused,
1458                 struct rte_tel_data *d)
1459 {
1460         uint8_t dev_id;
1461         int ndev = rte_event_dev_count();
1462
1463         if (ndev < 1)
1464                 return -1;
1465
1466         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1467         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1468                 if (rte_eventdevs[dev_id].attached ==
1469                                 RTE_EVENTDEV_ATTACHED)
1470                         rte_tel_data_add_array_int(d, dev_id);
1471         }
1472
1473         return 0;
1474 }
1475
1476 static int
1477 handle_port_list(const char *cmd __rte_unused,
1478                  const char *params,
1479                  struct rte_tel_data *d)
1480 {
1481         int i;
1482         uint8_t dev_id;
1483         struct rte_eventdev *dev;
1484         char *end_param;
1485
1486         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1487                 return -1;
1488
1489         dev_id = strtoul(params, &end_param, 10);
1490         if (*end_param != '\0')
1491                 RTE_EDEV_LOG_DEBUG(
1492                         "Extra parameters passed to eventdev telemetry command, ignoring");
1493
1494         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1495         dev = &rte_eventdevs[dev_id];
1496
1497         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1498         for (i = 0; i < dev->data->nb_ports; i++)
1499                 rte_tel_data_add_array_int(d, i);
1500
1501         return 0;
1502 }
1503
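/* Telemetry callback: list the queue IDs of one event device. Parameter: DevID. */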
1504 static int
1505 handle_queue_list(const char *cmd __rte_unused,
1506                   const char *params,
1507                   struct rte_tel_data *d)
1508 {
1509         int i;
1510         uint8_t dev_id;
1511         struct rte_eventdev *dev;
1512         char *end_param;
1513
1514         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1515                 return -1;
1516
1517         dev_id = strtoul(params, &end_param, 10);
1518         if (*end_param != '\0')
1519                 RTE_EDEV_LOG_DEBUG(
1520                         "Extra parameters passed to eventdev telemetry command, ignoring");
1521
1522         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1523         dev = &rte_eventdevs[dev_id];
1524
1525         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1526         for (i = 0; i < dev->data->nb_queues; i++)
1527                 rte_tel_data_add_array_int(d, i);
1528
1529         return 0;
1530 }
1531
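/*
 * Telemetry callback: report the queues linked to one event port and the
 * priority of each link. Parameters: DevID,PortID.
 */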
1532 static int
1533 handle_queue_links(const char *cmd __rte_unused,
1534                    const char *params,
1535                    struct rte_tel_data *d)
1536 {
1537         int i, ret, port_id = 0;
1538         char *end_param;
1539         uint8_t dev_id;
1540         uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1541         uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1542         const char *p_param;
1543
1544         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1545                 return -1;
1546
1547         /* Get dev ID from parameter string */
1548         dev_id = strtoul(params, &end_param, 10);
1549         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1550
1551         p_param = strtok(end_param, ",");
1552         if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1553                 return -1;
1554
1555         port_id = strtoul(p_param, &end_param, 10);
1556         p_param = strtok(NULL, "\0");
1557         if (p_param != NULL)
1558                 RTE_EDEV_LOG_DEBUG(
1559                         "Extra parameters passed to eventdev telemetry command, ignoring");
1560
1561         ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1562         if (ret < 0)
1563                 return -1;
1564
1565         rte_tel_data_start_dict(d);
1566         for (i = 0; i < ret; i++) {
1567                 char qid_name[32];
1568
1569                 snprintf(qid_name, sizeof(qid_name), "qid_%u", queues[i]);
1570                 rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
1571         }
1572
1573         return 0;
1574 }
1575
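/*
 * Fetch the xstats names and values for one device, port or queue (selected
 * by 'mode' and 'port_queue_id') and add them to the telemetry dict 'd'.
 */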
1576 static int
1577 eventdev_build_telemetry_data(int dev_id,
1578                               enum rte_event_dev_xstats_mode mode,
1579                               int port_queue_id,
1580                               struct rte_tel_data *d)
1581 {
1582         struct rte_event_dev_xstats_name *xstat_names;
1583         unsigned int *ids;
1584         uint64_t *values;
1585         int i, ret, num_xstats;
1586
1587         num_xstats = rte_event_dev_xstats_names_get(dev_id,
1588                                                     mode,
1589                                                     port_queue_id,
1590                                                     NULL,
1591                                                     NULL,
1592                                                     0);
1593
1594         if (num_xstats < 0)
1595                 return -1;
1596
1597         /* use one malloc for names */
1598         xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1599                              * num_xstats);
1600         if (xstat_names == NULL)
1601                 return -1;
1602
1603         ids = malloc((sizeof(unsigned int)) * num_xstats);
1604         if (ids == NULL) {
1605                 free(xstat_names);
1606                 return -1;
1607         }
1608
1609         values = malloc((sizeof(uint64_t)) * num_xstats);
1610         if (values == NULL) {
1611                 free(xstat_names);
1612                 free(ids);
1613                 return -1;
1614         }
1615
1616         ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1617                                              xstat_names, ids, num_xstats);
1618         if (ret < 0 || ret > num_xstats) {
1619                 free(xstat_names);
1620                 free(ids);
1621                 free(values);
1622                 return -1;
1623         }
1624
1625         ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1626                                        ids, values, num_xstats);
1627         if (ret < 0 || ret > num_xstats) {
1628                 free(xstat_names);
1629                 free(ids);
1630                 free(values);
1631                 return -1;
1632         }
1633
1634         rte_tel_data_start_dict(d);
1635         for (i = 0; i < num_xstats; i++)
1636                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
1637                                           values[i]);
1638
1639         free(xstat_names);
1640         free(ids);
1641         free(values);
1642         return 0;
1643 }
1644
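/* Telemetry callback: device-level xstats. Parameter: DevID. */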
1645 static int
1646 handle_dev_xstats(const char *cmd __rte_unused,
1647                   const char *params,
1648                   struct rte_tel_data *d)
1649 {
1650         int dev_id;
1651         enum rte_event_dev_xstats_mode mode;
1652         char *end_param;
1653
1654         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1655                 return -1;
1656
1657         /* Get dev ID from parameter string */
1658         dev_id = strtoul(params, &end_param, 10);
1659         if (*end_param != '\0')
1660                 RTE_EDEV_LOG_DEBUG(
1661                         "Extra parameters passed to eventdev telemetry command, ignoring");
1662
1663         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1664
1665         mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1666         return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1667 }
1668
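/* Telemetry callback: per-port xstats. Parameters: DevID,PortID. */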
1669 static int
1670 handle_port_xstats(const char *cmd __rte_unused,
1671                    const char *params,
1672                    struct rte_tel_data *d)
1673 {
1674         int dev_id;
1675         int port_queue_id = 0;
1676         enum rte_event_dev_xstats_mode mode;
1677         char *end_param;
1678         const char *p_param;
1679
1680         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1681                 return -1;
1682
1683         /* Get dev ID from parameter string */
1684         dev_id = strtoul(params, &end_param, 10);
1685         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1686
1687         p_param = strtok(end_param, ",");
1688         mode = RTE_EVENT_DEV_XSTATS_PORT;
1689
1690         if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1691                 return -1;
1692
1693         port_queue_id = strtoul(p_param, &end_param, 10);
1694
1695         p_param = strtok(NULL, "\0");
1696         if (p_param != NULL)
1697                 RTE_EDEV_LOG_DEBUG(
1698                         "Extra parameters passed to eventdev telemetry command, ignoring");
1699
1700         return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1701 }
1702
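/* Telemetry callback: per-queue xstats. Parameters: DevID,QueueID. */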
1703 static int
1704 handle_queue_xstats(const char *cmd __rte_unused,
1705                     const char *params,
1706                     struct rte_tel_data *d)
1707 {
1708         int dev_id;
1709         int port_queue_id = 0;
1710         enum rte_event_dev_xstats_mode mode;
1711         char *end_param;
1712         const char *p_param;
1713
1714         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1715                 return -1;
1716
1717         /* Get dev ID from parameter string */
1718         dev_id = strtoul(params, &end_param, 10);
1719         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1720
1721         p_param = strtok(end_param, ",");
1722         mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1723
1724         if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1725                 return -1;
1726
1727         port_queue_id = strtoul(p_param, &end_param, 10);
1728
1729         p_param = strtok(NULL, "\0");
1730         if (p_param != NULL)
1731                 RTE_EDEV_LOG_DEBUG(
1732                         "Extra parameters passed to eventdev telemetry command, ignoring");
1733
1734         return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1735 }
1736
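/* Register the eventdev telemetry endpoints at constructor time. */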
1737 RTE_INIT(eventdev_init_telemetry)
1738 {
1739         rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
1740                         "Returns list of available eventdevs. Takes no parameters");
1741         rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
1742                         "Returns list of available ports. Parameter: DevID");
1743         rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
1744                         "Returns list of available queues. Parameter: DevID");
1745
1746         rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
1747                         "Returns stats for an eventdev. Parameter: DevID");
1748         rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
1749                         "Returns stats for an eventdev port. Params: DevID,PortID");
1750         rte_telemetry_register_cmd("/eventdev/queue_xstats",
1751                         handle_queue_xstats,
1752                         "Returns stats for an eventdev queue. Params: DevID,QueueID");
1753         rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
1754                         "Returns links for an eventdev port. Params: DevID,PortID");
1755 }
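
/*
 * Illustrative example of querying these endpoints (assuming the standard
 * usertools/dpdk-telemetry.py client; actual device IDs and xstat names
 * depend on the application and on the PMD in use):
 *
 *   --> /eventdev/dev_list
 *   {"/eventdev/dev_list": [0]}
 *   --> /eventdev/port_xstats,0,1
 *   {"/eventdev/port_xstats": {"port_1_rx": 0, ...}}
 */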