eventdev: allocate max space for internal arrays
dpdk.git: lib/eventdev/rte_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15
16 #include <rte_string_fns.h>
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_eal.h>
25 #include <rte_per_lcore.h>
26 #include <rte_lcore.h>
27 #include <rte_atomic.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_common.h>
30 #include <rte_malloc.h>
31 #include <rte_errno.h>
32 #include <ethdev_driver.h>
33 #include <rte_cryptodev.h>
34 #include <cryptodev_pmd.h>
35 #include <rte_telemetry.h>
36
37 #include "rte_eventdev.h"
38 #include "eventdev_pmd.h"
39 #include "rte_eventdev_trace.h"
40
41 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
42
43 struct rte_eventdev *rte_eventdevs = rte_event_devices;
44
45 static struct rte_eventdev_global eventdev_globals = {
46         .nb_devs                = 0
47 };
48
49 /* Event dev north bound API implementation */
50
51 uint8_t
52 rte_event_dev_count(void)
53 {
54         return eventdev_globals.nb_devs;
55 }
56
57 int
58 rte_event_dev_get_dev_id(const char *name)
59 {
60         int i;
61         uint8_t cmp;
62
63         if (!name)
64                 return -EINVAL;
65
66         for (i = 0; i < eventdev_globals.nb_devs; i++) {
67                 cmp = (strncmp(rte_event_devices[i].data->name, name,
68                                 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
69                         (rte_event_devices[i].dev ? (strncmp(
70                                 rte_event_devices[i].dev->driver->name, name,
71                                          RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
72                 if (cmp && (rte_event_devices[i].attached ==
73                                         RTE_EVENTDEV_ATTACHED))
74                         return i;
75         }
76         return -ENODEV;
77 }
78
79 int
80 rte_event_dev_socket_id(uint8_t dev_id)
81 {
82         struct rte_eventdev *dev;
83
84         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
85         dev = &rte_eventdevs[dev_id];
86
87         return dev->data->socket_id;
88 }
89
90 int
91 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
92 {
93         struct rte_eventdev *dev;
94
95         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
96         dev = &rte_eventdevs[dev_id];
97
98         if (dev_info == NULL)
99                 return -EINVAL;
100
101         memset(dev_info, 0, sizeof(struct rte_event_dev_info));
102
103         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
104         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
105
106         dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
107
108         dev_info->dev = dev->dev;
109         return 0;
110 }
111
112 int
113 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
114                                 uint32_t *caps)
115 {
116         struct rte_eventdev *dev;
117
118         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
119         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
120
121         dev = &rte_eventdevs[dev_id];
122
123         if (caps == NULL)
124                 return -EINVAL;
125
126         if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
127                 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
128         else
129                 *caps = 0;
130
131         return dev->dev_ops->eth_rx_adapter_caps_get ?
132                                 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
133                                                 &rte_eth_devices[eth_port_id],
134                                                 caps)
135                                 : 0;
136 }
137
138 int
139 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
140 {
141         struct rte_eventdev *dev;
142         const struct rte_event_timer_adapter_ops *ops;
143
144         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
145
146         dev = &rte_eventdevs[dev_id];
147
148         if (caps == NULL)
149                 return -EINVAL;
150         *caps = 0;
151
152         return dev->dev_ops->timer_adapter_caps_get ?
153                                 (*dev->dev_ops->timer_adapter_caps_get)(dev,
154                                                                         0,
155                                                                         caps,
156                                                                         &ops)
157                                 : 0;
158 }
159
160 int
161 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
162                                   uint32_t *caps)
163 {
164         struct rte_eventdev *dev;
165         struct rte_cryptodev *cdev;
166
167         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
168         if (!rte_cryptodev_is_valid_dev(cdev_id))
169                 return -EINVAL;
170
171         dev = &rte_eventdevs[dev_id];
172         cdev = rte_cryptodev_pmd_get_dev(cdev_id);
173
174         if (caps == NULL)
175                 return -EINVAL;
176         *caps = 0;
177
178         return dev->dev_ops->crypto_adapter_caps_get ?
179                 (*dev->dev_ops->crypto_adapter_caps_get)
180                 (dev, cdev, caps) : -ENOTSUP;
181 }
182
183 int
184 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
185                                 uint32_t *caps)
186 {
187         struct rte_eventdev *dev;
188         struct rte_eth_dev *eth_dev;
189
190         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
191         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
192
193         dev = &rte_eventdevs[dev_id];
194         eth_dev = &rte_eth_devices[eth_port_id];
195
196         if (caps == NULL)
197                 return -EINVAL;
198
199         if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
200                 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
201         else
202                 *caps = 0;
203
204         return dev->dev_ops->eth_tx_adapter_caps_get ?
205                         (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
206                                                                 eth_dev,
207                                                                 caps)
208                         : 0;
209 }
210
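/*
 * Internal helper: resize the set of configured event queues. Queues beyond
 * the new count are released through the PMD's queue_release op, and the
 * config entries for newly added queues are zeroed; the queues_cfg array is
 * statically sized, so no reallocation is needed here.
 */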
211 static inline int
212 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
213 {
214         uint8_t old_nb_queues = dev->data->nb_queues;
215         struct rte_event_queue_conf *queues_cfg;
216         unsigned int i;
217
218         RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
219                          dev->data->dev_id);
220
221         if (nb_queues != 0) {
222                 queues_cfg = dev->data->queues_cfg;
223                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
224
225                 for (i = nb_queues; i < old_nb_queues; i++)
226                         (*dev->dev_ops->queue_release)(dev, i);
227
228
229                 if (nb_queues > old_nb_queues) {
230                         uint8_t new_qs = nb_queues - old_nb_queues;
231
232                         memset(queues_cfg + old_nb_queues, 0,
233                                 sizeof(queues_cfg[0]) * new_qs);
234                 }
235         } else {
236                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
237
238                 for (i = nb_queues; i < old_nb_queues; i++)
239                         (*dev->dev_ops->queue_release)(dev, i);
240         }
241
242         dev->data->nb_queues = nb_queues;
243         return 0;
244 }
245
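/* Sentinel stored in links_map[] for a queue that is not linked to a port. */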
246 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
247
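/*
 * Internal helper: resize the set of configured event ports. Ports beyond the
 * new count are released through the PMD's port_release op; entries for newly
 * added ports are zeroed and their links_map slots marked as unlinked.
 */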
248 static inline int
249 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
250 {
251         uint8_t old_nb_ports = dev->data->nb_ports;
252         void **ports;
253         uint16_t *links_map;
254         struct rte_event_port_conf *ports_cfg;
255         unsigned int i;
256
257         RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
258                          dev->data->dev_id);
259
260         if (nb_ports != 0) { /* re-config */
261                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
262
263                 ports = dev->data->ports;
264                 ports_cfg = dev->data->ports_cfg;
265                 links_map = dev->data->links_map;
266
267                 for (i = nb_ports; i < old_nb_ports; i++)
268                         (*dev->dev_ops->port_release)(ports[i]);
269
270                 if (nb_ports > old_nb_ports) {
271                         uint8_t new_ps = nb_ports - old_nb_ports;
272                         unsigned int old_links_map_end =
273                                 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
274                         unsigned int links_map_end =
275                                 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
276
277                         memset(ports + old_nb_ports, 0,
278                                 sizeof(ports[0]) * new_ps);
279                         memset(ports_cfg + old_nb_ports, 0,
280                                 sizeof(ports_cfg[0]) * new_ps);
281                         for (i = old_links_map_end; i < links_map_end; i++)
282                                 links_map[i] =
283                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
284                 }
285         } else {
286                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
287
288                 ports = dev->data->ports;
289                 for (i = nb_ports; i < old_nb_ports; i++) {
290                         (*dev->dev_ops->port_release)(ports[i]);
291                         ports[i] = NULL;
292                 }
293         }
294
295         dev->data->nb_ports = nb_ports;
296         return 0;
297 }
298
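/*
 * Validate the supplied configuration against the limits reported by the
 * PMD's dev_infos_get op, resize the queue and port arrays, and invoke the
 * PMD's dev_configure op. If the PMD fails, the queue and port counts are
 * rolled back to zero.
 *
 * A minimal usage sketch (all values below are illustrative, not defaults):
 *
 *     struct rte_event_dev_config cfg = {
 *             .nb_event_queues = 2,
 *             .nb_event_ports = 2,
 *             .nb_events_limit = 4096,
 *             .nb_event_queue_flows = 1024,
 *             .nb_event_port_dequeue_depth = 16,
 *             .nb_event_port_enqueue_depth = 16,
 *     };
 *
 *     if (rte_event_dev_configure(dev_id, &cfg) < 0)
 *             rte_panic("eventdev %u configure failed\n", dev_id);
 */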
299 int
300 rte_event_dev_configure(uint8_t dev_id,
301                         const struct rte_event_dev_config *dev_conf)
302 {
303         struct rte_eventdev *dev;
304         struct rte_event_dev_info info;
305         int diag;
306
307         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
308         dev = &rte_eventdevs[dev_id];
309
310         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
311         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
312
313         if (dev->data->dev_started) {
314                 RTE_EDEV_LOG_ERR(
315                     "device %d must be stopped to allow configuration", dev_id);
316                 return -EBUSY;
317         }
318
319         if (dev_conf == NULL)
320                 return -EINVAL;
321
322         (*dev->dev_ops->dev_infos_get)(dev, &info);
323
324         /* Check dequeue_timeout_ns value is within the supported range */
325         if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
326                 if (dev_conf->dequeue_timeout_ns &&
327                     (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
328                         || dev_conf->dequeue_timeout_ns >
329                                  info.max_dequeue_timeout_ns)) {
330                         RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
331                         " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
332                         dev_id, dev_conf->dequeue_timeout_ns,
333                         info.min_dequeue_timeout_ns,
334                         info.max_dequeue_timeout_ns);
335                         return -EINVAL;
336                 }
337         }
338
339         /* Check nb_events_limit does not exceed max_num_events */
340         if (dev_conf->nb_events_limit > info.max_num_events) {
341                 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
342                 dev_id, dev_conf->nb_events_limit, info.max_num_events);
343                 return -EINVAL;
344         }
345
346         /* Check nb_event_queues is non-zero and within limits */
347         if (!dev_conf->nb_event_queues) {
348                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
349                                         dev_id);
350                 return -EINVAL;
351         }
352         if (dev_conf->nb_event_queues > info.max_event_queues +
353                         info.max_single_link_event_port_queue_pairs) {
354                 RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
355                                  dev_id, dev_conf->nb_event_queues,
356                                  info.max_event_queues,
357                                  info.max_single_link_event_port_queue_pairs);
358                 return -EINVAL;
359         }
360         if (dev_conf->nb_event_queues -
361                         dev_conf->nb_single_link_event_port_queues >
362                         info.max_event_queues) {
363                 RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
364                                  dev_id, dev_conf->nb_event_queues,
365                                  dev_conf->nb_single_link_event_port_queues,
366                                  info.max_event_queues);
367                 return -EINVAL;
368         }
369         if (dev_conf->nb_single_link_event_port_queues >
370                         dev_conf->nb_event_queues) {
371                 RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
372                                  dev_id,
373                                  dev_conf->nb_single_link_event_port_queues,
374                                  dev_conf->nb_event_queues);
375                 return -EINVAL;
376         }
377
378         /* Check nb_event_ports is non-zero and within limits */
379         if (!dev_conf->nb_event_ports) {
380                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
381                 return -EINVAL;
382         }
383         if (dev_conf->nb_event_ports > info.max_event_ports +
384                         info.max_single_link_event_port_queue_pairs) {
385                 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
386                                  dev_id, dev_conf->nb_event_ports,
387                                  info.max_event_ports,
388                                  info.max_single_link_event_port_queue_pairs);
389                 return -EINVAL;
390         }
391         if (dev_conf->nb_event_ports -
392                         dev_conf->nb_single_link_event_port_queues
393                         > info.max_event_ports) {
394                 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
395                                  dev_id, dev_conf->nb_event_ports,
396                                  dev_conf->nb_single_link_event_port_queues,
397                                  info.max_event_ports);
398                 return -EINVAL;
399         }
400
401         if (dev_conf->nb_single_link_event_port_queues >
402             dev_conf->nb_event_ports) {
403                 RTE_EDEV_LOG_ERR(
404                                  "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
405                                  dev_id,
406                                  dev_conf->nb_single_link_event_port_queues,
407                                  dev_conf->nb_event_ports);
408                 return -EINVAL;
409         }
410
411         /* Check nb_event_queue_flows is non-zero and within limits */
412         if (!dev_conf->nb_event_queue_flows) {
413                 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
414                 return -EINVAL;
415         }
416         if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
417                 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
418                 dev_id, dev_conf->nb_event_queue_flows,
419                 info.max_event_queue_flows);
420                 return -EINVAL;
421         }
422
423         /* Check nb_event_port_dequeue_depth is non-zero and within limits */
424         if (!dev_conf->nb_event_port_dequeue_depth) {
425                 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
426                                         dev_id);
427                 return -EINVAL;
428         }
429         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
430                  (dev_conf->nb_event_port_dequeue_depth >
431                          info.max_event_port_dequeue_depth)) {
432                 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
433                 dev_id, dev_conf->nb_event_port_dequeue_depth,
434                 info.max_event_port_dequeue_depth);
435                 return -EINVAL;
436         }
437
438         /* Check nb_event_port_enqueue_depth is non-zero and within limits */
439         if (!dev_conf->nb_event_port_enqueue_depth) {
440                 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
441                                         dev_id);
442                 return -EINVAL;
443         }
444         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
445                 (dev_conf->nb_event_port_enqueue_depth >
446                          info.max_event_port_enqueue_depth)) {
447                 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
448                 dev_id, dev_conf->nb_event_port_enqueue_depth,
449                 info.max_event_port_enqueue_depth);
450                 return -EINVAL;
451         }
452
453         /* Copy the dev_conf parameter into the dev structure */
454         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
455
456         /* Set up the new number of queues and reconfigure the device. */
457         diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
458         if (diag != 0) {
459                 RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
460                                  diag);
461                 return diag;
462         }
463
464         /* Set up the new number of ports and reconfigure the device. */
465         diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
466         if (diag != 0) {
467                 event_dev_queue_config(dev, 0);
468                 RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
469                                  diag);
470                 return diag;
471         }
472
473         /* Configure the device */
474         diag = (*dev->dev_ops->dev_configure)(dev);
475         if (diag != 0) {
476                 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
477                 event_dev_queue_config(dev, 0);
478                 event_dev_port_config(dev, 0);
479         }
480
481         dev->data->event_dev_cap = info.event_dev_cap;
482         rte_eventdev_trace_configure(dev_id, dev_conf, diag);
483         return diag;
484 }
485
486 static inline int
487 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
488 {
489         if (queue_id < dev->data->nb_queues && queue_id <
490                                 RTE_EVENT_MAX_QUEUES_PER_DEV)
491                 return 1;
492         else
493                 return 0;
494 }
495
496 int
497 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
498                                  struct rte_event_queue_conf *queue_conf)
499 {
500         struct rte_eventdev *dev;
501
502         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
503         dev = &rte_eventdevs[dev_id];
504
505         if (queue_conf == NULL)
506                 return -EINVAL;
507
508         if (!is_valid_queue(dev, queue_id)) {
509                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
510                 return -EINVAL;
511         }
512
513         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
514         memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
515         (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
516         return 0;
517 }
518
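/*
 * A queue configuration needs its nb_atomic_flows checked only when the queue
 * can schedule atomically: it is not a single-link queue and either allows
 * all schedule types or is explicitly RTE_SCHED_TYPE_ATOMIC.
 */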
519 static inline int
520 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
521 {
522         if (queue_conf &&
523                 !(queue_conf->event_queue_cfg &
524                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
525                 ((queue_conf->event_queue_cfg &
526                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
527                 (queue_conf->schedule_type
528                         == RTE_SCHED_TYPE_ATOMIC)
529                 ))
530                 return 1;
531         else
532                 return 0;
533 }
534
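/* Same check as above, but for ordered queues (nb_atomic_order_sequences). */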
535 static inline int
536 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
537 {
538         if (queue_conf &&
539                 !(queue_conf->event_queue_cfg &
540                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
541                 ((queue_conf->event_queue_cfg &
542                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
543                 (queue_conf->schedule_type
544                         == RTE_SCHED_TYPE_ORDERED)
545                 ))
546                 return 1;
547         else
548                 return 0;
549 }
550
551
552 int
553 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
554                       const struct rte_event_queue_conf *queue_conf)
555 {
556         struct rte_eventdev *dev;
557         struct rte_event_queue_conf def_conf;
558
559         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
560         dev = &rte_eventdevs[dev_id];
561
562         if (!is_valid_queue(dev, queue_id)) {
563                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
564                 return -EINVAL;
565         }
566
567         /* Check nb_atomic_flows limit */
568         if (is_valid_atomic_queue_conf(queue_conf)) {
569                 if (queue_conf->nb_atomic_flows == 0 ||
570                     queue_conf->nb_atomic_flows >
571                         dev->data->dev_conf.nb_event_queue_flows) {
572                         RTE_EDEV_LOG_ERR(
573                 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
574                         dev_id, queue_id, queue_conf->nb_atomic_flows,
575                         dev->data->dev_conf.nb_event_queue_flows);
576                         return -EINVAL;
577                 }
578         }
579
580         /* Check nb_atomic_order_sequences limit */
581         if (is_valid_ordered_queue_conf(queue_conf)) {
582                 if (queue_conf->nb_atomic_order_sequences == 0 ||
583                     queue_conf->nb_atomic_order_sequences >
584                         dev->data->dev_conf.nb_event_queue_flows) {
585                         RTE_EDEV_LOG_ERR(
586                 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
587                         dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
588                         dev->data->dev_conf.nb_event_queue_flows);
589                         return -EINVAL;
590                 }
591         }
592
593         if (dev->data->dev_started) {
594                 RTE_EDEV_LOG_ERR(
595                     "device %d must be stopped to allow queue setup", dev_id);
596                 return -EBUSY;
597         }
598
599         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
600
601         if (queue_conf == NULL) {
602                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
603                                         -ENOTSUP);
604                 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
605                 queue_conf = &def_conf;
606         }
607
608         dev->data->queues_cfg[queue_id] = *queue_conf;
609         rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
610         return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
611 }
612
613 static inline int
614 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
615 {
616         if (port_id < dev->data->nb_ports)
617                 return 1;
618         else
619                 return 0;
620 }
621
622 int
623 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
624                                  struct rte_event_port_conf *port_conf)
625 {
626         struct rte_eventdev *dev;
627
628         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
629         dev = &rte_eventdevs[dev_id];
630
631         if (port_conf == NULL)
632                 return -EINVAL;
633
634         if (!is_valid_port(dev, port_id)) {
635                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
636                 return -EINVAL;
637         }
638
639         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
640         memset(port_conf, 0, sizeof(struct rte_event_port_conf));
641         (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
642         return 0;
643 }
644
645 int
646 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
647                      const struct rte_event_port_conf *port_conf)
648 {
649         struct rte_eventdev *dev;
650         struct rte_event_port_conf def_conf;
651         int diag;
652
653         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
654         dev = &rte_eventdevs[dev_id];
655
656         if (!is_valid_port(dev, port_id)) {
657                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
658                 return -EINVAL;
659         }
660
661         /* Check new_event_threshold limit */
662         if ((port_conf && !port_conf->new_event_threshold) ||
663                         (port_conf && port_conf->new_event_threshold >
664                                  dev->data->dev_conf.nb_events_limit)) {
665                 RTE_EDEV_LOG_ERR(
666                    "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
667                         dev_id, port_id, port_conf->new_event_threshold,
668                         dev->data->dev_conf.nb_events_limit);
669                 return -EINVAL;
670         }
671
672         /* Check dequeue_depth limit */
673         if ((port_conf && !port_conf->dequeue_depth) ||
674                         (port_conf && port_conf->dequeue_depth >
675                 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
676                 RTE_EDEV_LOG_ERR(
677                    "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
678                         dev_id, port_id, port_conf->dequeue_depth,
679                         dev->data->dev_conf.nb_event_port_dequeue_depth);
680                 return -EINVAL;
681         }
682
683         /* Check enqueue_depth limit */
684         if ((port_conf && !port_conf->enqueue_depth) ||
685                         (port_conf && port_conf->enqueue_depth >
686                 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
687                 RTE_EDEV_LOG_ERR(
688                    "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
689                         dev_id, port_id, port_conf->enqueue_depth,
690                         dev->data->dev_conf.nb_event_port_enqueue_depth);
691                 return -EINVAL;
692         }
693
694         if (port_conf &&
695             (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
696             !(dev->data->event_dev_cap &
697               RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
698                 RTE_EDEV_LOG_ERR(
699                    "dev%d port%d Implicit release disable not supported",
700                         dev_id, port_id);
701                 return -EINVAL;
702         }
703
704         if (dev->data->dev_started) {
705                 RTE_EDEV_LOG_ERR(
706                     "device %d must be stopped to allow port setup", dev_id);
707                 return -EBUSY;
708         }
709
710         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
711
712         if (port_conf == NULL) {
713                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
714                                         -ENOTSUP);
715                 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
716                 port_conf = &def_conf;
717         }
718
719         dev->data->ports_cfg[port_id] = *port_conf;
720
721         diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
722
723         /* Unlink all the queues from this port (default state after setup) */
724         if (!diag)
725                 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
726
727         rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
728         if (diag < 0)
729                 return diag;
730
731         return 0;
732 }
733
734 int
735 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
736                        uint32_t *attr_value)
737 {
738         struct rte_eventdev *dev;
739
740         if (!attr_value)
741                 return -EINVAL;
742         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
743         dev = &rte_eventdevs[dev_id];
744
745         switch (attr_id) {
746         case RTE_EVENT_DEV_ATTR_PORT_COUNT:
747                 *attr_value = dev->data->nb_ports;
748                 break;
749         case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
750                 *attr_value = dev->data->nb_queues;
751                 break;
752         case RTE_EVENT_DEV_ATTR_STARTED:
753                 *attr_value = dev->data->dev_started;
754                 break;
755         default:
756                 return -EINVAL;
757         }
758
759         return 0;
760 }
761
762 int
763 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
764                         uint32_t *attr_value)
765 {
766         struct rte_eventdev *dev;
767
768         if (!attr_value)
769                 return -EINVAL;
770
771         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
772         dev = &rte_eventdevs[dev_id];
773         if (!is_valid_port(dev, port_id)) {
774                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
775                 return -EINVAL;
776         }
777
778         switch (attr_id) {
779         case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
780                 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
781                 break;
782         case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
783                 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
784                 break;
785         case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
786                 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
787                 break;
788         case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
789         {
790                 uint32_t config;
791
792                 config = dev->data->ports_cfg[port_id].event_port_cfg;
793                 *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
794                 break;
795         }
796         default:
797                 return -EINVAL;
798         }
799         return 0;
800 }
801
802 int
803 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
804                         uint32_t *attr_value)
805 {
806         struct rte_event_queue_conf *conf;
807         struct rte_eventdev *dev;
808
809         if (!attr_value)
810                 return -EINVAL;
811
812         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
813         dev = &rte_eventdevs[dev_id];
814         if (!is_valid_queue(dev, queue_id)) {
815                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
816                 return -EINVAL;
817         }
818
819         conf = &dev->data->queues_cfg[queue_id];
820
821         switch (attr_id) {
822         case RTE_EVENT_QUEUE_ATTR_PRIORITY:
823                 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
824                 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
825                         *attr_value = conf->priority;
826                 break;
827         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
828                 *attr_value = conf->nb_atomic_flows;
829                 break;
830         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
831                 *attr_value = conf->nb_atomic_order_sequences;
832                 break;
833         case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
834                 *attr_value = conf->event_queue_cfg;
835                 break;
836         case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
837                 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
838                         return -EOVERFLOW;
839
840                 *attr_value = conf->schedule_type;
841                 break;
842         default:
843                 return -EINVAL;
844         }
845         return 0;
846 }
847
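/*
 * Link queues to an event port. A NULL queues array links every configured
 * queue; a NULL priorities array applies RTE_EVENT_DEV_PRIORITY_NORMAL to
 * each link. Successful links are recorded in the shared links_map so they
 * can later be reported or unlinked.
 *
 * A minimal usage sketch (dev_id, port_id and the queue IDs are illustrative):
 *
 *     uint8_t queues[] = {0, 1};
 *
 *     if (rte_event_port_link(dev_id, port_id, queues, NULL, 2) != 2)
 *             rte_panic("linking port %u failed\n", port_id);
 */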
848 int
849 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
850                     const uint8_t queues[], const uint8_t priorities[],
851                     uint16_t nb_links)
852 {
853         struct rte_eventdev *dev;
854         uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
855         uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
856         uint16_t *links_map;
857         int i, diag;
858
859         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
860         dev = &rte_eventdevs[dev_id];
861
862         if (*dev->dev_ops->port_link == NULL) {
863                 RTE_EDEV_LOG_ERR("Function not supported");
864                 rte_errno = ENOTSUP;
865                 return 0;
866         }
867
868         if (!is_valid_port(dev, port_id)) {
869                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
870                 rte_errno = EINVAL;
871                 return 0;
872         }
873
874         if (queues == NULL) {
875                 for (i = 0; i < dev->data->nb_queues; i++)
876                         queues_list[i] = i;
877
878                 queues = queues_list;
879                 nb_links = dev->data->nb_queues;
880         }
881
882         if (priorities == NULL) {
883                 for (i = 0; i < nb_links; i++)
884                         priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
885
886                 priorities = priorities_list;
887         }
888
889         for (i = 0; i < nb_links; i++)
890                 if (queues[i] >= dev->data->nb_queues) {
891                         rte_errno = EINVAL;
892                         return 0;
893                 }
894
895         diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
896                                                 queues, priorities, nb_links);
897         if (diag < 0)
898                 return diag;
899
900         links_map = dev->data->links_map;
901         /* Point links_map to this port-specific area */
902         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
903         for (i = 0; i < diag; i++)
904                 links_map[queues[i]] = (uint8_t)priorities[i];
905
906         rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
907         return diag;
908 }
909
910 int
911 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
912                       uint8_t queues[], uint16_t nb_unlinks)
913 {
914         struct rte_eventdev *dev;
915         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
916         int i, diag, j;
917         uint16_t *links_map;
918
919         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
920         dev = &rte_eventdevs[dev_id];
921
922         if (*dev->dev_ops->port_unlink == NULL) {
923                 RTE_EDEV_LOG_ERR("Function not supported");
924                 rte_errno = ENOTSUP;
925                 return 0;
926         }
927
928         if (!is_valid_port(dev, port_id)) {
929                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
930                 rte_errno = EINVAL;
931                 return 0;
932         }
933
934         links_map = dev->data->links_map;
935         /* Point links_map to this port-specific area */
936         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
937
938         if (queues == NULL) {
939                 j = 0;
940                 for (i = 0; i < dev->data->nb_queues; i++) {
941                         if (links_map[i] !=
942                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
943                                 all_queues[j] = i;
944                                 j++;
945                         }
946                 }
947                 queues = all_queues;
948         } else {
949                 for (j = 0; j < nb_unlinks; j++) {
950                         if (links_map[queues[j]] ==
951                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
952                                 break;
953                 }
954         }
955
956         nb_unlinks = j;
957         for (i = 0; i < nb_unlinks; i++)
958                 if (queues[i] >= dev->data->nb_queues) {
959                         rte_errno = EINVAL;
960                         return 0;
961                 }
962
963         diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
964                                         queues, nb_unlinks);
965
966         if (diag < 0)
967                 return diag;
968
969         for (i = 0; i < diag; i++)
970                 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
971
972         rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
973         return diag;
974 }
975
976 int
977 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
978 {
979         struct rte_eventdev *dev;
980
981         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
982         dev = &rte_eventdevs[dev_id];
983         if (!is_valid_port(dev, port_id)) {
984                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
985                 return -EINVAL;
986         }
987
988         /* Return 0 if the PMD does not implement unlinks in progress.
989          * This allows PMDs which handle unlink synchronously to not implement
990          * this function at all.
991          */
992         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
993
994         return (*dev->dev_ops->port_unlinks_in_progress)(dev,
995                         dev->data->ports[port_id]);
996 }
997
998 int
999 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1000                          uint8_t queues[], uint8_t priorities[])
1001 {
1002         struct rte_eventdev *dev;
1003         uint16_t *links_map;
1004         int i, count = 0;
1005
1006         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1007         dev = &rte_eventdevs[dev_id];
1008         if (!is_valid_port(dev, port_id)) {
1009                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1010                 return -EINVAL;
1011         }
1012
1013         links_map = dev->data->links_map;
1014         /* Point links_map to this port-specific area */
1015         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1016         for (i = 0; i < dev->data->nb_queues; i++) {
1017                 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1018                         queues[count] = i;
1019                         priorities[count] = (uint8_t)links_map[i];
1020                         ++count;
1021                 }
1022         }
1023         return count;
1024 }
1025
1026 int
1027 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1028                                  uint64_t *timeout_ticks)
1029 {
1030         struct rte_eventdev *dev;
1031
1032         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1033         dev = &rte_eventdevs[dev_id];
1034         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1035
1036         if (timeout_ticks == NULL)
1037                 return -EINVAL;
1038
1039         return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1040 }
1041
1042 int
1043 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1044 {
1045         struct rte_eventdev *dev;
1046
1047         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1048         dev = &rte_eventdevs[dev_id];
1049
1050         if (service_id == NULL)
1051                 return -EINVAL;
1052
1053         if (dev->data->service_inited)
1054                 *service_id = dev->data->service_id;
1055
1056         return dev->data->service_inited ? 0 : -ESRCH;
1057 }
1058
1059 int
1060 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1061 {
1062         struct rte_eventdev *dev;
1063
1064         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1065         dev = &rte_eventdevs[dev_id];
1066         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1067         if (f == NULL)
1068                 return -EINVAL;
1069
1070         (*dev->dev_ops->dump)(dev, f);
1071         return 0;
1072
1073 }
1074
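/*
 * Ask the PMD how many xstats exist for the given mode and queue/port by
 * calling its xstats_get_names op with NULL output arrays and a size of 0;
 * the return value is used as the expected entry count.
 */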
1075 static int
1076 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1077                 uint8_t queue_port_id)
1078 {
1079         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1080         if (dev->dev_ops->xstats_get_names != NULL)
1081                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1082                                                         queue_port_id,
1083                                                         NULL, NULL, 0);
1084         return 0;
1085 }
1086
1087 int
1088 rte_event_dev_xstats_names_get(uint8_t dev_id,
1089                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1090                 struct rte_event_dev_xstats_name *xstats_names,
1091                 unsigned int *ids, unsigned int size)
1092 {
1093         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1094         const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1095                                                           queue_port_id);
1096         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1097                         (int)size < cnt_expected_entries)
1098                 return cnt_expected_entries;
1099
1100         /* dev_id checked above */
1101         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1102
1103         if (dev->dev_ops->xstats_get_names != NULL)
1104                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1105                                 queue_port_id, xstats_names, ids, size);
1106
1107         return -ENOTSUP;
1108 }
1109
1110 /* retrieve eventdev extended statistics */
1111 int
1112 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1113                 uint8_t queue_port_id, const unsigned int ids[],
1114                 uint64_t values[], unsigned int n)
1115 {
1116         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1117         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1118
1119         /* implemented by the driver */
1120         if (dev->dev_ops->xstats_get != NULL)
1121                 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1122                                 ids, values, n);
1123         return -ENOTSUP;
1124 }
1125
1126 uint64_t
1127 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1128                 unsigned int *id)
1129 {
1130         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1131         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1132         unsigned int temp = -1;
1133
1134         if (id != NULL)
1135                 *id = (unsigned int)-1;
1136         else
1137                 id = &temp; /* ensure driver never gets a NULL value */
1138
1139         /* implemented by driver */
1140         if (dev->dev_ops->xstats_get_by_name != NULL)
1141                 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1142         return -ENOTSUP;
1143 }
1144
1145 int rte_event_dev_xstats_reset(uint8_t dev_id,
1146                 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1147                 const uint32_t ids[], uint32_t nb_ids)
1148 {
1149         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1150         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1151
1152         if (dev->dev_ops->xstats_reset != NULL)
1153                 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1154                                                         ids, nb_ids);
1155         return -ENOTSUP;
1156 }
1157
1158 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1159
1160 int rte_event_dev_selftest(uint8_t dev_id)
1161 {
1162         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1163         static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1164                 .name = "rte_event_pmd_selftest_seqn_dynfield",
1165                 .size = sizeof(rte_event_pmd_selftest_seqn_t),
1166                 .align = __alignof__(rte_event_pmd_selftest_seqn_t),
1167         };
1168         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1169
1170         if (dev->dev_ops->dev_selftest != NULL) {
1171                 rte_event_pmd_selftest_seqn_dynfield_offset =
1172                         rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1173                 if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1174                         return -ENOMEM;
1175                 return (*dev->dev_ops->dev_selftest)();
1176         }
1177         return -ENOTSUP;
1178 }
1179
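/*
 * Create a mempool whose elements hold a struct rte_event_vector header
 * followed by room for nb_elem object pointers, using the platform's best
 * mbuf mempool ops.
 *
 * A minimal usage sketch (pool name and sizes are illustrative):
 *
 *     struct rte_mempool *vp;
 *
 *     vp = rte_event_vector_pool_create("ev_vec_pool", 16 * 1024, 64, 32,
 *                                       rte_socket_id());
 *     if (vp == NULL)
 *             rte_panic("vector pool creation failed, rte_errno=%d\n",
 *                       rte_errno);
 */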
1180 struct rte_mempool *
1181 rte_event_vector_pool_create(const char *name, unsigned int n,
1182                              unsigned int cache_size, uint16_t nb_elem,
1183                              int socket_id)
1184 {
1185         const char *mp_ops_name;
1186         struct rte_mempool *mp;
1187         unsigned int elt_sz;
1188         int ret;
1189
1190         if (!nb_elem) {
1191                 RTE_LOG(ERR, EVENTDEV,
1192                         "Invalid number of elements=%d requested\n", nb_elem);
1193                 rte_errno = EINVAL;
1194                 return NULL;
1195         }
1196
1197         elt_sz =
1198                 sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1199         mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1200                                       0);
1201         if (mp == NULL)
1202                 return NULL;
1203
1204         mp_ops_name = rte_mbuf_best_mempool_ops();
1205         ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1206         if (ret != 0) {
1207                 RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
1208                 goto err;
1209         }
1210
1211         ret = rte_mempool_populate_default(mp);
1212         if (ret < 0)
1213                 goto err;
1214
1215         return mp;
1216 err:
1217         rte_mempool_free(mp);
1218         rte_errno = -ret;
1219         return NULL;
1220 }
1221
1222 int
1223 rte_event_dev_start(uint8_t dev_id)
1224 {
1225         struct rte_eventdev *dev;
1226         int diag;
1227
1228         RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1229
1230         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1231         dev = &rte_eventdevs[dev_id];
1232         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1233
1234         if (dev->data->dev_started != 0) {
1235                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1236                         dev_id);
1237                 return 0;
1238         }
1239
1240         diag = (*dev->dev_ops->dev_start)(dev);
1241         rte_eventdev_trace_start(dev_id, diag);
1242         if (diag == 0)
1243                 dev->data->dev_started = 1;
1244         else
1245                 return diag;
1246
1247         return 0;
1248 }
1249
1250 int
1251 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1252                 eventdev_stop_flush_t callback, void *userdata)
1253 {
1254         struct rte_eventdev *dev;
1255
1256         RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1257
1258         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1259         dev = &rte_eventdevs[dev_id];
1260
1261         dev->dev_ops->dev_stop_flush = callback;
1262         dev->data->dev_stop_flush_arg = userdata;
1263
1264         return 0;
1265 }
1266
1267 void
1268 rte_event_dev_stop(uint8_t dev_id)
1269 {
1270         struct rte_eventdev *dev;
1271
1272         RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1273
1274         RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1275         dev = &rte_eventdevs[dev_id];
1276         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1277
1278         if (dev->data->dev_started == 0) {
1279                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1280                         dev_id);
1281                 return;
1282         }
1283
1284         dev->data->dev_started = 0;
1285         (*dev->dev_ops->dev_stop)(dev);
1286         rte_eventdev_trace_stop(dev_id);
1287 }
1288
1289 int
1290 rte_event_dev_close(uint8_t dev_id)
1291 {
1292         struct rte_eventdev *dev;
1293
1294         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1295         dev = &rte_eventdevs[dev_id];
1296         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1297
1298         /* Device must be stopped before it can be closed */
1299         if (dev->data->dev_started == 1) {
1300                 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1301                                 dev_id);
1302                 return -EBUSY;
1303         }
1304
1305         rte_eventdev_trace_close(dev_id);
1306         return (*dev->dev_ops->dev_close)(dev);
1307 }
1308
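/*
 * Reserve (primary process) or look up (secondary process) the shared
 * rte_eventdev_data memzone for this device. The primary process zeroes the
 * data and marks every links_map entry as unlinked. Because the internal
 * arrays are statically sized for the maximum number of ports and queues,
 * reserving sizeof(struct rte_eventdev_data) is sufficient.
 */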
1309 static inline int
1310 eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1311                     int socket_id)
1312 {
1313         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1314         const struct rte_memzone *mz;
1315         int n;
1316
1317         /* Generate memzone name */
1318         n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1319         if (n >= (int)sizeof(mz_name))
1320                 return -EINVAL;
1321
1322         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1323                 mz = rte_memzone_reserve(mz_name,
1324                                 sizeof(struct rte_eventdev_data),
1325                                 socket_id, 0);
1326         } else
1327                 mz = rte_memzone_lookup(mz_name);
1328
1329         if (mz == NULL)
1330                 return -ENOMEM;
1331
1332         *data = mz->addr;
1333         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1334                 memset(*data, 0, sizeof(struct rte_eventdev_data));
1335                 for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
1336                                         RTE_EVENT_MAX_QUEUES_PER_DEV;
1337                      n++)
1338                         (*data)->links_map[n] =
1339                                 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1340         }
1341
1342         return 0;
1343 }
1344
1345 static inline uint8_t
1346 eventdev_find_free_device_index(void)
1347 {
1348         uint8_t dev_id;
1349
1350         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1351                 if (rte_eventdevs[dev_id].attached ==
1352                                 RTE_EVENTDEV_DETACHED)
1353                         return dev_id;
1354         }
1355         return RTE_EVENT_MAX_DEVS;
1356 }
1357
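/*
 * Default enqueue stubs installed by rte_event_pmd_allocate(): they enqueue
 * nothing and set rte_errno to ENOTSUP. PMDs that support the Tx or crypto
 * adapter are expected to override them.
 */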
1358 static uint16_t
1359 rte_event_tx_adapter_enqueue(__rte_unused void *port,
1360                         __rte_unused struct rte_event ev[],
1361                         __rte_unused uint16_t nb_events)
1362 {
1363         rte_errno = ENOTSUP;
1364         return 0;
1365 }
1366
1367 static uint16_t
1368 rte_event_crypto_adapter_enqueue(__rte_unused void *port,
1369                         __rte_unused struct rte_event ev[],
1370                         __rte_unused uint16_t nb_events)
1371 {
1372         rte_errno = ENOTSUP;
1373         return 0;
1374 }
1375
1376 struct rte_eventdev *
1377 rte_event_pmd_allocate(const char *name, int socket_id)
1378 {
1379         struct rte_eventdev *eventdev;
1380         uint8_t dev_id;
1381
1382         if (rte_event_pmd_get_named_dev(name) != NULL) {
1383                 RTE_EDEV_LOG_ERR("Event device with name %s already "
1384                                 "allocated!", name);
1385                 return NULL;
1386         }
1387
1388         dev_id = eventdev_find_free_device_index();
1389         if (dev_id == RTE_EVENT_MAX_DEVS) {
1390                 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1391                 return NULL;
1392         }
1393
1394         eventdev = &rte_eventdevs[dev_id];
1395
1396         eventdev->txa_enqueue = rte_event_tx_adapter_enqueue;
1397         eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue;
1398         eventdev->ca_enqueue = rte_event_crypto_adapter_enqueue;
1399
1400         if (eventdev->data == NULL) {
1401                 struct rte_eventdev_data *eventdev_data = NULL;
1402
1403                 int retval =
1404                         eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1405
1406                 if (retval < 0 || eventdev_data == NULL)
1407                         return NULL;
1408
1409                 eventdev->data = eventdev_data;
1410
1411                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1412
1413                         strlcpy(eventdev->data->name, name,
1414                                 RTE_EVENTDEV_NAME_MAX_LEN);
1415
1416                         eventdev->data->dev_id = dev_id;
1417                         eventdev->data->socket_id = socket_id;
1418                         eventdev->data->dev_started = 0;
1419                 }
1420
1421                 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1422                 eventdev_globals.nb_devs++;
1423         }
1424
1425         return eventdev;
1426 }
1427
1428 int
1429 rte_event_pmd_release(struct rte_eventdev *eventdev)
1430 {
1431         int ret;
1432         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1433         const struct rte_memzone *mz;
1434
1435         if (eventdev == NULL)
1436                 return -EINVAL;
1437
1438         eventdev->attached = RTE_EVENTDEV_DETACHED;
1439         eventdev_globals.nb_devs--;
1440
1441         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1442                 rte_free(eventdev->data->dev_private);
1443
1444                 /* Generate memzone name */
1445                 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1446                                 eventdev->data->dev_id);
1447                 if (ret >= (int)sizeof(mz_name))
1448                         return -EINVAL;
1449
1450                 mz = rte_memzone_lookup(mz_name);
1451                 if (mz == NULL)
1452                         return -ENOMEM;
1453
1454                 ret = rte_memzone_free(mz);
1455                 if (ret)
1456                         return ret;
1457         }
1458
1459         eventdev->data = NULL;
1460         return 0;
1461 }
1462
1463
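/* Telemetry callback: list the IDs of all attached event devices. */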
1464 static int
1465 handle_dev_list(const char *cmd __rte_unused,
1466                 const char *params __rte_unused,
1467                 struct rte_tel_data *d)
1468 {
1469         uint8_t dev_id;
1470         int ndev = rte_event_dev_count();
1471
1472         if (ndev < 1)
1473                 return -1;
1474
1475         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1476         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1477                 if (rte_eventdevs[dev_id].attached ==
1478                                 RTE_EVENTDEV_ATTACHED)
1479                         rte_tel_data_add_array_int(d, dev_id);
1480         }
1481
1482         return 0;
1483 }
1484
1485 static int
1486 handle_port_list(const char *cmd __rte_unused,
1487                  const char *params,
1488                  struct rte_tel_data *d)
1489 {
1490         int i;
1491         uint8_t dev_id;
1492         struct rte_eventdev *dev;
1493         char *end_param;
1494
1495         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1496                 return -1;
1497
1498         dev_id = strtoul(params, &end_param, 10);
1499         if (*end_param != '\0')
1500                 RTE_EDEV_LOG_DEBUG(
1501                         "Extra parameters passed to eventdev telemetry command, ignoring");
1502
1503         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1504         dev = &rte_eventdevs[dev_id];
1505
1506         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1507         for (i = 0; i < dev->data->nb_ports; i++)
1508                 rte_tel_data_add_array_int(d, i);
1509
1510         return 0;
1511 }
1512
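/* Telemetry callback: list the queue IDs (0 .. nb_queues - 1) of the event
 * device given by the DevID parameter.
 */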
1513 static int
1514 handle_queue_list(const char *cmd __rte_unused,
1515                   const char *params,
1516                   struct rte_tel_data *d)
1517 {
1518         int i;
1519         uint8_t dev_id;
1520         struct rte_eventdev *dev;
1521         char *end_param;
1522
1523         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1524                 return -1;
1525
1526         dev_id = strtoul(params, &end_param, 10);
1527         if (*end_param != '\0')
1528                 RTE_EDEV_LOG_DEBUG(
1529                         "Extra parameters passed to eventdev telemetry command, ignoring");
1530
1531         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1532         dev = &rte_eventdevs[dev_id];
1533
1534         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1535         for (i = 0; i < dev->data->nb_queues; i++)
1536                 rte_tel_data_add_array_int(d, i);
1537
1538         return 0;
1539 }
1540
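/* Telemetry callback: report the queues linked to an event port as a
 * "qid_<N>" -> priority dictionary. Parameters: device ID, port ID.
 */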
1541 static int
1542 handle_queue_links(const char *cmd __rte_unused,
1543                    const char *params,
1544                    struct rte_tel_data *d)
1545 {
1546         int i, ret, port_id = 0;
1547         char *end_param;
1548         uint8_t dev_id;
1549         uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1550         uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1551         const char *p_param;
1552
1553         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1554                 return -1;
1555
1556         /* Get dev ID from parameter string */
1557         dev_id = strtoul(params, &end_param, 10);
1558         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1559
1560         p_param = strtok(end_param, ",");
1561         if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1562                 return -1;
1563
1564         port_id = strtoul(p_param, &end_param, 10);
1565         p_param = strtok(NULL, "\0");
1566         if (p_param != NULL)
1567                 RTE_EDEV_LOG_DEBUG(
1568                         "Extra parameters passed to eventdev telemetry command, ignoring");
1569
1570         ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1571         if (ret < 0)
1572                 return -1;
1573
1574         rte_tel_data_start_dict(d);
1575         for (i = 0; i < ret; i++) {
1576                 char qid_name[32];
1577
1578                 snprintf(qid_name, sizeof(qid_name), "qid_%u", queues[i]);
1579                 rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
1580         }
1581
1582         return 0;
1583 }
1584
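/* Query the xstats of one device, port or queue (selected by 'mode' and
 * 'port_queue_id') and add them to the telemetry response as a
 * name -> value dictionary. The names, IDs and values are fetched into
 * temporary heap buffers which are freed before returning.
 */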
1585 static int
1586 eventdev_build_telemetry_data(int dev_id,
1587                               enum rte_event_dev_xstats_mode mode,
1588                               int port_queue_id,
1589                               struct rte_tel_data *d)
1590 {
1591         struct rte_event_dev_xstats_name *xstat_names;
1592         unsigned int *ids;
1593         uint64_t *values;
1594         int i, ret, num_xstats;
1595
1596         num_xstats = rte_event_dev_xstats_names_get(dev_id,
1597                                                     mode,
1598                                                     port_queue_id,
1599                                                     NULL,
1600                                                     NULL,
1601                                                     0);
1602
1603         if (num_xstats < 0)
1604                 return -1;
1605
1606         /* use one malloc for names */
1607         xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1608                              * num_xstats);
1609         if (xstat_names == NULL)
1610                 return -1;
1611
1612         ids = malloc((sizeof(unsigned int)) * num_xstats);
1613         if (ids == NULL) {
1614                 free(xstat_names);
1615                 return -1;
1616         }
1617
1618         values = malloc((sizeof(uint64_t)) * num_xstats);
1619         if (values == NULL) {
1620                 free(xstat_names);
1621                 free(ids);
1622                 return -1;
1623         }
1624
1625         ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1626                                              xstat_names, ids, num_xstats);
1627         if (ret < 0 || ret > num_xstats) {
1628                 free(xstat_names);
1629                 free(ids);
1630                 free(values);
1631                 return -1;
1632         }
1633
1634         ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1635                                        ids, values, num_xstats);
1636         if (ret < 0 || ret > num_xstats) {
1637                 free(xstat_names);
1638                 free(ids);
1639                 free(values);
1640                 return -1;
1641         }
1642
1643         rte_tel_data_start_dict(d);
1644         for (i = 0; i < num_xstats; i++)
1645                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
1646                                           values[i]);
1647
1648         free(xstat_names);
1649         free(ids);
1650         free(values);
1651         return 0;
1652 }
1653
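/* Telemetry callback: device-level xstats. Parameter: device ID. */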
1654 static int
1655 handle_dev_xstats(const char *cmd __rte_unused,
1656                   const char *params,
1657                   struct rte_tel_data *d)
1658 {
1659         int dev_id;
1660         enum rte_event_dev_xstats_mode mode;
1661         char *end_param;
1662
1663         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1664                 return -1;
1665
1666         /* Get dev ID from parameter string */
1667         dev_id = strtoul(params, &end_param, 10);
1668         if (*end_param != '\0')
1669                 RTE_EDEV_LOG_DEBUG(
1670                         "Extra parameters passed to eventdev telemetry command, ignoring");
1671
1672         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1673
1674         mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1675         return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1676 }
1677
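/* Telemetry callback: per-port xstats. Parameters: device ID, port ID. */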
1678 static int
1679 handle_port_xstats(const char *cmd __rte_unused,
1680                    const char *params,
1681                    struct rte_tel_data *d)
1682 {
1683         int dev_id;
1684         int port_queue_id = 0;
1685         enum rte_event_dev_xstats_mode mode;
1686         char *end_param;
1687         const char *p_param;
1688
1689         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1690                 return -1;
1691
1692         /* Get dev ID from parameter string */
1693         dev_id = strtoul(params, &end_param, 10);
1694         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1695
1696         p_param = strtok(end_param, ",");
1697         mode = RTE_EVENT_DEV_XSTATS_PORT;
1698
1699         if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1700                 return -1;
1701
1702         port_queue_id = strtoul(p_param, &end_param, 10);
1703
1704         p_param = strtok(NULL, "\0");
1705         if (p_param != NULL)
1706                 RTE_EDEV_LOG_DEBUG(
1707                         "Extra parameters passed to eventdev telemetry command, ignoring");
1708
1709         return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1710 }
1711
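/* Telemetry callback: per-queue xstats. Parameters: device ID, queue ID. */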
1712 static int
1713 handle_queue_xstats(const char *cmd __rte_unused,
1714                     const char *params,
1715                     struct rte_tel_data *d)
1716 {
1717         int dev_id;
1718         int port_queue_id = 0;
1719         enum rte_event_dev_xstats_mode mode;
1720         char *end_param;
1721         const char *p_param;
1722
1723         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1724                 return -1;
1725
1726         /* Get dev ID from parameter string */
1727         dev_id = strtoul(params, &end_param, 10);
1728         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1729
1730         p_param = strtok(end_param, ",");
1731         mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1732
1733         if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1734                 return -1;
1735
1736         port_queue_id = strtoul(p_param, &end_param, 10);
1737
1738         p_param = strtok(NULL, "\0");
1739         if (p_param != NULL)
1740                 RTE_EDEV_LOG_DEBUG(
1741                         "Extra parameters passed to eventdev telemetry command, ignoring");
1742
1743         return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1744 }
1745
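/* Register the eventdev telemetry commands at startup. They can be queried
 * through usertools/dpdk-telemetry.py, e.g. (with illustrative IDs):
 *   --> /eventdev/port_xstats,0,1
 * which returns the xstats dictionary for port 1 of event device 0.
 */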
1746 RTE_INIT(eventdev_init_telemetry)
1747 {
1748         rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
1749                         "Returns list of available eventdevs. Takes no parameters");
1750         rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
1751                         "Returns list of available ports. Parameter: DevID");
1752         rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
1753                         "Returns list of available queues. Parameter: DevID");
1754
1755         rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
1756                         "Returns stats for an eventdev. Parameter: DevID");
1757         rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
1758                         "Returns stats for an eventdev port. Params: DevID,PortID");
1759         rte_telemetry_register_cmd("/eventdev/queue_xstats",
1760                         handle_queue_xstats,
1761                         "Returns stats for an eventdev queue. Params: DevID,QueueID");
1762         rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
1763                         "Returns links for an eventdev port. Params: DevID,QueueID");
1764 }