lib/eventdev/rte_eventdev.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2016 Cavium, Inc
3  */
4
5 #include <ctype.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <stdarg.h>
10 #include <errno.h>
11 #include <stdint.h>
12 #include <inttypes.h>
13 #include <sys/types.h>
14 #include <sys/queue.h>
15
16 #include <rte_string_fns.h>
17 #include <rte_byteorder.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_dev.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_eal.h>
25 #include <rte_per_lcore.h>
26 #include <rte_lcore.h>
27 #include <rte_atomic.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_common.h>
30 #include <rte_malloc.h>
31 #include <rte_errno.h>
32 #include <ethdev_driver.h>
33 #include <rte_cryptodev.h>
34 #include <cryptodev_pmd.h>
35 #include <rte_telemetry.h>
36
37 #include "rte_eventdev.h"
38 #include "eventdev_pmd.h"
39 #include "eventdev_trace.h"
40
41 static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS];
42
43 struct rte_eventdev *rte_eventdevs = rte_event_devices;
44
45 static struct rte_eventdev_global eventdev_globals = {
46         .nb_devs                = 0
47 };
48
49 /* Public fastpath APIs. */
50 struct rte_event_fp_ops rte_event_fp_ops[RTE_EVENT_MAX_DEVS];
51
52 /* Event dev north bound API implementation */
53
54 uint8_t
55 rte_event_dev_count(void)
56 {
57         return eventdev_globals.nb_devs;
58 }
59
60 int
61 rte_event_dev_get_dev_id(const char *name)
62 {
63         int i;
64         uint8_t cmp;
65
66         if (!name)
67                 return -EINVAL;
68
69         for (i = 0; i < eventdev_globals.nb_devs; i++) {
70                 cmp = (strncmp(rte_event_devices[i].data->name, name,
71                                 RTE_EVENTDEV_NAME_MAX_LEN) == 0) ||
72                         (rte_event_devices[i].dev ? (strncmp(
73                                 rte_event_devices[i].dev->driver->name, name,
74                                          RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0);
75                 if (cmp && (rte_event_devices[i].attached ==
76                                         RTE_EVENTDEV_ATTACHED))
77                         return i;
78         }
79         return -ENODEV;
80 }
81
82 int
83 rte_event_dev_socket_id(uint8_t dev_id)
84 {
85         struct rte_eventdev *dev;
86
87         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
88         dev = &rte_eventdevs[dev_id];
89
90         return dev->data->socket_id;
91 }
92
93 int
94 rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
95 {
96         struct rte_eventdev *dev;
97
98         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
99         dev = &rte_eventdevs[dev_id];
100
101         if (dev_info == NULL)
102                 return -EINVAL;
103
104         memset(dev_info, 0, sizeof(struct rte_event_dev_info));
105
106         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
107         (*dev->dev_ops->dev_infos_get)(dev, dev_info);
108
109         dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
110
111         dev_info->dev = dev->dev;
112         return 0;
113 }
114
115 int
116 rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
117                                 uint32_t *caps)
118 {
119         struct rte_eventdev *dev;
120
121         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
122         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
123
124         dev = &rte_eventdevs[dev_id];
125
126         if (caps == NULL)
127                 return -EINVAL;
128
129         if (dev->dev_ops->eth_rx_adapter_caps_get == NULL)
130                 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
131         else
132                 *caps = 0;
133
134         return dev->dev_ops->eth_rx_adapter_caps_get ?
135                                 (*dev->dev_ops->eth_rx_adapter_caps_get)(dev,
136                                                 &rte_eth_devices[eth_port_id],
137                                                 caps)
138                                 : 0;
139 }
140
141 int
142 rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
143 {
144         struct rte_eventdev *dev;
145         const struct event_timer_adapter_ops *ops;
146
147         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
148
149         dev = &rte_eventdevs[dev_id];
150
151         if (caps == NULL)
152                 return -EINVAL;
153         *caps = 0;
154
155         return dev->dev_ops->timer_adapter_caps_get ?
156                                 (*dev->dev_ops->timer_adapter_caps_get)(dev,
157                                                                         0,
158                                                                         caps,
159                                                                         &ops)
160                                 : 0;
161 }
162
163 int
164 rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
165                                   uint32_t *caps)
166 {
167         struct rte_eventdev *dev;
168         struct rte_cryptodev *cdev;
169
170         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
171         if (!rte_cryptodev_is_valid_dev(cdev_id))
172                 return -EINVAL;
173
174         dev = &rte_eventdevs[dev_id];
175         cdev = rte_cryptodev_pmd_get_dev(cdev_id);
176
177         if (caps == NULL)
178                 return -EINVAL;
179         *caps = 0;
180
181         return dev->dev_ops->crypto_adapter_caps_get ?
182                 (*dev->dev_ops->crypto_adapter_caps_get)
183                 (dev, cdev, caps) : -ENOTSUP;
184 }
185
186 int
187 rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
188                                 uint32_t *caps)
189 {
190         struct rte_eventdev *dev;
191         struct rte_eth_dev *eth_dev;
192
193         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
194         RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL);
195
196         dev = &rte_eventdevs[dev_id];
197         eth_dev = &rte_eth_devices[eth_port_id];
198
199         if (caps == NULL)
200                 return -EINVAL;
201
202         if (dev->dev_ops->eth_tx_adapter_caps_get == NULL)
203                 *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR;
204         else
205                 *caps = 0;
206
207         return dev->dev_ops->eth_tx_adapter_caps_get ?
208                         (*dev->dev_ops->eth_tx_adapter_caps_get)(dev,
209                                                                 eth_dev,
210                                                                 caps)
211                         : 0;
212 }
213
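/*
 * Internal helper for rte_event_dev_configure(): shrink or grow the set of
 * configured queues. Queues above the new count are released through the
 * PMD's queue_release op, the config of newly added queues is zeroed, and
 * dev->data->nb_queues is updated.
 */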
214 static inline int
215 event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
216 {
217         uint8_t old_nb_queues = dev->data->nb_queues;
218         struct rte_event_queue_conf *queues_cfg;
219         unsigned int i;
220
221         RTE_EDEV_LOG_DEBUG("Setup %d queues on device %u", nb_queues,
222                          dev->data->dev_id);
223
224         if (nb_queues != 0) {
225                 queues_cfg = dev->data->queues_cfg;
226                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
227
228                 for (i = nb_queues; i < old_nb_queues; i++)
229                         (*dev->dev_ops->queue_release)(dev, i);
230
231
232                 if (nb_queues > old_nb_queues) {
233                         uint8_t new_qs = nb_queues - old_nb_queues;
234
235                         memset(queues_cfg + old_nb_queues, 0,
236                                 sizeof(queues_cfg[0]) * new_qs);
237                 }
238         } else {
239                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_release, -ENOTSUP);
240
241                 for (i = nb_queues; i < old_nb_queues; i++)
242                         (*dev->dev_ops->queue_release)(dev, i);
243         }
244
245         dev->data->nb_queues = nb_queues;
246         return 0;
247 }
248
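/*
 * dev->data->links_map is a flat array holding RTE_EVENT_MAX_QUEUES_PER_DEV
 * entries per port. Each entry stores the priority of the port-to-queue
 * link, or the sentinel below when the queue is not linked to that port.
 */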
249 #define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
250
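/*
 * Counterpart of event_dev_queue_config() for ports: release ports above the
 * new count, zero the config of newly added ports and mark their links_map
 * entries invalid, then update dev->data->nb_ports.
 */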
251 static inline int
252 event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
253 {
254         uint8_t old_nb_ports = dev->data->nb_ports;
255         void **ports;
256         uint16_t *links_map;
257         struct rte_event_port_conf *ports_cfg;
258         unsigned int i;
259
260         RTE_EDEV_LOG_DEBUG("Setup %d ports on device %u", nb_ports,
261                          dev->data->dev_id);
262
263         if (nb_ports != 0) { /* re-config */
264                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
265
266                 ports = dev->data->ports;
267                 ports_cfg = dev->data->ports_cfg;
268                 links_map = dev->data->links_map;
269
270                 for (i = nb_ports; i < old_nb_ports; i++)
271                         (*dev->dev_ops->port_release)(ports[i]);
272
273                 if (nb_ports > old_nb_ports) {
274                         uint8_t new_ps = nb_ports - old_nb_ports;
275                         unsigned int old_links_map_end =
276                                 old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
277                         unsigned int links_map_end =
278                                 nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
279
280                         memset(ports + old_nb_ports, 0,
281                                 sizeof(ports[0]) * new_ps);
282                         memset(ports_cfg + old_nb_ports, 0,
283                                 sizeof(ports_cfg[0]) * new_ps);
284                         for (i = old_links_map_end; i < links_map_end; i++)
285                                 links_map[i] =
286                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
287                 }
288         } else {
289                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
290
291                 ports = dev->data->ports;
292                 for (i = nb_ports; i < old_nb_ports; i++) {
293                         (*dev->dev_ops->port_release)(ports[i]);
294                         ports[i] = NULL;
295                 }
296         }
297
298         dev->data->nb_ports = nb_ports;
299         return 0;
300 }
301
302 int
303 rte_event_dev_configure(uint8_t dev_id,
304                         const struct rte_event_dev_config *dev_conf)
305 {
306         struct rte_event_dev_info info;
307         struct rte_eventdev *dev;
308         int diag;
309
310         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
311         dev = &rte_eventdevs[dev_id];
312
313         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
314         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
315
316         if (dev->data->dev_started) {
317                 RTE_EDEV_LOG_ERR(
318                     "device %d must be stopped to allow configuration", dev_id);
319                 return -EBUSY;
320         }
321
322         if (dev_conf == NULL)
323                 return -EINVAL;
324
325         (*dev->dev_ops->dev_infos_get)(dev, &info);
326
327         /* Check dequeue_timeout_ns value is within the supported range */
328         if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
329                 if (dev_conf->dequeue_timeout_ns &&
330                     (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
331                         || dev_conf->dequeue_timeout_ns >
332                                  info.max_dequeue_timeout_ns)) {
333                         RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
334                         " min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
335                         dev_id, dev_conf->dequeue_timeout_ns,
336                         info.min_dequeue_timeout_ns,
337                         info.max_dequeue_timeout_ns);
338                         return -EINVAL;
339                 }
340         }
341
342         /* Check nb_events_limit is within the supported range */
343         if (dev_conf->nb_events_limit > info.max_num_events) {
344                 RTE_EDEV_LOG_ERR("dev%d nb_events_limit=%d > max_num_events=%d",
345                 dev_id, dev_conf->nb_events_limit, info.max_num_events);
346                 return -EINVAL;
347         }
348
349         /* Check nb_event_queues is within the supported range */
350         if (!dev_conf->nb_event_queues) {
351                 RTE_EDEV_LOG_ERR("dev%d nb_event_queues cannot be zero",
352                                         dev_id);
353                 return -EINVAL;
354         }
355         if (dev_conf->nb_event_queues > info.max_event_queues +
356                         info.max_single_link_event_port_queue_pairs) {
357                 RTE_EDEV_LOG_ERR("%d nb_event_queues=%d > max_event_queues=%d + max_single_link_event_port_queue_pairs=%d",
358                                  dev_id, dev_conf->nb_event_queues,
359                                  info.max_event_queues,
360                                  info.max_single_link_event_port_queue_pairs);
361                 return -EINVAL;
362         }
363         if (dev_conf->nb_event_queues -
364                         dev_conf->nb_single_link_event_port_queues >
365                         info.max_event_queues) {
366                 RTE_EDEV_LOG_ERR("id%d nb_event_queues=%d - nb_single_link_event_port_queues=%d > max_event_queues=%d",
367                                  dev_id, dev_conf->nb_event_queues,
368                                  dev_conf->nb_single_link_event_port_queues,
369                                  info.max_event_queues);
370                 return -EINVAL;
371         }
372         if (dev_conf->nb_single_link_event_port_queues >
373                         dev_conf->nb_event_queues) {
374                 RTE_EDEV_LOG_ERR("dev%d nb_single_link_event_port_queues=%d > nb_event_queues=%d",
375                                  dev_id,
376                                  dev_conf->nb_single_link_event_port_queues,
377                                  dev_conf->nb_event_queues);
378                 return -EINVAL;
379         }
380
381         /* Check nb_event_ports is within the supported range */
382         if (!dev_conf->nb_event_ports) {
383                 RTE_EDEV_LOG_ERR("dev%d nb_event_ports cannot be zero", dev_id);
384                 return -EINVAL;
385         }
386         if (dev_conf->nb_event_ports > info.max_event_ports +
387                         info.max_single_link_event_port_queue_pairs) {
388                 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d > max_event_ports=%d + max_single_link_event_port_queue_pairs=%d",
389                                  dev_id, dev_conf->nb_event_ports,
390                                  info.max_event_ports,
391                                  info.max_single_link_event_port_queue_pairs);
392                 return -EINVAL;
393         }
394         if (dev_conf->nb_event_ports -
395                         dev_conf->nb_single_link_event_port_queues
396                         > info.max_event_ports) {
397                 RTE_EDEV_LOG_ERR("id%d nb_event_ports=%d - nb_single_link_event_port_queues=%d > max_event_ports=%d",
398                                  dev_id, dev_conf->nb_event_ports,
399                                  dev_conf->nb_single_link_event_port_queues,
400                                  info.max_event_ports);
401                 return -EINVAL;
402         }
403
404         if (dev_conf->nb_single_link_event_port_queues >
405             dev_conf->nb_event_ports) {
406                 RTE_EDEV_LOG_ERR(
407                                  "dev%d nb_single_link_event_port_queues=%d > nb_event_ports=%d",
408                                  dev_id,
409                                  dev_conf->nb_single_link_event_port_queues,
410                                  dev_conf->nb_event_ports);
411                 return -EINVAL;
412         }
413
414         /* Check nb_event_queue_flows is within the supported range */
415         if (!dev_conf->nb_event_queue_flows) {
416                 RTE_EDEV_LOG_ERR("dev%d nb_flows cannot be zero", dev_id);
417                 return -EINVAL;
418         }
419         if (dev_conf->nb_event_queue_flows > info.max_event_queue_flows) {
420                 RTE_EDEV_LOG_ERR("dev%d nb_flows=%x > max_flows=%x",
421                 dev_id, dev_conf->nb_event_queue_flows,
422                 info.max_event_queue_flows);
423                 return -EINVAL;
424         }
425
426         /* Check nb_event_port_dequeue_depth is within the supported range */
427         if (!dev_conf->nb_event_port_dequeue_depth) {
428                 RTE_EDEV_LOG_ERR("dev%d nb_dequeue_depth cannot be zero",
429                                         dev_id);
430                 return -EINVAL;
431         }
432         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
433                  (dev_conf->nb_event_port_dequeue_depth >
434                          info.max_event_port_dequeue_depth)) {
435                 RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
436                 dev_id, dev_conf->nb_event_port_dequeue_depth,
437                 info.max_event_port_dequeue_depth);
438                 return -EINVAL;
439         }
440
441         /* Check nb_event_port_enqueue_depth is within the supported range */
442         if (!dev_conf->nb_event_port_enqueue_depth) {
443                 RTE_EDEV_LOG_ERR("dev%d nb_enqueue_depth cannot be zero",
444                                         dev_id);
445                 return -EINVAL;
446         }
447         if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
448                 (dev_conf->nb_event_port_enqueue_depth >
449                          info.max_event_port_enqueue_depth)) {
450                 RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
451                 dev_id, dev_conf->nb_event_port_enqueue_depth,
452                 info.max_event_port_enqueue_depth);
453                 return -EINVAL;
454         }
455
456         /* Copy the dev_conf parameter into the dev structure */
457         memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
458
459         /* Setup new number of queues and reconfigure device. */
460         diag = event_dev_queue_config(dev, dev_conf->nb_event_queues);
461         if (diag != 0) {
462                 RTE_EDEV_LOG_ERR("dev%d event_dev_queue_config = %d", dev_id,
463                                  diag);
464                 return diag;
465         }
466
467         /* Setup new number of ports and reconfigure device. */
468         diag = event_dev_port_config(dev, dev_conf->nb_event_ports);
469         if (diag != 0) {
470                 event_dev_queue_config(dev, 0);
471                 RTE_EDEV_LOG_ERR("dev%d event_dev_port_config = %d", dev_id,
472                                  diag);
473                 return diag;
474         }
475
476         event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
477
478         /* Configure the device */
479         diag = (*dev->dev_ops->dev_configure)(dev);
480         if (diag != 0) {
481                 RTE_EDEV_LOG_ERR("dev%d dev_configure = %d", dev_id, diag);
482                 event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
483                 event_dev_queue_config(dev, 0);
484                 event_dev_port_config(dev, 0);
485         }
486
487         dev->data->event_dev_cap = info.event_dev_cap;
488         rte_eventdev_trace_configure(dev_id, dev_conf, diag);
489         return diag;
490 }
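
/*
 * A minimal configuration sketch for rte_event_dev_configure() above
 * (illustrative only, not part of this file): query the device limits and
 * configure a single queue and a single port within them. dev_id and the
 * error handling are assumed/omitted here.
 *
 *	struct rte_event_dev_info info;
 *	struct rte_event_dev_config cfg = {0};
 *
 *	rte_event_dev_info_get(dev_id, &info);
 *	cfg.nb_event_queues = 1;
 *	cfg.nb_event_ports = 1;
 *	cfg.nb_events_limit = info.max_num_events;
 *	cfg.nb_event_queue_flows = info.max_event_queue_flows;
 *	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
 *	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
 *	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
 *	rte_event_dev_configure(dev_id, &cfg);
 */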
491
492 static inline int
493 is_valid_queue(struct rte_eventdev *dev, uint8_t queue_id)
494 {
495         if (queue_id < dev->data->nb_queues && queue_id <
496                                 RTE_EVENT_MAX_QUEUES_PER_DEV)
497                 return 1;
498         else
499                 return 0;
500 }
501
502 int
503 rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
504                                  struct rte_event_queue_conf *queue_conf)
505 {
506         struct rte_eventdev *dev;
507
508         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
509         dev = &rte_eventdevs[dev_id];
510
511         if (queue_conf == NULL)
512                 return -EINVAL;
513
514         if (!is_valid_queue(dev, queue_id)) {
515                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
516                 return -EINVAL;
517         }
518
519         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf, -ENOTSUP);
520         memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
521         (*dev->dev_ops->queue_def_conf)(dev, queue_id, queue_conf);
522         return 0;
523 }
524
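/*
 * A queue configuration is treated as atomic (or ordered, in the twin helper
 * below) when it is not a single-link queue and either allows all schedule
 * types or explicitly requests that schedule type. rte_event_queue_setup()
 * uses these checks to decide which flow/sequence limits to validate.
 */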
525 static inline int
526 is_valid_atomic_queue_conf(const struct rte_event_queue_conf *queue_conf)
527 {
528         if (queue_conf &&
529                 !(queue_conf->event_queue_cfg &
530                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
531                 ((queue_conf->event_queue_cfg &
532                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
533                 (queue_conf->schedule_type
534                         == RTE_SCHED_TYPE_ATOMIC)
535                 ))
536                 return 1;
537         else
538                 return 0;
539 }
540
541 static inline int
542 is_valid_ordered_queue_conf(const struct rte_event_queue_conf *queue_conf)
543 {
544         if (queue_conf &&
545                 !(queue_conf->event_queue_cfg &
546                   RTE_EVENT_QUEUE_CFG_SINGLE_LINK) &&
547                 ((queue_conf->event_queue_cfg &
548                          RTE_EVENT_QUEUE_CFG_ALL_TYPES) ||
549                 (queue_conf->schedule_type
550                         == RTE_SCHED_TYPE_ORDERED)
551                 ))
552                 return 1;
553         else
554                 return 0;
555 }
556
557
558 int
559 rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
560                       const struct rte_event_queue_conf *queue_conf)
561 {
562         struct rte_eventdev *dev;
563         struct rte_event_queue_conf def_conf;
564
565         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
566         dev = &rte_eventdevs[dev_id];
567
568         if (!is_valid_queue(dev, queue_id)) {
569                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
570                 return -EINVAL;
571         }
572
573         /* Check nb_atomic_flows limit */
574         if (is_valid_atomic_queue_conf(queue_conf)) {
575                 if (queue_conf->nb_atomic_flows == 0 ||
576                     queue_conf->nb_atomic_flows >
577                         dev->data->dev_conf.nb_event_queue_flows) {
578                         RTE_EDEV_LOG_ERR(
579                 "dev%d queue%d Invalid nb_atomic_flows=%d max_flows=%d",
580                         dev_id, queue_id, queue_conf->nb_atomic_flows,
581                         dev->data->dev_conf.nb_event_queue_flows);
582                         return -EINVAL;
583                 }
584         }
585
586         /* Check nb_atomic_order_sequences limit */
587         if (is_valid_ordered_queue_conf(queue_conf)) {
588                 if (queue_conf->nb_atomic_order_sequences == 0 ||
589                     queue_conf->nb_atomic_order_sequences >
590                         dev->data->dev_conf.nb_event_queue_flows) {
591                         RTE_EDEV_LOG_ERR(
592                 "dev%d queue%d Invalid nb_atomic_order_seq=%d max_flows=%d",
593                         dev_id, queue_id, queue_conf->nb_atomic_order_sequences,
594                         dev->data->dev_conf.nb_event_queue_flows);
595                         return -EINVAL;
596                 }
597         }
598
599         if (dev->data->dev_started) {
600                 RTE_EDEV_LOG_ERR(
601                     "device %d must be stopped to allow queue setup", dev_id);
602                 return -EBUSY;
603         }
604
605         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_setup, -ENOTSUP);
606
607         if (queue_conf == NULL) {
608                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
609                                         -ENOTSUP);
610                 (*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
611                 queue_conf = &def_conf;
612         }
613
614         dev->data->queues_cfg[queue_id] = *queue_conf;
615         rte_eventdev_trace_queue_setup(dev_id, queue_id, queue_conf);
616         return (*dev->dev_ops->queue_setup)(dev, queue_id, queue_conf);
617 }
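
/*
 * Illustrative sketch only for rte_event_queue_setup() above: configure
 * queue 0 as an atomic queue. Passing a NULL conf instead would apply the
 * PMD default from queue_def_conf. The flow count of 1024 is an assumption
 * and must not exceed the configured nb_event_queue_flows.
 *
 *	struct rte_event_queue_conf qcfg;
 *
 *	rte_event_queue_default_conf_get(dev_id, 0, &qcfg);
 *	qcfg.schedule_type = RTE_SCHED_TYPE_ATOMIC;
 *	qcfg.nb_atomic_flows = 1024;
 *	rte_event_queue_setup(dev_id, 0, &qcfg);
 */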
618
619 static inline int
620 is_valid_port(struct rte_eventdev *dev, uint8_t port_id)
621 {
622         if (port_id < dev->data->nb_ports)
623                 return 1;
624         else
625                 return 0;
626 }
627
628 int
629 rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
630                                  struct rte_event_port_conf *port_conf)
631 {
632         struct rte_eventdev *dev;
633
634         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
635         dev = &rte_eventdevs[dev_id];
636
637         if (port_conf == NULL)
638                 return -EINVAL;
639
640         if (!is_valid_port(dev, port_id)) {
641                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
642                 return -EINVAL;
643         }
644
645         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf, -ENOTSUP);
646         memset(port_conf, 0, sizeof(struct rte_event_port_conf));
647         (*dev->dev_ops->port_def_conf)(dev, port_id, port_conf);
648         return 0;
649 }
650
651 int
652 rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
653                      const struct rte_event_port_conf *port_conf)
654 {
655         struct rte_eventdev *dev;
656         struct rte_event_port_conf def_conf;
657         int diag;
658
659         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
660         dev = &rte_eventdevs[dev_id];
661
662         if (!is_valid_port(dev, port_id)) {
663                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
664                 return -EINVAL;
665         }
666
667         /* Check new_event_threshold limit */
668         if ((port_conf && !port_conf->new_event_threshold) ||
669                         (port_conf && port_conf->new_event_threshold >
670                                  dev->data->dev_conf.nb_events_limit)) {
671                 RTE_EDEV_LOG_ERR(
672                    "dev%d port%d Invalid event_threshold=%d nb_events_limit=%d",
673                         dev_id, port_id, port_conf->new_event_threshold,
674                         dev->data->dev_conf.nb_events_limit);
675                 return -EINVAL;
676         }
677
678         /* Check dequeue_depth limit */
679         if ((port_conf && !port_conf->dequeue_depth) ||
680                         (port_conf && port_conf->dequeue_depth >
681                 dev->data->dev_conf.nb_event_port_dequeue_depth)) {
682                 RTE_EDEV_LOG_ERR(
683                    "dev%d port%d Invalid dequeue depth=%d max_dequeue_depth=%d",
684                         dev_id, port_id, port_conf->dequeue_depth,
685                         dev->data->dev_conf.nb_event_port_dequeue_depth);
686                 return -EINVAL;
687         }
688
689         /* Check enqueue_depth limit */
690         if ((port_conf && !port_conf->enqueue_depth) ||
691                         (port_conf && port_conf->enqueue_depth >
692                 dev->data->dev_conf.nb_event_port_enqueue_depth)) {
693                 RTE_EDEV_LOG_ERR(
694                    "dev%d port%d Invalid enqueue depth=%d max_enqueue_depth=%d",
695                         dev_id, port_id, port_conf->enqueue_depth,
696                         dev->data->dev_conf.nb_event_port_enqueue_depth);
697                 return -EINVAL;
698         }
699
700         if (port_conf &&
701             (port_conf->event_port_cfg & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL) &&
702             !(dev->data->event_dev_cap &
703               RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)) {
704                 RTE_EDEV_LOG_ERR(
705                    "dev%d port%d Implicit release disable not supported",
706                         dev_id, port_id);
707                 return -EINVAL;
708         }
709
710         if (dev->data->dev_started) {
711                 RTE_EDEV_LOG_ERR(
712                     "device %d must be stopped to allow port setup", dev_id);
713                 return -EBUSY;
714         }
715
716         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_setup, -ENOTSUP);
717
718         if (port_conf == NULL) {
719                 RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_def_conf,
720                                         -ENOTSUP);
721                 (*dev->dev_ops->port_def_conf)(dev, port_id, &def_conf);
722                 port_conf = &def_conf;
723         }
724
725         dev->data->ports_cfg[port_id] = *port_conf;
726
727         diag = (*dev->dev_ops->port_setup)(dev, port_id, port_conf);
728
729         /* Unlink all the queues from this port (default state after setup) */
730         if (!diag)
731                 diag = rte_event_port_unlink(dev_id, port_id, NULL, 0);
732
733         rte_eventdev_trace_port_setup(dev_id, port_id, port_conf, diag);
734         if (diag < 0)
735                 return diag;
736
737         return 0;
738 }
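
/*
 * Illustrative sketch only for rte_event_port_setup() above: set up port 0
 * with the PMD defaults, link it to every configured queue at normal
 * priority, then start the device. Port id 0 is an assumption for the
 * example.
 *
 *	rte_event_port_setup(dev_id, 0, NULL);
 *	rte_event_port_link(dev_id, 0, NULL, NULL, 0);
 *	rte_event_dev_start(dev_id);
 */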
739
740 int
741 rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
742                        uint32_t *attr_value)
743 {
744         struct rte_eventdev *dev;
745
746         if (!attr_value)
747                 return -EINVAL;
748         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
749         dev = &rte_eventdevs[dev_id];
750
751         switch (attr_id) {
752         case RTE_EVENT_DEV_ATTR_PORT_COUNT:
753                 *attr_value = dev->data->nb_ports;
754                 break;
755         case RTE_EVENT_DEV_ATTR_QUEUE_COUNT:
756                 *attr_value = dev->data->nb_queues;
757                 break;
758         case RTE_EVENT_DEV_ATTR_STARTED:
759                 *attr_value = dev->data->dev_started;
760                 break;
761         default:
762                 return -EINVAL;
763         }
764
765         return 0;
766 }
767
768 int
769 rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
770                         uint32_t *attr_value)
771 {
772         struct rte_eventdev *dev;
773
774         if (!attr_value)
775                 return -EINVAL;
776
777         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
778         dev = &rte_eventdevs[dev_id];
779         if (!is_valid_port(dev, port_id)) {
780                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
781                 return -EINVAL;
782         }
783
784         switch (attr_id) {
785         case RTE_EVENT_PORT_ATTR_ENQ_DEPTH:
786                 *attr_value = dev->data->ports_cfg[port_id].enqueue_depth;
787                 break;
788         case RTE_EVENT_PORT_ATTR_DEQ_DEPTH:
789                 *attr_value = dev->data->ports_cfg[port_id].dequeue_depth;
790                 break;
791         case RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD:
792                 *attr_value = dev->data->ports_cfg[port_id].new_event_threshold;
793                 break;
794         case RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE:
795         {
796                 uint32_t config;
797
798                 config = dev->data->ports_cfg[port_id].event_port_cfg;
799                 *attr_value = !!(config & RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
800                 break;
801         }
802         default:
803                 return -EINVAL;
804         }
805         return 0;
806 }
807
808 int
809 rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
810                         uint32_t *attr_value)
811 {
812         struct rte_event_queue_conf *conf;
813         struct rte_eventdev *dev;
814
815         if (!attr_value)
816                 return -EINVAL;
817
818         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
819         dev = &rte_eventdevs[dev_id];
820         if (!is_valid_queue(dev, queue_id)) {
821                 RTE_EDEV_LOG_ERR("Invalid queue_id=%" PRIu8, queue_id);
822                 return -EINVAL;
823         }
824
825         conf = &dev->data->queues_cfg[queue_id];
826
827         switch (attr_id) {
828         case RTE_EVENT_QUEUE_ATTR_PRIORITY:
829                 *attr_value = RTE_EVENT_DEV_PRIORITY_NORMAL;
830                 if (dev->data->event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_QOS)
831                         *attr_value = conf->priority;
832                 break;
833         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS:
834                 *attr_value = conf->nb_atomic_flows;
835                 break;
836         case RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES:
837                 *attr_value = conf->nb_atomic_order_sequences;
838                 break;
839         case RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG:
840                 *attr_value = conf->event_queue_cfg;
841                 break;
842         case RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE:
843                 if (conf->event_queue_cfg & RTE_EVENT_QUEUE_CFG_ALL_TYPES)
844                         return -EOVERFLOW;
845
846                 *attr_value = conf->schedule_type;
847                 break;
848         default:
849                 return -EINVAL;
850         }
851         return 0;
852 }
853
854 int
855 rte_event_port_link(uint8_t dev_id, uint8_t port_id,
856                     const uint8_t queues[], const uint8_t priorities[],
857                     uint16_t nb_links)
858 {
859         struct rte_eventdev *dev;
860         uint8_t queues_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
861         uint8_t priorities_list[RTE_EVENT_MAX_QUEUES_PER_DEV];
862         uint16_t *links_map;
863         int i, diag;
864
865         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
866         dev = &rte_eventdevs[dev_id];
867
868         if (*dev->dev_ops->port_link == NULL) {
869                 RTE_EDEV_LOG_ERR("Function not supported");
870                 rte_errno = ENOTSUP;
871                 return 0;
872         }
873
874         if (!is_valid_port(dev, port_id)) {
875                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
876                 rte_errno = EINVAL;
877                 return 0;
878         }
879
880         if (queues == NULL) {
881                 for (i = 0; i < dev->data->nb_queues; i++)
882                         queues_list[i] = i;
883
884                 queues = queues_list;
885                 nb_links = dev->data->nb_queues;
886         }
887
888         if (priorities == NULL) {
889                 for (i = 0; i < nb_links; i++)
890                         priorities_list[i] = RTE_EVENT_DEV_PRIORITY_NORMAL;
891
892                 priorities = priorities_list;
893         }
894
895         for (i = 0; i < nb_links; i++)
896                 if (queues[i] >= dev->data->nb_queues) {
897                         rte_errno = EINVAL;
898                         return 0;
899                 }
900
901         diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
902                                                 queues, priorities, nb_links);
903         if (diag < 0)
904                 return diag;
905
906         links_map = dev->data->links_map;
907         /* Point links_map to this port specific area */
908         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
909         for (i = 0; i < diag; i++)
910                 links_map[queues[i]] = (uint8_t)priorities[i];
911
912         rte_eventdev_trace_port_link(dev_id, port_id, nb_links, diag);
913         return diag;
914 }
915
916 int
917 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
918                       uint8_t queues[], uint16_t nb_unlinks)
919 {
920         struct rte_eventdev *dev;
921         uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
922         int i, diag, j;
923         uint16_t *links_map;
924
925         RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0);
926         dev = &rte_eventdevs[dev_id];
927
928         if (*dev->dev_ops->port_unlink == NULL) {
929                 RTE_EDEV_LOG_ERR("Function not supported");
930                 rte_errno = ENOTSUP;
931                 return 0;
932         }
933
934         if (!is_valid_port(dev, port_id)) {
935                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
936                 rte_errno = EINVAL;
937                 return 0;
938         }
939
940         links_map = dev->data->links_map;
941         /* Point links_map to this port specific area */
942         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
943
944         if (queues == NULL) {
945                 j = 0;
946                 for (i = 0; i < dev->data->nb_queues; i++) {
947                         if (links_map[i] !=
948                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
949                                 all_queues[j] = i;
950                                 j++;
951                         }
952                 }
953                 queues = all_queues;
954         } else {
955                 for (j = 0; j < nb_unlinks; j++) {
956                         if (links_map[queues[j]] ==
957                                         EVENT_QUEUE_SERVICE_PRIORITY_INVALID)
958                                 break;
959                 }
960         }
961
962         nb_unlinks = j;
963         for (i = 0; i < nb_unlinks; i++)
964                 if (queues[i] >= dev->data->nb_queues) {
965                         rte_errno = EINVAL;
966                         return 0;
967                 }
968
969         diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
970                                         queues, nb_unlinks);
971
972         if (diag < 0)
973                 return diag;
974
975         for (i = 0; i < diag; i++)
976                 links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
977
978         rte_eventdev_trace_port_unlink(dev_id, port_id, nb_unlinks, diag);
979         return diag;
980 }
981
982 int
983 rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
984 {
985         struct rte_eventdev *dev;
986
987         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
988         dev = &rte_eventdevs[dev_id];
989         if (!is_valid_port(dev, port_id)) {
990                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
991                 return -EINVAL;
992         }
993
994         /* Return 0 if the PMD does not implement unlinks in progress.
995          * This allows PMDs which handle unlink synchronously to not implement
996          * this function at all.
997          */
998         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0);
999
1000         return (*dev->dev_ops->port_unlinks_in_progress)(dev,
1001                         dev->data->ports[port_id]);
1002 }
1003
1004 int
1005 rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1006                          uint8_t queues[], uint8_t priorities[])
1007 {
1008         struct rte_eventdev *dev;
1009         uint16_t *links_map;
1010         int i, count = 0;
1011
1012         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1013         dev = &rte_eventdevs[dev_id];
1014         if (!is_valid_port(dev, port_id)) {
1015                 RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id);
1016                 return -EINVAL;
1017         }
1018
1019         links_map = dev->data->links_map;
1020         /* Point links_map to this port specific area */
1021         links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
1022         for (i = 0; i < dev->data->nb_queues; i++) {
1023                 if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
1024                         queues[count] = i;
1025                         priorities[count] = (uint8_t)links_map[i];
1026                         ++count;
1027                 }
1028         }
1029         return count;
1030 }
1031
1032 int
1033 rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1034                                  uint64_t *timeout_ticks)
1035 {
1036         struct rte_eventdev *dev;
1037
1038         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1039         dev = &rte_eventdevs[dev_id];
1040         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->timeout_ticks, -ENOTSUP);
1041
1042         if (timeout_ticks == NULL)
1043                 return -EINVAL;
1044
1045         return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
1046 }
1047
1048 int
1049 rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
1050 {
1051         struct rte_eventdev *dev;
1052
1053         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1054         dev = &rte_eventdevs[dev_id];
1055
1056         if (service_id == NULL)
1057                 return -EINVAL;
1058
1059         if (dev->data->service_inited)
1060                 *service_id = dev->data->service_id;
1061
1062         return dev->data->service_inited ? 0 : -ESRCH;
1063 }
1064
1065 int
1066 rte_event_dev_dump(uint8_t dev_id, FILE *f)
1067 {
1068         struct rte_eventdev *dev;
1069
1070         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1071         dev = &rte_eventdevs[dev_id];
1072         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dump, -ENOTSUP);
1073         if (f == NULL)
1074                 return -EINVAL;
1075
1076         (*dev->dev_ops->dump)(dev, f);
1077         return 0;
1078
1079 }
1080
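/*
 * Ask the PMD how many xstats entries exist for the given mode and
 * queue/port id by calling xstats_get_names with a zero-sized buffer.
 * Returns 0 when the driver does not implement xstats.
 */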
1081 static int
1082 xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1083                 uint8_t queue_port_id)
1084 {
1085         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1086         if (dev->dev_ops->xstats_get_names != NULL)
1087                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1088                                                         queue_port_id,
1089                                                         NULL, NULL, 0);
1090         return 0;
1091 }
1092
1093 int
1094 rte_event_dev_xstats_names_get(uint8_t dev_id,
1095                 enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
1096                 struct rte_event_dev_xstats_name *xstats_names,
1097                 unsigned int *ids, unsigned int size)
1098 {
1099         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1100         const int cnt_expected_entries = xstats_get_count(dev_id, mode,
1101                                                           queue_port_id);
1102         if (xstats_names == NULL || cnt_expected_entries < 0 ||
1103                         (int)size < cnt_expected_entries)
1104                 return cnt_expected_entries;
1105
1106         /* dev_id checked above */
1107         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1108
1109         if (dev->dev_ops->xstats_get_names != NULL)
1110                 return (*dev->dev_ops->xstats_get_names)(dev, mode,
1111                                 queue_port_id, xstats_names, ids, size);
1112
1113         return -ENOTSUP;
1114 }
1115
1116 /* retrieve eventdev extended statistics */
1117 int
1118 rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
1119                 uint8_t queue_port_id, const unsigned int ids[],
1120                 uint64_t values[], unsigned int n)
1121 {
1122         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
1123         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1124
1125         /* implemented by the driver */
1126         if (dev->dev_ops->xstats_get != NULL)
1127                 return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
1128                                 ids, values, n);
1129         return -ENOTSUP;
1130 }
1131
1132 uint64_t
1133 rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
1134                 unsigned int *id)
1135 {
1136         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
1137         const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1138         unsigned int temp = -1;
1139
1140         if (id != NULL)
1141                 *id = (unsigned int)-1;
1142         else
1143                 id = &temp; /* ensure driver never gets a NULL value */
1144
1145         /* implemented by driver */
1146         if (dev->dev_ops->xstats_get_by_name != NULL)
1147                 return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
1148         return -ENOTSUP;
1149 }
1150
1151 int rte_event_dev_xstats_reset(uint8_t dev_id,
1152                 enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
1153                 const uint32_t ids[], uint32_t nb_ids)
1154 {
1155         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1156         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1157
1158         if (dev->dev_ops->xstats_reset != NULL)
1159                 return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
1160                                                         ids, nb_ids);
1161         return -ENOTSUP;
1162 }
1163
1164 int rte_event_pmd_selftest_seqn_dynfield_offset = -1;
1165
1166 int rte_event_dev_selftest(uint8_t dev_id)
1167 {
1168         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1169         static const struct rte_mbuf_dynfield test_seqn_dynfield_desc = {
1170                 .name = "rte_event_pmd_selftest_seqn_dynfield",
1171                 .size = sizeof(rte_event_pmd_selftest_seqn_t),
1172                 .align = __alignof__(rte_event_pmd_selftest_seqn_t),
1173         };
1174         struct rte_eventdev *dev = &rte_eventdevs[dev_id];
1175
1176         if (dev->dev_ops->dev_selftest != NULL) {
1177                 rte_event_pmd_selftest_seqn_dynfield_offset =
1178                         rte_mbuf_dynfield_register(&test_seqn_dynfield_desc);
1179                 if (rte_event_pmd_selftest_seqn_dynfield_offset < 0)
1180                         return -ENOMEM;
1181                 return (*dev->dev_ops->dev_selftest)();
1182         }
1183         return -ENOTSUP;
1184 }
1185
1186 struct rte_mempool *
1187 rte_event_vector_pool_create(const char *name, unsigned int n,
1188                              unsigned int cache_size, uint16_t nb_elem,
1189                              int socket_id)
1190 {
1191         const char *mp_ops_name;
1192         struct rte_mempool *mp;
1193         unsigned int elt_sz;
1194         int ret;
1195
1196         if (!nb_elem) {
1197                 RTE_LOG(ERR, EVENTDEV,
1198                         "Invalid number of elements=%d requested\n", nb_elem);
1199                 rte_errno = EINVAL;
1200                 return NULL;
1201         }
1202
1203         elt_sz =
1204                 sizeof(struct rte_event_vector) + (nb_elem * sizeof(uintptr_t));
1205         mp = rte_mempool_create_empty(name, n, elt_sz, cache_size, 0, socket_id,
1206                                       0);
1207         if (mp == NULL)
1208                 return NULL;
1209
1210         mp_ops_name = rte_mbuf_best_mempool_ops();
1211         ret = rte_mempool_set_ops_byname(mp, mp_ops_name, NULL);
1212         if (ret != 0) {
1213                 RTE_LOG(ERR, EVENTDEV, "error setting mempool handler\n");
1214                 goto err;
1215         }
1216
1217         ret = rte_mempool_populate_default(mp);
1218         if (ret < 0)
1219                 goto err;
1220
1221         return mp;
1222 err:
1223         rte_mempool_free(mp);
1224         rte_errno = -ret;
1225         return NULL;
1226 }
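
/*
 * Illustrative sketch only for rte_event_vector_pool_create() above: create
 * a pool of event vectors that can each hold 64 object pointers. The pool
 * size, cache size and vector length are assumptions for the example, not
 * recommendations.
 *
 *	struct rte_mempool *vp;
 *
 *	vp = rte_event_vector_pool_create("ev_vec_pool", 16384, 256, 64,
 *					  rte_socket_id());
 *	if (vp == NULL)
 *		rte_exit(EXIT_FAILURE, "vector pool creation failed\n");
 */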
1227
1228 int
1229 rte_event_dev_start(uint8_t dev_id)
1230 {
1231         struct rte_eventdev *dev;
1232         int diag;
1233
1234         RTE_EDEV_LOG_DEBUG("Start dev_id=%" PRIu8, dev_id);
1235
1236         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1237         dev = &rte_eventdevs[dev_id];
1238         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_start, -ENOTSUP);
1239
1240         if (dev->data->dev_started != 0) {
1241                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already started",
1242                         dev_id);
1243                 return 0;
1244         }
1245
1246         diag = (*dev->dev_ops->dev_start)(dev);
1247         rte_eventdev_trace_start(dev_id, diag);
1248         if (diag == 0)
1249                 dev->data->dev_started = 1;
1250         else
1251                 return diag;
1252
1253         event_dev_fp_ops_set(rte_event_fp_ops + dev_id, dev);
1254
1255         return 0;
1256 }
1257
1258 int
1259 rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1260                 eventdev_stop_flush_t callback, void *userdata)
1261 {
1262         struct rte_eventdev *dev;
1263
1264         RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id);
1265
1266         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1267         dev = &rte_eventdevs[dev_id];
1268
1269         dev->dev_ops->dev_stop_flush = callback;
1270         dev->data->dev_stop_flush_arg = userdata;
1271
1272         return 0;
1273 }
1274
1275 void
1276 rte_event_dev_stop(uint8_t dev_id)
1277 {
1278         struct rte_eventdev *dev;
1279
1280         RTE_EDEV_LOG_DEBUG("Stop dev_id=%" PRIu8, dev_id);
1281
1282         RTE_EVENTDEV_VALID_DEVID_OR_RET(dev_id);
1283         dev = &rte_eventdevs[dev_id];
1284         RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_stop);
1285
1286         if (dev->data->dev_started == 0) {
1287                 RTE_EDEV_LOG_ERR("Device with dev_id=%" PRIu8 " already stopped",
1288                         dev_id);
1289                 return;
1290         }
1291
1292         dev->data->dev_started = 0;
1293         (*dev->dev_ops->dev_stop)(dev);
1294         rte_eventdev_trace_stop(dev_id);
1295         event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1296 }
1297
1298 int
1299 rte_event_dev_close(uint8_t dev_id)
1300 {
1301         struct rte_eventdev *dev;
1302
1303         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1304         dev = &rte_eventdevs[dev_id];
1305         RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
1306
1307         /* Device must be stopped before it can be closed */
1308         if (dev->data->dev_started == 1) {
1309                 RTE_EDEV_LOG_ERR("Device %u must be stopped before closing",
1310                                 dev_id);
1311                 return -EBUSY;
1312         }
1313
1314         event_dev_fp_ops_reset(rte_event_fp_ops + dev_id);
1315         rte_eventdev_trace_close(dev_id);
1316         return (*dev->dev_ops->dev_close)(dev);
1317 }
1318
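/*
 * Reserve (primary process) or look up (secondary process) the shared
 * memzone backing rte_eventdev_data for this device id. The primary process
 * also clears the data and marks every links_map entry as unlinked.
 */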
1319 static inline int
1320 eventdev_data_alloc(uint8_t dev_id, struct rte_eventdev_data **data,
1321                     int socket_id)
1322 {
1323         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1324         const struct rte_memzone *mz;
1325         int n;
1326
1327         /* Generate memzone name */
1328         n = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u", dev_id);
1329         if (n >= (int)sizeof(mz_name))
1330                 return -EINVAL;
1331
1332         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1333                 mz = rte_memzone_reserve(mz_name,
1334                                 sizeof(struct rte_eventdev_data),
1335                                 socket_id, 0);
1336         } else
1337                 mz = rte_memzone_lookup(mz_name);
1338
1339         if (mz == NULL)
1340                 return -ENOMEM;
1341
1342         *data = mz->addr;
1343         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1344                 memset(*data, 0, sizeof(struct rte_eventdev_data));
1345                 for (n = 0; n < RTE_EVENT_MAX_PORTS_PER_DEV *
1346                                         RTE_EVENT_MAX_QUEUES_PER_DEV;
1347                      n++)
1348                         (*data)->links_map[n] =
1349                                 EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
1350         }
1351
1352         return 0;
1353 }
1354
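/*
 * Return the first detached slot in rte_event_devices[], or
 * RTE_EVENT_MAX_DEVS when every slot is already in use.
 */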
1355 static inline uint8_t
1356 eventdev_find_free_device_index(void)
1357 {
1358         uint8_t dev_id;
1359
1360         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1361                 if (rte_eventdevs[dev_id].attached ==
1362                                 RTE_EVENTDEV_DETACHED)
1363                         return dev_id;
1364         }
1365         return RTE_EVENT_MAX_DEVS;
1366 }
1367
1368 struct rte_eventdev *
1369 rte_event_pmd_allocate(const char *name, int socket_id)
1370 {
1371         struct rte_eventdev *eventdev;
1372         uint8_t dev_id;
1373
1374         if (rte_event_pmd_get_named_dev(name) != NULL) {
1375                 RTE_EDEV_LOG_ERR("Event device with name %s already "
1376                                 "allocated!", name);
1377                 return NULL;
1378         }
1379
1380         dev_id = eventdev_find_free_device_index();
1381         if (dev_id == RTE_EVENT_MAX_DEVS) {
1382                 RTE_EDEV_LOG_ERR("Reached maximum number of event devices");
1383                 return NULL;
1384         }
1385
1386         eventdev = &rte_eventdevs[dev_id];
1387
1388         if (eventdev->data == NULL) {
1389                 struct rte_eventdev_data *eventdev_data = NULL;
1390
1391                 int retval =
1392                         eventdev_data_alloc(dev_id, &eventdev_data, socket_id);
1393
1394                 if (retval < 0 || eventdev_data == NULL)
1395                         return NULL;
1396
1397                 eventdev->data = eventdev_data;
1398
1399                 if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1400
1401                         strlcpy(eventdev->data->name, name,
1402                                 RTE_EVENTDEV_NAME_MAX_LEN);
1403
1404                         eventdev->data->dev_id = dev_id;
1405                         eventdev->data->socket_id = socket_id;
1406                         eventdev->data->dev_started = 0;
1407                 }
1408
1409                 eventdev->attached = RTE_EVENTDEV_ATTACHED;
1410                 eventdev_globals.nb_devs++;
1411         }
1412
1413         return eventdev;
1414 }
1415
1416 int
1417 rte_event_pmd_release(struct rte_eventdev *eventdev)
1418 {
1419         int ret;
1420         char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
1421         const struct rte_memzone *mz;
1422
1423         if (eventdev == NULL)
1424                 return -EINVAL;
1425
1426         event_dev_fp_ops_reset(rte_event_fp_ops + eventdev->data->dev_id);
1427         eventdev->attached = RTE_EVENTDEV_DETACHED;
1428         eventdev_globals.nb_devs--;
1429
1430         if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
1431                 rte_free(eventdev->data->dev_private);
1432
1433                 /* Generate memzone name */
1434                 ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
1435                                 eventdev->data->dev_id);
1436                 if (ret >= (int)sizeof(mz_name))
1437                         return -EINVAL;
1438
1439                 mz = rte_memzone_lookup(mz_name);
1440                 if (mz == NULL)
1441                         return -ENOMEM;
1442
1443                 ret = rte_memzone_free(mz);
1444                 if (ret)
1445                         return ret;
1446         }
1447
1448         eventdev->data = NULL;
1449         return 0;
1450 }
1451
1452 void
1453 event_dev_probing_finish(struct rte_eventdev *eventdev)
1454 {
1455         if (eventdev == NULL)
1456                 return;
1457
1458         event_dev_fp_ops_set(rte_event_fp_ops + eventdev->data->dev_id,
1459                              eventdev);
1460 }
1461
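/*
 * Telemetry callbacks. Apart from the device list, each handler parses a
 * device id (and, for the queue-links handler, a port id) from the parameter
 * string and reports the requested information as a telemetry array or dict.
 */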
1462 static int
1463 handle_dev_list(const char *cmd __rte_unused,
1464                 const char *params __rte_unused,
1465                 struct rte_tel_data *d)
1466 {
1467         uint8_t dev_id;
1468         int ndev = rte_event_dev_count();
1469
1470         if (ndev < 1)
1471                 return -1;
1472
1473         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1474         for (dev_id = 0; dev_id < RTE_EVENT_MAX_DEVS; dev_id++) {
1475                 if (rte_eventdevs[dev_id].attached ==
1476                                 RTE_EVENTDEV_ATTACHED)
1477                         rte_tel_data_add_array_int(d, dev_id);
1478         }
1479
1480         return 0;
1481 }
1482
1483 static int
1484 handle_port_list(const char *cmd __rte_unused,
1485                  const char *params,
1486                  struct rte_tel_data *d)
1487 {
1488         int i;
1489         uint8_t dev_id;
1490         struct rte_eventdev *dev;
1491         char *end_param;
1492
1493         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1494                 return -1;
1495
1496         dev_id = strtoul(params, &end_param, 10);
1497         if (*end_param != '\0')
1498                 RTE_EDEV_LOG_DEBUG(
1499                         "Extra parameters passed to eventdev telemetry command, ignoring");
1500
1501         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1502         dev = &rte_eventdevs[dev_id];
1503
1504         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1505         for (i = 0; i < dev->data->nb_ports; i++)
1506                 rte_tel_data_add_array_int(d, i);
1507
1508         return 0;
1509 }
1510
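/* Telemetry handler for /eventdev/queue_list: given a device ID, return the
 * IDs of its configured queues.
 */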
1511 static int
1512 handle_queue_list(const char *cmd __rte_unused,
1513                   const char *params,
1514                   struct rte_tel_data *d)
1515 {
1516         int i;
1517         uint8_t dev_id;
1518         struct rte_eventdev *dev;
1519         char *end_param;
1520
1521         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1522                 return -1;
1523
1524         dev_id = strtoul(params, &end_param, 10);
1525         if (*end_param != '\0')
1526                 RTE_EDEV_LOG_DEBUG(
1527                         "Extra parameters passed to eventdev telemetry command, ignoring");
1528
1529         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1530         dev = &rte_eventdevs[dev_id];
1531
1532         rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
1533         for (i = 0; i < dev->data->nb_queues; i++)
1534                 rte_tel_data_add_array_int(d, i);
1535
1536         return 0;
1537 }
1538
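/* Telemetry handler for /eventdev/queue_links: parse "DevID,PortID" and
 * return a dictionary mapping each queue linked to that port ("qid_N") to
 * its service priority.
 */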
1539 static int
1540 handle_queue_links(const char *cmd __rte_unused,
1541                    const char *params,
1542                    struct rte_tel_data *d)
1543 {
1544         int i, ret, port_id = 0;
1545         char *end_param;
1546         uint8_t dev_id;
1547         uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
1548         uint8_t priorities[RTE_EVENT_MAX_QUEUES_PER_DEV];
1549         const char *p_param;
1550
1551         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1552                 return -1;
1553
1554         /* Get dev ID from parameter string */
1555         dev_id = strtoul(params, &end_param, 10);
1556         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1557
1558         p_param = strtok(end_param, ",");
1559         if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1560                 return -1;
1561
1562         port_id = strtoul(p_param, &end_param, 10);
1563         p_param = strtok(NULL, "\0");
1564         if (p_param != NULL)
1565                 RTE_EDEV_LOG_DEBUG(
1566                         "Extra parameters passed to eventdev telemetry command, ignoring");
1567
1568         ret = rte_event_port_links_get(dev_id, port_id, queues, priorities);
1569         if (ret < 0)
1570                 return -1;
1571
1572         rte_tel_data_start_dict(d);
1573         for (i = 0; i < ret; i++) {
1574                 char qid_name[32];
1575
1576                 snprintf(qid_name, sizeof(qid_name), "qid_%u", queues[i]);
1577                 rte_tel_data_add_dict_u64(d, qid_name, priorities[i]);
1578         }
1579
1580         return 0;
1581 }
1582
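/* Helper for the xstats telemetry handlers: query the number of xstats for
 * the given device/port/queue, fetch their names, IDs and values, and add
 * them to the telemetry dictionary as name -> value pairs.
 */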
1583 static int
1584 eventdev_build_telemetry_data(int dev_id,
1585                               enum rte_event_dev_xstats_mode mode,
1586                               int port_queue_id,
1587                               struct rte_tel_data *d)
1588 {
1589         struct rte_event_dev_xstats_name *xstat_names;
1590         unsigned int *ids;
1591         uint64_t *values;
1592         int i, ret, num_xstats;
1593
1594         num_xstats = rte_event_dev_xstats_names_get(dev_id,
1595                                                     mode,
1596                                                     port_queue_id,
1597                                                     NULL,
1598                                                     NULL,
1599                                                     0);
1600
1601         if (num_xstats < 0)
1602                 return -1;
1603
1604         /* Use a single allocation for all xstat names */
1605         xstat_names = malloc((sizeof(struct rte_event_dev_xstats_name))
1606                              * num_xstats);
1607         if (xstat_names == NULL)
1608                 return -1;
1609
1610         ids = malloc((sizeof(unsigned int)) * num_xstats);
1611         if (ids == NULL) {
1612                 free(xstat_names);
1613                 return -1;
1614         }
1615
1616         values = malloc((sizeof(uint64_t)) * num_xstats);
1617         if (values == NULL) {
1618                 free(xstat_names);
1619                 free(ids);
1620                 return -1;
1621         }
1622
1623         ret = rte_event_dev_xstats_names_get(dev_id, mode, port_queue_id,
1624                                              xstat_names, ids, num_xstats);
1625         if (ret < 0 || ret > num_xstats) {
1626                 free(xstat_names);
1627                 free(ids);
1628                 free(values);
1629                 return -1;
1630         }
1631
1632         ret = rte_event_dev_xstats_get(dev_id, mode, port_queue_id,
1633                                        ids, values, num_xstats);
1634         if (ret < 0 || ret > num_xstats) {
1635                 free(xstat_names);
1636                 free(ids);
1637                 free(values);
1638                 return -1;
1639         }
1640
1641         rte_tel_data_start_dict(d);
1642         for (i = 0; i < num_xstats; i++)
1643                 rte_tel_data_add_dict_u64(d, xstat_names[i].name,
1644                                           values[i]);
1645
1646         free(xstat_names);
1647         free(ids);
1648         free(values);
1649         return 0;
1650 }
1651
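/* Telemetry handler for /eventdev/dev_xstats: device-level extended stats.
 * Parameter: DevID.
 */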
1652 static int
1653 handle_dev_xstats(const char *cmd __rte_unused,
1654                   const char *params,
1655                   struct rte_tel_data *d)
1656 {
1657         int dev_id;
1658         enum rte_event_dev_xstats_mode mode;
1659         char *end_param;
1660
1661         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1662                 return -1;
1663
1664         /* Get dev ID from parameter string */
1665         dev_id = strtoul(params, &end_param, 10);
1666         if (*end_param != '\0')
1667                 RTE_EDEV_LOG_DEBUG(
1668                         "Extra parameters passed to eventdev telemetry command, ignoring");
1669
1670         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1671
1672         mode = RTE_EVENT_DEV_XSTATS_DEVICE;
1673         return eventdev_build_telemetry_data(dev_id, mode, 0, d);
1674 }
1675
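/* Telemetry handler for /eventdev/port_xstats: per-port extended stats.
 * Parameters: DevID,PortID.
 */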
1676 static int
1677 handle_port_xstats(const char *cmd __rte_unused,
1678                    const char *params,
1679                    struct rte_tel_data *d)
1680 {
1681         int dev_id;
1682         int port_queue_id = 0;
1683         enum rte_event_dev_xstats_mode mode;
1684         char *end_param;
1685         const char *p_param;
1686
1687         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1688                 return -1;
1689
1690         /* Get dev ID from parameter string */
1691         dev_id = strtoul(params, &end_param, 10);
1692         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1693
1694         p_param = strtok(end_param, ",");
1695         mode = RTE_EVENT_DEV_XSTATS_PORT;
1696
1697         if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1698                 return -1;
1699
1700         port_queue_id = strtoul(p_param, &end_param, 10);
1701
1702         p_param = strtok(NULL, "\0");
1703         if (p_param != NULL)
1704                 RTE_EDEV_LOG_DEBUG(
1705                         "Extra parameters passed to eventdev telemetry command, ignoring");
1706
1707         return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1708 }
1709
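/* Telemetry handler for /eventdev/queue_xstats: per-queue extended stats.
 * Parameters: DevID,QueueID.
 */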
1710 static int
1711 handle_queue_xstats(const char *cmd __rte_unused,
1712                     const char *params,
1713                     struct rte_tel_data *d)
1714 {
1715         int dev_id;
1716         int port_queue_id = 0;
1717         enum rte_event_dev_xstats_mode mode;
1718         char *end_param;
1719         const char *p_param;
1720
1721         if (params == NULL || strlen(params) == 0 || !isdigit(*params))
1722                 return -1;
1723
1724         /* Get dev ID from parameter string */
1725         dev_id = strtoul(params, &end_param, 10);
1726         RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
1727
1728         p_param = strtok(end_param, ",");
1729         mode = RTE_EVENT_DEV_XSTATS_QUEUE;
1730
1731         if (p_param == NULL || strlen(p_param) == 0 || !isdigit(*p_param))
1732                 return -1;
1733
1734         port_queue_id = strtoul(p_param, &end_param, 10);
1735
1736         p_param = strtok(NULL, "\0");
1737         if (p_param != NULL)
1738                 RTE_EDEV_LOG_DEBUG(
1739                         "Extra parameters passed to eventdev telemetry command, ignoring");
1740
1741         return eventdev_build_telemetry_data(dev_id, mode, port_queue_id, d);
1742 }
1743
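/* Register the eventdev telemetry commands at startup. */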
1744 RTE_INIT(eventdev_init_telemetry)
1745 {
1746         rte_telemetry_register_cmd("/eventdev/dev_list", handle_dev_list,
1747                         "Returns list of available eventdevs. Takes no parameters");
1748         rte_telemetry_register_cmd("/eventdev/port_list", handle_port_list,
1749                         "Returns list of available ports. Parameter: DevID");
1750         rte_telemetry_register_cmd("/eventdev/queue_list", handle_queue_list,
1751                         "Returns list of available queues. Parameter: DevID");
1752
1753         rte_telemetry_register_cmd("/eventdev/dev_xstats", handle_dev_xstats,
1754                         "Returns stats for an eventdev. Parameter: DevID");
1755         rte_telemetry_register_cmd("/eventdev/port_xstats", handle_port_xstats,
1756                         "Returns stats for an eventdev port. Params: DevID,PortID");
1757         rte_telemetry_register_cmd("/eventdev/queue_xstats",
1758                         handle_queue_xstats,
1759                         "Returns stats for an eventdev queue. Params: DevID,QueueID");
1760         rte_telemetry_register_cmd("/eventdev/queue_links", handle_queue_links,
1761                         "Returns links for an eventdev port. Params: DevID,PortID");
1762 }