/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_bus_vdev.h>
#include <rte_lcore.h>
#include <rte_memzone.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_cycles.h>

#include "opdl_evdev.h"
#include "opdl_ring.h"

#define EVENTDEV_NAME_OPDL_PMD event_opdl
#define NUMA_NODE_ARG "numa_node"
#define DO_VALIDATION_ARG "do_validation"
#define DO_TEST_ARG "self_test"

static void
opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
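
/*
 * Record the metadata for an event queue: its external id and the OPDL
 * queue type derived from the requested schedule type. The rings themselves
 * are only created later, at device start.
 */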
static int
opdl_queue_setup(struct rte_eventdev *dev,
		uint8_t queue_id,
		const struct rte_event_queue_conf *conf)
{
	enum queue_type type;

	struct opdl_evdev *device = opdl_pmd_priv(dev);

	/* Extra sanity check, probably not needed */
	if (queue_id == OPDL_INVALID_QID) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : Invalid queue id %u requested\n",
				opdl_pmd_dev_id(device), queue_id);
		return -EINVAL;
	}

	if (device->nb_q_md > device->max_queue_nb) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : Max number of queues %u exceeded by request %u\n",
				opdl_pmd_dev_id(device), device->max_queue_nb,
				device->nb_q_md);
		return -EINVAL;
	}

	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : QUEUE_CFG_ALL_TYPES not supported\n",
				opdl_pmd_dev_id(device));
		return -ENOTSUP;
	} else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
		type = OPDL_Q_TYPE_SINGLE_LINK;
	} else {
		switch (conf->schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type = OPDL_Q_TYPE_ORDERED;
			break;
		case RTE_SCHED_TYPE_ATOMIC:
			type = OPDL_Q_TYPE_ATOMIC;
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			type = OPDL_Q_TYPE_ORDERED;
			break;
		default:
			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : Unknown schedule type %d requested\n",
					opdl_pmd_dev_id(device), conf->schedule_type);
			return -EINVAL;
		}
	}

	/* Check if queue id has been setup already */
	for (uint32_t i = 0; i < device->nb_q_md; i++) {
		if (device->q_md[i].ext_id == queue_id) {
			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : queue id %u already setup\n",
					opdl_pmd_dev_id(device), queue_id);
			return -EINVAL;
		}
	}

	device->q_md[device->nb_q_md].ext_id = queue_id;
	device->q_md[device->nb_q_md].type = type;
	device->q_md[device->nb_q_md].setup = 1;
	device->nb_q_md++;

	return 0;
}
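
/*
 * Queues hold no private resources here; just refuse to release one while
 * the device is running.
 */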
static void
opdl_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);

	RTE_SET_USED(queue_id);
	if (device->data->dev_started)
		return;
}
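
/* Default queue configuration reported back to the application. */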
static void
opdl_queue_def_conf(struct rte_eventdev *dev,
		uint8_t queue_id,
		struct rte_event_queue_conf *conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	static const struct rte_event_queue_conf default_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1,
		.event_queue_cfg = 0,
		.schedule_type = RTE_SCHED_TYPE_ORDERED,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	*conf = default_conf;
}
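
/*
 * Cache the configured queue/port counts and the in-flight event limit;
 * per-dequeue timeouts are not supported by this PMD.
 */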
static int
opdl_dev_configure(const struct rte_eventdev *dev)
{
	struct opdl_evdev *opdl = opdl_pmd_priv(dev);
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *conf = &data->dev_conf;

	opdl->max_queue_nb = conf->nb_event_queues;
	opdl->max_port_nb = conf->nb_event_ports;
	opdl->nb_events_limit = conf->nb_events_limit;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : DEQUEUE_TIMEOUT not supported\n",
				opdl_pmd_dev_id(opdl));
		return -ENOTSUP;
	}

	return 0;
}
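
/* Static device capabilities advertised through rte_event_dev_info_get(). */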
static void
opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
	RTE_SET_USED(dev);

	static const struct rte_event_dev_info evdev_opdl_info = {
		.driver_name = OPDL_PMD_NAME,
		.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
		.max_event_queue_flows = OPDL_QID_NUM_FIDS,
		.max_event_queue_priority_levels = OPDL_Q_PRIORITY_MAX,
		.max_event_priority_levels = OPDL_IQS_MAX,
		.max_event_ports = OPDL_PORTS_MAX,
		.max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
		.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
		.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
	};

	*info = evdev_opdl_info;
}
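
/*
 * Dump ring and per-port statistics to the given file. Statistics are only
 * collected when the "do_validation" devarg was set at probe time.
 */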
static void
opdl_dump(struct rte_eventdev *dev, FILE *f)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);

	if (!device->do_validation)
		return;

	fprintf(f, "\n\n -- RING STATISTICS --\n");
	for (uint32_t i = 0; i < device->nb_opdls; i++)
		opdl_ring_dump(device->opdl[i], f);

	fprintf(f, "\n\n -- PORT STATISTICS --\n"
			"Type Port Index Port Id Queue Id Av. Req Size "
			"Av. Grant Size Av. Cycles PP"
			" Empty DEQs Non Empty DEQs Pkts Processed\n");

	for (uint32_t i = 0; i < device->max_port_nb; i++) {
		char queue_id[64];
		char total_cyc[64];
		const char *p_type;
		uint64_t cne, cpg;
		struct opdl_port *port = &device->ports[i];

		if (port->initialized) {
			cne = port->port_stat[claim_non_empty];
			cpg = port->port_stat[claim_pkts_granted];

			/* label for the port-type column */
			if (port->p_type == OPDL_REGULAR_PORT)
				p_type = "REG";
			else if (port->p_type == OPDL_PURE_RX_PORT)
				p_type = "RX";
			else if (port->p_type == OPDL_PURE_TX_PORT)
				p_type = "TX";
			else if (port->p_type == OPDL_ASYNC_PORT)
				p_type = "SYNC";
			else
				p_type = "????";

			sprintf(queue_id, "%02u", port->external_qid);
			if (port->p_type == OPDL_REGULAR_PORT ||
					port->p_type == OPDL_ASYNC_PORT)
				sprintf(total_cyc, " %'16"PRIu64,
					(cpg != 0 ? port->port_stat[total_cycles] / cpg : 0));
			else
				sprintf(total_cyc, "            ----");

			fprintf(f,
				"%4s %10u %8u %9s %'16"PRIu64" %'16"PRIu64" %s "
				"%'16"PRIu64" %'16"PRIu64" %'16"PRIu64"\n",
				p_type, i, port->id,
				(port->external_qid == OPDL_INVALID_QID ? "---" : queue_id),
				(cne != 0 ? port->port_stat[claim_pkts_requested] / cne : 0),
				(cne != 0 ? port->port_stat[claim_pkts_granted] / cne : 0),
				total_cyc,
				port->port_stat[claim_empty],
				port->port_stat[claim_non_empty],
				port->port_stat[claim_pkts_granted]);
		}
	}
	fprintf(f, "\n");
}
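
/* Stop the device: drop the xstats and tear down the queues and rings built at start. */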
static void
opdl_stop(struct rte_eventdev *dev)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);

	RTE_SET_USED(device);

	opdl_xstats_uninit(dev);

	destroy_queues_and_rings(dev);
}
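
/*
 * Start the device: build the queues, rings and port mappings from the
 * metadata captured during setup, install the event handlers and register
 * the xstats, logging a summary on success.
 */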
static int
opdl_start(struct rte_eventdev *dev)
{
	int err;

	err = create_queues_and_rings(dev);
	if (!err)
		err = assign_internal_queue_ids(dev);
	if (!err)
		err = initialise_queue_zero_ports(dev);
	if (!err)
		err = initialise_all_other_ports(dev);
	if (!err)
		err = check_queues_linked(dev);
	if (!err)
		err = opdl_add_event_handlers(dev);
	if (!err)
		err = build_all_dependencies(dev);
	if (!err) {
		opdl_xstats_init(dev);

		struct opdl_evdev *device = opdl_pmd_priv(dev);

		PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
				"SUCCESS : Created %u total queues (%u ex, %u in),"
				" %u opdls, %u event_dev ports, %u input ports",
				opdl_pmd_dev_id(device), device->nb_queues,
				(device->nb_queues - device->nb_opdls),
				device->nb_opdls, device->nb_opdls,
				device->nb_ports, device->queue[0].nb_ports);
	}

	return err;
}
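
/* Close the device: wipe all port and queue metadata so it can be reconfigured from scratch. */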
static int
opdl_close(struct rte_eventdev *dev)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);
	uint32_t i;

	for (i = 0; i < device->max_port_nb; i++) {
		memset(&device->ports[i],
				0,
				sizeof(struct opdl_port));
	}

	memset(&device->s_md,
			0,
			sizeof(struct opdl_stage_meta_data)*OPDL_PORTS_MAX);

	memset(&device->q_md,
			0,
			sizeof(struct opdl_queue_meta_data)*OPDL_MAX_QUEUES);

	memset(device->q_map_ex_to_in,
			0,
			sizeof(uint8_t)*OPDL_INVALID_QID);

	opdl_xstats_uninit(dev);

	device->max_port_nb = 0;
	device->max_queue_nb = 0;
	device->nb_opdls = 0;
	device->nb_queues = 0;
	device->nb_ports = 0;

	dev->data->nb_queues = 0;
	dev->data->nb_ports = 0;

	return 0;
}
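
/* Devargs parsing callbacks for the numa_node, do_validation and self_test parameters. */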
static int
assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
	int *socket_id = opaque;

	*socket_id = atoi(value);
	if (*socket_id >= RTE_MAX_NUMA_NODES)
		return -1;
	return 0;
}

static int
set_do_validation(const char *key __rte_unused, const char *value, void *opaque)
{
	int *do_val = opaque;

	*do_val = atoi(value);
	return 0;
}

static int
set_do_test(const char *key __rte_unused, const char *value, void *opaque)
{
	int *do_test = opaque;

	*do_test = atoi(value);
	return 0;
}
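
/*
 * Probe a new "event_opdl" vdev instance: parse its devargs, allocate the
 * eventdev on the requested socket and hook up the ops table. Secondary
 * processes return once they have attached to the shared device data.
 */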
static int
opdl_probe(struct rte_vdev_device *vdev)
{
	static const struct rte_eventdev_ops evdev_opdl_ops = {
		.dev_configure = opdl_dev_configure,
		.dev_infos_get = opdl_info_get,
		.dev_close = opdl_close,
		.dev_start = opdl_start,
		.dev_stop = opdl_stop,
		.dump = opdl_dump,

		.queue_def_conf = opdl_queue_def_conf,
		.queue_setup = opdl_queue_setup,
		.queue_release = opdl_queue_release,

		.xstats_get = opdl_xstats_get,
		.xstats_get_names = opdl_xstats_get_names,
		.xstats_get_by_name = opdl_xstats_get_by_name,
		.xstats_reset = opdl_xstats_reset,
	};

	static const char *const args[] = {
		NUMA_NODE_ARG,
		DO_VALIDATION_ARG,
		DO_TEST_ARG,
		NULL
	};
	const char *name;
	const char *params;
	struct rte_eventdev *dev;
	struct opdl_evdev *opdl;
	int socket_id = rte_socket_id();
	int do_validation = 0;
	int do_test = 0;
	int str_len;

	name = rte_vdev_device_name(vdev);
	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			PMD_DRV_LOG(INFO,
					"Ignoring unsupported parameters when creating device '%s'\n",
					name);
		} else {
			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
					assign_numa_node, &socket_id);
			if (ret != 0) {
				PMD_DRV_LOG(ERR,
						"%s: Error parsing numa node parameter",
						name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DO_VALIDATION_ARG,
					set_do_validation, &do_validation);
			if (ret != 0) {
				PMD_DRV_LOG(ERR,
						"%s: Error parsing do validation parameter",
						name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DO_TEST_ARG,
					set_do_test, &do_test);
			if (ret != 0) {
				PMD_DRV_LOG(ERR,
						"%s: Error parsing do test parameter",
						name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}

	dev = rte_event_pmd_vdev_init(name,
			sizeof(struct opdl_evdev), socket_id);
	if (dev == NULL) {
		PMD_DRV_LOG(ERR, "eventdev vdev init() failed");
		return -EFAULT;
	}

	PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
			"Success - creating eventdev device %s, numa_node:[%d], do_validation:[%s]"
			", self_test:[%s]\n",
			dev->data->dev_id, name, socket_id,
			(do_validation ? "true" : "false"),
			(do_test ? "true" : "false"));

	dev->dev_ops = &evdev_opdl_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	opdl = dev->data->dev_private;
	opdl->data = dev->data;
	opdl->socket = socket_id;
	opdl->do_validation = do_validation;
	opdl->do_test = do_test;
	str_len = strlen(name);
	memcpy(opdl->service_name, name, str_len);

	return 0;
}
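
/* Remove a vdev instance previously created by opdl_probe(). */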
static int
opdl_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	PMD_DRV_LOG(INFO, "Closing eventdev opdl device %s\n", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_opdl_pmd_drv = {
	.probe = opdl_probe,
	.remove = opdl_remove
};

RTE_INIT(opdl_init_log)
{
	opdl_logtype_driver = rte_log_register("eventdev.opdl.driver");
	if (opdl_logtype_driver >= 0)
		rte_log_set_level(opdl_logtype_driver, RTE_LOG_INFO);
}

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OPDL_PMD, evdev_opdl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_opdl, NUMA_NODE_ARG "=<int>"
		DO_VALIDATION_ARG "=<int>" DO_TEST_ARG "=<int>");
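
/*
 * Example (illustrative only): instantiating this PMD from an application's
 * EAL command line using the parameters registered above. The instance name
 * "event_opdl0" and the values shown are assumptions for the example, not
 * taken from this file.
 *
 *   ./my_app --vdev="event_opdl0,numa_node=0,do_validation=1,self_test=0" ...
 */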