/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */
9 #include <rte_bus_vdev.h>
10 #include <rte_lcore.h>
11 #include <rte_memzone.h>
12 #include <rte_kvargs.h>
13 #include <rte_errno.h>
14 #include <rte_cycles.h>
16 #include "opdl_evdev.h"
17 #include "opdl_ring.h"
/* Virtual device name under which this eventdev PMD registers. */
20 #define EVENTDEV_NAME_OPDL_PMD event_opdl
/* devargs key: NUMA socket on which to allocate the device (see assign_numa_node). */
21 #define NUMA_NODE_ARG "numa_node"
/* devargs key: non-zero enables validation/statistics mode (gates opdl_dump output). */
22 #define DO_VALIDATION_ARG "do_validation"
/* devargs key: non-zero runs the PMD self-test during probe. */
23 #define DO_TEST_ARG "self_test"
27 opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
30 opdl_event_enqueue_burst(void *port,
31 const struct rte_event ev[],
34 struct opdl_port *p = port;
36 if (unlikely(!p->opdl->data->dev_started))
40 /* either rx_enqueue or disclaim*/
41 return p->enq(p, ev, num);
45 opdl_event_enqueue(void *port, const struct rte_event *ev)
47 struct opdl_port *p = port;
49 if (unlikely(!p->opdl->data->dev_started))
53 return p->enq(p, ev, 1);
57 opdl_event_dequeue_burst(void *port,
62 struct opdl_port *p = (void *)port;
66 if (unlikely(!p->opdl->data->dev_started))
69 /* This function pointer can point to tx_dequeue or claim*/
70 return p->deq(p, ev, num);
74 opdl_event_dequeue(void *port,
78 struct opdl_port *p = (void *)port;
80 if (unlikely(!p->opdl->data->dev_started))
85 return p->deq(p, ev, 1);
/*
 * port_link op: link event queues to a port.
 *
 * OPDL implements a static pipeline, so linking is heavily restricted:
 * the device must be stopped, at most ONE queue may be linked per port,
 * the port must already be configured, and the port must not already be
 * linked.  On success the external queue id is recorded on the port.
 *
 * NOTE(review): this extract is missing interior lines (signature tail,
 * error returns / rte_errno assignments, and the PMD_DRV_LOG argument
 * lists) — the visible lines below are incomplete.
 */
89 opdl_port_link(struct rte_eventdev *dev,
91 const uint8_t queues[],
92 const uint8_t priorities[],
95 struct opdl_port *p = port;
/* Queue priorities are not supported by the static pipeline. */
97 RTE_SET_USED(priorities);
/* Reject linking while the device is running. */
100 if (unlikely(dev->data->dev_started)) {
101 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
102 "Attempt to link queue (%u) to port %d while device started\n",
110 /* Max of 1 queue per port */
112 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
113 "Attempt to link more than one queue (%u) to port %d requested\n",
/* The port must have been set up via opdl_port_setup first. */
121 if (!p->configured) {
122 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
123 "port %d not configured, cannot link to %u\n",
/* Re-linking an already-linked port is an error in a static pipeline. */
131 if (p->external_qid != OPDL_INVALID_QID) {
132 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
133 "port %d already linked to queue %u, cannot link to %u\n",
/* All checks passed: record the single linked (external) queue id. */
142 p->external_qid = queues[0];
/*
 * port_unlink op: unlink the port from its queue.
 *
 * Only permitted while the device is stopped.  Resets the port's internal
 * queue id, port type, and external queue id back to their "invalid"
 * sentinels.  NOTE(review): interior lines (error return and log argument
 * list) are missing from this extract.
 */
148 opdl_port_unlink(struct rte_eventdev *dev,
153 struct opdl_port *p = port;
155 RTE_SET_USED(queues);
156 RTE_SET_USED(nb_unlinks);
/* Reject unlinking while the device is running. */
158 if (unlikely(dev->data->dev_started)) {
159 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
160 "Attempt to unlink queue (%u) to port %d while device started\n",
/* NOTE(review): redundant — nb_unlinks already marked used above. */
167 RTE_SET_USED(nb_unlinks);
170 p->queue_id = OPDL_INVALID_QID;
171 p->p_type = OPDL_INVALID_PORT;
172 p->external_qid = OPDL_INVALID_QID;
174 /* always unlink 0 queue due to statice pipeline */
/*
 * port_setup op: initialise one event port.
 *
 * Rejects double-setup, zeroes the port structure, seeds the queue ids
 * with the OPDL_INVALID_QID sentinel, and publishes the port pointer into
 * dev->data->ports[].  NOTE(review): interior lines (the already-configured
 * test, error return, and the lines recording conf/port_id on the port) are
 * missing from this extract.
 */
179 opdl_port_setup(struct rte_eventdev *dev,
181 const struct rte_event_port_conf *conf)
183 struct opdl_evdev *device = opdl_pmd_priv(dev);
184 struct opdl_port *p = &device->ports[port_id];
188 /* Check if port already configured */
190 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
191 "Attempt to setup port %d which is already setup\n",
197 *p = (struct opdl_port){0}; /* zero entire structure */
/* No queue linked yet: both ids start at the invalid sentinel. */
200 p->queue_id = OPDL_INVALID_QID;
201 p->external_qid = OPDL_INVALID_QID;
202 dev->data->ports[port_id] = p;
/*
 * port_release op.  NOTE(review): almost the entire body is missing from
 * this extract; the visible fragment suggests release is refused (or is a
 * no-op) while the device is started — confirm against the full source.
 */
210 opdl_port_release(void *port)
212 struct opdl_port *p = (void *)port;
215 p->opdl->data->dev_started) {
224 opdl_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
225 struct rte_event_port_conf *port_conf)
228 RTE_SET_USED(port_id);
230 port_conf->new_event_threshold = MAX_OPDL_CONS_Q_DEPTH;
231 port_conf->dequeue_depth = MAX_OPDL_CONS_Q_DEPTH;
232 port_conf->enqueue_depth = MAX_OPDL_CONS_Q_DEPTH;
/*
 * queue_setup op: validate and record metadata for one event queue.
 *
 * The queue is not created here — its type and external id are stored in
 * the device's queue-metadata array (q_md); the real queues/rings are built
 * later at opdl_start() time (see create_queues_and_rings).
 *
 * NOTE(review): interior lines are missing from this extract — error
 * returns, the switch's break statements, and the nb_q_md increment at the
 * end.  Do not assume fallthrough in the switch below; confirm against the
 * full source.
 */
236 opdl_queue_setup(struct rte_eventdev *dev,
238 const struct rte_event_queue_conf *conf)
240 enum queue_type type;
242 struct opdl_evdev *device = opdl_pmd_priv(dev);
244 /* Extra sanity check, probably not needed */
245 if (queue_id == OPDL_INVALID_QID) {
246 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
247 "Invalid queue id %u requested\n",
/* Enforce the queue budget declared at configure time. */
253 if (device->nb_q_md > device->max_queue_nb) {
254 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
255 "Max number of queues %u exceeded by request %u\n",
257 device->max_queue_nb,
/* Map the eventdev queue config onto an internal OPDL queue type. */
262 if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
263 & conf->event_queue_cfg) {
264 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
265 "QUEUE_CFG_ALL_TYPES not supported\n",
268 } else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK
269 & conf->event_queue_cfg) {
270 type = OPDL_Q_TYPE_SINGLE_LINK;
272 switch (conf->schedule_type) {
273 case RTE_SCHED_TYPE_ORDERED:
274 type = OPDL_Q_TYPE_ORDERED;
276 case RTE_SCHED_TYPE_ATOMIC:
277 type = OPDL_Q_TYPE_ATOMIC;
/* PARALLEL is mapped onto the ORDERED queue type. */
279 case RTE_SCHED_TYPE_PARALLEL:
280 type = OPDL_Q_TYPE_ORDERED;
283 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
284 "Unknown queue type %d requested\n",
286 conf->event_queue_cfg);
290 /* Check if queue id has been setup already */
291 for (uint32_t i = 0; i < device->nb_q_md; i++) {
292 if (device->q_md[i].ext_id == queue_id) {
293 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
294 "queue id %u already setup\n",
/* Record the queue metadata; the queue itself is built at start(). */
301 device->q_md[device->nb_q_md].ext_id = queue_id;
302 device->q_md[device->nb_q_md].type = type;
303 device->q_md[device->nb_q_md].setup = 1;
/*
 * queue_release op.  NOTE(review): the body tail is missing from this
 * extract; the visible fragment shows release bails (or no-ops) while the
 * device is started — confirm against the full source.
 */
310 opdl_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
312 struct opdl_evdev *device = opdl_pmd_priv(dev);
314 RTE_SET_USED(queue_id);
316 if (device->data->dev_started)
322 opdl_queue_def_conf(struct rte_eventdev *dev,
324 struct rte_event_queue_conf *conf)
327 RTE_SET_USED(queue_id);
329 static const struct rte_event_queue_conf default_conf = {
330 .nb_atomic_flows = 1024,
331 .nb_atomic_order_sequences = 1,
332 .event_queue_cfg = 0,
333 .schedule_type = RTE_SCHED_TYPE_ORDERED,
334 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
337 *conf = default_conf;
/*
 * dev_configure op: capture the queue/port/event budgets requested by the
 * application and reject unsupported capability flags.
 *
 * NOTE(review): the error return for the PER_DEQUEUE_TIMEOUT case and the
 * final return are missing from this extract.
 */
342 opdl_dev_configure(const struct rte_eventdev *dev)
344 struct opdl_evdev *opdl = opdl_pmd_priv(dev);
345 const struct rte_eventdev_data *data = dev->data;
346 const struct rte_event_dev_config *conf = &data->dev_conf;
/* Budgets enforced later by opdl_queue_setup/opdl_start. */
348 opdl->max_queue_nb = conf->nb_event_queues;
349 opdl->max_port_nb = conf->nb_event_ports;
350 opdl->nb_events_limit = conf->nb_events_limit;
/* Per-dequeue timeouts are not supported by this polling PMD. */
352 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
353 PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
354 "DEQUEUE_TIMEOUT not supported\n",
363 opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
367 static const struct rte_event_dev_info evdev_opdl_info = {
368 .driver_name = OPDL_PMD_NAME,
369 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
370 .max_event_queue_flows = OPDL_QID_NUM_FIDS,
371 .max_event_queue_priority_levels = OPDL_Q_PRIORITY_MAX,
372 .max_event_priority_levels = OPDL_IQS_MAX,
373 .max_event_ports = OPDL_PORTS_MAX,
374 .max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
375 .max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
376 .max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
377 .event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
380 *info = evdev_opdl_info;
/*
 * dump op: print ring and per-port statistics to the given FILE.
 *
 * Statistics are only collected when the device was created with
 * do_validation enabled, so the dump is a no-op otherwise.
 *
 * NOTE(review): many interior lines are missing from this extract —
 * fprintf calls, the per-type name strings assigned in the if/else-if
 * chain, divide-by-zero guards around the cne/cpg averages, and several
 * format arguments.  The visible lines are incomplete.
 */
384 opdl_dump(struct rte_eventdev *dev, FILE *f)
386 struct opdl_evdev *device = opdl_pmd_priv(dev);
/* No stats are gathered unless validation mode was requested. */
388 if (!device->do_validation)
392 "\n\n -- RING STATISTICS --\n");
394 for (uint32_t i = 0; i < device->nb_opdls; i++)
395 opdl_ring_dump(device->opdl[i], f);
398 "\n\n -- PORT STATISTICS --\n"
399 "Type Port Index Port Id Queue Id Av. Req Size "
400 "Av. Grant Size Av. Cycles PP"
401 " Empty DEQs Non Empty DEQs Pkts Processed\n");
403 for (uint32_t i = 0; i < device->max_port_nb; i++) {
409 struct opdl_port *port = &device->ports[i];
411 if (port->initialized) {
/* cne = non-empty claims, cpg = packets granted: denominators below. */
412 cne = port->port_stat[claim_non_empty];
413 cpg = port->port_stat[claim_pkts_granted];
/* Pick a printable name for the port type (assignments missing here). */
414 if (port->p_type == OPDL_REGULAR_PORT)
416 else if (port->p_type == OPDL_PURE_RX_PORT)
418 else if (port->p_type == OPDL_PURE_TX_PORT)
420 else if (port->p_type == OPDL_ASYNC_PORT)
425 sprintf(queue_id, "%02u", port->external_qid);
/* Average cycles/packet only meaningful for processing port types. */
426 if (port->p_type == OPDL_REGULAR_PORT ||
427 port->p_type == OPDL_ASYNC_PORT)
431 port->port_stat[total_cycles] / cpg
437 "%4s %10u %8u %9s %'16"PRIu64" %'16"PRIu64" %s "
438 "%'16"PRIu64" %'16"PRIu64" %'16"PRIu64"\n",
442 (port->external_qid == OPDL_INVALID_QID ? "---"
445 port->port_stat[claim_pkts_requested] / cne
448 port->port_stat[claim_pkts_granted] / cne
451 port->port_stat[claim_empty],
452 port->port_stat[claim_non_empty],
453 port->port_stat[claim_pkts_granted]);
/*
 * dev_stop op: tear down xstats and destroy the queues/rings built at
 * start time.  NOTE(review): interior lines (e.g. clearing dev_started and
 * per-device counters) are missing from this extract.
 */
461 opdl_stop(struct rte_eventdev *dev)
463 struct opdl_evdev *device = opdl_pmd_priv(dev);
465 opdl_xstats_uninit(dev);
467 destroy_queues_and_rings(dev);
/*
 * dev_start op: materialise the static pipeline.
 *
 * Builds queues/rings, assigns internal queue ids, initialises queue-0 and
 * then all remaining ports, verifies every queue is linked, installs the
 * event handlers, and finally resolves stage dependencies — in that order,
 * since each step consumes the previous step's output.
 *
 * NOTE(review): the error-check lines between each step (presumably
 * `if (err) ...` bail-outs), the opening declarations, and the final
 * return are missing from this extract.
 */
476 opdl_start(struct rte_eventdev *dev)
481 err = create_queues_and_rings(dev);
485 err = assign_internal_queue_ids(dev);
489 err = initialise_queue_zero_ports(dev);
493 err = initialise_all_other_ports(dev);
497 err = check_queues_linked(dev);
501 err = opdl_add_event_handlers(dev);
505 err = build_all_dependencies(dev);
/* Pipeline built successfully: enable xstats and report the topology. */
508 opdl_xstats_init(dev);
510 struct opdl_evdev *device = opdl_pmd_priv(dev);
512 PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
513 "SUCCESS : Created %u total queues (%u ex, %u in),"
514 " %u opdls, %u event_dev ports, %u input ports",
515 opdl_pmd_dev_id(device),
517 (device->nb_queues - device->nb_opdls),
521 device->queue[0].nb_ports);
/*
 * dev_close op: wipe all per-device state back to its freshly-probed
 * condition — port structures, stage and queue metadata, the external-to-
 * internal queue-id map, xstats, and every count/limit field.
 *
 * NOTE(review): interior lines (memset fill values, loop variable
 * declaration, and the return) are missing from this extract.
 */
529 opdl_close(struct rte_eventdev *dev)
531 struct opdl_evdev *device = opdl_pmd_priv(dev);
534 for (i = 0; i < device->max_port_nb; i++) {
535 memset(&device->ports[i],
537 sizeof(struct opdl_port));
540 memset(&device->s_md,
542 sizeof(struct opdl_stage_meta_data)*OPDL_PORTS_MAX);
544 memset(&device->q_md,
546 sizeof(struct opdl_queue_meta_data)*OPDL_MAX_QUEUES);
/* NOTE(review): size uses OPDL_INVALID_QID as the element count — verify
 * this matches the declared bound of q_map_ex_to_in (looks suspicious
 * next to the OPDL_MAX_QUEUES used above). */
549 memset(device->q_map_ex_to_in,
551 sizeof(uint8_t)*OPDL_INVALID_QID);
553 opdl_xstats_uninit(dev);
/* Reset all bookkeeping so a subsequent configure/start begins clean. */
555 device->max_port_nb = 0;
557 device->max_queue_nb = 0;
559 device->nb_opdls = 0;
561 device->nb_queues = 0;
563 device->nb_ports = 0;
567 dev->data->nb_queues = 0;
569 dev->data->nb_ports = 0;
576 assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
578 int *socket_id = opaque;
579 *socket_id = atoi(value);
580 if (*socket_id >= RTE_MAX_NUMA_NODES)
586 set_do_validation(const char *key __rte_unused, const char *value, void *opaque)
588 int *do_val = opaque;
589 *do_val = atoi(value);
596 set_do_test(const char *key __rte_unused, const char *value, void *opaque)
598 int *do_test = opaque;
600 *do_test = atoi(value);
/*
 * vdev probe entry point for the OPDL eventdev.
 *
 * Parses devargs (numa_node, do_validation, self_test), creates the vdev
 * eventdev, installs the ops table and fast-path function pointers, then —
 * in the primary process only — initialises the private device state and
 * optionally runs the self-test.
 *
 * NOTE(review): many interior lines are missing from this extract —
 * kvlist NULL check, error returns after each kvargs_process failure,
 * several declarations (name/params/str_len/test_result), and the final
 * return.  The visible lines are incomplete.
 */
608 opdl_probe(struct rte_vdev_device *vdev)
610 static const struct rte_eventdev_ops evdev_opdl_ops = {
611 .dev_configure = opdl_dev_configure,
612 .dev_infos_get = opdl_info_get,
613 .dev_close = opdl_close,
614 .dev_start = opdl_start,
615 .dev_stop = opdl_stop,
618 .queue_def_conf = opdl_queue_def_conf,
619 .queue_setup = opdl_queue_setup,
620 .queue_release = opdl_queue_release,
621 .port_def_conf = opdl_port_def_conf,
622 .port_setup = opdl_port_setup,
623 .port_release = opdl_port_release,
624 .port_link = opdl_port_link,
625 .port_unlink = opdl_port_unlink,
628 .xstats_get = opdl_xstats_get,
629 .xstats_get_names = opdl_xstats_get_names,
630 .xstats_get_by_name = opdl_xstats_get_by_name,
631 .xstats_reset = opdl_xstats_reset,
/* Accepted devargs keys; anything else is logged and ignored. */
634 static const char *const args[] = {
642 struct rte_eventdev *dev;
643 struct opdl_evdev *opdl;
/* Defaults: current socket, validation and self-test off. */
644 int socket_id = rte_socket_id();
645 int do_validation = 0;
650 name = rte_vdev_device_name(vdev);
651 params = rte_vdev_device_args(vdev);
652 if (params != NULL && params[0] != '\0') {
653 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
657 "Ignoring unsupported parameters when creating device '%s'\n",
/* Each recognised key is dispatched to its handler; on parse error the
 * kvlist is freed and probe aborts (error returns missing here). */
660 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
661 assign_numa_node, &socket_id);
664 "%s: Error parsing numa node parameter",
667 rte_kvargs_free(kvlist);
671 ret = rte_kvargs_process(kvlist, DO_VALIDATION_ARG,
672 set_do_validation, &do_validation);
675 "%s: Error parsing do validation parameter",
677 rte_kvargs_free(kvlist);
681 ret = rte_kvargs_process(kvlist, DO_TEST_ARG,
682 set_do_test, &do_test);
685 "%s: Error parsing do test parameter",
687 rte_kvargs_free(kvlist);
691 rte_kvargs_free(kvlist);
694 dev = rte_event_pmd_vdev_init(name,
695 sizeof(struct opdl_evdev), socket_id);
698 PMD_DRV_LOG(ERR, "eventdev vdev init() failed");
702 PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
703 "Success - creating eventdev device %s, numa_node:[%d], do_valdation:[%s]"
704 " , self_test:[%s]\n",
708 (do_validation ? "true" : "false"),
709 (do_test ? "true" : "false"));
/* Install the ops table and the fast-path enqueue/dequeue pointers. */
711 dev->dev_ops = &evdev_opdl_ops;
713 dev->enqueue = opdl_event_enqueue;
714 dev->enqueue_burst = opdl_event_enqueue_burst;
715 dev->enqueue_new_burst = opdl_event_enqueue_burst;
716 dev->enqueue_forward_burst = opdl_event_enqueue_burst;
717 dev->dequeue = opdl_event_dequeue;
718 dev->dequeue_burst = opdl_event_dequeue_burst;
/* Secondary processes share the primary's state: stop here. */
720 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
723 opdl = dev->data->dev_private;
724 opdl->data = dev->data;
725 opdl->socket = socket_id;
726 opdl->do_validation = do_validation;
727 opdl->do_test = do_test;
728 str_len = strlen(name);
/* NOTE(review): copies str_len bytes without the NUL terminator — verify
 * service_name is zero-initialised or large enough to stay terminated. */
729 memcpy(opdl->service_name, name, str_len);
732 test_result = opdl_selftest();
738 opdl_remove(struct rte_vdev_device *vdev)
742 name = rte_vdev_device_name(vdev);
746 PMD_DRV_LOG(INFO, "Closing eventdev opdl device %s\n", name);
748 return rte_event_pmd_vdev_uninit(name);
751 static struct rte_vdev_driver evdev_opdl_pmd_drv = {
753 .remove = opdl_remove
756 RTE_INIT(opdl_init_log);
761 opdl_logtype_driver = rte_log_register("eventdev.opdl.driver");
762 if (opdl_logtype_driver >= 0)
763 rte_log_set_level(opdl_logtype_driver, RTE_LOG_INFO);
/* Register the driver on the vdev bus and advertise the devargs it
 * accepts (all three keys take an integer value). */
767 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OPDL_PMD, evdev_opdl_pmd_drv);
768 RTE_PMD_REGISTER_PARAM_STRING(event_opdl, NUMA_NODE_ARG "=<int>"
769 DO_VALIDATION_ARG "=<int>" DO_TEST_ARG "=<int>");