/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */
#include <inttypes.h>
#include <stdlib.h>
#include <string.h>

#include <rte_bus_vdev.h>
#include <rte_lcore.h>
#include <rte_memzone.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_cycles.h>

#include "opdl_evdev.h"
#include "opdl_ring.h"

#define EVENTDEV_NAME_OPDL_PMD event_opdl
#define NUMA_NODE_ARG "numa_node"
#define DO_VALIDATION_ARG "do_validation"
#define DO_TEST_ARG "self_test"
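
/*
 * Devargs accepted by this PMD (see RTE_PMD_REGISTER_PARAM_STRING at the
 * bottom of this file). An illustrative EAL command line, with a
 * hypothetical instance name "event_opdl0", would be:
 *
 *   --vdev="event_opdl0,numa_node=1,do_validation=1,self_test=1"
 */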
static void
opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
static uint16_t
opdl_event_enqueue_burst(void *port,
		const struct rte_event ev[],
		uint16_t num)
{
	struct opdl_port *p = port;

	if (unlikely(!p->opdl->data->dev_started))
		return 0;

	/* p->enq points to either rx_enqueue or disclaim */
	return p->enq(p, ev, num);
}
static uint16_t
opdl_event_enqueue(void *port, const struct rte_event *ev)
{
	struct opdl_port *p = port;
	if (unlikely(!p->opdl->data->dev_started))
		return 0;
	return p->enq(p, ev, 1);
}
static uint16_t
opdl_event_dequeue_burst(void *port,
		struct rte_event *ev,
		uint16_t num,
		uint64_t wait)
{
	struct opdl_port *p = (void *)port;
	RTE_SET_USED(wait);
	if (unlikely(!p->opdl->data->dev_started))
		return 0;
	/* p->deq points to either tx_dequeue or claim */
	return p->deq(p, ev, num);
}
static uint16_t
opdl_event_dequeue(void *port,
		struct rte_event *ev,
		uint64_t wait)
{
	struct opdl_port *p = (void *)port;
	RTE_SET_USED(wait);
	if (unlikely(!p->opdl->data->dev_started))
		return 0;
	return p->deq(p, ev, 1);
}
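
/*
 * Note: the enqueue/dequeue entry points above only dispatch through the
 * per-port enq/deq function pointers, which are bound when the pipeline
 * is initialised at dev_start time (rx_enqueue/disclaim for enqueue,
 * tx_dequeue/claim for dequeue, depending on the port type).
 */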
static int
opdl_port_link(struct rte_eventdev *dev,
		void *port,
		const uint8_t queues[],
		const uint8_t priorities[],
		uint16_t num)
{
	struct opdl_port *p = port;

	RTE_SET_USED(priorities);

	if (unlikely(dev->data->dev_started)) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"Attempt to link queue (%u) to port %d while device started\n",
			dev->data->dev_id, queues[0], p->id);
		rte_errno = EINVAL;
		return 0;
	}

	/* Max of 1 queue per port */
	if (num > 1) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"Attempt to link more than one queue (%u) to port %d\n",
			dev->data->dev_id, num, p->id);
		rte_errno = EDQUOT;
		return 0;
	}
	if (!p->configured) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"port %d not configured, cannot link to queue %u\n",
			dev->data->dev_id, p->id, queues[0]);
		rte_errno = EINVAL;
		return 0;
	}

	if (p->external_qid != OPDL_INVALID_QID) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"port %d already linked to queue %u, cannot link to %u\n",
			dev->data->dev_id, p->id,
			p->external_qid, queues[0]);
		rte_errno = EINVAL;
		return 0;
	}

	p->external_qid = queues[0];

	return 1;
}
static int
opdl_port_unlink(struct rte_eventdev *dev,
		void *port,
		uint8_t queues[],
		uint16_t nb_unlinks)
{
	struct opdl_port *p = port;

	RTE_SET_USED(queues);
	RTE_SET_USED(nb_unlinks);

	if (unlikely(dev->data->dev_started)) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"Attempt to unlink queue (%u) from port %d while device started\n",
			dev->data->dev_id, queues[0], p->id);
		rte_errno = EINVAL;
		return 0;
	}

	p->queue_id = OPDL_INVALID_QID;
	p->p_type = OPDL_INVALID_PORT;
	p->external_qid = OPDL_INVALID_QID;

	/* Always report zero unlinked queues due to the static pipeline */
	return 0;
}
static int
opdl_port_setup(struct rte_eventdev *dev,
		uint8_t port_id,
		const struct rte_event_port_conf *conf)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);
	struct opdl_port *p = &device->ports[port_id];

	RTE_SET_USED(conf);

	/* Check if port already configured */
	if (p->configured) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"Attempt to setup port %d which is already setup\n",
			opdl_pmd_dev_id(device), p->id);
		return -EINVAL;
	}

	*p = (struct opdl_port){0}; /* zero entire structure */
	p->id = port_id;
	p->opdl = device;
	p->queue_id = OPDL_INVALID_QID;
	p->external_qid = OPDL_INVALID_QID;
	p->configured = 1;
	dev->data->ports[port_id] = p;

	return 0;
}
static void
opdl_port_release(void *port)
{
	struct opdl_port *p = (void *)port;

	if (p == NULL ||
			p->opdl->data->dev_started)
		return;

	p->configured = 0;
	p->initialized = 0;
}
static void
opdl_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
		struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = MAX_OPDL_CONS_Q_DEPTH;
	port_conf->dequeue_depth = MAX_OPDL_CONS_Q_DEPTH;
	port_conf->enqueue_depth = MAX_OPDL_CONS_Q_DEPTH;
}
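
/*
 * The defaults above match the per-port maxima advertised by
 * opdl_info_get() (MAX_OPDL_CONS_Q_DEPTH), so a default-configured port
 * already requests the largest depths this PMD supports.
 */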
static int
opdl_queue_setup(struct rte_eventdev *dev,
		uint8_t queue_id,
		const struct rte_event_queue_conf *conf)
{
	enum queue_type type;
	uint32_t i;
	struct opdl_evdev *device = opdl_pmd_priv(dev);

	/* Extra sanity check, probably not needed */
	if (queue_id == OPDL_INVALID_QID) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"Invalid queue id %u requested\n",
			opdl_pmd_dev_id(device), queue_id);
		return -EINVAL;
	}

	if (device->nb_q_md > device->max_queue_nb) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"Max number of queues %u exceeded by request %u\n",
			opdl_pmd_dev_id(device),
			device->max_queue_nb,
			device->nb_q_md);
		return -EINVAL;
	}

	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
			& conf->event_queue_cfg) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"QUEUE_CFG_ALL_TYPES not supported\n",
			opdl_pmd_dev_id(device));
		return -ENOTSUP;
	} else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK
			& conf->event_queue_cfg) {
		type = OPDL_Q_TYPE_SINGLE_LINK;
	} else {
		switch (conf->schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type = OPDL_Q_TYPE_ORDERED;
			break;
		case RTE_SCHED_TYPE_ATOMIC:
			type = OPDL_Q_TYPE_ATOMIC;
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			/* Parallel scheduling is handled as ordered */
			type = OPDL_Q_TYPE_ORDERED;
			break;
		default:
			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
				"Unknown schedule type %d requested\n",
				opdl_pmd_dev_id(device),
				conf->schedule_type);
			return -ENOTSUP;
		}
	}

	/* Check if queue id has been setup already */
	for (i = 0; i < device->nb_q_md; i++) {
		if (device->q_md[i].ext_id == queue_id) {
			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
				"queue id %u already setup\n",
				opdl_pmd_dev_id(device), queue_id);
			return -EINVAL;
		}
	}

	device->q_md[device->nb_q_md].ext_id = queue_id;
	device->q_md[device->nb_q_md].type = type;
	device->q_md[device->nb_q_md].setup = 1;
	device->nb_q_md++;

	return 0;
}
static void
opdl_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);
	RTE_SET_USED(queue_id);
	if (device->data->dev_started)
		return;
}
static void
opdl_queue_def_conf(struct rte_eventdev *dev,
		uint8_t queue_id,
		struct rte_event_queue_conf *conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	static const struct rte_event_queue_conf default_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1,
		.event_queue_cfg = 0,
		.schedule_type = RTE_SCHED_TYPE_ORDERED,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	*conf = default_conf;
}
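
/*
 * With .event_queue_cfg == 0 and RTE_SCHED_TYPE_ORDERED above, a queue
 * created with this default configuration is mapped to
 * OPDL_Q_TYPE_ORDERED by opdl_queue_setup().
 */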
static int
opdl_dev_configure(const struct rte_eventdev *dev)
{
	struct opdl_evdev *opdl = opdl_pmd_priv(dev);
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *conf = &data->dev_conf;

	opdl->max_queue_nb = conf->nb_event_queues;
	opdl->max_port_nb = conf->nb_event_ports;
	opdl->nb_events_limit = conf->nb_events_limit;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			"DEQUEUE_TIMEOUT not supported\n",
			opdl_pmd_dev_id(opdl));
		return -ENOTSUP;
	}

	return 0;
}
static void
opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
	RTE_SET_USED(dev);

	static const struct rte_event_dev_info evdev_opdl_info = {
		.driver_name = OPDL_PMD_NAME,
		.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
		.max_event_queue_flows = OPDL_QID_NUM_FIDS,
		.max_event_queue_priority_levels = OPDL_Q_PRIORITY_MAX,
		.max_event_priority_levels = OPDL_IQS_MAX,
		.max_event_ports = OPDL_PORTS_MAX,
		.max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
		.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
		.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE,
	};

	*info = evdev_opdl_info;
}
static void
opdl_dump(struct rte_eventdev *dev, FILE *f)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);
	uint32_t i;

	if (!device->do_validation)
		return;

	fprintf(f,
		"\n\n -- RING STATISTICS --\n");
	for (i = 0; i < device->nb_opdls; i++)
		opdl_ring_dump(device->opdl[i], f);

	fprintf(f,
		"\n\n -- PORT STATISTICS --\n"
		"Type Port Index Port Id Queue Id Av. Req Size "
		"Av. Grant Size Av. Cycles PP"
		" Empty DEQs Non Empty DEQs Pkts Processed\n");

	for (i = 0; i < device->max_port_nb; i++) {
		char queue_id[64];
		char total_cyc[64];
		const char *p_type;
		uint64_t cne, cpg;
		struct opdl_port *port = &device->ports[i];

		if (port->initialized) {
			cne = port->port_stat[claim_non_empty];
			cpg = port->port_stat[claim_pkts_granted];
			if (port->p_type == OPDL_REGULAR_PORT)
				p_type = "REG";
			else if (port->p_type == OPDL_PURE_RX_PORT)
				p_type = "  RX";
			else if (port->p_type == OPDL_PURE_TX_PORT)
				p_type = "  TX";
			else if (port->p_type == OPDL_ASYNC_PORT)
				p_type = "SYNC";
			else
				p_type = "????";

			sprintf(queue_id, "%02u", port->external_qid);

			/* Average cycles per packet only applies to ports
			 * that claim and process packets.
			 */
			if (port->p_type == OPDL_REGULAR_PORT ||
					port->p_type == OPDL_ASYNC_PORT)
				sprintf(total_cyc, " %'16"PRIu64"",
					(cpg != 0 ?
					 port->port_stat[total_cycles] / cpg
					 : 0));
			else
				sprintf(total_cyc, "             ----");

			fprintf(f,
				"%4s %10u %8u %9s %'16"PRIu64" %'16"PRIu64" %s "
				"%'16"PRIu64" %'16"PRIu64" %'16"PRIu64"\n",
				p_type,
				i,
				port->id,
				(port->external_qid == OPDL_INVALID_QID ? "---"
				 : queue_id),
				(cne != 0 ?
				 port->port_stat[claim_pkts_requested] / cne
				 : 0),
				(cne != 0 ?
				 port->port_stat[claim_pkts_granted] / cne
				 : 0),
				total_cyc,
				port->port_stat[claim_empty],
				port->port_stat[claim_non_empty],
				port->port_stat[claim_pkts_granted]);
		}
	}
}
static void
opdl_stop(struct rte_eventdev *dev)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);

	opdl_xstats_uninit(dev);
	destroy_queues_and_rings(dev);

	device->started = 0;
}
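
/*
 * The OPDL pipeline is static: queues, rings, stage dependencies and the
 * per-port enq/deq handlers are all built at start time from the metadata
 * recorded by the setup/link calls above. Each step in opdl_start() below
 * depends on the previous one having succeeded.
 */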
static int
opdl_start(struct rte_eventdev *dev)
{
	int err = 0;

	if (!err)
		err = create_queues_and_rings(dev);
	if (!err)
		err = assign_internal_queue_ids(dev);
	if (!err)
		err = initialise_queue_zero_ports(dev);
	if (!err)
		err = initialise_all_other_ports(dev);
	if (!err)
		err = check_queues_linked(dev);
	if (!err)
		err = opdl_add_event_handlers(dev);
	if (!err)
		err = build_all_dependencies(dev);

	if (!err) {
		opdl_xstats_init(dev);

		struct opdl_evdev *device = opdl_pmd_priv(dev);

		PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
			"SUCCESS : Created %u total queues (%u ex, %u in),"
			" %u opdls, %u event_dev ports, %u input ports",
			opdl_pmd_dev_id(device),
			device->nb_queues,
			(device->nb_queues - device->nb_opdls),
			device->nb_opdls,
			device->nb_opdls,
			device->nb_ports,
			device->queue[0].nb_ports);
	}

	return err;
}
static int
opdl_close(struct rte_eventdev *dev)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);
	uint32_t i;

	for (i = 0; i < device->max_port_nb; i++) {
		memset(&device->ports[i],
			0,
			sizeof(struct opdl_port));
	}

	memset(&device->s_md,
		0,
		sizeof(struct opdl_stage_meta_data)*OPDL_PORTS_MAX);

	memset(&device->q_md,
		0,
		sizeof(struct opdl_queue_meta_data)*OPDL_MAX_QUEUES);

	memset(device->q_map_ex_to_in,
		0,
		sizeof(uint8_t)*OPDL_INVALID_QID);

	opdl_xstats_uninit(dev);

	device->max_port_nb = 0;
	device->max_queue_nb = 0;
	device->nb_opdls = 0;
	device->nb_queues = 0;
	device->nb_ports = 0;
	device->nb_q_md = 0;

	dev->data->nb_queues = 0;
	dev->data->nb_ports = 0;

	return 0;
}
static int
assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
	int *socket_id = opaque;
	*socket_id = atoi(value);
	if (*socket_id >= RTE_MAX_NUMA_NODES)
		return -1;
	return 0;
}
static int
set_do_validation(const char *key __rte_unused, const char *value, void *opaque)
{
	int *do_val = opaque;
	*do_val = atoi(value);
	if (*do_val != 0)
		*do_val = 1;
	return 0;
}
static int
set_do_test(const char *key __rte_unused, const char *value, void *opaque)
{
	int *do_test = opaque;
	*do_test = atoi(value);
	if (*do_test != 0)
		*do_test = 1;
	return 0;
}
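
/*
 * For illustration only: besides the --vdev EAL argument, an application
 * can create an instance at run time with rte_vdev_init(); the device
 * name "event_opdl0" here is hypothetical:
 *
 *	if (rte_vdev_init("event_opdl0", "numa_node=0,do_validation=1") < 0)
 *		rte_panic("Cannot create opdl vdev\n");
 */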
static int
opdl_probe(struct rte_vdev_device *vdev)
{
	static struct rte_eventdev_ops evdev_opdl_ops = {
		.dev_configure = opdl_dev_configure,
		.dev_infos_get = opdl_info_get,
		.dev_close = opdl_close,
		.dev_start = opdl_start,
		.dev_stop = opdl_stop,
		.dump = opdl_dump,

		.queue_def_conf = opdl_queue_def_conf,
		.queue_setup = opdl_queue_setup,
		.queue_release = opdl_queue_release,
		.port_def_conf = opdl_port_def_conf,
		.port_setup = opdl_port_setup,
		.port_release = opdl_port_release,
		.port_link = opdl_port_link,
		.port_unlink = opdl_port_unlink,

		.xstats_get = opdl_xstats_get,
		.xstats_get_names = opdl_xstats_get_names,
		.xstats_get_by_name = opdl_xstats_get_by_name,
		.xstats_reset = opdl_xstats_reset,
	};

	static const char *const args[] = {
		NUMA_NODE_ARG,
		DO_VALIDATION_ARG,
		DO_TEST_ARG,
		NULL
	};
	const char *name;
	const char *params;
	struct rte_eventdev *dev;
	struct opdl_evdev *opdl;
	int socket_id = rte_socket_id();
	int do_validation = 0;
	int do_test = 0;
	int str_len;
	int test_result = 0;
	name = rte_vdev_device_name(vdev);
	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (kvlist == NULL) {
			PMD_DRV_LOG(INFO,
				"Ignoring unsupported parameters when creating device '%s'\n",
				name);
		} else {
			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
					assign_numa_node, &socket_id);
			if (ret != 0) {
				PMD_DRV_LOG(ERR,
					"%s: Error parsing numa node parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DO_VALIDATION_ARG,
					set_do_validation, &do_validation);
			if (ret != 0) {
				PMD_DRV_LOG(ERR,
					"%s: Error parsing do validation parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DO_TEST_ARG,
					set_do_test, &do_test);
			if (ret != 0) {
				PMD_DRV_LOG(ERR,
					"%s: Error parsing do test parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}
	dev = rte_event_pmd_vdev_init(name,
			sizeof(struct opdl_evdev), socket_id);
	if (dev == NULL) {
		PMD_DRV_LOG(ERR, "eventdev vdev init() failed");
		return -EFAULT;
	}

	PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
		"Success - creating eventdev device %s, numa_node:[%d], do_validation:[%s]"
		", self_test:[%s]\n",
		dev->data->dev_id,
		name,
		socket_id,
		(do_validation ? "true" : "false"),
		(do_test ? "true" : "false"));
	dev->dev_ops = &evdev_opdl_ops;

	dev->enqueue = opdl_event_enqueue;
	dev->enqueue_burst = opdl_event_enqueue_burst;
	dev->enqueue_new_burst = opdl_event_enqueue_burst;
	dev->enqueue_forward_burst = opdl_event_enqueue_burst;
	dev->dequeue = opdl_event_dequeue;
	dev->dequeue_burst = opdl_event_dequeue_burst;
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	opdl = dev->data->dev_private;
	opdl->data = dev->data;
	opdl->socket = socket_id;
	opdl->do_validation = do_validation;
	opdl->do_test = do_test;
	str_len = strlen(name);
	memcpy(opdl->service_name, name, str_len);

	if (do_test == 1)
		test_result = opdl_selftest();

	return test_result;
}
static int
opdl_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	PMD_DRV_LOG(INFO, "Closing eventdev opdl device %s\n", name);

	return rte_event_pmd_vdev_uninit(name);
}
static struct rte_vdev_driver evdev_opdl_pmd_drv = {
	.probe = opdl_probe,
	.remove = opdl_remove
};
RTE_INIT(opdl_init_log);

static void
opdl_init_log(void)
{
	opdl_logtype_driver = rte_log_register("pmd.event.opdl.driver");
	if (opdl_logtype_driver >= 0)
		rte_log_set_level(opdl_logtype_driver, RTE_LOG_INFO);
}
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OPDL_PMD, evdev_opdl_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_opdl, NUMA_NODE_ARG "=<int>"
		DO_VALIDATION_ARG "=<int>" DO_TEST_ARG "=<int>");