/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Ericsson AB
 */

#include <rte_eventdev_pmd.h>
#include <rte_eventdev_pmd_vdev.h>

#include "dsw_evdev.h"

#define EVENTDEV_NAME_DSW_PMD event_dsw

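/* Set up an event port: initialize the per-port state and create the
 * port's input event ring, onto which events destined for this port
 * are enqueued. The ring is single-consumer, since only the owning
 * port dequeues from it.
 */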
static int
dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
	       const struct rte_event_port_conf *conf)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_port *port;
	struct rte_event_ring *in_ring;
	char ring_name[RTE_RING_NAMESIZE];

	port = &dsw->ports[port_id];

	*port = (struct dsw_port) {
		.id = port_id,
		.dsw = dsw,
		.dequeue_depth = conf->dequeue_depth,
		.enqueue_depth = conf->enqueue_depth,
		.new_event_threshold = conf->new_event_threshold
	};

	snprintf(ring_name, sizeof(ring_name), "dsw%d_p%u", dev->data->dev_id,
		 port_id);

	in_ring = rte_event_ring_create(ring_name, DSW_IN_RING_SIZE,
					dev->data->socket_id,
					RING_F_SC_DEQ|RING_F_EXACT_SZ);

	if (in_ring == NULL)
		return -ENOMEM;

	port->in_ring = in_ring;

	dev->data->ports[port_id] = port;

	return 0;
}

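/* Report the default port configuration, used by
 * rte_event_port_default_conf_get() and when the application passes a
 * NULL configuration to rte_event_port_setup().
 */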
static void
dsw_port_def_conf(struct rte_eventdev *dev __rte_unused,
		  uint8_t port_id __rte_unused,
		  struct rte_event_port_conf *port_conf)
{
	*port_conf = (struct rte_event_port_conf) {
		.new_event_threshold = 1024,
		.dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH / 4,
		.enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH / 4
	};
}

static void
dsw_port_release(void *p)
{
	struct dsw_port *port = p;

	rte_event_ring_free(port->in_ring);
}

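/* Set up an event queue. Only atomic and parallel scheduling are
 * supported; ordered queues and "all types" queues are rejected.
 */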
static int
dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *conf)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	struct dsw_queue *queue = &dsw->queues[queue_id];

	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
		return -ENOTSUP;

	if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
		return -ENOTSUP;

	/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
	 * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
	 * the queue will only have a single serving port, no
	 * migration will ever happen, so the extra TYPE_ATOMIC
	 * migration overhead is avoided. (See the application-side
	 * sketch following this function.)
	 */
	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
		queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
	else /* atomic or parallel */
		queue->schedule_type = conf->schedule_type;

	queue->num_serving_ports = 0;

	return 0;
}

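/* Application-side sketch (not part of this driver; dev_id and
 * queue_id are assumed to be valid): configuring a single-link queue,
 * which the SINGLE_LINK branch above maps to RTE_SCHED_TYPE_ATOMIC.
 *
 *	struct rte_event_queue_conf conf;
 *
 *	rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
 *	conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
 *	rte_event_queue_setup(dev_id, queue_id, &conf);
 */
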
static void
dsw_queue_def_conf(struct rte_eventdev *dev __rte_unused,
		   uint8_t queue_id __rte_unused,
		   struct rte_event_queue_conf *queue_conf)
{
	*queue_conf = (struct rte_event_queue_conf) {
		.nb_atomic_flows = 4096,
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
}

static void
dsw_queue_release(struct rte_eventdev *dev __rte_unused,
		  uint8_t queue_id __rte_unused)
{
}

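/* Report device limits and capabilities.
 * RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED signals that scheduling is
 * performed by the lcores calling enqueue/dequeue themselves, so no
 * dedicated scheduler core or service is needed.
 */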
static void
dsw_info_get(struct rte_eventdev *dev __rte_unused,
	     struct rte_event_dev_info *info)
{
	*info = (struct rte_event_dev_info) {
		.driver_name = DSW_PMD_NAME,
		.max_event_queues = DSW_MAX_QUEUES,
		.max_event_queue_flows = DSW_MAX_FLOWS,
		.max_event_queue_priority_levels = 1,
		.max_event_priority_levels = 1,
		.max_event_ports = DSW_MAX_PORTS,
		.max_event_port_dequeue_depth = DSW_MAX_PORT_DEQUEUE_DEPTH,
		.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
		.max_num_events = DSW_MAX_EVENTS,
		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
	};
}

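/* Record the port and queue counts from the application-supplied
 * device configuration.
 */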
static int
dsw_configure(const struct rte_eventdev *dev)
{
	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
	const struct rte_event_dev_config *conf = &dev->data->dev_conf;

	dsw->num_ports = conf->nb_event_ports;
	dsw->num_queues = conf->nb_event_queues;

	return 0;
}

static struct rte_eventdev_ops dsw_evdev_ops = {
	.port_setup = dsw_port_setup,
	.port_def_conf = dsw_port_def_conf,
	.port_release = dsw_port_release,
	.queue_setup = dsw_queue_setup,
	.queue_def_conf = dsw_queue_def_conf,
	.queue_release = dsw_queue_release,
	.dev_infos_get = dsw_info_get,
	.dev_configure = dsw_configure,
};

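/* Probe a DSW virtual device: allocate the eventdev and its private
 * data and install the ops table. Secondary processes attach to the
 * device state already set up by the primary and skip private-data
 * initialization.
 */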
static int
dsw_probe(struct rte_vdev_device *vdev)
{
	const char *name;
	struct rte_eventdev *dev;
	struct dsw_evdev *dsw;

	name = rte_vdev_device_name(vdev);

	dev = rte_event_pmd_vdev_init(name, sizeof(struct dsw_evdev),
				      rte_socket_id());
	if (dev == NULL)
		return -EFAULT;

	dev->dev_ops = &dsw_evdev_ops;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	dsw = dev->data->dev_private;
	dsw->data = dev->data;

	return 0;
}

static int
dsw_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_dsw_pmd_drv = {
	.probe = dsw_probe,
	.remove = dsw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DSW_PMD, evdev_dsw_pmd_drv);
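
/* Usage sketch (an assumption based on standard DPDK vdev handling,
 * not something this file spells out): an instance of this PMD can be
 * created by passing "--vdev=event_dsw0" on the EAL command line, or
 * programmatically:
 *
 *	rte_vdev_init("event_dsw0", NULL);
 *
 * after which the device id can be looked up with
 * rte_event_dev_get_dev_id("event_dsw0") and the device configured
 * through the regular rte_event_* API.
 */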