/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <errno.h>
#include <stdlib.h>

#include <rte_memzone.h>
#include <rte_kvargs.h>
/* Vdev name of the SW eventdev PMD and the devargs keys it accepts. */
#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
#define SCHED_QUANTA_ARG "sched_quanta"
#define CREDIT_QUANTA_ARG "credit_quanta"
48 sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
49 struct rte_event_queue_conf *conf)
52 RTE_SET_USED(queue_id);
54 static const struct rte_event_queue_conf default_conf = {
55 .nb_atomic_flows = 4096,
56 .nb_atomic_order_sequences = 1,
57 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
58 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
65 sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
66 struct rte_event_port_conf *port_conf)
69 RTE_SET_USED(port_id);
71 port_conf->new_event_threshold = 1024;
72 port_conf->dequeue_depth = 16;
73 port_conf->enqueue_depth = 16;
77 sw_dev_configure(const struct rte_eventdev *dev)
79 struct sw_evdev *sw = sw_pmd_priv(dev);
80 const struct rte_eventdev_data *data = dev->data;
81 const struct rte_event_dev_config *conf = &data->dev_conf;
83 sw->qid_count = conf->nb_event_queues;
84 sw->port_count = conf->nb_event_ports;
85 sw->nb_events_limit = conf->nb_events_limit;
87 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
94 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
98 static const struct rte_event_dev_info evdev_sw_info = {
99 .driver_name = SW_PMD_NAME,
100 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
101 .max_event_queue_flows = SW_QID_NUM_FIDS,
102 .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
103 .max_event_priority_levels = SW_IQS_MAX,
104 .max_event_ports = SW_PORTS_MAX,
105 .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
106 .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
107 .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
108 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
109 RTE_EVENT_DEV_CAP_EVENT_QOS),
112 *info = evdev_sw_info;
116 assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
118 int *socket_id = opaque;
119 *socket_id = atoi(value);
120 if (*socket_id >= RTE_MAX_NUMA_NODES)
126 set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
128 int *quanta = opaque;
129 *quanta = atoi(value);
130 if (*quanta < 0 || *quanta >= 4096)
136 set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
138 int *credit = opaque;
139 *credit = atoi(value);
140 if (*credit < 0 || *credit >= 128)
146 sw_probe(const char *name, const char *params)
148 static const struct rte_eventdev_ops evdev_sw_ops = {
149 .dev_configure = sw_dev_configure,
150 .dev_infos_get = sw_info_get,
152 .queue_def_conf = sw_queue_def_conf,
153 .port_def_conf = sw_port_def_conf,
156 static const char *const args[] = {
162 struct rte_eventdev *dev;
164 int socket_id = rte_socket_id();
165 int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
166 int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
168 if (params != NULL && params[0] != '\0') {
169 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
173 "Ignoring unsupported parameters when creating device '%s'\n",
176 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
177 assign_numa_node, &socket_id);
180 "%s: Error parsing numa node parameter",
182 rte_kvargs_free(kvlist);
186 ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
187 set_sched_quanta, &sched_quanta);
190 "%s: Error parsing sched quanta parameter",
192 rte_kvargs_free(kvlist);
196 ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
197 set_credit_quanta, &credit_quanta);
200 "%s: Error parsing credit quanta parameter",
202 rte_kvargs_free(kvlist);
206 rte_kvargs_free(kvlist);
211 "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
212 name, socket_id, sched_quanta, credit_quanta);
214 dev = rte_event_pmd_vdev_init(name,
215 sizeof(struct sw_evdev), socket_id);
217 SW_LOG_ERR("eventdev vdev init() failed");
220 dev->dev_ops = &evdev_sw_ops;
222 sw = dev->data->dev_private;
223 sw->data = dev->data;
225 /* copy values passed from vdev command line to instance */
226 sw->credit_update_quanta = credit_quanta;
227 sw->sched_quanta = sched_quanta;
233 sw_remove(const char *name)
238 SW_LOG_INFO("Closing eventdev sw device %s\n", name);
240 return rte_event_pmd_vdev_uninit(name);
243 static struct rte_vdev_driver evdev_sw_pmd_drv = {
248 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
249 RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
250 SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>");