4 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/* NOTE(review): this file is an extraction fragment — the stray leading
 * numbers on each line are original file line numbers fused into the text,
 * and many intermediate lines are missing. Comments below describe only
 * what is visible. */
36 #include <rte_memzone.h>
37 #include <rte_kvargs.h>
/* Name under which this PMD registers as a virtual device ("event_sw"). */
42 #define EVENTDEV_NAME_SW_PMD event_sw
/* Keys for the vdev command-line parameters parsed in sw_probe(). */
43 #define NUMA_NODE_ARG "numa_node"
44 #define SCHED_QUANTA_ARG "sched_quanta"
45 #define CREDIT_QUANTA_ARG "credit_quanta"
/* Copy the application-supplied eventdev configuration into the driver's
 * private sw_evdev state (queue/port counts, in-flight event limit).
 * NOTE(review): fragment — storage class, braces and the return statement
 * are not visible in this chunk. */
48 sw_dev_configure(const struct rte_eventdev *dev)
50 struct sw_evdev *sw = sw_pmd_priv(dev);
51 const struct rte_eventdev_data *data = dev->data;
52 const struct rte_event_dev_config *conf = &data->dev_conf;
/* Cache the configured sizes in the private device structure. */
54 sw->qid_count = conf->nb_event_queues;
55 sw->port_count = conf->nb_event_ports;
56 sw->nb_events_limit = conf->nb_events_limit;
/* Per-dequeue timeout flag is checked here; the branch body (presumably an
 * unsupported-feature error return) is missing from this fragment — confirm
 * against the full source. */
58 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
/* Report this PMD's static capabilities/limits to the application via the
 * rte_event_dev_info_get() path. All values come from compile-time driver
 * constants, so the info struct can be a function-local static const.
 * NOTE(review): fragment — storage class, braces and any use of 'dev' are
 * not visible in this chunk. */
65 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
69 static const struct rte_event_dev_info evdev_sw_info = {
70 .driver_name = SW_PMD_NAME,
71 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
72 .max_event_queue_flows = SW_QID_NUM_FIDS,
73 .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
/* NOTE(review): priority levels reported as SW_IQS_MAX (number of internal
 * queues) — looks intentional (one priority per IQ) but verify upstream. */
74 .max_event_priority_levels = SW_IQS_MAX,
75 .max_event_ports = SW_PORTS_MAX,
76 .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
77 .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
78 .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
/* This PMD advertises per-queue and per-event QoS support. */
79 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
80 RTE_EVENT_DEV_CAP_EVENT_QOS),
/* Struct copy out to the caller-provided info buffer. */
83 *info = evdev_sw_info;
/* rte_kvargs_process() callback: parse the "numa_node" vdev argument into
 * the int pointed to by 'opaque'.
 * NOTE(review): atoi() reports no parse errors (malformed input yields 0);
 * only the upper bound is range-checked here — the rejecting branch body is
 * missing from this fragment. Consider strtol() upstream. */
87 assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
89 int *socket_id = opaque;
90 *socket_id = atoi(value);
91 if (*socket_id >= RTE_MAX_NUMA_NODES)
/* rte_kvargs_process() callback: parse the "sched_quanta" vdev argument.
 * Valid range per the visible check: 0 <= quanta < 4096.
 * NOTE(review): fragment — the declaration of 'quanta' and the rejecting
 * branch body are missing from this chunk. */
97 set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
100 *quanta = atoi(value);
101 if (*quanta < 0 || *quanta >= 4096)
/* rte_kvargs_process() callback: parse the "credit_quanta" vdev argument.
 * Valid range per the visible check: 0 <= credit < 128.
 * NOTE(review): fragment — the rejecting branch body is missing; atoi()
 * silently maps malformed input to 0. */
107 set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
109 int *credit = opaque;
110 *credit = atoi(value);
111 if (*credit < 0 || *credit >= 128)
/* vdev probe entry point: parse optional "numa_node", "sched_quanta" and
 * "credit_quanta" parameters, create the eventdev instance and stash the
 * parsed values in its private data.
 * NOTE(review): fragment — error-return statements, NULL checks and several
 * closing braces are missing from this chunk; comments describe only the
 * visible flow. */
117 sw_probe(const char *name, const char *params)
/* Ops table wired into the created device below. */
119 static const struct rte_eventdev_ops evdev_sw_ops = {
120 .dev_configure = sw_dev_configure,
121 .dev_infos_get = sw_info_get,
/* Whitelist of recognized kvargs keys (terminator not visible here). */
124 static const char *const args[] = {
130 struct rte_eventdev *dev;
/* Defaults used when the corresponding vdev parameter is absent. */
132 int socket_id = rte_socket_id();
133 int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
134 int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
/* Only parse when a parameter string was actually supplied. */
136 if (params != NULL && params[0] != '\0') {
137 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
/* Unparsable/unknown parameters are warned about, not fatal (the
 * surrounding branch is missing from this fragment). */
141 "Ignoring unsupported parameters when creating device '%s'\n",
/* Each rte_kvargs_process() call dispatches to the matching setter
 * callback above; on error the kvlist is freed before (presumably)
 * returning — the return statements are not visible here. */
144 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
145 assign_numa_node, &socket_id);
148 "%s: Error parsing numa node parameter",
150 rte_kvargs_free(kvlist);
154 ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
155 set_sched_quanta, &sched_quanta);
158 "%s: Error parsing sched quanta parameter",
160 rte_kvargs_free(kvlist);
164 ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
165 set_credit_quanta, &credit_quanta);
168 "%s: Error parsing credit quanta parameter",
170 rte_kvargs_free(kvlist);
/* Success path: kvlist is released after all keys are processed. */
174 rte_kvargs_free(kvlist);
179 "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
180 name, socket_id, sched_quanta, credit_quanta);
/* Allocate the eventdev with sw_evdev-sized private data on the
 * requested socket. */
182 dev = rte_event_pmd_vdev_init(name,
183 sizeof(struct sw_evdev), socket_id);
185 SW_LOG_ERR("eventdev vdev init() failed");
188 dev->dev_ops = &evdev_sw_ops;
/* Back-pointer so the scheduler can reach shared eventdev data. */
190 sw = dev->data->dev_private;
191 sw->data = dev->data;
193 /* copy values passed from vdev command line to instance */
194 sw->credit_update_quanta = credit_quanta;
195 sw->sched_quanta = sched_quanta;
/* vdev remove entry point: log and tear down the named eventdev instance.
 * NOTE(review): fragment — storage class, braces and any name validation
 * are not visible in this chunk. */
201 sw_remove(const char *name)
206 SW_LOG_INFO("Closing eventdev sw device %s\n", name);
/* Delegate the actual teardown to the common eventdev vdev helper. */
208 return rte_event_pmd_vdev_uninit(name);
/* Driver descriptor hooking sw_probe/sw_remove into the vdev bus, then the
 * registration macro that makes "event_sw" discoverable at EAL init.
 * NOTE(review): fragment — the initializer body (.probe/.remove members)
 * and closing brace are missing from this chunk. */
211 static struct rte_vdev_driver evdev_sw_pmd_drv = {
216 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
217 RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
218 SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>");