event/sw: return default port/queue config
[dpdk.git] drivers/event/sw/sw_evdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>

#include <rte_vdev.h>
#include <rte_memzone.h>
#include <rte_kvargs.h>
#include <rte_ring.h>

#include "sw_evdev.h"

#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
#define SCHED_QUANTA_ARG "sched_quanta"
#define CREDIT_QUANTA_ARG "credit_quanta"

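/*
 * Report the default configuration for an event queue: atomic-only
 * scheduling, 4096 atomic flows, a single reorder sequence and normal
 * priority.
 */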
static void
sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
		  struct rte_event_queue_conf *conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	static const struct rte_event_queue_conf default_conf = {
		.nb_atomic_flows = 4096,
		.nb_atomic_order_sequences = 1,
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	*conf = default_conf;
}

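/*
 * Report the default configuration for an event port: a new-event
 * threshold of 1024 and enqueue/dequeue burst depths of 16.
 */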
static void
sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
		 struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
}

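/*
 * Apply the device-level configuration. Per-dequeue timeouts are not
 * supported by this PMD, so that flag is rejected with -ENOTSUP.
 */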
static int
sw_dev_configure(const struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *conf = &data->dev_conf;

	sw->qid_count = conf->nb_event_queues;
	sw->port_count = conf->nb_event_ports;
	sw->nb_events_limit = conf->nb_events_limit;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		return -ENOTSUP;

	return 0;
}

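/* Fill in the static capabilities and limits of the software eventdev. */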
static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
	RTE_SET_USED(dev);

	static const struct rte_event_dev_info evdev_sw_info = {
			.driver_name = SW_PMD_NAME,
			.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
			.max_event_queue_flows = SW_QID_NUM_FIDS,
			.max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
			.max_event_priority_levels = SW_IQS_MAX,
			.max_event_ports = SW_PORTS_MAX,
			.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
			.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
			.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
			.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_EVENT_QOS),
	};

	*info = evdev_sw_info;
}

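/*
 * rte_kvargs_process() callbacks for the vdev arguments below; each one
 * converts the value string with atoi() and rejects out-of-range input
 * by returning -1.
 */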
static int
assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
	int *socket_id = opaque;
	*socket_id = atoi(value);
	if (*socket_id >= RTE_MAX_NUMA_NODES)
		return -1;
	return 0;
}

static int
set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
	int *quanta = opaque;
	*quanta = atoi(value);
	if (*quanta < 0 || *quanta >= 4096)
		return -1;
	return 0;
}

static int
set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
	int *credit = opaque;
	*credit = atoi(value);
	if (*credit < 0 || *credit >= 128)
		return -1;
	return 0;
}

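/*
 * Create a software eventdev instance. Optional vdev arguments select the
 * NUMA node and the scheduler/credit quanta, e.g.
 *   --vdev="event_sw0,numa_node=0,sched_quanta=64,credit_quanta=32"
 * (illustrative values; the compile-time defaults are used when an
 * argument is omitted).
 */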
static int
sw_probe(const char *name, const char *params)
{
	static const struct rte_eventdev_ops evdev_sw_ops = {
			.dev_configure = sw_dev_configure,
			.dev_infos_get = sw_info_get,

			.queue_def_conf = sw_queue_def_conf,
			.port_def_conf = sw_port_def_conf,
	};

	static const char *const args[] = {
		NUMA_NODE_ARG,
		SCHED_QUANTA_ARG,
		CREDIT_QUANTA_ARG,
		NULL
	};
	struct rte_eventdev *dev;
	struct sw_evdev *sw;
	int socket_id = rte_socket_id();
	int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
	int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;

	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			SW_LOG_INFO(
				"Ignoring unsupported parameters when creating device '%s'\n",
				name);
		} else {
			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
					assign_numa_node, &socket_id);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing numa node parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
					set_sched_quanta, &sched_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing sched quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
					set_credit_quanta, &credit_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing credit quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}

	SW_LOG_INFO(
			"Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
			name, socket_id, sched_quanta, credit_quanta);

	dev = rte_event_pmd_vdev_init(name,
			sizeof(struct sw_evdev), socket_id);
	if (dev == NULL) {
		SW_LOG_ERR("eventdev vdev init() failed");
		return -EFAULT;
	}
	dev->dev_ops = &evdev_sw_ops;

	sw = dev->data->dev_private;
	sw->data = dev->data;

	/* copy values passed from vdev command line to instance */
	sw->credit_update_quanta = credit_quanta;
	sw->sched_quanta = sched_quanta;

	return 0;
}

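/* Release a software eventdev instance previously created by sw_probe(). */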
static int
sw_remove(const char *name)
{
	if (name == NULL)
		return -EINVAL;

	SW_LOG_INFO("Closing eventdev sw device %s\n", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_sw_pmd_drv = {
	.probe = sw_probe,
	.remove = sw_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
		SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");