 * Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <rte_memzone.h>
#include <rte_kvargs.h>

#include "event_ring.h"

#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
#define SCHED_QUANTA_ARG "sched_quanta"
#define CREDIT_QUANTA_ARG "credit_quanta"
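
/*
 * Optional vdev arguments accepted by this PMD. They are normally passed on
 * the EAL command line, for example (values purely illustrative):
 *   --vdev="event_sw0,numa_node=0,sched_quanta=64,credit_quanta=32"
 */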
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);

sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		const struct rte_event_port_conf *conf)
	struct sw_evdev *sw = sw_pmd_priv(dev);
	struct sw_port *p = &sw->ports[port_id];
	char buf[QE_RING_NAMESIZE];

	struct rte_event_dev_info info;
	sw_info_get(dev, &info);

	/* detect re-configuration and return credits to the instance if needed */
	/* Credits are taken from the pool one quanta at a time, and may
	 * either already be spent (counted in p->inflights) or still be
	 * held by the port (p->inflight_credits). Return the sum so that
	 * no credits are leaked.
	 */
	int possible_inflights = p->inflight_credits + p->inflights;
	rte_atomic32_sub(&sw->inflights, possible_inflights);
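	/* (sw->inflights is shared with the scheduling core, which is why the
	 * adjustment above is done atomically.)
	 */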
	*p = (struct sw_port){0}; /* zero entire structure */
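	/* Each port owns two rings: rx_worker_ring (created next) buffers
	 * events the worker enqueues towards the scheduler, while
	 * cq_worker_ring (created further below) holds events the scheduler
	 * has assigned to this port for dequeue.
	 */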
	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
	p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
			dev->data->socket_id);
	if (p->rx_worker_ring == NULL) {
		SW_LOG_ERR("Error creating RX worker ring for port %d\n",

	p->inflight_max = conf->new_event_threshold;
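	/* new_event_threshold bounds how many events may be in flight in the
	 * device before this port stops accepting RTE_EVENT_OP_NEW enqueues;
	 * see the rte_event_port_conf documentation.
	 */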
	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
	p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
			dev->data->socket_id);
	if (p->cq_worker_ring == NULL) {
		qe_ring_destroy(p->rx_worker_ring);
		SW_LOG_ERR("Error creating CQ worker ring for port %d\n",

	sw->cq_ring_space[port_id] = conf->dequeue_depth;
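	/* cq_ring_space tracks the free slots remaining in this port's CQ
	 * ring; the scheduler consults it before moving more events to the
	 * port.
	 */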
	/* set hist list contents to empty */
	for (i = 0; i < SW_PORT_HIST_LIST; i++) {
		p->hist_list[i].fid = -1;
		p->hist_list[i].qid = -1;

	dev->data->ports[port_id] = p;

sw_port_release(void *port)
	struct sw_port *p = (void *)port;

	qe_ring_destroy(p->rx_worker_ring);
	qe_ring_destroy(p->cq_worker_ring);
	memset(p, 0, sizeof(*p));

qid_init(struct sw_evdev *sw, unsigned int idx, int type,
		const struct rte_event_queue_conf *queue_conf)
	int dev_id = sw->data->dev_id;
	int socket_id = sw->data->socket_id;
	char buf[IQ_RING_NAMESIZE];
	struct sw_qid *qid = &sw->qids[idx];

	for (i = 0; i < SW_IQS_MAX; i++) {
		snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
		qid->iq[i] = iq_ring_create(buf, socket_id);
			SW_LOG_DBG("ring create failed");

	/* Initialize the FID structures to no pinning (-1), and zero packets */
	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
	for (i = 0; i < RTE_DIM(qid->fids); i++)

	qid->priority = queue_conf->priority;

	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		char ring_name[RTE_RING_NAMESIZE];
		uint32_t window_size;

		/* rte_ring and window_size_mask require window_size to be a
		 * power of two.
		 */
		window_size = rte_align32pow2(
				queue_conf->nb_atomic_order_sequences);
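		/* window_size is a power of two now, so (window_size - 1)
		 * also serves as the wrap-around mask used when indexing the
		 * reorder buffer.
		 */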
		qid->window_size = window_size - 1;
				"invalid reorder_window_size for ordered queue\n"

		snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
		qid->reorder_buffer = rte_zmalloc_socket(buf,
				window_size * sizeof(qid->reorder_buffer[0]),
		if (!qid->reorder_buffer) {
			SW_LOG_DBG("reorder_buffer malloc failed\n");

		memset(&qid->reorder_buffer[0],
				window_size * sizeof(qid->reorder_buffer[0]));

		snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",

		/* lookup the ring, and if it already exists, free it */
		struct rte_ring *cleanup = rte_ring_lookup(ring_name);
			rte_ring_free(cleanup);
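		/* Only the scheduling core is expected to touch the freelist,
		 * hence the single-producer / single-consumer ring flags used
		 * below.
		 */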
		qid->reorder_buffer_freelist = rte_ring_create(ring_name,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qid->reorder_buffer_freelist) {
			SW_LOG_DBG("freelist ring create failed");

		/* Populate the freelist with reorder buffer entries. Enqueue
		 * 'window_size - 1' entries because the rte_ring holds only
		 * that many.
		 */
		for (i = 0; i < window_size - 1; i++) {
			if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
					&qid->reorder_buffer[i]) < 0)

		qid->reorder_buffer_index = 0;

	qid->initialized = 1;

	for (i = 0; i < SW_IQS_MAX; i++) {
			iq_ring_destroy(qid->iq[i]);

	if (qid->reorder_buffer) {
		rte_free(qid->reorder_buffer);
		qid->reorder_buffer = NULL;

	if (qid->reorder_buffer_freelist) {
		rte_ring_free(qid->reorder_buffer_freelist);
		qid->reorder_buffer_freelist = NULL;

sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *conf)

	/* SINGLE_LINK can be OR-ed with other types, so handle first */
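	/* A single-link queue has exactly one consumer port linked to it, so
	 * events can be handed to that port directly, bypassing the atomic /
	 * ordered / parallel scheduling logic (SW_SCHED_TYPE_DIRECT).
	 */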
	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
		type = SW_SCHED_TYPE_DIRECT;
	switch (conf->event_queue_cfg) {
	case RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:
		type = RTE_SCHED_TYPE_ATOMIC;
	case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
		type = RTE_SCHED_TYPE_ORDERED;
	case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
		type = RTE_SCHED_TYPE_PARALLEL;
	case RTE_EVENT_QUEUE_CFG_ALL_TYPES:
		SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
		SW_LOG_ERR("Unknown queue type %d requested\n",
				conf->event_queue_cfg);

	struct sw_evdev *sw = sw_pmd_priv(dev);
	return qid_init(sw, queue_id, type, conf);

sw_queue_release(struct rte_eventdev *dev, uint8_t id)
	struct sw_evdev *sw = sw_pmd_priv(dev);
	struct sw_qid *qid = &sw->qids[id];

	for (i = 0; i < SW_IQS_MAX; i++)
		iq_ring_destroy(qid->iq[i]);

	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		rte_free(qid->reorder_buffer);
		rte_ring_free(qid->reorder_buffer_freelist);

	memset(qid, 0, sizeof(*qid));

sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
		struct rte_event_queue_conf *conf)
	RTE_SET_USED(queue_id);

	static const struct rte_event_queue_conf default_conf = {
		.nb_atomic_flows = 4096,
		.nb_atomic_order_sequences = 1,
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,

	*conf = default_conf;

sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
		struct rte_event_port_conf *port_conf)
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;

sw_dev_configure(const struct rte_eventdev *dev)
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *conf = &data->dev_conf;

	sw->qid_count = conf->nb_event_queues;
	sw->port_count = conf->nb_event_ports;
	sw->nb_events_limit = conf->nb_events_limit;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)

sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)

	static const struct rte_event_dev_info evdev_sw_info = {
		.driver_name = SW_PMD_NAME,
		.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
		.max_event_queue_flows = SW_QID_NUM_FIDS,
		.max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
		.max_event_priority_levels = SW_IQS_MAX,
		.max_event_ports = SW_PORTS_MAX,
		.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
		.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
		.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
		.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
				RTE_EVENT_DEV_CAP_EVENT_QOS),

	*info = evdev_sw_info;

assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
	int *socket_id = opaque;
	*socket_id = atoi(value);
	if (*socket_id >= RTE_MAX_NUMA_NODES)

set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
	int *quanta = opaque;
	*quanta = atoi(value);
	if (*quanta < 0 || *quanta >= 4096)

set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
	int *credit = opaque;
	*credit = atoi(value);
	if (*credit < 0 || *credit >= 128)

sw_probe(const char *name, const char *params)
	static const struct rte_eventdev_ops evdev_sw_ops = {
		.dev_configure = sw_dev_configure,
		.dev_infos_get = sw_info_get,

		.queue_def_conf = sw_queue_def_conf,
		.queue_setup = sw_queue_setup,
		.queue_release = sw_queue_release,
		.port_def_conf = sw_port_def_conf,
		.port_setup = sw_port_setup,
		.port_release = sw_port_release,

	static const char *const args[] = {

	struct rte_eventdev *dev;

	int socket_id = rte_socket_id();
	int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
	int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
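
	/* sched_quanta roughly controls how much scheduling work is done per
	 * call to the scheduler, while credit_quanta is the batch size in
	 * which ports take and return credits from the device-wide in-flight
	 * pool; both may be overridden by the vdev arguments parsed below.
	 */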
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

				"Ignoring unsupported parameters when creating device '%s'\n",

			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
					assign_numa_node, &socket_id);
					"%s: Error parsing numa node parameter",
				rte_kvargs_free(kvlist);

			ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
					set_sched_quanta, &sched_quanta);
					"%s: Error parsing sched quanta parameter",
				rte_kvargs_free(kvlist);

			ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
					set_credit_quanta, &credit_quanta);
					"%s: Error parsing credit quanta parameter",
				rte_kvargs_free(kvlist);

			rte_kvargs_free(kvlist);

			"Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
			name, socket_id, sched_quanta, credit_quanta);

	dev = rte_event_pmd_vdev_init(name,
			sizeof(struct sw_evdev), socket_id);
		SW_LOG_ERR("eventdev vdev init() failed");

	dev->dev_ops = &evdev_sw_ops;

	sw = dev->data->dev_private;
	sw->data = dev->data;

	/* copy values passed from vdev command line to instance */
	sw->credit_update_quanta = credit_quanta;
	sw->sched_quanta = sched_quanta;

sw_remove(const char *name)
	SW_LOG_INFO("Closing eventdev sw device %s\n", name);

	return rte_event_pmd_vdev_uninit(name);

static struct rte_vdev_driver evdev_sw_pmd_drv = {

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
		SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");