event/sw: fix mapped qid count with parallel queue
[dpdk.git] drivers/event/sw/sw_evdev.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2016-2017 Intel Corporation. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of Intel Corporation nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <string.h>
34
35 #include <rte_vdev.h>
36 #include <rte_memzone.h>
37 #include <rte_kvargs.h>
38 #include <rte_ring.h>
39 #include <rte_errno.h>
40
41 #include "sw_evdev.h"
42 #include "iq_ring.h"
43 #include "event_ring.h"
44
45 #define EVENTDEV_NAME_SW_PMD event_sw
46 #define NUMA_NODE_ARG "numa_node"
47 #define SCHED_QUANTA_ARG "sched_quanta"
48 #define CREDIT_QUANTA_ARG "credit_quanta"
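
/* Illustrative devargs for this PMD (values are examples only; the device
 * name "event_sw0" is a placeholder):
 *     --vdev="event_sw0,numa_node=0,sched_quanta=64,credit_quanta=32"
 * Each key is handled by a kvargs callback processed in sw_probe().
 */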
49
50 static void
51 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
52
53 static int
54 sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
55                 const uint8_t priorities[], uint16_t num)
56 {
57         struct sw_port *p = port;
58         struct sw_evdev *sw = sw_pmd_priv(dev);
59         int i;
60
61         RTE_SET_USED(priorities);
62         for (i = 0; i < num; i++) {
63                 struct sw_qid *q = &sw->qids[queues[i]];
64
65                 /* check for qid map overflow */
66                 if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
67                         rte_errno = -EDQUOT;
68                         break;
69                 }
70
71                 if (p->is_directed && p->num_qids_mapped > 0) {
72                         rte_errno = -EDQUOT;
73                         break;
74                 }
75
76                 if (q->type == SW_SCHED_TYPE_DIRECT) {
77                         /* check directed qids only map to one port */
78                         if (p->num_qids_mapped > 0) {
79                                 rte_errno = -EDQUOT;
80                                 break;
81                         }
82                         /* check port only takes a directed flow */
83                         if (num > 1) {
84                                 rte_errno = -EDQUOT;
85                                 break;
86                         }
87
88                         p->is_directed = 1;
89                         p->num_qids_mapped = 1;
90                 } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
91                         p->num_ordered_qids++;
92                         p->num_qids_mapped++;
93                 } else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
94                                 q->type == RTE_SCHED_TYPE_PARALLEL) {
95                         p->num_qids_mapped++;
96                 }
97
98                 q->cq_map[q->cq_num_mapped_cqs] = p->id;
99                 rte_smp_wmb();
100                 q->cq_num_mapped_cqs++;
101         }
102         return i;
103 }
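
/*
 * Usage sketch (illustrative only, not part of this driver): applications
 * reach sw_port_link() through the public rte_eventdev.h API; a partial link
 * is reported through the return count and rte_errno. dev_id and port_id
 * below are placeholders:
 *
 *     uint8_t queues[] = { 0, 1 };
 *     uint8_t prios[]  = { RTE_EVENT_DEV_PRIORITY_NORMAL,
 *                          RTE_EVENT_DEV_PRIORITY_NORMAL };
 *     int n = rte_event_port_link(dev_id, port_id, queues, prios, 2);
 *     if (n < 2)
 *             printf("linked %d of 2 queues, rte_errno %d\n", n, rte_errno);
 */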
104
105 static int
106 sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
107                 uint16_t nb_unlinks)
108 {
109         struct sw_port *p = port;
110         struct sw_evdev *sw = sw_pmd_priv(dev);
111         unsigned int i, j;
112
113         int unlinked = 0;
114         for (i = 0; i < nb_unlinks; i++) {
115                 struct sw_qid *q = &sw->qids[queues[i]];
116                 for (j = 0; j < q->cq_num_mapped_cqs; j++) {
117                         if (q->cq_map[j] == p->id) {
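                                /* swap-remove: overwrite this slot with the
                                 * last mapped CQ and shrink the map; entry
                                 * order in cq_map is not significant.
                                 */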
118                                 q->cq_map[j] =
119                                         q->cq_map[q->cq_num_mapped_cqs - 1];
120                                 rte_smp_wmb();
121                                 q->cq_num_mapped_cqs--;
122                                 unlinked++;
123
124                                 p->num_qids_mapped--;
125
126                                 if (q->type == RTE_SCHED_TYPE_ORDERED)
127                                         p->num_ordered_qids--;
128
129                                 continue;
130                         }
131                 }
132         }
133         return unlinked;
134 }
135
136 static int
137 sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
138                 const struct rte_event_port_conf *conf)
139 {
140         struct sw_evdev *sw = sw_pmd_priv(dev);
141         struct sw_port *p = &sw->ports[port_id];
142         char buf[QE_RING_NAMESIZE];
143         unsigned int i;
144
145         struct rte_event_dev_info info;
146         sw_info_get(dev, &info);
147
148         /* detect re-configuring and return credits to instance if needed */
149         if (p->initialized) {
150                 /* taking credits from pool is done one quanta at a time, and
151                  * credits may be spent (counted in p->inflights) or still
152                  * available in the port (p->inflight_credits). We must return
153                  * the sum so that no credits are leaked.
154                  */
155                 int possible_inflights = p->inflight_credits + p->inflights;
156                 rte_atomic32_sub(&sw->inflights, possible_inflights);
157         }
158
159         *p = (struct sw_port){0}; /* zero entire structure */
160         p->id = port_id;
161         p->sw = sw;
162
163         snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
164                         "rx_worker_ring");
165         p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
166                         dev->data->socket_id);
167         if (p->rx_worker_ring == NULL) {
168                 SW_LOG_ERR("Error creating RX worker ring for port %d\n",
169                                 port_id);
170                 return -1;
171         }
172
173         p->inflight_max = conf->new_event_threshold;
174
175         snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
176                         "cq_worker_ring");
177         p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
178                         dev->data->socket_id);
179         if (p->cq_worker_ring == NULL) {
180                 qe_ring_destroy(p->rx_worker_ring);
181                 SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
182                                 port_id);
183                 return -1;
184         }
185         sw->cq_ring_space[port_id] = conf->dequeue_depth;
186
187         /* set hist list contents to empty */
188         for (i = 0; i < SW_PORT_HIST_LIST; i++) {
189                 p->hist_list[i].fid = -1;
190                 p->hist_list[i].qid = -1;
191         }
192         dev->data->ports[port_id] = p;
193
194         rte_smp_wmb();
195         p->initialized = 1;
196         return 0;
197 }
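
/*
 * Usage sketch (illustrative only): sw_port_setup() is reached through
 * rte_event_port_setup(); the conf fields consumed above could be supplied
 * as follows (dev_id and port_id are placeholders):
 *
 *     struct rte_event_port_conf pconf = {
 *             .new_event_threshold = 1024,
 *             .dequeue_depth = 16,
 *             .enqueue_depth = 16,
 *     };
 *     rte_event_port_setup(dev_id, port_id, &pconf);
 */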
198
199 static void
200 sw_port_release(void *port)
201 {
202         struct sw_port *p = (void *)port;
203         if (p == NULL)
204                 return;
205
206         qe_ring_destroy(p->rx_worker_ring);
207         qe_ring_destroy(p->cq_worker_ring);
208         memset(p, 0, sizeof(*p));
209 }
210
211 static int32_t
212 qid_init(struct sw_evdev *sw, unsigned int idx, int type,
213                 const struct rte_event_queue_conf *queue_conf)
214 {
215         unsigned int i;
216         int dev_id = sw->data->dev_id;
217         int socket_id = sw->data->socket_id;
218         char buf[IQ_RING_NAMESIZE];
219         struct sw_qid *qid = &sw->qids[idx];
220
221         for (i = 0; i < SW_IQS_MAX; i++) {
222                 snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
223                 qid->iq[i] = iq_ring_create(buf, socket_id);
224                 if (!qid->iq[i]) {
225                         SW_LOG_DBG("ring create failed");
226                         goto cleanup;
227                 }
228         }
229
230         /* Initialize the FID structures to no pinning (-1), and zero packets */
231         const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
232         for (i = 0; i < RTE_DIM(qid->fids); i++)
233                 qid->fids[i] = fid;
234
235         qid->id = idx;
236         qid->type = type;
237         qid->priority = queue_conf->priority;
238
239         if (qid->type == RTE_SCHED_TYPE_ORDERED) {
240                 char ring_name[RTE_RING_NAMESIZE];
241                 uint32_t window_size;
242
243                 /* rte_ring and window_size_mask require window_size to
244                  * be a power-of-2.
245                  */
246                 window_size = rte_align32pow2(
247                                 queue_conf->nb_atomic_order_sequences);
248
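                /* e.g. a request for 1000 reorder sequences rounds up to a
                 * 1024-entry window; window_size - 1 (1023) is stored so it
                 * can serve as a power-of-two mask.
                 */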
249                 qid->window_size = window_size - 1;
250
251                 if (!window_size) {
252                         SW_LOG_DBG(
253                                 "invalid reorder_window_size for ordered queue\n"
254                                 );
255                         goto cleanup;
256                 }
257
258                 snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
259                 qid->reorder_buffer = rte_zmalloc_socket(buf,
260                                 window_size * sizeof(qid->reorder_buffer[0]),
261                                 0, socket_id);
262                 if (!qid->reorder_buffer) {
263                         SW_LOG_DBG("reorder_buffer malloc failed\n");
264                         goto cleanup;
265                 }
266
267                 memset(&qid->reorder_buffer[0],
268                        0,
269                        window_size * sizeof(qid->reorder_buffer[0]));
270
271                 snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
272                                 dev_id, idx);
273
274                 /* lookup the ring, and if it already exists, free it */
275                 struct rte_ring *cleanup = rte_ring_lookup(ring_name);
276                 if (cleanup)
277                         rte_ring_free(cleanup);
278
279                 qid->reorder_buffer_freelist = rte_ring_create(ring_name,
280                                 window_size,
281                                 socket_id,
282                                 RING_F_SP_ENQ | RING_F_SC_DEQ);
283                 if (!qid->reorder_buffer_freelist) {
284                         SW_LOG_DBG("freelist ring create failed");
285                         goto cleanup;
286                 }
287
288                 /* Populate the freelist with reorder buffer entries. Enqueue
289                  * 'window_size - 1' entries: an rte_ring of size N holds at most
290                  * N - 1 entries (one slot stays empty to tell full from empty).
291                  */
292                 for (i = 0; i < window_size - 1; i++) {
293                         if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
294                                                 &qid->reorder_buffer[i]) < 0)
295                                 goto cleanup;
296                 }
297
298                 qid->reorder_buffer_index = 0;
299                 qid->cq_next_tx = 0;
300         }
301
302         qid->initialized = 1;
303
304         return 0;
305
306 cleanup:
307         for (i = 0; i < SW_IQS_MAX; i++) {
308                 if (qid->iq[i])
309                         iq_ring_destroy(qid->iq[i]);
310         }
311
312         if (qid->reorder_buffer) {
313                 rte_free(qid->reorder_buffer);
314                 qid->reorder_buffer = NULL;
315         }
316
317         if (qid->reorder_buffer_freelist) {
318                 rte_ring_free(qid->reorder_buffer_freelist);
319                 qid->reorder_buffer_freelist = NULL;
320         }
321
322         return -EINVAL;
323 }
324
325 static int
326 sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
327                 const struct rte_event_queue_conf *conf)
328 {
329         int type;
330
331         /* SINGLE_LINK can be OR-ed with other types, so handle first */
332         if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
333                 type = SW_SCHED_TYPE_DIRECT;
334         } else {
335                 switch (conf->event_queue_cfg) {
336                 case RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY:
337                         type = RTE_SCHED_TYPE_ATOMIC;
338                         break;
339                 case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
340                         type = RTE_SCHED_TYPE_ORDERED;
341                         break;
342                 case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
343                         type = RTE_SCHED_TYPE_PARALLEL;
344                         break;
345                 case RTE_EVENT_QUEUE_CFG_ALL_TYPES:
346                         SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
347                         return -ENOTSUP;
348                 default:
349                         SW_LOG_ERR("Unknown queue type %d requested\n",
350                                    conf->event_queue_cfg);
351                         return -EINVAL;
352                 }
353         }
354
355         struct sw_evdev *sw = sw_pmd_priv(dev);
356         return qid_init(sw, queue_id, type, conf);
357 }
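
/*
 * Usage sketch (illustrative only): sw_queue_setup() is reached through
 * rte_event_queue_setup(); for example, an atomic queue could be requested
 * with (dev_id and queue_id are placeholders):
 *
 *     struct rte_event_queue_conf qconf = {
 *             .nb_atomic_flows = 1024,
 *             .nb_atomic_order_sequences = 1024,
 *             .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
 *             .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
 *     };
 *     rte_event_queue_setup(dev_id, queue_id, &qconf);
 */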
358
359 static void
360 sw_queue_release(struct rte_eventdev *dev, uint8_t id)
361 {
362         struct sw_evdev *sw = sw_pmd_priv(dev);
363         struct sw_qid *qid = &sw->qids[id];
364         uint32_t i;
365
366         for (i = 0; i < SW_IQS_MAX; i++)
367                 iq_ring_destroy(qid->iq[i]);
368
369         if (qid->type == RTE_SCHED_TYPE_ORDERED) {
370                 rte_free(qid->reorder_buffer);
371                 rte_ring_free(qid->reorder_buffer_freelist);
372         }
373         memset(qid, 0, sizeof(*qid));
374 }
375
376 static void
377 sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
378                                  struct rte_event_queue_conf *conf)
379 {
380         RTE_SET_USED(dev);
381         RTE_SET_USED(queue_id);
382
383         static const struct rte_event_queue_conf default_conf = {
384                 .nb_atomic_flows = 4096,
385                 .nb_atomic_order_sequences = 1,
386                 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
387                 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
388         };
389
390         *conf = default_conf;
391 }
392
393 static void
394 sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
395                  struct rte_event_port_conf *port_conf)
396 {
397         RTE_SET_USED(dev);
398         RTE_SET_USED(port_id);
399
400         port_conf->new_event_threshold = 1024;
401         port_conf->dequeue_depth = 16;
402         port_conf->enqueue_depth = 16;
403 }
404
405 static int
406 sw_dev_configure(const struct rte_eventdev *dev)
407 {
408         struct sw_evdev *sw = sw_pmd_priv(dev);
409         const struct rte_eventdev_data *data = dev->data;
410         const struct rte_event_dev_config *conf = &data->dev_conf;
411
412         sw->qid_count = conf->nb_event_queues;
413         sw->port_count = conf->nb_event_ports;
414         sw->nb_events_limit = conf->nb_events_limit;
415         rte_atomic32_set(&sw->inflights, 0);
416
417         if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
418                 return -ENOTSUP;
419
420         return 0;
421 }
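
/*
 * Usage sketch (illustrative only): the values consumed by sw_dev_configure()
 * come from the application's rte_event_dev_configure() call, e.g.
 * (dev_id is a placeholder):
 *
 *     struct rte_event_dev_config cfg = {
 *             .nb_event_queues = 2,
 *             .nb_event_ports = 4,
 *             .nb_events_limit = 4096,
 *             .nb_event_queue_flows = 1024,
 *             .nb_event_port_dequeue_depth = 16,
 *             .nb_event_port_enqueue_depth = 16,
 *             .dequeue_timeout_ns = 0,
 *     };
 *     rte_event_dev_configure(dev_id, &cfg);
 */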
422
423 static void
424 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
425 {
426         RTE_SET_USED(dev);
427
428         static const struct rte_event_dev_info evdev_sw_info = {
429                         .driver_name = SW_PMD_NAME,
430                         .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
431                         .max_event_queue_flows = SW_QID_NUM_FIDS,
432                         .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
433                         .max_event_priority_levels = SW_IQS_MAX,
434                         .max_event_ports = SW_PORTS_MAX,
435                         .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
436                         .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
437                         .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
438                         .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
439                                         RTE_EVENT_DEV_CAP_EVENT_QOS),
440         };
441
442         *info = evdev_sw_info;
443 }
444
445 static void
446 sw_dump(struct rte_eventdev *dev, FILE *f)
447 {
448         const struct sw_evdev *sw = sw_pmd_priv(dev);
449
450         static const char * const q_type_strings[] = {
451                         "Ordered", "Atomic", "Parallel", "Directed"
452         };
453         uint32_t i;
454         fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
455                         sw->port_count, sw->qid_count);
456
457         fprintf(f, "\trx   %"PRIu64"\n\tdrop %"PRIu64"\n\ttx   %"PRIu64"\n",
458                 sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
459         fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
460         fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
461         fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
462         fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
463         uint32_t inflights = rte_atomic32_read(&sw->inflights);
464         uint32_t credits = sw->nb_events_limit - inflights;
465         fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);
466
467 #define COL_RED "\x1b[31m"
468 #define COL_RESET "\x1b[0m"
469
470         for (i = 0; i < sw->port_count; i++) {
471                 int max, j;
472                 const struct sw_port *p = &sw->ports[i];
473                 if (!p->initialized) {
474                         fprintf(f, "  %sPort %d not initialized.%s\n",
475                                 COL_RED, i, COL_RESET);
476                         continue;
477                 }
478                 fprintf(f, "  Port %d %s\n", i,
479                         p->is_directed ? " (SingleCons)" : "");
480                 fprintf(f, "\trx   %"PRIu64"\tdrop %"PRIu64"\ttx   %"PRIu64
481                         "\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
482                         sw->ports[i].stats.rx_dropped,
483                         sw->ports[i].stats.tx_pkts,
484                         (p->inflights == p->inflight_max) ?
485                                 COL_RED : COL_RESET,
486                         sw->ports[i].inflights, COL_RESET);
487
488                 fprintf(f, "\tMax New: %u"
489                         "\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
490                         sw->ports[i].inflight_max,
491                         sw->ports[i].avg_pkt_ticks,
492                         sw->ports[i].inflight_credits);
493                 fprintf(f, "\tReceive burst distribution:\n");
494                 float zp_percent = p->zero_polls * 100.0 / p->total_polls;
495                 fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
496                                 zp_percent);
497                 for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
498                         if (p->poll_buckets[max] != 0)
499                                 break;
500                 for (j = 0; j <= max; j++) {
501                         if (p->poll_buckets[j] != 0) {
502                                 float poll_pc = p->poll_buckets[j] * 100.0 /
503                                         p->total_polls;
504                                 fprintf(f, "%u-%u:%.02f%% ",
505                                         ((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
506                                         ((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
507                                         poll_pc);
508                         }
509                 }
510                 fprintf(f, "\n");
511
512                 if (p->rx_worker_ring) {
513                         uint64_t used = qe_ring_count(p->rx_worker_ring);
514                         uint64_t space = qe_ring_free_count(p->rx_worker_ring);
515                         const char *col = (space == 0) ? COL_RED : COL_RESET;
516                         fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
517                                         PRIu64 COL_RESET"\n", col, used, space);
518                 } else
519                         fprintf(f, "\trx ring not initialized.\n");
520
521                 if (p->cq_worker_ring) {
522                         uint64_t used = qe_ring_count(p->cq_worker_ring);
523                         uint64_t space = qe_ring_free_count(p->cq_worker_ring);
524                         const char *col = (space == 0) ? COL_RED : COL_RESET;
525                         fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
526                                         PRIu64 COL_RESET"\n", col, used, space);
527                 } else
528                         fprintf(f, "\tcq ring not initialized.\n");
529         }
530
531         for (i = 0; i < sw->qid_count; i++) {
532                 const struct sw_qid *qid = &sw->qids[i];
533                 if (!qid->initialized) {
534                         fprintf(f, "  %sQueue %d not initialized.%s\n",
535                                 COL_RED, i, COL_RESET);
536                         continue;
537                 }
538                 int affinities_per_port[SW_PORTS_MAX] = {0};
539                 uint32_t inflights = 0;
540
541                 fprintf(f, "  Queue %d (%s)\n", i, q_type_strings[qid->type]);
542                 fprintf(f, "\trx   %"PRIu64"\tdrop %"PRIu64"\ttx   %"PRIu64"\n",
543                         qid->stats.rx_pkts, qid->stats.rx_dropped,
544                         qid->stats.tx_pkts);
545                 if (qid->type == RTE_SCHED_TYPE_ORDERED) {
546                         struct rte_ring *rob_buf_free =
547                                 qid->reorder_buffer_freelist;
548                         if (rob_buf_free)
549                                 fprintf(f, "\tReorder entries in use: %u\n",
550                                         rte_ring_free_count(rob_buf_free));
551                         else
552                                 fprintf(f,
553                                         "\tReorder buffer not initialized\n");
554                 }
555
556                 uint32_t flow;
557                 for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
558                         if (qid->fids[flow].cq != -1) {
559                                 affinities_per_port[qid->fids[flow].cq]++;
560                                 inflights += qid->fids[flow].pcount;
561                         }
562
563                 uint32_t port;
564                 fprintf(f, "\tPer Port Stats:\n");
565                 for (port = 0; port < sw->port_count; port++) {
566                         fprintf(f, "\t  Port %d: Pkts: %"PRIu64, port,
567                                         qid->to_port[port]);
568                         fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
569                 }
570
571                 uint32_t iq;
572                 uint32_t iq_printed = 0;
573                 for (iq = 0; iq < SW_IQS_MAX; iq++) {
574                         if (!qid->iq[iq]) {
575                                 fprintf(f, "\tiq %d is not initialized.\n", iq);
576                                 iq_printed = 1;
577                                 continue;
578                         }
579                         uint32_t used = iq_ring_count(qid->iq[iq]);
580                         uint32_t free = iq_ring_free_count(qid->iq[iq]);
581                         const char *col = (free == 0) ? COL_RED : COL_RESET;
582                         if (used > 0) {
583                                 fprintf(f, "\t%siq %d: Used %d\tFree %d"
584                                         COL_RESET"\n", col, iq, used, free);
585                                 iq_printed = 1;
586                         }
587                 }
588                 if (iq_printed == 0)
589                         fprintf(f, "\t-- iqs empty --\n");
590         }
591 }
592
593 static int
594 sw_start(struct rte_eventdev *dev)
595 {
596         unsigned int i, j;
597         struct sw_evdev *sw = sw_pmd_priv(dev);
598         /* check all ports are set up */
599         for (i = 0; i < sw->port_count; i++)
600                 if (sw->ports[i].rx_worker_ring == NULL) {
601                         SW_LOG_ERR("Port %d not configured\n", i);
602                         return -ESTALE;
603                 }
604
605         /* check all queues are configured and mapped to ports */
606         for (i = 0; i < sw->qid_count; i++)
607                 if (sw->qids[i].iq[0] == NULL ||
608                                 sw->qids[i].cq_num_mapped_cqs == 0) {
609                         SW_LOG_ERR("Queue %d not configured\n", i);
610                         return -ENOLINK;
611                 }
612
613         /* build up our prioritized array of qids */
614         /* We don't use qsort here, as if all/multiple entries have the same
615          * priority, the result is non-deterministic. From "man 3 qsort":
616          * "If two members compare as equal, their order in the sorted
617          * array is undefined."
618          */
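        /* Example: with qid priorities {128, 0, 128} the resulting order is
         * qid 1, qid 0, qid 2: highest priority first, original qid order
         * preserved within each priority level.
         */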
619         uint32_t qidx = 0;
620         for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
621                 for (i = 0; i < sw->qid_count; i++) {
622                         if (sw->qids[i].priority == j) {
623                                 sw->qids_prioritized[qidx] = &sw->qids[i];
624                                 qidx++;
625                         }
626                 }
627         }
628
629         if (sw_xstats_init(sw) < 0)
630                 return -EINVAL;
631
632         rte_smp_wmb();
633         sw->started = 1;
634
635         return 0;
636 }
637
638 static void
639 sw_stop(struct rte_eventdev *dev)
640 {
641         struct sw_evdev *sw = sw_pmd_priv(dev);
642         sw_xstats_uninit(sw);
643         sw->started = 0;
644         rte_smp_wmb();
645 }
646
647 static int
648 sw_close(struct rte_eventdev *dev)
649 {
650         struct sw_evdev *sw = sw_pmd_priv(dev);
651         uint32_t i;
652
653         for (i = 0; i < sw->qid_count; i++)
654                 sw_queue_release(dev, i);
655         sw->qid_count = 0;
656
657         for (i = 0; i < sw->port_count; i++)
658                 sw_port_release(&sw->ports[i]);
659         sw->port_count = 0;
660
661         memset(&sw->stats, 0, sizeof(sw->stats));
662         sw->sched_called = 0;
663         sw->sched_no_iq_enqueues = 0;
664         sw->sched_no_cq_enqueues = 0;
665         sw->sched_cq_qid_called = 0;
666
667         return 0;
668 }
669
670 static int
671 assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
672 {
673         int *socket_id = opaque;
674         *socket_id = atoi(value);
675         if (*socket_id >= RTE_MAX_NUMA_NODES)
676                 return -1;
677         return 0;
678 }
679
680 static int
681 set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
682 {
683         int *quanta = opaque;
684         *quanta = atoi(value);
685         if (*quanta < 0 || *quanta >= 4096)
686                 return -1;
687         return 0;
688 }
689
690 static int
691 set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
692 {
693         int *credit = opaque;
694         *credit = atoi(value);
695         if (*credit < 0 || *credit >= 128)
696                 return -1;
697         return 0;
698 }
699
700 static int
701 sw_probe(struct rte_vdev_device *vdev)
702 {
703         static const struct rte_eventdev_ops evdev_sw_ops = {
704                         .dev_configure = sw_dev_configure,
705                         .dev_infos_get = sw_info_get,
706                         .dev_close = sw_close,
707                         .dev_start = sw_start,
708                         .dev_stop = sw_stop,
709                         .dump = sw_dump,
710
711                         .queue_def_conf = sw_queue_def_conf,
712                         .queue_setup = sw_queue_setup,
713                         .queue_release = sw_queue_release,
714                         .port_def_conf = sw_port_def_conf,
715                         .port_setup = sw_port_setup,
716                         .port_release = sw_port_release,
717                         .port_link = sw_port_link,
718                         .port_unlink = sw_port_unlink,
719
720                         .xstats_get = sw_xstats_get,
721                         .xstats_get_names = sw_xstats_get_names,
722                         .xstats_get_by_name = sw_xstats_get_by_name,
723                         .xstats_reset = sw_xstats_reset,
724         };
725
726         static const char *const args[] = {
727                 NUMA_NODE_ARG,
728                 SCHED_QUANTA_ARG,
729                 CREDIT_QUANTA_ARG,
730                 NULL
731         };
732         const char *name;
733         const char *params;
734         struct rte_eventdev *dev;
735         struct sw_evdev *sw;
736         int socket_id = rte_socket_id();
737         int sched_quanta  = SW_DEFAULT_SCHED_QUANTA;
738         int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
739
740         name = rte_vdev_device_name(vdev);
741         params = rte_vdev_device_args(vdev);
742         if (params != NULL && params[0] != '\0') {
743                 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
744
745                 if (!kvlist) {
746                         SW_LOG_INFO(
747                                 "Ignoring unsupported parameters when creating device '%s'\n",
748                                 name);
749                 } else {
750                         int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
751                                         assign_numa_node, &socket_id);
752                         if (ret != 0) {
753                                 SW_LOG_ERR(
754                                         "%s: Error parsing numa node parameter",
755                                         name);
756                                 rte_kvargs_free(kvlist);
757                                 return ret;
758                         }
759
760                         ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
761                                         set_sched_quanta, &sched_quanta);
762                         if (ret != 0) {
763                                 SW_LOG_ERR(
764                                         "%s: Error parsing sched quanta parameter",
765                                         name);
766                                 rte_kvargs_free(kvlist);
767                                 return ret;
768                         }
769
770                         ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
771                                         set_credit_quanta, &credit_quanta);
772                         if (ret != 0) {
773                                 SW_LOG_ERR(
774                                         "%s: Error parsing credit quanta parameter",
775                                         name);
776                                 rte_kvargs_free(kvlist);
777                                 return ret;
778                         }
779
780                         rte_kvargs_free(kvlist);
781                 }
782         }
783
784         SW_LOG_INFO(
785                         "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
786                         name, socket_id, sched_quanta, credit_quanta);
787
788         dev = rte_event_pmd_vdev_init(name,
789                         sizeof(struct sw_evdev), socket_id);
790         if (dev == NULL) {
791                 SW_LOG_ERR("eventdev vdev init() failed");
792                 return -EFAULT;
793         }
794         dev->dev_ops = &evdev_sw_ops;
795         dev->enqueue = sw_event_enqueue;
796         dev->enqueue_burst = sw_event_enqueue_burst;
797         dev->dequeue = sw_event_dequeue;
798         dev->dequeue_burst = sw_event_dequeue_burst;
799         dev->schedule = sw_event_schedule;
800
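        /* Secondary processes reuse the device state created by the primary
         * process; they only need the fast-path function pointers assigned
         * above, so stop here.
         */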
801         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
802                 return 0;
803
804         sw = dev->data->dev_private;
805         sw->data = dev->data;
806
807         /* copy values passed from vdev command line to instance */
808         sw->credit_update_quanta = credit_quanta;
809         sw->sched_quanta = sched_quanta;
810
811         return 0;
812 }
813
814 static int
815 sw_remove(struct rte_vdev_device *vdev)
816 {
817         const char *name;
818
819         name = rte_vdev_device_name(vdev);
820         if (name == NULL)
821                 return -EINVAL;
822
823         SW_LOG_INFO("Closing eventdev sw device %s\n", name);
824
825         return rte_event_pmd_vdev_uninit(name);
826 }
827
828 static struct rte_vdev_driver evdev_sw_pmd_drv = {
829         .probe = sw_probe,
830         .remove = sw_remove
831 };
832
833 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
834 RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
835                 SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");