drivers: use SPDX tag for Intel copyright files
drivers/event/sw/sw_evdev.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2017 Intel Corporation
 */

#include <inttypes.h>
#include <string.h>

#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_ring.h>
#include <rte_errno.h>
#include <rte_event_ring.h>
#include <rte_service_component.h>

#include "sw_evdev.h"
#include "iq_ring.h"

#define EVENTDEV_NAME_SW_PMD event_sw
#define NUMA_NODE_ARG "numa_node"
#define SCHED_QUANTA_ARG "sched_quanta"
#define CREDIT_QUANTA_ARG "credit_quanta"

static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);

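/*
 * Link worker queues to a port. Reached through the eventdev ops table when
 * the application calls the public link API; an illustrative call (example
 * values only) would be:
 *
 *	uint8_t queues[] = {0, 1};
 *	uint8_t prios[] = {RTE_EVENT_DEV_PRIORITY_NORMAL,
 *			RTE_EVENT_DEV_PRIORITY_NORMAL};
 *	rte_event_port_link(dev_id, port_id, queues, prios, 2);
 *
 * Returns the number of queues actually linked; on a constraint violation
 * (e.g. a second queue on a single-link port) rte_errno is set and the
 * count of links made so far is returned.
 */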
static int
sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t num)
{
	struct sw_port *p = port;
	struct sw_evdev *sw = sw_pmd_priv(dev);
	int i;

	RTE_SET_USED(priorities);
	for (i = 0; i < num; i++) {
		struct sw_qid *q = &sw->qids[queues[i]];

		/* check for qid map overflow */
		if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
			rte_errno = EDQUOT;
			break;
		}

		if (p->is_directed && p->num_qids_mapped > 0) {
			rte_errno = EDQUOT;
			break;
		}

		if (q->type == SW_SCHED_TYPE_DIRECT) {
			/* check directed qids only map to one port */
			if (p->num_qids_mapped > 0) {
				rte_errno = EDQUOT;
				break;
			}
			/* check port only takes a directed flow */
			if (num > 1) {
				rte_errno = EDQUOT;
				break;
			}

			p->is_directed = 1;
			p->num_qids_mapped = 1;
		} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
			p->num_ordered_qids++;
			p->num_qids_mapped++;
		} else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
				q->type == RTE_SCHED_TYPE_PARALLEL) {
			p->num_qids_mapped++;
		}

		q->cq_map[q->cq_num_mapped_cqs] = p->id;
		rte_smp_wmb();
		q->cq_num_mapped_cqs++;
	}
	return i;
}

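/*
 * Unlink queues from a port: remove this port's CQ from each listed queue's
 * CQ map using swap-with-last removal, and return the number of unlinks
 * performed.
 */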
static int
sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
		uint16_t nb_unlinks)
{
	struct sw_port *p = port;
	struct sw_evdev *sw = sw_pmd_priv(dev);
	unsigned int i, j;

	int unlinked = 0;
	for (i = 0; i < nb_unlinks; i++) {
		struct sw_qid *q = &sw->qids[queues[i]];
		for (j = 0; j < q->cq_num_mapped_cqs; j++) {
			if (q->cq_map[j] == p->id) {
				q->cq_map[j] =
					q->cq_map[q->cq_num_mapped_cqs - 1];
				rte_smp_wmb();
				q->cq_num_mapped_cqs--;
				unlinked++;

				p->num_qids_mapped--;

				if (q->type == RTE_SCHED_TYPE_ORDERED)
					p->num_ordered_qids--;

				continue;
			}
		}
	}
	return unlinked;
}

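/*
 * Set up (or re-configure) a port: return any credits held by a previously
 * initialized port, then (re)create the RX worker ring that buffers enqueued
 * events and the CQ worker ring that the scheduler fills for dequeue.
 */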
static int
sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		const struct rte_event_port_conf *conf)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	struct sw_port *p = &sw->ports[port_id];
	char buf[RTE_RING_NAMESIZE];
	unsigned int i;

	struct rte_event_dev_info info;
	sw_info_get(dev, &info);

	/* detect re-configuring and return credits to instance if needed */
	if (p->initialized) {
		/* taking credits from the pool is done one quanta at a time,
		 * and credits may be spent (counted in p->inflights) or still
		 * available in the port (p->inflight_credits). We must return
		 * the sum so that we do not leak credits
		 */
		int possible_inflights = p->inflight_credits + p->inflights;
		rte_atomic32_sub(&sw->inflights, possible_inflights);
	}

	*p = (struct sw_port){0}; /* zero entire structure */
	p->id = port_id;
	p->sw = sw;

	/* check to see if the ring exists - port_setup() can be called
	 * multiple times legally (assuming the device is stopped). If the
	 * ring exists, free it so it gets re-created with the correct size
	 */
	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
			port_id, "rx_worker_ring");
	struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
	if (existing_ring)
		rte_event_ring_free(existing_ring);

	p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
			dev->data->socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
	if (p->rx_worker_ring == NULL) {
		SW_LOG_ERR("Error creating RX worker ring for port %d\n",
				port_id);
		return -1;
	}

	p->inflight_max = conf->new_event_threshold;

	/* check if ring exists, same as rx_worker above */
	snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
			port_id, "cq_worker_ring");
	existing_ring = rte_event_ring_lookup(buf);
	if (existing_ring)
		rte_event_ring_free(existing_ring);

	p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
			dev->data->socket_id,
			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
	if (p->cq_worker_ring == NULL) {
		rte_event_ring_free(p->rx_worker_ring);
		SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
				port_id);
		return -1;
	}
	sw->cq_ring_space[port_id] = conf->dequeue_depth;

	/* set hist list contents to empty */
	for (i = 0; i < SW_PORT_HIST_LIST; i++) {
		p->hist_list[i].fid = -1;
		p->hist_list[i].qid = -1;
	}
	dev->data->ports[port_id] = p;

	rte_smp_wmb();
	p->initialized = 1;
	return 0;
}

static void
sw_port_release(void *port)
{
	struct sw_port *p = (void *)port;
	if (p == NULL)
		return;

	rte_event_ring_free(p->rx_worker_ring);
	rte_event_ring_free(p->cq_worker_ring);
	memset(p, 0, sizeof(*p));
}

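/*
 * Initialize a queue (QID): create one IQ ring per internal priority level,
 * reset the flow-ID table, and, for ordered queues, allocate the reorder
 * buffer plus the freelist ring that tracks free reorder slots.
 */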
static int32_t
qid_init(struct sw_evdev *sw, unsigned int idx, int type,
		const struct rte_event_queue_conf *queue_conf)
{
	unsigned int i;
	int dev_id = sw->data->dev_id;
	int socket_id = sw->data->socket_id;
	char buf[IQ_RING_NAMESIZE];
	struct sw_qid *qid = &sw->qids[idx];

	for (i = 0; i < SW_IQS_MAX; i++) {
		snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
		qid->iq[i] = iq_ring_create(buf, socket_id);
		if (!qid->iq[i]) {
			SW_LOG_DBG("ring create failed");
			goto cleanup;
		}
	}

	/* Initialize the FID structures to no pinning (-1), and zero packets */
	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
	for (i = 0; i < RTE_DIM(qid->fids); i++)
		qid->fids[i] = fid;

	qid->id = idx;
	qid->type = type;
	qid->priority = queue_conf->priority;

	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		char ring_name[RTE_RING_NAMESIZE];
		uint32_t window_size;

		/* rte_ring and window_size_mask require window_size to
		 * be a power-of-2.
		 */
		window_size = rte_align32pow2(
				queue_conf->nb_atomic_order_sequences);

		qid->window_size = window_size - 1;

		if (!window_size) {
			SW_LOG_DBG(
				"invalid reorder_window_size for ordered queue\n"
				);
			goto cleanup;
		}

		snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
		qid->reorder_buffer = rte_zmalloc_socket(buf,
				window_size * sizeof(qid->reorder_buffer[0]),
				0, socket_id);
		if (!qid->reorder_buffer) {
			SW_LOG_DBG("reorder_buffer malloc failed\n");
			goto cleanup;
		}

		memset(&qid->reorder_buffer[0],
		       0,
		       window_size * sizeof(qid->reorder_buffer[0]));

		snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
				dev_id, idx);

		/* lookup the ring, and if it already exists, free it */
		struct rte_ring *cleanup = rte_ring_lookup(ring_name);
		if (cleanup)
			rte_ring_free(cleanup);

		qid->reorder_buffer_freelist = rte_ring_create(ring_name,
				window_size,
				socket_id,
				RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (!qid->reorder_buffer_freelist) {
			SW_LOG_DBG("freelist ring create failed");
			goto cleanup;
		}

		/* Populate the freelist with reorder buffer entries. Enqueue
		 * 'window_size - 1' entries because the rte_ring holds only
		 * that many.
		 */
		for (i = 0; i < window_size - 1; i++) {
			if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
						&qid->reorder_buffer[i]) < 0)
				goto cleanup;
		}

		qid->reorder_buffer_index = 0;
		qid->cq_next_tx = 0;
	}

	qid->initialized = 1;

	return 0;

cleanup:
	for (i = 0; i < SW_IQS_MAX; i++) {
		if (qid->iq[i])
			iq_ring_destroy(qid->iq[i]);
	}

	if (qid->reorder_buffer) {
		rte_free(qid->reorder_buffer);
		qid->reorder_buffer = NULL;
	}

	if (qid->reorder_buffer_freelist) {
		rte_ring_free(qid->reorder_buffer_freelist);
		qid->reorder_buffer_freelist = NULL;
	}

	return -EINVAL;
}

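/*
 * Translate the public queue configuration into the internal queue type:
 * single-link queues become SW_SCHED_TYPE_DIRECT, while "all types" queues
 * are rejected as unsupported by this PMD.
 */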
static int
sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		const struct rte_event_queue_conf *conf)
{
	int type;

	type = conf->schedule_type;

	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
		type = SW_SCHED_TYPE_DIRECT;
	} else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
			& conf->event_queue_cfg) {
		SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
		return -ENOTSUP;
	}

	struct sw_evdev *sw = sw_pmd_priv(dev);
	return qid_init(sw, queue_id, type, conf);
}

static void
sw_queue_release(struct rte_eventdev *dev, uint8_t id)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	struct sw_qid *qid = &sw->qids[id];
	uint32_t i;

	for (i = 0; i < SW_IQS_MAX; i++)
		iq_ring_destroy(qid->iq[i]);

	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
		rte_free(qid->reorder_buffer);
		rte_ring_free(qid->reorder_buffer_freelist);
	}
	memset(qid, 0, sizeof(*qid));
}

static void
sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	static const struct rte_event_queue_conf default_conf = {
		.nb_atomic_flows = 4096,
		.nb_atomic_order_sequences = 1,
		.schedule_type = RTE_SCHED_TYPE_ATOMIC,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	*conf = default_conf;
}

static void
sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
		 struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = 1024;
	port_conf->dequeue_depth = 16;
	port_conf->enqueue_depth = 16;
}

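/*
 * Store the device-level configuration (queue, port and in-flight event
 * limits). Per-dequeue wait timeouts are not supported by this PMD.
 */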
static int
sw_dev_configure(const struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *conf = &data->dev_conf;

	sw->qid_count = conf->nb_event_queues;
	sw->port_count = conf->nb_event_ports;
	sw->nb_events_limit = conf->nb_events_limit;
	rte_atomic32_set(&sw->inflights, 0);

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
		return -ENOTSUP;

	return 0;
}

struct rte_eth_dev;

static int
sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev,
			uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);
	*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	return 0;
}

static void
sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
	RTE_SET_USED(dev);

	static const struct rte_event_dev_info evdev_sw_info = {
			.driver_name = SW_PMD_NAME,
			.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
			.max_event_queue_flows = SW_QID_NUM_FIDS,
			.max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
			.max_event_priority_levels = SW_IQS_MAX,
			.max_event_ports = SW_PORTS_MAX,
			.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
			.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
			.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
			.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_BURST_MODE |
					RTE_EVENT_DEV_CAP_EVENT_QOS),
	};

	*info = evdev_sw_info;
}

static void
sw_dump(struct rte_eventdev *dev, FILE *f)
{
	const struct sw_evdev *sw = sw_pmd_priv(dev);

	static const char * const q_type_strings[] = {
			"Ordered", "Atomic", "Parallel", "Directed"
	};
	uint32_t i;
	fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
			sw->port_count, sw->qid_count);

	fprintf(f, "\trx   %"PRIu64"\n\tdrop %"PRIu64"\n\ttx   %"PRIu64"\n",
		sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
	fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
	fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
	fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
	fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
	uint32_t inflights = rte_atomic32_read(&sw->inflights);
	uint32_t credits = sw->nb_events_limit - inflights;
	fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);

#define COL_RED "\x1b[31m"
#define COL_RESET "\x1b[0m"

	for (i = 0; i < sw->port_count; i++) {
		int max, j;
		const struct sw_port *p = &sw->ports[i];
		if (!p->initialized) {
			fprintf(f, "  %sPort %d not initialized.%s\n",
				COL_RED, i, COL_RESET);
			continue;
		}
		fprintf(f, "  Port %d %s\n", i,
			p->is_directed ? " (SingleCons)" : "");
		fprintf(f, "\trx   %"PRIu64"\tdrop %"PRIu64"\ttx   %"PRIu64
			"\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
			sw->ports[i].stats.rx_dropped,
			sw->ports[i].stats.tx_pkts,
			(p->inflights == p->inflight_max) ?
				COL_RED : COL_RESET,
			sw->ports[i].inflights, COL_RESET);

		fprintf(f, "\tMax New: %u"
			"\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
			sw->ports[i].inflight_max,
			sw->ports[i].avg_pkt_ticks,
			sw->ports[i].inflight_credits);
		fprintf(f, "\tReceive burst distribution:\n");
		float zp_percent = p->zero_polls * 100.0 / p->total_polls;
		fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
				zp_percent);
		for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
			if (p->poll_buckets[max] != 0)
				break;
		for (j = 0; j <= max; j++) {
			if (p->poll_buckets[j] != 0) {
				float poll_pc = p->poll_buckets[j] * 100.0 /
					p->total_polls;
				fprintf(f, "%u-%u:%.02f%% ",
					((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
					((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
					poll_pc);
			}
		}
		fprintf(f, "\n");

		if (p->rx_worker_ring) {
			uint64_t used = rte_event_ring_count(p->rx_worker_ring);
			uint64_t space = rte_event_ring_free_count(
					p->rx_worker_ring);
			const char *col = (space == 0) ? COL_RED : COL_RESET;
			fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
					PRIu64 COL_RESET"\n", col, used, space);
		} else
			fprintf(f, "\trx ring not initialized.\n");

		if (p->cq_worker_ring) {
			uint64_t used = rte_event_ring_count(p->cq_worker_ring);
			uint64_t space = rte_event_ring_free_count(
					p->cq_worker_ring);
			const char *col = (space == 0) ? COL_RED : COL_RESET;
			fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
					PRIu64 COL_RESET"\n", col, used, space);
		} else
			fprintf(f, "\tcq ring not initialized.\n");
	}

	for (i = 0; i < sw->qid_count; i++) {
		const struct sw_qid *qid = &sw->qids[i];
		if (!qid->initialized) {
			fprintf(f, "  %sQueue %d not initialized.%s\n",
				COL_RED, i, COL_RESET);
			continue;
		}
		int affinities_per_port[SW_PORTS_MAX] = {0};
		uint32_t inflights = 0;

		fprintf(f, "  Queue %d (%s)\n", i, q_type_strings[qid->type]);
		fprintf(f, "\trx   %"PRIu64"\tdrop %"PRIu64"\ttx   %"PRIu64"\n",
			qid->stats.rx_pkts, qid->stats.rx_dropped,
			qid->stats.tx_pkts);
		if (qid->type == RTE_SCHED_TYPE_ORDERED) {
			struct rte_ring *rob_buf_free =
				qid->reorder_buffer_freelist;
			if (rob_buf_free)
				fprintf(f, "\tReorder entries in use: %u\n",
					rte_ring_free_count(rob_buf_free));
			else
				fprintf(f,
					"\tReorder buffer not initialized\n");
		}

		uint32_t flow;
		for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
			if (qid->fids[flow].cq != -1) {
				affinities_per_port[qid->fids[flow].cq]++;
				inflights += qid->fids[flow].pcount;
			}

		uint32_t port;
		fprintf(f, "\tPer Port Stats:\n");
		for (port = 0; port < sw->port_count; port++) {
			fprintf(f, "\t  Port %d: Pkts: %"PRIu64, port,
					qid->to_port[port]);
			fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
		}

		uint32_t iq;
		uint32_t iq_printed = 0;
		for (iq = 0; iq < SW_IQS_MAX; iq++) {
			if (!qid->iq[iq]) {
				fprintf(f, "\tiq %d is not initialized.\n", iq);
				iq_printed = 1;
				continue;
			}
			uint32_t used = iq_ring_count(qid->iq[iq]);
			uint32_t free = iq_ring_free_count(qid->iq[iq]);
			const char *col = (free == 0) ? COL_RED : COL_RESET;
			if (used > 0) {
				fprintf(f, "\t%siq %d: Used %d\tFree %d"
					COL_RESET"\n", col, iq, used, free);
				iq_printed = 1;
			}
		}
		if (iq_printed == 0)
			fprintf(f, "\t-- iqs empty --\n");
	}
}

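/*
 * Start the device: ensure the scheduling service has a core mapped to run
 * it, verify every port and queue has been set up and linked, and build the
 * priority-ordered array of QIDs used by the scheduler.
 */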
static int
sw_start(struct rte_eventdev *dev)
{
	unsigned int i, j;
	struct sw_evdev *sw = sw_pmd_priv(dev);

	rte_service_component_runstate_set(sw->service_id, 1);

	/* check a service core is mapped to this service */
	if (!rte_service_runstate_get(sw->service_id)) {
		SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
				sw->service_name);
		return -ENOENT;
	}

	/* check all ports are set up */
	for (i = 0; i < sw->port_count; i++)
		if (sw->ports[i].rx_worker_ring == NULL) {
			SW_LOG_ERR("Port %d not configured\n", i);
			return -ESTALE;
		}

	/* check all queues are configured and mapped to ports */
	for (i = 0; i < sw->qid_count; i++)
		if (sw->qids[i].iq[0] == NULL ||
				sw->qids[i].cq_num_mapped_cqs == 0) {
			SW_LOG_ERR("Queue %d not configured\n", i);
			return -ENOLINK;
		}

	/* build up our prioritized array of qids */
	/* We don't use qsort here, as if all/multiple entries have the same
	 * priority, the result is non-deterministic. From "man 3 qsort":
	 * "If two members compare as equal, their order in the sorted
	 * array is undefined."
	 */
	uint32_t qidx = 0;
	for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
		for (i = 0; i < sw->qid_count; i++) {
			if (sw->qids[i].priority == j) {
				sw->qids_prioritized[qidx] = &sw->qids[i];
				qidx++;
			}
		}
	}

	if (sw_xstats_init(sw) < 0)
		return -EINVAL;

	rte_smp_wmb();
	sw->started = 1;

	return 0;
}

static void
sw_stop(struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	sw_xstats_uninit(sw);
	sw->started = 0;
	rte_smp_wmb();
}

static int
sw_close(struct rte_eventdev *dev)
{
	struct sw_evdev *sw = sw_pmd_priv(dev);
	uint32_t i;

	for (i = 0; i < sw->qid_count; i++)
		sw_queue_release(dev, i);
	sw->qid_count = 0;

	for (i = 0; i < sw->port_count; i++)
		sw_port_release(&sw->ports[i]);
	sw->port_count = 0;

	memset(&sw->stats, 0, sizeof(sw->stats));
	sw->sched_called = 0;
	sw->sched_no_iq_enqueues = 0;
	sw->sched_no_cq_enqueues = 0;
	sw->sched_cq_qid_called = 0;

	return 0;
}

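/*
 * Vdev argument parsing callbacks. Each one converts the string value and
 * range-checks it: numa_node must be below RTE_MAX_NUMA_NODES, sched_quanta
 * must be non-negative and below 4096, and credit_quanta non-negative and
 * below 128.
 */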
static int
assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
	int *socket_id = opaque;
	*socket_id = atoi(value);
	if (*socket_id >= RTE_MAX_NUMA_NODES)
		return -1;
	return 0;
}

static int
set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
	int *quanta = opaque;
	*quanta = atoi(value);
	if (*quanta < 0 || *quanta >= 4096)
		return -1;
	return 0;
}

static int
set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
{
	int *credit = opaque;
	*credit = atoi(value);
	if (*credit < 0 || *credit >= 128)
		return -1;
	return 0;
}


static int32_t sw_sched_service_func(void *args)
{
	struct rte_eventdev *dev = args;
	sw_event_schedule(dev);
	return 0;
}

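/*
 * Probe a software eventdev vdev instance: parse the optional devargs,
 * allocate the device on the requested socket, install the ops table and the
 * enqueue/dequeue entry points, and register the scheduling loop as an EAL
 * service so a service core can run sw_event_schedule().
 */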
static int
sw_probe(struct rte_vdev_device *vdev)
{
	static const struct rte_eventdev_ops evdev_sw_ops = {
			.dev_configure = sw_dev_configure,
			.dev_infos_get = sw_info_get,
			.dev_close = sw_close,
			.dev_start = sw_start,
			.dev_stop = sw_stop,
			.dump = sw_dump,

			.queue_def_conf = sw_queue_def_conf,
			.queue_setup = sw_queue_setup,
			.queue_release = sw_queue_release,
			.port_def_conf = sw_port_def_conf,
			.port_setup = sw_port_setup,
			.port_release = sw_port_release,
			.port_link = sw_port_link,
			.port_unlink = sw_port_unlink,

			.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,

			.xstats_get = sw_xstats_get,
			.xstats_get_names = sw_xstats_get_names,
			.xstats_get_by_name = sw_xstats_get_by_name,
			.xstats_reset = sw_xstats_reset,
	};

	static const char *const args[] = {
		NUMA_NODE_ARG,
		SCHED_QUANTA_ARG,
		CREDIT_QUANTA_ARG,
		NULL
	};
	const char *name;
	const char *params;
	struct rte_eventdev *dev;
	struct sw_evdev *sw;
	int socket_id = rte_socket_id();
	int sched_quanta  = SW_DEFAULT_SCHED_QUANTA;
	int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;

	name = rte_vdev_device_name(vdev);
	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			SW_LOG_INFO(
				"Ignoring unsupported parameters when creating device '%s'\n",
				name);
		} else {
			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
					assign_numa_node, &socket_id);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing numa node parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
					set_sched_quanta, &sched_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing sched quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
					set_credit_quanta, &credit_quanta);
			if (ret != 0) {
				SW_LOG_ERR(
					"%s: Error parsing credit quanta parameter",
					name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}

	SW_LOG_INFO(
			"Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
			name, socket_id, sched_quanta, credit_quanta);

	dev = rte_event_pmd_vdev_init(name,
			sizeof(struct sw_evdev), socket_id);
	if (dev == NULL) {
		SW_LOG_ERR("eventdev vdev init() failed");
		return -EFAULT;
	}
	dev->dev_ops = &evdev_sw_ops;
	dev->enqueue = sw_event_enqueue;
	dev->enqueue_burst = sw_event_enqueue_burst;
	dev->enqueue_new_burst = sw_event_enqueue_burst;
	dev->enqueue_forward_burst = sw_event_enqueue_burst;
	dev->dequeue = sw_event_dequeue;
	dev->dequeue_burst = sw_event_dequeue_burst;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	sw = dev->data->dev_private;
	sw->data = dev->data;

	/* copy values passed from vdev command line to instance */
	sw->credit_update_quanta = credit_quanta;
	sw->sched_quanta = sched_quanta;

	/* register service with EAL */
	struct rte_service_spec service;
	memset(&service, 0, sizeof(struct rte_service_spec));
	snprintf(service.name, sizeof(service.name), "%s_service", name);
	snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
			name);
	service.socket_id = socket_id;
	service.callback = sw_sched_service_func;
	service.callback_userdata = (void *)dev;

	int32_t ret = rte_service_component_register(&service, &sw->service_id);
	if (ret) {
		SW_LOG_ERR("service register() failed");
		return -ENOEXEC;
	}

	dev->data->service_inited = 1;
	dev->data->service_id = sw->service_id;

	return 0;
}

static int
sw_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (name == NULL)
		return -EINVAL;

	SW_LOG_INFO("Closing eventdev sw device %s\n", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver evdev_sw_pmd_drv = {
	.probe = sw_probe,
	.remove = sw_remove
};

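/*
 * Device registration. For example (values illustrative only), an instance
 * can be created on the EAL command line with:
 *
 *	--vdev="event_sw0,sched_quanta=64,credit_quanta=32,numa_node=0"
 */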
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
		SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");