1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
8 #include <rte_bus_vdev.h>
9 #include <rte_kvargs.h>
11 #include <rte_errno.h>
12 #include <rte_event_ring.h>
13 #include <rte_service_component.h>
18 #define EVENTDEV_NAME_SW_PMD event_sw
19 #define NUMA_NODE_ARG "numa_node"
20 #define SCHED_QUANTA_ARG "sched_quanta"
21 #define CREDIT_QUANTA_ARG "credit_quanta"
24 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
27 sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
28 const uint8_t priorities[], uint16_t num)
30 struct sw_port *p = port;
31 struct sw_evdev *sw = sw_pmd_priv(dev);
34 RTE_SET_USED(priorities);
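/* Link-time priorities are ignored by this PMD (hence RTE_SET_USED above);
 * scheduling priority comes from the queue's own configuration (qid->priority,
 * set in qid_init()).
 */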
35 for (i = 0; i < num; i++) {
36 struct sw_qid *q = &sw->qids[queues[i]];
39 /* check for qid map overflow */
40 if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
45 if (p->is_directed && p->num_qids_mapped > 0) {
50 for (j = 0; j < q->cq_num_mapped_cqs; j++) {
51 if (q->cq_map[j] == p->id)
55 /* check if port is already linked */
56 if (j < q->cq_num_mapped_cqs)
59 if (q->type == SW_SCHED_TYPE_DIRECT) {
60 /* check directed qids only map to one port */
61 if (p->num_qids_mapped > 0) {
65 /* check port only takes a directed flow */
72 p->num_qids_mapped = 1;
73 } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
74 p->num_ordered_qids++;
76 } else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
77 q->type == RTE_SCHED_TYPE_PARALLEL) {
81 q->cq_map[q->cq_num_mapped_cqs] = p->id;
83 q->cq_num_mapped_cqs++;
89 sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
92 struct sw_port *p = port;
93 struct sw_evdev *sw = sw_pmd_priv(dev);
97 for (i = 0; i < nb_unlinks; i++) {
98 struct sw_qid *q = &sw->qids[queues[i]];
99 for (j = 0; j < q->cq_num_mapped_cqs; j++) {
100 if (q->cq_map[j] == p->id) {
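/* unlink by overwriting this slot with the last mapped CQ and shrinking
 * the count (swap-remove; CQ order in the map is not preserved)
 */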
102 q->cq_map[j] = q->cq_map[q->cq_num_mapped_cqs - 1];
104 q->cq_num_mapped_cqs--;
107 p->num_qids_mapped--;
109 if (q->type == RTE_SCHED_TYPE_ORDERED)
110 p->num_ordered_qids--;
120 sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
121 const struct rte_event_port_conf *conf)
123 struct sw_evdev *sw = sw_pmd_priv(dev);
124 struct sw_port *p = &sw->ports[port_id];
125 char buf[RTE_RING_NAMESIZE];
128 struct rte_event_dev_info info;
129 sw_info_get(dev, &info);
131 /* detect re-configuring and return credits to instance if needed */
132 if (p->initialized) {
133 /* taking credits from pool is done one quanta at a time, and
134 * credits may be spent (counted in p->inflights) or still
135 * available in the port (p->inflight_credits). We must return
136 * the sum so that no credits are leaked
138 int possible_inflights = p->inflight_credits + p->inflights;
139 rte_atomic32_sub(&sw->inflights, possible_inflights);
142 *p = (struct sw_port){0}; /* zero entire structure */
146 /* check to see if the ring exists - port_setup() can be called multiple
147 * times legally (assuming device is stopped). If the ring exists, free it
148 * so it gets re-created with the correct size
150 snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
151 port_id, "rx_worker_ring");
152 struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
154 rte_event_ring_free(existing_ring);
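/* rx_worker_ring carries events enqueued on this port towards the scheduler:
 * single-producer/single-consumer since each side has one accessor, and
 * EXACT_SZ so the usable size is exactly MAX_SW_PROD_Q_DEPTH.
 */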
156 p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
157 dev->data->socket_id,
158 RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
159 if (p->rx_worker_ring == NULL) {
160 SW_LOG_ERR("Error creating RX worker ring for port %d\n",
165 p->inflight_max = conf->new_event_threshold;
166 p->implicit_release = !conf->disable_implicit_release;
168 /* check if ring exists, same as rx_worker above */
169 snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
170 port_id, "cq_worker_ring");
171 existing_ring = rte_event_ring_lookup(buf);
173 rte_event_ring_free(existing_ring);
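/* Same pattern for the cq ring (scheduler enqueue -> worker dequeue),
 * sized to the port's configured dequeue_depth.
 */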
175 p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
176 dev->data->socket_id,
177 RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
178 if (p->cq_worker_ring == NULL) {
179 rte_event_ring_free(p->rx_worker_ring);
180 SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
184 sw->cq_ring_space[port_id] = conf->dequeue_depth;
186 /* set hist list contents to empty */
187 for (i = 0; i < SW_PORT_HIST_LIST; i++) {
188 p->hist_list[i].fid = -1;
189 p->hist_list[i].qid = -1;
191 dev->data->ports[port_id] = p;
199 sw_port_release(void *port)
201 struct sw_port *p = (void *)port;
205 rte_event_ring_free(p->rx_worker_ring);
206 rte_event_ring_free(p->cq_worker_ring);
207 memset(p, 0, sizeof(*p));
211 qid_init(struct sw_evdev *sw, unsigned int idx, int type,
212 const struct rte_event_queue_conf *queue_conf)
215 int dev_id = sw->data->dev_id;
216 int socket_id = sw->data->socket_id;
217 char buf[IQ_ROB_NAMESIZE];
218 struct sw_qid *qid = &sw->qids[idx];
220 /* Initialize the FID structures to no pinning (-1), and zero packets */
221 const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
222 for (i = 0; i < RTE_DIM(qid->fids); i++)
227 qid->priority = queue_conf->priority;
229 if (qid->type == RTE_SCHED_TYPE_ORDERED) {
230 char ring_name[RTE_RING_NAMESIZE];
231 uint32_t window_size;
233 /* rte_ring and window_size_mask require window_size to
236 window_size = rte_align32pow2(
237 queue_conf->nb_atomic_order_sequences);
239 qid->window_size = window_size - 1;
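/* window_size was rounded up to a power of two above, so window_size - 1
 * can serve as the wrap-around mask (the window_size_mask referenced in the
 * comment above)
 */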
243 "invalid reorder_window_size for ordered queue\n"
248 snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
249 qid->reorder_buffer = rte_zmalloc_socket(buf,
250 window_size * sizeof(qid->reorder_buffer[0]),
252 if (!qid->reorder_buffer) {
253 SW_LOG_DBG("reorder_buffer malloc failed\n");
257 memset(&qid->reorder_buffer[0],
259 window_size * sizeof(qid->reorder_buffer[0]));
261 snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
264 /* lookup the ring, and if it already exists, free it */
265 struct rte_ring *cleanup = rte_ring_lookup(ring_name);
267 rte_ring_free(cleanup);
269 qid->reorder_buffer_freelist = rte_ring_create(ring_name,
272 RING_F_SP_ENQ | RING_F_SC_DEQ);
273 if (!qid->reorder_buffer_freelist) {
274 SW_LOG_DBG("freelist ring create failed");
278 /* Populate the freelist with reorder buffer entries. Enqueue
279 * 'window_size - 1' entries because the rte_ring holds only
282 for (i = 0; i < window_size - 1; i++) {
283 if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
284 &qid->reorder_buffer[i]) < 0)
288 qid->reorder_buffer_index = 0;
292 qid->initialized = 1;
297 if (qid->reorder_buffer) {
298 rte_free(qid->reorder_buffer);
299 qid->reorder_buffer = NULL;
302 if (qid->reorder_buffer_freelist) {
303 rte_ring_free(qid->reorder_buffer_freelist);
304 qid->reorder_buffer_freelist = NULL;
311 sw_queue_release(struct rte_eventdev *dev, uint8_t id)
313 struct sw_evdev *sw = sw_pmd_priv(dev);
314 struct sw_qid *qid = &sw->qids[id];
316 if (qid->type == RTE_SCHED_TYPE_ORDERED) {
317 rte_free(qid->reorder_buffer);
318 rte_ring_free(qid->reorder_buffer_freelist);
320 memset(qid, 0, sizeof(*qid));
324 sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
325 const struct rte_event_queue_conf *conf)
329 type = conf->schedule_type;
331 if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
332 type = SW_SCHED_TYPE_DIRECT;
333 } else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
334 & conf->event_queue_cfg) {
335 SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
339 struct sw_evdev *sw = sw_pmd_priv(dev);
341 if (sw->qids[queue_id].initialized)
342 sw_queue_release(dev, queue_id);
344 return qid_init(sw, queue_id, type, conf);
348 sw_init_qid_iqs(struct sw_evdev *sw)
352 /* Initialize the IQ memory of all configured qids */
353 for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
354 struct sw_qid *qid = &sw->qids[i];
356 if (!qid->initialized)
359 for (j = 0; j < SW_IQS_MAX; j++)
360 iq_init(sw, &qid->iq[j]);
365 sw_qids_empty(struct sw_evdev *sw)
369 for (i = 0; i < sw->qid_count; i++) {
370 for (j = 0; j < SW_IQS_MAX; j++) {
371 if (iq_count(&sw->qids[i].iq[j]))
380 sw_ports_empty(struct sw_evdev *sw)
384 for (i = 0; i < sw->port_count; i++) {
385 if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) ||
386 rte_event_ring_count(sw->ports[i].cq_worker_ring))
394 sw_drain_ports(struct rte_eventdev *dev)
396 struct sw_evdev *sw = sw_pmd_priv(dev);
397 eventdev_stop_flush_t flush;
402 flush = dev->dev_ops->dev_stop_flush;
403 dev_id = dev->data->dev_id;
404 arg = dev->data->dev_stop_flush_arg;
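/* Dequeue whatever is still sitting in each port's CQ, hand it to the
 * optional dev_stop_flush callback, then enqueue it back as RELEASE so its
 * credit and any atomic/ordered context are returned.
 */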
406 for (i = 0; i < sw->port_count; i++) {
409 while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) {
411 flush(dev_id, ev, arg);
413 ev.op = RTE_EVENT_OP_RELEASE;
414 rte_event_enqueue_burst(dev_id, i, &ev, 1);
420 sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq)
422 struct sw_evdev *sw = sw_pmd_priv(dev);
423 eventdev_stop_flush_t flush;
427 flush = dev->dev_ops->dev_stop_flush;
428 dev_id = dev->data->dev_id;
429 arg = dev->data->dev_stop_flush_arg;
431 while (iq_count(iq) > 0) {
434 iq_dequeue_burst(sw, iq, &ev, 1);
437 flush(dev_id, ev, arg);
442 sw_drain_queues(struct rte_eventdev *dev)
444 struct sw_evdev *sw = sw_pmd_priv(dev);
447 for (i = 0; i < sw->qid_count; i++) {
448 for (j = 0; j < SW_IQS_MAX; j++)
449 sw_drain_queue(dev, &sw->qids[i].iq[j]);
454 sw_clean_qid_iqs(struct rte_eventdev *dev)
456 struct sw_evdev *sw = sw_pmd_priv(dev);
459 /* Release the IQ memory of all configured qids */
460 for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
461 struct sw_qid *qid = &sw->qids[i];
463 for (j = 0; j < SW_IQS_MAX; j++) {
464 if (!qid->iq[j].head)
466 iq_free_chunk_list(sw, qid->iq[j].head);
467 qid->iq[j].head = NULL;
473 sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
474 struct rte_event_queue_conf *conf)
477 RTE_SET_USED(queue_id);
479 static const struct rte_event_queue_conf default_conf = {
480 .nb_atomic_flows = 4096,
481 .nb_atomic_order_sequences = 1,
482 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
483 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
486 *conf = default_conf;
490 sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
491 struct rte_event_port_conf *port_conf)
494 RTE_SET_USED(port_id);
496 port_conf->new_event_threshold = 1024;
497 port_conf->dequeue_depth = 16;
498 port_conf->enqueue_depth = 16;
499 port_conf->disable_implicit_release = 0;
503 sw_dev_configure(const struct rte_eventdev *dev)
505 struct sw_evdev *sw = sw_pmd_priv(dev);
506 const struct rte_eventdev_data *data = dev->data;
507 const struct rte_event_dev_config *conf = &data->dev_conf;
510 sw->qid_count = conf->nb_event_queues;
511 sw->port_count = conf->nb_event_ports;
512 sw->nb_events_limit = conf->nb_events_limit;
513 rte_atomic32_set(&sw->inflights, 0);
515 /* Number of chunks sized for worst-case spread of events across IQs */
516 num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
517 sw->qid_count*SW_IQS_MAX*2;
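/* i.e. one chunk per SW_EVS_PER_Q_CHUNK of the total inflight budget,
 * rounded up, plus two spare chunks per IQ
 */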
519 /* If this is a reconfiguration, free the previous IQ allocation. All
520 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
521 * will be reinitialized in sw_start().
524 rte_free(sw->chunks);
526 sw->chunks = rte_malloc_socket(NULL,
527 sizeof(struct sw_queue_chunk) *
530 sw->data->socket_id);
534 sw->chunk_list_head = NULL;
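/* push every chunk onto the free list; the IQs allocate from it when the
 * device is started
 */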
535 for (i = 0; i < num_chunks; i++)
536 iq_free_chunk(sw, &sw->chunks[i]);
538 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
547 sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
548 const struct rte_eth_dev *eth_dev,
552 RTE_SET_USED(eth_dev);
553 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
558 sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
561 const struct rte_event_timer_adapter_ops **ops)
567 /* Use default SW ops */
574 sw_crypto_adapter_caps_get(const struct rte_eventdev *dev,
575 const struct rte_cryptodev *cdev,
580 *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
585 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
589 static const struct rte_event_dev_info evdev_sw_info = {
590 .driver_name = SW_PMD_NAME,
591 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
592 .max_event_queue_flows = SW_QID_NUM_FIDS,
593 .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
594 .max_event_priority_levels = SW_IQS_MAX,
595 .max_event_ports = SW_PORTS_MAX,
596 .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
597 .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
598 .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
600 RTE_EVENT_DEV_CAP_QUEUE_QOS |
601 RTE_EVENT_DEV_CAP_BURST_MODE |
602 RTE_EVENT_DEV_CAP_EVENT_QOS |
603 RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
604 RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
605 RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
606 RTE_EVENT_DEV_CAP_NONSEQ_MODE),
609 *info = evdev_sw_info;
613 sw_dump(struct rte_eventdev *dev, FILE *f)
615 const struct sw_evdev *sw = sw_pmd_priv(dev);
617 static const char * const q_type_strings[] = {
618 "Ordered", "Atomic", "Parallel", "Directed"
621 fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
622 sw->port_count, sw->qid_count);
624 fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n",
625 sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
626 fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
627 fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
628 fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
629 fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
630 uint32_t inflights = rte_atomic32_read(&sw->inflights);
631 uint32_t credits = sw->nb_events_limit - inflights;
632 fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);
634 #define COL_RED "\x1b[31m"
635 #define COL_RESET "\x1b[0m"
637 for (i = 0; i < sw->port_count; i++) {
639 const struct sw_port *p = &sw->ports[i];
640 if (!p->initialized) {
641 fprintf(f, " %sPort %d not initialized.%s\n",
642 COL_RED, i, COL_RESET);
645 fprintf(f, " Port %d %s\n", i,
646 p->is_directed ? " (SingleCons)" : "");
647 fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64
648 "\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
649 sw->ports[i].stats.rx_dropped,
650 sw->ports[i].stats.tx_pkts,
651 (p->inflights == p->inflight_max) ?
653 sw->ports[i].inflights, COL_RESET);
655 fprintf(f, "\tMax New: %u"
656 "\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
657 sw->ports[i].inflight_max,
658 sw->ports[i].avg_pkt_ticks,
659 sw->ports[i].inflight_credits);
660 fprintf(f, "\tReceive burst distribution:\n");
661 float zp_percent = p->zero_polls * 100.0 / p->total_polls;
662 fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
664 for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
665 if (p->poll_buckets[max] != 0)
667 for (j = 0; j <= max; j++) {
668 if (p->poll_buckets[j] != 0) {
669 float poll_pc = p->poll_buckets[j] * 100.0 /
671 fprintf(f, "%u-%u:%.02f%% ",
672 ((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
673 ((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
679 if (p->rx_worker_ring) {
680 uint64_t used = rte_event_ring_count(p->rx_worker_ring);
681 uint64_t space = rte_event_ring_free_count(
683 const char *col = (space == 0) ? COL_RED : COL_RESET;
684 fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
685 PRIu64 COL_RESET"\n", col, used, space);
687 fprintf(f, "\trx ring not initialized.\n");
689 if (p->cq_worker_ring) {
690 uint64_t used = rte_event_ring_count(p->cq_worker_ring);
691 uint64_t space = rte_event_ring_free_count(
693 const char *col = (space == 0) ? COL_RED : COL_RESET;
694 fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
695 PRIu64 COL_RESET"\n", col, used, space);
697 fprintf(f, "\tcq ring not initialized.\n");
700 for (i = 0; i < sw->qid_count; i++) {
701 const struct sw_qid *qid = &sw->qids[i];
702 if (!qid->initialized) {
703 fprintf(f, " %sQueue %d not initialized.%s\n",
704 COL_RED, i, COL_RESET);
707 int affinities_per_port[SW_PORTS_MAX] = {0};
708 uint32_t inflights = 0;
710 fprintf(f, " Queue %d (%s)\n", i, q_type_strings[qid->type]);
711 fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64"\n",
712 qid->stats.rx_pkts, qid->stats.rx_dropped,
714 if (qid->type == RTE_SCHED_TYPE_ORDERED) {
715 struct rte_ring *rob_buf_free =
716 qid->reorder_buffer_freelist;
718 fprintf(f, "\tReorder entries in use: %u\n",
719 rte_ring_free_count(rob_buf_free));
722 "\tReorder buffer not initialized\n");
726 for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
727 if (qid->fids[flow].cq != -1) {
728 affinities_per_port[qid->fids[flow].cq]++;
729 inflights += qid->fids[flow].pcount;
733 fprintf(f, "\tPer Port Stats:\n");
734 for (port = 0; port < sw->port_count; port++) {
735 fprintf(f, "\t Port %d: Pkts: %"PRIu64, port,
737 fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
741 uint32_t iq_printed = 0;
742 for (iq = 0; iq < SW_IQS_MAX; iq++) {
743 if (!qid->iq[iq].head) {
744 fprintf(f, "\tiq %d is not initialized.\n", iq);
748 uint32_t used = iq_count(&qid->iq[iq]);
749 const char *col = COL_RESET;
751 fprintf(f, "\t%siq %d: Used %d"
752 COL_RESET"\n", col, iq, used);
757 fprintf(f, "\t-- iqs empty --\n");
762 sw_start(struct rte_eventdev *dev)
765 struct sw_evdev *sw = sw_pmd_priv(dev);
767 rte_service_component_runstate_set(sw->service_id, 1);
769 /* check a service core is mapped to this service */
770 if (!rte_service_runstate_get(sw->service_id)) {
771 SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
776 /* check all ports are set up */
777 for (i = 0; i < sw->port_count; i++)
778 if (sw->ports[i].rx_worker_ring == NULL) {
779 SW_LOG_ERR("Port %d not configured\n", i);
783 /* check all queues are configured and mapped to ports */
784 for (i = 0; i < sw->qid_count; i++)
785 if (!sw->qids[i].initialized ||
786 sw->qids[i].cq_num_mapped_cqs == 0) {
787 SW_LOG_ERR("Queue %d not configured\n", i);
791 /* build up our prioritized array of qids */
792 /* We don't use qsort here, as if all/multiple entries have the same
793 * priority, the result is non-deterministic. From "man 3 qsort":
794 * "If two members compare as equal, their order in the sorted
795 * array is undefined."
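* Instead, the nested loops below append qids one priority level at a time,
* which keeps equal-priority queues in index order (a stable fill).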
798 for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
799 for (i = 0; i < sw->qid_count; i++) {
800 if (sw->qids[i].priority == j) {
801 sw->qids_prioritized[qidx] = &sw->qids[i];
809 if (sw_xstats_init(sw) < 0)
819 sw_stop(struct rte_eventdev *dev)
821 struct sw_evdev *sw = sw_pmd_priv(dev);
824 /* Stop the scheduler if it's running */
825 runstate = rte_service_runstate_get(sw->service_id);
827 rte_service_runstate_set(sw->service_id, 0);
829 while (rte_service_may_be_active(sw->service_id))
832 /* Flush all events out of the device */
833 while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) {
834 sw_event_schedule(dev);
836 sw_drain_queues(dev);
839 sw_clean_qid_iqs(dev);
840 sw_xstats_uninit(sw);
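/* put the scheduler service back into the run state it had on entry */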
845 rte_service_runstate_set(sw->service_id, 1);
849 sw_close(struct rte_eventdev *dev)
851 struct sw_evdev *sw = sw_pmd_priv(dev);
854 for (i = 0; i < sw->qid_count; i++)
855 sw_queue_release(dev, i);
858 for (i = 0; i < sw->port_count; i++)
859 sw_port_release(&sw->ports[i]);
862 memset(&sw->stats, 0, sizeof(sw->stats));
863 sw->sched_called = 0;
864 sw->sched_no_iq_enqueues = 0;
865 sw->sched_no_cq_enqueues = 0;
866 sw->sched_cq_qid_called = 0;
872 assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
874 int *socket_id = opaque;
875 *socket_id = atoi(value);
876 if (*socket_id >= RTE_MAX_NUMA_NODES)
882 set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
884 int *quanta = opaque;
885 *quanta = atoi(value);
886 if (*quanta < 0 || *quanta >= 4096)
892 set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
894 int *credit = opaque;
895 *credit = atoi(value);
896 if (*credit < 0 || *credit >= 128)
902 static int32_t sw_sched_service_func(void *args)
904 struct rte_eventdev *dev = args;
905 sw_event_schedule(dev);
910 sw_probe(struct rte_vdev_device *vdev)
912 static struct rte_eventdev_ops evdev_sw_ops = {
913 .dev_configure = sw_dev_configure,
914 .dev_infos_get = sw_info_get,
915 .dev_close = sw_close,
916 .dev_start = sw_start,
920 .queue_def_conf = sw_queue_def_conf,
921 .queue_setup = sw_queue_setup,
922 .queue_release = sw_queue_release,
923 .port_def_conf = sw_port_def_conf,
924 .port_setup = sw_port_setup,
925 .port_release = sw_port_release,
926 .port_link = sw_port_link,
927 .port_unlink = sw_port_unlink,
929 .eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
931 .timer_adapter_caps_get = sw_timer_adapter_caps_get,
933 .crypto_adapter_caps_get = sw_crypto_adapter_caps_get,
935 .xstats_get = sw_xstats_get,
936 .xstats_get_names = sw_xstats_get_names,
937 .xstats_get_by_name = sw_xstats_get_by_name,
938 .xstats_reset = sw_xstats_reset,
940 .dev_selftest = test_sw_eventdev,
943 static const char *const args[] = {
951 struct rte_eventdev *dev;
953 int socket_id = rte_socket_id();
954 int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
955 int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
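/* Typical vdev invocation (the instance name here is just for illustration):
 *   --vdev="event_sw0,numa_node=0,sched_quanta=64,credit_quanta=32"
 */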
957 name = rte_vdev_device_name(vdev);
958 params = rte_vdev_device_args(vdev);
959 if (params != NULL && params[0] != '\0') {
960 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
964 "Ignoring unsupported parameters when creating device '%s'\n",
967 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
968 assign_numa_node, &socket_id);
971 "%s: Error parsing numa node parameter",
973 rte_kvargs_free(kvlist);
977 ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
978 set_sched_quanta, &sched_quanta);
981 "%s: Error parsing sched quanta parameter",
983 rte_kvargs_free(kvlist);
987 ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
988 set_credit_quanta, &credit_quanta);
991 "%s: Error parsing credit quanta parameter",
993 rte_kvargs_free(kvlist);
997 rte_kvargs_free(kvlist);
1002 "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
1003 name, socket_id, sched_quanta, credit_quanta);
1005 dev = rte_event_pmd_vdev_init(name,
1006 sizeof(struct sw_evdev), socket_id);
1008 SW_LOG_ERR("eventdev vdev init() failed");
1011 dev->dev_ops = &evdev_sw_ops;
1012 dev->enqueue = sw_event_enqueue;
1013 dev->enqueue_burst = sw_event_enqueue_burst;
1014 dev->enqueue_new_burst = sw_event_enqueue_burst;
1015 dev->enqueue_forward_burst = sw_event_enqueue_burst;
1016 dev->dequeue = sw_event_dequeue;
1017 dev->dequeue_burst = sw_event_dequeue_burst;
1019 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1022 sw = dev->data->dev_private;
1023 sw->data = dev->data;
1025 /* copy values passed from vdev command line to instance */
1026 sw->credit_update_quanta = credit_quanta;
1027 sw->sched_quanta = sched_quanta;
1029 /* register service with EAL */
1030 struct rte_service_spec service;
1031 memset(&service, 0, sizeof(struct rte_service_spec));
1032 snprintf(service.name, sizeof(service.name), "%s_service", name);
1033 snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
1035 service.socket_id = socket_id;
1036 service.callback = sw_sched_service_func;
1037 service.callback_userdata = (void *)dev;
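/* The scheduler runs as an EAL service: it only executes once an lcore is
 * mapped to it and its runstate is enabled (sw_start() warns if no service
 * core has been set up).
 */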
1039 int32_t ret = rte_service_component_register(&service, &sw->service_id);
1041 SW_LOG_ERR("service register() failed");
1045 dev->data->service_inited = 1;
1046 dev->data->service_id = sw->service_id;
1052 sw_remove(struct rte_vdev_device *vdev)
1056 name = rte_vdev_device_name(vdev);
1060 SW_LOG_INFO("Closing eventdev sw device %s\n", name);
1062 return rte_event_pmd_vdev_uninit(name);
1065 static struct rte_vdev_driver evdev_sw_pmd_drv = {
1070 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
1071 RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
1072 SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");
1074 /* declared extern in header, for access from other .c files */
1075 int eventdev_sw_log_level;
1077 RTE_INIT(evdev_sw_init_log)
1079 eventdev_sw_log_level = rte_log_register("pmd.event.sw");
1080 if (eventdev_sw_log_level >= 0)
1081 rte_log_set_level(eventdev_sw_log_level, RTE_LOG_NOTICE);