1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
8 #include <rte_bus_vdev.h>
9 #include <rte_kvargs.h>
11 #include <rte_errno.h>
12 #include <rte_event_ring.h>
13 #include <rte_service_component.h>
18 #define EVENTDEV_NAME_SW_PMD event_sw
19 #define NUMA_NODE_ARG "numa_node"
20 #define SCHED_QUANTA_ARG "sched_quanta"
21 #define CREDIT_QUANTA_ARG "credit_quanta"
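/* These kvarg names are parsed in sw_probe(): numa_node selects the socket
 * used for the device's allocations, while sched_quanta and credit_quanta
 * are copied to the instance (sw->sched_quanta, sw->credit_update_quanta)
 * to tune scheduling and credit-update behaviour.
 */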
24 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
27 sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
28 const uint8_t priorities[], uint16_t num)
30 struct sw_port *p = port;
31 struct sw_evdev *sw = sw_pmd_priv(dev);
34 RTE_SET_USED(priorities);
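/* Per-link priorities are accepted by the eventdev API but not used by this
 * PMD, hence the RTE_SET_USED() above.
 */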
35 for (i = 0; i < num; i++) {
36 struct sw_qid *q = &sw->qids[queues[i]];
38 /* check for qid map overflow */
39 if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
44 if (p->is_directed && p->num_qids_mapped > 0) {
49 if (q->type == SW_SCHED_TYPE_DIRECT) {
50 /* check directed qids only map to one port */
51 if (p->num_qids_mapped > 0) {
55 /* check port only takes a directed flow */
62 p->num_qids_mapped = 1;
63 } else if (q->type == RTE_SCHED_TYPE_ORDERED) {
64 p->num_ordered_qids++;
66 } else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
67 q->type == RTE_SCHED_TYPE_PARALLEL) {
71 q->cq_map[q->cq_num_mapped_cqs] = p->id;
73 q->cq_num_mapped_cqs++;
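/* cq_map[] is this qid's list of linked ports (consumer queues); appending
 * the port id and bumping cq_num_mapped_cqs makes the new link visible to
 * the scheduler.
 */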
79 sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
82 struct sw_port *p = port;
83 struct sw_evdev *sw = sw_pmd_priv(dev);
87 for (i = 0; i < nb_unlinks; i++) {
88 struct sw_qid *q = &sw->qids[queues[i]];
89 for (j = 0; j < q->cq_num_mapped_cqs; j++) {
90 if (q->cq_map[j] == p->id) {
91 q->cq_map[j] =
92 q->cq_map[q->cq_num_mapped_cqs - 1];
94 q->cq_num_mapped_cqs--;
99 if (q->type == RTE_SCHED_TYPE_ORDERED)
100 p->num_ordered_qids--;
110 sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
111 const struct rte_event_port_conf *conf)
113 struct sw_evdev *sw = sw_pmd_priv(dev);
114 struct sw_port *p = &sw->ports[port_id];
115 char buf[RTE_RING_NAMESIZE];
118 struct rte_event_dev_info info;
119 sw_info_get(dev, &info);
121 /* detect re-configuring and return credits to instance if needed */
122 if (p->initialized) {
123 /* taking credits from the pool is done one quanta at a time, and
124 * credits may be spent (counted in p->inflights) or still
125 * available in the port (p->inflight_credits). We must return
126 * the sum so that no credits are leaked.
128 int possible_inflights = p->inflight_credits + p->inflights;
129 rte_atomic32_sub(&sw->inflights, possible_inflights);
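/* For example, a port holding 3 unused credits with 5 events still in
 * flight returns 8 here, keeping the device-wide inflight count balanced
 * across a re-configure.
 */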
132 *p = (struct sw_port){0}; /* zero entire structure */
136 /* check to see if the ring exists - port_setup() can legally be called
137 * multiple times (assuming the device is stopped). If the ring exists,
138 * free it so it gets re-created with the correct size
140 snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
141 port_id, "rx_worker_ring");
142 struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
144 rte_event_ring_free(existing_ring);
146 p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
147 dev->data->socket_id,
148 RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
149 if (p->rx_worker_ring == NULL) {
150 SW_LOG_ERR("Error creating RX worker ring for port %d\n",
155 p->inflight_max = conf->new_event_threshold;
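/* Per the eventdev API, new_event_threshold back-pressures op NEW enqueues
 * on this port once the device-wide inflight count reaches it (FORWARD and
 * RELEASE are not limited by it); the PMD keeps it in p->inflight_max for
 * the enqueue path.
 */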
157 /* check if ring exists, same as rx_worker above */
158 snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
159 port_id, "cq_worker_ring");
160 existing_ring = rte_event_ring_lookup(buf);
162 rte_event_ring_free(existing_ring);
164 p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
165 dev->data->socket_id,
166 RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
167 if (p->cq_worker_ring == NULL) {
168 rte_event_ring_free(p->rx_worker_ring);
169 SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
173 sw->cq_ring_space[port_id] = conf->dequeue_depth;
175 /* set hist list contents to empty */
176 for (i = 0; i < SW_PORT_HIST_LIST; i++) {
177 p->hist_list[i].fid = -1;
178 p->hist_list[i].qid = -1;
180 dev->data->ports[port_id] = p;
188 sw_port_release(void *port)
190 struct sw_port *p = (void *)port;
194 rte_event_ring_free(p->rx_worker_ring);
195 rte_event_ring_free(p->cq_worker_ring);
196 memset(p, 0, sizeof(*p));
200 qid_init(struct sw_evdev *sw, unsigned int idx, int type,
201 const struct rte_event_queue_conf *queue_conf)
204 int dev_id = sw->data->dev_id;
205 int socket_id = sw->data->socket_id;
206 char buf[IQ_RING_NAMESIZE];
207 struct sw_qid *qid = &sw->qids[idx];
209 for (i = 0; i < SW_IQS_MAX; i++) {
210 snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
211 qid->iq[i] = iq_ring_create(buf, socket_id);
213 SW_LOG_DBG("ring create failed");
218 /* Initialize the FID structures to no pinning (-1), and zero packets */
219 const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
220 for (i = 0; i < RTE_DIM(qid->fids); i++)
225 qid->priority = queue_conf->priority;
227 if (qid->type == RTE_SCHED_TYPE_ORDERED) {
228 char ring_name[RTE_RING_NAMESIZE];
229 uint32_t window_size;
231 /* rte_ring and window_size_mask require window_size to
232 * be a power of two.
233 */
234 window_size = rte_align32pow2(
235 queue_conf->nb_atomic_order_sequences);
237 qid->window_size = window_size - 1;
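/* For example, requesting 100 nb_atomic_order_sequences yields a 128-entry
 * reorder window from rte_align32pow2(), and window_size - 1 = 127 is kept
 * as the index mask.
 */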
241 "invalid reorder_window_size for ordered queue\n"
246 snprintf(buf, sizeof(buf), "sw%d_iq_%d_rob", dev_id, i);
247 qid->reorder_buffer = rte_zmalloc_socket(buf,
248 window_size * sizeof(qid->reorder_buffer[0]),
250 if (!qid->reorder_buffer) {
251 SW_LOG_DBG("reorder_buffer malloc failed\n");
255 memset(&qid->reorder_buffer[0],
256 0,
257 window_size * sizeof(qid->reorder_buffer[0]));
259 snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
262 /* lookup the ring, and if it already exists, free it */
263 struct rte_ring *cleanup = rte_ring_lookup(ring_name);
265 rte_ring_free(cleanup);
267 qid->reorder_buffer_freelist = rte_ring_create(ring_name,
270 RING_F_SP_ENQ | RING_F_SC_DEQ);
271 if (!qid->reorder_buffer_freelist) {
272 SW_LOG_DBG("freelist ring create failed");
276 /* Populate the freelist with reorder buffer entries. Enqueue
277 * 'window_size - 1' entries because the rte_ring holds only
278 * that many.
279 */
280 for (i = 0; i < window_size - 1; i++) {
281 if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
282 &qid->reorder_buffer[i]) < 0)
286 qid->reorder_buffer_index = 0;
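/* Note: an rte_ring created with size N (without RING_F_EXACT_SZ) holds at
 * most N - 1 entries, which is why only window_size - 1 reorder entries are
 * enqueued above.
 */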
290 qid->initialized = 1;
295 for (i = 0; i < SW_IQS_MAX; i++) {
297 iq_ring_destroy(qid->iq[i]);
300 if (qid->reorder_buffer) {
301 rte_free(qid->reorder_buffer);
302 qid->reorder_buffer = NULL;
305 if (qid->reorder_buffer_freelist) {
306 rte_ring_free(qid->reorder_buffer_freelist);
307 qid->reorder_buffer_freelist = NULL;
314 sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
315 const struct rte_event_queue_conf *conf)
319 type = conf->schedule_type;
321 if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg) {
322 type = SW_SCHED_TYPE_DIRECT;
323 } else if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
324 & conf->event_queue_cfg) {
325 SW_LOG_ERR("QUEUE_CFG_ALL_TYPES not supported\n");
329 struct sw_evdev *sw = sw_pmd_priv(dev);
330 return qid_init(sw, queue_id, type, conf);
334 sw_queue_release(struct rte_eventdev *dev, uint8_t id)
336 struct sw_evdev *sw = sw_pmd_priv(dev);
337 struct sw_qid *qid = &sw->qids[id];
340 for (i = 0; i < SW_IQS_MAX; i++)
341 iq_ring_destroy(qid->iq[i]);
343 if (qid->type == RTE_SCHED_TYPE_ORDERED) {
344 rte_free(qid->reorder_buffer);
345 rte_ring_free(qid->reorder_buffer_freelist);
347 memset(qid, 0, sizeof(*qid));
351 sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
352 struct rte_event_queue_conf *conf)
355 RTE_SET_USED(queue_id);
357 static const struct rte_event_queue_conf default_conf = {
358 .nb_atomic_flows = 4096,
359 .nb_atomic_order_sequences = 1,
360 .schedule_type = RTE_SCHED_TYPE_ATOMIC,
361 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
364 *conf = default_conf;
368 sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
369 struct rte_event_port_conf *port_conf)
372 RTE_SET_USED(port_id);
374 port_conf->new_event_threshold = 1024;
375 port_conf->dequeue_depth = 16;
376 port_conf->enqueue_depth = 16;
380 sw_dev_configure(const struct rte_eventdev *dev)
382 struct sw_evdev *sw = sw_pmd_priv(dev);
383 const struct rte_eventdev_data *data = dev->data;
384 const struct rte_event_dev_config *conf = &data->dev_conf;
386 sw->qid_count = conf->nb_event_queues;
387 sw->port_count = conf->nb_event_ports;
388 sw->nb_events_limit = conf->nb_events_limit;
389 rte_atomic32_set(&sw->inflights, 0);
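/* sw->inflights counts events currently inside the device; sw_dump() below
 * reports nb_events_limit - inflights as the remaining credits.
 */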
391 if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
400 sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
401 const struct rte_eth_dev *eth_dev,
405 RTE_SET_USED(eth_dev);
406 *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
411 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
415 static const struct rte_event_dev_info evdev_sw_info = {
416 .driver_name = SW_PMD_NAME,
417 .max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
418 .max_event_queue_flows = SW_QID_NUM_FIDS,
419 .max_event_queue_priority_levels = SW_Q_PRIORITY_MAX,
420 .max_event_priority_levels = SW_IQS_MAX,
421 .max_event_ports = SW_PORTS_MAX,
422 .max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
423 .max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
424 .max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
425 .event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
426 RTE_EVENT_DEV_CAP_BURST_MODE |
427 RTE_EVENT_DEV_CAP_EVENT_QOS),
430 *info = evdev_sw_info;
434 sw_dump(struct rte_eventdev *dev, FILE *f)
436 const struct sw_evdev *sw = sw_pmd_priv(dev);
438 static const char * const q_type_strings[] = {
439 "Ordered", "Atomic", "Parallel", "Directed"
442 fprintf(f, "EventDev %s: ports %d, qids %d\n", "todo-fix-name",
443 sw->port_count, sw->qid_count);
445 fprintf(f, "\trx %"PRIu64"\n\tdrop %"PRIu64"\n\ttx %"PRIu64"\n",
446 sw->stats.rx_pkts, sw->stats.rx_dropped, sw->stats.tx_pkts);
447 fprintf(f, "\tsched calls: %"PRIu64"\n", sw->sched_called);
448 fprintf(f, "\tsched cq/qid call: %"PRIu64"\n", sw->sched_cq_qid_called);
449 fprintf(f, "\tsched no IQ enq: %"PRIu64"\n", sw->sched_no_iq_enqueues);
450 fprintf(f, "\tsched no CQ enq: %"PRIu64"\n", sw->sched_no_cq_enqueues);
451 uint32_t inflights = rte_atomic32_read(&sw->inflights);
452 uint32_t credits = sw->nb_events_limit - inflights;
453 fprintf(f, "\tinflight %d, credits: %d\n", inflights, credits);
455 #define COL_RED "\x1b[31m"
456 #define COL_RESET "\x1b[0m"
458 for (i = 0; i < sw->port_count; i++) {
460 const struct sw_port *p = &sw->ports[i];
461 if (!p->initialized) {
462 fprintf(f, " %sPort %d not initialized.%s\n",
463 COL_RED, i, COL_RESET);
466 fprintf(f, " Port %d %s\n", i,
467 p->is_directed ? " (SingleCons)" : "");
468 fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64
469 "\t%sinflight %d%s\n", sw->ports[i].stats.rx_pkts,
470 sw->ports[i].stats.rx_dropped,
471 sw->ports[i].stats.tx_pkts,
472 (p->inflights == p->inflight_max) ?
474 sw->ports[i].inflights, COL_RESET);
476 fprintf(f, "\tMax New: %u"
477 "\tAvg cycles PP: %"PRIu64"\tCredits: %u\n",
478 sw->ports[i].inflight_max,
479 sw->ports[i].avg_pkt_ticks,
480 sw->ports[i].inflight_credits);
481 fprintf(f, "\tReceive burst distribution:\n");
482 float zp_percent = p->zero_polls * 100.0 / p->total_polls;
483 fprintf(f, zp_percent < 10 ? "\t\t0:%.02f%% " : "\t\t0:%.0f%% ",
485 for (max = (int)RTE_DIM(p->poll_buckets); max-- > 0;)
486 if (p->poll_buckets[max] != 0)
488 for (j = 0; j <= max; j++) {
489 if (p->poll_buckets[j] != 0) {
490 float poll_pc = p->poll_buckets[j] * 100.0 /
492 fprintf(f, "%u-%u:%.02f%% ",
493 ((j << SW_DEQ_STAT_BUCKET_SHIFT) + 1),
494 ((j+1) << SW_DEQ_STAT_BUCKET_SHIFT),
500 if (p->rx_worker_ring) {
501 uint64_t used = rte_event_ring_count(p->rx_worker_ring);
502 uint64_t space = rte_event_ring_free_count(
504 const char *col = (space == 0) ? COL_RED : COL_RESET;
505 fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
506 PRIu64 COL_RESET"\n", col, used, space);
508 fprintf(f, "\trx ring not initialized.\n");
510 if (p->cq_worker_ring) {
511 uint64_t used = rte_event_ring_count(p->cq_worker_ring);
512 uint64_t space = rte_event_ring_free_count(
514 const char *col = (space == 0) ? COL_RED : COL_RESET;
515 fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
516 PRIu64 COL_RESET"\n", col, used, space);
518 fprintf(f, "\tcq ring not initialized.\n");
521 for (i = 0; i < sw->qid_count; i++) {
522 const struct sw_qid *qid = &sw->qids[i];
523 if (!qid->initialized) {
524 fprintf(f, " %sQueue %d not initialized.%s\n",
525 COL_RED, i, COL_RESET);
528 int affinities_per_port[SW_PORTS_MAX] = {0};
529 uint32_t inflights = 0;
531 fprintf(f, " Queue %d (%s)\n", i, q_type_strings[qid->type]);
532 fprintf(f, "\trx %"PRIu64"\tdrop %"PRIu64"\ttx %"PRIu64"\n",
533 qid->stats.rx_pkts, qid->stats.rx_dropped,
535 if (qid->type == RTE_SCHED_TYPE_ORDERED) {
536 struct rte_ring *rob_buf_free =
537 qid->reorder_buffer_freelist;
539 fprintf(f, "\tReorder entries in use: %u\n",
540 rte_ring_free_count(rob_buf_free));
543 "\tReorder buffer not initialized\n");
547 for (flow = 0; flow < RTE_DIM(qid->fids); flow++)
548 if (qid->fids[flow].cq != -1) {
549 affinities_per_port[qid->fids[flow].cq]++;
550 inflights += qid->fids[flow].pcount;
554 fprintf(f, "\tPer Port Stats:\n");
555 for (port = 0; port < sw->port_count; port++) {
556 fprintf(f, "\t Port %d: Pkts: %"PRIu64, port,
558 fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
562 uint32_t iq_printed = 0;
563 for (iq = 0; iq < SW_IQS_MAX; iq++) {
565 fprintf(f, "\tiq %d is not initialized.\n", iq);
569 uint32_t used = iq_ring_count(qid->iq[iq]);
570 uint32_t free = iq_ring_free_count(qid->iq[iq]);
571 const char *col = (free == 0) ? COL_RED : COL_RESET;
573 fprintf(f, "\t%siq %d: Used %d\tFree %d"
574 COL_RESET"\n", col, iq, used, free);
579 fprintf(f, "\t-- iqs empty --\n");
584 sw_start(struct rte_eventdev *dev)
587 struct sw_evdev *sw = sw_pmd_priv(dev);
589 rte_service_component_runstate_set(sw->service_id, 1);
591 /* check that a service core is mapped to this service */
592 if (!rte_service_runstate_get(sw->service_id)) {
593 SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
598 /* check all ports are set up */
599 for (i = 0; i < sw->port_count; i++)
600 if (sw->ports[i].rx_worker_ring == NULL) {
601 SW_LOG_ERR("Port %d not configured\n", i);
605 /* check all queues are configured and mapped to ports */
606 for (i = 0; i < sw->qid_count; i++)
607 if (sw->qids[i].iq[0] == NULL ||
608 sw->qids[i].cq_num_mapped_cqs == 0) {
609 SW_LOG_ERR("Queue %d not configured\n", i);
613 /* build up our prioritized array of qids */
614 /* We don't use qsort here, as if all/multiple entries have the same
615 * priority, the result is non-deterministic. From "man 3 qsort":
616 * "If two members compare as equal, their order in the sorted
617 * array is undefined."
620 for (j = 0; j <= RTE_EVENT_DEV_PRIORITY_LOWEST; j++) {
621 for (i = 0; i < sw->qid_count; i++) {
622 if (sw->qids[i].priority == j) {
623 sw->qids_prioritized[qidx] = &sw->qids[i];
629 if (sw_xstats_init(sw) < 0)
639 sw_stop(struct rte_eventdev *dev)
641 struct sw_evdev *sw = sw_pmd_priv(dev);
642 sw_xstats_uninit(sw);
648 sw_close(struct rte_eventdev *dev)
650 struct sw_evdev *sw = sw_pmd_priv(dev);
653 for (i = 0; i < sw->qid_count; i++)
654 sw_queue_release(dev, i);
657 for (i = 0; i < sw->port_count; i++)
658 sw_port_release(&sw->ports[i]);
661 memset(&sw->stats, 0, sizeof(sw->stats));
662 sw->sched_called = 0;
663 sw->sched_no_iq_enqueues = 0;
664 sw->sched_no_cq_enqueues = 0;
665 sw->sched_cq_qid_called = 0;
671 assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
673 int *socket_id = opaque;
674 *socket_id = atoi(value);
675 if (*socket_id >= RTE_MAX_NUMA_NODES)
681 set_sched_quanta(const char *key __rte_unused, const char *value, void *opaque)
683 int *quanta = opaque;
684 *quanta = atoi(value);
685 if (*quanta < 0 || *quanta >= 4096)
691 set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
693 int *credit = opaque;
694 *credit = atoi(value);
695 if (*credit < 0 || *credit >= 128)
701 static int32_t sw_sched_service_func(void *args)
703 struct rte_eventdev *dev = args;
704 sw_event_schedule(dev);
709 sw_probe(struct rte_vdev_device *vdev)
711 static const struct rte_eventdev_ops evdev_sw_ops = {
712 .dev_configure = sw_dev_configure,
713 .dev_infos_get = sw_info_get,
714 .dev_close = sw_close,
715 .dev_start = sw_start,
719 .queue_def_conf = sw_queue_def_conf,
720 .queue_setup = sw_queue_setup,
721 .queue_release = sw_queue_release,
722 .port_def_conf = sw_port_def_conf,
723 .port_setup = sw_port_setup,
724 .port_release = sw_port_release,
725 .port_link = sw_port_link,
726 .port_unlink = sw_port_unlink,
728 .eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
730 .xstats_get = sw_xstats_get,
731 .xstats_get_names = sw_xstats_get_names,
732 .xstats_get_by_name = sw_xstats_get_by_name,
733 .xstats_reset = sw_xstats_reset,
736 static const char *const args[] = {
744 struct rte_eventdev *dev;
746 int socket_id = rte_socket_id();
747 int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
748 int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
750 name = rte_vdev_device_name(vdev);
751 params = rte_vdev_device_args(vdev);
752 if (params != NULL && params[0] != '\0') {
753 struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);
757 "Ignoring unsupported parameters when creating device '%s'\n",
760 int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
761 assign_numa_node, &socket_id);
764 "%s: Error parsing numa node parameter",
766 rte_kvargs_free(kvlist);
770 ret = rte_kvargs_process(kvlist, SCHED_QUANTA_ARG,
771 set_sched_quanta, &sched_quanta);
774 "%s: Error parsing sched quanta parameter",
776 rte_kvargs_free(kvlist);
780 ret = rte_kvargs_process(kvlist, CREDIT_QUANTA_ARG,
781 set_credit_quanta, &credit_quanta);
784 "%s: Error parsing credit quanta parameter",
786 rte_kvargs_free(kvlist);
790 rte_kvargs_free(kvlist);
795 "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
796 name, socket_id, sched_quanta, credit_quanta);
798 dev = rte_event_pmd_vdev_init(name,
799 sizeof(struct sw_evdev), socket_id);
801 SW_LOG_ERR("eventdev vdev init() failed");
804 dev->dev_ops = &evdev_sw_ops;
805 dev->enqueue = sw_event_enqueue;
806 dev->enqueue_burst = sw_event_enqueue_burst;
807 dev->enqueue_new_burst = sw_event_enqueue_burst;
808 dev->enqueue_forward_burst = sw_event_enqueue_burst;
809 dev->dequeue = sw_event_dequeue;
810 dev->dequeue_burst = sw_event_dequeue_burst;
812 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
815 sw = dev->data->dev_private;
816 sw->data = dev->data;
818 /* copy values passed from vdev command line to instance */
819 sw->credit_update_quanta = credit_quanta;
820 sw->sched_quanta = sched_quanta;
822 /* register service with EAL */
823 struct rte_service_spec service;
824 memset(&service, 0, sizeof(struct rte_service_spec));
825 snprintf(service.name, sizeof(service.name), "%s_service", name);
826 snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
828 service.socket_id = socket_id;
829 service.callback = sw_sched_service_func;
830 service.callback_userdata = (void *)dev;
832 int32_t ret = rte_service_component_register(&service, &sw->service_id);
834 SW_LOG_ERR("service register() failed");
838 dev->data->service_inited = 1;
839 dev->data->service_id = sw->service_id;
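/* Scheduling only happens when an EAL service lcore runs this service. A
 * typical application maps one explicitly (service_lcore_id below is an
 * application-chosen lcore), e.g.:
 *
 *   rte_service_map_lcore_set(sw->service_id, service_lcore_id, 1);
 *   rte_service_lcore_start(service_lcore_id);
 *
 * sw_start() checks for this and logs a warning otherwise.
 */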
845 sw_remove(struct rte_vdev_device *vdev)
849 name = rte_vdev_device_name(vdev);
853 SW_LOG_INFO("Closing eventdev sw device %s\n", name);
855 return rte_event_pmd_vdev_uninit(name);
858 static struct rte_vdev_driver evdev_sw_pmd_drv = {
859 .probe = sw_probe,
860 .remove = sw_remove
861 };
863 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
864 RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
865 SCHED_QUANTA_ARG "=<int> " CREDIT_QUANTA_ARG "=<int>");
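/* Example vdev usage (argument values are illustrative only):
 *
 *   --vdev="event_sw0,numa_node=0,sched_quanta=64,credit_quanta=32"
 *
 * Omitted arguments fall back to rte_socket_id() and the SW_DEFAULT_*
 * quanta chosen in sw_probe().
 */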