diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index bcfa17bab2..7798a38ad9 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -8,6 +8,7 @@
 #include <rte_eventdev_pmd.h>
 #include <rte_eventdev_pmd_vdev.h>
 #include <rte_random.h>
+#include <rte_ring_elem.h>
 
 #include "dsw_evdev.h"
 
@@ -20,6 +21,7 @@ dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
 	struct dsw_port *port;
 	struct rte_event_ring *in_ring;
+	struct rte_ring *ctl_in_ring;
 	char ring_name[RTE_RING_NAMESIZE];
 
 	port = &dsw->ports[port_id];
@@ -42,13 +44,31 @@ dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	if (in_ring == NULL)
 		return -ENOMEM;
 
+	snprintf(ring_name, sizeof(ring_name), "dswctl%d_p%u",
+		 dev->data->dev_id, port_id);
+
+	ctl_in_ring = rte_ring_create_elem(ring_name,
+					   sizeof(struct dsw_ctl_msg),
+					   DSW_CTL_IN_RING_SIZE,
+					   dev->data->socket_id,
+					   RING_F_SC_DEQ|RING_F_EXACT_SZ);
+
+	if (ctl_in_ring == NULL) {
+		rte_event_ring_free(in_ring);
+		return -ENOMEM;
+	}
+
 	port->in_ring = in_ring;
+	port->ctl_in_ring = ctl_in_ring;
 
 	rte_atomic16_init(&port->load);
 
 	port->load_update_interval =
 		(DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;
 
+	port->migration_interval =
+		(DSW_MIGRATION_INTERVAL * rte_get_timer_hz()) / US_PER_S;
+
 	dev->data->ports[port_id] = port;
 
 	return 0;
@@ -72,6 +92,7 @@ dsw_port_release(void *p)
 	struct dsw_port *port = p;
 
 	rte_event_ring_free(port->in_ring);
+	rte_ring_free(port->ctl_in_ring);
 }
 
 static int
@@ -84,9 +105,6 @@ dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
 		return -ENOTSUP;
 
-	if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
-		return -ENOTSUP;
-
 	/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
 	 * avoids the "fake" TYPE_PARALLEL flow_id assignment. Since
 	 * the queue will only have a single serving port, no
@@ -95,8 +113,12 @@ dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	 */
 	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
 		queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
-	else /* atomic or parallel */
+	else {
+		if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
+			return -ENOTSUP;
+		/* atomic or parallel */
 		queue->schedule_type = conf->schedule_type;
+	}
 
 	queue->num_serving_ports = 0;
 
@@ -199,7 +221,9 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
 		.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
 		.max_num_events = DSW_MAX_EVENTS,
 		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
-		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
+		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
+		RTE_EVENT_DEV_CAP_NONSEQ_MODE|
+		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT
 	};
 }
 
@@ -272,6 +296,14 @@ dsw_port_drain_buf(uint8_t dev_id, struct rte_event *buf, uint16_t buf_len,
 		flush(dev_id, buf[i], flush_arg);
 }
 
+static void
+dsw_port_drain_paused(uint8_t dev_id, struct dsw_port *port,
+		      eventdev_stop_flush_t flush, void *flush_arg)
+{
+	dsw_port_drain_buf(dev_id, port->paused_events, port->paused_events_len,
+			   flush, flush_arg);
+}
+
 static void
 dsw_port_drain_out(uint8_t dev_id, struct dsw_evdev *dsw, struct dsw_port *port,
 		   eventdev_stop_flush_t flush, void *flush_arg)
@@ -308,6 +340,7 @@ dsw_drain(uint8_t dev_id, struct dsw_evdev *dsw,
 		struct dsw_port *port = &dsw->ports[port_id];
 
 		dsw_port_drain_out(dev_id, dsw, port, flush, flush_arg);
+		dsw_port_drain_paused(dev_id, port, flush, flush_arg);
 		dsw_port_drain_in_ring(dev_id, port, flush, flush_arg);
 	}
 }
@@ -351,7 +384,10 @@ static struct rte_eventdev_ops dsw_evdev_ops = {
 	.dev_configure = dsw_configure,
 	.dev_start = dsw_start,
 	.dev_stop = dsw_stop,
-	.dev_close = dsw_close
+	.dev_close = dsw_close,
+	.xstats_get = dsw_xstats_get,
+	.xstats_get_names = dsw_xstats_get_names,
+	.xstats_get_by_name = dsw_xstats_get_by_name
};
 
 static int
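
Two details of the control ring setup are worth noting. The ring is created with the element API, so each slot stores a whole struct dsw_ctl_msg by value rather than a pointer, and RING_F_EXACT_SZ makes all DSW_CTL_IN_RING_SIZE slots usable instead of imposing the usual power-of-two sizing. Note also that the error path frees the already-created event ring, so a failed control ring allocation does not leak. Below is a minimal standalone sketch of the same by-value ring pattern; the message struct, ring name, and size are made up for illustration, not taken from the driver:

#include <stdint.h>
#include <rte_lcore.h>
#include <rte_ring_elem.h>

/* Hypothetical fixed-size control message; the element size handed
 * to the ring API must be a multiple of 4 bytes.
 */
struct ctl_msg {
	uint32_t type;
	uint32_t origin_port_id;
};

static struct rte_ring *
ctl_ring_create(void)
{
	/* Single consumer, exactly 64 usable slots. */
	return rte_ring_create_elem("ctl_example",
				    sizeof(struct ctl_msg), 64,
				    rte_socket_id(),
				    RING_F_SC_DEQ|RING_F_EXACT_SZ);
}

static int
ctl_ring_roundtrip(struct rte_ring *ring)
{
	struct ctl_msg in = { .type = 1, .origin_port_id = 7 };
	struct ctl_msg out;

	/* Enqueue and dequeue each copy the whole struct. */
	if (rte_ring_enqueue_elem(ring, &in, sizeof(in)) != 0)
		return -1;
	return rte_ring_dequeue_elem(ring, &out, sizeof(out));
}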
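
The new dsw_port_drain_paused() call closes a gap in the stop-time drain: events parked in port->paused_events during an in-flight migration would otherwise never reach the application's stop-flush callback. That callback is the standard eventdev mechanism; a sketch of registering one follows, with hypothetical names (count_leftover, leftover_events):

#include <stdint.h>
#include <rte_common.h>
#include <rte_eventdev.h>

static uint64_t leftover_events;

/* Hypothetical flush callback: invoked once per event still inside
 * the scheduler when rte_event_dev_stop() runs.
 */
static void
count_leftover(uint8_t dev_id, struct rte_event ev, void *arg)
{
	uint64_t *count = arg;

	RTE_SET_USED(dev_id);
	RTE_SET_USED(ev);
	(*count)++;
}

static int
register_flush_cb(uint8_t dev_id)
{
	return rte_event_dev_stop_flush_callback_register(
		dev_id, count_leftover, &leftover_events);
}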
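
Finally, the three xstats ops are driver-internal; applications reach them through the generic rte_event_dev_xstats_*() calls. A sketch of dumping device-scope counters that way, assuming a fixed-size array is large enough (a first call with size 0 would report the exact count needed):

#include <inttypes.h>
#include <stdio.h>
#include <rte_common.h>
#include <rte_eventdev.h>

static void
dump_dev_xstats(uint8_t dev_id)
{
	struct rte_event_dev_xstats_name names[256];
	unsigned int ids[256];
	uint64_t values[256];
	int n, i;

	/* Fetch the names and ids of all device-scope stats. */
	n = rte_event_dev_xstats_names_get(dev_id,
					   RTE_EVENT_DEV_XSTATS_DEVICE, 0,
					   names, ids, RTE_DIM(names));
	if (n <= 0 || n > (int)RTE_DIM(names))
		return;

	/* Fetch the current values for those ids. */
	n = rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
				     0, ids, values, n);

	for (i = 0; i < n; i++)
		printf("%s: %" PRIu64 "\n", names[i].name, values[i]);
}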