diff --git a/drivers/event/dsw/dsw_evdev.c b/drivers/event/dsw/dsw_evdev.c
index bcfa17bab2..2301a4b7a0 100644
--- a/drivers/event/dsw/dsw_evdev.c
+++ b/drivers/event/dsw/dsw_evdev.c
@@ -5,9 +5,10 @@
 #include <stdbool.h>
 
 #include <rte_cycles.h>
-#include <rte_eventdev_pmd.h>
-#include <rte_eventdev_pmd_vdev.h>
+#include <eventdev_pmd.h>
+#include <eventdev_pmd_vdev.h>
 #include <rte_random.h>
+#include <rte_ring_elem.h>
 
 #include "dsw_evdev.h"
 
@@ -20,6 +21,7 @@ dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	struct dsw_evdev *dsw = dsw_pmd_priv(dev);
 	struct dsw_port *port;
 	struct rte_event_ring *in_ring;
+	struct rte_ring *ctl_in_ring;
 	char ring_name[RTE_RING_NAMESIZE];
 
 	port = &dsw->ports[port_id];
@@ -42,13 +44,29 @@ dsw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	if (in_ring == NULL)
 		return -ENOMEM;
 
-	port->in_ring = in_ring;
+	snprintf(ring_name, sizeof(ring_name), "dswctl%d_p%u",
+		 dev->data->dev_id, port_id);
+
+	ctl_in_ring = rte_ring_create_elem(ring_name,
+					   sizeof(struct dsw_ctl_msg),
+					   DSW_CTL_IN_RING_SIZE,
+					   dev->data->socket_id,
+					   RING_F_SC_DEQ|RING_F_EXACT_SZ);
+
+	if (ctl_in_ring == NULL) {
+		rte_event_ring_free(in_ring);
+		return -ENOMEM;
+	}
 
-	rte_atomic16_init(&port->load);
+	port->in_ring = in_ring;
+	port->ctl_in_ring = ctl_in_ring;
 
 	port->load_update_interval =
 		(DSW_LOAD_UPDATE_INTERVAL * rte_get_timer_hz()) / US_PER_S;
 
+	port->migration_interval =
+		(DSW_MIGRATION_INTERVAL * rte_get_timer_hz()) / US_PER_S;
+
 	dev->data->ports[port_id] = port;
 
 	return 0;
@@ -72,6 +90,7 @@ dsw_port_release(void *p)
 	struct dsw_port *port = p;
 
 	rte_event_ring_free(port->in_ring);
+	rte_ring_free(port->ctl_in_ring);
 }
 
 static int
@@ -84,9 +103,6 @@ dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES & conf->event_queue_cfg)
 		return -ENOTSUP;
 
-	if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
-		return -ENOTSUP;
-
 	/* SINGLE_LINK is better off treated as TYPE_ATOMIC, since it
 	 * avoid the "fake" TYPE_PARALLEL flow_id assignment. Since
 	 * the queue will only have a single serving port, no
@@ -95,8 +111,12 @@ dsw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	 */
 	if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK & conf->event_queue_cfg)
 		queue->schedule_type = RTE_SCHED_TYPE_ATOMIC;
-	else /* atomic or parallel */
+	else {
+		if (conf->schedule_type == RTE_SCHED_TYPE_ORDERED)
+			return -ENOTSUP;
+		/* atomic or parallel */
 		queue->schedule_type = conf->schedule_type;
+	}
 
 	queue->num_serving_ports = 0;
 
@@ -199,7 +219,10 @@ dsw_info_get(struct rte_eventdev *dev __rte_unused,
 		.max_event_port_enqueue_depth = DSW_MAX_PORT_ENQUEUE_DEPTH,
 		.max_num_events = DSW_MAX_EVENTS,
 		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE|
-		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED
+		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED|
+		RTE_EVENT_DEV_CAP_NONSEQ_MODE|
+		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT|
+		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID
 	};
 }
 
@@ -249,7 +272,7 @@ dsw_start(struct rte_eventdev *dev)
 	uint16_t i;
 	uint64_t now;
 
-	rte_atomic32_init(&dsw->credits_on_loan);
+	dsw->credits_on_loan = 0;
 
 	initial_flow_to_port_assignment(dsw);
 
@@ -272,6 +295,14 @@ dsw_port_drain_buf(uint8_t dev_id, struct rte_event *buf, uint16_t buf_len,
 		flush(dev_id, buf[i], flush_arg);
 }
 
+static void
+dsw_port_drain_paused(uint8_t dev_id, struct dsw_port *port,
+		      eventdev_stop_flush_t flush, void *flush_arg)
+{
+	dsw_port_drain_buf(dev_id, port->paused_events, port->paused_events_len,
+			   flush, flush_arg);
+}
+
 static void
 dsw_port_drain_out(uint8_t dev_id, struct dsw_evdev *dsw, struct dsw_port *port,
 		   eventdev_stop_flush_t flush, void *flush_arg)
@@ -308,6 +339,7 @@ dsw_drain(uint8_t dev_id, struct dsw_evdev *dsw,
 		struct dsw_port *port = &dsw->ports[port_id];
 
 		dsw_port_drain_out(dev_id, dsw, port, flush, flush_arg);
+		dsw_port_drain_paused(dev_id, port, flush, flush_arg);
 		dsw_port_drain_in_ring(dev_id, port, flush, flush_arg);
 	}
 }
@@ -338,6 +370,34 @@ dsw_close(struct rte_eventdev *dev)
 	return 0;
 }
 
+static int
+dsw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
+			    const struct rte_eth_dev *eth_dev __rte_unused,
+			    uint32_t *caps)
+{
+	*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	return 0;
+}
+
+static int
+dsw_timer_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
+			   uint64_t flags __rte_unused, uint32_t *caps,
+			   const struct rte_event_timer_adapter_ops **ops)
+{
+	*caps = 0;
+	*ops = NULL;
+	return 0;
+}
+
+static int
+dsw_crypto_adapter_caps_get(const struct rte_eventdev *dev __rte_unused,
+			    const struct rte_cryptodev *cdev __rte_unused,
+			    uint32_t *caps)
+{
+	*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
+	return 0;
+}
+
 static struct rte_eventdev_ops dsw_evdev_ops = {
 	.port_setup = dsw_port_setup,
 	.port_def_conf = dsw_port_def_conf,
@@ -351,7 +411,13 @@ static struct rte_eventdev_ops dsw_evdev_ops = {
 	.dev_configure = dsw_configure,
 	.dev_start = dsw_start,
 	.dev_stop = dsw_stop,
-	.dev_close = dsw_close
+	.dev_close = dsw_close,
+	.eth_rx_adapter_caps_get = dsw_eth_rx_adapter_caps_get,
+	.timer_adapter_caps_get = dsw_timer_adapter_caps_get,
+	.crypto_adapter_caps_get = dsw_crypto_adapter_caps_get,
+	.xstats_get = dsw_xstats_get,
+	.xstats_get_names = dsw_xstats_get_names,
+	.xstats_get_by_name = dsw_xstats_get_by_name
 };
 
 static int
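
The most substantial change in the diff is the per-port control ring: rte_ring_create_elem() (declared in rte_ring_elem.h, added to the includes above) creates a ring whose slots hold fixed-size elements, so each struct dsw_ctl_msg is copied into and out of the ring by value rather than passed by pointer, and RING_F_EXACT_SZ guarantees the requested number of usable slots even for a non-power-of-two size. Below is a minimal, self-contained sketch of that element-ring pattern using the standard rte_ring _elem API; the message layout, names, and sizes are hypothetical illustrations, not the actual DSW control-plane code.

#include <errno.h>
#include <stdint.h>

#include <rte_lcore.h>
#include <rte_ring.h>
#include <rte_ring_elem.h>

/* hypothetical fixed-size control message; the element size passed
 * to the _elem API must be a non-zero multiple of 4 bytes */
struct ctl_msg {
	uint32_t type;
	uint32_t originating_port_id;
	uint64_t data;
};

static int
ctl_ring_roundtrip(unsigned int num_slots)
{
	/* single-consumer ring with exactly num_slots usable slots */
	struct rte_ring *r =
		rte_ring_create_elem("ctl_demo", sizeof(struct ctl_msg),
				     num_slots, rte_socket_id(),
				     RING_F_SC_DEQ|RING_F_EXACT_SZ);
	struct ctl_msg snd = { .type = 1, .originating_port_id = 0,
			       .data = 42 };
	struct ctl_msg rcv;
	int rc;

	if (r == NULL)
		return -ENOMEM;

	/* the whole struct is copied into a ring slot; -ENOBUFS if full */
	rc = rte_ring_enqueue_elem(r, &snd, sizeof(snd));
	if (rc == 0)
		/* copies the element back out; -ENOENT if the ring is empty */
		rc = rte_ring_dequeue_elem(r, &rcv, sizeof(rcv));

	rte_ring_free(r);
	return rc;
}

Storing the messages by value keeps the control plane free of a separate message allocator and of any message-lifetime bookkeeping between ports, which is presumably why the driver uses the _elem ring variant here instead of a plain pointer ring.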