X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=inline;f=drivers%2Fevent%2Fsw%2Fsw_evdev.c;h=1175d6cdbee7e00de6a308f35515aa0f44bd997a;hb=bd5ac24fea88a2825e437c505a6f05b102010dda;hp=500387fe7e2523e9e38f7931ea67573292344c2b;hpb=e1f2dcdb8f5c01bb1296c5c6c03d39cdf368b91a;p=dpdk.git

diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 500387fe7e..1175d6cdbe 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -13,7 +13,7 @@
 #include 

 #include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"

 #define EVENTDEV_NAME_SW_PMD event_sw
 #define NUMA_NODE_ARG "numa_node"
@@ -113,9 +113,21 @@ sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
 			}
 		}
 	}
+
+	p->unlinks_in_progress += unlinked;
+	rte_smp_mb();
+
 	return unlinked;
 }

+static int
+sw_port_unlinks_in_progress(struct rte_eventdev *dev, void *port)
+{
+	RTE_SET_USED(dev);
+	struct sw_port *p = port;
+	return p->unlinks_in_progress;
+}
+
 static int
 sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 		const struct rte_event_port_conf *conf)
@@ -163,6 +175,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	}

 	p->inflight_max = conf->new_event_threshold;
+	p->implicit_release = !conf->disable_implicit_release;

 	/* check if ring exists, same as rx_worker above */
 	snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -213,18 +226,9 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	unsigned int i;
 	int dev_id = sw->data->dev_id;
 	int socket_id = sw->data->socket_id;
-	char buf[IQ_RING_NAMESIZE];
+	char buf[IQ_ROB_NAMESIZE];
 	struct sw_qid *qid = &sw->qids[idx];

-	for (i = 0; i < SW_IQS_MAX; i++) {
-		snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
-		qid->iq[i] = iq_ring_create(buf, socket_id);
-		if (!qid->iq[i]) {
-			SW_LOG_DBG("ring create failed");
-			goto cleanup;
-		}
-	}
-
 	/* Initialize the FID structures to no pinning (-1), and zero packets */
 	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
 	for (i = 0; i < RTE_DIM(qid->fids); i++)
@@ -302,11 +306,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	return 0;

 cleanup:
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		if (qid->iq[i])
-			iq_ring_destroy(qid->iq[i]);
-	}
-
 	if (qid->reorder_buffer) {
 		rte_free(qid->reorder_buffer);
 		qid->reorder_buffer = NULL;
@@ -325,10 +324,6 @@ sw_queue_release(struct rte_eventdev *dev, uint8_t id)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
 	struct sw_qid *qid = &sw->qids[id];
-	uint32_t i;
-
-	for (i = 0; i < SW_IQS_MAX; i++)
-		iq_ring_destroy(qid->iq[i]);

 	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
 		rte_free(qid->reorder_buffer);
@@ -361,6 +356,131 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	return qid_init(sw, queue_id, type, conf);
 }

+static void
+sw_init_qid_iqs(struct sw_evdev *sw)
+{
+	int i, j;
+
+	/* Initialize the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		if (!qid->initialized)
+			continue;
+
+		for (j = 0; j < SW_IQS_MAX; j++)
+			iq_init(sw, &qid->iq[j]);
+	}
+}
+
+static int
+sw_qids_empty(struct sw_evdev *sw)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < sw->qid_count; i++) {
+		for (j = 0; j < SW_IQS_MAX; j++) {
+			if (iq_count(&sw->qids[i].iq[j]))
+				return 0;
+		}
+	}
+
+	return 1;
+}
+
+static int
+sw_ports_empty(struct sw_evdev *sw)
+{
+	unsigned int i;
+
+	for (i = 0; i < sw->port_count; i++) {
+		if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) ||
+		     rte_event_ring_count(sw->ports[i].cq_worker_ring))
+			return 0;
+	}
+
+	return 1;
+}
+
+static void
+sw_drain_ports(struct rte_eventdev *dev)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	eventdev_stop_flush_t flush;
+	unsigned int i;
+	uint8_t dev_id;
+	void *arg;
+
+	flush = dev->dev_ops->dev_stop_flush;
+	dev_id = dev->data->dev_id;
+	arg = dev->data->dev_stop_flush_arg;
+
+	for (i = 0; i < sw->port_count; i++) {
+		struct rte_event ev;
+
+		while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) {
+			if (flush)
+				flush(dev_id, ev, arg);
+
+			ev.op = RTE_EVENT_OP_RELEASE;
+			rte_event_enqueue_burst(dev_id, i, &ev, 1);
+		}
+	}
+}
+
+static void
+sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	eventdev_stop_flush_t flush;
+	uint8_t dev_id;
+	void *arg;
+
+	flush = dev->dev_ops->dev_stop_flush;
+	dev_id = dev->data->dev_id;
+	arg = dev->data->dev_stop_flush_arg;
+
+	while (iq_count(iq) > 0) {
+		struct rte_event ev;
+
+		iq_dequeue_burst(sw, iq, &ev, 1);
+
+		if (flush)
+			flush(dev_id, ev, arg);
+	}
+}
+
+static void
+sw_drain_queues(struct rte_eventdev *dev)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	unsigned int i, j;
+
+	for (i = 0; i < sw->qid_count; i++) {
+		for (j = 0; j < SW_IQS_MAX; j++)
+			sw_drain_queue(dev, &sw->qids[i].iq[j]);
+	}
+}
+
+static void
+sw_clean_qid_iqs(struct rte_eventdev *dev)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	int i, j;
+
+	/* Release the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		for (j = 0; j < SW_IQS_MAX; j++) {
+			if (!qid->iq[j].head)
+				continue;
+			iq_free_chunk_list(sw, qid->iq[j].head);
+			qid->iq[j].head = NULL;
+		}
+	}
+}
+
 static void
 sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 				 struct rte_event_queue_conf *conf)
@@ -388,6 +508,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->new_event_threshold = 1024;
 	port_conf->dequeue_depth = 16;
 	port_conf->enqueue_depth = 16;
+	port_conf->disable_implicit_release = 0;
 }

 static int
@@ -396,12 +517,36 @@ sw_dev_configure(const struct rte_eventdev *dev)
 	struct sw_evdev *sw = sw_pmd_priv(dev);
 	const struct rte_eventdev_data *data = dev->data;
 	const struct rte_event_dev_config *conf = &data->dev_conf;
+	int num_chunks, i;

 	sw->qid_count = conf->nb_event_queues;
 	sw->port_count = conf->nb_event_ports;
 	sw->nb_events_limit = conf->nb_events_limit;
 	rte_atomic32_set(&sw->inflights, 0);

+	/* Number of chunks sized for worst-case spread of events across IQs */
+	num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
+			sw->qid_count*SW_IQS_MAX*2;
+
+	/* If this is a reconfiguration, free the previous IQ allocation. All
+	 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
+	 * will be reinitialized in sw_start().
+	 */
+	if (sw->chunks)
+		rte_free(sw->chunks);
+
+	sw->chunks = rte_malloc_socket(NULL,
+			sizeof(struct sw_queue_chunk) *
+			num_chunks,
+			0,
+			sw->data->socket_id);
+	if (!sw->chunks)
+		return -ENOMEM;
+
+	sw->chunk_list_head = NULL;
+	for (i = 0; i < num_chunks; i++)
+		iq_free_chunk(sw, &sw->chunks[i]);
+
 	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
 		return -ENOTSUP;

@@ -421,6 +566,33 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 	return 0;
 }

+static int
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
+			  uint64_t flags,
+			  uint32_t *caps,
+			  const struct rte_event_timer_adapter_ops **ops)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(flags);
+	*caps = 0;
+
+	/* Use default SW ops */
+	*ops = NULL;
+
+	return 0;
+}
+
+static int
+sw_crypto_adapter_caps_get(const struct rte_eventdev *dev,
+			   const struct rte_cryptodev *cdev,
+			   uint32_t *caps)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(cdev);
+	*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
+	return 0;
+}
+
 static void
 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 {
@@ -436,9 +608,14 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 		.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
 		.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
 		.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
-		.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
-				RTE_EVENT_DEV_CAP_BURST_MODE |
-				RTE_EVENT_DEV_CAP_EVENT_QOS),
+		.event_dev_cap = (
+			RTE_EVENT_DEV_CAP_QUEUE_QOS |
+			RTE_EVENT_DEV_CAP_BURST_MODE |
+			RTE_EVENT_DEV_CAP_EVENT_QOS |
+			RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
+			RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+			RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+			RTE_EVENT_DEV_CAP_NONSEQ_MODE),
 	};

 	*info = evdev_sw_info;
@@ -575,17 +752,16 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 		uint32_t iq;
 		uint32_t iq_printed = 0;
 		for (iq = 0; iq < SW_IQS_MAX; iq++) {
-			if (!qid->iq[iq]) {
+			if (!qid->iq[iq].head) {
 				fprintf(f, "\tiq %d is not initialized.\n", iq);
 				iq_printed = 1;
 				continue;
 			}
-			uint32_t used = iq_ring_count(qid->iq[iq]);
-			uint32_t free = iq_ring_free_count(qid->iq[iq]);
-			const char *col = (free == 0) ? COL_RED : COL_RESET;
+			uint32_t used = iq_count(&qid->iq[iq]);
+			const char *col = COL_RESET;
 			if (used > 0) {
-				fprintf(f, "\t%siq %d: Used %d\tFree %d"
-					COL_RESET"\n", col, iq, used, free);
+				fprintf(f, "\t%siq %d: Used %d"
+					COL_RESET"\n", col, iq, used);
 				iq_printed = 1;
 			}
 		}
@@ -618,8 +794,8 @@ sw_start(struct rte_eventdev *dev)

 	/* check all queues are configured and mapped to ports*/
 	for (i = 0; i < sw->qid_count; i++)
-		if (sw->qids[i].iq[0] == NULL ||
-				sw->qids[i].cq_num_mapped_cqs == 0) {
+		if (!sw->qids[i].initialized ||
+				sw->qids[i].cq_num_mapped_cqs == 0) {
 			SW_LOG_ERR("Queue %d not configured\n", i);
 			return -ENOLINK;
 		}
@@ -640,6 +816,8 @@ sw_start(struct rte_eventdev *dev)
 		}
 	}

+	sw_init_qid_iqs(sw);
+
 	if (sw_xstats_init(sw) < 0)
 		return -EINVAL;

@@ -653,9 +831,30 @@ static void
 sw_stop(struct rte_eventdev *dev)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
+	int32_t runstate;
+
+	/* Stop the scheduler if it's running */
+	runstate = rte_service_runstate_get(sw->service_id);
+	if (runstate == 1)
+		rte_service_runstate_set(sw->service_id, 0);
+
+	while (rte_service_may_be_active(sw->service_id))
+		rte_pause();
+
+	/* Flush all events out of the device */
+	while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) {
+		sw_event_schedule(dev);
+		sw_drain_ports(dev);
+		sw_drain_queues(dev);
+	}
+
+	sw_clean_qid_iqs(dev);
 	sw_xstats_uninit(sw);
 	sw->started = 0;
 	rte_smp_wmb();
+
+	if (runstate == 1)
+		rte_service_runstate_set(sw->service_id, 1);
 }

 static int
@@ -722,7 +921,7 @@ static int32_t sw_sched_service_func(void *args)
 static int
 sw_probe(struct rte_vdev_device *vdev)
 {
-	static const struct rte_eventdev_ops evdev_sw_ops = {
+	static struct rte_eventdev_ops evdev_sw_ops = {
 			.dev_configure = sw_dev_configure,
 			.dev_infos_get = sw_info_get,
 			.dev_close = sw_close,
@@ -738,13 +937,20 @@ sw_probe(struct rte_vdev_device *vdev)
 			.port_release = sw_port_release,
 			.port_link = sw_port_link,
 			.port_unlink = sw_port_unlink,
+			.port_unlinks_in_progress = sw_port_unlinks_in_progress,

 			.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,

+			.timer_adapter_caps_get = sw_timer_adapter_caps_get,
+
+			.crypto_adapter_caps_get = sw_crypto_adapter_caps_get,
+
 			.xstats_get = sw_xstats_get,
 			.xstats_get_names = sw_xstats_get_names,
 			.xstats_get_by_name = sw_xstats_get_by_name,
 			.xstats_reset = sw_xstats_reset,
+
+			.dev_selftest = test_sw_eventdev,
 	};

 	static const char *const args[] = {
@@ -877,3 +1083,13 @@ static struct rte_vdev_driver evdev_sw_pmd_drv = {
 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
 RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "= "
 		SCHED_QUANTA_ARG "=" CREDIT_QUANTA_ARG "=");
+
+/* declared extern in header, for access from other .c files */
+int eventdev_sw_log_level;
+
+RTE_INIT(evdev_sw_init_log)
+{
+	eventdev_sw_log_level = rte_log_register("pmd.event.sw");
+	if (eventdev_sw_log_level >= 0)
+		rte_log_set_level(eventdev_sw_log_level, RTE_LOG_NOTICE);
+}
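
Usage sketch (not part of the patch): the hunks above wire a dev_stop_flush drain into sw_stop() and add a port_unlinks_in_progress query op, both of which an application reaches through the public eventdev API rather than through this PMD directly. Below is a minimal, hedged illustration of that flow as two independent helpers; the app_* function names are hypothetical, and it assumes each flushed event carries an mbuf.

#include <rte_common.h>
#include <rte_eventdev.h>
#include <rte_mbuf.h>
#include <rte_pause.h>

/* Called once for every event still buffered in the device when
 * rte_event_dev_stop() drains it (the flush calls issued by
 * sw_drain_queue()/sw_drain_ports() in the patch above).
 */
static void
app_stop_flush(uint8_t dev_id, struct rte_event ev, void *arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(arg);
	rte_pktmbuf_free(ev.mbuf);	/* application-specific cleanup */
}

/* Unlink one queue from a port, then wait until the PMD reports the
 * unlink as complete (served by sw_port_unlinks_in_progress() above).
 */
static void
app_quiesce_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
	if (rte_event_port_unlink(dev_id, port_id, &queue_id, 1) != 1)
		return;

	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
		rte_pause();
}

static int
app_stop_dev(uint8_t dev_id)
{
	int ret;

	ret = rte_event_dev_stop_flush_callback_register(dev_id,
			app_stop_flush, NULL);
	if (ret < 0)
		return ret;

	/* With the callback in place, sw_stop() pushes every residual
	 * event through app_stop_flush() before the device stops. */
	rte_event_dev_stop(dev_id);
	return 0;
}

The patch also advertises RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE: a port set up with disable_implicit_release = 1 must enqueue RTE_EVENT_OP_RELEASE explicitly for each dequeued event, which is the same release that sw_drain_ports() performs while draining.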