X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Fsw%2Fsw_evdev.c;h=0d8013adf7cb384b92f7327567e9241109ba1232;hb=bbf19e89b87cab524a91f76a89347538a038ceae;hp=74976c01af5c4be032351a47587ec9caa6258fe8;hpb=5566a3e35866ce9e5eacf886c27b460ebfcd6ee9;p=dpdk.git

diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 74976c01af..0d8013adf7 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -13,12 +13,16 @@
 #include 

 #include "sw_evdev.h"
-#include "iq_ring.h"
+#include "iq_chunk.h"
+#include "event_ring.h"

 #define EVENTDEV_NAME_SW_PMD event_sw
 #define NUMA_NODE_ARG "numa_node"
 #define SCHED_QUANTA_ARG "sched_quanta"
 #define CREDIT_QUANTA_ARG "credit_quanta"
+#define MIN_BURST_SIZE_ARG "min_burst"
+#define DEQ_BURST_SIZE_ARG "deq_burst"
+#define REFIL_ONCE_ARG "refill_once"

 static void
 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
@@ -34,27 +38,37 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
 	RTE_SET_USED(priorities);
 	for (i = 0; i < num; i++) {
 		struct sw_qid *q = &sw->qids[queues[i]];
+		unsigned int j;

 		/* check for qid map overflow */
 		if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) {
-			rte_errno = -EDQUOT;
+			rte_errno = EDQUOT;
 			break;
 		}

 		if (p->is_directed && p->num_qids_mapped > 0) {
-			rte_errno = -EDQUOT;
+			rte_errno = EDQUOT;
 			break;
 		}

+		for (j = 0; j < q->cq_num_mapped_cqs; j++) {
+			if (q->cq_map[j] == p->id)
+				break;
+		}
+
+		/* check if port is already linked */
+		if (j < q->cq_num_mapped_cqs)
+			continue;
+
 		if (q->type == SW_SCHED_TYPE_DIRECT) {
 			/* check directed qids only map to one port */
 			if (p->num_qids_mapped > 0) {
-				rte_errno = -EDQUOT;
+				rte_errno = EDQUOT;
 				break;
 			}
 			/* check port only takes a directed flow */
 			if (num > 1) {
-				rte_errno = -EDQUOT;
+				rte_errno = EDQUOT;
 				break;
 			}

@@ -103,9 +117,21 @@ sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
 			}
 		}
 	}
+
+	p->unlinks_in_progress += unlinked;
+	rte_smp_mb();
+
 	return unlinked;
 }

+static int
+sw_port_unlinks_in_progress(struct rte_eventdev *dev, void *port)
+{
+	RTE_SET_USED(dev);
+	struct sw_port *p = port;
+	return p->unlinks_in_progress;
+}
+
 static int
 sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 		const struct rte_event_port_conf *conf)
@@ -153,6 +179,8 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	}

 	p->inflight_max = conf->new_event_threshold;
+	p->implicit_release = !(conf->event_port_cfg &
+				RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);

 	/* check if ring exists, same as rx_worker above */
 	snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -203,18 +231,9 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	unsigned int i;
 	int dev_id = sw->data->dev_id;
 	int socket_id = sw->data->socket_id;
-	char buf[IQ_RING_NAMESIZE];
+	char buf[IQ_ROB_NAMESIZE];
 	struct sw_qid *qid = &sw->qids[idx];

-	for (i = 0; i < SW_IQS_MAX; i++) {
-		snprintf(buf, sizeof(buf), "q_%u_iq_%d", idx, i);
-		qid->iq[i] = iq_ring_create(buf, socket_id);
-		if (!qid->iq[i]) {
-			SW_LOG_DBG("ring create failed");
-			goto cleanup;
-		}
-	}
-
 	/* Initialize the FID structures to no pinning (-1), and zero packets */
 	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
 	for (i = 0; i < RTE_DIM(qid->fids); i++)
@@ -225,7 +244,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	qid->priority = queue_conf->priority;

 	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
-		char ring_name[RTE_RING_NAMESIZE];
 		uint32_t window_size;

 		/* rte_ring and window_size_mask require require window_size to
@@ -256,18 +274,8 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 		       0,
 		       window_size * sizeof(qid->reorder_buffer[0]));

-		snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
-				dev_id, idx);
-
-		/* lookup the ring, and if it already exists, free it */
-		struct rte_ring *cleanup = rte_ring_lookup(ring_name);
-		if (cleanup)
-			rte_ring_free(cleanup);
-
-		qid->reorder_buffer_freelist = rte_ring_create(ring_name,
-				window_size,
-				socket_id,
-				RING_F_SP_ENQ | RING_F_SC_DEQ);
+		qid->reorder_buffer_freelist = rob_ring_create(window_size,
+				socket_id);
 		if (!qid->reorder_buffer_freelist) {
 			SW_LOG_DBG("freelist ring create failed");
 			goto cleanup;
@@ -278,8 +286,8 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 		 * that many.
 		 */
 		for (i = 0; i < window_size - 1; i++) {
-			if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
-						&qid->reorder_buffer[i]) < 0)
+			if (rob_ring_enqueue(qid->reorder_buffer_freelist,
+						&qid->reorder_buffer[i]) != 1)
 				goto cleanup;
 		}

@@ -292,24 +300,32 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	return 0;

 cleanup:
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		if (qid->iq[i])
-			iq_ring_destroy(qid->iq[i]);
-	}
-
 	if (qid->reorder_buffer) {
 		rte_free(qid->reorder_buffer);
 		qid->reorder_buffer = NULL;
 	}

 	if (qid->reorder_buffer_freelist) {
-		rte_ring_free(qid->reorder_buffer_freelist);
+		rob_ring_free(qid->reorder_buffer_freelist);
 		qid->reorder_buffer_freelist = NULL;
 	}

 	return -EINVAL;
 }

+static void
+sw_queue_release(struct rte_eventdev *dev, uint8_t id)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	struct sw_qid *qid = &sw->qids[id];
+
+	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
+		rte_free(qid->reorder_buffer);
+		rob_ring_free(qid->reorder_buffer_freelist);
+	}
+	memset(qid, 0, sizeof(*qid));
+}
+
 static int
 sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 		const struct rte_event_queue_conf *conf)
@@ -327,24 +343,136 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	}

 	struct sw_evdev *sw = sw_pmd_priv(dev);
+
+	if (sw->qids[queue_id].initialized)
+		sw_queue_release(dev, queue_id);
+
 	return qid_init(sw, queue_id, type, conf);
 }

 static void
-sw_queue_release(struct rte_eventdev *dev, uint8_t id)
+sw_init_qid_iqs(struct sw_evdev *sw)
+{
+	int i, j;
+
+	/* Initialize the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		if (!qid->initialized)
+			continue;
+
+		for (j = 0; j < SW_IQS_MAX; j++)
+			iq_init(sw, &qid->iq[j]);
+	}
+}
+
+static int
+sw_qids_empty(struct sw_evdev *sw)
+{
+	unsigned int i, j;
+
+	for (i = 0; i < sw->qid_count; i++) {
+		for (j = 0; j < SW_IQS_MAX; j++) {
+			if (iq_count(&sw->qids[i].iq[j]))
+				return 0;
+		}
+	}
+
+	return 1;
+}
+
+static int
+sw_ports_empty(struct sw_evdev *sw)
+{
+	unsigned int i;
+
+	for (i = 0; i < sw->port_count; i++) {
+		if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) ||
+		     rte_event_ring_count(sw->ports[i].cq_worker_ring))
+			return 0;
+	}
+
+	return 1;
+}
+
+static void
+sw_drain_ports(struct rte_eventdev *dev)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
-	struct sw_qid *qid = &sw->qids[id];
-	uint32_t i;
+	eventdev_stop_flush_t flush;
+	unsigned int i;
+	uint8_t dev_id;
+	void *arg;

-	for (i = 0; i < SW_IQS_MAX; i++)
-		iq_ring_destroy(qid->iq[i]);
+	flush = dev->dev_ops->dev_stop_flush;
+	dev_id = dev->data->dev_id;
+	arg = dev->data->dev_stop_flush_arg;

-	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
-		rte_free(qid->reorder_buffer);
-		rte_ring_free(qid->reorder_buffer_freelist);
+	for (i = 0; i < sw->port_count; i++) {
+		struct rte_event ev;
+
+		while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) {
+			if (flush)
+				flush(dev_id, ev, arg);
+
+			ev.op = RTE_EVENT_OP_RELEASE;
+			rte_event_enqueue_burst(dev_id, i, &ev, 1);
+		}
+	}
+}
+
+static void
+sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	eventdev_stop_flush_t flush;
+	uint8_t dev_id;
+	void *arg;
+
+	flush = dev->dev_ops->dev_stop_flush;
+	dev_id = dev->data->dev_id;
+	arg = dev->data->dev_stop_flush_arg;
+
+	while (iq_count(iq) > 0) {
+		struct rte_event ev;
+
+		iq_dequeue_burst(sw, iq, &ev, 1);
+
+		if (flush)
+			flush(dev_id, ev, arg);
+	}
+}
+
+static void
+sw_drain_queues(struct rte_eventdev *dev)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	unsigned int i, j;
+
+	for (i = 0; i < sw->qid_count; i++) {
+		for (j = 0; j < SW_IQS_MAX; j++)
+			sw_drain_queue(dev, &sw->qids[i].iq[j]);
+	}
+}
+
+static void
+sw_clean_qid_iqs(struct rte_eventdev *dev)
+{
+	struct sw_evdev *sw = sw_pmd_priv(dev);
+	int i, j;
+
+	/* Release the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		for (j = 0; j < SW_IQS_MAX; j++) {
+			if (!qid->iq[j].head)
+				continue;
+			iq_free_chunk_list(sw, qid->iq[j].head);
+			qid->iq[j].head = NULL;
+		}
 	}
-	memset(qid, 0, sizeof(*qid));
 }

 static void
@@ -374,6 +502,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->new_event_threshold = 1024;
 	port_conf->dequeue_depth = 16;
 	port_conf->enqueue_depth = 16;
+	port_conf->event_port_cfg = 0;
 }

 static int
@@ -382,12 +511,36 @@ sw_dev_configure(const struct rte_eventdev *dev)
 	struct sw_evdev *sw = sw_pmd_priv(dev);
 	const struct rte_eventdev_data *data = dev->data;
 	const struct rte_event_dev_config *conf = &data->dev_conf;
+	int num_chunks, i;

 	sw->qid_count = conf->nb_event_queues;
 	sw->port_count = conf->nb_event_ports;
 	sw->nb_events_limit = conf->nb_events_limit;
 	rte_atomic32_set(&sw->inflights, 0);

+	/* Number of chunks sized for worst-case spread of events across IQs */
+	num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
+			sw->qid_count*SW_IQS_MAX*2;
+
+	/* If this is a reconfiguration, free the previous IQ allocation. All
+	 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
+	 * will be reinitialized in sw_start().
+	 */
+	if (sw->chunks)
+		rte_free(sw->chunks);
+
+	sw->chunks = rte_malloc_socket(NULL,
+			sizeof(struct sw_queue_chunk) *
+			num_chunks,
+			0,
+			sw->data->socket_id);
+	if (!sw->chunks)
+		return -ENOMEM;
+
+	sw->chunk_list_head = NULL;
+	for (i = 0; i < num_chunks; i++)
+		iq_free_chunk(sw, &sw->chunks[i]);
+
 	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)
 		return -ENOTSUP;

@@ -407,6 +560,33 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
 	return 0;
 }

+static int
+sw_timer_adapter_caps_get(const struct rte_eventdev *dev,
+			  uint64_t flags,
+			  uint32_t *caps,
+			  const struct rte_event_timer_adapter_ops **ops)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(flags);
+	*caps = 0;
+
+	/* Use default SW ops */
+	*ops = NULL;
+
+	return 0;
+}
+
+static int
+sw_crypto_adapter_caps_get(const struct rte_eventdev *dev,
+			   const struct rte_cryptodev *cdev,
+			   uint32_t *caps)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(cdev);
+	*caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP;
+	return 0;
+}
+
 static void
 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 {
@@ -422,9 +602,15 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 			.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
 			.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
 			.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
-			.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
-					RTE_EVENT_DEV_CAP_BURST_MODE |
-					RTE_EVENT_DEV_CAP_EVENT_QOS),
+			.event_dev_cap = (
+				RTE_EVENT_DEV_CAP_QUEUE_QOS |
+				RTE_EVENT_DEV_CAP_BURST_MODE |
+				RTE_EVENT_DEV_CAP_EVENT_QOS |
+				RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
+				RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+				RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+				RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+				RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
 	};

 	*info = evdev_sw_info;
@@ -533,11 +719,11 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 				qid->stats.rx_pkts, qid->stats.rx_dropped,
 				qid->stats.tx_pkts);
 		if (qid->type == RTE_SCHED_TYPE_ORDERED) {
-			struct rte_ring *rob_buf_free =
+			struct rob_ring *rob_buf_free =
 				qid->reorder_buffer_freelist;
 			if (rob_buf_free)
 				fprintf(f, "\tReorder entries in use: %u\n",
-					rte_ring_free_count(rob_buf_free));
+					rob_ring_free_count(rob_buf_free));
 			else
 				fprintf(f,
 					"\tReorder buffer not initialized\n");
@@ -561,17 +747,16 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 		uint32_t iq;
 		uint32_t iq_printed = 0;
 		for (iq = 0; iq < SW_IQS_MAX; iq++) {
-			if (!qid->iq[iq]) {
+			if (!qid->iq[iq].head) {
 				fprintf(f, "\tiq %d is not initialized.\n", iq);
 				iq_printed = 1;
 				continue;
 			}
-			uint32_t used = iq_ring_count(qid->iq[iq]);
-			uint32_t free = iq_ring_free_count(qid->iq[iq]);
-			const char *col = (free == 0) ? COL_RED : COL_RESET;
+			uint32_t used = iq_count(&qid->iq[iq]);
+			const char *col = COL_RESET;
 			if (used > 0) {
-				fprintf(f, "\t%siq %d: Used %d\tFree %d"
-					COL_RESET"\n", col, iq, used, free);
+				fprintf(f, "\t%siq %d: Used %d"
+					COL_RESET"\n", col, iq, used);
 				iq_printed = 1;
 			}
 		}
@@ -604,8 +789,8 @@ sw_start(struct rte_eventdev *dev)

 	/* check all queues are configured and mapped to ports*/
 	for (i = 0; i < sw->qid_count; i++)
-		if (sw->qids[i].iq[0] == NULL ||
-			sw->qids[i].cq_num_mapped_cqs == 0) {
+		if (!sw->qids[i].initialized ||
+		    sw->qids[i].cq_num_mapped_cqs == 0) {
 			SW_LOG_ERR("Queue %d not configured\n", i);
 			return -ENOLINK;
 		}
@@ -626,6 +811,8 @@ sw_start(struct rte_eventdev *dev)
 		}
 	}

+	sw_init_qid_iqs(sw);
+
 	if (sw_xstats_init(sw) < 0)
 		return -EINVAL;

@@ -639,9 +826,30 @@ static void
 sw_stop(struct rte_eventdev *dev)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
+	int32_t runstate;
+
+	/* Stop the scheduler if it's running */
+	runstate = rte_service_runstate_get(sw->service_id);
+	if (runstate == 1)
+		rte_service_runstate_set(sw->service_id, 0);
+
+	while (rte_service_may_be_active(sw->service_id))
+		rte_pause();
+
+	/* Flush all events out of the device */
+	while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) {
+		sw_event_schedule(dev);
+		sw_drain_ports(dev);
+		sw_drain_queues(dev);
+	}
+
+	sw_clean_qid_iqs(dev);
 	sw_xstats_uninit(sw);
 	sw->started = 0;
 	rte_smp_wmb();
+
+	if (runstate == 1)
+		rte_service_runstate_set(sw->service_id, 1);
 }

 static int
@@ -697,6 +905,35 @@ set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
 	return 0;
 }

+static int
+set_deq_burst_sz(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *deq_burst_sz = opaque;
+	*deq_burst_sz = atoi(value);
+	if (*deq_burst_sz < 0 || *deq_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE)
+		return -1;
+	return 0;
+}
+
+static int
+set_min_burst_sz(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *min_burst_sz = opaque;
+	*min_burst_sz = atoi(value);
+	if (*min_burst_sz < 0 || *min_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE)
+		return -1;
+	return 0;
+}
+
+static int
+set_refill_once(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *refill_once_per_call = opaque;
+	*refill_once_per_call = atoi(value);
+	if (*refill_once_per_call < 0 || *refill_once_per_call > 1)
+		return -1;
+	return 0;
+}

 static int32_t sw_sched_service_func(void *args)
 {
@@ -708,7 +945,7 @@ static int32_t sw_sched_service_func(void *args)
 static int
 sw_probe(struct rte_vdev_device *vdev)
 {
-	static const struct rte_eventdev_ops evdev_sw_ops = {
+	static struct rte_eventdev_ops evdev_sw_ops = {
 			.dev_configure = sw_dev_configure,
 			.dev_infos_get = sw_info_get,
 			.dev_close = sw_close,
@@ -724,19 +961,29 @@ sw_probe(struct rte_vdev_device *vdev)
 			.port_release = sw_port_release,
 			.port_link = sw_port_link,
 			.port_unlink = sw_port_unlink,
+			.port_unlinks_in_progress = sw_port_unlinks_in_progress,

 			.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,

+			.timer_adapter_caps_get = sw_timer_adapter_caps_get,
+
+			.crypto_adapter_caps_get = sw_crypto_adapter_caps_get,
+
 			.xstats_get = sw_xstats_get,
 			.xstats_get_names = sw_xstats_get_names,
 			.xstats_get_by_name = sw_xstats_get_by_name,
 			.xstats_reset = sw_xstats_reset,
+
+			.dev_selftest = test_sw_eventdev,
 	};

 	static const char *const args[] = {
 		NUMA_NODE_ARG,
 		SCHED_QUANTA_ARG,
 		CREDIT_QUANTA_ARG,
+		MIN_BURST_SIZE_ARG,
+		DEQ_BURST_SIZE_ARG,
+		REFIL_ONCE_ARG,
 		NULL
 	};
@@ -746,6 +993,9 @@ sw_probe(struct rte_vdev_device *vdev)
 	int socket_id = rte_socket_id();
 	int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
 	int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
+	int min_burst_size = 1;
+	int deq_burst_size = SCHED_DEQUEUE_DEFAULT_BURST_SIZE;
+	int refill_once = 0;

 	name = rte_vdev_device_name(vdev);
 	params = rte_vdev_device_args(vdev);
@@ -787,13 +1037,46 @@ sw_probe(struct rte_vdev_device *vdev)
 				return ret;
 			}

+			ret = rte_kvargs_process(kvlist, MIN_BURST_SIZE_ARG,
+					set_min_burst_sz, &min_burst_size);
+			if (ret != 0) {
+				SW_LOG_ERR(
+					"%s: Error parsing minimum burst size parameter",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DEQ_BURST_SIZE_ARG,
+					set_deq_burst_sz, &deq_burst_size);
+			if (ret != 0) {
+				SW_LOG_ERR(
+					"%s: Error parsing dequeue burst size parameter",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, REFIL_ONCE_ARG,
+					set_refill_once, &refill_once);
+			if (ret != 0) {
+				SW_LOG_ERR(
+					"%s: Error parsing refill once per call switch",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
 			rte_kvargs_free(kvlist);
 		}
 	}

 	SW_LOG_INFO(
-		"Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
-			name, socket_id, sched_quanta, credit_quanta);
+		"Creating eventdev sw device %s, numa_node=%d, "
+		"sched_quanta=%d, credit_quanta=%d "
+		"min_burst=%d, deq_burst=%d, refill_once=%d\n",
+		name, socket_id, sched_quanta, credit_quanta,
+		min_burst_size, deq_burst_size, refill_once);

 	dev = rte_event_pmd_vdev_init(name,
 			sizeof(struct sw_evdev), socket_id);
@@ -818,6 +1101,9 @@ sw_probe(struct rte_vdev_device *vdev)
 	/* copy values passed from vdev command line to instance */
 	sw->credit_update_quanta = credit_quanta;
 	sw->sched_quanta = sched_quanta;
+	sw->sched_min_burst_size = min_burst_size;
+	sw->sched_deq_burst_size = deq_burst_size;
+	sw->refill_once_per_iter = refill_once;

 	/* register service with EAL */
 	struct rte_service_spec service;
@@ -862,4 +1148,7 @@ static struct rte_vdev_driver evdev_sw_pmd_drv = {

 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
 RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "= "
-		SCHED_QUANTA_ARG "=" CREDIT_QUANTA_ARG "=");
+		SCHED_QUANTA_ARG "=" CREDIT_QUANTA_ARG "="
+		MIN_BURST_SIZE_ARG "=" DEQ_BURST_SIZE_ARG "="
+		REFIL_ONCE_ARG "=");
+RTE_LOG_REGISTER(eventdev_sw_log_level, pmd.event.sw, NOTICE);
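
Usage note (not part of the patch): a minimal sketch of how the devargs registered above, min_burst, deq_burst and refill_once, alongside the existing sched_quanta and credit_quanta, might be supplied when creating the sw eventdev from application code. The vdev name "event_sw0" and all values are illustrative assumptions, and rte_eal_init() is assumed to have been called first.

#include <rte_bus_vdev.h>

static int
create_sw_evdev(void)
{
	/* Roughly equivalent to passing
	 * --vdev="event_sw0,sched_quanta=64,credit_quanta=32,min_burst=8,deq_burst=32,refill_once=1"
	 * on the EAL command line; the values here are examples only.
	 */
	return rte_vdev_init("event_sw0",
			"sched_quanta=64,credit_quanta=32,"
			"min_burst=8,deq_burst=32,refill_once=1");
}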