X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Fsw%2Fsw_evdev.c;h=0d8013adf7cb384b92f7327567e9241109ba1232;hb=bbf19e89b87cab524a91f76a89347538a038ceae;hp=f9daf4fcb25356d6c28cc03304b407b2821d5f2c;hpb=561c5c7b7f7e431811a5c3bf46e4adc032e8e67d;p=dpdk.git diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c index f9daf4fcb2..0d8013adf7 100644 --- a/drivers/event/sw/sw_evdev.c +++ b/drivers/event/sw/sw_evdev.c @@ -14,11 +14,15 @@ #include "sw_evdev.h" #include "iq_chunk.h" +#include "event_ring.h" #define EVENTDEV_NAME_SW_PMD event_sw #define NUMA_NODE_ARG "numa_node" #define SCHED_QUANTA_ARG "sched_quanta" #define CREDIT_QUANTA_ARG "credit_quanta" +#define MIN_BURST_SIZE_ARG "min_burst" +#define DEQ_BURST_SIZE_ARG "deq_burst" +#define REFIL_ONCE_ARG "refill_once" static void sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info); @@ -38,12 +42,12 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], /* check for qid map overflow */ if (q->cq_num_mapped_cqs >= RTE_DIM(q->cq_map)) { - rte_errno = -EDQUOT; + rte_errno = EDQUOT; break; } if (p->is_directed && p->num_qids_mapped > 0) { - rte_errno = -EDQUOT; + rte_errno = EDQUOT; break; } @@ -59,12 +63,12 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[], if (q->type == SW_SCHED_TYPE_DIRECT) { /* check directed qids only map to one port */ if (p->num_qids_mapped > 0) { - rte_errno = -EDQUOT; + rte_errno = EDQUOT; break; } /* check port only takes a directed flow */ if (num > 1) { - rte_errno = -EDQUOT; + rte_errno = EDQUOT; break; } @@ -113,9 +117,21 @@ sw_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[], } } } + + p->unlinks_in_progress += unlinked; + rte_smp_mb(); + return unlinked; } +static int +sw_port_unlinks_in_progress(struct rte_eventdev *dev, void *port) +{ + RTE_SET_USED(dev); + struct sw_port *p = port; + return p->unlinks_in_progress; +} + static int sw_port_setup(struct rte_eventdev *dev, uint8_t port_id, const struct rte_event_port_conf *conf) @@ -163,7 +179,8 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id, } p->inflight_max = conf->new_event_threshold; - p->implicit_release = !conf->disable_implicit_release; + p->implicit_release = !(conf->event_port_cfg & + RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL); /* check if ring exists, same as rx_worker above */ snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id, @@ -227,7 +244,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, qid->priority = queue_conf->priority; if (qid->type == RTE_SCHED_TYPE_ORDERED) { - char ring_name[RTE_RING_NAMESIZE]; uint32_t window_size; /* rte_ring and window_size_mask require require window_size to @@ -258,18 +274,8 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, 0, window_size * sizeof(qid->reorder_buffer[0])); - snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist", - dev_id, idx); - - /* lookup the ring, and if it already exists, free it */ - struct rte_ring *cleanup = rte_ring_lookup(ring_name); - if (cleanup) - rte_ring_free(cleanup); - - qid->reorder_buffer_freelist = rte_ring_create(ring_name, - window_size, - socket_id, - RING_F_SP_ENQ | RING_F_SC_DEQ); + qid->reorder_buffer_freelist = rob_ring_create(window_size, + socket_id); if (!qid->reorder_buffer_freelist) { SW_LOG_DBG("freelist ring create failed"); goto cleanup; @@ -280,8 +286,8 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type, * that many. 
*/ for (i = 0; i < window_size - 1; i++) { - if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist, - &qid->reorder_buffer[i]) < 0) + if (rob_ring_enqueue(qid->reorder_buffer_freelist, + &qid->reorder_buffer[i]) != 1) goto cleanup; } @@ -300,7 +306,7 @@ cleanup: } if (qid->reorder_buffer_freelist) { - rte_ring_free(qid->reorder_buffer_freelist); + rob_ring_free(qid->reorder_buffer_freelist); qid->reorder_buffer_freelist = NULL; } @@ -315,7 +321,7 @@ sw_queue_release(struct rte_eventdev *dev, uint8_t id) if (qid->type == RTE_SCHED_TYPE_ORDERED) { rte_free(qid->reorder_buffer); - rte_ring_free(qid->reorder_buffer_freelist); + rob_ring_free(qid->reorder_buffer_freelist); } memset(qid, 0, sizeof(*qid)); } @@ -361,9 +367,99 @@ sw_init_qid_iqs(struct sw_evdev *sw) } } +static int +sw_qids_empty(struct sw_evdev *sw) +{ + unsigned int i, j; + + for (i = 0; i < sw->qid_count; i++) { + for (j = 0; j < SW_IQS_MAX; j++) { + if (iq_count(&sw->qids[i].iq[j])) + return 0; + } + } + + return 1; +} + +static int +sw_ports_empty(struct sw_evdev *sw) +{ + unsigned int i; + + for (i = 0; i < sw->port_count; i++) { + if ((rte_event_ring_count(sw->ports[i].rx_worker_ring)) || + rte_event_ring_count(sw->ports[i].cq_worker_ring)) + return 0; + } + + return 1; +} + +static void +sw_drain_ports(struct rte_eventdev *dev) +{ + struct sw_evdev *sw = sw_pmd_priv(dev); + eventdev_stop_flush_t flush; + unsigned int i; + uint8_t dev_id; + void *arg; + + flush = dev->dev_ops->dev_stop_flush; + dev_id = dev->data->dev_id; + arg = dev->data->dev_stop_flush_arg; + + for (i = 0; i < sw->port_count; i++) { + struct rte_event ev; + + while (rte_event_dequeue_burst(dev_id, i, &ev, 1, 0)) { + if (flush) + flush(dev_id, ev, arg); + + ev.op = RTE_EVENT_OP_RELEASE; + rte_event_enqueue_burst(dev_id, i, &ev, 1); + } + } +} + +static void +sw_drain_queue(struct rte_eventdev *dev, struct sw_iq *iq) +{ + struct sw_evdev *sw = sw_pmd_priv(dev); + eventdev_stop_flush_t flush; + uint8_t dev_id; + void *arg; + + flush = dev->dev_ops->dev_stop_flush; + dev_id = dev->data->dev_id; + arg = dev->data->dev_stop_flush_arg; + + while (iq_count(iq) > 0) { + struct rte_event ev; + + iq_dequeue_burst(sw, iq, &ev, 1); + + if (flush) + flush(dev_id, ev, arg); + } +} + +static void +sw_drain_queues(struct rte_eventdev *dev) +{ + struct sw_evdev *sw = sw_pmd_priv(dev); + unsigned int i, j; + + for (i = 0; i < sw->qid_count; i++) { + for (j = 0; j < SW_IQS_MAX; j++) + sw_drain_queue(dev, &sw->qids[i].iq[j]); + } +} + static void -sw_clean_qid_iqs(struct sw_evdev *sw) +sw_clean_qid_iqs(struct rte_eventdev *dev) { + struct sw_evdev *sw = sw_pmd_priv(dev); int i, j; /* Release the IQ memory of all configured qids */ @@ -406,7 +502,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id, port_conf->new_event_threshold = 1024; port_conf->dequeue_depth = 16; port_conf->enqueue_depth = 16; - port_conf->disable_implicit_release = 0; + port_conf->event_port_cfg = 0; } static int @@ -464,6 +560,33 @@ sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev, return 0; } +static int +sw_timer_adapter_caps_get(const struct rte_eventdev *dev, + uint64_t flags, + uint32_t *caps, + const struct rte_event_timer_adapter_ops **ops) +{ + RTE_SET_USED(dev); + RTE_SET_USED(flags); + *caps = 0; + + /* Use default SW ops */ + *ops = NULL; + + return 0; +} + +static int +sw_crypto_adapter_caps_get(const struct rte_eventdev *dev, + const struct rte_cryptodev *cdev, + uint32_t *caps) +{ + RTE_SET_USED(dev); + RTE_SET_USED(cdev); + *caps = RTE_EVENT_CRYPTO_ADAPTER_SW_CAP; + 
return 0; +} + static void sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info) { @@ -486,7 +609,8 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info) RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE| RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK | RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT | - RTE_EVENT_DEV_CAP_NONSEQ_MODE), + RTE_EVENT_DEV_CAP_NONSEQ_MODE | + RTE_EVENT_DEV_CAP_CARRY_FLOW_ID), }; *info = evdev_sw_info; @@ -595,11 +719,11 @@ sw_dump(struct rte_eventdev *dev, FILE *f) qid->stats.rx_pkts, qid->stats.rx_dropped, qid->stats.tx_pkts); if (qid->type == RTE_SCHED_TYPE_ORDERED) { - struct rte_ring *rob_buf_free = + struct rob_ring *rob_buf_free = qid->reorder_buffer_freelist; if (rob_buf_free) fprintf(f, "\tReorder entries in use: %u\n", - rte_ring_free_count(rob_buf_free)); + rob_ring_free_count(rob_buf_free)); else fprintf(f, "\tReorder buffer not initialized\n"); @@ -702,10 +826,30 @@ static void sw_stop(struct rte_eventdev *dev) { struct sw_evdev *sw = sw_pmd_priv(dev); - sw_clean_qid_iqs(sw); + int32_t runstate; + + /* Stop the scheduler if it's running */ + runstate = rte_service_runstate_get(sw->service_id); + if (runstate == 1) + rte_service_runstate_set(sw->service_id, 0); + + while (rte_service_may_be_active(sw->service_id)) + rte_pause(); + + /* Flush all events out of the device */ + while (!(sw_qids_empty(sw) && sw_ports_empty(sw))) { + sw_event_schedule(dev); + sw_drain_ports(dev); + sw_drain_queues(dev); + } + + sw_clean_qid_iqs(dev); sw_xstats_uninit(sw); sw->started = 0; rte_smp_wmb(); + + if (runstate == 1) + rte_service_runstate_set(sw->service_id, 1); } static int @@ -761,6 +905,35 @@ set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque) return 0; } +static int +set_deq_burst_sz(const char *key __rte_unused, const char *value, void *opaque) +{ + int *deq_burst_sz = opaque; + *deq_burst_sz = atoi(value); + if (*deq_burst_sz < 0 || *deq_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE) + return -1; + return 0; +} + +static int +set_min_burst_sz(const char *key __rte_unused, const char *value, void *opaque) +{ + int *min_burst_sz = opaque; + *min_burst_sz = atoi(value); + if (*min_burst_sz < 0 || *min_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE) + return -1; + return 0; +} + +static int +set_refill_once(const char *key __rte_unused, const char *value, void *opaque) +{ + int *refill_once_per_call = opaque; + *refill_once_per_call = atoi(value); + if (*refill_once_per_call < 0 || *refill_once_per_call > 1) + return -1; + return 0; +} static int32_t sw_sched_service_func(void *args) { @@ -772,7 +945,7 @@ static int32_t sw_sched_service_func(void *args) static int sw_probe(struct rte_vdev_device *vdev) { - static const struct rte_eventdev_ops evdev_sw_ops = { + static struct rte_eventdev_ops evdev_sw_ops = { .dev_configure = sw_dev_configure, .dev_infos_get = sw_info_get, .dev_close = sw_close, @@ -788,9 +961,14 @@ sw_probe(struct rte_vdev_device *vdev) .port_release = sw_port_release, .port_link = sw_port_link, .port_unlink = sw_port_unlink, + .port_unlinks_in_progress = sw_port_unlinks_in_progress, .eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get, + .timer_adapter_caps_get = sw_timer_adapter_caps_get, + + .crypto_adapter_caps_get = sw_crypto_adapter_caps_get, + .xstats_get = sw_xstats_get, .xstats_get_names = sw_xstats_get_names, .xstats_get_by_name = sw_xstats_get_by_name, @@ -803,6 +981,9 @@ sw_probe(struct rte_vdev_device *vdev) NUMA_NODE_ARG, SCHED_QUANTA_ARG, CREDIT_QUANTA_ARG, + MIN_BURST_SIZE_ARG, + DEQ_BURST_SIZE_ARG, + 
REFIL_ONCE_ARG, NULL }; const char *name; @@ -812,6 +993,9 @@ sw_probe(struct rte_vdev_device *vdev) int socket_id = rte_socket_id(); int sched_quanta = SW_DEFAULT_SCHED_QUANTA; int credit_quanta = SW_DEFAULT_CREDIT_QUANTA; + int min_burst_size = 1; + int deq_burst_size = SCHED_DEQUEUE_DEFAULT_BURST_SIZE; + int refill_once = 0; name = rte_vdev_device_name(vdev); params = rte_vdev_device_args(vdev); @@ -853,13 +1037,46 @@ sw_probe(struct rte_vdev_device *vdev) return ret; } + ret = rte_kvargs_process(kvlist, MIN_BURST_SIZE_ARG, + set_min_burst_sz, &min_burst_size); + if (ret != 0) { + SW_LOG_ERR( + "%s: Error parsing minimum burst size parameter", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, DEQ_BURST_SIZE_ARG, + set_deq_burst_sz, &deq_burst_size); + if (ret != 0) { + SW_LOG_ERR( + "%s: Error parsing dequeue burst size parameter", + name); + rte_kvargs_free(kvlist); + return ret; + } + + ret = rte_kvargs_process(kvlist, REFIL_ONCE_ARG, + set_refill_once, &refill_once); + if (ret != 0) { + SW_LOG_ERR( + "%s: Error parsing refill once per call switch", + name); + rte_kvargs_free(kvlist); + return ret; + } + rte_kvargs_free(kvlist); } } SW_LOG_INFO( - "Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n", - name, socket_id, sched_quanta, credit_quanta); + "Creating eventdev sw device %s, numa_node=%d, " + "sched_quanta=%d, credit_quanta=%d " + "min_burst=%d, deq_burst=%d, refill_once=%d\n", + name, socket_id, sched_quanta, credit_quanta, + min_burst_size, deq_burst_size, refill_once); dev = rte_event_pmd_vdev_init(name, sizeof(struct sw_evdev), socket_id); @@ -884,6 +1101,9 @@ sw_probe(struct rte_vdev_device *vdev) /* copy values passed from vdev command line to instance */ sw->credit_update_quanta = credit_quanta; sw->sched_quanta = sched_quanta; + sw->sched_min_burst_size = min_burst_size; + sw->sched_deq_burst_size = deq_burst_size; + sw->refill_once_per_iter = refill_once; /* register service with EAL */ struct rte_service_spec service; @@ -928,4 +1148,7 @@ static struct rte_vdev_driver evdev_sw_pmd_drv = { RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv); RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "= " - SCHED_QUANTA_ARG "=" CREDIT_QUANTA_ARG "="); + SCHED_QUANTA_ARG "=" CREDIT_QUANTA_ARG "=" + MIN_BURST_SIZE_ARG "=" DEQ_BURST_SIZE_ARG "=" + REFIL_ONCE_ARG "="); +RTE_LOG_REGISTER(eventdev_sw_log_level, pmd.event.sw, NOTICE);
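
A minimal usage sketch of the functionality this diff adds, not part of the patch itself: it assumes an application that creates the SW PMD through vdev arguments (exercising the new min_burst, deq_burst and refill_once parameters) and then uses rte_event_port_unlinks_in_progress() to wait for an unlink to complete, the query backed by the new port_unlinks_in_progress op above. The identifiers port_id and queue_id are illustrative placeholders, and device, queue and port setup is elided.

#include <rte_bus_vdev.h>
#include <rte_eventdev.h>
#include <rte_pause.h>

static int
sw_evdev_usage_sketch(uint8_t port_id, uint8_t queue_id)
{
	/* Instantiate the SW eventdev with the devargs this patch adds
	 * (min_burst, deq_burst, refill_once) alongside the existing ones.
	 */
	if (rte_vdev_init("event_sw0",
			  "sched_quanta=64,credit_quanta=32,"
			  "min_burst=8,deq_burst=32,refill_once=1") < 0)
		return -1;

	int dev_id = rte_event_dev_get_dev_id("event_sw0");
	if (dev_id < 0)
		return -1;

	/* ... rte_event_dev_configure(), queue/port setup and
	 * rte_event_dev_start() elided ...
	 */

	/* Request an unlink, then poll the unlinks-in-progress counter until
	 * the scheduler has completed the unlink for this port.
	 */
	uint8_t queues[] = { queue_id };
	if (rte_event_port_unlink(dev_id, port_id, queues, 1) != 1)
		return -1;

	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
		rte_pause();

	return 0;
}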