X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Fsw%2Fsw_evdev.c;h=0d8013adf7cb384b92f7327567e9241109ba1232;hb=75d113136f3869249987dc2ce46a96c5df9a3438;hp=fb8e8bebbbfed046c1e699b4a428c3fbd85c80d9;hpb=0e2132445930a6fc70f99a9306f76f7001eefb49;p=dpdk.git

diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index fb8e8bebbb..0d8013adf7 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -14,11 +14,15 @@
 
 #include "sw_evdev.h"
 #include "iq_chunk.h"
+#include "event_ring.h"
 
 #define EVENTDEV_NAME_SW_PMD event_sw
 #define NUMA_NODE_ARG "numa_node"
 #define SCHED_QUANTA_ARG "sched_quanta"
 #define CREDIT_QUANTA_ARG "credit_quanta"
+#define MIN_BURST_SIZE_ARG "min_burst"
+#define DEQ_BURST_SIZE_ARG "deq_burst"
+#define REFIL_ONCE_ARG "refill_once"
 
 static void
 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);
@@ -175,7 +179,8 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	}
 
 	p->inflight_max = conf->new_event_threshold;
-	p->implicit_release = !conf->disable_implicit_release;
+	p->implicit_release = !(conf->event_port_cfg &
+			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL);
 
 	/* check if ring exists, same as rx_worker above */
 	snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -239,7 +244,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	qid->priority = queue_conf->priority;
 
 	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
-		char ring_name[RTE_RING_NAMESIZE];
 		uint32_t window_size;
 
 		/* rte_ring and window_size_mask require require window_size to
@@ -270,18 +274,8 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 				0,
 				window_size * sizeof(qid->reorder_buffer[0]));
 
-		snprintf(ring_name, sizeof(ring_name), "sw%d_q%d_freelist",
-				dev_id, idx);
-
-		/* lookup the ring, and if it already exists, free it */
-		struct rte_ring *cleanup = rte_ring_lookup(ring_name);
-		if (cleanup)
-			rte_ring_free(cleanup);
-
-		qid->reorder_buffer_freelist = rte_ring_create(ring_name,
-				window_size,
-				socket_id,
-				RING_F_SP_ENQ | RING_F_SC_DEQ);
+		qid->reorder_buffer_freelist = rob_ring_create(window_size,
+				socket_id);
 		if (!qid->reorder_buffer_freelist) {
 			SW_LOG_DBG("freelist ring create failed");
 			goto cleanup;
@@ -292,8 +286,8 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 		 * that many.
 		 */
 		for (i = 0; i < window_size - 1; i++) {
-			if (rte_ring_sp_enqueue(qid->reorder_buffer_freelist,
-						&qid->reorder_buffer[i]) < 0)
+			if (rob_ring_enqueue(qid->reorder_buffer_freelist,
+					&qid->reorder_buffer[i]) != 1)
 				goto cleanup;
 		}
 
@@ -312,7 +306,7 @@ cleanup:
 	}
 
 	if (qid->reorder_buffer_freelist) {
-		rte_ring_free(qid->reorder_buffer_freelist);
+		rob_ring_free(qid->reorder_buffer_freelist);
 		qid->reorder_buffer_freelist = NULL;
 	}
 
@@ -327,7 +321,7 @@ sw_queue_release(struct rte_eventdev *dev, uint8_t id)
 
 	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
 		rte_free(qid->reorder_buffer);
-		rte_ring_free(qid->reorder_buffer_freelist);
+		rob_ring_free(qid->reorder_buffer_freelist);
 	}
 	memset(qid, 0, sizeof(*qid));
 }
@@ -508,7 +502,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->new_event_threshold = 1024;
 	port_conf->dequeue_depth = 16;
 	port_conf->enqueue_depth = 16;
-	port_conf->disable_implicit_release = 0;
+	port_conf->event_port_cfg = 0;
 }
 
 static int
@@ -615,7 +609,8 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 				RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE|
 				RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
 				RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
-				RTE_EVENT_DEV_CAP_NONSEQ_MODE),
+				RTE_EVENT_DEV_CAP_NONSEQ_MODE |
+				RTE_EVENT_DEV_CAP_CARRY_FLOW_ID),
 	};
 
 	*info = evdev_sw_info;
@@ -724,11 +719,11 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 			qid->stats.rx_pkts, qid->stats.rx_dropped,
 			qid->stats.tx_pkts);
 		if (qid->type == RTE_SCHED_TYPE_ORDERED) {
-			struct rte_ring *rob_buf_free =
+			struct rob_ring *rob_buf_free =
 				qid->reorder_buffer_freelist;
 			if (rob_buf_free)
 				fprintf(f, "\tReorder entries in use: %u\n",
-					rte_ring_free_count(rob_buf_free));
+					rob_ring_free_count(rob_buf_free));
 			else
 				fprintf(f,
 					"\tReorder buffer not initialized\n");
@@ -910,6 +905,35 @@ set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
 	return 0;
 }
 
+static int
+set_deq_burst_sz(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *deq_burst_sz = opaque;
+	*deq_burst_sz = atoi(value);
+	if (*deq_burst_sz < 0 || *deq_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE)
+		return -1;
+	return 0;
+}
+
+static int
+set_min_burst_sz(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *min_burst_sz = opaque;
+	*min_burst_sz = atoi(value);
+	if (*min_burst_sz < 0 || *min_burst_sz > SCHED_DEQUEUE_MAX_BURST_SIZE)
+		return -1;
+	return 0;
+}
+
+static int
+set_refill_once(const char *key __rte_unused, const char *value, void *opaque)
+{
+	int *refill_once_per_call = opaque;
+	*refill_once_per_call = atoi(value);
+	if (*refill_once_per_call < 0 || *refill_once_per_call > 1)
+		return -1;
+	return 0;
+}
 static int32_t
 sw_sched_service_func(void *args)
 {
@@ -957,6 +981,9 @@ sw_probe(struct rte_vdev_device *vdev)
 		NUMA_NODE_ARG,
 		SCHED_QUANTA_ARG,
 		CREDIT_QUANTA_ARG,
+		MIN_BURST_SIZE_ARG,
+		DEQ_BURST_SIZE_ARG,
+		REFIL_ONCE_ARG,
 		NULL
 	};
 	const char *name;
@@ -966,6 +993,9 @@ sw_probe(struct rte_vdev_device *vdev)
 	int socket_id = rte_socket_id();
 	int sched_quanta = SW_DEFAULT_SCHED_QUANTA;
 	int credit_quanta = SW_DEFAULT_CREDIT_QUANTA;
+	int min_burst_size = 1;
+	int deq_burst_size = SCHED_DEQUEUE_DEFAULT_BURST_SIZE;
+	int refill_once = 0;
 
 	name = rte_vdev_device_name(vdev);
 	params = rte_vdev_device_args(vdev);
@@ -1007,13 +1037,46 @@ sw_probe(struct rte_vdev_device *vdev)
 				return ret;
 			}
 
+			ret = rte_kvargs_process(kvlist, MIN_BURST_SIZE_ARG,
+					set_min_burst_sz, &min_burst_size);
+			if (ret != 0) {
+				SW_LOG_ERR(
+					"%s: Error parsing minimum burst size parameter",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, DEQ_BURST_SIZE_ARG,
+					set_deq_burst_sz, &deq_burst_size);
+			if (ret != 0) {
+				SW_LOG_ERR(
+					"%s: Error parsing dequeue burst size parameter",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
+			ret = rte_kvargs_process(kvlist, REFIL_ONCE_ARG,
+					set_refill_once, &refill_once);
+			if (ret != 0) {
+				SW_LOG_ERR(
+					"%s: Error parsing refill once per call switch",
+					name);
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+
 			rte_kvargs_free(kvlist);
 		}
 	}
 
 	SW_LOG_INFO(
-		"Creating eventdev sw device %s, numa_node=%d, sched_quanta=%d, credit_quanta=%d\n",
-		name, socket_id, sched_quanta, credit_quanta);
+		"Creating eventdev sw device %s, numa_node=%d, "
+		"sched_quanta=%d, credit_quanta=%d "
+		"min_burst=%d, deq_burst=%d, refill_once=%d\n",
+		name, socket_id, sched_quanta, credit_quanta,
+		min_burst_size, deq_burst_size, refill_once);
 
 	dev = rte_event_pmd_vdev_init(name,
 			sizeof(struct sw_evdev), socket_id);
@@ -1038,6 +1101,9 @@ sw_probe(struct rte_vdev_device *vdev)
 	/* copy values passed from vdev command line to instance */
 	sw->credit_update_quanta = credit_quanta;
 	sw->sched_quanta = sched_quanta;
+	sw->sched_min_burst_size = min_burst_size;
+	sw->sched_deq_burst_size = deq_burst_size;
+	sw->refill_once_per_iter = refill_once;
 
 	/* register service with EAL */
 	struct rte_service_spec service;
@@ -1082,14 +1148,7 @@ static struct rte_vdev_driver evdev_sw_pmd_drv = {
 
 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_SW_PMD, evdev_sw_pmd_drv);
 RTE_PMD_REGISTER_PARAM_STRING(event_sw, NUMA_NODE_ARG "=<int> "
-		SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>");
-
-/* declared extern in header, for access from other .c files */
-int eventdev_sw_log_level;
-
-RTE_INIT(evdev_sw_init_log)
-{
-	eventdev_sw_log_level = rte_log_register("pmd.event.sw");
-	if (eventdev_sw_log_level >= 0)
-		rte_log_set_level(eventdev_sw_log_level, RTE_LOG_NOTICE);
-}
+		SCHED_QUANTA_ARG "=<int>" CREDIT_QUANTA_ARG "=<int>"
+		MIN_BURST_SIZE_ARG "=<int>" DEQ_BURST_SIZE_ARG "=<int>"
+		REFIL_ONCE_ARG "=<int>");
+RTE_LOG_REGISTER(eventdev_sw_log_level, pmd.event.sw, NOTICE);
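---

Usage sketch (editorial note, not part of the patch): the hunks above add three
vdev arguments to the sw PMD (min_burst, deq_burst, refill_once). A minimal
sketch of creating the device with these knobs from application code; the
device name "event_sw0" and the values 8/64/1 are illustrative only, and the
same arguments can equally be passed on the EAL command line as
--vdev=event_sw0,min_burst=8,deq_burst=64,refill_once=1:

#include <rte_eal.h>
#include <rte_bus_vdev.h>
#include <rte_eventdev.h>

int
main(int argc, char **argv)
{
	int dev_id;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* Create the sw eventdev: schedule only once at least 8 events are
	 * available, dequeue up to 64 events per schedule call, and refill
	 * credits only once per service iteration.
	 */
	if (rte_vdev_init("event_sw0",
			"min_burst=8,deq_burst=64,refill_once=1") < 0)
		return -1;

	dev_id = rte_event_dev_get_dev_id("event_sw0");
	if (dev_id < 0)
		return -1;

	/* dev_id can now be configured via rte_event_dev_configure(). */
	return 0;
}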
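The sw_port_setup and sw_port_def_conf hunks track the eventdev API change
that folded the old per-port disable_implicit_release boolean into the
event_port_cfg bit-field. A hedged sketch of how a caller would now request
the same behaviour; port id 0 and starting from the default port config are
assumptions for illustration:

#include <rte_eventdev.h>

static int
setup_port_without_implicit_release(uint8_t dev_id)
{
	struct rte_event_port_conf conf;

	/* Start from the PMD's defaults (event_port_cfg = 0 per the hunk
	 * above), then set the flag that replaced the old boolean field.
	 */
	if (rte_event_port_default_conf_get(dev_id, 0, &conf) < 0)
		return -1;

	/* Formerly: conf.disable_implicit_release = 1; */
	conf.event_port_cfg |= RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	return rte_event_port_setup(dev_id, 0, &conf);
}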