diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index 18963e4100..f9daf4fcb2 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -163,6 +163,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	}
 
 	p->inflight_max = conf->new_event_threshold;
+	p->implicit_release = !conf->disable_implicit_release;
 
 	/* check if ring exists, same as rx_worker above */
 	snprintf(buf, sizeof(buf), "sw%d_p%u, %s", dev->data->dev_id,
@@ -216,9 +217,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	char buf[IQ_ROB_NAMESIZE];
 	struct sw_qid *qid = &sw->qids[idx];
 
-	for (i = 0; i < SW_IQS_MAX; i++)
-		iq_init(sw, &qid->iq[i]);
-
 	/* Initialize the FID structures to no pinning (-1), and zero packets */
 	const struct sw_fid_t fid = {.cq = -1, .pcount = 0};
 	for (i = 0; i < RTE_DIM(qid->fids); i++)
@@ -296,11 +294,6 @@ qid_init(struct sw_evdev *sw, unsigned int idx, int type,
 	return 0;
 
 cleanup:
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		if (qid->iq[i].head)
-			iq_free_chunk(sw, qid->iq[i].head);
-	}
-
 	if (qid->reorder_buffer) {
 		rte_free(qid->reorder_buffer);
 		qid->reorder_buffer = NULL;
@@ -319,13 +312,6 @@ sw_queue_release(struct rte_eventdev *dev, uint8_t id)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
 	struct sw_qid *qid = &sw->qids[id];
-	uint32_t i;
-
-	for (i = 0; i < SW_IQS_MAX; i++) {
-		if (!qid->iq[i].head)
-			continue;
-		iq_free_chunk(sw, qid->iq[i].head);
-	}
 
 	if (qid->type == RTE_SCHED_TYPE_ORDERED) {
 		rte_free(qid->reorder_buffer);
@@ -358,6 +344,41 @@ sw_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
 	return qid_init(sw, queue_id, type, conf);
 }
 
+static void
+sw_init_qid_iqs(struct sw_evdev *sw)
+{
+	int i, j;
+
+	/* Initialize the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		if (!qid->initialized)
+			continue;
+
+		for (j = 0; j < SW_IQS_MAX; j++)
+			iq_init(sw, &qid->iq[j]);
+	}
+}
+
+static void
+sw_clean_qid_iqs(struct sw_evdev *sw)
+{
+	int i, j;
+
+	/* Release the IQ memory of all configured qids */
+	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+		struct sw_qid *qid = &sw->qids[i];
+
+		for (j = 0; j < SW_IQS_MAX; j++) {
+			if (!qid->iq[j].head)
+				continue;
+			iq_free_chunk_list(sw, qid->iq[j].head);
+			qid->iq[j].head = NULL;
+		}
+	}
+}
+
 static void
 sw_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
 		struct rte_event_queue_conf *conf)
@@ -385,6 +406,7 @@ sw_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
 	port_conf->new_event_threshold = 1024;
 	port_conf->dequeue_depth = 16;
 	port_conf->enqueue_depth = 16;
+	port_conf->disable_implicit_release = 0;
 }
 
 static int
@@ -404,7 +426,10 @@ sw_dev_configure(const struct rte_eventdev *dev)
 	num_chunks = ((SW_INFLIGHT_EVENTS_TOTAL/SW_EVS_PER_Q_CHUNK)+1) +
 			sw->qid_count*SW_IQS_MAX*2;
 
-	/* If this is a reconfiguration, free the previous IQ allocation */
+	/* If this is a reconfiguration, free the previous IQ allocation. All
+	 * IQ chunk references were cleaned out of the QIDs in sw_stop(), and
+	 * will be reinitialized in sw_start().
+	 */
 	if (sw->chunks)
 		rte_free(sw->chunks);
 
@@ -454,9 +479,14 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 			.max_event_port_dequeue_depth = MAX_SW_CONS_Q_DEPTH,
 			.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
 			.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
-			.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
-					RTE_EVENT_DEV_CAP_BURST_MODE |
-					RTE_EVENT_DEV_CAP_EVENT_QOS),
+			.event_dev_cap = (
+				RTE_EVENT_DEV_CAP_QUEUE_QOS |
+				RTE_EVENT_DEV_CAP_BURST_MODE |
+				RTE_EVENT_DEV_CAP_EVENT_QOS |
+				RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE |
+				RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
+				RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
+				RTE_EVENT_DEV_CAP_NONSEQ_MODE),
 	};
 
 	*info = evdev_sw_info;
@@ -635,8 +665,8 @@ sw_start(struct rte_eventdev *dev)
 
 	/* check all queues are configured and mapped to ports*/
 	for (i = 0; i < sw->qid_count; i++)
-		if (sw->qids[i].iq[0].head == NULL ||
-				sw->qids[i].cq_num_mapped_cqs == 0) {
+		if (!sw->qids[i].initialized ||
+				sw->qids[i].cq_num_mapped_cqs == 0) {
 			SW_LOG_ERR("Queue %d not configured\n", i);
 			return -ENOLINK;
 		}
@@ -657,6 +687,8 @@ sw_start(struct rte_eventdev *dev)
 		}
 	}
 
+	sw_init_qid_iqs(sw);
+
 	if (sw_xstats_init(sw) < 0)
 		return -EINVAL;
 
@@ -670,6 +702,7 @@ static void
 sw_stop(struct rte_eventdev *dev)
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
+	sw_clean_qid_iqs(sw);
 	sw_xstats_uninit(sw);
 	sw->started = 0;
 	rte_smp_wmb();
@@ -762,6 +795,8 @@ sw_probe(struct rte_vdev_device *vdev)
 			.xstats_get_names = sw_xstats_get_names,
 			.xstats_get_by_name = sw_xstats_get_by_name,
 			.xstats_reset = sw_xstats_reset,
+
+			.dev_selftest = test_sw_eventdev,
 	};
 
 	static const char *const args[] = {
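
Taken together, the hunks move IQ setup out of qid_init() and into device start/stop: sw_start() now populates every configured queue's IQs via sw_init_qid_iqs(), sw_stop() returns all IQ chunks to the pool via sw_clean_qid_iqs(), and queue readiness is tracked with qid->initialized rather than iq[0].head. That is what makes the rte_free() of sw->chunks in sw_dev_configure() safe across a stop/reconfigure/start cycle. Below is a minimal application-side sketch of that cycle against the public eventdev API, also exercising the new disable_implicit_release port option; error handling is trimmed, and dev_id plus the queue/port counts are illustrative placeholders, not values taken from this patch.

#include <rte_eventdev.h>

/* Sketch: stop a running sw eventdev, reconfigure it, and restart it.
 * With this patch, the stop runs sw_clean_qid_iqs(), the configure may
 * rte_free() and reallocate sw->chunks, and the start re-creates the
 * IQs via sw_init_qid_iqs().
 */
static int
reconfigure_sw_evdev(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config cfg = {0};
	struct rte_event_queue_conf qconf;
	struct rte_event_port_conf pconf;
	uint8_t q;

	rte_event_dev_stop(dev_id);
	rte_event_dev_info_get(dev_id, &info);

	cfg.nb_event_queues = 2;
	cfg.nb_event_ports = 1;
	cfg.nb_events_limit = info.max_num_events;
	cfg.nb_event_queue_flows = info.max_event_queue_flows;
	cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	cfg.dequeue_timeout_ns = info.min_dequeue_timeout_ns;
	if (rte_event_dev_configure(dev_id, &cfg) < 0)
		return -1;

	for (q = 0; q < cfg.nb_event_queues; q++) {
		rte_event_queue_default_conf_get(dev_id, q, &qconf);
		if (rte_event_queue_setup(dev_id, q, &qconf) < 0)
			return -1;
	}

	/* Per the sw_port_setup() hunk, implicit release is skipped only
	 * when the port conf requests it; check the capability first.
	 */
	rte_event_port_default_conf_get(dev_id, 0, &pconf);
	if (info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		pconf.disable_implicit_release = 1;
	if (rte_event_port_setup(dev_id, 0, &pconf) < 0)
		return -1;

	/* Link all queues to port 0; sw_start() returns -ENOLINK for any
	 * queue left unmapped.
	 */
	if (rte_event_port_link(dev_id, 0, NULL, NULL, 0) !=
			cfg.nb_event_queues)
		return -1;

	return rte_event_dev_start(dev_id);
}

With implicit release disabled, the application takes over the release step: each event dequeued through that port must eventually be enqueued back with rte_event.op set to RTE_EVENT_OP_RELEASE (or RTE_EVENT_OP_FORWARD), otherwise the port's inflight credits are never returned.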