X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fevent%2Fsw%2Fsw_evdev.c;h=aed8b728fe6a40847cc127a8639e45875f85bece;hb=51e202f0596f3cc2061bd2c69bd1f7bd5f04d4be;hp=a31aaa662573666994b437c922887be5f09031be;hpb=5d2aa461cbcae1b5ede5cee0b0b3a1228c4afc59;p=dpdk.git

diff --git a/drivers/event/sw/sw_evdev.c b/drivers/event/sw/sw_evdev.c
index a31aaa6625..aed8b728fe 100644
--- a/drivers/event/sw/sw_evdev.c
+++ b/drivers/event/sw/sw_evdev.c
@@ -30,6 +30,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
+#include <inttypes.h>
 #include <string.h>
 
 #include <rte_vdev.h>
@@ -37,10 +38,11 @@
 #include <rte_kvargs.h>
 #include <rte_ring.h>
 #include <rte_errno.h>
+#include <rte_event_ring.h>
+#include <rte_service_component.h>
 
 #include "sw_evdev.h"
 #include "iq_ring.h"
-#include "event_ring.h"
 
 #define EVENTDEV_NAME_SW_PMD event_sw
 #define NUMA_NODE_ARG "numa_node"
@@ -90,7 +92,8 @@ sw_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
 		} else if (q->type == RTE_SCHED_TYPE_ORDERED) {
 			p->num_ordered_qids++;
 			p->num_qids_mapped++;
-		} else if (q->type == RTE_SCHED_TYPE_ATOMIC) {
+		} else if (q->type == RTE_SCHED_TYPE_ATOMIC ||
+				q->type == RTE_SCHED_TYPE_PARALLEL) {
 			p->num_qids_mapped++;
 		}
 
@@ -138,7 +141,7 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 {
 	struct sw_evdev *sw = sw_pmd_priv(dev);
 	struct sw_port *p = &sw->ports[port_id];
-	char buf[QE_RING_NAMESIZE];
+	char buf[RTE_RING_NAMESIZE];
 	unsigned int i;
 
 	struct rte_event_dev_info info;
@@ -159,10 +162,19 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 	p->id = port_id;
 	p->sw = sw;
 
-	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
-			"rx_worker_ring");
-	p->rx_worker_ring = qe_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
-			dev->data->socket_id);
+	/* check to see if rings exist - port_setup() can be called multiple
+	 * times legally (assuming device is stopped). If ring exists, free it
+	 * so it gets re-created with the correct size
+	 */
+	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+			port_id, "rx_worker_ring");
+	struct rte_event_ring *existing_ring = rte_event_ring_lookup(buf);
+	if (existing_ring)
+		rte_event_ring_free(existing_ring);
+
+	p->rx_worker_ring = rte_event_ring_create(buf, MAX_SW_PROD_Q_DEPTH,
+			dev->data->socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
 	if (p->rx_worker_ring == NULL) {
 		SW_LOG_ERR("Error creating RX worker ring for port %d\n",
 				port_id);
@@ -171,12 +183,18 @@ sw_port_setup(struct rte_eventdev *dev, uint8_t port_id,
 
 	p->inflight_max = conf->new_event_threshold;
 
-	snprintf(buf, sizeof(buf), "sw%d_%s", dev->data->dev_id,
-			"cq_worker_ring");
-	p->cq_worker_ring = qe_ring_create(buf, conf->dequeue_depth,
-			dev->data->socket_id);
+	/* check if ring exists, same as rx_worker above */
+	snprintf(buf, sizeof(buf), "sw%d_p%u_%s", dev->data->dev_id,
+			port_id, "cq_worker_ring");
+	existing_ring = rte_event_ring_lookup(buf);
+	if (existing_ring)
+		rte_event_ring_free(existing_ring);
+
+	p->cq_worker_ring = rte_event_ring_create(buf, conf->dequeue_depth,
+			dev->data->socket_id,
+			RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
 	if (p->cq_worker_ring == NULL) {
-		qe_ring_destroy(p->rx_worker_ring);
+		rte_event_ring_free(p->rx_worker_ring);
 		SW_LOG_ERR("Error creating CQ worker ring for port %d\n",
 				port_id);
 		return -1;
@@ -202,8 +220,8 @@ sw_port_release(void *port)
 	if (p == NULL)
 		return;
 
-	qe_ring_destroy(p->rx_worker_ring);
-	qe_ring_destroy(p->cq_worker_ring);
+	rte_event_ring_free(p->rx_worker_ring);
+	rte_event_ring_free(p->cq_worker_ring);
 	memset(p, 0, sizeof(*p));
 }
 
@@ -419,6 +437,19 @@ sw_dev_configure(const struct rte_eventdev *dev)
 	return 0;
 }
 
+struct rte_eth_dev;
+
+static int
+sw_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+			const struct rte_eth_dev *eth_dev,
+			uint32_t *caps)
+{
+	RTE_SET_USED(dev);
+	RTE_SET_USED(eth_dev);
+	*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	return 0;
+}
+
 static void
 sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 {
@@ -435,6 +466,7 @@ sw_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
 			.max_event_port_enqueue_depth = MAX_SW_PROD_Q_DEPTH,
 			.max_num_events = SW_INFLIGHT_EVENTS_TOTAL,
 			.event_dev_cap = (RTE_EVENT_DEV_CAP_QUEUE_QOS |
+					RTE_EVENT_DEV_CAP_BURST_MODE |
 					RTE_EVENT_DEV_CAP_EVENT_QOS),
 	};
 
@@ -509,8 +541,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 		fprintf(f, "\n");
 
 		if (p->rx_worker_ring) {
-			uint64_t used = qe_ring_count(p->rx_worker_ring);
-			uint64_t space = qe_ring_free_count(p->rx_worker_ring);
+			uint64_t used = rte_event_ring_count(p->rx_worker_ring);
+			uint64_t space = rte_event_ring_free_count(
+					p->rx_worker_ring);
 			const char *col = (space == 0) ? COL_RED : COL_RESET;
 			fprintf(f, "\t%srx ring used: %4"PRIu64"\tfree: %4"
 					PRIu64 COL_RESET"\n", col, used, space);
@@ -518,8 +551,9 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 			fprintf(f, "\trx ring not initialized.\n");
 
 		if (p->cq_worker_ring) {
-			uint64_t used = qe_ring_count(p->cq_worker_ring);
-			uint64_t space = qe_ring_free_count(p->cq_worker_ring);
+			uint64_t used = rte_event_ring_count(p->cq_worker_ring);
+			uint64_t space = rte_event_ring_free_count(
+					p->cq_worker_ring);
 			const char *col = (space == 0) ? COL_RED : COL_RESET;
 			fprintf(f, "\t%scq ring used: %4"PRIu64"\tfree: %4"
 					PRIu64 COL_RESET"\n", col, used, space);
@@ -559,12 +593,13 @@ sw_dump(struct rte_eventdev *dev, FILE *f)
 				inflights += qid->fids[flow].pcount;
 			}
 
-		uint32_t cq;
-		fprintf(f, "\tInflights: %u\tFlows pinned per port: ",
-				inflights);
-		for (cq = 0; cq < sw->port_count; cq++)
-			fprintf(f, "%d ", affinities_per_port[cq]);
-		fprintf(f, "\n");
+		uint32_t port;
+		fprintf(f, "\tPer Port Stats:\n");
+		for (port = 0; port < sw->port_count; port++) {
+			fprintf(f, "\t  Port %d: Pkts: %"PRIu64, port,
+					qid->to_port[port]);
+			fprintf(f, "\tFlows: %d\n", affinities_per_port[port]);
+		}
 
 		uint32_t iq;
 		uint32_t iq_printed = 0;
@@ -593,6 +628,12 @@ sw_start(struct rte_eventdev *dev)
 {
 	unsigned int i, j;
 	struct sw_evdev *sw = sw_pmd_priv(dev);
+
+	/* check a service core is mapped to this service */
+	if (!rte_service_runstate_get(sw->service_id))
+		SW_LOG_ERR("Warning: No Service core enabled on service %s\n",
+				sw->service_name);
+
 	/* check all ports are set up */
 	for (i = 0; i < sw->port_count; i++)
 		if (sw->ports[i].rx_worker_ring == NULL) {
@@ -695,6 +736,14 @@ set_credit_quanta(const char *key __rte_unused, const char *value, void *opaque)
 	return 0;
 }
 
+
+static int32_t sw_sched_service_func(void *args)
+{
+	struct rte_eventdev *dev = args;
+	sw_event_schedule(dev);
+	return 0;
+}
+
 static int
 sw_probe(struct rte_vdev_device *vdev)
 {
@@ -715,6 +764,8 @@ sw_probe(struct rte_vdev_device *vdev)
 			.port_link = sw_port_link,
 			.port_unlink = sw_port_unlink,
 
+			.eth_rx_adapter_caps_get = sw_eth_rx_adapter_caps_get,
+
 			.xstats_get = sw_xstats_get,
 			.xstats_get_names = sw_xstats_get_names,
 			.xstats_get_by_name = sw_xstats_get_by_name,
@@ -792,6 +843,8 @@ sw_probe(struct rte_vdev_device *vdev)
 	dev->dev_ops = &evdev_sw_ops;
 	dev->enqueue = sw_event_enqueue;
 	dev->enqueue_burst = sw_event_enqueue_burst;
+	dev->enqueue_new_burst = sw_event_enqueue_burst;
+	dev->enqueue_forward_burst = sw_event_enqueue_burst;
 	dev->dequeue = sw_event_dequeue;
 	dev->dequeue_burst = sw_event_dequeue_burst;
 	dev->schedule = sw_event_schedule;
@@ -806,6 +859,22 @@ sw_probe(struct rte_vdev_device *vdev)
 	sw->credit_update_quanta = credit_quanta;
 	sw->sched_quanta = sched_quanta;
 
+	/* register service with EAL */
+	struct rte_service_spec service;
+	memset(&service, 0, sizeof(struct rte_service_spec));
+	snprintf(service.name, sizeof(service.name), "%s_service", name);
+	snprintf(sw->service_name, sizeof(sw->service_name), "%s_service",
+			name);
+	service.socket_id = socket_id;
+	service.callback = sw_sched_service_func;
+	service.callback_userdata = (void *)dev;
+
+	int32_t ret = rte_service_component_register(&service, &sw->service_id);
+	if (ret) {
+		SW_LOG_ERR("service register() failed");
+		return -ENOEXEC;
+	}
+
 	return 0;
 }
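
Two notes on the APIs this diff moves the sw PMD onto, with small illustrative sketches that are not part of the patch.

First, the worker rings: rte_event_ring stores struct rte_event entries directly, and RING_F_EXACT_SZ makes the created ring hold exactly the requested number of events rather than a power of two minus one, which is why port_setup() can size the CQ ring straight from conf->dequeue_depth. A minimal sketch of creating and draining such a ring follows; the ring name "example_ring", the size of 128, and the burst size of 32 are placeholder values, not taken from the driver:

    #include <rte_event_ring.h>
    #include <rte_ring.h>
    #include <rte_lcore.h>

    /* Illustrative helper only; name and size are placeholders. */
    static struct rte_event_ring *
    example_ring_create(void)
    {
            /* RING_F_EXACT_SZ: usable capacity is exactly 128 events */
            return rte_event_ring_create("example_ring", 128, rte_socket_id(),
                            RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ);
    }

    static unsigned int
    example_ring_drain(struct rte_event_ring *r)
    {
            struct rte_event evs[32];
            uint16_t available;

            /* dequeue up to 32 events; 'available' reports what remains */
            return rte_event_ring_dequeue_burst(r, evs, 32, &available);
    }

The lookup/free pair added in port_setup() exists so that a stop, reconfigure, and start cycle can re-create the rings under the same name with a new size.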
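Second, the scheduling service: sw_probe() now registers sw_sched_service_func() with the EAL, and sw_start() only warns when no service core has been enabled for it. A hedged sketch of how an application might map a service lcore to that service; it assumes the rte_event_dev_service_id_get() lookup that accompanies the service API, and example_map_sched_core(), dev_id, and service_lcore are placeholder names:

    #include <errno.h>
    #include <rte_eventdev.h>
    #include <rte_service.h>

    /* Illustrative only: run the sw PMD's scheduling service on one lcore. */
    static int
    example_map_sched_core(uint8_t dev_id, uint32_t service_lcore)
    {
            uint32_t service_id;
            int ret;

            /* ask the eventdev which service id it registered */
            ret = rte_event_dev_service_id_get(dev_id, &service_id);
            if (ret != 0)
                    return ret; /* -ESRCH: device needs no service core */

            ret = rte_service_lcore_add(service_lcore);
            if (ret != 0 && ret != -EALREADY)
                    return ret;

            /* map the service to this lcore, mark it runnable, start the loop */
            rte_service_map_lcore_set(service_id, service_lcore, 1);
            rte_service_runstate_set(service_id, 1);
            return rte_service_lcore_start(service_lcore);
    }

Without a mapped service core (or the application driving the schedule function itself), events are never moved from the RX rings to the CQs, which is the condition the new warning in sw_start() points at.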