diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c
index c7452f0e26..e706688c8d 100644
--- a/lib/librte_eventdev/rte_eventdev.c
+++ b/lib/librte_eventdev/rte_eventdev.c
@@ -45,7 +45,6 @@
 #include <rte_log.h>
 #include <rte_debug.h>
 #include <rte_dev.h>
-#include <rte_pci.h>
 #include <rte_memory.h>
 #include <rte_memcpy.h>
 #include <rte_memzone.h>
@@ -125,9 +124,7 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
 
 	dev_info->dequeue_timeout_ns = dev->data->dev_conf.dequeue_timeout_ns;
 
-	dev_info->pci_dev = dev->pci_dev;
-	if (dev->driver)
-		dev_info->driver_name = dev->driver->pci_drv.driver.name;
+	dev_info->dev = dev->dev;
 	return 0;
 }
 
@@ -190,6 +187,8 @@ rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues)
 	return 0;
 }
 
+#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
+
 static inline int
 rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 {
@@ -251,6 +250,9 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 				"nb_ports %u", nb_ports);
 			return -(ENOMEM);
 		}
+		for (i = 0; i < nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV; i++)
+			dev->data->links_map[i] =
+				EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 	} else if (dev->data->ports != NULL && nb_ports != 0) {/* re-config */
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_release, -ENOTSUP);
 
@@ -305,6 +307,10 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 
 		if (nb_ports > old_nb_ports) {
 			uint8_t new_ps = nb_ports - old_nb_ports;
+			unsigned int old_links_map_end =
+				old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
+			unsigned int links_map_end =
+				nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV;
 
 			memset(ports + old_nb_ports, 0,
 				sizeof(ports[0]) * new_ps);
@@ -312,9 +318,9 @@ rte_event_dev_port_config(struct rte_eventdev *dev, uint8_t nb_ports)
 				sizeof(ports_dequeue_depth[0]) * new_ps);
 			memset(ports_enqueue_depth + old_nb_ports, 0,
 				sizeof(ports_enqueue_depth[0]) * new_ps);
-			memset(links_map +
-				(old_nb_ports * RTE_EVENT_MAX_QUEUES_PER_DEV),
-				0, sizeof(ports_enqueue_depth[0]) * new_ps);
+			for (i = old_links_map_end; i < links_map_end; i++)
+				links_map[i] =
+					EVENT_QUEUE_SERVICE_PRIORITY_INVALID;
 		}
 
 		dev->data->ports = ports;
@@ -359,10 +365,11 @@ rte_event_dev_configure(uint8_t dev_id,
 	(*dev->dev_ops->dev_infos_get)(dev, &info);
 
 	/* Check dequeue_timeout_ns value is in limit */
-	if (!dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
-		if (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
+	if (!(dev_conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT)) {
+		if (dev_conf->dequeue_timeout_ns &&
+		    (dev_conf->dequeue_timeout_ns < info.min_dequeue_timeout_ns
 			|| dev_conf->dequeue_timeout_ns >
-				info.max_dequeue_timeout_ns) {
+				info.max_dequeue_timeout_ns)) {
 			RTE_EDEV_LOG_ERR("dev%d invalid dequeue_timeout_ns=%d"
 			" min_dequeue_timeout_ns=%d max_dequeue_timeout_ns=%d",
 			dev_id, dev_conf->dequeue_timeout_ns,
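A note on the hunk above: in the old condition the "!" bound tighter than "&", so the test was only true when event_dev_cfg was exactly zero and the range check was skipped whenever any other config flag was set. The rewritten check also accepts dequeue_timeout_ns == 0, which the API reserves for "use the device default timeout". Below is a minimal sketch of an application-side configure call that satisfies the new validation; the helper name is hypothetical, everything else is public eventdev API:

	#include <rte_eventdev.h>

	/* Clamp a non-zero requested timeout into the advertised range;
	 * 0 is passed through and now means "device default". */
	static int
	configure_with_timeout(uint8_t dev_id, uint32_t req_ns)
	{
		struct rte_event_dev_info info;
		struct rte_event_dev_config cfg = {0};

		if (rte_event_dev_info_get(dev_id, &info) < 0)
			return -1;

		if (req_ns != 0) {
			if (req_ns < info.min_dequeue_timeout_ns)
				req_ns = info.min_dequeue_timeout_ns;
			if (req_ns > info.max_dequeue_timeout_ns)
				req_ns = info.max_dequeue_timeout_ns;
		}

		cfg.dequeue_timeout_ns = req_ns;
		cfg.nb_events_limit = info.max_num_events;
		cfg.nb_event_queues = 1;
		cfg.nb_event_ports = 1;
		cfg.nb_event_queue_flows = info.max_event_queue_flows;
		cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
		cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;

		return rte_event_dev_configure(dev_id, &cfg);
	}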
@@ -420,8 +427,9 @@ rte_event_dev_configure(uint8_t dev_id,
 			dev_id);
 		return -EINVAL;
 	}
-	if (dev_conf->nb_event_port_dequeue_depth >
-			info.max_event_port_dequeue_depth) {
+	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+		(dev_conf->nb_event_port_dequeue_depth >
+			info.max_event_port_dequeue_depth)) {
 		RTE_EDEV_LOG_ERR("dev%d nb_dq_depth=%d > max_dq_depth=%d",
 		dev_id, dev_conf->nb_event_port_dequeue_depth,
 		info.max_event_port_dequeue_depth);
@@ -434,8 +442,9 @@ rte_event_dev_configure(uint8_t dev_id,
 			dev_id);
 		return -EINVAL;
 	}
-	if (dev_conf->nb_event_port_enqueue_depth >
-			info.max_event_port_enqueue_depth) {
+	if ((info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) &&
+		(dev_conf->nb_event_port_enqueue_depth >
+			info.max_event_port_enqueue_depth)) {
 		RTE_EDEV_LOG_ERR("dev%d nb_enq_depth=%d > max_enq_depth=%d",
 		dev_id, dev_conf->nb_event_port_enqueue_depth,
 		info.max_event_port_enqueue_depth);
@@ -593,7 +602,6 @@ rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->queue_def_conf,
 					-ENOTSUP);
 		(*dev->dev_ops->queue_def_conf)(dev, queue_id, &def_conf);
-		def_conf.event_queue_cfg = RTE_EVENT_QUEUE_CFG_DEFAULT;
 		queue_conf = &def_conf;
 	}
 
@@ -798,7 +806,7 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
 	}
 
 	for (i = 0; i < nb_links; i++)
-		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
+		if (queues[i] >= dev->data->nb_queues)
			return -EINVAL;
 
 	diag = (*dev->dev_ops->port_link)(dev, dev->data->ports[port_id],
@@ -815,8 +823,6 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id,
 	return diag;
 }
 
-#define EVENT_QUEUE_SERVICE_PRIORITY_INVALID (0xdead)
-
 int
 rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
 		      uint8_t queues[], uint16_t nb_unlinks)
@@ -843,7 +849,7 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
 	}
 
 	for (i = 0; i < nb_unlinks; i++)
-		if (queues[i] >= RTE_EVENT_MAX_QUEUES_PER_DEV)
+		if (queues[i] >= dev->data->nb_queues)
 			return -EINVAL;
 
 	diag = (*dev->dev_ops->port_unlink)(dev, dev->data->ports[port_id],
@@ -879,7 +885,7 @@ rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
 	links_map = dev->data->links_map;
 	/* Point links_map to this port specific area */
 	links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV);
-	for (i = 0; i < RTE_EVENT_MAX_QUEUES_PER_DEV; i++) {
+	for (i = 0; i < dev->data->nb_queues; i++) {
 		if (links_map[i] != EVENT_QUEUE_SERVICE_PRIORITY_INVALID) {
 			queues[count] = i;
 			priorities[count] = (uint8_t)links_map[i];
@@ -902,8 +908,7 @@ rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
 	if (timeout_ticks == NULL)
 		return -EINVAL;
 
-	(*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
-	return 0;
+	return (*dev->dev_ops->timeout_ticks)(dev, ns, timeout_ticks);
 }
 
 int
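The hunks above carry three related fixes: rte_event_port_link() and rte_event_port_unlink() now validate queue ids against the configured queue count (dev->data->nb_queues) instead of the compile-time ceiling, rte_event_port_links_get() scans only that configured range, and rte_event_dequeue_timeout_ticks() propagates the driver's return code instead of discarding it. A hypothetical caller-side sketch of the link APIs; NULL queue/priority lists are documented to mean "all configured queues at normal priority":

	#include <stdio.h>
	#include <rte_eventdev.h>

	/* Link every configured queue to one port, then read the links back.
	 * nb_links is ignored when the queue list is NULL. */
	static int
	link_all_and_dump(uint8_t dev_id, uint8_t port_id)
	{
		uint8_t queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
		uint8_t prios[RTE_EVENT_MAX_QUEUES_PER_DEV];
		int i, n;

		n = rte_event_port_link(dev_id, port_id, NULL, NULL, 0);
		if (n < 0)
			return n;

		n = rte_event_port_links_get(dev_id, port_id, queues, prios);
		for (i = 0; i < n; i++)
			printf("port %u <- queue %u (priority %u)\n",
					port_id, queues[i], prios[i]);
		return n;
	}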
@@ -920,6 +925,89 @@ rte_event_dev_dump(uint8_t dev_id, FILE *f)
 
 }
 
+static int
+xstats_get_count(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+		uint8_t queue_port_id)
+{
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	if (dev->dev_ops->xstats_get_names != NULL)
+		return (*dev->dev_ops->xstats_get_names)(dev, mode,
+							queue_port_id,
+							NULL, NULL, 0);
+	return 0;
+}
+
+int
+rte_event_dev_xstats_names_get(uint8_t dev_id,
+		enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id,
+		struct rte_event_dev_xstats_name *xstats_names,
+		unsigned int *ids, unsigned int size)
+{
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+	const int cnt_expected_entries = xstats_get_count(dev_id, mode,
+							queue_port_id);
+	if (xstats_names == NULL || cnt_expected_entries < 0 ||
+			(int)size < cnt_expected_entries)
+		return cnt_expected_entries;
+
+	/* dev_id checked above */
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	if (dev->dev_ops->xstats_get_names != NULL)
+		return (*dev->dev_ops->xstats_get_names)(dev, mode,
+				queue_port_id, xstats_names, ids, size);
+
+	return -ENOTSUP;
+}
+
+/* retrieve eventdev extended statistics */
+int
+rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode,
+		uint8_t queue_port_id, const unsigned int ids[],
+		uint64_t values[], unsigned int n)
+{
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -ENODEV);
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	/* implemented by the driver */
+	if (dev->dev_ops->xstats_get != NULL)
+		return (*dev->dev_ops->xstats_get)(dev, mode, queue_port_id,
+							ids, values, n);
+	return -ENOTSUP;
+}
+
+uint64_t
+rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
+		unsigned int *id)
+{
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, 0);
+	const struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+	unsigned int temp = -1;
+
+	if (id != NULL)
+		*id = (unsigned int)-1;
+	else
+		id = &temp; /* ensure driver never gets a NULL value */
+
+	/* implemented by driver */
+	if (dev->dev_ops->xstats_get_by_name != NULL)
+		return (*dev->dev_ops->xstats_get_by_name)(dev, name, id);
+	return -ENOTSUP;
+}
+
+int rte_event_dev_xstats_reset(uint8_t dev_id,
+		enum rte_event_dev_xstats_mode mode, int16_t queue_port_id,
+		const uint32_t ids[], uint32_t nb_ids)
+{
+	RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL);
+	struct rte_eventdev *dev = &rte_eventdevs[dev_id];
+
+	if (dev->dev_ops->xstats_reset != NULL)
+		return (*dev->dev_ops->xstats_reset)(dev, mode, queue_port_id,
+							ids, nb_ids);
+	return -ENOTSUP;
+}
+
 int
 rte_event_dev_start(uint8_t dev_id)
 {
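The added block is the new extended-statistics API, following the ethdev xstats convention: size the buffers with a NULL names call, fetch names and ids, read values by id, and reset per scope. Each entry point dispatches to an optional dev_ops callback, so PMDs that opt out answer -ENOTSUP. A hypothetical usage sketch; passing a NULL id list with nb_ids == 0 to the reset call is assumed to mean "reset every statistic in the scope", and -1 stands in for "no specific queue/port":

	#include <stdio.h>
	#include <stdlib.h>
	#include <inttypes.h>
	#include <rte_eventdev.h>

	/* List device-scope xstats with their current values, then reset. */
	static int
	dump_and_reset_dev_xstats(uint8_t dev_id)
	{
		const enum rte_event_dev_xstats_mode mode =
			RTE_EVENT_DEV_XSTATS_DEVICE;
		struct rte_event_dev_xstats_name *names = NULL;
		unsigned int *ids = NULL;
		uint64_t *vals = NULL;
		int i, n;

		/* A NULL output buffer makes the names call return the count. */
		n = rte_event_dev_xstats_names_get(dev_id, mode, 0,
				NULL, NULL, 0);
		if (n <= 0)
			return n;

		names = calloc(n, sizeof(*names));
		ids = calloc(n, sizeof(*ids));
		vals = calloc(n, sizeof(*vals));
		if (names != NULL && ids != NULL && vals != NULL &&
		    rte_event_dev_xstats_names_get(dev_id, mode, 0,
					names, ids, n) == n &&
		    rte_event_dev_xstats_get(dev_id, mode, 0,
					ids, vals, n) == n) {
			for (i = 0; i < n; i++)
				printf("%-48s %" PRIu64 "\n",
						names[i].name, vals[i]);
			/* Assumed: NULL ids + nb_ids 0 resets the scope. */
			rte_event_dev_xstats_reset(dev_id, mode, -1, NULL, 0);
		}

		free(names);
		free(ids);
		free(vals);
		return n;
	}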
@@ -1080,143 +1168,33 @@ int
 rte_event_pmd_release(struct rte_eventdev *eventdev)
 {
 	int ret;
+	char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];
+	const struct rte_memzone *mz;
 
 	if (eventdev == NULL)
 		return -EINVAL;
 
-	ret = rte_event_dev_close(eventdev->data->dev_id);
-	if (ret < 0)
-		return ret;
-
 	eventdev->attached = RTE_EVENTDEV_DETACHED;
 	eventdev_globals.nb_devs--;
-	eventdev->data = NULL;
-
-	return 0;
-}
-
-struct rte_eventdev *
-rte_event_pmd_vdev_init(const char *name, size_t dev_private_size,
-		int socket_id)
-{
-	struct rte_eventdev *eventdev;
-
-	/* Allocate device structure */
-	eventdev = rte_event_pmd_allocate(name, socket_id);
-	if (eventdev == NULL)
-		return NULL;
-
-	/* Allocate private device structure */
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		eventdev->data->dev_private =
-				rte_zmalloc_socket("eventdev device private",
-						dev_private_size,
-						RTE_CACHE_LINE_SIZE,
-						socket_id);
-
-		if (eventdev->data->dev_private == NULL)
-			rte_panic("Cannot allocate memzone for private device"
-					" data");
-	}
-
-	return eventdev;
-}
-
-int
-rte_event_pmd_pci_probe(struct rte_pci_driver *pci_drv,
-			struct rte_pci_device *pci_dev)
-{
-	struct rte_eventdev_driver *eventdrv;
-	struct rte_eventdev *eventdev;
-
-	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
-
-	int retval;
-
-	eventdrv = (struct rte_eventdev_driver *)pci_drv;
-	if (eventdrv == NULL)
-		return -ENODEV;
-
-	rte_eal_pci_device_name(&pci_dev->addr, eventdev_name,
-			sizeof(eventdev_name));
-
-	eventdev = rte_event_pmd_allocate(eventdev_name,
-			pci_dev->device.numa_node);
-	if (eventdev == NULL)
-		return -ENOMEM;
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
-		eventdev->data->dev_private =
-				rte_zmalloc_socket(
-						"eventdev private structure",
-						eventdrv->dev_private_size,
-						RTE_CACHE_LINE_SIZE,
-						rte_socket_id());
-
-		if (eventdev->data->dev_private == NULL)
-			rte_panic("Cannot allocate memzone for private "
-					"device data");
-	}
-
-	eventdev->pci_dev = pci_dev;
-	eventdev->driver = eventdrv;
-
-	/* Invoke PMD device initialization function */
-	retval = (*eventdrv->eventdev_init)(eventdev);
-	if (retval == 0)
-		return 0;
-
-	RTE_EDEV_LOG_ERR("driver %s: (vendor_id=0x%x device_id=0x%x)"
-			" failed", pci_drv->driver.name,
-			(unsigned int) pci_dev->id.vendor_id,
-			(unsigned int) pci_dev->id.device_id);
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
 		rte_free(eventdev->data->dev_private);
-
-	eventdev->attached = RTE_EVENTDEV_DETACHED;
-	eventdev_globals.nb_devs--;
-
-	return -ENXIO;
-}
-
-int
-rte_event_pmd_pci_remove(struct rte_pci_device *pci_dev)
-{
-	const struct rte_eventdev_driver *eventdrv;
-	struct rte_eventdev *eventdev;
-	char eventdev_name[RTE_EVENTDEV_NAME_MAX_LEN];
-	int ret;
-
-	if (pci_dev == NULL)
-		return -EINVAL;
-
-	rte_eal_pci_device_name(&pci_dev->addr, eventdev_name,
-			sizeof(eventdev_name));
-
-	eventdev = rte_event_pmd_get_named_dev(eventdev_name);
-	if (eventdev == NULL)
-		return -ENODEV;
+		/* Generate memzone name */
+		ret = snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
+				eventdev->data->dev_id);
+		if (ret >= (int)sizeof(mz_name))
+			return -EINVAL;
 
-	eventdrv = (const struct rte_eventdev_driver *)pci_dev->driver;
-	if (eventdrv == NULL)
-		return -ENODEV;
+		mz = rte_memzone_lookup(mz_name);
+		if (mz == NULL)
+			return -ENOMEM;
 
-	/* Invoke PMD device un-init function */
-	if (*eventdrv->eventdev_uninit) {
-		ret = (*eventdrv->eventdev_uninit)(eventdev);
+		ret = rte_memzone_free(mz);
 		if (ret)
 			return ret;
 	}
 
-	/* Free event device */
-	rte_event_pmd_release(eventdev);
-
-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_free(eventdev->data->dev_private);
-
-	eventdev->pci_dev = NULL;
-	eventdev->driver = NULL;
-
+	eventdev->data = NULL;
 	return 0;
 }
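With rte_event_pmd_vdev_init() and the PCI probe/remove helpers dropped from this file (along with the rte_pci.h include and the pci_dev/driver fields they used), rte_event_pmd_release() no longer closes the device: it frees the private data and then locates the per-device data memzone by rebuilding its name. That makes "rte_eventdev_data_%u" a naming contract between the allocation and release paths. A hypothetical lookup helper illustrating the same convention; RTE_EVENTDEV_NAME_MAX_LEN is assumed to be provided by the eventdev PMD header:

	#include <stdio.h>
	#include <rte_memzone.h>
	#include <rte_eventdev_pmd.h>

	/* Find the data memzone for a device id; this must mirror the
	 * snprintf() in rte_event_pmd_release() above, or the release
	 * path cannot find the zone to free. */
	static const struct rte_memzone *
	eventdev_data_mz(uint8_t dev_id)
	{
		char mz_name[RTE_EVENTDEV_NAME_MAX_LEN];

		if (snprintf(mz_name, sizeof(mz_name), "rte_eventdev_data_%u",
				dev_id) >= (int)sizeof(mz_name))
			return NULL;

		return rte_memzone_lookup(mz_name);
	}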