X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_eventdev%2Frte_eventdev.c;h=b987e074546bd094541ab7695224e3b65eab95c8;hb=b8a0415008a699283e76a23cc2e4c09f02ee118e;hp=08a8fa6a703d09965f59e6b8f2dfeb011cf6d053;hpb=ec36d881f56de787b59ec545372c351ecee6179a;p=dpdk.git diff --git a/lib/librte_eventdev/rte_eventdev.c b/lib/librte_eventdev/rte_eventdev.c index 08a8fa6a70..b987e07454 100644 --- a/lib/librte_eventdev/rte_eventdev.c +++ b/lib/librte_eventdev/rte_eventdev.c @@ -13,6 +13,7 @@ #include #include +#include #include #include #include @@ -29,42 +30,47 @@ #include #include #include +#include +#include #include "rte_eventdev.h" #include "rte_eventdev_pmd.h" -struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS]; +static struct rte_eventdev rte_event_devices[RTE_EVENT_MAX_DEVS]; -struct rte_eventdev *rte_eventdevs = &rte_event_devices[0]; +struct rte_eventdev *rte_eventdevs = rte_event_devices; static struct rte_eventdev_global eventdev_globals = { .nb_devs = 0 }; -struct rte_eventdev_global *rte_eventdev_globals = &eventdev_globals; - /* Event dev north bound API implementation */ uint8_t rte_event_dev_count(void) { - return rte_eventdev_globals->nb_devs; + return eventdev_globals.nb_devs; } int rte_event_dev_get_dev_id(const char *name) { int i; + uint8_t cmp; if (!name) return -EINVAL; - for (i = 0; i < rte_eventdev_globals->nb_devs; i++) - if ((strcmp(rte_event_devices[i].data->name, name) - == 0) && - (rte_event_devices[i].attached == - RTE_EVENTDEV_ATTACHED)) + for (i = 0; i < eventdev_globals.nb_devs; i++) { + cmp = (strncmp(rte_event_devices[i].data->name, name, + RTE_EVENTDEV_NAME_MAX_LEN) == 0) || + (rte_event_devices[i].dev ? (strncmp( + rte_event_devices[i].dev->driver->name, name, + RTE_EVENTDEV_NAME_MAX_LEN) == 0) : 0); + if (cmp && (rte_event_devices[i].attached == + RTE_EVENTDEV_ATTACHED)) return i; + } return -ENODEV; } @@ -102,7 +108,7 @@ rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info) } int -rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id, +rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, uint32_t *caps) { struct rte_eventdev *dev; @@ -123,6 +129,76 @@ rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint8_t eth_port_id, : 0; } +int +rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps) +{ + struct rte_eventdev *dev; + const struct rte_event_timer_adapter_ops *ops; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + + dev = &rte_eventdevs[dev_id]; + + if (caps == NULL) + return -EINVAL; + *caps = 0; + + return dev->dev_ops->timer_adapter_caps_get ? + (*dev->dev_ops->timer_adapter_caps_get)(dev, + 0, + caps, + &ops) + : 0; +} + +int +rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, + uint32_t *caps) +{ + struct rte_eventdev *dev; + struct rte_cryptodev *cdev; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + if (!rte_cryptodev_pmd_is_valid_dev(cdev_id)) + return -EINVAL; + + dev = &rte_eventdevs[dev_id]; + cdev = rte_cryptodev_pmd_get_dev(cdev_id); + + if (caps == NULL) + return -EINVAL; + *caps = 0; + + return dev->dev_ops->crypto_adapter_caps_get ? 
+ (*dev->dev_ops->crypto_adapter_caps_get) + (dev, cdev, caps) : -ENOTSUP; +} + +int +rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, + uint32_t *caps) +{ + struct rte_eventdev *dev; + struct rte_eth_dev *eth_dev; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + RTE_ETH_VALID_PORTID_OR_ERR_RET(eth_port_id, -EINVAL); + + dev = &rte_eventdevs[dev_id]; + eth_dev = &rte_eth_devices[eth_port_id]; + + if (caps == NULL) + return -EINVAL; + + *caps = 0; + + return dev->dev_ops->eth_tx_adapter_caps_get ? + (*dev->dev_ops->eth_tx_adapter_caps_get)(dev, + eth_dev, + caps) + : 0; +} + static inline int rte_event_dev_queue_config(struct rte_eventdev *dev, uint8_t nb_queues) { @@ -813,18 +889,18 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, uint16_t *links_map; int i, diag; - RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0); + RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); dev = &rte_eventdevs[dev_id]; if (*dev->dev_ops->port_link == NULL) { - RTE_PMD_DEBUG_TRACE("Function not supported\n"); - rte_errno = -ENOTSUP; + RTE_EDEV_LOG_ERR("Function not supported\n"); + rte_errno = ENOTSUP; return 0; } if (!is_valid_port(dev, port_id)) { RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } @@ -845,7 +921,7 @@ rte_event_port_link(uint8_t dev_id, uint8_t port_id, for (i = 0; i < nb_links; i++) if (queues[i] >= dev->data->nb_queues) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } @@ -869,34 +945,50 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, { struct rte_eventdev *dev; uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV]; - int i, diag; + int i, diag, j; uint16_t *links_map; - RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, -EINVAL, 0); + RTE_EVENTDEV_VALID_DEVID_OR_ERRNO_RET(dev_id, EINVAL, 0); dev = &rte_eventdevs[dev_id]; if (*dev->dev_ops->port_unlink == NULL) { - RTE_PMD_DEBUG_TRACE("Function not supported\n"); - rte_errno = -ENOTSUP; + RTE_EDEV_LOG_ERR("Function not supported"); + rte_errno = ENOTSUP; return 0; } if (!is_valid_port(dev, port_id)) { RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } + links_map = dev->data->links_map; + /* Point links_map to this port specific area */ + links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); + if (queues == NULL) { - for (i = 0; i < dev->data->nb_queues; i++) - all_queues[i] = i; + j = 0; + for (i = 0; i < dev->data->nb_queues; i++) { + if (links_map[i] != + EVENT_QUEUE_SERVICE_PRIORITY_INVALID) { + all_queues[j] = i; + j++; + } + } queues = all_queues; - nb_unlinks = dev->data->nb_queues; + } else { + for (j = 0; j < nb_unlinks; j++) { + if (links_map[queues[j]] == + EVENT_QUEUE_SERVICE_PRIORITY_INVALID) + break; + } } + nb_unlinks = j; for (i = 0; i < nb_unlinks; i++) if (queues[i] >= dev->data->nb_queues) { - rte_errno = -EINVAL; + rte_errno = EINVAL; return 0; } @@ -906,15 +998,34 @@ rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, if (diag < 0) return diag; - links_map = dev->data->links_map; - /* Point links_map to this port specific area */ - links_map += (port_id * RTE_EVENT_MAX_QUEUES_PER_DEV); for (i = 0; i < diag; i++) links_map[queues[i]] = EVENT_QUEUE_SERVICE_PRIORITY_INVALID; return diag; } +int +rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id) +{ + struct rte_eventdev *dev; + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + dev = &rte_eventdevs[dev_id]; + if (!is_valid_port(dev, port_id)) { + 
RTE_EDEV_LOG_ERR("Invalid port_id=%" PRIu8, port_id); + return -EINVAL; + } + + /* Return 0 if the PMD does not implement unlinks in progress. + * This allows PMDs which handle unlink synchronously to not implement + * this function at all. + */ + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->port_unlinks_in_progress, 0); + + return (*dev->dev_ops->port_unlinks_in_progress)(dev, + dev->data->ports[port_id]); +} + int rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint8_t priorities[]) @@ -1073,6 +1184,16 @@ int rte_event_dev_xstats_reset(uint8_t dev_id, return -ENOTSUP; } +int rte_event_dev_selftest(uint8_t dev_id) +{ + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + struct rte_eventdev *dev = &rte_eventdevs[dev_id]; + + if (dev->dev_ops->dev_selftest != NULL) + return (*dev->dev_ops->dev_selftest)(); + return -ENOTSUP; +} + int rte_event_dev_start(uint8_t dev_id) { @@ -1100,6 +1221,23 @@ rte_event_dev_start(uint8_t dev_id) return 0; } +int +rte_event_dev_stop_flush_callback_register(uint8_t dev_id, + eventdev_stop_flush_t callback, void *userdata) +{ + struct rte_eventdev *dev; + + RTE_EDEV_LOG_DEBUG("Stop flush register dev_id=%" PRIu8, dev_id); + + RTE_EVENTDEV_VALID_DEVID_OR_ERR_RET(dev_id, -EINVAL); + dev = &rte_eventdevs[dev_id]; + + dev->dev_ops->dev_stop_flush = callback; + dev->data->dev_stop_flush_arg = userdata; + + return 0; +} + void rte_event_dev_stop(uint8_t dev_id) { @@ -1183,6 +1321,15 @@ rte_eventdev_find_free_device_index(void) return RTE_EVENT_MAX_DEVS; } +static uint16_t +rte_event_tx_adapter_enqueue(__rte_unused void *port, + __rte_unused struct rte_event ev[], + __rte_unused uint16_t nb_events) +{ + rte_errno = ENOTSUP; + return 0; +} + struct rte_eventdev * rte_event_pmd_allocate(const char *name, int socket_id) { @@ -1203,6 +1350,9 @@ rte_event_pmd_allocate(const char *name, int socket_id) eventdev = &rte_eventdevs[dev_id]; + eventdev->txa_enqueue = rte_event_tx_adapter_enqueue; + eventdev->txa_enqueue_same_dest = rte_event_tx_adapter_enqueue; + if (eventdev->data == NULL) { struct rte_eventdev_data *eventdev_data = NULL; @@ -1214,8 +1364,7 @@ rte_event_pmd_allocate(const char *name, int socket_id) eventdev->data = eventdev_data; - snprintf(eventdev->data->name, RTE_EVENTDEV_NAME_MAX_LEN, - "%s", name); + strlcpy(eventdev->data->name, name, RTE_EVENTDEV_NAME_MAX_LEN); eventdev->data->dev_id = dev_id; eventdev->data->socket_id = socket_id;
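
For reference, a minimal usage sketch (not part of this patch) of two of the APIs introduced above: registering a stop-flush callback and polling rte_event_port_unlinks_in_progress() after an unlink. The function names app_stop_flush_cb and app_teardown_port, and the dev/port/queue ids passed in, are hypothetical; only the rte_event_* calls come from the patch and the existing eventdev API.

#include <rte_common.h>
#include <rte_pause.h>
#include <rte_eventdev.h>

/* Hypothetical callback: invoked once per event still held by the device
 * when rte_event_dev_stop() flushes it (registered below).
 */
static void
app_stop_flush_cb(uint8_t dev_id, struct rte_event ev, void *arg)
{
	RTE_SET_USED(dev_id);
	RTE_SET_USED(ev);
	RTE_SET_USED(arg);
	/* e.g. free the mbuf carried by ev, or count the dropped event */
}

static void
app_teardown_port(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
	uint8_t queues[1] = { queue_id };

	/* Have the PMD call app_stop_flush_cb for events drained at stop. */
	rte_event_dev_stop_flush_callback_register(dev_id, app_stop_flush_cb,
						    NULL);

	/* Unlink one queue, then wait until the PMD reports the unlink has
	 * completed; PMDs that unlink synchronously simply return 0 here.
	 */
	if (rte_event_port_unlink(dev_id, port_id, queues, 1) != 1)
		return;
	while (rte_event_port_unlinks_in_progress(dev_id, port_id) > 0)
		rte_pause();
}

The busy-wait mirrors the contract added in this patch: port_unlinks_in_progress is optional for PMDs, and the library returns 0 when the driver does not implement it, so the loop terminates immediately for synchronous drivers.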