X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvdpa.c;h=ae6fdd24edbece10206ec5c27b961868977c797a;hb=83c2047c5fe82fb25674c3053d4871838e7455cb;hp=49ada002773431f84161e8b6d670f4ccd878f5a9;hpb=81a6b7fe068f64d1381ea4d6fce5088a1bcc7f81;p=dpdk.git
diff --git a/lib/librte_vhost/vdpa.c b/lib/librte_vhost/vdpa.c
index 49ada00277..ae6fdd24ed 100644
--- a/lib/librte_vhost/vdpa.c
+++ b/lib/librte_vhost/vdpa.c
@@ -9,65 +9,63 @@
  */
 
 #include <stdbool.h>
+#include <sys/queue.h>
 
 #include <rte_class.h>
 #include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include <rte_tailq.h>
+
 #include "rte_vdpa.h"
+#include "rte_vdpa_dev.h"
 #include "vhost.h"
 
-static struct rte_vdpa_device vdpa_devices[MAX_VHOST_DEVICE];
-static uint32_t vdpa_device_num;
+/** Double linked list of vDPA devices. */
+TAILQ_HEAD(vdpa_device_list, rte_vdpa_device);
+static struct vdpa_device_list vdpa_device_list =
+	TAILQ_HEAD_INITIALIZER(vdpa_device_list);
+static rte_spinlock_t vdpa_device_list_lock = RTE_SPINLOCK_INITIALIZER;
 
 
-int
-rte_vdpa_find_device_id(struct rte_vdpa_device *dev)
-{
-	struct rte_vdpa_device *tmp_dev;
-	int i;
 
-	if (dev == NULL)
-		return -1;
+/* Unsafe, needs to be called with vdpa_device_list_lock held */
+static struct rte_vdpa_device *
+__vdpa_find_device_by_name(const char *name)
+{
+	struct rte_vdpa_device *dev, *ret = NULL;
 
-	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
-		tmp_dev = &vdpa_devices[i];
-		if (tmp_dev->ops == NULL)
-			continue;
+	if (name == NULL)
+		return NULL;
 
-		if (tmp_dev == dev)
-			return i;
+	TAILQ_FOREACH(dev, &vdpa_device_list, next) {
+		if (!strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN)) {
+			ret = dev;
+			break;
+		}
 	}
 
-	return -1;
+	return ret;
 }
 
-int
-rte_vdpa_find_device_id_by_name(const char *name)
+struct rte_vdpa_device *
+rte_vdpa_find_device_by_name(const char *name)
 {
 	struct rte_vdpa_device *dev;
-	int i;
-
-	if (name == NULL)
-		return -1;
 
-	for (i = 0; i < MAX_VHOST_DEVICE; ++i) {
-		dev = &vdpa_devices[i];
-		if (dev->ops == NULL)
-			continue;
-
-		if (strncmp(dev->device->name, name, RTE_DEV_NAME_MAX_LEN) == 0)
-			return i;
-	}
+	rte_spinlock_lock(&vdpa_device_list_lock);
+	dev = __vdpa_find_device_by_name(name);
+	rte_spinlock_unlock(&vdpa_device_list_lock);
 
-	return -1;
+	return dev;
 }
 
-struct rte_vdpa_device *
-rte_vdpa_get_device(int did)
+struct rte_device *
+rte_vdpa_get_rte_device(struct rte_vdpa_device *vdpa_dev)
 {
-	if (did < 0 || did >= MAX_VHOST_DEVICE)
+	if (vdpa_dev == NULL)
 		return NULL;
 
-	return &vdpa_devices[did];
+	return vdpa_dev->device;
 }
 
 struct rte_vdpa_device *
@@ -75,54 +73,60 @@ rte_vdpa_register_device(struct rte_device *rte_dev,
 		struct rte_vdpa_dev_ops *ops)
 {
 	struct rte_vdpa_device *dev;
-	int i;
 
-	if (vdpa_device_num >= MAX_VHOST_DEVICE || ops == NULL)
+	if (ops == NULL)
 		return NULL;
 
-	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
-		dev = &vdpa_devices[i];
-		if (dev->ops == NULL)
-			continue;
-
-		if (dev->device == rte_dev)
-			return NULL;
+	/* Check mandatory ops are implemented */
+	if (!ops->get_queue_num || !ops->get_features ||
+			!ops->get_protocol_features || !ops->dev_conf ||
+			!ops->dev_close || !ops->set_vring_state ||
+			!ops->set_features) {
+		VHOST_LOG_CONFIG(ERR,
+				"Some mandatory vDPA ops aren't implemented\n");
+		return NULL;
 	}
 
-	for (i = 0; i < MAX_VHOST_DEVICE; i++) {
-		if (vdpa_devices[i].ops == NULL)
-			break;
+	rte_spinlock_lock(&vdpa_device_list_lock);
+	/* Check the device hasn't been register already */
+	dev = __vdpa_find_device_by_name(rte_dev->name);
+	if (dev) {
+		dev = NULL;
+		goto out_unlock;
 	}
 
-	if (i == MAX_VHOST_DEVICE)
-		return NULL;
+	dev = rte_zmalloc(NULL, sizeof(*dev), 0);
+	if (!dev)
+		goto out_unlock;
 
-	dev = &vdpa_devices[i];
 	dev->device = rte_dev;
 	dev->ops = ops;
-	vdpa_device_num++;
+	TAILQ_INSERT_TAIL(&vdpa_device_list, dev, next);
+out_unlock:
+	rte_spinlock_unlock(&vdpa_device_list_lock);
 
 	return dev;
 }
 
 int
-rte_vdpa_unregister_device(struct rte_vdpa_device *vdev)
+rte_vdpa_unregister_device(struct rte_vdpa_device *dev)
 {
-	int did = rte_vdpa_find_device_id(vdev);
-
-	if (did < 0 || vdpa_devices[did].ops == NULL)
-		return -1;
+	struct rte_vdpa_device *cur_dev, *tmp_dev;
+	int ret = -1;
 
-	memset(&vdpa_devices[did], 0, sizeof(struct rte_vdpa_device));
-	vdpa_device_num--;
+	rte_spinlock_lock(&vdpa_device_list_lock);
+	TAILQ_FOREACH_SAFE(cur_dev, &vdpa_device_list, next, tmp_dev) {
+		if (dev != cur_dev)
+			continue;
 
-	return 0;
-}
+		TAILQ_REMOVE(&vdpa_device_list, dev, next);
+		rte_free(dev);
+		ret = 0;
+		break;
+	}
+	rte_spinlock_unlock(&vdpa_device_list_lock);
 
-int
-rte_vdpa_get_device_num(void)
-{
-	return vdpa_device_num;
+	return ret;
 }
 
 int
@@ -228,63 +232,67 @@ fail:
 }
 
 int
-rte_vdpa_get_stats_names(int did, struct rte_vdpa_stat_name *stats_names,
-		unsigned int size)
+rte_vdpa_get_queue_num(struct rte_vdpa_device *dev, uint32_t *queue_num)
 {
-	struct rte_vdpa_device *vdpa_dev;
+	if (dev == NULL || dev->ops == NULL || dev->ops->get_queue_num == NULL)
+		return -1;
 
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (!vdpa_dev)
-		return -ENODEV;
+	return dev->ops->get_queue_num(dev, queue_num);
+}
 
-	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats_names, -ENOTSUP);
+int
+rte_vdpa_get_features(struct rte_vdpa_device *dev, uint64_t *features)
+{
+	if (dev == NULL || dev->ops == NULL || dev->ops->get_features == NULL)
+		return -1;
 
-	return vdpa_dev->ops->get_stats_names(vdpa_dev, stats_names, size);
+	return dev->ops->get_features(dev, features);
 }
 
 int
-rte_vdpa_get_stats(int did, uint16_t qid, struct rte_vdpa_stat *stats,
-		unsigned int n)
+rte_vdpa_get_protocol_features(struct rte_vdpa_device *dev, uint64_t *features)
 {
-	struct rte_vdpa_device *vdpa_dev;
+	if (dev == NULL || dev->ops == NULL ||
+			dev->ops->get_protocol_features == NULL)
+		return -1;
 
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (!vdpa_dev)
-		return -ENODEV;
+	return dev->ops->get_protocol_features(dev, features);
+}
 
-	if (!stats || !n)
+int
+rte_vdpa_get_stats_names(struct rte_vdpa_device *dev,
+		struct rte_vdpa_stat_name *stats_names,
+		unsigned int size)
+{
+	if (!dev)
 		return -EINVAL;
 
-	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_stats, -ENOTSUP);
+	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats_names, -ENOTSUP);
 
-	return vdpa_dev->ops->get_stats(vdpa_dev, qid, stats, n);
+	return dev->ops->get_stats_names(dev, stats_names, size);
 }
 
 int
-rte_vdpa_reset_stats(int did, uint16_t qid)
+rte_vdpa_get_stats(struct rte_vdpa_device *dev, uint16_t qid,
+		struct rte_vdpa_stat *stats, unsigned int n)
 {
-	struct rte_vdpa_device *vdpa_dev;
-
-	vdpa_dev = rte_vdpa_get_device(did);
-	if (!vdpa_dev)
-		return -ENODEV;
+	if (!dev || !stats || !n)
+		return -EINVAL;
 
-	RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->reset_stats, -ENOTSUP);
+	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->get_stats, -ENOTSUP);
 
-	return vdpa_dev->ops->reset_stats(vdpa_dev, qid);
+	return dev->ops->get_stats(dev, qid, stats, n);
 }
 
-static uint16_t
-vdpa_dev_to_id(const struct rte_vdpa_device *dev)
+int
+rte_vdpa_reset_stats(struct rte_vdpa_device *dev, uint16_t qid)
 {
-	if (dev == NULL)
-		return MAX_VHOST_DEVICE;
+	if (!dev)
+		return -EINVAL;
 
-	if (dev < &vdpa_devices[0] ||
-			dev >= &vdpa_devices[MAX_VHOST_DEVICE])
-		return MAX_VHOST_DEVICE;
+	RTE_FUNC_PTR_OR_ERR_RET(dev->ops->reset_stats, -ENOTSUP);
 
-	return (uint16_t)(dev - vdpa_devices);
+	return dev->ops->reset_stats(dev, qid);
 }
 
 static int
@@ -306,24 +314,22 @@ vdpa_find_device(const struct rte_vdpa_device *start, rte_vdpa_cmp_t cmp,
 		struct rte_device *rte_dev)
 {
 	struct rte_vdpa_device *dev;
-	uint16_t idx;
 
-	if (start != NULL)
-		idx = vdpa_dev_to_id(start) + 1;
+	rte_spinlock_lock(&vdpa_device_list_lock);
+	if (start == NULL)
+		dev = TAILQ_FIRST(&vdpa_device_list);
 	else
-		idx = 0;
-	for (; idx < MAX_VHOST_DEVICE; idx++) {
-		dev = &vdpa_devices[idx];
-		/*
-		 * ToDo: Certainly better to introduce a state field,
-		 * but rely on ops being set for now.
-		 */
-		if (dev->ops == NULL)
-			continue;
+		dev = TAILQ_NEXT(start, next);
+
+	while (dev != NULL) {
 		if (cmp(dev, rte_dev) == 0)
-			return dev;
+			break;
+
+		dev = TAILQ_NEXT(dev, next);
 	}
-	return NULL;
+	rte_spinlock_unlock(&vdpa_device_list_lock);
+
+	return dev;
 }
 
 static void *
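
A minimal caller-side sketch of the reworked, handle-based API shown in the diff above. It only assumes the functions visible here (rte_vdpa_find_device_by_name(), rte_vdpa_get_queue_num(), rte_vdpa_get_features(), exported through <rte_vdpa.h>); the helper name dump_vdpa_capabilities() and the error handling are illustrative, not part of DPDK.

/* Illustrative only: query a vDPA device through the new device handle
 * instead of the removed integer device ID ("did") accessors.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#include <rte_vdpa.h>

static int
dump_vdpa_capabilities(const char *name)
{
	struct rte_vdpa_device *dev;
	uint32_t queue_num = 0;
	uint64_t features = 0;

	/* Replaces rte_vdpa_find_device_id_by_name() + rte_vdpa_get_device() */
	dev = rte_vdpa_find_device_by_name(name);
	if (dev == NULL)
		return -1;

	/* Capabilities are reached through the handle, no device ID involved */
	if (rte_vdpa_get_queue_num(dev, &queue_num) < 0 ||
			rte_vdpa_get_features(dev, &features) < 0)
		return -1;

	printf("%s: %" PRIu32 " queues, features 0x%" PRIx64 "\n",
			name, queue_num, features);
	return 0;
}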