struct internal_list *list;
struct ifcvf_internal *internal;
- vdev = rte_vdpa_get_device(rte_vhost_get_vdpa_device_id(vid));
+ vdev = rte_vhost_get_vdpa_device(vid);
list = find_internal_resource_by_vdev(vdev);
if (list == NULL) {
DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
struct internal_list *list;
struct ifcvf_internal *internal;
- vdev = rte_vdpa_get_device(rte_vhost_get_vdpa_device_id(vid));
+ vdev = rte_vhost_get_vdpa_device(vid);
list = find_internal_resource_by_vdev(vdev);
if (list == NULL) {
DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
struct ifcvf_internal *internal;
uint64_t log_base = 0, log_size = 0;
- vdev = rte_vdpa_get_device(rte_vhost_get_vdpa_device_id(vid));
+ vdev = rte_vhost_get_vdpa_device(vid);
list = find_internal_resource_by_vdev(vdev);
if (list == NULL) {
DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
struct rte_vdpa_device *vdev;
struct internal_list *list;
- vdev = rte_vdpa_get_device(rte_vhost_get_vdpa_device_id(vid));
+ vdev = rte_vhost_get_vdpa_device(vid);
list = find_internal_resource_by_vdev(vdev);
if (list == NULL) {
DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
struct rte_vdpa_device *vdev;
struct internal_list *list;
- vdev = rte_vdpa_get_device(rte_vhost_get_vdpa_device_id(vid));
+ vdev = rte_vhost_get_vdpa_device(vid);
list = find_internal_resource_by_vdev(vdev);
if (list == NULL) {
DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
struct vfio_region_info reg = { .argsz = sizeof(reg) };
int ret;
- vdev = rte_vdpa_get_device(rte_vhost_get_vdpa_device_id(vid));
+ vdev = rte_vhost_get_vdpa_device(vid);
list = find_internal_resource_by_vdev(vdev);
if (list == NULL) {
DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
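All six IFC callbacks above now share the same two-step lookup: resolve the rte_vdpa_device handle from the vhost id, then map it to the driver's internal state. A minimal sketch of what find_internal_resource_by_vdev presumably does; the list and lock names are assumptions for illustration, not taken from the patch:

/* Uses <sys/queue.h> TAILQ macros; internal_list and internal_list_lock
 * are assumed driver globals. */
static struct internal_list *
find_internal_resource_by_vdev(struct rte_vdpa_device *vdev)
{
	struct internal_list *list;

	pthread_mutex_lock(&internal_list_lock);
	TAILQ_FOREACH(list, &internal_list, next) {
		if (vdev == list->internal->vdev) {
			pthread_mutex_unlock(&internal_list_lock);
			return list;
		}
	}
	pthread_mutex_unlock(&internal_list_lock);
	return NULL;	/* not found: callers log "Invalid vDPA device" */
}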
static int
mlx5_vdpa_set_vring_state(int vid, int vring, int state)
{
- struct rte_vdpa_device *vdev = rte_vdpa_get_device(
- rte_vhost_get_vdpa_device_id(vid));
+ struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
struct mlx5_vdpa_priv *priv =
mlx5_vdpa_find_priv_resource_by_vdev(vdev);
static int
mlx5_vdpa_features_set(int vid)
{
- struct rte_vdpa_device *vdev = rte_vdpa_get_device(
- rte_vhost_get_vdpa_device_id(vid));
+ struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
struct mlx5_vdpa_priv *priv =
mlx5_vdpa_find_priv_resource_by_vdev(vdev);
uint64_t log_base, log_size;
static int
mlx5_vdpa_dev_close(int vid)
{
- struct rte_vdpa_device *vdev = rte_vdpa_get_device(
- rte_vhost_get_vdpa_device_id(vid));
+ struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
struct mlx5_vdpa_priv *priv =
mlx5_vdpa_find_priv_resource_by_vdev(vdev);
int ret = 0;
static int
mlx5_vdpa_dev_config(int vid)
{
- struct rte_vdpa_device *vdev = rte_vdpa_get_device(
- rte_vhost_get_vdpa_device_id(vid));
+ struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
struct mlx5_vdpa_priv *priv =
mlx5_vdpa_find_priv_resource_by_vdev(vdev);
static int
mlx5_vdpa_get_device_fd(int vid)
{
- struct rte_vdpa_device *vdev = rte_vdpa_get_device(
- rte_vhost_get_vdpa_device_id(vid));
+ struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
struct mlx5_vdpa_priv *priv =
mlx5_vdpa_find_priv_resource_by_vdev(vdev);
static int
mlx5_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
{
- struct rte_vdpa_device *vdev = rte_vdpa_get_device(
- rte_vhost_get_vdpa_device_id(vid));
+ struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
struct mlx5_vdpa_priv *priv =
mlx5_vdpa_find_priv_resource_by_vdev(vdev);
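The mlx5 callbacks follow the same pattern: mlx5_vdpa_find_priv_resource_by_vdev plausibly walks the driver's private list comparing the stored device pointer. A sketch under that assumption; priv_list and priv_list_lock are illustrative names:

/* Uses <sys/queue.h> TAILQ macros over an assumed global priv list. */
static struct mlx5_vdpa_priv *
mlx5_vdpa_find_priv_resource_by_vdev(struct rte_vdpa_device *vdev)
{
	struct mlx5_vdpa_priv *priv;

	pthread_mutex_lock(&priv_list_lock);
	TAILQ_FOREACH(priv, &priv_list, next)
		if (priv->vdev == vdev)
			break;
	pthread_mutex_unlock(&priv_list_lock);
	return priv;	/* NULL when the list is exhausted */
}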
{
int ret;
char *socket_path = vport->ifname;
+ struct rte_vdpa_device *vdev;
int did = vport->did;
if (client_mode)
"register driver ops failed: %s\n",
socket_path);
- ret = rte_vhost_driver_attach_vdpa_device(socket_path, did);
+ vdev = rte_vdpa_get_device(did);
+ if (!vdev)
+ rte_exit(EXIT_FAILURE,
+ "vDPA device retrieval failed: %p\n",
+ vdev);
+
+ ret = rte_vhost_driver_attach_vdpa_device(socket_path, vdev);
if (ret != 0)
rte_exit(EXIT_FAILURE,
"attach vdpa device failed: %s\n",
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#endif
+struct rte_vdpa_device;
/**
* Information relating to memory regions including offsets to
*
* @param path
* The vhost-user socket file path
- * @param did
- * Device id
+ * @param dev
+ * vDPA device pointer
* @return
* 0 on success, -1 on failure
*/
__rte_experimental
int
-rte_vhost_driver_attach_vdpa_device(const char *path, int did);
+rte_vhost_driver_attach_vdpa_device(const char *path,
+ struct rte_vdpa_device *dev);
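Callers that previously attached by id now fetch the device handle first. A short usage sketch, assuming did was obtained at registration time (as in the example application above) and a hypothetical socket path:

	struct rte_vdpa_device *vdev = rte_vdpa_get_device(did);

	if (vdev == NULL ||
	    rte_vhost_driver_attach_vdpa_device("/tmp/vhost.sock", vdev) != 0)
		return -1;	/* no such device, unknown socket, or NULL args */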
/**
- * Unset the vdpa device id
+ * Unset the vDPA device
* @param path
* The vhost-user socket file path
* @return
- * Device id, -1 on failure
+ * vDPA device pointer, NULL on failure
*/
__rte_experimental
-int
-rte_vhost_driver_get_vdpa_device_id(const char *path);
+struct rte_vdpa_device *
+rte_vhost_driver_get_vdpa_device(const char *path);
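The by-path getter mirrors the setter. A minimal sketch, again with a hypothetical socket path:

	struct rte_vdpa_device *vdev;

	vdev = rte_vhost_driver_get_vdpa_device("/tmp/vhost.sock");
	if (vdev == NULL) {
		/* socket unknown, or no vDPA device attached to it */
	}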
/**
* Set the feature bits the vhost-user driver supports.
* @param vid
* vhost device id
* @return
- * device id
+ * vDPA device pointer on success, NULL on failure
*/
__rte_experimental
-int
-rte_vhost_get_vdpa_device_id(int vid);
+struct rte_vdpa_device *
+rte_vhost_get_vdpa_device(int vid);
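This is the call every driver callback above now opens with. A sketch of a callback built on it, where my_vdpa_dev_config and my_find_priv_by_vdev are purely illustrative stand-ins for a driver-specific entry point and lookup:

static int
my_vdpa_dev_config(int vid)
{
	struct rte_vdpa_device *vdev = rte_vhost_get_vdpa_device(vid);
	struct my_priv *priv;

	if (vdev == NULL)
		return -1;	/* vid not bound to a vDPA device */
	priv = my_find_priv_by_vdev(vdev);
	if (priv == NULL)
		return -1;
	/* program the hardware datapath for this vhost device */
	return 0;
}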
/**
- * Notify the guest that should get virtio configuration space from backend.
+ * Notify the guest that it should get virtio configuration space from backend.
rte_vdpa_reset_stats;
rte_vhost_driver_attach_vdpa_device;
rte_vhost_driver_detach_vdpa_device;
- rte_vhost_driver_get_vdpa_device_id;
- rte_vhost_get_vdpa_device_id;
+ rte_vhost_driver_get_vdpa_device;
+ rte_vhost_get_vdpa_device;
rte_vhost_driver_get_protocol_features;
rte_vhost_driver_get_queue_num;
rte_vhost_get_log_base;
uint64_t protocol_features;
- /*
- * Device id to identify a specific backend device.
- * It's set to -1 for the default software implementation.
- * If valid, one socket can have 1 connection only.
- */
- int vdpa_dev_id;
+ /*
+ * vDPA device attached to this socket, NULL if none.
+ * When set, one socket can have one connection only.
+ */
+ struct rte_vdpa_device *vdpa_dev;
struct vhost_device_ops const *notify_ops;
};
vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
- vhost_attach_vdpa_device(vid, vsocket->vdpa_dev_id);
+ vhost_attach_vdpa_device(vid, vsocket->vdpa_dev);
if (vsocket->dequeue_zero_copy)
vhost_enable_dequeue_zero_copy(vid);
}
int
-rte_vhost_driver_attach_vdpa_device(const char *path, int did)
+rte_vhost_driver_attach_vdpa_device(const char *path,
+ struct rte_vdpa_device *dev)
{
struct vhost_user_socket *vsocket;
- if (rte_vdpa_get_device(did) == NULL || path == NULL)
+ if (dev == NULL || path == NULL)
return -1;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
- vsocket->vdpa_dev_id = did;
+ vsocket->vdpa_dev = dev;
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
- vsocket->vdpa_dev_id = -1;
+ vsocket->vdpa_dev = NULL;
pthread_mutex_unlock(&vhost_user.mutex);
return vsocket ? 0 : -1;
}
-int
-rte_vhost_driver_get_vdpa_device_id(const char *path)
+struct rte_vdpa_device *
+rte_vhost_driver_get_vdpa_device(const char *path)
{
struct vhost_user_socket *vsocket;
- int did = -1;
+ struct rte_vdpa_device *dev = NULL;
pthread_mutex_lock(&vhost_user.mutex);
vsocket = find_vhost_user_socket(path);
if (vsocket)
- did = vsocket->vdpa_dev_id;
+ dev = vsocket->vdpa_dev;
pthread_mutex_unlock(&vhost_user.mutex);
- return did;
+ return dev;
}
int
struct vhost_user_socket *vsocket;
uint64_t vdpa_features;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
int ret = 0;
pthread_mutex_lock(&vhost_user.mutex);
goto unlock_exit;
}
- did = vsocket->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = vsocket->vdpa_dev;
if (!vdpa_dev || !vdpa_dev->ops->get_features) {
*features = vsocket->features;
goto unlock_exit;
struct vhost_user_socket *vsocket;
uint64_t vdpa_protocol_features;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
int ret = 0;
pthread_mutex_lock(&vhost_user.mutex);
goto unlock_exit;
}
- did = vsocket->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = vsocket->vdpa_dev;
if (!vdpa_dev || !vdpa_dev->ops->get_protocol_features) {
*protocol_features = vsocket->protocol_features;
goto unlock_exit;
struct vhost_user_socket *vsocket;
uint32_t vdpa_queue_num;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
int ret = 0;
pthread_mutex_lock(&vhost_user.mutex);
goto unlock_exit;
}
- did = vsocket->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = vsocket->vdpa_dev;
if (!vdpa_dev || !vdpa_dev->ops->get_queue_num) {
*queue_num = VHOST_MAX_QUEUE_PAIRS;
goto unlock_exit;
"error: failed to init connection mutex\n");
goto out_free;
}
- vsocket->vdpa_dev_id = -1;
+ vsocket->vdpa_dev = NULL;
vsocket->dequeue_zero_copy = flags & RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
vsocket->extbuf = flags & RTE_VHOST_USER_EXTBUF_SUPPORT;
vsocket->linearbuf = flags & RTE_VHOST_USER_LINEARBUF_SUPPORT;
dev->vid = i;
dev->flags = VIRTIO_DEV_BUILTIN_VIRTIO_NET;
dev->slave_req_fd = -1;
- dev->vdpa_dev_id = -1;
dev->postcopy_ufd = -1;
rte_spinlock_init(&dev->slave_req_lock);
vhost_destroy_device_notify(struct virtio_net *dev)
{
struct rte_vdpa_device *vdpa_dev;
- int did;
if (dev->flags & VIRTIO_DEV_RUNNING) {
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = dev->vdpa_dev;
if (vdpa_dev && vdpa_dev->ops->dev_close)
vdpa_dev->ops->dev_close(dev->vid);
dev->flags &= ~VIRTIO_DEV_RUNNING;
}
void
-vhost_attach_vdpa_device(int vid, int did)
+vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *vdpa_dev)
{
struct virtio_net *dev = get_device(vid);
if (dev == NULL)
return;
- if (rte_vdpa_get_device(did) == NULL)
- return;
-
- dev->vdpa_dev_id = did;
+ dev->vdpa_dev = vdpa_dev;
}
void
return ret;
}
-int rte_vhost_get_vdpa_device_id(int vid)
+struct rte_vdpa_device *
+rte_vhost_get_vdpa_device(int vid)
{
struct virtio_net *dev = get_device(vid);
if (dev == NULL)
- return -1;
+ return NULL;
- return dev->vdpa_dev_id;
+ return dev->vdpa_dev;
}
int rte_vhost_get_log_base(int vid, uint64_t *log_base,
int postcopy_ufd;
int postcopy_listening;
- /*
- * Device id to identify a specific backend device.
- * It's set to -1 for the default software implementation.
- */
- int vdpa_dev_id;
+ /* Backend vDPA device, NULL for the default software implementation */
+ struct rte_vdpa_device *vdpa_dev;
/* context data for the external message handlers */
void *extern_data;
int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
-void vhost_attach_vdpa_device(int vid, int did);
+void vhost_attach_vdpa_device(int vid, struct rte_vdpa_device *dev);
void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
uint64_t features = msg->payload.u64;
uint64_t vhost_features = 0;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
if (validate_msg_fds(msg, 0) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
}
}
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = dev->vdpa_dev;
if (vdpa_dev && vdpa_dev->ops->set_features)
vdpa_dev->ops->set_features(dev->vid);
int enable = (int)msg->payload.state.num;
int index = (int)msg->payload.state.index;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
if (validate_msg_fds(msg, 0) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
"set queue enable: %d to qp idx: %d\n",
enable, index);
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = dev->vdpa_dev;
if (vdpa_dev && vdpa_dev->ops->set_vring_state)
vdpa_dev->ops->set_vring_state(dev->vid, index, enable);
struct virtio_net *dev = *pdev;
uint8_t *mac = (uint8_t *)&msg->payload.u64;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
if (validate_msg_fds(msg, 0) != 0)
return RTE_VHOST_MSG_RESULT_ERR;
* copied before the flag is set.
*/
__atomic_store_n(&dev->broadcast_rarp, 1, __ATOMIC_RELEASE);
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = dev->vdpa_dev;
if (vdpa_dev && vdpa_dev->ops->migration_done)
vdpa_dev->ops->migration_done(dev->vid);
struct virtio_net *dev;
struct VhostUserMsg msg;
struct rte_vdpa_device *vdpa_dev;
- int did = -1;
int ret;
int unlock_required = 0;
bool handled;
}
}
- did = dev->vdpa_dev_id;
- vdpa_dev = rte_vdpa_get_device(did);
+ vdpa_dev = dev->vdpa_dev;
if (vdpa_dev && virtio_is_ready(dev) &&
!(dev->flags & VIRTIO_DEV_VDPA_CONFIGURED) &&
msg.request.master == VHOST_USER_SET_VRING_CALL) {
{
struct virtio_net *dev;
struct rte_vdpa_device *vdpa_dev;
- int vfio_device_fd, did, ret = 0;
+ int vfio_device_fd, ret = 0;
uint64_t offset, size;
unsigned int i;
if (!dev)
return -ENODEV;
- did = dev->vdpa_dev_id;
- if (did < 0)
- return -EINVAL;
+ vdpa_dev = dev->vdpa_dev;
+ if (vdpa_dev == NULL)
+ return -ENODEV;
if (!(dev->features & (1ULL << VIRTIO_F_VERSION_1)) ||
!(dev->features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) ||
(1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER)))
return -ENOTSUP;
- vdpa_dev = rte_vdpa_get_device(did);
- if (!vdpa_dev)
- return -ENODEV;
-
RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_vfio_device_fd, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(vdpa_dev->ops->get_notify_area, -ENOTSUP);