Vhost backends of different devices support different features.
Add an API to get the vDPA device type (currently a net device or
a blk device) so that users can set different features for
different kinds of devices.
Signed-off-by: Andy Pei <andy.pei@intel.com>
Reviewed-by: Chenbo Xia <chenbo.xia@intel.com>
Receive ``count`` packets from guest to host in async data path,
and store them at ``pkts``.
+* ``rte_vhost_driver_get_vdpa_dev_type(path, type)``
+
+  Get the device type of a vDPA device, either
+  ``RTE_VHOST_VDPA_DEVICE_TYPE_NET`` or ``RTE_VHOST_VDPA_DEVICE_TYPE_BLK``.
+
Vhost-user Implementations
--------------------------
Added vhost async dequeue API which can leverage DMA devices to
accelerate receiving packets from guest.
+* **Added vhost API to get the device type of a vDPA device.**
+
+  Added an API to get the device type (net or blk) of a vDPA device.
+
* **Updated Intel iavf driver.**
* Added Tx QoS queue rate limitation support.
#define RTE_MAX_VHOST_DEVICE 1024
+#define RTE_VHOST_VDPA_DEVICE_TYPE_NET 0
+#define RTE_VHOST_VDPA_DEVICE_TYPE_BLK 1
+
struct rte_vdpa_device;
/**
struct rte_vdpa_device *
rte_vhost_driver_get_vdpa_device(const char *path);
+/**
+ * Get the device type of the vDPA device attached to the given
+ * vhost-user socket, e.g. RTE_VHOST_VDPA_DEVICE_TYPE_NET or
+ * RTE_VHOST_VDPA_DEVICE_TYPE_BLK.
+ *
+ * @param path
+ *  The vhost-user socket file path
+ * @param type
+ *  Output parameter: the device type of the vdpa device
+ * @return
+ *  0 on success, -1 on failure (unregistered socket or no vdpa device)
+ */
+__rte_experimental
+int
+rte_vhost_driver_get_vdpa_dev_type(const char *path, uint32_t *type);
+
/**
* Set the feature bits the vhost-user driver supports.
*
return dev;
}
+/*
+ * Look up the vhost-user socket registered at @path and report the
+ * device type of its attached vDPA device through @type.
+ *
+ * Returns 0 on success, -1 on failure (NULL argument, unregistered
+ * socket, no vDPA device attached, or driver callback failure).
+ */
+int
+rte_vhost_driver_get_vdpa_dev_type(const char *path, uint32_t *type)
+{
+	struct vhost_user_socket *vsocket;
+	struct rte_vdpa_device *vdpa_dev;
+	uint32_t vdpa_type = 0;
+	int ret = 0;
+
+	/* Validate arguments before taking the lock; "type" is an
+	 * output parameter and would otherwise be dereferenced blindly.
+	 */
+	if (path == NULL || type == NULL)
+		return -1;
+
+	pthread_mutex_lock(&vhost_user.mutex);
+	vsocket = find_vhost_user_socket(path);
+	if (!vsocket) {
+		VHOST_LOG_CONFIG(ERR,
+				"(%s) socket file is not registered yet.\n",
+				path);
+		ret = -1;
+		goto unlock_exit;
+	}
+
+	vdpa_dev = vsocket->vdpa_dev;
+	if (!vdpa_dev) {
+		/* Socket exists but no vDPA device is attached to it. */
+		ret = -1;
+		goto unlock_exit;
+	}
+
+	if (vdpa_dev->ops->get_dev_type) {
+		ret = vdpa_dev->ops->get_dev_type(vdpa_dev, &vdpa_type);
+		if (ret) {
+			VHOST_LOG_CONFIG(ERR,
+				"(%s) failed to get vdpa dev type for socket file.\n",
+				path);
+			ret = -1;
+			goto unlock_exit;
+		}
+	} else {
+		/* Drivers that predate the get_dev_type() callback are
+		 * net devices; default to NET for them.
+		 */
+		vdpa_type = RTE_VHOST_VDPA_DEVICE_TYPE_NET;
+	}
+
+	*type = vdpa_type;
+
+unlock_exit:
+	pthread_mutex_unlock(&vhost_user.mutex);
+	return ret;
+}
+
int
rte_vhost_driver_disable_features(const char *path, uint64_t features)
{
/** Set the device configuration space */
int (*set_config)(int vid, uint8_t *config, uint32_t offset,
uint32_t size, uint32_t flags);
+
+ /** get device type: net device, blk device... */
+ int (*get_dev_type)(struct rte_vdpa_device *dev, uint32_t *type);
};
/**
rte_vhost_vring_stats_get;
rte_vhost_vring_stats_reset;
rte_vhost_async_try_dequeue_burst;
+ rte_vhost_driver_get_vdpa_dev_type;
};
INTERNAL {