* Copyright(c) 2020-2021 Xilinx, Inc.
*/
+#include <pthread.h>
+#include <unistd.h>
#include <sys/ioctl.h>
#include <rte_errno.h>
return 0;
}
+/*
+ * Thread entry point for host-notifier configuration.
+ *
+ * @arg: struct sfc_vdpa_ops_data * for the device (NULL tolerated).
+ *
+ * Serializes on the adapter lock, then asks vhost to configure host
+ * notifiers for all queues of ops_data->vid.  Failure is only logged
+ * (info level): without notifiers the software relay path is used, so
+ * this is non-fatal.  Always returns NULL.
+ */
+static void *
+sfc_vdpa_notify_ctrl(void *arg)
+{
+ struct sfc_vdpa_ops_data *ops_data;
+ int vid;
+
+ ops_data = arg;
+ if (ops_data == NULL)
+ return NULL;
+
+ sfc_vdpa_adapter_lock(ops_data->dev_handle);
+
+ vid = ops_data->vid;
+
+ if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
+ sfc_vdpa_info(ops_data->dev_handle,
+ "vDPA (%s): Notifier could not get configured",
+ ops_data->vdpa_dev->device->name);
+
+ sfc_vdpa_adapter_unlock(ops_data->dev_handle);
+
+ return NULL;
+}
+
+/*
+ * Start sfc_vdpa_notify_ctrl() in a dedicated thread.
+ *
+ * Records success in ops_data->is_notify_thread_started so that the
+ * close path knows whether a thread exists to cancel and join.
+ *
+ * Returns 0 on success, -1 if pthread_create() failed.
+ */
+static int
+sfc_vdpa_setup_notify_ctrl(struct sfc_vdpa_ops_data *ops_data)
+{
+ int ret;
+
+ /* Cleared first so an early failure leaves a consistent flag. */
+ ops_data->is_notify_thread_started = false;
+
+ /*
+ * Use rte_vhost_host_notifier_ctrl in a thread to avoid
+ * dead lock scenario when multiple VFs are used in single vdpa
+ * application and multiple VFs are passed to a single VM.
+ */
+ ret = pthread_create(&ops_data->notify_tid, NULL,
+ sfc_vdpa_notify_ctrl, ops_data);
+ if (ret != 0) {
+ sfc_vdpa_err(ops_data->dev_handle,
+ "failed to create notify_ctrl thread: %s",
+ rte_strerror(ret));
+ return -1;
+ }
+ ops_data->is_notify_thread_started = true;
+
+ return 0;
+}
+
static int
sfc_vdpa_dev_config(int vid)
{
if (rc != 0)
goto fail_vdpa_start;
- sfc_vdpa_adapter_unlock(ops_data->dev_handle);
+ /*
+ * Notifier setup now runs in a helper thread; it takes the adapter
+ * lock itself, so it proceeds only after the unlock below.
+ */
+ rc = sfc_vdpa_setup_notify_ctrl(ops_data);
+ if (rc != 0)
+ goto fail_vdpa_notify;
- sfc_vdpa_log_init(ops_data->dev_handle, "vhost notifier ctrl");
- if (rte_vhost_host_notifier_ctrl(vid, RTE_VHOST_QUEUE_ALL, true) != 0)
- sfc_vdpa_info(ops_data->dev_handle,
- "vDPA (%s): software relay for notify is used.",
- vdpa_dev->device->name);
+ sfc_vdpa_adapter_unlock(ops_data->dev_handle);
sfc_vdpa_log_init(ops_data->dev_handle, "done");
return 0;
+/* Unwind a successful sfc_vdpa_start() when thread creation failed. */
+fail_vdpa_notify:
+ sfc_vdpa_stop(ops_data);
+
fail_vdpa_start:
sfc_vdpa_close(ops_data);
static int
sfc_vdpa_dev_close(int vid)
{
+ int ret;
struct rte_vdpa_device *vdpa_dev;
struct sfc_vdpa_ops_data *ops_data;
}
sfc_vdpa_adapter_lock(ops_data->dev_handle);
+ /*
+ * Tear down the notify_ctrl thread before stopping the device.
+ * Cancel/join failures are logged but not fatal: close must proceed.
+ *
+ * NOTE(review): we hold the adapter lock here while the notify thread
+ * may be blocked acquiring that same lock; presumably cancellation
+ * still terminates it before pthread_join() — TODO confirm there is
+ * no join-under-lock deadlock window.
+ */
+ if (ops_data->is_notify_thread_started == true) {
+ void *status;
+ ret = pthread_cancel(ops_data->notify_tid);
+ if (ret != 0) {
+ sfc_vdpa_err(ops_data->dev_handle,
+ "failed to cancel notify_ctrl thread: %s",
+ rte_strerror(ret));
+ }
+
+ ret = pthread_join(ops_data->notify_tid, &status);
+ if (ret != 0) {
+ sfc_vdpa_err(ops_data->dev_handle,
+ "failed to join terminated notify_ctrl thread: %s",
+ rte_strerror(ret));
+ }
+ }
+ ops_data->is_notify_thread_started = false;
sfc_vdpa_stop(ops_data);
sfc_vdpa_close(ops_data);
return vfio_dev_fd;
}
+/*
+ * vDPA .get_notify_area callback.
+ *
+ * @vid:    vhost device id.
+ * @qid:    virtqueue index; must be below 2 * max_queue_count.
+ * @offset: out - byte offset of the queue doorbell within the ESB BAR
+ *          region (region offset + per-queue doorbell offset).
+ * @size:   out - notify area size, one system page.
+ *
+ * Returns 0 on success, a negative/efx error code on failure.
+ */
+static int
+sfc_vdpa_get_notify_area(int vid, int qid, uint64_t *offset, uint64_t *size)
+{
+ int ret;
+ efx_nic_t *nic;
+ int vfio_dev_fd;
+ efx_rc_t rc;
+ unsigned int bar_offset;
+ struct rte_vdpa_device *vdpa_dev;
+ struct sfc_vdpa_ops_data *ops_data;
+ struct vfio_region_info reg = { .argsz = sizeof(reg) };
+ const efx_nic_cfg_t *encp;
+ int max_vring_cnt;
+ int64_t len;
+ void *dev;
+
+ vdpa_dev = rte_vhost_get_vdpa_device(vid);
+
+ ops_data = sfc_vdpa_get_data_by_dev(vdpa_dev);
+ if (ops_data == NULL)
+ return -1;
+
+ dev = ops_data->dev_handle;
+
+ vfio_dev_fd = sfc_vdpa_adapter_by_dev_handle(dev)->vfio_dev_fd;
+ /* Each queue pair contributes two vrings (RX and TX). */
+ max_vring_cnt =
+ (sfc_vdpa_adapter_by_dev_handle(dev)->max_queue_count * 2);
+
+ nic = sfc_vdpa_adapter_by_dev_handle(dev)->nic;
+ encp = efx_nic_cfg_get(nic);
+
+ if (qid >= max_vring_cnt) {
+ sfc_vdpa_err(dev, "invalid qid : %d", qid);
+ return -1;
+ }
+
+ if (ops_data->vq_cxt[qid].enable != B_TRUE) {
+ sfc_vdpa_err(dev, "vq is not enabled");
+ return -1;
+ }
+
+ /* Doorbell offset is relative to the start of the ESB BAR region. */
+ rc = efx_virtio_get_doorbell_offset(ops_data->vq_cxt[qid].vq,
+ &bar_offset);
+ if (rc != 0) {
+ sfc_vdpa_err(dev, "failed to get doorbell offset: %s",
+ rte_strerror(rc));
+ return rc;
+ }
+
+ reg.index = sfc_vdpa_adapter_by_dev_handle(dev)->mem_bar.esb_rid;
+ ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
+ if (ret != 0) {
+ sfc_vdpa_err(dev, "could not get device region info: %s",
+ strerror(errno));
+ return ret;
+ }
+
+ *offset = reg.offset + bar_offset;
+
+ /*
+ * The doorbell occupies half a VI window; the notify area must be
+ * at least one page so it can be mmap()ed by vhost.
+ */
+ len = (1U << encp->enc_vi_window_shift) / 2;
+ if (len >= sysconf(_SC_PAGESIZE)) {
+ *size = sysconf(_SC_PAGESIZE);
+ } else {
+ sfc_vdpa_err(dev, "invalid VI window size : 0x%" PRIx64, len);
+ return -1;
+ }
+
+ sfc_vdpa_info(dev, "vDPA ops get_notify_area :: offset : 0x%" PRIx64,
+ *offset);
+
+ return 0;
+}
+
static struct rte_vdpa_dev_ops sfc_vdpa_ops = {
.get_queue_num = sfc_vdpa_get_queue_num,
.get_features = sfc_vdpa_get_features,
.set_vring_state = sfc_vdpa_set_vring_state,
.set_features = sfc_vdpa_set_features,
.get_vfio_device_fd = sfc_vdpa_get_vfio_device_fd,
+ /* Expose the per-queue doorbell window so vhost can map it for notify. */
+ .get_notify_area = sfc_vdpa_get_notify_area,
};
struct sfc_vdpa_ops_data *