#include <linux/virtio_net.h>
#include <stdbool.h>
+#include <rte_eal_paging.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_bus_pci.h>
#include <rte_vhost.h>
#include <rte_vdpa.h>
-#include <rte_vdpa_dev.h>
+#include <vdpa_driver.h>
#include <rte_vfio.h>
#include <rte_spinlock.h>
#include <rte_log.h>
#include "base/ifcvf.h"
-RTE_LOG_REGISTER(ifcvf_vdpa_logtype, pmd.net.ifcvf_vdpa, NOTICE);
+RTE_LOG_REGISTER(ifcvf_vdpa_logtype, pmd.vdpa.ifcvf, NOTICE);
#define DRV_LOG(level, fmt, args...) \
rte_log(RTE_LOG_ ## level, ifcvf_vdpa_logtype, \
"IFCVF %s(): " fmt "\n", __func__, ##args)
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#endif
-
#define IFCVF_USED_RING_LEN(size) \
((size) * sizeof(struct vring_used_elem) + sizeof(uint16_t) * 3)
#define IFCVF_VDPA_MODE "vdpa"
#define IFCVF_SW_FALLBACK_LM "sw-live-migration"
+#define THREAD_NAME_LEN 16
+
static const char * const ifcvf_valid_arguments[] = {
IFCVF_VDPA_MODE,
IFCVF_SW_FALLBACK_LM,
struct ifcvf_internal {
struct rte_pci_device *pdev;
struct ifcvf_hw hw;
+ int configured;
int vfio_container_fd;
int vfio_group_fd;
int vfio_dev_fd;
if (rte_pci_map_device(dev))
goto err;
- internal->vfio_dev_fd = dev->intr_handle.vfio_dev_fd;
+ internal->vfio_dev_fd = rte_intr_dev_fd_get(dev->intr_handle);
for (i = 0; i < RTE_MIN(PCI_MAX_RESOURCE, IFCVF_PCI_MAX_RESOURCE);
i++) {
}
static int
-ifcvf_dma_map(struct ifcvf_internal *internal, int do_map)
+ifcvf_dma_map(struct ifcvf_internal *internal, bool do_map)
{
uint32_t i;
int ret;
}
exit:
- if (mem)
- free(mem);
+ free(mem);
return ret;
}
}
exit:
- if (mem)
- free(mem);
+ free(mem);
return gpa;
}
irq_set->index = VFIO_PCI_MSIX_IRQ_INDEX;
irq_set->start = 0;
fd_ptr = (int *)&irq_set->data;
- fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] = internal->pdev->intr_handle.fd;
+ fd_ptr[RTE_INTR_VEC_ZERO_OFFSET] =
+ rte_intr_fd_get(internal->pdev->intr_handle);
for (i = 0; i < nr_vring; i++)
internal->intr_fd[i] = -1;
static int
setup_notify_relay(struct ifcvf_internal *internal)
{
+	/* Thread-name buffer; THREAD_NAME_LEN (16) matches the pthread
+	 * name length limit.
+	 */
+	char name[THREAD_NAME_LEN];
	int ret;
-	ret = pthread_create(&internal->tid, NULL, notify_relay,
-			(void *)internal);
-	if (ret) {
+	/* Name the relay thread after the vhost vid so it is identifiable
+	 * in ps/top; rte_ctrl_thread_create() also keeps control threads
+	 * off the EAL data-path lcores, unlike a raw pthread_create().
+	 */
+	snprintf(name, sizeof(name), "ifc-notify-%d", internal->vid);
+	ret = rte_ctrl_thread_create(&internal->tid, name, NULL, notify_relay,
+			(void *)internal);
+	if (ret != 0) {
		DRV_LOG(ERR, "failed to create notify relay pthread.");
		return -1;
	}
+
	return 0;
}
if (!rte_atomic32_read(&internal->running) &&
(rte_atomic32_read(&internal->started) &&
rte_atomic32_read(&internal->dev_attached))) {
- ret = ifcvf_dma_map(internal, 1);
+ ret = ifcvf_dma_map(internal, true);
if (ret)
goto err;
- ret = vdpa_enable_vfio_intr(internal, 0);
+ ret = vdpa_enable_vfio_intr(internal, false);
if (ret)
goto err;
if (ret)
goto err;
- ret = ifcvf_dma_map(internal, 0);
+ ret = ifcvf_dma_map(internal, false);
if (ret)
goto err;
for (i = 0; i < nr_vring; i++) {
rte_vhost_get_vhost_vring(vid, i, &vq);
- size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
- PAGE_SIZE);
- vring_buf = rte_zmalloc("ifcvf", size, PAGE_SIZE);
+ size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
+ rte_mem_page_size());
+ vring_buf = rte_zmalloc("ifcvf", size, rte_mem_page_size());
vring_init(&internal->m_vring[i], vq.size, vring_buf,
- PAGE_SIZE);
+ rte_mem_page_size());
ret = rte_vfio_container_dma_map(internal->vfio_container_fd,
(uint64_t)(uintptr_t)vring_buf, m_vring_iova, size);
error:
for (i = 0; i < nr_vring; i++)
- if (internal->m_vring[i].desc)
- rte_free(internal->m_vring[i].desc);
+ rte_free(internal->m_vring[i].desc);
return -1;
}
len = IFCVF_USED_RING_LEN(vq.size);
rte_vhost_log_used_vring(vid, i, 0, len);
- size = RTE_ALIGN_CEIL(vring_size(vq.size, PAGE_SIZE),
- PAGE_SIZE);
+ size = RTE_ALIGN_CEIL(vring_size(vq.size, rte_mem_page_size()),
+ rte_mem_page_size());
rte_vfio_container_dma_unmap(internal->vfio_container_fd,
(uint64_t)(uintptr_t)internal->m_vring[i].desc,
m_vring_iova, size);
static int
setup_vring_relay(struct ifcvf_internal *internal)
{
+	/* Thread-name buffer; THREAD_NAME_LEN (16) matches the pthread
+	 * name length limit.
+	 */
+	char name[THREAD_NAME_LEN];
	int ret;
-	ret = pthread_create(&internal->tid, NULL, vring_relay,
-			(void *)internal);
-	if (ret) {
+	/* Spawn the vring relay as a named EAL control thread (named per
+	 * vhost vid) instead of an anonymous pthread, keeping it off the
+	 * data-path lcores and visible in tooling.
+	 */
+	snprintf(name, sizeof(name), "ifc-vring-%d", internal->vid);
+	ret = rte_ctrl_thread_create(&internal->tid, name, NULL, vring_relay,
+			(void *)internal);
+	if (ret != 0) {
		DRV_LOG(ERR, "failed to create ring relay pthread.");
		return -1;
	}
+
	return 0;
}
goto error;
/* set up interrupt for interrupt relay */
- ret = vdpa_enable_vfio_intr(internal, 1);
+ ret = vdpa_enable_vfio_intr(internal, true);
if (ret)
goto unmap;
unset_intr:
vdpa_disable_vfio_intr(internal);
unmap:
- ifcvf_dma_map(internal, 0);
+ ifcvf_dma_map(internal, false);
error:
return -1;
}
DRV_LOG(NOTICE, "vDPA (%s): software relay is used.",
vdev->device->name);
+ internal->configured = 1;
return 0;
}
vdpa_disable_vfio_intr(internal);
/* unset DMA map for guest memory */
- ifcvf_dma_map(internal, 0);
+ ifcvf_dma_map(internal, false);
internal->sw_fallback_running = false;
} else {
update_datapath(internal);
}
+ internal->configured = 0;
return 0;
}
1ULL << VHOST_USER_PROTOCOL_F_SLAVE_REQ | \
1ULL << VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD | \
1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER | \
- 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD)
+ 1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD | \
+ 1ULL << VHOST_USER_PROTOCOL_F_STATUS)
static int
ifcvf_get_protocol_features(struct rte_vdpa_device *vdev, uint64_t *features)
{
return 0;
}
+/*
+ * vDPA driver op: set the enable state of a single virtqueue.
+ *
+ * @vid:   vhost device id the request came from
+ * @vring: virtqueue index, valid range [0, max_queues * 2) (rx/tx pairs)
+ * @state: nonzero to enable the ring, 0 to disable it
+ *
+ * Returns 0 on success, -1 on an unknown device or bad ring index, or
+ * the error code from the vfio interrupt (un)configuration helpers.
+ */
+static int
+ifcvf_set_vring_state(int vid, int vring, int state)
+{
+	struct rte_vdpa_device *vdev;
+	struct internal_list *list;
+	struct ifcvf_internal *internal;
+	struct ifcvf_hw *hw;
+	struct ifcvf_pci_common_cfg *cfg;
+	int ret = 0;
+
+	/* Map the vhost vid back to this driver's per-device state. */
+	vdev = rte_vhost_get_vdpa_device(vid);
+	list = find_internal_resource_by_vdev(vdev);
+	if (list == NULL) {
+		DRV_LOG(ERR, "Invalid vDPA device: %p", vdev);
+		return -1;
+	}
+
+	internal = list->internal;
+	if (vring < 0 || vring >= internal->max_queues * 2) {
+		DRV_LOG(ERR, "Vring index %d not correct", vring);
+		return -1;
+	}
+
+	hw = &internal->hw;
+	/* Before dev_conf has completed (configured == 0) only record the
+	 * requested state; the hardware is not touched yet.
+	 */
+	if (!internal->configured)
+		goto exit;
+
+	/* Select the queue, then write its enable bit, through the virtio
+	 * PCI common config capability.
+	 */
+	cfg = hw->common_cfg;
+	IFCVF_WRITE_REG16(vring, &cfg->queue_select);
+	IFCVF_WRITE_REG16(!!state, &cfg->queue_enable);
+
+	/* Reconfigure vfio MSI-X routing only on an actual transition of
+	 * this ring's cached enable state.
+	 * NOTE(review): vdpa_disable_vfio_intr()/vdpa_enable_vfio_intr()
+	 * appear to act on the whole device, not just @vring -- presumably
+	 * intentional (full MSI-X re-arm); confirm against the helpers.
+	 */
+	if (!state && hw->vring[vring].enable) {
+		ret = vdpa_disable_vfio_intr(internal);
+		if (ret)
+			return ret;
+	}
+
+	if (state && !hw->vring[vring].enable) {
+		ret = vdpa_enable_vfio_intr(internal, false);
+		if (ret)
+			return ret;
+	}
+
+exit:
+	/* Cache the new state so later transitions can be detected. */
+	hw->vring[vring].enable = !!state;
+	return 0;
+}
+
static struct rte_vdpa_dev_ops ifcvf_ops = {
.get_queue_num = ifcvf_get_queue_num,
.get_features = ifcvf_get_vdpa_features,
.get_protocol_features = ifcvf_get_protocol_features,
.dev_conf = ifcvf_dev_config,
.dev_close = ifcvf_dev_close,
- .set_vring_state = NULL,
+ .set_vring_state = ifcvf_set_vring_state,
.set_features = ifcvf_set_features,
.migration_done = NULL,
.get_vfio_group_fd = ifcvf_get_vfio_group_fd,
goto error;
}
+ internal->configured = 0;
internal->max_queues = IFCVF_MAX_QUEUES;
features = ifcvf_get_features(&internal->hw);
internal->features = (features &