struct vhost_vring_file file;
struct vhost_vring_state state;
struct vring *vring = &dev->vrings[queue_sel];
+ struct vring_packed *pq_vring = &dev->packed_vrings[queue_sel];
struct vhost_vring_addr addr = {
.index = queue_sel,
- .desc_user_addr = (uint64_t)(uintptr_t)vring->desc,
- .avail_user_addr = (uint64_t)(uintptr_t)vring->avail,
- .used_user_addr = (uint64_t)(uintptr_t)vring->used,
.log_guest_addr = 0,
.flags = 0, /* disable log */
};
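+ /*
+  * For packed rings the vring address fields are reused: the desc
+  * address carries the descriptor ring, while the avail and used
+  * addresses carry the driver and device event suppression areas.
+  */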
+ if (dev->features & (1ULL << VIRTIO_F_RING_PACKED)) {
+ addr.desc_user_addr =
+ (uint64_t)(uintptr_t)pq_vring->desc_packed;
+ addr.avail_user_addr =
+ (uint64_t)(uintptr_t)pq_vring->driver_event;
+ addr.used_user_addr =
+ (uint64_t)(uintptr_t)pq_vring->device_event;
+ } else {
+ addr.desc_user_addr = (uint64_t)(uintptr_t)vring->desc;
+ addr.avail_user_addr = (uint64_t)(uintptr_t)vring->avail;
+ addr.used_user_addr = (uint64_t)(uintptr_t)vring->used;
+ }
+
state.index = queue_sel;
state.num = vring->num;
dev->ops->send_request(dev, VHOST_USER_SET_VRING_NUM, &state);
if (!in_order)
dev->unsupported_features |= (1ull << VIRTIO_F_IN_ORDER);
- if (packed_vq) {
- if (cq) {
- PMD_INIT_LOG(ERR, "control vq not supported yet with "
- "packed virtqueues\n");
- return -1;
- }
- } else {
+ if (!packed_vq)
dev->unsupported_features |= (1ull << VIRTIO_F_RING_PACKED);
- }
if (dev->mac_specified)
dev->frontend_features |= (1ull << VIRTIO_NET_F_MAC);
return n_descs;
}
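+
+/*
+ * A packed descriptor is available when its AVAIL flag matches the
+ * ring's wrap counter and its USED flag does not. VRING_DESC_F_AVAIL(b)
+ * and VRING_DESC_F_USED(b) are assumed to expand to bit 7 and bit 15 of
+ * the flags field ((b) << 7 and (b) << 15), matching the packed ring
+ * layout in the virtio 1.1 spec.
+ */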
+static inline int
+desc_is_avail(struct vring_packed_desc *desc, bool wrap_counter)
+{
+ return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL(1)) &&
+ wrap_counter != !!(desc->flags & VRING_DESC_F_USED(1));
+}
+
+static uint32_t
+virtio_user_handle_ctrl_msg_pq(struct virtio_user_dev *dev,
+ struct vring_packed *vring,
+ uint16_t idx_hdr)
+{
+ struct virtio_net_ctrl_hdr *hdr;
+ virtio_net_ctrl_ack status = ~0;
+ uint16_t idx_data, idx_status;
+ /* initialize to one, header is first */
+ uint32_t n_descs = 1;
+
+ /* locate desc for header, data, and status */
+ idx_data = idx_hdr + 1;
+ if (idx_data >= dev->queue_size)
+ idx_data -= dev->queue_size;
+
+ n_descs++;
+
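+ /*
+  * The status byte lives in the last descriptor of the chain: walk the
+  * VRING_DESC_F_NEXT links from the data descriptor, counting every
+  * descriptor consumed on the way.
+  */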
+ idx_status = idx_data;
+ while (vring->desc_packed[idx_status].flags & VRING_DESC_F_NEXT) {
+ idx_status++;
+ if (idx_status >= dev->queue_size)
+ idx_status -= dev->queue_size;
+ n_descs++;
+ }
+
+ hdr = (void *)(uintptr_t)vring->desc_packed[idx_hdr].addr;
+ if (hdr->class == VIRTIO_NET_CTRL_MQ &&
+ hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
+ uint16_t queues;
+
+ queues = *(uint16_t *)(uintptr_t)
+ vring->desc_packed[idx_data].addr;
+ status = virtio_user_handle_mq(dev, queues);
+ }
+
+ /* Update status */
+ *(virtio_net_ctrl_ack *)(uintptr_t)
+ vring->desc_packed[idx_status].addr = status;
+
+ return n_descs;
+}
+
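+/*
+ * Device-side handler for the packed control queue: consume each
+ * available request chain, service it, then return the descriptors by
+ * flipping their AVAIL/USED flags to the current used wrap counter.
+ */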
+void
+virtio_user_handle_cq_packed(struct virtio_user_dev *dev, uint16_t queue_idx)
+{
+ struct virtio_user_queue *vq = &dev->packed_queues[queue_idx];
+ struct vring_packed *vring = &dev->packed_vrings[queue_idx];
+ uint16_t id, n_descs;
+
+ while (desc_is_avail(&vring->desc_packed[vq->used_idx],
+ vq->used_wrap_counter)) {
+ id = vring->desc_packed[vq->used_idx].id;
+
+ n_descs = virtio_user_handle_ctrl_msg_pq(dev, vring, id);
+
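+ /*
+  * Mark every descriptor of the chain as used: writing the same wrap
+  * counter value into both AVAIL and USED signals completion, and the
+  * counter flips each time used_idx wraps past the end of the ring.
+  */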
+ do {
+ vring->desc_packed[vq->used_idx].flags =
+ VRING_DESC_F_AVAIL(vq->used_wrap_counter) |
+ VRING_DESC_F_USED(vq->used_wrap_counter);
+ if (++vq->used_idx >= dev->queue_size) {
+ vq->used_idx -= dev->queue_size;
+ vq->used_wrap_counter ^= 1;
+ }
+ n_descs--;
+ } while (n_descs);
+ }
+}
+
void
virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx)
{
#include "../virtio_ring.h"
#include "vhost.h"
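+/*
+ * Per-queue shadow state for packed rings: the device-side used index
+ * plus the wrap counters that give the AVAIL/USED flag bits their
+ * meaning.
+ */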
+struct virtio_user_queue {
+ uint16_t used_idx;
+ bool avail_wrap_counter;
+ bool used_wrap_counter;
+};
+
struct virtio_user_dev {
/* for vhost_user backend */
int vhostfd;
uint16_t port_id;
uint8_t mac_addr[ETHER_ADDR_LEN];
char path[PATH_MAX];
- struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
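+ /* Only one ring layout is live per device, so the split and packed
+  * ring metadata can share storage.
+  */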
+ union {
+ struct vring vrings[VIRTIO_MAX_VIRTQUEUES];
+ struct vring_packed packed_vrings[VIRTIO_MAX_VIRTQUEUES];
+ };
+ struct virtio_user_queue packed_queues[VIRTIO_MAX_VIRTQUEUES];
+
struct virtio_user_backend_ops *ops;
pthread_mutex_t mutex;
bool started;
int mrg_rxbuf, int in_order, int packed_vq);
void virtio_user_dev_uninit(struct virtio_user_dev *dev);
void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx);
+void virtio_user_handle_cq_packed(struct virtio_user_dev *dev,
+ uint16_t queue_idx);
uint8_t virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs);
#endif
return dev->queue_size;
}
-static int
-virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+static void
+virtio_user_setup_queue_packed(struct virtqueue *vq,
+ struct virtio_user_dev *dev)
+{
+ uint16_t queue_idx = vq->vq_queue_index;
+ struct vring_packed *vring;
+ uint64_t desc_addr;
+ uint64_t avail_addr;
+ uint64_t used_addr;
+ uint16_t i;
+
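+ /*
+  * The ring memory is laid out contiguously: descriptor ring first,
+  * then the driver event suppression struct, then the device event
+  * struct aligned up to VIRTIO_PCI_VRING_ALIGN.
+  */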
+ vring = &dev->packed_vrings[queue_idx];
+ desc_addr = (uintptr_t)vq->vq_ring_virt_mem;
+ avail_addr = desc_addr + vq->vq_nentries *
+ sizeof(struct vring_packed_desc);
+ used_addr = RTE_ALIGN_CEIL(avail_addr +
+ sizeof(struct vring_packed_desc_event),
+ VIRTIO_PCI_VRING_ALIGN);
+ vring->num = vq->vq_nentries;
+ vring->desc_packed = (void *)(uintptr_t)desc_addr;
+ vring->driver_event = (void *)(uintptr_t)avail_addr;
+ vring->device_event = (void *)(uintptr_t)used_addr;
+ dev->packed_queues[queue_idx].avail_wrap_counter = true;
+ dev->packed_queues[queue_idx].used_wrap_counter = true;
+
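+ /*
+  * A descriptor whose AVAIL and USED flags are equal is not available
+  * to the device, so the ring starts out empty.
+  */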
+ for (i = 0; i < vring->num; i++) {
+ vring->desc_packed[i].flags = VRING_DESC_F_USED(1) |
+ VRING_DESC_F_AVAIL(1);
+ }
+}
+
+static void
+virtio_user_setup_queue_split(struct virtqueue *vq, struct virtio_user_dev *dev)
{
- struct virtio_user_dev *dev = virtio_user_get_dev(hw);
uint16_t queue_idx = vq->vq_queue_index;
uint64_t desc_addr, avail_addr, used_addr;
dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr;
dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr;
dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr;
+}
+
+static int
+virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
+{
+ struct virtio_user_dev *dev = virtio_user_get_dev(hw);
+
+ if (vtpci_packed_queue(hw))
+ virtio_user_setup_queue_packed(vq, dev);
+ else
+ virtio_user_setup_queue_split(vq, dev);
return 0;
}
struct virtio_user_dev *dev = virtio_user_get_dev(hw);
if (hw->cvq && (hw->cvq->vq == vq)) {
- virtio_user_handle_cq(dev, vq->vq_queue_index);
+ if (vtpci_packed_queue(vq->hw))
+ virtio_user_handle_cq_packed(dev, vq->vq_queue_index);
+ else
+ virtio_user_handle_cq(dev, vq->vq_queue_index);
return;
}