dev->features = features;
if (dev->features &
- ((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
+ ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_F_VERSION_1) |
+ (1ULL << VIRTIO_F_RING_PACKED))) {
dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
dev->vhost_hlen = sizeof(struct virtio_net_hdr);
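Why packed ring belongs in this mask: packed ring operation implies VIRTIO 1.0 semantics, and under VIRTIO 1.0 the virtio-net header always carries the num_buffers field, even when VIRTIO_NET_F_MRG_RXBUF itself is not negotiated. A standalone sketch of the two layouts (mirroring the VIRTIO spec, not the DPDK headers verbatim):

/*
 * The merged-rx-buffer variant appends a 16-bit num_buffers field,
 * so it is 2 bytes longer; VIRTIO 1.0 (and hence packed ring)
 * mandates the larger layout unconditionally.
 */
#include <stdint.h>
#include <stdio.h>

struct virtio_net_hdr {
	uint8_t  flags;
	uint8_t  gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

struct virtio_net_hdr_mrg_rxbuf {
	struct virtio_net_hdr hdr;
	uint16_t num_buffers;
};

int main(void)
{
	/* Prints 10 and 12 on common ABIs. */
	printf("legacy hlen=%zu, v1/mrg hlen=%zu\n",
	       sizeof(struct virtio_net_hdr),
	       sizeof(struct virtio_net_hdr_mrg_rxbuf));
	return 0;
}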
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
+ if (!vq)
+ continue;
+
if (vq->desc || vq->avail || vq->used) {
/*
* If the memory table got updated, the ring addresses
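The added if (!vq) guard covers queue slots that were never allocated: the per-device virtqueue array can be sparse while the frontend is still configuring queues. A minimal standalone sketch of the pattern, with toy types standing in for the vhost ones:

/*
 * Toy types, not the DPDK ones: any slot in the array may still be
 * NULL when a message handler walks all rings.
 */
struct toy_vq { void *desc, *avail, *used; };

static void walk_rings(struct toy_vq **vq_array, unsigned int nr_vring)
{
	for (unsigned int i = 0; i < nr_vring; i++) {
		struct toy_vq *vq = vq_array[i];

		if (!vq)	/* slot never initialized: skip it */
			continue;

		/* vq is guaranteed non-NULL from here on */
		(void)vq->desc;
	}
}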
for (i = 0; i < num_queues; i++) {
vq = dev->virtqueue[i];
+ if (!vq)
+ continue;
+
if (vq_is_packed(dev)) {
vq->inflight_packed = addr;
vq->inflight_packed->desc_num = queue_size;
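For context, the inflight area is one shared region carved into per-queue slices, each slice recording its descriptor count. A hedged sketch of the slicing; the struct layout and names here are illustrative, not the rte_vhost definitions:

/*
 * Illustrative only: slice one mapped region into per-queue inflight
 * tables; "pervq_size" and the struct layout are assumptions.
 */
#include <stdint.h>
#include <stddef.h>

struct toy_inflight_packed {
	uint16_t desc_num;	/* queue size recorded for validation */
	/* per-descriptor inflight state would follow here */
};

static void assign_inflight(void *base, size_t pervq_size,
			    struct toy_inflight_packed **tables,
			    unsigned int num_queues, uint16_t queue_size)
{
	char *addr = base;

	for (unsigned int i = 0; i < num_queues; i++) {
		tables[i] = (struct toy_inflight_packed *)addr;
		tables[i]->desc_num = queue_size;
		addr += pervq_size;	/* next queue's slice */
	}
}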
vq->shadow_used_split = NULL;
if (vq->async_pkts_pending)
rte_free(vq->async_pkts_pending);
- if (vq->async_pending_info)
- rte_free(vq->async_pending_info);
+ if (vq->async_pkts_info)
+ rte_free(vq->async_pkts_info);
vq->async_pkts_pending = NULL;
- vq->async_pending_info = NULL;
+ vq->async_pkts_info = NULL;
}
rte_free(vq->batch_copy_elems);
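The rename aside, this cleanup is the usual free-and-NULL idiom, so a later async re-registration starts from a clean state. A sketch with toy types; note that, like free(), DPDK's rte_free() is documented as a no-op on NULL, so the surrounding if checks are defensive only:

/*
 * Toy types; the real code frees with rte_free(), which is likewise
 * NULL-safe. Clearing the pointers prevents a double free on the
 * next teardown.
 */
#include <stdlib.h>

struct toy_vq {
	void *async_pkts_pending;
	void *async_pkts_info;
};

static void drop_async_arrays(struct toy_vq *vq)
{
	free(vq->async_pkts_pending);
	vq->async_pkts_pending = NULL;

	free(vq->async_pkts_info);
	vq->async_pkts_info = NULL;
}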
"set queue enable: %d to qp idx: %d\n",
enable, index);
- if (!enable && dev->virtqueue[index]->async_registered) {
+ if (enable && dev->virtqueue[index]->async_registered) {
if (dev->virtqueue[index]->async_pkts_inflight_n) {
- VHOST_LOG_CONFIG(ERR, "failed to disable vring. "
+ VHOST_LOG_CONFIG(ERR, "failed to enable vring. "
"async inflight packets must be completed first\n");
return RTE_VHOST_MSG_RESULT_ERR;
}
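The flipped condition refuses the state change while async copies are still outstanding, since a DMA engine may still be writing into buffers described by the current ring. An illustrative guard, under the assumption that async_pkts_inflight_n counts not-yet-completed copies:

/*
 * Toy version of the check: a ring with registered async channels
 * must be drained (inflight counter at zero) before its state may
 * change; otherwise the handler returns an error to the frontend.
 */
#include <stdbool.h>

struct toy_vq {
	bool async_registered;
	int  async_pkts_inflight_n;
};

static bool vring_state_change_allowed(const struct toy_vq *vq)
{
	if (vq->async_registered && vq->async_pkts_inflight_n > 0)
		return false;	/* caller must complete inflight packets first */
	return true;
}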
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
+ if (!vq)
+ continue;
+
vhost_user_iotlb_cache_insert(vq, imsg->iova, vva,
len, imsg->perm);
for (i = 0; i < dev->nr_vring; i++) {
struct vhost_virtqueue *vq = dev->virtqueue[i];
+ if (!vq)
+ continue;
+
vhost_user_iotlb_cache_remove(vq, imsg->iova,
imsg->size);
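Both IOTLB hunks follow the same shape: each ring keeps its own translation cache, so an update or invalidation from the frontend is replayed on every allocated ring, skipping NULL slots as above. A toy single-entry cache to illustrate the insert/remove semantics; the names and the overlap rule are assumptions, not the vhost_user_iotlb_* implementation:

/*
 * Toy model: one cached translation per ring. Insert records an
 * iova -> uaddr mapping; remove invalidates the entry if its range
 * overlaps the invalidated [iova, iova + size) window.
 */
#include <stdint.h>

struct toy_iotlb_entry { uint64_t iova, uaddr, size; uint8_t perm; };

struct toy_vq {
	struct toy_iotlb_entry entry;	/* single-entry "cache" */
	int valid;
};

static void iotlb_insert(struct toy_vq *vq, uint64_t iova, uint64_t uaddr,
			 uint64_t size, uint8_t perm)
{
	vq->entry = (struct toy_iotlb_entry){ iova, uaddr, size, perm };
	vq->valid = 1;
}

static void iotlb_remove(struct toy_vq *vq, uint64_t iova, uint64_t size)
{
	/* invalidate on any overlap with the removed range */
	if (vq->valid && vq->entry.iova < iova + size &&
	    iova < vq->entry.iova + vq->entry.size)
		vq->valid = 0;
}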