return -1;
}
- len += descs[idx].len;
+ dlen = descs[idx].len;
+ len += dlen;
if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
- descs[idx].addr, descs[idx].len,
+ descs[idx].addr, dlen,
perm))) {
free_ind_table(idesc);
return -1;
}
- *len += descs[i].len;
+ dlen = descs[i].len;
+ *len += dlen;
if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
- descs[i].addr, descs[i].len,
+ descs[i].addr, dlen,
perm)))
return -1;
}
bool wrap_counter = vq->avail_wrap_counter;
struct vring_packed_desc *descs = vq->desc_packed;
uint16_t vec_id = *vec_idx;
+ uint64_t dlen;
if (avail_idx < vq->last_avail_idx)
wrap_counter ^= 1;
len, perm) < 0))
return -1;
} else {
- *len += descs[avail_idx].len;
+ dlen = descs[avail_idx].len;
+ *len += dlen;
if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id,
descs[avail_idx].addr,
- descs[avail_idx].len,
+ dlen,
perm)))
return -1;
}
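
The same hardening pattern is applied in all three descriptor-walk paths shown above (split ring, packed indirect, and packed direct): the descriptor length is loaded from the guest-shared ring exactly once into a local `dlen`, and that cached value feeds both the length accounting and the `map_one_desc()` call. Each touched function also gains a `uint64_t dlen;` declaration, as shown above for the packed path. Re-reading `descs[...].len` would let a malicious guest change the value between the check and the use. A minimal sketch of the hazard, with a hypothetical `map_range()` standing in for the real mapping helper:

    #include <stdint.h>

    struct desc { uint64_t addr; uint32_t len; };

    extern int map_range(uint64_t addr, uint32_t len); /* hypothetical */

    /* Racy: two loads of d->len; the guest may grow the value in
     * between, so more bytes get mapped than were accounted for. */
    static int map_desc_racy(volatile struct desc *d, uint32_t *total)
    {
        *total += d->len;
        return map_range(d->addr, d->len);
    }

    /* Fixed: a single load; every consumer sees the same value. */
    static int map_desc_safe(volatile struct desc *d, uint32_t *total)
    {
        uint32_t dlen = d->len;

        *total += dlen;
        return map_range(d->addr, dlen);
    }
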
hdr_mbuf = m;
hdr_addr = buf_addr;
- if (unlikely(buf_len < dev->vhost_hlen))
+ if (unlikely(buf_len < dev->vhost_hlen)) {
+ memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
hdr = &tmp_hdr;
- else
+ } else
hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
hdr_mbuf = m;
hdr_addr = buf_addr;
- if (unlikely(buf_len < dev->vhost_hlen))
+ if (unlikely(buf_len < dev->vhost_hlen)) {
+ memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
hdr = &tmp_hdr;
- else
+ } else
hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
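
This fix lands twice, once in the split enqueue path and once in the packed one. When the first guest buffer is too small to hold the full virtio-net header, the header is staged in the stack-allocated `tmp_hdr` and copied out to the descriptor chain later. The struct was previously left uninitialized, and helpers in this file such as `ASSIGN_UNLESS_EQUAL()` read a field before conditionally writing it, so stale stack bytes could end up copied to the guest. A small sketch of why the `memset()` matters, using a simplified stand-in for the header type:

    #include <stdint.h>
    #include <string.h>

    /* Simplified stand-in for struct virtio_net_hdr_mrg_rxbuf. */
    struct vnet_hdr {
        uint8_t flags;
        uint8_t gso_type;
        uint16_t csum_start;
        uint16_t num_buffers;
    };

    /* Write-if-different helper in the style of ASSIGN_UNLESS_EQUAL:
     * it reads the destination first, so the destination must already
     * hold a defined value. */
    #define ASSIGN_UNLESS_EQUAL(var, val) do { \
        if ((var) != (val))                    \
            (var) = (val);                     \
    } while (0)

    void build_hdr(struct vnet_hdr *out, int no_offload)
    {
        struct vnet_hdr tmp;

        memset(&tmp, 0, sizeof(tmp)); /* without this, the reads inside
                                       * ASSIGN_UNLESS_EQUAL touch
                                       * uninitialized stack memory */
        if (no_offload) {
            ASSIGN_UNLESS_EQUAL(tmp.flags, 0);
            ASSIGN_UNLESS_EQUAL(tmp.gso_type, 0);
        }
        *out = tmp; /* the real code copies this into guest memory */
    }
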
rte_spinlock_lock(&vq->access_lock);
- if (unlikely(vq->enabled == 0))
+ if (unlikely(!vq->enabled))
goto out_access_unlock;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
- if (unlikely(vq->access_ok == 0))
+ if (unlikely(!vq->access_ok))
if (unlikely(vring_translate(dev, vq) < 0))
goto out;
rte_spinlock_lock(&vq->access_lock);
- if (unlikely(vq->enabled == 0 || !vq->async_registered))
+ if (unlikely(!vq->enabled || !vq->async_registered))
goto out_access_unlock;
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
- if (unlikely(vq->access_ok == 0))
+ if (unlikely(!vq->access_ok))
if (unlikely(vring_translate(dev, vq) < 0))
goto out;
{
bool wrap = vq->avail_wrap_counter;
struct vring_packed_desc *descs = vq->desc_packed;
- struct virtio_net_hdr *hdr;
uint64_t lens[PACKED_BATCH_SIZE];
uint64_t buf_lens[PACKED_BATCH_SIZE];
uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
}
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
- pkts[i]->pkt_len = descs[avail_idx + i].len - buf_offset;
+ pkts[i]->pkt_len = lens[i] - buf_offset;
pkts[i]->data_len = pkts[i]->pkt_len;
ids[i] = descs[avail_idx + i].id;
}
- if (virtio_net_with_host_offload(dev)) {
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
- hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
- vhost_dequeue_offload(hdr, pkts[i]);
- }
- }
-
return 0;
free_buf:
{
uint16_t avail_idx = vq->last_avail_idx;
uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ struct virtio_net_hdr *hdr;
uintptr_t desc_addrs[PACKED_BATCH_SIZE];
uint16_t ids[PACKED_BATCH_SIZE];
uint16_t i;
(void *)(uintptr_t)(desc_addrs[i] + buf_offset),
pkts[i]->pkt_len);
+ if (virtio_net_with_host_offload(dev)) {
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
+ vhost_dequeue_offload(hdr, pkts[i]);
+ }
+ }
+
if (virtio_net_is_inorder(dev))
vhost_shadow_dequeue_batch_packed_inorder(vq,
ids[PACKED_BATCH_SIZE - 1]);
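
The batch dequeue path gets the same read-once treatment: `pkts[i]->pkt_len` is now computed from `lens[i]`, the snapshot of the descriptor lengths taken and validated earlier in `vhost_reserve_avail_batch_packed()`, instead of re-reading `descs[avail_idx + i].len` from the shared ring. The offload-header parsing moves, together with its `hdr` declaration, out of the reservation helper and into `virtio_dev_tx_batch_packed()`, after the payload copy. A sketch of the snapshot-validate-reuse pattern, assuming simplified types and names:

    #include <stdint.h>

    #define BATCH 4U

    struct pdesc { uint64_t addr; uint32_t len; uint16_t id; uint16_t flags; };

    int reserve_batch(volatile struct pdesc *descs, uint16_t avail_idx,
                      uint32_t hdr_len, uint64_t lens[BATCH],
                      uint32_t pkt_len[BATCH])
    {
        uint16_t i;

        /* One load per descriptor; the shared ring is not read again. */
        for (i = 0; i < BATCH; i++)
            lens[i] = descs[avail_idx + i].len;

        /* Validate the snapshot... */
        for (i = 0; i < BATCH; i++)
            if (lens[i] <= hdr_len)
                return -1;

        /* ...and derive packet lengths from the same snapshot. */
        for (i = 0; i < BATCH; i++)
            pkt_len[i] = lens[i] - hdr_len;

        return 0;
    }
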
if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
return 0;
- if (unlikely(vq->enabled == 0)) {
+ if (unlikely(!vq->enabled)) {
count = 0;
goto out_access_unlock;
}
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
- if (unlikely(vq->access_ok == 0))
+ if (unlikely(!vq->access_ok))
if (unlikely(vring_translate(dev, vq) < 0)) {
count = 0;
goto out;