sizeof(vq->used->idx));
}
-static __rte_always_inline void
-async_flush_shadow_used_ring_split(struct virtio_net *dev,
- struct vhost_virtqueue *vq)
-{
- uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
-
- if (used_idx + vq->shadow_used_idx <= vq->size) {
- do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
- vq->shadow_used_idx);
- } else {
- uint16_t size;
-
- /* update used ring interval [used_idx, vq->size] */
- size = vq->size - used_idx;
- do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
-
- /* update the left half used ring interval [0, left_size] */
- do_flush_shadow_used_ring_split(dev, vq, 0, size,
- vq->shadow_used_idx - size);
- }
-
- vq->last_used_idx += vq->shadow_used_idx;
- vq->shadow_used_idx = 0;
-}
-
static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint32_t len)
used_idx -= vq->size;
}
- rte_smp_wmb();
+ /* The ordering for storing desc flags needs to be enforced. */
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
for (i = 0; i < vq->shadow_used_idx; i++) {
uint16_t flags;
struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
- rte_smp_wmb();
- vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
+ /* The desc flags field is the synchronization point for the virtio packed vring. */
+ __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
+ used_elem->flags, __ATOMIC_RELEASE);
vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
sizeof(struct vring_packed_desc),
vq->desc_packed[vq->last_used_idx + i].len = lens[i];
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
vq->desc_packed[vq->last_used_idx + i].flags = flags;
vq->desc_packed[vq->last_used_idx + i].len = 0;
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
vq->desc_packed[vq->last_used_idx + i].flags = flags;
}
out:
- async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
- async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
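+ /*
+ * Fill the iterators only when there is data for the DMA
+ * engine to move; a zero source count routes the packet
+ * to the sync copy path instead.
+ */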
+ if (tlen) {
+ async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
+ async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+ } else {
+ src_it->count = 0;
+ }
return error;
}
uint16_t buf_id = 0;
uint32_t len = 0;
uint16_t desc_count;
- uint32_t size = pkt->pkt_len + dev->vhost_hlen;
+ uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
uint16_t num_buffers = 0;
uint32_t buffer_len[vq->size];
uint16_t buffer_buf_id[vq->size];
uint16_t avail_idx = vq->last_avail_idx;
uint64_t desc_addrs[PACKED_BATCH_SIZE];
struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
- uint32_t buf_offset = dev->vhost_hlen;
+ uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
uint64_t lens[PACKED_BATCH_SIZE];
uint16_t ids[PACKED_BATCH_SIZE];
uint16_t i;
return -1;
}
- rte_smp_rmb();
-
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
lens[i] = descs[avail_idx + i].len;
rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
(uintptr_t)desc_addrs[i];
- lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
+ lens[i] = pkts[i]->pkt_len +
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
}
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
struct buf_vector buf_vec[BUF_VECTOR_MAX];
uint16_t nr_descs = 0;
- rte_smp_rmb();
if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
&nr_descs) < 0)) {
VHOST_LOG_DATA(DEBUG,
(vq_size - n_inflight + pkts_idx) & (vq_size - 1);
}
-static __rte_always_inline void
-virtio_dev_rx_async_submit_split_err(struct virtio_net *dev,
- struct vhost_virtqueue *vq, uint16_t queue_id,
- uint16_t last_idx, uint16_t shadow_idx)
-{
- uint16_t start_idx, pkts_idx, vq_size;
- uint64_t *async_pending_info;
-
- pkts_idx = vq->async_pkts_idx;
- async_pending_info = vq->async_pending_info;
- vq_size = vq->size;
- start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
- vq_size, vq->async_pkts_inflight_n);
-
- while (likely((start_idx & (vq_size - 1)) != pkts_idx)) {
- uint64_t n_seg =
- async_pending_info[(start_idx) & (vq_size - 1)] >>
- ASYNC_PENDING_INFO_N_SFT;
-
- while (n_seg)
- n_seg -= vq->async_ops.check_completed_copies(dev->vid,
- queue_id, 0, 1);
- }
-
- vq->async_pkts_inflight_n = 0;
- vq->batch_copy_nb_elems = 0;
-
- vq->shadow_used_idx = shadow_idx;
- vq->last_avail_idx = last_idx;
-}
-
static __rte_noinline uint32_t
virtio_dev_rx_async_submit_split(struct virtio_net *dev,
struct vhost_virtqueue *vq, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
+ struct rte_mbuf **pkts, uint32_t count,
+ struct rte_mbuf **comp_pkts, uint32_t *comp_count)
{
uint32_t pkt_idx = 0, pkt_burst_idx = 0;
uint16_t num_buffers;
struct buf_vector buf_vec[BUF_VECTOR_MAX];
- uint16_t avail_head, last_idx, shadow_idx;
+ uint16_t avail_head;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
struct rte_vhost_iov_iter *src_it = it_pool;
struct rte_vhost_iov_iter *dst_it = it_pool + 1;
- uint16_t n_free_slot, slot_idx;
- int n_pkts = 0;
-
- avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
- last_idx = vq->last_avail_idx;
- shadow_idx = vq->shadow_used_idx;
+ uint16_t slot_idx = 0;
+ uint16_t segs_await = 0;
+ struct async_inflight_info *pkts_info = vq->async_pkts_info;
+ uint32_t n_pkts = 0, pkt_err = 0;
+ uint32_t num_async_pkts = 0, num_done_pkts = 0;
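+ /*
+ * Per-burst log of packet index and avail index, used to
+ * roll back the enqueue state if a DMA transfer fails.
+ */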
+ struct {
+ uint16_t pkt_idx;
+ uint16_t last_avail_idx;
+ } async_pkts_log[MAX_PKT_BURST];
/*
- * The ordering between avail index and
- * desc reads needs to be enforced.
+ * The ordering between avail index and desc reads needs to be enforced.
*/
- rte_smp_rmb();
+ avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
break;
}
- slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
+ slot_idx = (vq->async_pkts_idx + num_async_pkts) &
+ (vq->size - 1);
if (src_it->count) {
- async_fill_desc(&tdes[pkt_burst_idx], src_it, dst_it);
- pkt_burst_idx++;
- vq->async_pending_info[slot_idx] =
- num_buffers | (src_it->nr_segs << 16);
+ uint16_t from, to;
+
+ async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
+ pkts_info[slot_idx].descs = num_buffers;
+ pkts_info[slot_idx].mbuf = pkts[pkt_idx];
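+ /* record enough state to restore the avail ring on error */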
+ async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
+ async_pkts_log[num_async_pkts++].last_avail_idx =
+ vq->last_avail_idx;
src_iovec += src_it->nr_segs;
dst_iovec += dst_it->nr_segs;
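+ /* account this packet's segments against the async iovec pool */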
+ segs_await += src_it->nr_segs;
src_it += 2;
dst_it += 2;
- } else {
- vq->async_pending_info[slot_idx] = num_buffers;
- vq->async_pkts_inflight_n++;
- }
+
+ /**
+ * Move the shadow used entries of this packet to the
+ * async descriptor array, so the used ring is updated
+ * only after the DMA copies complete. The destination
+ * may wrap around the ring end.
+ */
+ from = vq->shadow_used_idx - num_buffers;
+ to = vq->async_desc_idx & (vq->size - 1);
+ if (num_buffers + to <= vq->size) {
+ rte_memcpy(&vq->async_descs_split[to],
+ &vq->shadow_used_split[from],
+ num_buffers *
+ sizeof(struct vring_used_elem));
+ } else {
+ int size = vq->size - to;
+
+ rte_memcpy(&vq->async_descs_split[to],
+ &vq->shadow_used_split[from],
+ size *
+ sizeof(struct vring_used_elem));
+ rte_memcpy(vq->async_descs_split,
+ &vq->shadow_used_split[from +
+ size], (num_buffers - size) *
+ sizeof(struct vring_used_elem));
+ }
+ vq->async_desc_idx += num_buffers;
+ vq->shadow_used_idx -= num_buffers;
+ } else
+ comp_pkts[num_done_pkts++] = pkts[pkt_idx];
vq->last_avail_idx += num_buffers;
- if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
- (pkt_idx == count - 1 && pkt_burst_idx)) {
+ /*
+ * Conditions to trigger the async device transfer:
+ * - the number of buffered packets reaches the transfer
+ * threshold
+ * - the number of unused async iov entries drops below
+ * the maximum vhost vector size
+ */
+ if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+ ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
+ BUF_VECTOR_MAX))) {
n_pkts = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
src_iovec = vec_pool;
dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
src_it = it_pool;
dst_it = it_pool + 1;
+ segs_await = 0;
+ vq->async_pkts_inflight_n += n_pkts;
- if (unlikely(n_pkts < (int)pkt_burst_idx)) {
- vq->async_pkts_inflight_n +=
- n_pkts > 0 ? n_pkts : 0;
- virtio_dev_rx_async_submit_split_err(dev,
- vq, queue_id, last_idx, shadow_idx);
- return 0;
+ if (unlikely(n_pkts < pkt_burst_idx)) {
+ /*
+ * Record the number of failed packets here; the
+ * actual error handling happens when the
+ * application polls for completions.
+ */
+ pkt_err = pkt_burst_idx - n_pkts;
+ pkt_burst_idx = 0;
+ break;
}
pkt_burst_idx = 0;
- vq->async_pkts_inflight_n += n_pkts;
}
}
if (pkt_burst_idx) {
n_pkts = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
- if (unlikely(n_pkts < (int)pkt_burst_idx)) {
- vq->async_pkts_inflight_n += n_pkts > 0 ? n_pkts : 0;
- virtio_dev_rx_async_submit_split_err(dev, vq, queue_id,
- last_idx, shadow_idx);
- return 0;
- }
-
vq->async_pkts_inflight_n += n_pkts;
+
+ if (unlikely(n_pkts < pkt_burst_idx))
+ pkt_err = pkt_burst_idx - n_pkts;
}
do_data_copy_enqueue(dev, vq);
- n_free_slot = vq->size - vq->async_pkts_idx;
- if (n_free_slot > pkt_idx) {
- rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
- pkts, pkt_idx * sizeof(uintptr_t));
- vq->async_pkts_idx += pkt_idx;
- } else {
- rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
- pkts, n_free_slot * sizeof(uintptr_t));
- rte_memcpy(&vq->async_pkts_pending[0],
- &pkts[n_free_slot],
- (pkt_idx - n_free_slot) * sizeof(uintptr_t));
- vq->async_pkts_idx = pkt_idx - n_free_slot;
+ if (unlikely(pkt_err)) {
+ uint16_t num_descs = 0;
+
+ num_async_pkts -= pkt_err;
+ /* Count the descriptors consumed by the DMA-error packets. */
+ while (pkt_err-- > 0) {
+ num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
+ slot_idx--;
+ }
+ vq->async_desc_idx -= num_descs;
+ /* Roll back the shadow used ring and the available ring. */
+ vq->shadow_used_idx -= (vq->last_avail_idx -
+ async_pkts_log[num_async_pkts].last_avail_idx -
+ num_descs);
+ vq->last_avail_idx =
+ async_pkts_log[num_async_pkts].last_avail_idx;
+ pkt_idx = async_pkts_log[num_async_pkts].pkt_idx;
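+ /* packets handled by the sync copy path remain completed */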
+ num_done_pkts = pkt_idx - num_async_pkts;
}
- if (likely(vq->shadow_used_idx))
- async_flush_shadow_used_ring_split(dev, vq);
+ vq->async_pkts_idx += num_async_pkts;
+ *comp_count = num_done_pkts;
+
+ if (likely(vq->shadow_used_idx)) {
+ flush_shadow_used_ring_split(dev, vq);
+ vhost_vring_call_split(dev, vq);
+ }
return pkt_idx;
}
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
- uint16_t n_segs_cpl, n_pkts_put = 0, n_descs = 0;
+ uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
uint16_t start_idx, pkts_idx, vq_size;
- uint16_t n_inflight;
- uint64_t *async_pending_info;
+ struct async_inflight_info *pkts_info;
+ uint16_t from, i;
if (!dev)
return 0;
vq = dev->virtqueue[queue_id];
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
rte_spinlock_lock(&vq->access_lock);
- n_inflight = vq->async_pkts_inflight_n;
- pkts_idx = vq->async_pkts_idx;
- async_pending_info = vq->async_pending_info;
+ pkts_idx = vq->async_pkts_idx & (vq->size - 1);
+ pkts_info = vq->async_pkts_info;
vq_size = vq->size;
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- n_segs_cpl = vq->async_ops.check_completed_copies(vid, queue_id,
- 0, ASYNC_MAX_POLL_SEG - vq->async_last_seg_n) +
- vq->async_last_seg_n;
-
- rte_smp_wmb();
-
- while (likely((n_pkts_put < count) && n_inflight)) {
- uint64_t info = async_pending_info[
- (start_idx + n_pkts_put) & (vq_size - 1)];
- uint64_t n_segs;
- n_pkts_put++;
- n_inflight--;
- n_descs += info & ASYNC_PENDING_INFO_N_MSK;
- n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
-
- if (n_segs) {
- if (unlikely(n_segs_cpl < n_segs)) {
- n_pkts_put--;
- n_inflight++;
- n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
- if (n_segs_cpl) {
- async_pending_info[
- (start_idx + n_pkts_put) &
- (vq_size - 1)] =
- ((n_segs - n_segs_cpl) <<
- ASYNC_PENDING_INFO_N_SFT) |
- (info & ASYNC_PENDING_INFO_N_MSK);
- n_segs_cpl = 0;
- }
- break;
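+ /*
+ * async_last_pkts_n holds completions polled on a previous
+ * call but not yet returned to the application.
+ */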
+ if (count > vq->async_last_pkts_n)
+ n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ queue_id, 0, count - vq->async_last_pkts_n);
+ n_pkts_cpl += vq->async_last_pkts_n;
+
+ n_pkts_put = RTE_MIN(count, n_pkts_cpl);
+ if (unlikely(n_pkts_put == 0)) {
+ vq->async_last_pkts_n = n_pkts_cpl;
+ goto done;
+ }
+
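+ /* gather the completed mbufs and count their used descriptors */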
+ for (i = 0; i < n_pkts_put; i++) {
+ from = (start_idx + i) & (vq_size - 1);
+ n_descs += pkts_info[from].descs;
+ pkts[i] = pkts_info[from].mbuf;
+ }
+ vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
+ vq->async_pkts_inflight_n -= n_pkts_put;
+
+ if (likely(vq->enabled && vq->access_ok)) {
+ uint16_t nr_left = n_descs;
+ uint16_t nr_copy;
+ uint16_t to;
+
+ /* write back completed descriptors to used ring */
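+ /* both rings may wrap, so the copy proceeds in bounded chunks */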
+ do {
+ from = vq->last_async_desc_idx & (vq->size - 1);
+ nr_copy = nr_left + from <= vq->size ? nr_left :
+ vq->size - from;
+ to = vq->last_used_idx & (vq->size - 1);
+
+ if (to + nr_copy <= vq->size) {
+ rte_memcpy(&vq->used->ring[to],
+ &vq->async_descs_split[from],
+ nr_copy *
+ sizeof(struct vring_used_elem));
+ } else {
+ uint16_t size = vq->size - to;
+
+ rte_memcpy(&vq->used->ring[to],
+ &vq->async_descs_split[from],
+ size *
+ sizeof(struct vring_used_elem));
+ rte_memcpy(vq->used->ring,
+ &vq->async_descs_split[from +
+ size], (nr_copy - size) *
+ sizeof(struct vring_used_elem));
}
- n_segs_cpl -= n_segs;
- }
- }
- vq->async_last_seg_n = n_segs_cpl;
+ vq->last_async_desc_idx += nr_copy;
+ vq->last_used_idx += nr_copy;
+ nr_left -= nr_copy;
+ } while (nr_left > 0);
- if (n_pkts_put) {
- vq->async_pkts_inflight_n = n_inflight;
- if (likely(vq->enabled && vq->access_ok)) {
- __atomic_add_fetch(&vq->used->idx,
- n_descs, __ATOMIC_RELEASE);
- vhost_vring_call_split(dev, vq);
- }
- }
-
- if (start_idx + n_pkts_put <= vq_size) {
- rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
- n_pkts_put * sizeof(uintptr_t));
- } else {
- rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
- (vq_size - start_idx) * sizeof(uintptr_t));
- rte_memcpy(&pkts[vq_size - start_idx], vq->async_pkts_pending,
- (n_pkts_put - vq_size + start_idx) * sizeof(uintptr_t));
- }
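+ /*
+ * The release ordering makes the copied descriptors
+ * visible to the guest before the used index update.
+ */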
+ __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
+ vhost_vring_call_split(dev, vq);
+ } else
+ vq->last_async_desc_idx += n_descs;
+done:
rte_spinlock_unlock(&vq->access_lock);
return n_pkts_put;
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
+ struct rte_mbuf **pkts, uint32_t count,
+ struct rte_mbuf **comp_pkts, uint32_t *comp_count)
{
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- bool drawback = false;
VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
rte_spinlock_lock(&vq->access_lock);
- if (unlikely(vq->enabled == 0))
- goto out_access_unlock;
-
- if (unlikely(!vq->async_registered)) {
- drawback = true;
+ if (unlikely(vq->enabled == 0 || !vq->async_registered))
goto out_access_unlock;
- }
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
nb_tx = 0;
else
nb_tx = virtio_dev_rx_async_submit_split(dev,
- vq, queue_id, pkts, count);
+ vq, queue_id, pkts, count, comp_pkts,
+ comp_count);
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
out_access_unlock:
rte_spinlock_unlock(&vq->access_lock);
- if (drawback)
- return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, count);
-
return nb_tx;
}
uint16_t
rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count)
+ struct rte_mbuf **pkts, uint16_t count,
+ struct rte_mbuf **comp_pkts, uint32_t *comp_count)
{
struct virtio_net *dev = get_device(vid);
+ *comp_count = 0;
if (!dev)
return 0;
return 0;
}
- return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
+ return virtio_dev_rx_async_submit(dev, queue_id, pkts, count, comp_pkts,
+ comp_count);
}
static inline bool
case RTE_ETHER_TYPE_IPV4:
ipv4_hdr = l3_hdr;
*l4_proto = ipv4_hdr->next_proto_id;
- m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
+ m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
*l4_hdr = (char *)l3_hdr + m->l3_len;
m->ol_flags |= PKT_TX_IPV4;
break;
rte_iova_t iova;
void *buf;
- /* Try to use pkt buffer to store shinfo to reduce the amount of memory
- * required, otherwise store shinfo in the new buffer.
- */
- if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
- shinfo = rte_pktmbuf_mtod(pkt,
- struct rte_mbuf_ext_shared_info *);
- else {
- total_len += sizeof(*shinfo) + sizeof(uintptr_t);
- total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
- }
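+ /* always reserve room for the shared info in the external buffer */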
+ total_len += sizeof(*shinfo) + sizeof(uintptr_t);
+ total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
if (unlikely(total_len > UINT16_MAX))
return -ENOSPC;
return -ENOMEM;
/* Initialize shinfo */
- if (shinfo) {
- shinfo->free_cb = virtio_dev_extbuf_free;
- shinfo->fcb_opaque = buf;
- rte_mbuf_ext_refcnt_set(shinfo, 1);
- } else {
- shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
- virtio_dev_extbuf_free, buf);
- if (unlikely(shinfo == NULL)) {
- rte_free(buf);
- VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
- return -1;
- }
+ shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
+ virtio_dev_extbuf_free, buf);
+ if (unlikely(shinfo == NULL)) {
+ rte_free(buf);
+ VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
+ return -1;
}
iova = rte_malloc_virt2iova(buf);
{
bool wrap = vq->avail_wrap_counter;
struct vring_packed_desc *descs = vq->desc_packed;
- struct virtio_net_hdr *hdr;
uint64_t lens[PACKED_BATCH_SIZE];
uint64_t buf_lens[PACKED_BATCH_SIZE];
- uint32_t buf_offset = dev->vhost_hlen;
+ uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
uint16_t flags, i;
if (unlikely(avail_idx & PACKED_BATCH_MASK))
return -1;
}
- rte_smp_rmb();
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
lens[i] = descs[avail_idx + i].len;
ids[i] = descs[avail_idx + i].id;
}
- if (virtio_net_with_host_offload(dev)) {
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
- hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
- vhost_dequeue_offload(hdr, pkts[i]);
- }
- }
-
return 0;
free_buf:
struct rte_mbuf **pkts)
{
uint16_t avail_idx = vq->last_avail_idx;
- uint32_t buf_offset = dev->vhost_hlen;
+ uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ struct virtio_net_hdr *hdr;
uintptr_t desc_addrs[PACKED_BATCH_SIZE];
uint16_t ids[PACKED_BATCH_SIZE];
uint16_t i;
(void *)(uintptr_t)(desc_addrs[i] + buf_offset),
pkts[i]->pkt_len);
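+ /* process offload metadata from the virtio net headers after the copies */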
+ if (virtio_net_with_host_offload(dev)) {
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
+ vhost_dequeue_offload(hdr, pkts[i]);
+ }
+ }
+
if (virtio_net_is_inorder(dev))
vhost_shadow_dequeue_batch_packed_inorder(vq,
ids[PACKED_BATCH_SIZE - 1]);