sizeof(vq->used->idx));
}
-static __rte_always_inline void
-async_flush_shadow_used_ring_split(struct virtio_net *dev,
- struct vhost_virtqueue *vq)
-{
- uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
-
- if (used_idx + vq->shadow_used_idx <= vq->size) {
- do_flush_shadow_used_ring_split(dev, vq, used_idx, 0,
- vq->shadow_used_idx);
- } else {
- uint16_t size;
-
- /* update used ring interval [used_idx, vq->size] */
- size = vq->size - used_idx;
- do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size);
-
- /* update the left half used ring interval [0, left_size] */
- do_flush_shadow_used_ring_split(dev, vq, 0, size,
- vq->shadow_used_idx - size);
- }
-
- vq->last_used_idx += vq->shadow_used_idx;
- vq->shadow_used_idx = 0;
-}
-
static __rte_always_inline void
update_shadow_used_ring_split(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint32_t len)
used_idx -= vq->size;
}
- rte_smp_wmb();
+ /* The ordering for storing desc flags needs to be enforced. */
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
for (i = 0; i < vq->shadow_used_idx; i++) {
uint16_t flags;
struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
- rte_smp_wmb();
- vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
+ /* desc flags are the synchronization point for the virtio packed vring */
+ __atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
+ used_elem->flags, __ATOMIC_RELEASE);
vhost_log_cache_used_vring(dev, vq, vq->shadow_last_used_idx *
sizeof(struct vring_packed_desc),
vq->desc_packed[vq->last_used_idx + i].len = lens[i];
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
vq->desc_packed[vq->last_used_idx + i].flags = flags;
vq->desc_packed[vq->last_used_idx + i].len = 0;
}
- rte_smp_wmb();
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
vhost_for_each_try_unroll(i, begin, PACKED_BATCH_SIZE)
vq->desc_packed[vq->last_used_idx + i].flags = flags;
struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
int error = 0;
+ uint64_t mapped_len;
uint32_t tlen = 0;
int tvec_idx = 0;
cpy_len = RTE_MIN(buf_avail, mbuf_avail);
- if (unlikely(cpy_len >= cpy_threshold)) {
- hpa = (void *)(uintptr_t)gpa_to_hpa(dev,
- buf_iova + buf_offset, cpy_len);
+ while (unlikely(cpy_len && cpy_len >= cpy_threshold)) {
+ hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
+ buf_iova + buf_offset,
+ cpy_len, &mapped_len);
- if (unlikely(!hpa)) {
- error = -1;
- goto out;
- }
+ if (unlikely(!hpa || mapped_len < cpy_threshold))
+ break;
async_fill_vec(src_iovec + tvec_idx,
(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
- mbuf_offset), cpy_len);
+ mbuf_offset), (size_t)mapped_len);
- async_fill_vec(dst_iovec + tvec_idx, hpa, cpy_len);
+ async_fill_vec(dst_iovec + tvec_idx,
+ hpa, (size_t)mapped_len);
- tlen += cpy_len;
+ tlen += (uint32_t)mapped_len;
+ cpy_len -= (uint32_t)mapped_len;
+ mbuf_avail -= (uint32_t)mapped_len;
+ mbuf_offset += (uint32_t)mapped_len;
+ buf_avail -= (uint32_t)mapped_len;
+ buf_offset += (uint32_t)mapped_len;
tvec_idx++;
- } else {
+ }
+
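+ /*
+ * Fall back to a CPU copy for the part that was not
+ * handed to the async engine.
+ */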
+ if (likely(cpy_len)) {
if (unlikely(vq->batch_copy_nb_elems >= vq->size)) {
rte_memcpy(
(void *)((uintptr_t)(buf_addr + buf_offset)),
cpy_len;
vq->batch_copy_nb_elems++;
}
+
+ mbuf_avail -= cpy_len;
+ mbuf_offset += cpy_len;
+ buf_avail -= cpy_len;
+ buf_offset += cpy_len;
}
- mbuf_avail -= cpy_len;
- mbuf_offset += cpy_len;
- buf_avail -= cpy_len;
- buf_offset += cpy_len;
}
out:
- async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
- async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+ if (tlen) {
+ async_fill_iter(src_it, tlen, src_iovec, tvec_idx);
+ async_fill_iter(dst_it, tlen, dst_iovec, tvec_idx);
+ } else {
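+ /* No async copy was set up; tell the caller via an empty iterator. */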
+ src_it->count = 0;
+ }
return error;
}
uint16_t buf_id = 0;
uint32_t len = 0;
uint16_t desc_count;
- uint32_t size = pkt->pkt_len + dev->vhost_hlen;
+ uint32_t size = pkt->pkt_len + sizeof(struct virtio_net_hdr_mrg_rxbuf);
uint16_t num_buffers = 0;
uint32_t buffer_len[vq->size];
uint16_t buffer_buf_id[vq->size];
uint16_t avail_idx = vq->last_avail_idx;
uint64_t desc_addrs[PACKED_BATCH_SIZE];
struct virtio_net_hdr_mrg_rxbuf *hdrs[PACKED_BATCH_SIZE];
- uint32_t buf_offset = dev->vhost_hlen;
+ uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
uint64_t lens[PACKED_BATCH_SIZE];
uint16_t ids[PACKED_BATCH_SIZE];
uint16_t i;
return -1;
}
- rte_smp_rmb();
-
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
lens[i] = descs[avail_idx + i].len;
rte_prefetch0((void *)(uintptr_t)desc_addrs[i]);
hdrs[i] = (struct virtio_net_hdr_mrg_rxbuf *)
(uintptr_t)desc_addrs[i];
- lens[i] = pkts[i]->pkt_len + dev->vhost_hlen;
+ lens[i] = pkts[i]->pkt_len +
+ sizeof(struct virtio_net_hdr_mrg_rxbuf);
}
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
struct buf_vector buf_vec[BUF_VECTOR_MAX];
uint16_t nr_descs = 0;
- rte_smp_rmb();
if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
&nr_descs) < 0)) {
VHOST_LOG_DATA(DEBUG,
static __rte_noinline uint32_t
virtio_dev_rx_packed(struct virtio_net *dev,
- struct vhost_virtqueue *vq,
- struct rte_mbuf **pkts,
+ struct vhost_virtqueue *__rte_restrict vq,
+ struct rte_mbuf **__rte_restrict pkts,
uint32_t count)
{
uint32_t pkt_idx = 0;
uint16_t
rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count)
+ struct rte_mbuf **__rte_restrict pkts, uint16_t count)
{
struct virtio_net *dev = get_device(vid);
(vq_size - n_inflight + pkts_idx) & (vq_size - 1);
}
-static __rte_always_inline void
-virtio_dev_rx_async_submit_split_err(struct virtio_net *dev,
- struct vhost_virtqueue *vq, uint16_t queue_id,
- uint16_t last_idx, uint16_t shadow_idx)
-{
- uint16_t start_idx, pkts_idx, vq_size;
- uint64_t *async_pending_info;
-
- pkts_idx = vq->async_pkts_idx;
- async_pending_info = vq->async_pending_info;
- vq_size = vq->size;
- start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
- vq_size, vq->async_pkts_inflight_n);
-
- while (likely((start_idx & (vq_size - 1)) != pkts_idx)) {
- uint64_t n_seg =
- async_pending_info[(start_idx) & (vq_size - 1)] >>
- ASYNC_PENDING_INFO_N_SFT;
-
- while (n_seg)
- n_seg -= vq->async_ops.check_completed_copies(dev->vid,
- queue_id, 0, 1);
- }
-
- vq->async_pkts_inflight_n = 0;
- vq->batch_copy_nb_elems = 0;
-
- vq->shadow_used_idx = shadow_idx;
- vq->last_avail_idx = last_idx;
-}
-
static __rte_noinline uint32_t
virtio_dev_rx_async_submit_split(struct virtio_net *dev,
struct vhost_virtqueue *vq, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
+ struct rte_mbuf **pkts, uint32_t count,
+ struct rte_mbuf **comp_pkts, uint32_t *comp_count)
{
uint32_t pkt_idx = 0, pkt_burst_idx = 0;
uint16_t num_buffers;
struct buf_vector buf_vec[BUF_VECTOR_MAX];
- uint16_t avail_head, last_idx, shadow_idx;
+ uint16_t avail_head;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
struct rte_vhost_iov_iter *src_it = it_pool;
struct rte_vhost_iov_iter *dst_it = it_pool + 1;
- uint16_t n_free_slot, slot_idx;
- int n_pkts = 0;
-
- avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
- last_idx = vq->last_avail_idx;
- shadow_idx = vq->shadow_used_idx;
+ uint16_t slot_idx = 0;
+ uint16_t segs_await = 0;
+ struct async_inflight_info *pkts_info = vq->async_pkts_info;
+ uint32_t n_pkts = 0, pkt_err = 0;
+ uint32_t num_async_pkts = 0, num_done_pkts = 0;
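+ /*
+ * Per-packet log of pkt_idx and last_avail_idx, used to
+ * roll back the ring state on DMA transfer errors.
+ */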
+ struct {
+ uint16_t pkt_idx;
+ uint16_t last_avail_idx;
+ } async_pkts_log[MAX_PKT_BURST];
/*
- * The ordering between avail index and
- * desc reads needs to be enforced.
+ * The ordering between avail index and desc reads needs to be enforced.
*/
- rte_smp_rmb();
+ avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
break;
}
- slot_idx = (vq->async_pkts_idx + pkt_idx) & (vq->size - 1);
+ slot_idx = (vq->async_pkts_idx + num_async_pkts) &
+ (vq->size - 1);
if (src_it->count) {
- async_fill_desc(&tdes[pkt_burst_idx], src_it, dst_it);
- pkt_burst_idx++;
- vq->async_pending_info[slot_idx] =
- num_buffers | (src_it->nr_segs << 16);
+ uint16_t from, to;
+
+ async_fill_desc(&tdes[pkt_burst_idx++], src_it, dst_it);
+ pkts_info[slot_idx].descs = num_buffers;
+ pkts_info[slot_idx].mbuf = pkts[pkt_idx];
+ async_pkts_log[num_async_pkts].pkt_idx = pkt_idx;
+ async_pkts_log[num_async_pkts++].last_avail_idx =
+ vq->last_avail_idx;
src_iovec += src_it->nr_segs;
dst_iovec += dst_it->nr_segs;
src_it += 2;
dst_it += 2;
- } else {
- vq->async_pending_info[slot_idx] = num_buffers;
- vq->async_pkts_inflight_n++;
- }
+ segs_await += src_it->nr_segs;
+
+ /*
+ * Recover the shadow used ring and keep the DMA-occupied
+ * descriptors aside until their copies complete.
+ */
+ from = vq->shadow_used_idx - num_buffers;
+ to = vq->async_desc_idx & (vq->size - 1);
+ if (num_buffers + to <= vq->size) {
+ rte_memcpy(&vq->async_descs_split[to],
+ &vq->shadow_used_split[from],
+ num_buffers *
+ sizeof(struct vring_used_elem));
+ } else {
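+ /* The copy wraps around the end of the async descriptor ring. */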
+ int size = vq->size - to;
+
+ rte_memcpy(&vq->async_descs_split[to],
+ &vq->shadow_used_split[from],
+ size *
+ sizeof(struct vring_used_elem));
+ rte_memcpy(vq->async_descs_split,
+ &vq->shadow_used_split[from +
+ size], (num_buffers - size) *
+ sizeof(struct vring_used_elem));
+ }
+ vq->async_desc_idx += num_buffers;
+ vq->shadow_used_idx -= num_buffers;
+ } else
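+ /* Packet fully copied by the CPU; report it as completed right away. */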
+ comp_pkts[num_done_pkts++] = pkts[pkt_idx];
vq->last_avail_idx += num_buffers;
- if (pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
- (pkt_idx == count - 1 && pkt_burst_idx)) {
+ /*
+ * Conditions to trigger the async device transfer:
+ * - the number of buffered packets reaches the transfer threshold
+ * - the number of unused async iovecs is less than the max vhost vector
+ */
+ if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
+ ((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
+ BUF_VECTOR_MAX))) {
n_pkts = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
src_iovec = vec_pool;
dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
src_it = it_pool;
dst_it = it_pool + 1;
+ segs_await = 0;
+ vq->async_pkts_inflight_n += n_pkts;
- if (unlikely(n_pkts < (int)pkt_burst_idx)) {
- vq->async_pkts_inflight_n +=
- n_pkts > 0 ? n_pkts : 0;
- virtio_dev_rx_async_submit_split_err(dev,
- vq, queue_id, last_idx, shadow_idx);
- return 0;
+ if (unlikely(n_pkts < pkt_burst_idx)) {
+ /*
+ * Record the number of failed packets here; the actual
+ * error handling is done when the application polls
+ * for completions.
+ */
+ pkt_err = pkt_burst_idx - n_pkts;
+ pkt_burst_idx = 0;
+ break;
}
pkt_burst_idx = 0;
- vq->async_pkts_inflight_n += n_pkts;
}
}
if (pkt_burst_idx) {
n_pkts = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
- if (unlikely(n_pkts < (int)pkt_burst_idx)) {
- vq->async_pkts_inflight_n += n_pkts > 0 ? n_pkts : 0;
- virtio_dev_rx_async_submit_split_err(dev, vq, queue_id,
- last_idx, shadow_idx);
- return 0;
- }
-
vq->async_pkts_inflight_n += n_pkts;
+
+ if (unlikely(n_pkts < pkt_burst_idx))
+ pkt_err = pkt_burst_idx - n_pkts;
}
do_data_copy_enqueue(dev, vq);
- n_free_slot = vq->size - vq->async_pkts_idx;
- if (n_free_slot > pkt_idx) {
- rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
- pkts, pkt_idx * sizeof(uintptr_t));
- vq->async_pkts_idx += pkt_idx;
- } else {
- rte_memcpy(&vq->async_pkts_pending[vq->async_pkts_idx],
- pkts, n_free_slot * sizeof(uintptr_t));
- rte_memcpy(&vq->async_pkts_pending[0],
- &pkts[n_free_slot],
- (pkt_idx - n_free_slot) * sizeof(uintptr_t));
- vq->async_pkts_idx = pkt_idx - n_free_slot;
+ if (unlikely(pkt_err)) {
+ uint16_t num_descs = 0;
+
+ num_async_pkts -= pkt_err;
+ /* Sum the descriptors used by the DMA-error packets. */
+ while (pkt_err-- > 0) {
+ num_descs += pkts_info[slot_idx & (vq->size - 1)].descs;
+ slot_idx--;
+ }
+ vq->async_desc_idx -= num_descs;
+ /* recover shadow used ring and available ring */
+ vq->shadow_used_idx -= (vq->last_avail_idx -
+ async_pkts_log[num_async_pkts].last_avail_idx -
+ num_descs);
+ vq->last_avail_idx =
+ async_pkts_log[num_async_pkts].last_avail_idx;
+ pkt_idx = async_pkts_log[num_async_pkts].pkt_idx;
+ num_done_pkts = pkt_idx - num_async_pkts;
}
- if (likely(vq->shadow_used_idx))
- async_flush_shadow_used_ring_split(dev, vq);
+ vq->async_pkts_idx += num_async_pkts;
+ *comp_count = num_done_pkts;
+
+ if (likely(vq->shadow_used_idx)) {
+ flush_shadow_used_ring_split(dev, vq);
+ vhost_vring_call_split(dev, vq);
+ }
return pkt_idx;
}
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
- uint16_t n_pkts_cpl, n_pkts_put = 0, n_descs = 0;
+ uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0;
uint16_t start_idx, pkts_idx, vq_size;
- uint64_t *async_pending_info;
+ struct async_inflight_info *pkts_info;
+ uint16_t from, i;
+
+ if (!dev)
+ return 0;
VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
vq = dev->virtqueue[queue_id];
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
rte_spinlock_lock(&vq->access_lock);
- pkts_idx = vq->async_pkts_idx;
- async_pending_info = vq->async_pending_info;
+ pkts_idx = vq->async_pkts_idx & (vq->size - 1);
+ pkts_info = vq->async_pkts_info;
vq_size = vq->size;
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- n_pkts_cpl =
- vq->async_ops.check_completed_copies(vid, queue_id, 0, count);
-
- rte_smp_wmb();
-
- while (likely(((start_idx + n_pkts_put) & (vq_size - 1)) != pkts_idx)) {
- uint64_t info = async_pending_info[
- (start_idx + n_pkts_put) & (vq_size - 1)];
- uint64_t n_segs;
- n_pkts_put++;
- n_descs += info & ASYNC_PENDING_INFO_N_MSK;
- n_segs = info >> ASYNC_PENDING_INFO_N_SFT;
-
- if (n_segs) {
- if (!n_pkts_cpl || n_pkts_cpl < n_segs) {
- n_pkts_put--;
- n_descs -= info & ASYNC_PENDING_INFO_N_MSK;
- if (n_pkts_cpl) {
- async_pending_info[
- (start_idx + n_pkts_put) &
- (vq_size - 1)] =
- ((n_segs - n_pkts_cpl) <<
- ASYNC_PENDING_INFO_N_SFT) |
- (info & ASYNC_PENDING_INFO_N_MSK);
- n_pkts_cpl = 0;
- }
- break;
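+ /*
+ * async_last_pkts_n holds completions counted on a previous
+ * call but not yet returned; only poll for the rest.
+ */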
+ if (count > vq->async_last_pkts_n)
+ n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ queue_id, 0, count - vq->async_last_pkts_n);
+ n_pkts_cpl += vq->async_last_pkts_n;
+
+ n_pkts_put = RTE_MIN(count, n_pkts_cpl);
+ if (unlikely(n_pkts_put == 0)) {
+ vq->async_last_pkts_n = n_pkts_cpl;
+ goto done;
+ }
+
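+ /* Collect the completed mbufs and count the descriptors they used. */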
+ for (i = 0; i < n_pkts_put; i++) {
+ from = (start_idx + i) & (vq_size - 1);
+ n_descs += pkts_info[from].descs;
+ pkts[i] = pkts_info[from].mbuf;
+ }
+ vq->async_last_pkts_n = n_pkts_cpl - n_pkts_put;
+ vq->async_pkts_inflight_n -= n_pkts_put;
+
+ if (likely(vq->enabled && vq->access_ok)) {
+ uint16_t nr_left = n_descs;
+ uint16_t nr_copy;
+ uint16_t to;
+
+ /* write back completed descriptors to used ring */
+ do {
+ from = vq->last_async_desc_idx & (vq->size - 1);
+ nr_copy = nr_left + from <= vq->size ? nr_left :
+ vq->size - from;
+ to = vq->last_used_idx & (vq->size - 1);
+
+ if (to + nr_copy <= vq->size) {
+ rte_memcpy(&vq->used->ring[to],
+ &vq->async_descs_split[from],
+ nr_copy *
+ sizeof(struct vring_used_elem));
+ } else {
+ uint16_t size = vq->size - to;
+
+ rte_memcpy(&vq->used->ring[to],
+ &vq->async_descs_split[from],
+ size *
+ sizeof(struct vring_used_elem));
+ rte_memcpy(vq->used->ring,
+ &vq->async_descs_split[from +
+ size], (nr_copy - size) *
+ sizeof(struct vring_used_elem));
}
- n_pkts_cpl -= n_segs;
- }
- }
- if (n_pkts_put) {
- vq->async_pkts_inflight_n -= n_pkts_put;
- __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
+ vq->last_async_desc_idx += nr_copy;
+ vq->last_used_idx += nr_copy;
+ nr_left -= nr_copy;
+ } while (nr_left > 0);
+ __atomic_add_fetch(&vq->used->idx, n_descs, __ATOMIC_RELEASE);
vhost_vring_call_split(dev, vq);
- }
-
- if (start_idx + n_pkts_put <= vq_size) {
- rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
- n_pkts_put * sizeof(uintptr_t));
- } else {
- rte_memcpy(pkts, &vq->async_pkts_pending[start_idx],
- (vq_size - start_idx) * sizeof(uintptr_t));
- rte_memcpy(&pkts[vq_size - start_idx], vq->async_pkts_pending,
- (n_pkts_put - vq_size + start_idx) * sizeof(uintptr_t));
- }
+ } else
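+ /* Vring not accessible: just free the async descriptor slots. */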
+ vq->last_async_desc_idx += n_descs;
+done:
rte_spinlock_unlock(&vq->access_lock);
return n_pkts_put;
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
+ struct rte_mbuf **pkts, uint32_t count,
+ struct rte_mbuf **comp_pkts, uint32_t *comp_count)
{
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- bool drawback = false;
VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
rte_spinlock_lock(&vq->access_lock);
- if (unlikely(vq->enabled == 0))
- goto out_access_unlock;
-
- if (unlikely(!vq->async_registered)) {
- drawback = true;
+ if (unlikely(vq->enabled == 0 || !vq->async_registered))
goto out_access_unlock;
- }
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
vhost_user_iotlb_rd_lock(vq);
nb_tx = 0;
else
nb_tx = virtio_dev_rx_async_submit_split(dev,
- vq, queue_id, pkts, count);
+ vq, queue_id, pkts, count, comp_pkts,
+ comp_count);
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
out_access_unlock:
rte_spinlock_unlock(&vq->access_lock);
- if (drawback)
- return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, count);
-
return nb_tx;
}
uint16_t
rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count)
+ struct rte_mbuf **pkts, uint16_t count,
+ struct rte_mbuf **comp_pkts, uint32_t *comp_count)
{
struct virtio_net *dev = get_device(vid);
+ *comp_count = 0;
if (!dev)
return 0;
return 0;
}
- return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
+ return virtio_dev_rx_async_submit(dev, queue_id, pkts, count, comp_pkts,
+ comp_count);
}
static inline bool
case RTE_ETHER_TYPE_IPV4:
ipv4_hdr = l3_hdr;
*l4_proto = ipv4_hdr->next_proto_id;
- m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
+ m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
*l4_hdr = (char *)l3_hdr + m->l3_len;
m->ol_flags |= PKT_TX_IPV4;
break;
struct rte_mbuf *m, struct rte_mempool *mbuf_pool)
{
uint32_t buf_avail, buf_offset;
- uint64_t buf_addr, buf_iova, buf_len;
+ uint64_t buf_addr, buf_len;
uint32_t mbuf_avail, mbuf_offset;
uint32_t cpy_len;
struct rte_mbuf *cur = m, *prev = m;
int error = 0;
buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1)) {
buf_offset = dev->vhost_hlen - buf_len;
vec_idx++;
buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
buf_avail = buf_len - buf_offset;
} else if (buf_len == dev->vhost_hlen) {
if (unlikely(++vec_idx >= nr_vec))
goto out;
buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
buf_offset = 0;
mbuf_offset = 0;
mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
while (1) {
- uint64_t hpa;
-
cpy_len = RTE_MIN(buf_avail, mbuf_avail);
- /*
- * A desc buf might across two host physical pages that are
- * not continuous. In such case (gpa_to_hpa returns 0), data
- * will be copied even though zero copy is enabled.
- */
- if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
- buf_iova + buf_offset, cpy_len)))) {
- cur->data_len = cpy_len;
- cur->data_off = 0;
- cur->buf_addr =
- (void *)(uintptr_t)(buf_addr + buf_offset);
- cur->buf_iova = hpa;
-
- /*
- * In zero copy mode, one mbuf can only reference data
- * for one or partial of one desc buff.
- */
- mbuf_avail = cpy_len;
- } else {
- if (likely(cpy_len > MAX_BATCH_LEN ||
- vq->batch_copy_nb_elems >= vq->size ||
- (hdr && cur == m))) {
- rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
- mbuf_offset),
- (void *)((uintptr_t)(buf_addr +
- buf_offset)),
- cpy_len);
- } else {
- batch_copy[vq->batch_copy_nb_elems].dst =
- rte_pktmbuf_mtod_offset(cur, void *,
- mbuf_offset);
- batch_copy[vq->batch_copy_nb_elems].src =
+ if (likely(cpy_len > MAX_BATCH_LEN ||
+ vq->batch_copy_nb_elems >= vq->size ||
+ (hdr && cur == m))) {
+ rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset),
(void *)((uintptr_t)(buf_addr +
- buf_offset));
- batch_copy[vq->batch_copy_nb_elems].len =
- cpy_len;
- vq->batch_copy_nb_elems++;
- }
+ buf_offset)), cpy_len);
+ } else {
+ batch_copy[vq->batch_copy_nb_elems].dst =
+ rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset);
+ batch_copy[vq->batch_copy_nb_elems].src =
+ (void *)((uintptr_t)(buf_addr + buf_offset));
+ batch_copy[vq->batch_copy_nb_elems].len = cpy_len;
+ vq->batch_copy_nb_elems++;
}
mbuf_avail -= cpy_len;
break;
buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
buf_len = buf_vec[vec_idx].buf_len;
buf_offset = 0;
error = -1;
goto out;
}
- if (unlikely(dev->dequeue_zero_copy))
- rte_mbuf_refcnt_update(cur, 1);
prev->next = cur;
prev->data_len = mbuf_offset;
return error;
}
-static __rte_always_inline struct zcopy_mbuf *
-get_zmbuf(struct vhost_virtqueue *vq)
-{
- uint16_t i;
- uint16_t last;
- int tries = 0;
-
- /* search [last_zmbuf_idx, zmbuf_size) */
- i = vq->last_zmbuf_idx;
- last = vq->zmbuf_size;
-
-again:
- for (; i < last; i++) {
- if (vq->zmbufs[i].in_use == 0) {
- vq->last_zmbuf_idx = i + 1;
- vq->zmbufs[i].in_use = 1;
- return &vq->zmbufs[i];
- }
- }
-
- tries++;
- if (tries == 1) {
- /* search [0, last_zmbuf_idx) */
- i = 0;
- last = vq->last_zmbuf_idx;
- goto again;
- }
-
- return NULL;
-}
-
static void
virtio_dev_extbuf_free(void *addr __rte_unused, void *opaque)
{
rte_iova_t iova;
void *buf;
- /* Try to use pkt buffer to store shinfo to reduce the amount of memory
- * required, otherwise store shinfo in the new buffer.
- */
- if (rte_pktmbuf_tailroom(pkt) >= sizeof(*shinfo))
- shinfo = rte_pktmbuf_mtod(pkt,
- struct rte_mbuf_ext_shared_info *);
- else {
- total_len += sizeof(*shinfo) + sizeof(uintptr_t);
- total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
- }
+ total_len += sizeof(*shinfo) + sizeof(uintptr_t);
+ total_len = RTE_ALIGN_CEIL(total_len, sizeof(uintptr_t));
if (unlikely(total_len > UINT16_MAX))
return -ENOSPC;
return -ENOMEM;
/* Initialize shinfo */
- if (shinfo) {
- shinfo->free_cb = virtio_dev_extbuf_free;
- shinfo->fcb_opaque = buf;
- rte_mbuf_ext_refcnt_set(shinfo, 1);
- } else {
- shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
- virtio_dev_extbuf_free, buf);
- if (unlikely(shinfo == NULL)) {
- rte_free(buf);
- VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
- return -1;
- }
+ shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
+ virtio_dev_extbuf_free, buf);
+ if (unlikely(shinfo == NULL)) {
+ rte_free(buf);
+ VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
+ return -1;
}
iova = rte_malloc_virt2iova(buf);
uint16_t dropped = 0;
static bool allocerr_warned;
- if (unlikely(dev->dequeue_zero_copy)) {
- struct zcopy_mbuf *zmbuf, *next;
-
- for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
- zmbuf != NULL; zmbuf = next) {
- next = TAILQ_NEXT(zmbuf, next);
-
- if (mbuf_is_consumed(zmbuf->mbuf)) {
- update_shadow_used_ring_split(vq,
- zmbuf->desc_idx, 0);
- TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
- restore_mbuf(zmbuf->mbuf);
- rte_pktmbuf_free(zmbuf->mbuf);
- put_zmbuf(zmbuf);
- vq->nr_zmbuf -= 1;
- }
- }
-
- if (likely(vq->shadow_used_idx)) {
- flush_shadow_used_ring_split(dev, vq);
- vhost_vring_call_split(dev, vq);
- }
- }
-
/*
* The ordering between avail index and
* desc reads needs to be enforced.
VHOST_ACCESS_RO) < 0))
break;
- if (likely(dev->dequeue_zero_copy == 0))
- update_shadow_used_ring_split(vq, head_idx, 0);
+ update_shadow_used_ring_split(vq, head_idx, 0);
pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
if (unlikely(pkts[i] == NULL)) {
i++;
break;
}
-
- if (unlikely(dev->dequeue_zero_copy)) {
- struct zcopy_mbuf *zmbuf;
-
- zmbuf = get_zmbuf(vq);
- if (!zmbuf) {
- rte_pktmbuf_free(pkts[i]);
- dropped += 1;
- i++;
- break;
- }
- zmbuf->mbuf = pkts[i];
- zmbuf->desc_idx = head_idx;
-
- /*
- * Pin lock the mbuf; we will check later to see
- * whether the mbuf is freed (when we are the last
- * user) or not. If that's the case, we then could
- * update the used ring safely.
- */
- rte_mbuf_refcnt_update(pkts[i], 1);
-
- vq->nr_zmbuf += 1;
- TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
- }
}
+
vq->last_avail_idx += i;
- if (likely(dev->dequeue_zero_copy == 0)) {
- do_data_copy_dequeue(vq);
- if (unlikely(i < count))
- vq->shadow_used_idx = i;
- if (likely(vq->shadow_used_idx)) {
- flush_shadow_used_ring_split(dev, vq);
- vhost_vring_call_split(dev, vq);
- }
+ do_data_copy_dequeue(vq);
+ if (unlikely(i < count))
+ vq->shadow_used_idx = i;
+ if (likely(vq->shadow_used_idx)) {
+ flush_shadow_used_ring_split(dev, vq);
+ vhost_vring_call_split(dev, vq);
}
return (i - dropped);
{
bool wrap = vq->avail_wrap_counter;
struct vring_packed_desc *descs = vq->desc_packed;
- struct virtio_net_hdr *hdr;
uint64_t lens[PACKED_BATCH_SIZE];
uint64_t buf_lens[PACKED_BATCH_SIZE];
- uint32_t buf_offset = dev->vhost_hlen;
+ uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
uint16_t flags, i;
if (unlikely(avail_idx & PACKED_BATCH_MASK))
return -1;
}
- rte_smp_rmb();
+ rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
lens[i] = descs[avail_idx + i].len;
ids[i] = descs[avail_idx + i].id;
}
- if (virtio_net_with_host_offload(dev)) {
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
- hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
- vhost_dequeue_offload(hdr, pkts[i]);
- }
- }
-
return 0;
free_buf:
struct rte_mbuf **pkts)
{
uint16_t avail_idx = vq->last_avail_idx;
- uint32_t buf_offset = dev->vhost_hlen;
+ uint32_t buf_offset = sizeof(struct virtio_net_hdr_mrg_rxbuf);
+ struct virtio_net_hdr *hdr;
uintptr_t desc_addrs[PACKED_BATCH_SIZE];
uint16_t ids[PACKED_BATCH_SIZE];
uint16_t i;
(void *)(uintptr_t)(desc_addrs[i] + buf_offset),
pkts[i]->pkt_len);
+ if (virtio_net_with_host_offload(dev)) {
+ vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+ hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
+ vhost_dequeue_offload(hdr, pkts[i]);
+ }
+ }
+
if (virtio_net_is_inorder(dev))
vhost_shadow_dequeue_batch_packed_inorder(vq,
ids[PACKED_BATCH_SIZE - 1]);
return ret;
}
-static __rte_always_inline int
-virtio_dev_tx_batch_packed_zmbuf(struct virtio_net *dev,
- struct vhost_virtqueue *vq,
- struct rte_mempool *mbuf_pool,
- struct rte_mbuf **pkts)
-{
- struct zcopy_mbuf *zmbufs[PACKED_BATCH_SIZE];
- uintptr_t desc_addrs[PACKED_BATCH_SIZE];
- uint16_t ids[PACKED_BATCH_SIZE];
- uint16_t i;
-
- uint16_t avail_idx = vq->last_avail_idx;
-
- if (vhost_reserve_avail_batch_packed(dev, vq, mbuf_pool, pkts,
- avail_idx, desc_addrs, ids))
- return -1;
-
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
- zmbufs[i] = get_zmbuf(vq);
-
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
- if (!zmbufs[i])
- goto free_pkt;
- }
-
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
- zmbufs[i]->mbuf = pkts[i];
- zmbufs[i]->desc_idx = ids[i];
- zmbufs[i]->desc_count = 1;
- }
-
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
- rte_mbuf_refcnt_update(pkts[i], 1);
-
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
- TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbufs[i], next);
-
- vq->nr_zmbuf += PACKED_BATCH_SIZE;
- vq_inc_last_avail_packed(vq, PACKED_BATCH_SIZE);
-
- return 0;
-
-free_pkt:
- vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
- rte_pktmbuf_free(pkts[i]);
-
- return -1;
-}
-
-static __rte_always_inline int
-virtio_dev_tx_single_packed_zmbuf(struct virtio_net *dev,
- struct vhost_virtqueue *vq,
- struct rte_mempool *mbuf_pool,
- struct rte_mbuf **pkts)
-{
- uint16_t buf_id, desc_count;
- struct zcopy_mbuf *zmbuf;
-
- if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
- &desc_count))
- return -1;
-
- zmbuf = get_zmbuf(vq);
- if (!zmbuf) {
- rte_pktmbuf_free(*pkts);
- return -1;
- }
- zmbuf->mbuf = *pkts;
- zmbuf->desc_idx = buf_id;
- zmbuf->desc_count = desc_count;
-
- rte_mbuf_refcnt_update(*pkts, 1);
-
- vq->nr_zmbuf += 1;
- TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
-
- vq_inc_last_avail_packed(vq, desc_count);
- return 0;
-}
-
-static __rte_always_inline void
-free_zmbuf(struct vhost_virtqueue *vq)
-{
- struct zcopy_mbuf *next = NULL;
- struct zcopy_mbuf *zmbuf;
-
- for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
- zmbuf != NULL; zmbuf = next) {
- next = TAILQ_NEXT(zmbuf, next);
-
- uint16_t last_used_idx = vq->last_used_idx;
-
- if (mbuf_is_consumed(zmbuf->mbuf)) {
- uint16_t flags;
- flags = vq->desc_packed[last_used_idx].flags;
- if (vq->used_wrap_counter) {
- flags |= VRING_DESC_F_USED;
- flags |= VRING_DESC_F_AVAIL;
- } else {
- flags &= ~VRING_DESC_F_USED;
- flags &= ~VRING_DESC_F_AVAIL;
- }
-
- vq->desc_packed[last_used_idx].id = zmbuf->desc_idx;
- vq->desc_packed[last_used_idx].len = 0;
-
- rte_smp_wmb();
- vq->desc_packed[last_used_idx].flags = flags;
-
- vq_inc_last_used_packed(vq, zmbuf->desc_count);
-
- TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
- restore_mbuf(zmbuf->mbuf);
- rte_pktmbuf_free(zmbuf->mbuf);
- put_zmbuf(zmbuf);
- vq->nr_zmbuf -= 1;
- }
- }
-}
-
-static __rte_noinline uint16_t
-virtio_dev_tx_packed_zmbuf(struct virtio_net *dev,
- struct vhost_virtqueue *vq,
- struct rte_mempool *mbuf_pool,
- struct rte_mbuf **pkts,
- uint32_t count)
-{
- uint32_t pkt_idx = 0;
- uint32_t remained = count;
-
- free_zmbuf(vq);
-
- do {
- if (remained >= PACKED_BATCH_SIZE) {
- if (!virtio_dev_tx_batch_packed_zmbuf(dev, vq,
- mbuf_pool, &pkts[pkt_idx])) {
- pkt_idx += PACKED_BATCH_SIZE;
- remained -= PACKED_BATCH_SIZE;
- continue;
- }
- }
-
- if (virtio_dev_tx_single_packed_zmbuf(dev, vq, mbuf_pool,
- &pkts[pkt_idx]))
- break;
- pkt_idx++;
- remained--;
-
- } while (remained);
-
- if (pkt_idx)
- vhost_vring_call_packed(dev, vq);
-
- return pkt_idx;
-}
-
static __rte_noinline uint16_t
virtio_dev_tx_packed(struct virtio_net *dev,
- struct vhost_virtqueue *vq,
+ struct vhost_virtqueue *__rte_restrict vq,
struct rte_mempool *mbuf_pool,
- struct rte_mbuf **pkts,
+ struct rte_mbuf **__rte_restrict pkts,
uint32_t count)
{
uint32_t pkt_idx = 0;
count -= 1;
}
- if (vq_is_packed(dev)) {
- if (unlikely(dev->dequeue_zero_copy))
- count = virtio_dev_tx_packed_zmbuf(dev, vq, mbuf_pool,
- pkts, count);
- else
- count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts,
- count);
- } else
+ if (vq_is_packed(dev))
+ count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count);
+ else
count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count);
out: