#include <rte_net.h>
#include <rte_ether.h>
#include <rte_ip.h>
+#include <rte_dmadev.h>
#include <rte_vhost.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#define MAX_BATCH_LEN 256
+/* DMA device copy operation tracking array. */
+struct async_dma_info dma_copy_track[RTE_DMADEV_DEFAULT_MAX];
+
static __rte_always_inline bool
rxvq_is_mergeable(struct virtio_net *dev)
{
return dev->features & (1ULL << VIRTIO_NET_F_MRG_RXBUF);
}
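+/* Enqueue the DMA copies for all segments of one packet on the given
+ * DMA vChannel. Returns the number of enqueued copy operations, or -1
+ * if the vChannel lacks capacity or a copy submission fails.
+ */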
+static __rte_always_inline int64_t
+vhost_async_dma_transfer_one(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ int16_t dma_id, uint16_t vchan_id, uint16_t flag_idx,
+ struct vhost_iov_iter *pkt)
+{
+ struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
+ uint16_t ring_mask = dma_info->ring_mask;
+ static bool vhost_async_dma_copy_log;
+
+ struct vhost_iovec *iov = pkt->iov;
+ int copy_idx = 0;
+ uint32_t nr_segs = pkt->nr_segs;
+ uint16_t i;
+
+ if (rte_dma_burst_capacity(dma_id, vchan_id) < nr_segs)
+ return -1;
+
+ for (i = 0; i < nr_segs; i++) {
+ copy_idx = rte_dma_copy(dma_id, vchan_id, (rte_iova_t)iov[i].src_addr,
+ (rte_iova_t)iov[i].dst_addr, iov[i].len, RTE_DMA_OP_FLAG_LLC);
+ /**
+ * Since all memory is pinned and the DMA vChannel
+ * ring has enough space, failure should be rare. If
+ * a failure does happen, the DMA device has hit a
+ * serious error; in that case, stop the async
+ * data path and check what happened to the DMA
+ * device.
+ */
+ if (unlikely(copy_idx < 0)) {
+ if (!vhost_async_dma_copy_log) {
+ VHOST_LOG_DATA(ERR, "(%s) DMA copy failed for channel %d:%u\n",
+ dev->ifname, dma_id, vchan_id);
+ vhost_async_dma_copy_log = true;
+ }
+ return -1;
+ }
+ }
+
+ /**
+ * Only store the packet completion flag address in the last
+ * copy's slot; the other slots are set to NULL.
+ */
+ dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = &vq->async->pkts_cmpl_flag[flag_idx];
+
+ return nr_segs;
+}
+
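+/* Enqueue and submit the DMA copies for a burst of packets on the given
+ * DMA vChannel. Returns the number of packets whose copies were
+ * successfully enqueued.
+ */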
+static __rte_always_inline uint16_t
+vhost_async_dma_transfer(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ int16_t dma_id, uint16_t vchan_id, uint16_t head_idx,
+ struct vhost_iov_iter *pkts, uint16_t nr_pkts)
+{
+ struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
+ int64_t ret, nr_copies = 0;
+ uint16_t pkt_idx;
+
+ rte_spinlock_lock(&dma_info->dma_lock);
+
+ for (pkt_idx = 0; pkt_idx < nr_pkts; pkt_idx++) {
+ ret = vhost_async_dma_transfer_one(dev, vq, dma_id, vchan_id, head_idx,
+ &pkts[pkt_idx]);
+ if (unlikely(ret < 0))
+ break;
+
+ nr_copies += ret;
+ head_idx++;
+ if (head_idx >= vq->size)
+ head_idx -= vq->size;
+ }
+
+ if (likely(nr_copies > 0))
+ rte_dma_submit(dma_id, vchan_id);
+
+ rte_spinlock_unlock(&dma_info->dma_lock);
+
+ return pkt_idx;
+}
+
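+/* Poll the given DMA vChannel for completed copies and set the completion
+ * flags of the corresponding packets. Returns the number of completed
+ * copy operations.
+ */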
+static __rte_always_inline uint16_t
+vhost_async_dma_check_completed(struct virtio_net *dev, int16_t dma_id, uint16_t vchan_id,
+ uint16_t max_pkts)
+{
+ struct async_dma_vchan_info *dma_info = &dma_copy_track[dma_id].vchans[vchan_id];
+ uint16_t ring_mask = dma_info->ring_mask;
+ uint16_t last_idx = 0;
+ uint16_t nr_copies;
+ uint16_t copy_idx;
+ uint16_t i;
+ bool has_error = false;
+ static bool vhost_async_dma_complete_log;
+
+ rte_spinlock_lock(&dma_info->dma_lock);
+
+ /**
+ * Print an error log for debugging if the DMA device reports
+ * errors during the transfer. Errors are not handled at the
+ * vhost level.
+ */
+ nr_copies = rte_dma_completed(dma_id, vchan_id, max_pkts, &last_idx, &has_error);
+ if (unlikely(!vhost_async_dma_complete_log && has_error)) {
+ VHOST_LOG_DATA(ERR, "(%s) DMA completion failure on channel %d:%u\n", dev->ifname,
+ dma_id, vchan_id);
+ vhost_async_dma_complete_log = true;
+ } else if (nr_copies == 0) {
+ goto out;
+ }
+
+ copy_idx = last_idx - nr_copies + 1;
+ for (i = 0; i < nr_copies; i++) {
+ bool *flag;
+
+ flag = dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask];
+ if (flag) {
+ /**
+ * Mark the packet's DMA copy as completed. The flag
+ * could belong to another virtqueue, but the write
+ * is atomic.
+ */
+ *flag = true;
+ dma_info->pkts_cmpl_flag_addr[copy_idx & ring_mask] = NULL;
+ }
+ copy_idx++;
+ }
+
+out:
+ rte_spinlock_unlock(&dma_info->dma_lock);
+ return nr_copies;
+}
+
static inline void
do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
}
static __rte_always_inline int
-async_iter_initialize(struct vhost_async *async)
+async_iter_initialize(struct virtio_net *dev, struct vhost_async *async)
{
- struct rte_vhost_iov_iter *iter;
+ struct vhost_iov_iter *iter;
if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
- VHOST_LOG_DATA(ERR, "no more async iovec available\n");
+ VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
return -1;
}
}
static __rte_always_inline int
-async_iter_add_iovec(struct vhost_async *async, void *src, void *dst, size_t len)
+async_iter_add_iovec(struct virtio_net *dev, struct vhost_async *async,
+ void *src, void *dst, size_t len)
{
- struct rte_vhost_iov_iter *iter;
- struct rte_vhost_iovec *iovec;
+ struct vhost_iov_iter *iter;
+ struct vhost_iovec *iovec;
if (unlikely(async->iovec_idx >= VHOST_MAX_ASYNC_VEC)) {
static bool vhost_max_async_vec_log;
if (!vhost_max_async_vec_log) {
- VHOST_LOG_DATA(ERR, "no more async iovec available\n");
+ VHOST_LOG_DATA(ERR, "(%s) no more async iovec available\n", dev->ifname);
vhost_max_async_vec_log = true;
}
static __rte_always_inline void
async_iter_cancel(struct vhost_async *async)
{
- struct rte_vhost_iov_iter *iter;
+ struct vhost_iov_iter *iter;
iter = async->iov_iter + async->iter_idx;
async->iovec_idx -= iter->nr_segs;
struct vhost_async *async = vq->async;
uint64_t mapped_len;
uint32_t buf_offset = 0;
- void *hpa;
+ void *host_iova;
while (cpy_len) {
- hpa = (void *)(uintptr_t)gpa_to_first_hpa(dev,
+ host_iova = (void *)(uintptr_t)gpa_to_first_hpa(dev,
buf_iova + buf_offset, cpy_len, &mapped_len);
- if (unlikely(!hpa)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to get hpa.\n", dev->vid, __func__);
+ if (unlikely(!host_iova)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: failed to get host iova.\n",
+ dev->ifname, __func__);
return -1;
}
- if (unlikely(async_iter_add_iovec(async,
+ if (unlikely(async_iter_add_iovec(dev, async,
(void *)(uintptr_t)rte_pktmbuf_iova_offset(m,
mbuf_offset),
- hpa, (size_t)mapped_len)))
+ host_iova, (size_t)mapped_len)))
return -1;
cpy_len -= (uint32_t)mapped_len;
}
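+/* Copy an mbuf chain into the guest buffers described by buf_vec, either
+ * synchronously via the CPU or, when is_async is true, by enqueueing
+ * asynchronous DMA copies.
+ */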
static __rte_always_inline int
-copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct rte_mbuf *m, struct buf_vector *buf_vec,
- uint16_t nr_vec, uint16_t num_buffers)
+mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *m, struct buf_vector *buf_vec,
+ uint16_t nr_vec, uint16_t num_buffers, bool is_async)
{
uint32_t vec_idx = 0;
uint32_t mbuf_offset, mbuf_avail;
uint64_t hdr_addr;
struct rte_mbuf *hdr_mbuf;
struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
-
- if (unlikely(m == NULL))
- return -1;
-
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
-
- if (unlikely(buf_len < dev->vhost_hlen && nr_vec <= 1))
- return -1;
-
- hdr_mbuf = m;
- hdr_addr = buf_addr;
- if (unlikely(buf_len < dev->vhost_hlen)) {
- memset(&tmp_hdr, 0, sizeof(struct virtio_net_hdr_mrg_rxbuf));
- hdr = &tmp_hdr;
- } else
- hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
-
- VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
- dev->vid, num_buffers);
-
- if (unlikely(buf_len < dev->vhost_hlen)) {
- buf_offset = dev->vhost_hlen - buf_len;
- vec_idx++;
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
- buf_avail = buf_len - buf_offset;
- } else {
- buf_offset = dev->vhost_hlen;
- buf_avail = buf_len - dev->vhost_hlen;
- }
-
- mbuf_avail = rte_pktmbuf_data_len(m);
- mbuf_offset = 0;
- while (mbuf_avail != 0 || m->next != NULL) {
- /* done with current buf, get the next one */
- if (buf_avail == 0) {
- vec_idx++;
- if (unlikely(vec_idx >= nr_vec))
- goto error;
-
- buf_addr = buf_vec[vec_idx].buf_addr;
- buf_iova = buf_vec[vec_idx].buf_iova;
- buf_len = buf_vec[vec_idx].buf_len;
-
- buf_offset = 0;
- buf_avail = buf_len;
- }
-
- /* done with current mbuf, get the next one */
- if (mbuf_avail == 0) {
- m = m->next;
-
- mbuf_offset = 0;
- mbuf_avail = rte_pktmbuf_data_len(m);
- }
-
- if (hdr_addr) {
- virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
- if (rxvq_is_mergeable(dev))
- ASSIGN_UNLESS_EQUAL(hdr->num_buffers,
- num_buffers);
-
- if (unlikely(hdr == &tmp_hdr)) {
- copy_vnet_hdr_to_desc(dev, vq, buf_vec, hdr);
- } else {
- PRINT_PACKET(dev, (uintptr_t)hdr_addr,
- dev->vhost_hlen, 0);
- vhost_log_cache_write_iova(dev, vq,
- buf_vec[0].buf_iova,
- dev->vhost_hlen);
- }
-
- hdr_addr = 0;
- }
-
- cpy_len = RTE_MIN(buf_avail, mbuf_avail);
-
- sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
- buf_addr + buf_offset,
- buf_iova + buf_offset, cpy_len);
-
- mbuf_avail -= cpy_len;
- mbuf_offset += cpy_len;
- buf_avail -= cpy_len;
- buf_offset += cpy_len;
- }
-
- return 0;
-error:
- return -1;
-}
-
-static __rte_always_inline int
-async_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
- struct rte_mbuf *m, struct buf_vector *buf_vec,
- uint16_t nr_vec, uint16_t num_buffers)
-{
struct vhost_async *async = vq->async;
- struct rte_mbuf *hdr_mbuf;
- struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
- uint64_t buf_addr, buf_iova;
- uint64_t hdr_addr;
- uint32_t vec_idx = 0;
- uint32_t mbuf_offset, mbuf_avail;
- uint32_t buf_offset, buf_avail;
- uint32_t cpy_len, buf_len;
if (unlikely(m == NULL))
return -1;
} else
hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
- VHOST_LOG_DATA(DEBUG, "(%d) RX: num merge buffers %d\n",
- dev->vid, num_buffers);
+ VHOST_LOG_DATA(DEBUG, "(%s) RX: num merge buffers %d\n",
+ dev->ifname, num_buffers);
if (unlikely(buf_len < dev->vhost_hlen)) {
buf_offset = dev->vhost_hlen - buf_len;
mbuf_avail = rte_pktmbuf_data_len(m);
mbuf_offset = 0;
- if (async_iter_initialize(async))
- return -1;
+ if (is_async) {
+ if (async_iter_initialize(dev, async))
+ return -1;
+ }
while (mbuf_avail != 0 || m->next != NULL) {
/* done with current buf, get the next one */
buf_len = buf_vec[vec_idx].buf_len;
buf_offset = 0;
- buf_avail = buf_len;
+ buf_avail = buf_len;
}
/* done with current mbuf, get the next one */
m = m->next;
mbuf_offset = 0;
- mbuf_avail = rte_pktmbuf_data_len(m);
+ mbuf_avail = rte_pktmbuf_data_len(m);
}
if (hdr_addr) {
cpy_len = RTE_MIN(buf_avail, mbuf_avail);
- if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
- buf_iova + buf_offset, cpy_len) < 0) {
- goto error;
+ if (is_async) {
+ if (async_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
+ buf_iova + buf_offset, cpy_len) < 0)
+ goto error;
+ } else {
+ sync_mbuf_to_desc_seg(dev, vq, m, mbuf_offset,
+ buf_addr + buf_offset,
+ buf_iova + buf_offset, cpy_len);
}
mbuf_avail -= cpy_len;
buf_offset += cpy_len;
}
- async_iter_finalize(async);
+ if (is_async)
+ async_iter_finalize(async);
return 0;
error:
- async_iter_cancel(async);
+ if (is_async)
+ async_iter_cancel(async);
return -1;
}
avail_idx -= vq->size;
}
- if (copy_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers) < 0)
+ if (mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, num_buffers, false) < 0)
return -1;
vhost_shadow_enqueue_single_packed(dev, vq, buffer_len, buffer_buf_id,
pkt_len, buf_vec, &num_buffers,
avail_head, &nr_vec) < 0)) {
VHOST_LOG_DATA(DEBUG,
- "(%d) failed to get enough desc from vring\n",
- dev->vid);
+ "(%s) failed to get enough desc from vring\n",
+ dev->ifname);
vq->shadow_used_idx -= num_buffers;
break;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx,
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
- if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx],
- buf_vec, nr_vec,
- num_buffers) < 0) {
+ if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec,
+ num_buffers, false) < 0) {
vq->shadow_used_idx -= num_buffers;
break;
}
if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
&nr_descs) < 0)) {
- VHOST_LOG_DATA(DEBUG,
- "(%d) failed to get enough desc from vring\n",
- dev->vid);
+ VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
+ dev->ifname);
return -1;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx,
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx,
vq->last_avail_idx + nr_descs);
vq_inc_last_avail_packed(vq, nr_descs);
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
return 0;
}
}
static __rte_noinline uint32_t
-virtio_dev_rx_async_submit_split(struct virtio_net *dev,
- struct vhost_virtqueue *vq, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
+virtio_dev_rx_async_submit_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
+ int16_t dma_id, uint16_t vchan_id)
{
struct buf_vector buf_vec[BUF_VECTOR_MAX];
uint32_t pkt_idx = 0;
struct vhost_async *async = vq->async;
struct async_inflight_info *pkts_info = async->pkts_info;
uint32_t pkt_err = 0;
- int32_t n_xfer;
+ uint16_t n_xfer;
uint16_t slot_idx = 0;
/*
if (unlikely(reserve_avail_buf_split(dev, vq, pkt_len, buf_vec,
&num_buffers, avail_head, &nr_vec) < 0)) {
- VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n",
- dev->vid);
+ VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n",
+ dev->ifname);
vq->shadow_used_idx -= num_buffers;
break;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx, vq->last_avail_idx + num_buffers);
- if (async_mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers) < 0) {
+ if (mbuf_to_desc(dev, vq, pkts[pkt_idx], buf_vec, nr_vec, num_buffers, true) < 0) {
vq->shadow_used_idx -= num_buffers;
break;
}
if (unlikely(pkt_idx == 0))
return 0;
- n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
- if (unlikely(n_xfer < 0)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
- dev->vid, __func__, queue_id);
- n_xfer = 0;
- }
+ n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
+ async->iov_iter, pkt_idx);
pkt_err = pkt_idx - n_xfer;
if (unlikely(pkt_err)) {
uint16_t num_descs = 0;
+ VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
+ dev->ifname, __func__, pkt_err, queue_id);
+
/* update number of completed packets */
pkt_idx = n_xfer;
return pkt_idx;
}
-static __rte_always_inline void
-vhost_update_used_packed(struct vhost_virtqueue *vq,
- struct vring_used_elem_packed *shadow_ring,
- uint16_t count)
-{
- int i;
- uint16_t used_idx = vq->last_used_idx;
- uint16_t head_idx = vq->last_used_idx;
- uint16_t head_flags = 0;
-
- if (count == 0)
- return;
-
- /* Split loop in two to save memory barriers */
- for (i = 0; i < count; i++) {
- vq->desc_packed[used_idx].id = shadow_ring[i].id;
- vq->desc_packed[used_idx].len = shadow_ring[i].len;
-
- used_idx += shadow_ring[i].count;
- if (used_idx >= vq->size)
- used_idx -= vq->size;
- }
-
- /* The ordering for storing desc flags needs to be enforced. */
- rte_atomic_thread_fence(__ATOMIC_RELEASE);
-
- for (i = 0; i < count; i++) {
- uint16_t flags;
-
- if (vq->shadow_used_packed[i].len)
- flags = VRING_DESC_F_WRITE;
- else
- flags = 0;
-
- if (vq->used_wrap_counter) {
- flags |= VRING_DESC_F_USED;
- flags |= VRING_DESC_F_AVAIL;
- } else {
- flags &= ~VRING_DESC_F_USED;
- flags &= ~VRING_DESC_F_AVAIL;
- }
-
- if (i > 0) {
- vq->desc_packed[vq->last_used_idx].flags = flags;
- } else {
- head_idx = vq->last_used_idx;
- head_flags = flags;
- }
-
- vq_inc_last_used_packed(vq, shadow_ring[i].count);
- }
-
- vq->desc_packed[head_idx].flags = head_flags;
-}
static __rte_always_inline int
vhost_enqueue_async_packed(struct virtio_net *dev,
avail_idx -= vq->size;
}
- if (unlikely(async_mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec,
- *nr_buffers) < 0))
+ if (unlikely(mbuf_to_desc(dev, vq, pkt, buf_vec, nr_vec, *nr_buffers, true) < 0))
return -1;
vhost_shadow_enqueue_packed(vq, buffer_len, buffer_buf_id, buffer_desc_count, *nr_buffers);
if (unlikely(vhost_enqueue_async_packed(dev, vq, pkt, buf_vec,
nr_descs, nr_buffers) < 0)) {
- VHOST_LOG_DATA(DEBUG, "(%d) failed to get enough desc from vring\n", dev->vid);
+ VHOST_LOG_DATA(DEBUG, "(%s) failed to get enough desc from vring\n", dev->ifname);
return -1;
}
- VHOST_LOG_DATA(DEBUG, "(%d) current index %d | end index %d\n",
- dev->vid, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
+ VHOST_LOG_DATA(DEBUG, "(%s) current index %d | end index %d\n",
+ dev->ifname, vq->last_avail_idx, vq->last_avail_idx + *nr_descs);
return 0;
}
}
static __rte_noinline uint32_t
-virtio_dev_rx_async_submit_packed(struct virtio_net *dev,
- struct vhost_virtqueue *vq, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
+virtio_dev_rx_async_submit_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count,
+ int16_t dma_id, uint16_t vchan_id)
{
uint32_t pkt_idx = 0;
uint32_t remained = count;
- int32_t n_xfer;
+ uint16_t n_xfer;
uint16_t num_buffers;
uint16_t num_descs;
if (unlikely(pkt_idx == 0))
return 0;
- n_xfer = async->ops.transfer_data(dev->vid, queue_id, async->iov_iter, 0, pkt_idx);
- if (unlikely(n_xfer < 0)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
- dev->vid, __func__, queue_id);
- n_xfer = 0;
- }
-
- pkt_err = pkt_idx - n_xfer;
+ n_xfer = vhost_async_dma_transfer(dev, vq, dma_id, vchan_id, async->pkts_idx,
+ async->iov_iter, pkt_idx);
async_iter_reset(async);
- if (unlikely(pkt_err))
+ pkt_err = pkt_idx - n_xfer;
+ if (unlikely(pkt_err)) {
+ VHOST_LOG_DATA(DEBUG, "(%s) %s: failed to transfer %u packets for queue %u.\n",
+ dev->ifname, __func__, pkt_err, queue_id);
dma_error_handler_packed(vq, slot_idx, pkt_err, &pkt_idx);
+ }
if (likely(vq->shadow_used_idx)) {
/* keep used descriptors. */
uint16_t n_buffers)
{
struct vhost_async *async = vq->async;
- uint16_t nr_left = n_buffers;
- uint16_t from, to;
+ uint16_t from = async->last_buffer_idx_packed;
+ uint16_t used_idx = vq->last_used_idx;
+ uint16_t head_idx = vq->last_used_idx;
+ uint16_t head_flags = 0;
+ uint16_t i;
- do {
- from = async->last_buffer_idx_packed;
- to = (from + nr_left) % vq->size;
- if (to > from) {
- vhost_update_used_packed(vq, async->buffers_packed + from, to - from);
- async->last_buffer_idx_packed += nr_left;
- nr_left = 0;
+ /* Split loop in two to save memory barriers */
+ for (i = 0; i < n_buffers; i++) {
+ vq->desc_packed[used_idx].id = async->buffers_packed[from].id;
+ vq->desc_packed[used_idx].len = async->buffers_packed[from].len;
+
+ used_idx += async->buffers_packed[from].count;
+ if (used_idx >= vq->size)
+ used_idx -= vq->size;
+
+ from++;
+ if (from >= vq->size)
+ from = 0;
+ }
+
+ /* The ordering for storing desc flags needs to be enforced. */
+ rte_atomic_thread_fence(__ATOMIC_RELEASE);
+
+ from = async->last_buffer_idx_packed;
+
+ for (i = 0; i < n_buffers; i++) {
+ uint16_t flags;
+
+ if (async->buffers_packed[from].len)
+ flags = VRING_DESC_F_WRITE;
+ else
+ flags = 0;
+
+ if (vq->used_wrap_counter) {
+ flags |= VRING_DESC_F_USED;
+ flags |= VRING_DESC_F_AVAIL;
} else {
- vhost_update_used_packed(vq, async->buffers_packed + from,
- vq->size - from);
- async->last_buffer_idx_packed = 0;
- nr_left -= vq->size - from;
+ flags &= ~VRING_DESC_F_USED;
+ flags &= ~VRING_DESC_F_AVAIL;
}
- } while (nr_left > 0);
+
+ if (i > 0) {
+ vq->desc_packed[vq->last_used_idx].flags = flags;
+ } else {
+ head_idx = vq->last_used_idx;
+ head_flags = flags;
+ }
+
+ vq_inc_last_used_packed(vq, async->buffers_packed[from].count);
+
+ from++;
+ if (from == vq->size)
+ from = 0;
+ }
+
+ vq->desc_packed[head_idx].flags = head_flags;
+ async->last_buffer_idx_packed = from;
}
static __rte_always_inline uint16_t
vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count)
+ struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
+ uint16_t vchan_id)
{
struct vhost_virtqueue *vq = dev->virtqueue[queue_id];
struct vhost_async *async = vq->async;
struct async_inflight_info *pkts_info = async->pkts_info;
- int32_t n_cpl;
+ uint16_t nr_cpl_pkts = 0;
uint16_t n_descs = 0, n_buffers = 0;
uint16_t start_idx, from, i;
- n_cpl = async->ops.check_completed_copies(dev->vid, queue_id, 0, count);
- if (unlikely(n_cpl < 0)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: failed to check completed copies for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
+ /* Check completed copies for the given DMA vChannel */
+ vhost_async_dma_check_completed(dev, dma_id, vchan_id, VHOST_DMA_MAX_COPY_COMPLETE);
+
+ start_idx = async_get_first_inflight_pkt_idx(vq);
+ /**
+ * Calculate the number of packets whose copies have completed.
+ * Note that there may be completed packets even if no copies
+ * are reported done by the given DMA vChannel, since a
+ * virtqueue may use multiple DMA vChannels.
+ */
+ from = start_idx;
+ while (vq->async->pkts_cmpl_flag[from] && count--) {
+ vq->async->pkts_cmpl_flag[from] = false;
+ from++;
+ if (from >= vq->size)
+ from -= vq->size;
+ nr_cpl_pkts++;
}
- if (n_cpl == 0)
+ if (nr_cpl_pkts == 0)
return 0;
- start_idx = async_get_first_inflight_pkt_idx(vq);
-
- for (i = 0; i < n_cpl; i++) {
+ for (i = 0; i < nr_cpl_pkts; i++) {
from = (start_idx + i) % vq->size;
/* Only used with packed ring */
n_buffers += pkts_info[from].nr_buffers;
pkts[i] = pkts_info[from].mbuf;
}
- async->pkts_inflight_n -= n_cpl;
+ async->pkts_inflight_n -= nr_cpl_pkts;
if (likely(vq->enabled && vq->access_ok)) {
if (vq_is_packed(dev)) {
}
}
- return n_cpl;
+ return nr_cpl_pkts;
}
uint16_t
rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count)
+ struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
+ uint16_t vchan_id)
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
if (unlikely(!dev))
return 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
+ return 0;
+ }
+
+ if (unlikely(!dma_copy_track[dma_id].vchans ||
+ !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
+ dma_id, vchan_id);
return 0;
}
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
+ if (!rte_spinlock_trylock(&vq->access_lock)) {
+ VHOST_LOG_DATA(DEBUG, "(%s) %s: virtqueue %u is busy.\n", dev->ifname, __func__,
+ queue_id);
return 0;
}
- rte_spinlock_lock(&vq->access_lock);
+ if (unlikely(!vq->async)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for virtqueue %d.\n",
+ dev->ifname, __func__, queue_id);
+ goto out;
+ }
- n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
+out:
rte_spinlock_unlock(&vq->access_lock);
return n_pkts_cpl;
uint16_t
rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count)
+ struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
+ uint16_t vchan_id)
{
struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
if (!dev)
return 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
vq = dev->virtqueue[queue_id];
if (unlikely(!vq->async)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: async not registered for queue id %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
- n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+ if (unlikely(!dma_copy_track[dma_id].vchans ||
+ !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
+ dma_id, vchan_id);
+ return 0;
+ }
+
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count, dma_id, vchan_id);
return n_pkts_cpl;
}
static __rte_always_inline uint32_t
virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
- struct rte_mbuf **pkts, uint32_t count)
+ struct rte_mbuf **pkts, uint32_t count, int16_t dma_id, uint16_t vchan_id)
{
struct vhost_virtqueue *vq;
uint32_t nb_tx = 0;
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
+ return 0;
+ }
+
+ if (unlikely(!dma_copy_track[dma_id].vchans ||
+ !dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr)) {
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid channel %d:%u.\n", dev->ifname, __func__,
+ dma_id, vchan_id);
return 0;
}
if (vq_is_packed(dev))
nb_tx = virtio_dev_rx_async_submit_packed(dev, vq, queue_id,
- pkts, count);
+ pkts, count, dma_id, vchan_id);
else
nb_tx = virtio_dev_rx_async_submit_split(dev, vq, queue_id,
- pkts, count);
+ pkts, count, dma_id, vchan_id);
out:
if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
uint16_t
rte_vhost_submit_enqueue_burst(int vid, uint16_t queue_id,
- struct rte_mbuf **pkts, uint16_t count)
+ struct rte_mbuf **pkts, uint16_t count, int16_t dma_id,
+ uint16_t vchan_id)
{
struct virtio_net *dev = get_device(vid);
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
return 0;
}
- return virtio_dev_rx_async_submit(dev, queue_id, pkts, count);
+ return virtio_dev_rx_async_submit(dev, queue_id, pkts, count, dma_id, vchan_id);
}
static inline bool
}
static __rte_always_inline void
-vhost_dequeue_offload_legacy(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
+vhost_dequeue_offload_legacy(struct virtio_net *dev, struct virtio_net_hdr *hdr,
+ struct rte_mbuf *m)
{
uint8_t l4_proto = 0;
struct rte_tcp_hdr *tcp_hdr = NULL;
m->l4_len = sizeof(struct rte_udp_hdr);
break;
default:
- VHOST_LOG_DATA(WARNING,
- "unsupported gso type %u.\n", hdr->gso_type);
+ VHOST_LOG_DATA(WARNING, "(%s) unsupported gso type %u.\n",
+ dev->ifname, hdr->gso_type);
goto error;
}
}
}
static __rte_always_inline void
-vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m,
- bool legacy_ol_flags)
+vhost_dequeue_offload(struct virtio_net *dev, struct virtio_net_hdr *hdr,
+ struct rte_mbuf *m, bool legacy_ol_flags)
{
struct rte_net_hdr_lens hdr_lens;
int l4_supported = 0;
return;
if (legacy_ol_flags) {
- vhost_dequeue_offload_legacy(hdr, m);
+ vhost_dequeue_offload_legacy(dev, hdr, m);
return;
}
if (mbuf_avail == 0) {
cur = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(cur == NULL)) {
- VHOST_LOG_DATA(ERR, "Failed to "
- "allocate memory for mbuf.\n");
+ VHOST_LOG_DATA(ERR, "(%s) failed to allocate memory for mbuf.\n",
+ dev->ifname);
error = -1;
goto out;
}
m->pkt_len += mbuf_offset;
if (hdr)
- vhost_dequeue_offload(hdr, m, legacy_ol_flags);
+ vhost_dequeue_offload(dev, hdr, m, legacy_ol_flags);
out:
}
static int
-virtio_dev_extbuf_alloc(struct rte_mbuf *pkt, uint32_t size)
+virtio_dev_extbuf_alloc(struct virtio_net *dev, struct rte_mbuf *pkt, uint32_t size)
{
struct rte_mbuf_ext_shared_info *shinfo = NULL;
uint32_t total_len = RTE_PKTMBUF_HEADROOM + size;
virtio_dev_extbuf_free, buf);
if (unlikely(shinfo == NULL)) {
rte_free(buf);
- VHOST_LOG_DATA(ERR, "Failed to init shinfo\n");
+ VHOST_LOG_DATA(ERR, "(%s) failed to init shinfo\n", dev->ifname);
return -1;
}
return 0;
/* attach an external buffer if supported */
- if (dev->extbuf && !virtio_dev_extbuf_alloc(pkt, data_len))
+ if (dev->extbuf && !virtio_dev_extbuf_alloc(dev, pkt, data_len))
return 0;
/* check if chained buffers are allowed */
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ VHOST_LOG_DATA(DEBUG, "(%s) %s\n", dev->ifname, __func__);
count = RTE_MIN(count, MAX_PKT_BURST);
count = RTE_MIN(count, free_entries);
- VHOST_LOG_DATA(DEBUG, "(%d) about to dequeue %u buffers\n",
- dev->vid, count);
+ VHOST_LOG_DATA(DEBUG, "(%s) about to dequeue %u buffers\n",
+ dev->ifname, count);
if (rte_pktmbuf_alloc_bulk(mbuf_pool, pkts, count))
return 0;
* is required. Drop this packet.
*/
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed mbuf alloc of size %d from %s on %s.\n",
- buf_len, mbuf_pool->name, dev->ifname);
+ VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
+ dev->ifname, buf_len, mbuf_pool->name);
allocerr_warned = true;
}
dropped += 1;
mbuf_pool, legacy_ol_flags);
if (unlikely(err)) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed to copy desc to mbuf on %s.\n",
+ VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
dev->ifname);
allocerr_warned = true;
}
if (virtio_net_with_host_offload(dev)) {
vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
hdr = (struct virtio_net_hdr *)(desc_addrs[i]);
- vhost_dequeue_offload(hdr, pkts[i], legacy_ol_flags);
+ vhost_dequeue_offload(dev, hdr, pkts[i], legacy_ol_flags);
}
}
if (unlikely(virtio_dev_pktmbuf_prep(dev, pkts, buf_len))) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed mbuf alloc of size %d from %s on %s.\n",
- buf_len, mbuf_pool->name, dev->ifname);
+ VHOST_LOG_DATA(ERR, "(%s) failed mbuf alloc of size %d from %s.\n",
+ dev->ifname, buf_len, mbuf_pool->name);
allocerr_warned = true;
}
return -1;
mbuf_pool, legacy_ol_flags);
if (unlikely(err)) {
if (!allocerr_warned) {
- VHOST_LOG_DATA(ERR,
- "Failed to copy desc to mbuf on %s.\n",
+ VHOST_LOG_DATA(ERR, "(%s) failed to copy desc to mbuf.\n",
dev->ifname);
allocerr_warned = true;
}
return 0;
if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: built-in vhost net backend is disabled.\n",
- dev->vid, __func__);
+ VHOST_LOG_DATA(ERR, "(%s) %s: built-in vhost net backend is disabled.\n",
+ dev->ifname, __func__);
return 0;
}
if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR,
- "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
+ VHOST_LOG_DATA(ERR, "(%s) %s: invalid virtqueue idx %d.\n",
+ dev->ifname, __func__, queue_id);
return 0;
}
rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
if (rarp_mbuf == NULL) {
- VHOST_LOG_DATA(ERR, "Failed to make RARP packet.\n");
+ VHOST_LOG_DATA(ERR, "(%s) failed to make RARP packet.\n", dev->ifname);
count = 0;
goto out;
}