X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvirtio_net.c;h=8ad30c94a6d1519660f3bae39d056d21c4fc0462;hb=88e2d78a20fae4cccde351e38e0b1ef534ad3302;hp=00fcbef3a0c4e73fe696e91132a05e32fcc73e4e;hpb=d6315ce7964d6ac22e73638baffffddde0fa73ff;p=dpdk.git diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 00fcbef3a0..8ad30c94a6 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -77,11 +77,12 @@ free_ind_table(void *idesc) } static __rte_always_inline void -do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq, - uint16_t to, uint16_t from, uint16_t size) +do_flush_shadow_used_ring_split(struct virtio_net *dev, + struct vhost_virtqueue *vq, + uint16_t to, uint16_t from, uint16_t size) { rte_memcpy(&vq->used->ring[to], - &vq->shadow_used_ring[from], + &vq->shadow_used_split[from], size * sizeof(struct vring_used_elem)); vhost_log_cache_used_vring(dev, vq, offsetof(struct vring_used, ring[to]), @@ -89,22 +90,22 @@ do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq, } static __rte_always_inline void -flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq) +flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint16_t used_idx = vq->last_used_idx & (vq->size - 1); if (used_idx + vq->shadow_used_idx <= vq->size) { - do_flush_shadow_used_ring(dev, vq, used_idx, 0, + do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, vq->shadow_used_idx); } else { uint16_t size; /* update used ring interval [used_idx, vq->size] */ size = vq->size - used_idx; - do_flush_shadow_used_ring(dev, vq, used_idx, 0, size); + do_flush_shadow_used_ring_split(dev, vq, used_idx, 0, size); /* update the left half used ring interval [0, left_size] */ - do_flush_shadow_used_ring(dev, vq, 0, size, + do_flush_shadow_used_ring_split(dev, vq, 0, size, vq->shadow_used_idx - size); } vq->last_used_idx += vq->shadow_used_idx; @@ -120,13 +121,78 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq) } static __rte_always_inline void -update_shadow_used_ring(struct vhost_virtqueue *vq, - uint16_t desc_idx, uint16_t len) +update_shadow_used_ring_split(struct vhost_virtqueue *vq, + uint16_t desc_idx, uint32_t len) { uint16_t i = vq->shadow_used_idx++; - vq->shadow_used_ring[i].id = desc_idx; - vq->shadow_used_ring[i].len = len; + vq->shadow_used_split[i].id = desc_idx; + vq->shadow_used_split[i].len = len; +} + +static __rte_always_inline void +flush_shadow_used_ring_packed(struct virtio_net *dev, + struct vhost_virtqueue *vq) +{ + int i; + uint16_t used_idx = vq->last_used_idx; + + /* Split loop in two to save memory barriers */ + for (i = 0; i < vq->shadow_used_idx; i++) { + vq->desc_packed[used_idx].id = vq->shadow_used_packed[i].id; + vq->desc_packed[used_idx].len = vq->shadow_used_packed[i].len; + + used_idx += vq->shadow_used_packed[i].count; + if (used_idx >= vq->size) + used_idx -= vq->size; + } + + rte_smp_wmb(); + + for (i = 0; i < vq->shadow_used_idx; i++) { + uint16_t flags; + + if (vq->shadow_used_packed[i].len) + flags = VRING_DESC_F_WRITE; + else + flags = 0; + + if (vq->used_wrap_counter) { + flags |= VRING_DESC_F_USED; + flags |= VRING_DESC_F_AVAIL; + } else { + flags &= ~VRING_DESC_F_USED; + flags &= ~VRING_DESC_F_AVAIL; + } + + vq->desc_packed[vq->last_used_idx].flags = flags; + + vhost_log_cache_used_vring(dev, vq, + vq->last_used_idx * + sizeof(struct vring_packed_desc), + sizeof(struct vring_packed_desc)); + + 
vq->last_used_idx += vq->shadow_used_packed[i].count; + if (vq->last_used_idx >= vq->size) { + vq->used_wrap_counter ^= 1; + vq->last_used_idx -= vq->size; + } + } + + rte_smp_wmb(); + vq->shadow_used_idx = 0; + vhost_log_cache_sync(dev, vq); +} + +static __rte_always_inline void +update_shadow_used_ring_packed(struct vhost_virtqueue *vq, + uint16_t desc_idx, uint32_t len, uint16_t count) +{ + uint16_t i = vq->shadow_used_idx++; + + vq->shadow_used_packed[i].id = desc_idx; + vq->shadow_used_packed[i].len = len; + vq->shadow_used_packed[i].count = count; } static inline void @@ -141,6 +207,8 @@ do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq) vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len); PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0); } + + vq->batch_copy_nb_elems = 0; } static inline void @@ -152,6 +220,8 @@ do_data_copy_dequeue(struct vhost_virtqueue *vq) for (i = 0; i < count; i++) rte_memcpy(elem[i].dst, elem[i].src, elem[i].len); + + vq->batch_copy_nb_elems = 0; } /* avoid write operation when necessary, to lessen cache issues */ @@ -222,15 +292,49 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) } static __rte_always_inline int -fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, - uint32_t avail_idx, uint32_t *vec_idx, +map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct buf_vector *buf_vec, uint16_t *vec_idx, + uint64_t desc_iova, uint64_t desc_len, uint8_t perm) +{ + uint16_t vec_id = *vec_idx; + + while (desc_len) { + uint64_t desc_addr; + uint64_t desc_chunck_len = desc_len; + + if (unlikely(vec_id >= BUF_VECTOR_MAX)) + return -1; + + desc_addr = vhost_iova_to_vva(dev, vq, + desc_iova, + &desc_chunck_len, + perm); + if (unlikely(!desc_addr)) + return -1; + + buf_vec[vec_id].buf_iova = desc_iova; + buf_vec[vec_id].buf_addr = desc_addr; + buf_vec[vec_id].buf_len = desc_chunck_len; + + desc_len -= desc_chunck_len; + desc_iova += desc_chunck_len; + vec_id++; + } + *vec_idx = vec_id; + + return 0; +} + +static __rte_always_inline int +fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint32_t avail_idx, uint16_t *vec_idx, struct buf_vector *buf_vec, uint16_t *desc_chain_head, - uint16_t *desc_chain_len, uint8_t perm) + uint32_t *desc_chain_len, uint8_t perm) { uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)]; - uint32_t vec_id = *vec_idx; + uint16_t vec_id = *vec_idx; uint32_t len = 0; - uint64_t dlen, desc_avail, desc_iova; + uint64_t dlen; struct vring_desc *descs = vq->desc; struct vring_desc *idesc = NULL; @@ -267,37 +371,13 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, return -1; } - len += descs[idx].len; - desc_avail = descs[idx].len; - desc_iova = descs[idx].addr; - - while (desc_avail) { - uint64_t desc_addr; - uint64_t desc_chunck_len = desc_avail; - - if (unlikely(vec_id >= BUF_VECTOR_MAX)) { - free_ind_table(idesc); - return -1; - } - desc_addr = vhost_iova_to_vva(dev, vq, - desc_iova, - &desc_chunck_len, - perm); - if (unlikely(!desc_addr)) { - free_ind_table(idesc); - return -1; - } - - buf_vec[vec_id].buf_iova = desc_iova; - buf_vec[vec_id].buf_addr = desc_addr; - buf_vec[vec_id].buf_len = desc_chunck_len; - buf_vec[vec_id].desc_idx = idx; - - desc_avail -= desc_chunck_len; - desc_iova += desc_chunck_len; - vec_id++; + if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id, + descs[idx].addr, descs[idx].len, + perm))) { + free_ind_table(idesc); + return -1; } if ((descs[idx].flags & VRING_DESC_F_NEXT) 
== 0) @@ -319,49 +399,218 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, * Returns -1 on fail, 0 on success */ static inline int -reserve_avail_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, +reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t size, struct buf_vector *buf_vec, uint16_t *num_buffers, uint16_t avail_head, uint16_t *nr_vec) { uint16_t cur_idx; - uint32_t vec_idx = 0; + uint16_t vec_idx = 0; uint16_t max_tries, tries = 0; uint16_t head_idx = 0; - uint16_t len = 0; + uint32_t len = 0; *num_buffers = 0; cur_idx = vq->last_avail_idx; if (rxvq_is_mergeable(dev)) - max_tries = vq->size; + max_tries = vq->size - 1; else max_tries = 1; while (size > 0) { if (unlikely(cur_idx == avail_head)) return -1; + /* + * if we tried all available ring items, and still + * can't get enough buf, it means something abnormal + * happened. + */ + if (unlikely(++tries > max_tries)) + return -1; - if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec, + if (unlikely(fill_vec_buf_split(dev, vq, cur_idx, + &vec_idx, buf_vec, &head_idx, &len, VHOST_ACCESS_RW) < 0)) return -1; len = RTE_MIN(len, size); - update_shadow_used_ring(vq, head_idx, len); + update_shadow_used_ring_split(vq, head_idx, len); size -= len; cur_idx++; - tries++; *num_buffers += 1; + } + + *nr_vec = vec_idx; + + return 0; +} + +static __rte_always_inline int +fill_vec_buf_packed_indirect(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct vring_packed_desc *desc, uint16_t *vec_idx, + struct buf_vector *buf_vec, uint32_t *len, uint8_t perm) +{ + uint16_t i; + uint32_t nr_descs; + uint16_t vec_id = *vec_idx; + uint64_t dlen; + struct vring_packed_desc *descs, *idescs = NULL; + + dlen = desc->len; + descs = (struct vring_packed_desc *)(uintptr_t) + vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO); + if (unlikely(!descs)) + return -1; + + if (unlikely(dlen < desc->len)) { + /* + * The indirect desc table is not contiguous + * in process VA space, we have to copy it. 
+ */ + idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len); + if (unlikely(!idescs)) + return -1; + + descs = idescs; + } + + nr_descs = desc->len / sizeof(struct vring_packed_desc); + if (unlikely(nr_descs >= vq->size)) { + free_ind_table(idescs); + return -1; + } + + for (i = 0; i < nr_descs; i++) { + if (unlikely(vec_id >= BUF_VECTOR_MAX)) { + free_ind_table(idescs); + return -1; + } + + *len += descs[i].len; + if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id, + descs[i].addr, descs[i].len, + perm))) + return -1; + } + *vec_idx = vec_id; + + if (unlikely(!!idescs)) + free_ind_table(idescs); + + return 0; +} + +static __rte_always_inline int +fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint16_t avail_idx, uint16_t *desc_count, + struct buf_vector *buf_vec, uint16_t *vec_idx, + uint16_t *buf_id, uint32_t *len, uint8_t perm) +{ + bool wrap_counter = vq->avail_wrap_counter; + struct vring_packed_desc *descs = vq->desc_packed; + uint16_t vec_id = *vec_idx; + + if (avail_idx < vq->last_avail_idx) + wrap_counter ^= 1; + + if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter))) + return -1; + + *desc_count = 0; + *len = 0; + + while (1) { + if (unlikely(vec_id >= BUF_VECTOR_MAX)) + return -1; + + *desc_count += 1; + *buf_id = descs[avail_idx].id; + + if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) { + if (unlikely(fill_vec_buf_packed_indirect(dev, vq, + &descs[avail_idx], + &vec_id, buf_vec, + len, perm) < 0)) + return -1; + } else { + *len += descs[avail_idx].len; + + if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id, + descs[avail_idx].addr, + descs[avail_idx].len, + perm))) + return -1; + } + + if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0) + break; + + if (++avail_idx >= vq->size) { + avail_idx -= vq->size; + wrap_counter ^= 1; + } + } + + *vec_idx = vec_id; + + return 0; +} +/* + * Returns -1 on fail, 0 on success + */ +static inline int +reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint32_t size, struct buf_vector *buf_vec, + uint16_t *nr_vec, uint16_t *num_buffers, + uint16_t *nr_descs) +{ + uint16_t avail_idx; + uint16_t vec_idx = 0; + uint16_t max_tries, tries = 0; + + uint16_t buf_id = 0; + uint32_t len = 0; + uint16_t desc_count; + + *num_buffers = 0; + avail_idx = vq->last_avail_idx; + + if (rxvq_is_mergeable(dev)) + max_tries = vq->size - 1; + else + max_tries = 1; + + while (size > 0) { /* * if we tried all available ring items, and still * can't get enough buf, it means something abnormal * happened. 
*/ - if (unlikely(tries > max_tries)) + if (unlikely(++tries > max_tries)) + return -1; + + if (unlikely(fill_vec_buf_packed(dev, vq, + avail_idx, &desc_count, + buf_vec, &vec_idx, + &buf_id, &len, + VHOST_ACCESS_RO) < 0)) return -1; + + len = RTE_MIN(len, size); + update_shadow_used_ring_packed(vq, buf_id, len, desc_count); + size -= len; + + avail_idx += desc_count; + if (avail_idx >= vq->size) + avail_idx -= vq->size; + + *nr_descs += desc_count; + *num_buffers += 1; } *nr_vec = vec_idx; @@ -469,7 +718,8 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t hdr_vec_idx = 0; while (remain) { - len = remain; + len = RTE_MIN(remain, + buf_vec[hdr_vec_idx].buf_len); dst = buf_vec[hdr_vec_idx].buf_addr; rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, @@ -496,7 +746,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, hdr_addr = 0; } - cpy_len = RTE_MIN(buf_len, mbuf_avail); + cpy_len = RTE_MIN(buf_avail, mbuf_avail); if (likely(cpy_len > MAX_BATCH_LEN || vq->batch_copy_nb_elems >= vq->size)) { @@ -530,52 +780,73 @@ out: } static __rte_always_inline uint32_t -virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, +virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mbuf **pkts, uint32_t count) { - struct vhost_virtqueue *vq; uint32_t pkt_idx = 0; uint16_t num_buffers; struct buf_vector buf_vec[BUF_VECTOR_MAX]; uint16_t avail_head; - VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); - if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) { - RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", - dev->vid, __func__, queue_id); - return 0; - } + rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]); + avail_head = *((volatile uint16_t *)&vq->avail->idx); - vq = dev->virtqueue[queue_id]; + for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { + uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen; + uint16_t nr_vec = 0; - rte_spinlock_lock(&vq->access_lock); + if (unlikely(reserve_avail_buf_split(dev, vq, + pkt_len, buf_vec, &num_buffers, + avail_head, &nr_vec) < 0)) { + VHOST_LOG_DEBUG(VHOST_DATA, + "(%d) failed to get enough desc from vring\n", + dev->vid); + vq->shadow_used_idx -= num_buffers; + break; + } - if (unlikely(vq->enabled == 0)) - goto out_access_unlock; + rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr); - if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) - vhost_user_iotlb_rd_lock(vq); + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n", + dev->vid, vq->last_avail_idx, + vq->last_avail_idx + num_buffers); - if (unlikely(vq->access_ok == 0)) - if (unlikely(vring_translate(dev, vq) < 0)) - goto out; + if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx], + buf_vec, nr_vec, + num_buffers) < 0) { + vq->shadow_used_idx -= num_buffers; + break; + } - count = RTE_MIN((uint32_t)MAX_PKT_BURST, count); - if (count == 0) - goto out; + vq->last_avail_idx += num_buffers; + } - vq->batch_copy_nb_elems = 0; + do_data_copy_enqueue(dev, vq); - rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]); + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); + } + + return pkt_idx; +} + +static __rte_always_inline uint32_t +virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mbuf **pkts, uint32_t count) +{ + uint32_t pkt_idx = 0; + uint16_t num_buffers; + struct buf_vector buf_vec[BUF_VECTOR_MAX]; - avail_head = *((volatile uint16_t 
*)&vq->avail->idx); for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen; uint16_t nr_vec = 0; + uint16_t nr_descs = 0; - if (unlikely(reserve_avail_buf(dev, vq, - pkt_len, buf_vec, &num_buffers, - avail_head, &nr_vec) < 0)) { + if (unlikely(reserve_avail_buf_packed(dev, vq, + pkt_len, buf_vec, &nr_vec, + &num_buffers, &nr_descs) < 0)) { VHOST_LOG_DEBUG(VHOST_DATA, "(%d) failed to get enough desc from vring\n", dev->vid); @@ -596,16 +867,60 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, break; } - vq->last_avail_idx += num_buffers; + vq->last_avail_idx += nr_descs; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } } do_data_copy_enqueue(dev, vq); if (likely(vq->shadow_used_idx)) { - flush_shadow_used_ring(dev, vq); - vhost_vring_call(dev, vq); + flush_shadow_used_ring_packed(dev, vq); + vhost_vring_call_packed(dev, vq); } + return pkt_idx; +} + +static __rte_always_inline uint32_t +virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, + struct rte_mbuf **pkts, uint32_t count) +{ + struct vhost_virtqueue *vq; + uint32_t nb_tx = 0; + + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) { + RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", + dev->vid, __func__, queue_id); + return 0; + } + + vq = dev->virtqueue[queue_id]; + + rte_spinlock_lock(&vq->access_lock); + + if (unlikely(vq->enabled == 0)) + goto out_access_unlock; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_lock(vq); + + if (unlikely(vq->access_ok == 0)) + if (unlikely(vring_translate(dev, vq) < 0)) + goto out; + + count = RTE_MIN((uint32_t)MAX_PKT_BURST, count); + if (count == 0) + goto out; + + if (vq_is_packed(dev)) + nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count); + else + nb_tx = virtio_dev_rx_split(dev, vq, pkts, count); + out: if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) vhost_user_iotlb_rd_unlock(vq); @@ -613,7 +928,7 @@ out: out_access_unlock: rte_spinlock_unlock(&vq->access_lock); - return pkt_idx; + return nb_tx; } uint16_t @@ -797,7 +1112,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, * in a contiguous virtual area. 
*/ while (remain) { - len = remain; + len = RTE_MIN(remain, + buf_vec[hdr_vec_idx].buf_len); src = buf_vec[hdr_vec_idx].buf_addr; rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len); @@ -1019,62 +1335,23 @@ restore_mbuf(struct rte_mbuf *m) } } -uint16_t -rte_vhost_dequeue_burst(int vid, uint16_t queue_id, +static __rte_always_inline uint16_t +virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) { - struct virtio_net *dev; - struct rte_mbuf *rarp_mbuf = NULL; - struct vhost_virtqueue *vq; - uint32_t i = 0; + uint16_t i; uint16_t free_entries; - dev = get_device(vid); - if (!dev) - return 0; - - if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) { - RTE_LOG(ERR, VHOST_DATA, - "(%d) %s: built-in vhost net backend is disabled.\n", - dev->vid, __func__); - return 0; - } - - if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) { - RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", - dev->vid, __func__, queue_id); - return 0; - } - - vq = dev->virtqueue[queue_id]; - - if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0)) - return 0; - - if (unlikely(vq->enabled == 0)) - goto out_access_unlock; - - vq->batch_copy_nb_elems = 0; - - if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) - vhost_user_iotlb_rd_lock(vq); - - if (unlikely(vq->access_ok == 0)) - if (unlikely(vring_translate(dev, vq) < 0)) - goto out; - if (unlikely(dev->dequeue_zero_copy)) { struct zcopy_mbuf *zmbuf, *next; - int nr_updated = 0; for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list); zmbuf != NULL; zmbuf = next) { next = TAILQ_NEXT(zmbuf, next); if (mbuf_is_consumed(zmbuf->mbuf)) { - update_shadow_used_ring(vq, zmbuf->desc_idx, 0); - nr_updated += 1; - + update_shadow_used_ring_split(vq, + zmbuf->desc_idx, 0); TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); restore_mbuf(zmbuf->mbuf); rte_pktmbuf_free(zmbuf->mbuf); @@ -1083,45 +1360,18 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, } } - flush_shadow_used_ring(dev, vq); - vhost_vring_call(dev, vq); + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); + } } rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]); - /* - * Construct a RARP broadcast packet, and inject it to the "pkts" - * array, to looks like that guest actually send such packet. - * - * Check user_send_rarp() for more information. - * - * broadcast_rarp shares a cacheline in the virtio_net structure - * with some fields that are accessed during enqueue and - * rte_atomic16_cmpset() causes a write if using cmpxchg. This could - * result in false sharing between enqueue and dequeue. - * - * Prevent unnecessary false sharing by reading broadcast_rarp first - * and only performing cmpset if the read indicates it is likely to - * be set. 
- */ - - if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) && - rte_atomic16_cmpset((volatile uint16_t *) - &dev->broadcast_rarp.cnt, 1, 0))) { - - rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac); - if (rarp_mbuf == NULL) { - RTE_LOG(ERR, VHOST_DATA, - "Failed to make RARP packet.\n"); - return 0; - } - count -= 1; - } - free_entries = *((volatile uint16_t *)&vq->avail->idx) - vq->last_avail_idx; if (free_entries == 0) - goto out; + return 0; VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); @@ -1132,11 +1382,12 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, for (i = 0; i < count; i++) { struct buf_vector buf_vec[BUF_VECTOR_MAX]; - uint16_t head_idx, dummy_len; - uint32_t nr_vec = 0; + uint16_t head_idx; + uint32_t dummy_len; + uint16_t nr_vec = 0; int err; - if (unlikely(fill_vec_buf(dev, vq, + if (unlikely(fill_vec_buf_split(dev, vq, vq->last_avail_idx + i, &nr_vec, buf_vec, &head_idx, &dummy_len, @@ -1144,7 +1395,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, break; if (likely(dev->dequeue_zero_copy == 0)) - update_shadow_used_ring(vq, head_idx, 0); + update_shadow_used_ring_split(vq, head_idx, 0); rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr); @@ -1191,10 +1442,212 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, do_data_copy_dequeue(vq); if (unlikely(i < count)) vq->shadow_used_idx = i; - flush_shadow_used_ring(dev, vq); - vhost_vring_call(dev, vq); + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); + } + } + + return i; +} + +static __rte_always_inline uint16_t +virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) +{ + uint16_t i; + + rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]); + + if (unlikely(dev->dequeue_zero_copy)) { + struct zcopy_mbuf *zmbuf, *next; + + for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list); + zmbuf != NULL; zmbuf = next) { + next = TAILQ_NEXT(zmbuf, next); + + if (mbuf_is_consumed(zmbuf->mbuf)) { + update_shadow_used_ring_packed(vq, + zmbuf->desc_idx, + 0, + zmbuf->desc_count); + + TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); + restore_mbuf(zmbuf->mbuf); + rte_pktmbuf_free(zmbuf->mbuf); + put_zmbuf(zmbuf); + vq->nr_zmbuf -= 1; + } + } + + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_packed(dev, vq); + vhost_vring_call_packed(dev, vq); + } + } + + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + + count = RTE_MIN(count, MAX_PKT_BURST); + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n", + dev->vid, count); + + for (i = 0; i < count; i++) { + struct buf_vector buf_vec[BUF_VECTOR_MAX]; + uint16_t buf_id; + uint32_t dummy_len; + uint16_t desc_count, nr_vec = 0; + int err; + + if (unlikely(fill_vec_buf_packed(dev, vq, + vq->last_avail_idx, &desc_count, + buf_vec, &nr_vec, + &buf_id, &dummy_len, + VHOST_ACCESS_RW) < 0)) + break; + + if (likely(dev->dequeue_zero_copy == 0)) + update_shadow_used_ring_packed(vq, buf_id, 0, + desc_count); + + rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr); + + pkts[i] = rte_pktmbuf_alloc(mbuf_pool); + if (unlikely(pkts[i] == NULL)) { + RTE_LOG(ERR, VHOST_DATA, + "Failed to allocate memory for mbuf.\n"); + break; + } + + err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i], + mbuf_pool); + if (unlikely(err)) { + rte_pktmbuf_free(pkts[i]); + break; + } + + if (unlikely(dev->dequeue_zero_copy)) { + struct zcopy_mbuf *zmbuf; + + zmbuf = get_zmbuf(vq); + if (!zmbuf) { + 
rte_pktmbuf_free(pkts[i]); + break; + } + zmbuf->mbuf = pkts[i]; + zmbuf->desc_idx = buf_id; + zmbuf->desc_count = desc_count; + + /* + * Pin lock the mbuf; we will check later to see + * whether the mbuf is freed (when we are the last + * user) or not. If that's the case, we then could + * update the used ring safely. + */ + rte_mbuf_refcnt_update(pkts[i], 1); + + vq->nr_zmbuf += 1; + TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next); + } + + vq->last_avail_idx += desc_count; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } + } + + if (likely(dev->dequeue_zero_copy == 0)) { + do_data_copy_dequeue(vq); + if (unlikely(i < count)) + vq->shadow_used_idx = i; + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_packed(dev, vq); + vhost_vring_call_packed(dev, vq); + } } + return i; +} + +uint16_t +rte_vhost_dequeue_burst(int vid, uint16_t queue_id, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) +{ + struct virtio_net *dev; + struct rte_mbuf *rarp_mbuf = NULL; + struct vhost_virtqueue *vq; + + dev = get_device(vid); + if (!dev) + return 0; + + if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) { + RTE_LOG(ERR, VHOST_DATA, + "(%d) %s: built-in vhost net backend is disabled.\n", + dev->vid, __func__); + return 0; + } + + if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) { + RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n", + dev->vid, __func__, queue_id); + return 0; + } + + vq = dev->virtqueue[queue_id]; + + if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0)) + return 0; + + if (unlikely(vq->enabled == 0)) { + count = 0; + goto out_access_unlock; + } + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_lock(vq); + + if (unlikely(vq->access_ok == 0)) + if (unlikely(vring_translate(dev, vq) < 0)) { + count = 0; + goto out; + } + + /* + * Construct a RARP broadcast packet, and inject it to the "pkts" + * array, to looks like that guest actually send such packet. + * + * Check user_send_rarp() for more information. + * + * broadcast_rarp shares a cacheline in the virtio_net structure + * with some fields that are accessed during enqueue and + * rte_atomic16_cmpset() causes a write if using cmpxchg. This could + * result in false sharing between enqueue and dequeue. + * + * Prevent unnecessary false sharing by reading broadcast_rarp first + * and only performing cmpset if the read indicates it is likely to + * be set. + */ + if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) && + rte_atomic16_cmpset((volatile uint16_t *) + &dev->broadcast_rarp.cnt, 1, 0))) { + + rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac); + if (rarp_mbuf == NULL) { + RTE_LOG(ERR, VHOST_DATA, + "Failed to make RARP packet.\n"); + count = 0; + goto out; + } + count -= 1; + } + + if (vq_is_packed(dev)) + count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count); + else + count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count); + out: if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) vhost_user_iotlb_rd_unlock(vq); @@ -1207,10 +1660,10 @@ out_access_unlock: * Inject it to the head of "pkts" array, so that switch's mac * learning table will get updated first. */ - memmove(&pkts[1], pkts, i * sizeof(struct rte_mbuf *)); + memmove(&pkts[1], pkts, count * sizeof(struct rte_mbuf *)); pkts[0] = rarp_mbuf; - i += 1; + count += 1; } - return i; + return count; }
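
Note: the hunks above add the virtio 1.1 "packed" virtqueue layout next to the
existing split layout. The availability test they rely on, desc_is_avail(), is
defined outside this file (in vhost.h in the same series). The sketch below
illustrates the wrap-counter protocol these hunks implement; it is an
illustration written for this page, not code from the patch, and the
sketch_-prefixed helper names are made up.

/*
 * A descriptor is available to the device when its AVAIL flag matches
 * the driver's avail wrap counter while its USED flag does not. This
 * mirrors the check fill_vec_buf_packed() performs via desc_is_avail().
 */
static inline bool
sketch_desc_is_avail(const struct vring_packed_desc *desc, bool wrap_counter)
{
	return wrap_counter == !!(desc->flags & VRING_DESC_F_AVAIL) &&
	       wrap_counter != !!(desc->flags & VRING_DESC_F_USED);
}

/*
 * Mirror of the flag computation in flush_shadow_used_ring_packed():
 * the device returns a descriptor by setting AVAIL and USED both equal
 * to its used wrap counter, so the driver sees AVAIL == USED and knows
 * the slot has been consumed. A write barrier (the rte_smp_wmb() in the
 * hunk above) must order the id/len stores before this flags store.
 */
static inline uint16_t
sketch_used_flags(bool used_wrap_counter, bool write)
{
	uint16_t flags = write ? VRING_DESC_F_WRITE : 0;

	if (used_wrap_counter)
		flags |= VRING_DESC_F_AVAIL | VRING_DESC_F_USED;

	return flags;
}

In both sketches the wrap counter flips each time last_avail_idx or
last_used_idx wraps past vq->size, which is exactly what the enqueue and
dequeue paths above do after advancing those indices.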