diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 2b7ffcf922..8ad30c94a6 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -122,7 +122,7 @@ flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 
 static __rte_always_inline void
 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
-			 uint16_t desc_idx, uint16_t len)
+			 uint16_t desc_idx, uint32_t len)
 {
 	uint16_t i = vq->shadow_used_idx++;
 
@@ -186,7 +186,7 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
 
 static __rte_always_inline void
 update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
-			 uint16_t desc_idx, uint16_t len, uint16_t count)
+			 uint16_t desc_idx, uint32_t len, uint16_t count)
 {
 	uint16_t i = vq->shadow_used_idx++;
 
@@ -329,7 +329,7 @@ static __rte_always_inline int
 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			 uint32_t avail_idx, uint16_t *vec_idx,
 			 struct buf_vector *buf_vec, uint16_t *desc_chain_head,
-			 uint16_t *desc_chain_len, uint8_t perm)
+			 uint32_t *desc_chain_len, uint8_t perm)
 {
 	uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
 	uint16_t vec_id = *vec_idx;
@@ -409,19 +409,26 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint16_t max_tries, tries = 0;
 
 	uint16_t head_idx = 0;
-	uint16_t len = 0;
+	uint32_t len = 0;
 
 	*num_buffers = 0;
 	cur_idx  = vq->last_avail_idx;
 
 	if (rxvq_is_mergeable(dev))
-		max_tries = vq->size;
+		max_tries = vq->size - 1;
 	else
 		max_tries = 1;
 
 	while (size > 0) {
 		if (unlikely(cur_idx == avail_head))
 			return -1;
+		/*
+		 * if we tried all available ring items, and still
+		 * can't get enough buf, it means something abnormal
+		 * happened.
+		 */
+		if (unlikely(++tries > max_tries))
+			return -1;
 
 		if (unlikely(fill_vec_buf_split(dev, vq, cur_idx,
 						&vec_idx, buf_vec,
@@ -433,16 +440,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		size -= len;
 
 		cur_idx++;
-		tries++;
 		*num_buffers += 1;
-
-		/*
-		 * if we tried all available ring items, and still
-		 * can't get enough buf, it means something abnormal
-		 * happened.
-		 */
-		if (unlikely(tries > max_tries))
-			return -1;
 	}
 
 	*nr_vec = vec_idx;
@@ -454,7 +452,7 @@ static __rte_always_inline int
 fill_vec_buf_packed_indirect(struct virtio_net *dev,
 			struct vhost_virtqueue *vq,
 			struct vring_packed_desc *desc, uint16_t *vec_idx,
-			struct buf_vector *buf_vec, uint16_t *len, uint8_t perm)
+			struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
 {
 	uint16_t i;
 	uint32_t nr_descs;
@@ -510,7 +508,7 @@ static __rte_always_inline int
 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 				uint16_t avail_idx, uint16_t *desc_count,
 				struct buf_vector *buf_vec, uint16_t *vec_idx,
-				uint16_t *buf_id, uint16_t *len, uint8_t perm)
+				uint16_t *buf_id, uint32_t *len, uint8_t perm)
 {
 	bool wrap_counter = vq->avail_wrap_counter;
 	struct vring_packed_desc *descs = vq->desc_packed;
@@ -523,6 +521,7 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		return -1;
 
 	*desc_count = 0;
+	*len = 0;
 
 	while (1) {
 		if (unlikely(vec_id >= BUF_VECTOR_MAX))
@@ -575,18 +574,26 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	uint16_t max_tries, tries = 0;
 
 	uint16_t buf_id = 0;
-	uint16_t len = 0;
+	uint32_t len = 0;
 	uint16_t desc_count;
 
 	*num_buffers = 0;
 	avail_idx = vq->last_avail_idx;
 
 	if (rxvq_is_mergeable(dev))
-		max_tries = vq->size;
+		max_tries = vq->size - 1;
 	else
 		max_tries = 1;
 
 	while (size > 0) {
+		/*
+		 * if we tried all available ring items, and still
+		 * can't get enough buf, it means something abnormal
+		 * happened.
+		 */
+		if (unlikely(++tries > max_tries))
+			return -1;
+
 		if (unlikely(fill_vec_buf_packed(dev, vq,
 						avail_idx, &desc_count,
 						buf_vec, &vec_idx,
@@ -603,16 +610,7 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			avail_idx -= vq->size;
 
 		*nr_descs += desc_count;
-		tries++;
 		*num_buffers += 1;
-
-		/*
-		 * if we tried all available ring items, and still
-		 * can't get enough buf, it means something abnormal
-		 * happened.
-		 */
-		if (unlikely(tries > max_tries))
-			return -1;
 	}
 
 	*nr_vec = vec_idx;
@@ -720,7 +718,8 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		uint16_t hdr_vec_idx = 0;
 
 		while (remain) {
-			len = remain;
+			len = RTE_MIN(remain,
+					buf_vec[hdr_vec_idx].buf_len);
 			dst = buf_vec[hdr_vec_idx].buf_addr;
 			rte_memcpy((void *)(uintptr_t)dst,
 					(void *)(uintptr_t)src,
@@ -747,7 +746,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			hdr_addr = 0;
 		}
 
-		cpy_len = RTE_MIN(buf_len, mbuf_avail);
+		cpy_len = RTE_MIN(buf_avail, mbuf_avail);
 
 		if (likely(cpy_len > MAX_BATCH_LEN ||
 					vq->batch_copy_nb_elems >= vq->size)) {
@@ -890,6 +889,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	struct rte_mbuf **pkts, uint32_t count)
 {
 	struct vhost_virtqueue *vq;
+	uint32_t nb_tx = 0;
 
 	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
@@ -917,9 +917,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 		goto out;
 
 	if (vq_is_packed(dev))
-		count = virtio_dev_rx_packed(dev, vq, pkts, count);
+		nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
 	else
-		count = virtio_dev_rx_split(dev, vq, pkts, count);
+		nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
 
 out:
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
@@ -928,7 +928,7 @@ out:
 out_access_unlock:
 	rte_spinlock_unlock(&vq->access_lock);
 
-	return count;
+	return nb_tx;
 }
 
 uint16_t
@@ -1112,7 +1112,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		 * in a contiguous virtual area.
 		 */
 		while (remain) {
-			len = remain;
+			len = RTE_MIN(remain,
+					buf_vec[hdr_vec_idx].buf_len);
 			src = buf_vec[hdr_vec_idx].buf_addr;
 			rte_memcpy((void *)(uintptr_t)dst,
 					(void *)(uintptr_t)src, len);
@@ -1343,7 +1344,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	if (unlikely(dev->dequeue_zero_copy)) {
 		struct zcopy_mbuf *zmbuf, *next;
-		int nr_updated = 0;
 
 		for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
 		     zmbuf != NULL; zmbuf = next) {
@@ -1352,8 +1352,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			if (mbuf_is_consumed(zmbuf->mbuf)) {
 				update_shadow_used_ring_split(vq,
 						zmbuf->desc_idx, 0);
-				nr_updated += 1;
-
 				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
 				restore_mbuf(zmbuf->mbuf);
 				rte_pktmbuf_free(zmbuf->mbuf);
@@ -1362,8 +1360,10 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			}
 		}
 
-		flush_shadow_used_ring_split(dev, vq);
-		vhost_vring_call_split(dev, vq);
+		if (likely(vq->shadow_used_idx)) {
+			flush_shadow_used_ring_split(dev, vq);
+			vhost_vring_call_split(dev, vq);
+		}
 	}
 
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
@@ -1382,7 +1382,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	for (i = 0; i < count; i++) {
 		struct buf_vector buf_vec[BUF_VECTOR_MAX];
-		uint16_t head_idx, dummy_len;
+		uint16_t head_idx;
+		uint32_t dummy_len;
 		uint16_t nr_vec = 0;
 		int err;
 
@@ -1441,8 +1442,10 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		do_data_copy_dequeue(vq);
 		if (unlikely(i < count))
 			vq->shadow_used_idx = i;
-		flush_shadow_used_ring_split(dev, vq);
-		vhost_vring_call_split(dev, vq);
+		if (likely(vq->shadow_used_idx)) {
+			flush_shadow_used_ring_split(dev, vq);
+			vhost_vring_call_split(dev, vq);
+		}
 	}
 
 	return i;
@@ -1477,8 +1480,10 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			}
 		}
 
-		flush_shadow_used_ring_packed(dev, vq);
-		vhost_vring_call_packed(dev, vq);
+		if (likely(vq->shadow_used_idx)) {
+			flush_shadow_used_ring_packed(dev, vq);
+			vhost_vring_call_packed(dev, vq);
+		}
 	}
 
 	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
@@ -1489,7 +1494,8 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
 	for (i = 0; i < count; i++) {
 		struct buf_vector buf_vec[BUF_VECTOR_MAX];
-		uint16_t buf_id, dummy_len;
+		uint16_t buf_id;
+		uint32_t dummy_len;
 		uint16_t desc_count, nr_vec = 0;
 		int err;
 
@@ -1555,8 +1561,10 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 		do_data_copy_dequeue(vq);
 		if (unlikely(i < count))
 			vq->shadow_used_idx = i;
-		flush_shadow_used_ring_packed(dev, vq);
-		vhost_vring_call_packed(dev, vq);
+		if (likely(vq->shadow_used_idx)) {
+			flush_shadow_used_ring_packed(dev, vq);
+			vhost_vring_call_packed(dev, vq);
+		}
 	}
 
 	return i;
@@ -1592,15 +1600,19 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
 		return 0;
 
-	if (unlikely(vq->enabled == 0))
+	if (unlikely(vq->enabled == 0)) {
+		count = 0;
 		goto out_access_unlock;
+	}
 
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_lock(vq);
 
 	if (unlikely(vq->access_ok == 0))
-		if (unlikely(vring_translate(dev, vq) < 0))
+		if (unlikely(vring_translate(dev, vq) < 0)) {
+			count = 0;
 			goto out;
+		}
 
 	/*
 	 * Construct a RARP broadcast packet, and inject it to the "pkts"
@@ -1625,7 +1637,8 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 		if (rarp_mbuf == NULL) {
 			RTE_LOG(ERR, VHOST_DATA,
 				"Failed to make RARP packet.\n");
-			return 0;
+			count = 0;
+			goto out;
 		}
 		count -= 1;
 	}
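
The common thread in the hunks above is the descriptor-chain length type: everywhere a chain length was accumulated or passed as uint16_t (update_shadow_used_ring_split/_packed, fill_vec_buf_split/_packed/_packed_indirect, reserve_avail_buf_split/_packed, and the dummy_len locals in the dequeue paths), it becomes uint32_t. A mergeable chain can describe more than 64 KiB of guest buffers, so a 16-bit total silently wraps. The standalone sketch below is not from the patch; the descriptor lengths are hypothetical values chosen only to show the wraparound the widened type avoids:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical chain: four 16 KiB buffers plus one extra
		 * byte, 65537 bytes in total -- too much for a uint16_t. */
		const uint32_t desc_len[] = { 16384, 16384, 16384, 16384, 1 };
		uint16_t len16 = 0;	/* accumulator width before the patch */
		uint32_t len32 = 0;	/* accumulator width after the patch */
		unsigned int i;

		for (i = 0; i < sizeof(desc_len) / sizeof(desc_len[0]); i++) {
			len16 += desc_len[i];	/* truncating add: wraps modulo 65536 */
			len32 += desc_len[i];
		}

		printf("uint16_t total: %u\n", (unsigned int)len16);	/* prints 1 */
		printf("uint32_t total: %u\n", (unsigned int)len32);	/* prints 65537 */
		return 0;
	}

The other changes read directly off the diff: the retry bound is now checked with ++tries > max_tries before each fill_vec_buf_* call (with max_tries = vq->size - 1 on mergeable queues), so an unsatisfiable request fails before another descriptor fetch rather than after it; the virtio-net header copy clamps each segment to RTE_MIN(remain, buf_vec[hdr_vec_idx].buf_len) instead of assuming one buffer holds the whole header; and the shadow used ring is flushed, and the guest notified, only when shadow_used_idx is non-zero.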