X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvirtio_net.c;h=5e1a1a727cf3bc0a8475b42c35311734ffd66a29;hb=43d162bc168e5c66346acf9f464495a088a5a9f0;hp=4b7d91dfb67180b3b6f9f29c39c5ff6375265c0c;hpb=37f5e79a271d598c88b85d169f22538a087ab0ad;p=dpdk.git diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 4b7d91dfb6..5e1a1a727c 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -122,7 +122,7 @@ flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq) static __rte_always_inline void update_shadow_used_ring_split(struct vhost_virtqueue *vq, - uint16_t desc_idx, uint16_t len) + uint16_t desc_idx, uint32_t len) { uint16_t i = vq->shadow_used_idx++; @@ -130,7 +130,7 @@ update_shadow_used_ring_split(struct vhost_virtqueue *vq, vq->shadow_used_split[i].len = len; } -static __rte_unused __rte_always_inline void +static __rte_always_inline void flush_shadow_used_ring_packed(struct virtio_net *dev, struct vhost_virtqueue *vq) { @@ -184,9 +184,9 @@ flush_shadow_used_ring_packed(struct virtio_net *dev, vhost_log_cache_sync(dev, vq); } -static __rte_unused __rte_always_inline void +static __rte_always_inline void update_shadow_used_ring_packed(struct vhost_virtqueue *vq, - uint16_t desc_idx, uint16_t len, uint16_t count) + uint16_t desc_idx, uint32_t len, uint16_t count) { uint16_t i = vq->shadow_used_idx++; @@ -291,16 +291,50 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) } } +static __rte_always_inline int +map_one_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct buf_vector *buf_vec, uint16_t *vec_idx, + uint64_t desc_iova, uint64_t desc_len, uint8_t perm) +{ + uint16_t vec_id = *vec_idx; + + while (desc_len) { + uint64_t desc_addr; + uint64_t desc_chunck_len = desc_len; + + if (unlikely(vec_id >= BUF_VECTOR_MAX)) + return -1; + + desc_addr = vhost_iova_to_vva(dev, vq, + desc_iova, + &desc_chunck_len, + perm); + if (unlikely(!desc_addr)) + return -1; + + buf_vec[vec_id].buf_iova = desc_iova; + buf_vec[vec_id].buf_addr = desc_addr; + buf_vec[vec_id].buf_len = desc_chunck_len; + + desc_len -= desc_chunck_len; + desc_iova += desc_chunck_len; + vec_id++; + } + *vec_idx = vec_id; + + return 0; +} + static __rte_always_inline int fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t avail_idx, uint16_t *vec_idx, struct buf_vector *buf_vec, uint16_t *desc_chain_head, - uint16_t *desc_chain_len, uint8_t perm) + uint32_t *desc_chain_len, uint8_t perm) { uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)]; uint16_t vec_id = *vec_idx; uint32_t len = 0; - uint64_t dlen, desc_avail, desc_iova; + uint64_t dlen; struct vring_desc *descs = vq->desc; struct vring_desc *idesc = NULL; @@ -337,37 +371,13 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, return -1; } - len += descs[idx].len; - desc_avail = descs[idx].len; - desc_iova = descs[idx].addr; - - while (desc_avail) { - uint64_t desc_addr; - uint64_t desc_chunck_len = desc_avail; - - if (unlikely(vec_id >= BUF_VECTOR_MAX)) { - free_ind_table(idesc); - return -1; - } - - desc_addr = vhost_iova_to_vva(dev, vq, - desc_iova, - &desc_chunck_len, - perm); - if (unlikely(!desc_addr)) { - free_ind_table(idesc); - return -1; - } - buf_vec[vec_id].buf_iova = desc_iova; - buf_vec[vec_id].buf_addr = desc_addr; - buf_vec[vec_id].buf_len = desc_chunck_len; - buf_vec[vec_id].desc_idx = idx; - - desc_avail -= desc_chunck_len; - desc_iova += desc_chunck_len; - 
vec_id++; + if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id, + descs[idx].addr, descs[idx].len, + perm))) { + free_ind_table(idesc); + return -1; } if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0) @@ -399,19 +409,26 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t max_tries, tries = 0; uint16_t head_idx = 0; - uint16_t len = 0; + uint32_t len = 0; *num_buffers = 0; cur_idx = vq->last_avail_idx; if (rxvq_is_mergeable(dev)) - max_tries = vq->size; + max_tries = vq->size - 1; else max_tries = 1; while (size > 0) { if (unlikely(cur_idx == avail_head)) return -1; + /* + * if we tried all available ring items, and still + * can't get enough buf, it means something abnormal + * happened. + */ + if (unlikely(++tries > max_tries)) + return -1; if (unlikely(fill_vec_buf_split(dev, vq, cur_idx, &vec_idx, buf_vec, @@ -423,16 +440,177 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq, size -= len; cur_idx++; - tries++; *num_buffers += 1; + } + *nr_vec = vec_idx; + + return 0; +} + +static __rte_always_inline int +fill_vec_buf_packed_indirect(struct virtio_net *dev, + struct vhost_virtqueue *vq, + struct vring_packed_desc *desc, uint16_t *vec_idx, + struct buf_vector *buf_vec, uint32_t *len, uint8_t perm) +{ + uint16_t i; + uint32_t nr_descs; + uint16_t vec_id = *vec_idx; + uint64_t dlen; + struct vring_packed_desc *descs, *idescs = NULL; + + dlen = desc->len; + descs = (struct vring_packed_desc *)(uintptr_t) + vhost_iova_to_vva(dev, vq, desc->addr, &dlen, VHOST_ACCESS_RO); + if (unlikely(!descs)) + return -1; + + if (unlikely(dlen < desc->len)) { + /* + * The indirect desc table is not contiguous + * in process VA space, we have to copy it. + */ + idescs = alloc_copy_ind_table(dev, vq, desc->addr, desc->len); + if (unlikely(!idescs)) + return -1; + + descs = idescs; + } + + nr_descs = desc->len / sizeof(struct vring_packed_desc); + if (unlikely(nr_descs >= vq->size)) { + free_ind_table(idescs); + return -1; + } + + for (i = 0; i < nr_descs; i++) { + if (unlikely(vec_id >= BUF_VECTOR_MAX)) { + free_ind_table(idescs); + return -1; + } + + *len += descs[i].len; + if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id, + descs[i].addr, descs[i].len, + perm))) + return -1; + } + *vec_idx = vec_id; + + if (unlikely(!!idescs)) + free_ind_table(idescs); + + return 0; +} + +static __rte_always_inline int +fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint16_t avail_idx, uint16_t *desc_count, + struct buf_vector *buf_vec, uint16_t *vec_idx, + uint16_t *buf_id, uint32_t *len, uint8_t perm) +{ + bool wrap_counter = vq->avail_wrap_counter; + struct vring_packed_desc *descs = vq->desc_packed; + uint16_t vec_id = *vec_idx; + + if (avail_idx < vq->last_avail_idx) + wrap_counter ^= 1; + + if (unlikely(!desc_is_avail(&descs[avail_idx], wrap_counter))) + return -1; + + *desc_count = 0; + *len = 0; + + while (1) { + if (unlikely(vec_id >= BUF_VECTOR_MAX)) + return -1; + + *desc_count += 1; + *buf_id = descs[avail_idx].id; + + if (descs[avail_idx].flags & VRING_DESC_F_INDIRECT) { + if (unlikely(fill_vec_buf_packed_indirect(dev, vq, + &descs[avail_idx], + &vec_id, buf_vec, + len, perm) < 0)) + return -1; + } else { + *len += descs[avail_idx].len; + + if (unlikely(map_one_desc(dev, vq, buf_vec, &vec_id, + descs[avail_idx].addr, + descs[avail_idx].len, + perm))) + return -1; + } + + if ((descs[avail_idx].flags & VRING_DESC_F_NEXT) == 0) + break; + + if (++avail_idx >= vq->size) { + avail_idx -= vq->size; + wrap_counter ^= 1; + } + 
} + + *vec_idx = vec_id; + + return 0; +} + +/* + * Returns -1 on fail, 0 on success + */ +static inline int +reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + uint32_t size, struct buf_vector *buf_vec, + uint16_t *nr_vec, uint16_t *num_buffers, + uint16_t *nr_descs) +{ + uint16_t avail_idx; + uint16_t vec_idx = 0; + uint16_t max_tries, tries = 0; + + uint16_t buf_id = 0; + uint32_t len = 0; + uint16_t desc_count; + + *num_buffers = 0; + avail_idx = vq->last_avail_idx; + + if (rxvq_is_mergeable(dev)) + max_tries = vq->size - 1; + else + max_tries = 1; + + while (size > 0) { /* * if we tried all available ring items, and still * can't get enough buf, it means something abnormal * happened. */ - if (unlikely(tries > max_tries)) + if (unlikely(++tries > max_tries)) return -1; + + if (unlikely(fill_vec_buf_packed(dev, vq, + avail_idx, &desc_count, + buf_vec, &vec_idx, + &buf_id, &len, + VHOST_ACCESS_RW) < 0)) + return -1; + + len = RTE_MIN(len, size); + update_shadow_used_ring_packed(vq, buf_id, len, desc_count); + size -= len; + + avail_idx += desc_count; + if (avail_idx >= vq->size) + avail_idx -= vq->size; + + *nr_descs += desc_count; + *num_buffers += 1; } *nr_vec = vec_idx; @@ -540,7 +718,8 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t hdr_vec_idx = 0; while (remain) { - len = remain; + len = RTE_MIN(remain, + buf_vec[hdr_vec_idx].buf_len); dst = buf_vec[hdr_vec_idx].buf_addr; rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, @@ -567,7 +746,7 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, hdr_addr = 0; } - cpy_len = RTE_MIN(buf_len, mbuf_avail); + cpy_len = RTE_MIN(buf_avail, mbuf_avail); if (likely(cpy_len > MAX_BATCH_LEN || vq->batch_copy_nb_elems >= vq->size)) { @@ -646,7 +825,60 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, if (likely(vq->shadow_used_idx)) { flush_shadow_used_ring_split(dev, vq); - vhost_vring_call(dev, vq); + vhost_vring_call_split(dev, vq); + } + + return pkt_idx; +} + +static __rte_always_inline uint32_t +virtio_dev_rx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mbuf **pkts, uint32_t count) +{ + uint32_t pkt_idx = 0; + uint16_t num_buffers; + struct buf_vector buf_vec[BUF_VECTOR_MAX]; + + for (pkt_idx = 0; pkt_idx < count; pkt_idx++) { + uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen; + uint16_t nr_vec = 0; + uint16_t nr_descs = 0; + + if (unlikely(reserve_avail_buf_packed(dev, vq, + pkt_len, buf_vec, &nr_vec, + &num_buffers, &nr_descs) < 0)) { + VHOST_LOG_DEBUG(VHOST_DATA, + "(%d) failed to get enough desc from vring\n", + dev->vid); + vq->shadow_used_idx -= num_buffers; + break; + } + + rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr); + + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n", + dev->vid, vq->last_avail_idx, + vq->last_avail_idx + num_buffers); + + if (copy_mbuf_to_desc(dev, vq, pkts[pkt_idx], + buf_vec, nr_vec, + num_buffers) < 0) { + vq->shadow_used_idx -= num_buffers; + break; + } + + vq->last_avail_idx += nr_descs; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } + } + + do_data_copy_enqueue(dev, vq); + + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_packed(dev, vq); + vhost_vring_call_packed(dev, vq); } return pkt_idx; @@ -657,6 +889,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count) { struct vhost_virtqueue *vq; + uint32_t nb_tx = 
0; VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) { @@ -683,7 +916,10 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, if (count == 0) goto out; - count = virtio_dev_rx_split(dev, vq, pkts, count); + if (vq_is_packed(dev)) + nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count); + else + nb_tx = virtio_dev_rx_split(dev, vq, pkts, count); out: if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) @@ -692,7 +928,7 @@ out: out_access_unlock: rte_spinlock_unlock(&vq->access_lock); - return count; + return nb_tx; } uint16_t @@ -876,7 +1112,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, * in a contiguous virtual area. */ while (remain) { - len = remain; + len = RTE_MIN(remain, + buf_vec[hdr_vec_idx].buf_len); src = buf_vec[hdr_vec_idx].buf_addr; rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len); @@ -1107,7 +1344,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, if (unlikely(dev->dequeue_zero_copy)) { struct zcopy_mbuf *zmbuf, *next; - int nr_updated = 0; for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list); zmbuf != NULL; zmbuf = next) { @@ -1116,8 +1352,6 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, if (mbuf_is_consumed(zmbuf->mbuf)) { update_shadow_used_ring_split(vq, zmbuf->desc_idx, 0); - nr_updated += 1; - TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); restore_mbuf(zmbuf->mbuf); rte_pktmbuf_free(zmbuf->mbuf); @@ -1126,8 +1360,10 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, } } - flush_shadow_used_ring_split(dev, vq); - vhost_vring_call(dev, vq); + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); + } } rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]); @@ -1146,7 +1382,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, for (i = 0; i < count; i++) { struct buf_vector buf_vec[BUF_VECTOR_MAX]; - uint16_t head_idx, dummy_len; + uint16_t head_idx; + uint32_t dummy_len; uint16_t nr_vec = 0; int err; @@ -1205,8 +1442,129 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq, do_data_copy_dequeue(vq); if (unlikely(i < count)) vq->shadow_used_idx = i; - flush_shadow_used_ring_split(dev, vq); - vhost_vring_call(dev, vq); + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_split(dev, vq); + vhost_vring_call_split(dev, vq); + } + } + + return i; +} + +static __rte_always_inline uint16_t +virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) +{ + uint16_t i; + + rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]); + + if (unlikely(dev->dequeue_zero_copy)) { + struct zcopy_mbuf *zmbuf, *next; + + for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list); + zmbuf != NULL; zmbuf = next) { + next = TAILQ_NEXT(zmbuf, next); + + if (mbuf_is_consumed(zmbuf->mbuf)) { + update_shadow_used_ring_packed(vq, + zmbuf->desc_idx, + 0, + zmbuf->desc_count); + + TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); + restore_mbuf(zmbuf->mbuf); + rte_pktmbuf_free(zmbuf->mbuf); + put_zmbuf(zmbuf); + vq->nr_zmbuf -= 1; + } + } + + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_packed(dev, vq); + vhost_vring_call_packed(dev, vq); + } + } + + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__); + + count = RTE_MIN(count, MAX_PKT_BURST); + VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n", 
+ dev->vid, count); + + for (i = 0; i < count; i++) { + struct buf_vector buf_vec[BUF_VECTOR_MAX]; + uint16_t buf_id; + uint32_t dummy_len; + uint16_t desc_count, nr_vec = 0; + int err; + + if (unlikely(fill_vec_buf_packed(dev, vq, + vq->last_avail_idx, &desc_count, + buf_vec, &nr_vec, + &buf_id, &dummy_len, + VHOST_ACCESS_RO) < 0)) + break; + + if (likely(dev->dequeue_zero_copy == 0)) + update_shadow_used_ring_packed(vq, buf_id, 0, + desc_count); + + rte_prefetch0((void *)(uintptr_t)buf_vec[0].buf_addr); + + pkts[i] = rte_pktmbuf_alloc(mbuf_pool); + if (unlikely(pkts[i] == NULL)) { + RTE_LOG(ERR, VHOST_DATA, + "Failed to allocate memory for mbuf.\n"); + break; + } + + err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i], + mbuf_pool); + if (unlikely(err)) { + rte_pktmbuf_free(pkts[i]); + break; + } + + if (unlikely(dev->dequeue_zero_copy)) { + struct zcopy_mbuf *zmbuf; + + zmbuf = get_zmbuf(vq); + if (!zmbuf) { + rte_pktmbuf_free(pkts[i]); + break; + } + zmbuf->mbuf = pkts[i]; + zmbuf->desc_idx = buf_id; + zmbuf->desc_count = desc_count; + + /* + * Pin lock the mbuf; we will check later to see + * whether the mbuf is freed (when we are the last + * user) or not. If that's the case, we then could + * update the used ring safely. + */ + rte_mbuf_refcnt_update(pkts[i], 1); + + vq->nr_zmbuf += 1; + TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next); + } + + vq->last_avail_idx += desc_count; + if (vq->last_avail_idx >= vq->size) { + vq->last_avail_idx -= vq->size; + vq->avail_wrap_counter ^= 1; + } + } + + if (likely(dev->dequeue_zero_copy == 0)) { + do_data_copy_dequeue(vq); + if (unlikely(i < count)) + vq->shadow_used_idx = i; + if (likely(vq->shadow_used_idx)) { + flush_shadow_used_ring_packed(dev, vq); + vhost_vring_call_packed(dev, vq); + } } return i; @@ -1242,15 +1600,19 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0)) return 0; - if (unlikely(vq->enabled == 0)) + if (unlikely(vq->enabled == 0)) { + count = 0; goto out_access_unlock; + } if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) vhost_user_iotlb_rd_lock(vq); if (unlikely(vq->access_ok == 0)) - if (unlikely(vring_translate(dev, vq) < 0)) + if (unlikely(vring_translate(dev, vq) < 0)) { + count = 0; goto out; + } /* * Construct a RARP broadcast packet, and inject it to the "pkts" @@ -1275,12 +1637,16 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, if (rarp_mbuf == NULL) { RTE_LOG(ERR, VHOST_DATA, "Failed to make RARP packet.\n"); - return 0; + count = 0; + goto out; } count -= 1; } - count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count); + if (vq_is_packed(dev)) + count = virtio_dev_tx_packed(dev, vq, mbuf_pool, pkts, count); + else + count = virtio_dev_tx_split(dev, vq, mbuf_pool, pkts, count); out: if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
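
The patch above replaces the open-coded descriptor walk in fill_vec_buf_split() with map_one_desc(), which copes with guest buffers that are contiguous in IOVA space but split across several host-virtual mappings: the translation may return a chunk shorter than requested, so the loop keeps translating until the whole descriptor is covered, recording one buf_vector entry per chunk. The following self-contained sketch shows the same chunking idea under simplified assumptions; toy_iova_to_vva(), TOY_CHUNK and the toy_* types are invented for illustration and are not DPDK APIs.

/*
 * Illustrative sketch only -- not the DPDK code. It mimics the shape of
 * map_one_desc() from the patch: keep translating until the descriptor
 * length is fully covered, one vector entry per host-contiguous chunk.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_CHUNK   0x1000ULL   /* pretend host mappings are 4 KiB */
#define TOY_VEC_MAX 8

struct toy_buf_vector {
	uint64_t iova;
	uint64_t addr;
	uint64_t len;
};

/* Fake translation: identity-map, but never hand back more than one chunk. */
static uint64_t
toy_iova_to_vva(uint64_t iova, uint64_t *len)
{
	uint64_t room = TOY_CHUNK - (iova & (TOY_CHUNK - 1));

	if (*len > room)
		*len = room;
	return 0x7f0000000000ULL + iova;   /* arbitrary host VA base */
}

static int
toy_map_one_desc(struct toy_buf_vector *vec, uint16_t *vec_idx,
		 uint64_t desc_iova, uint64_t desc_len)
{
	uint16_t i = *vec_idx;

	while (desc_len) {
		uint64_t chunk = desc_len;
		uint64_t vva;

		if (i >= TOY_VEC_MAX)
			return -1;   /* ran out of vector slots */

		vva = toy_iova_to_vva(desc_iova, &chunk);
		vec[i].iova = desc_iova;
		vec[i].addr = vva;
		vec[i].len  = chunk;

		desc_iova += chunk;
		desc_len  -= chunk;
		i++;
	}
	*vec_idx = i;
	return 0;
}

int
main(void)
{
	struct toy_buf_vector vec[TOY_VEC_MAX];
	uint16_t idx = 0;

	/* A 10000-byte descriptor starting 256 bytes into a chunk. */
	if (toy_map_one_desc(vec, &idx, 0x100, 10000) == 0) {
		for (uint16_t i = 0; i < idx; i++)
			printf("chunk %u: iova=0x%llx len=%llu\n", (unsigned)i,
			       (unsigned long long)vec[i].iova,
			       (unsigned long long)vec[i].len);
	}
	return 0;
}

In the real code the chunk length comes back from vhost_iova_to_vva() and the caller also records the descriptor index; the toy version keeps only the fields needed to show the splitting.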
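
The packed-ring helpers added above (fill_vec_buf_packed(), reserve_avail_buf_packed(), virtio_dev_tx_packed()) all hinge on the wrap-counter test performed by desc_is_avail() and on toggling that counter whenever the ring index wraps. The sketch below illustrates just that mechanism; the sketch_* struct, helpers and flag constants are simplified stand-ins modelled on the virtio 1.1 packed-ring layout (AVAIL is bit 7 and USED is bit 15 of the descriptor flags), not the definitions from lib/librte_vhost.

/*
 * Illustrative sketch only -- NOT the DPDK implementation.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_DESC_F_AVAIL (1u << 7)
#define SKETCH_DESC_F_USED  (1u << 15)

struct sketch_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/*
 * A descriptor is consumable when its AVAIL bit matches the current wrap
 * counter and its USED bit does not -- the same condition desc_is_avail()
 * checks in the patch above.
 */
static bool
sketch_desc_is_avail(const struct sketch_packed_desc *desc, bool wrap_counter)
{
	bool avail = !!(desc->flags & SKETCH_DESC_F_AVAIL);
	bool used  = !!(desc->flags & SKETCH_DESC_F_USED);

	return avail == wrap_counter && used != wrap_counter;
}

/*
 * Walking the ring: whenever the index passes the ring size, the wrap
 * counter is toggled, mirroring the "avail_idx -= vq->size;
 * wrap_counter ^= 1;" pattern in fill_vec_buf_packed().
 */
static void
sketch_advance(uint16_t *idx, bool *wrap_counter, uint16_t ring_size)
{
	if (++(*idx) >= ring_size) {
		*idx -= ring_size;
		*wrap_counter ^= 1;
	}
}

int
main(void)
{
	struct sketch_packed_desc ring[4] = { 0 };
	uint16_t idx = 0;
	bool wrap = true;

	/* Mark the first two descriptors available for the first lap. */
	ring[0].flags = SKETCH_DESC_F_AVAIL;
	ring[1].flags = SKETCH_DESC_F_AVAIL;

	while (sketch_desc_is_avail(&ring[idx], wrap)) {
		printf("descriptor %u is available\n", (unsigned)idx);
		sketch_advance(&idx, &wrap, 4);
	}

	return 0;
}

On the driver side the same two bits are written the other way around (AVAIL set to its wrap counter, USED to the inverse), which is why a descriptor whose AVAIL matches the device's wrap counter while USED does not is safe to consume.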