X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvirtio_net.c;h=ed7198dbbc9b5bc18645289397a85518befef72e;hb=264713ba1025e18436433a00fb14da8fc3f27101;hp=ca892886dea9312b373c33c0056bf623fabdb665;hpb=c3ffdba0e88a4220665d2284db5532173578a346;p=dpdk.git

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index ca892886de..ed7198dbbc 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -15,6 +15,7 @@
 #include <rte_udp.h>
 #include <rte_sctp.h>
 #include <rte_arp.h>
+#include <rte_net.h>
 
 #include "iotlb.h"
 #include "vhost.h"
@@ -294,7 +295,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	uint16_t used_idx;
 	uint32_t i, sz;
 
-	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
 		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, queue_id);
@@ -302,8 +303,11 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	}
 
 	vq = dev->virtqueue[queue_id];
+
+	rte_spinlock_lock(&vq->access_lock);
+
 	if (unlikely(vq->enabled == 0))
-		return 0;
+		goto out_access_unlock;
 
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_lock(vq);
@@ -323,7 +327,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
 	if (count == 0)
 		goto out;
 
-	LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
 		dev->vid, start_idx, start_idx + count);
 
 	vq->batch_copy_nb_elems = 0;
@@ -389,6 +393,9 @@ out:
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_unlock(vq);
 
+out_access_unlock:
+	rte_spinlock_unlock(&vq->access_lock);
+
 	return count;
 }
 
@@ -517,7 +524,7 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
 	hdr_phys_addr = buf_vec[vec_idx].buf_addr;
 	rte_prefetch0((void *)(uintptr_t)hdr_addr);
 
-	LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
 		dev->vid, num_buffers);
 
 	desc_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
@@ -613,7 +620,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint16_t avail_head;
 
-	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
 		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, queue_id);
@@ -621,8 +628,11 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 	}
 
 	vq = dev->virtqueue[queue_id];
+
+	rte_spinlock_lock(&vq->access_lock);
+
 	if (unlikely(vq->enabled == 0))
-		return 0;
+		goto out_access_unlock;
 
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_lock(vq);
@@ -647,14 +657,14 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
 		if (unlikely(reserve_avail_buf_mergeable(dev, vq,
 						pkt_len, buf_vec, &num_buffers,
 						avail_head) < 0)) {
-			LOG_DEBUG(VHOST_DATA,
+			VHOST_LOG_DEBUG(VHOST_DATA,
 				"(%d) failed to get enough desc from vring\n",
 				dev->vid);
 			vq->shadow_used_idx -= num_buffers;
 			break;
 		}
 
-		LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
+		VHOST_LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
 			dev->vid, vq->last_avail_idx,
 			vq->last_avail_idx + num_buffers);
 
@@ -678,6 +688,9 @@ out:
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_unlock(vq);
 
+out_access_unlock:
+	rte_spinlock_unlock(&vq->access_lock);
+
 	return pkt_idx;
 }
 
@@ -690,6 +703,13 @@ rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
 	if (!dev)
 		return 0;
 
+	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"(%d) %s: built-in vhost net backend is disabled.\n",
+			dev->vid, __func__);
+		return 0;
+	}
+
 	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
 		return virtio_dev_merge_rx(dev, queue_id, pkts, count);
 	else
@@ -1082,6 +1102,22 @@ mbuf_is_consumed(struct rte_mbuf *m)
 	return true;
 }
 
+static __rte_always_inline void
+restore_mbuf(struct rte_mbuf *m)
+{
+	uint32_t mbuf_size, priv_size;
+
+	while (m) {
+		priv_size = rte_pktmbuf_priv_size(m->pool);
+		mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+		/* start of buffer is after mbuf structure and priv data */
+
+		m->buf_addr = (char *)m + mbuf_size;
+		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+		m = m->next;
+	}
+}
+
 uint16_t
 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
@@ -1099,6 +1135,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	if (!dev)
 		return 0;
 
+	if (unlikely(!(dev->flags & VIRTIO_DEV_BUILTIN_VIRTIO_NET))) {
+		RTE_LOG(ERR, VHOST_DATA,
+			"(%d) %s: built-in vhost net backend is disabled.\n",
+			dev->vid, __func__);
+		return 0;
+	}
+
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->nr_vring))) {
 		RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
 			dev->vid, __func__, queue_id);
@@ -1106,9 +1149,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	}
 
 	vq = dev->virtqueue[queue_id];
-	if (unlikely(vq->enabled == 0))
+
+	if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
 		return 0;
 
+	if (unlikely(vq->enabled == 0))
+		goto out_access_unlock;
+
 	vq->batch_copy_nb_elems = 0;
 
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
@@ -1133,6 +1180,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 				nr_updated += 1;
 
 				TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+				restore_mbuf(zmbuf->mbuf);
 				rte_pktmbuf_free(zmbuf->mbuf);
 				put_zmbuf(zmbuf);
 				vq->nr_zmbuf -= 1;
@@ -1162,19 +1210,13 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 			rte_atomic16_cmpset((volatile uint16_t *)
 				&dev->broadcast_rarp.cnt, 1, 0))) {
 
-		rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
+		rarp_mbuf = rte_net_make_rarp_packet(mbuf_pool, &dev->mac);
 		if (rarp_mbuf == NULL) {
 			RTE_LOG(ERR, VHOST_DATA,
-				"Failed to allocate memory for mbuf.\n");
+				"Failed to make RARP packet.\n");
 			return 0;
 		}
-
-		if (rte_net_make_rarp_packet(rarp_mbuf, &dev->mac) < 0) {
-			rte_pktmbuf_free(rarp_mbuf);
-			rarp_mbuf = NULL;
-		} else {
-			count -= 1;
-		}
+		count -= 1;
 	}
 
 	free_entries = *((volatile uint16_t *)&vq->avail->idx) -
@@ -1182,7 +1224,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 	if (free_entries == 0)
 		goto out;
 
-	LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 
 	/* Prefetch available and used ring */
 	avail_idx = vq->last_avail_idx & (vq->size - 1);
@@ -1192,7 +1234,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
 
 	count = RTE_MIN(count, MAX_PKT_BURST);
 	count = RTE_MIN(count, free_entries);
-	LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
+	VHOST_LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
 			dev->vid, count);
 
 	/* Retrieve all of the head indexes first to avoid caching issues. */
@@ -1282,6 +1324,9 @@ out:
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_unlock(vq);
 
+out_access_unlock:
+	rte_spinlock_unlock(&vq->access_lock);
+
 	if (unlikely(rarp_mbuf != NULL)) {
 		/*
 		 * Inject it to the head of "pkts" array, so that switch's mac