X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvirtio_net.c;h=b779034dca83b9f50f67763858960acefc605c08;hb=9253c34cfb9dd32faeb8e513f6d8fafd356a2e62;hp=3a019b74466992fdcc1cae62fcb39beb99737b07;hpb=3e1e9c24648a44f10c4e3f04f016cdb881535218;p=dpdk.git

diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 3a019b7446..b779034dca 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -222,8 +222,9 @@ vhost_flush_dequeue_shadow_packed(struct virtio_net *dev,
 	struct vring_used_elem_packed *used_elem = &vq->shadow_used_packed[0];
 
 	vq->desc_packed[vq->shadow_last_used_idx].id = used_elem->id;
-	rte_smp_wmb();
-	vq->desc_packed[vq->shadow_last_used_idx].flags = used_elem->flags;
+	/* desc flags is the synchronization point for virtio packed vring */
+	__atomic_store_n(&vq->desc_packed[vq->shadow_last_used_idx].flags,
+			 used_elem->flags, __ATOMIC_RELEASE);
 	vhost_log_cache_used_vring(dev, vq,
 				   vq->shadow_last_used_idx *
 				   sizeof(struct vring_packed_desc),
@@ -1281,8 +1282,6 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
 			return -1;
 	}
 
-	rte_smp_rmb();
-
 	vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE)
 		lens[i] = descs[avail_idx + i].len;
 
@@ -1343,7 +1342,6 @@ virtio_dev_rx_single_packed(struct virtio_net *dev,
 	struct buf_vector buf_vec[BUF_VECTOR_MAX];
 	uint16_t nr_descs = 0;
 
-	rte_smp_rmb();
 	if (unlikely(vhost_enqueue_single_packed(dev, vq, pkt, buf_vec,
 						 &nr_descs) < 0)) {
 		VHOST_LOG_DATA(DEBUG,
@@ -1491,19 +1489,16 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	struct iovec *dst_iovec = vec_pool + (VHOST_MAX_ASYNC_VEC >> 1);
 	struct rte_vhost_iov_iter *src_it = it_pool;
 	struct rte_vhost_iov_iter *dst_it = it_pool + 1;
-	uint16_t n_free_slot, slot_idx;
+	uint16_t n_free_slot, slot_idx = 0;
 	uint16_t pkt_err = 0;
 	uint16_t segs_await = 0;
 	struct async_inflight_info *pkts_info = vq->async_pkts_info;
 	int n_pkts = 0;
 
-	avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
-
 	/*
-	 * The ordering between avail index and
-	 * desc reads needs to be enforced.
+	 * The ordering between avail index and desc reads need to be enforced.
 	 */
-	rte_smp_rmb();
+	avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
 
 	rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
@@ -1567,7 +1562,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 			src_it = it_pool;
 			dst_it = it_pool + 1;
 			segs_await = 0;
-			vq->async_pkts_inflight_n += n_pkts;
+			vq->async_pkts_inflight_n += pkt_burst_idx;
 
 			if (unlikely(n_pkts < (int)pkt_burst_idx)) {
 				/*
@@ -1587,7 +1582,7 @@ virtio_dev_rx_async_submit_split(struct virtio_net *dev,
 	if (pkt_burst_idx) {
 		n_pkts = vq->async_ops.transfer_data(dev->vid,
 				queue_id, tdes, 0, pkt_burst_idx);
-		vq->async_pkts_inflight_n += n_pkts;
+		vq->async_pkts_inflight_n += pkt_burst_idx;
 
 		if (unlikely(n_pkts < (int)pkt_burst_idx))
 			pkt_err = pkt_burst_idx - n_pkts;
@@ -1714,7 +1709,6 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 {
 	struct vhost_virtqueue *vq;
 	uint32_t nb_tx = 0;
-	bool drawback = false;
 
 	VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
 	if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
@@ -1727,13 +1721,8 @@ virtio_dev_rx_async_submit(struct virtio_net *dev, uint16_t queue_id,
 	vq = dev->virtqueue[queue_id];
 
 	rte_spinlock_lock(&vq->access_lock);
-	if (unlikely(vq->enabled == 0))
-		goto out_access_unlock;
-
-	if (unlikely(!vq->async_registered)) {
-		drawback = true;
+	if (unlikely(vq->enabled == 0 || !vq->async_registered))
 		goto out_access_unlock;
-	}
 
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
 		vhost_user_iotlb_rd_lock(vq);
@@ -1760,9 +1749,6 @@ out:
 
 out_access_unlock:
 	rte_spinlock_unlock(&vq->access_lock);
 
-	if (drawback)
-		return rte_vhost_enqueue_burst(dev->vid, queue_id, pkts, count);
-
 	return nb_tx;
 }
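
The memory-ordering hunks above drop DPDK's generic barriers (rte_smp_wmb()/rte_smp_rmb()) in favor of C11-style one-sided synchronization: the packed ring's desc flags field is written with a release store and read with an acquire load, because flags is the only synchronization point between driver and device. Below is a minimal, self-contained sketch of that pairing, assuming the virtio 1.1 packed-ring flag bits; it is not DPDK code, and publish_used()/poll_used() are hypothetical helpers standing in for the vhost and guest sides.

#include <stdint.h>

struct vring_packed_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

#define VRING_DESC_F_AVAIL	(1 << 7)	/* per virtio 1.1 spec */
#define VRING_DESC_F_USED	(1 << 15)

/* Device (vhost) side: mark a descriptor as used. */
static void
publish_used(struct vring_packed_desc *desc, uint16_t id, uint16_t flags)
{
	/* Plain store; the release below orders it before the flags flip. */
	desc->id = id;
	/*
	 * Release store: id (and the buffer contents) become visible to
	 * any reader that observes the new flags via an acquire load.
	 */
	__atomic_store_n(&desc->flags, flags, __ATOMIC_RELEASE);
}

/* Driver side: poll for a used descriptor; pairs with the store above. */
static int
poll_used(struct vring_packed_desc *desc, uint16_t *id, int used_wrap)
{
	uint16_t flags = __atomic_load_n(&desc->flags, __ATOMIC_ACQUIRE);
	int avail = !!(flags & VRING_DESC_F_AVAIL);
	int used = !!(flags & VRING_DESC_F_USED);

	/* Used when both wrap bits match the driver's used wrap counter. */
	if (avail != used || used != used_wrap)
		return 0;
	*id = desc->id;	/* safe: ordered after the acquire load */
	return 1;
}

The split-ring hunk follows the same logic: once avail->idx is loaded with __ATOMIC_ACQUIRE, the descriptor reads that follow cannot be reordered before it, which is why the standalone rte_smp_rmb() becomes redundant and the load simply moves below the comment.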
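
The two vq->async_pkts_inflight_n hunks switch the counter from n_pkts (packets the DMA engine actually accepted) to pkt_burst_idx (packets submitted in the burst), with pkt_err recording the rejected tail. A simplified model of that convention follows, under the assumption that the caller's error path later reconciles pkt_err against the counter; transfer_data() here is a hypothetical stand-in for vq->async_ops.transfer_data.

#include <stdint.h>

/* Returns how many of the 'submitted' packets the engine accepted. */
extern int transfer_data(uint16_t submitted);

static uint16_t
submit_burst(uint16_t *inflight_n, uint16_t pkt_burst_idx)
{
	int n_pkts = transfer_data(pkt_burst_idx);
	uint16_t pkt_err = 0;

	/*
	 * Account for the whole burst, not only the accepted part:
	 * the error path is driven by pkt_err, so counter updates and
	 * error handling must agree on a single convention.
	 */
	*inflight_n += pkt_burst_idx;
	if (n_pkts < (int)pkt_burst_idx)
		pkt_err = pkt_burst_idx - n_pkts;

	return pkt_err;
}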