vhost: enforce avail index and desc read ordering
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 8ad30c9..7f37bbb 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -37,45 +37,6 @@ is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
        return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
 }
 
-static __rte_always_inline void *
-alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
-               uint64_t desc_addr, uint64_t desc_len)
-{
-       void *idesc;
-       uint64_t src, dst;
-       uint64_t len, remain = desc_len;
-
-       idesc = rte_malloc(__func__, desc_len, 0);
-       if (unlikely(!idesc))
-               return 0;
-
-       dst = (uint64_t)(uintptr_t)idesc;
-
-       while (remain) {
-               len = remain;
-               src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
-                               VHOST_ACCESS_RO);
-               if (unlikely(!src || !len)) {
-                       rte_free(idesc);
-                       return 0;
-               }
-
-               rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
-
-               remain -= len;
-               dst += len;
-               desc_addr += len;
-       }
-
-       return idesc;
-}
-
-static __rte_always_inline void
-free_ind_table(void *idesc)
-{
-       rte_free(idesc);
-}
-
 static __rte_always_inline void
 do_flush_shadow_used_ring_split(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
@@ -598,7 +559,7 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                                avail_idx, &desc_count,
                                                buf_vec, &vec_idx,
                                                &buf_id, &len,
-                                               VHOST_ACCESS_RO) < 0))
+                                               VHOST_ACCESS_RW) < 0))
                        return -1;
 
                len = RTE_MIN(len, size);
@@ -791,6 +752,12 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
        avail_head = *((volatile uint16_t *)&vq->avail->idx);
 
+       /*
+        * The ordering between avail index and
+        * desc reads needs to be enforced.
+        */
+       rte_smp_rmb();
+
        for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
                uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
                uint16_t nr_vec = 0;
@@ -1373,6 +1340,12 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        if (free_entries == 0)
                return 0;
 
+       /*
+        * The ordering between avail index and
+        * desc reads needs to be enforced.
+        */
+       rte_smp_rmb();
+
        VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
 
        count = RTE_MIN(count, MAX_PKT_BURST);
@@ -1503,7 +1476,7 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                                vq->last_avail_idx, &desc_count,
                                                buf_vec, &nr_vec,
                                                &buf_id, &dummy_len,
-                                               VHOST_ACCESS_RW) < 0))
+                                               VHOST_ACCESS_RO) < 0))
                        break;
 
                if (likely(dev->dequeue_zero_copy == 0))
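
For context, the rte_smp_rmb() added in both split-ring paths follows the usual consumer-side ordering rule: the guest driver fills in the descriptor and avail ring entry first and only then publishes the new avail index, so the host must not let its descriptor/ring loads be reordered ahead of the avail index load. Below is a minimal sketch of that pattern, assuming the vhost_virtqueue definitions from the library's vhost.h; the helper name peek_next_avail is hypothetical and only illustrates the ordering, it is not part of the DPDK code being patched.

	/* Illustrative only: consumer-side avail index / descriptor ordering. */
	#include <rte_atomic.h>
	#include "vhost.h"	/* struct vhost_virtqueue, split-ring layout */

	static inline int
	peek_next_avail(struct vhost_virtqueue *vq, uint16_t *head)
	{
		uint16_t avail_idx;

		/* Load the producer index published by the guest driver. */
		avail_idx = *((volatile uint16_t *)&vq->avail->idx);
		if (avail_idx == vq->last_avail_idx)
			return 0;	/* nothing new to consume */

		/*
		 * Order the avail index load before the ring/descriptor
		 * loads below; without the barrier the CPU may satisfy
		 * those loads early and hand back a stale ring entry.
		 */
		rte_smp_rmb();

		/* Now it is safe to read the slot the new index covers. */
		*head = vq->avail->ring[vq->last_avail_idx & (vq->size - 1)];

		return 1;
	}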