net: add rte prefix to ether structures
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index a9be633..f97ec76 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -97,6 +97,8 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
 {
        int i;
        uint16_t used_idx = vq->last_used_idx;
+       uint16_t head_idx = vq->last_used_idx;
+       uint16_t head_flags = 0;
 
        /* Split loop in two to save memory barriers */
        for (i = 0; i < vq->shadow_used_idx; i++) {
@@ -126,12 +128,17 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
                        flags &= ~VRING_DESC_F_AVAIL;
                }
 
-               vq->desc_packed[vq->last_used_idx].flags = flags;
+               if (i > 0) {
+                       vq->desc_packed[vq->last_used_idx].flags = flags;
 
-               vhost_log_cache_used_vring(dev, vq,
+                       vhost_log_cache_used_vring(dev, vq,
                                        vq->last_used_idx *
                                        sizeof(struct vring_packed_desc),
                                        sizeof(struct vring_packed_desc));
+               } else {
+                       head_idx = vq->last_used_idx;
+                       head_flags = flags;
+               }
 
                vq->last_used_idx += vq->shadow_used_packed[i].count;
                if (vq->last_used_idx >= vq->size) {
@@ -140,7 +147,13 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
                }
        }
 
-       rte_smp_wmb();
+       vq->desc_packed[head_idx].flags = head_flags;
+
+       vhost_log_cache_used_vring(dev, vq,
+                               head_idx *
+                               sizeof(struct vring_packed_desc),
+                               sizeof(struct vring_packed_desc));
+
        vq->shadow_used_idx = 0;
        vhost_log_cache_sync(dev, vq);
 }
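
The three hunks above rework how flush_shadow_used_ring_packed() publishes a batch of used descriptors. In a packed virtqueue the driver learns that descriptors are used by polling their AVAIL/USED flag bits, starting at the head of the expected batch, so the head descriptor's flags act as the commit point for everything written behind it. The rewrite fills in all other descriptors inside the loop, stashes the head's slot and flags in head_idx/head_flags, and flips the head flags only after the loop, so the driver can never observe a half-written batch. The trailing rte_smp_wmb() goes away because the deferred head store becomes the synchronization point; note that on weakly ordered CPUs that final store still wants write-barrier or release semantics (x86's ordered stores make a plain store sufficient there). A minimal sketch of the pattern, using illustrative types rather than DPDK's:

#include <stdint.h>

/* Illustrative descriptor; stands in for struct vring_packed_desc. */
struct pdesc {
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;		/* AVAIL/USED bits polled by the driver */
};

/* Publish n shadow entries into the ring: everything first, head flags
 * last. Wrap-around handling is omitted for brevity. */
static void
publish_batch(struct pdesc *ring, const struct pdesc *shadow,
	      uint16_t head, uint16_t n)
{
	uint16_t i;
	uint16_t head_flags = shadow[0].flags;

	ring[head].id  = shadow[0].id;
	ring[head].len = shadow[0].len;

	for (i = 1; i < n; i++)
		ring[head + i] = shadow[i];

	/* Release store: all stores above become visible before the
	 * head flags flip that hands the batch to the driver. */
	__atomic_store_n(&ring[head].flags, head_flags, __ATOMIC_RELEASE);
}
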
@@ -296,13 +309,22 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint16_t vec_id = *vec_idx;
        uint32_t len    = 0;
        uint64_t dlen;
+       uint32_t nr_descs = vq->size;
+       uint32_t cnt    = 0;
        struct vring_desc *descs = vq->desc;
        struct vring_desc *idesc = NULL;
 
+       if (unlikely(idx >= vq->size))
+               return -1;
+
        *desc_chain_head = idx;
 
        if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
                dlen = vq->desc[idx].len;
+               nr_descs = dlen / sizeof(struct vring_desc);
+               if (unlikely(nr_descs > vq->size))
+                       return -1;
+
                descs = (struct vring_desc *)(uintptr_t)
                        vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
                                                &dlen,
@@ -327,7 +349,7 @@ fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        }
 
        while (1) {
-               if (unlikely(idx >= vq->size)) {
+               if (unlikely(idx >= nr_descs || cnt++ >= nr_descs)) {
                        free_ind_table(idesc);
                        return -1;
                }
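
These fill_vec_buf_split() hunks, together with the fill_vec_buf_packed() hunk just below, bound what a guest-controlled descriptor chain can make the host do. The start index is range-checked, an indirect table's entry count is derived from its byte length (dlen / sizeof(struct vring_desc)) and capped at the ring size, and the chain walk now stops after nr_descs steps (cnt++ >= nr_descs), so next pointers that form a cycle can no longer spin vhost in an infinite loop. The packed ring has no next field: a chain occupies consecutive slots reached by advancing avail_idx with wrap-around, so capping *desc_count at vq->size plays the same role there. A self-contained sketch of both guards, with an illustrative descriptor type:

#include <stdint.h>

struct desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;		/* guest-controlled, may form a cycle */
};

#define DESC_F_NEXT	1

/* Split ring: follow next pointers, visiting at most nr_descs entries.
 * Returns the chain length, or -1 if an index is out of range or the
 * walk exceeds the table size (which implies a cycle). */
static int
walk_chain_split(const struct desc *table, uint32_t nr_descs, uint16_t idx)
{
	uint32_t cnt = 0;

	while (1) {
		/* idx bounds the array access; cnt bounds the iteration
		 * count even when every idx stays in range. */
		if (idx >= nr_descs || cnt++ >= nr_descs)
			return -1;

		if (!(table[idx].flags & DESC_F_NEXT))
			return (int)cnt;

		idx = table[idx].next;
	}
}

/* Packed ring: chains occupy consecutive slots, so a chain claiming
 * more slots than the ring has must be malformed. */
static int
walk_chain_packed(const struct desc *descs, uint16_t size, uint16_t avail_idx)
{
	uint16_t desc_count = 0;

	while (1) {
		if (desc_count >= size)
			return -1;
		desc_count++;

		if (!(descs[avail_idx].flags & DESC_F_NEXT))
			return desc_count;

		if (++avail_idx >= size)	/* wrap in place */
			avail_idx = 0;
	}
}
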
@@ -494,6 +516,9 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                if (unlikely(vec_id >= BUF_VECTOR_MAX))
                        return -1;
 
+               if (unlikely(*desc_count >= vq->size))
+                       return -1;
+
                *desc_count += 1;
                *buf_id = descs[avail_idx].id;
 
@@ -944,18 +969,19 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
        struct ipv4_hdr *ipv4_hdr;
        struct ipv6_hdr *ipv6_hdr;
        void *l3_hdr = NULL;
-       struct ether_hdr *eth_hdr;
+       struct rte_ether_hdr *eth_hdr;
        uint16_t ethertype;
 
-       eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+       eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
 
-       m->l2_len = sizeof(struct ether_hdr);
+       m->l2_len = sizeof(struct rte_ether_hdr);
        ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
 
        if (ethertype == ETHER_TYPE_VLAN) {
-               struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
+               struct rte_vlan_hdr *vlan_hdr =
+                       (struct rte_vlan_hdr *)(eth_hdr + 1);
 
-               m->l2_len += sizeof(struct vlan_hdr);
+               m->l2_len += sizeof(struct rte_vlan_hdr);
                ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
        }
 
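This parse_ethernet() hunk is the one the commit subject describes: the generic ether_hdr/vlan_hdr struct names move into DPDK's rte_ namespace so they cannot collide with definitions pulled in by system headers such as net/ethernet.h (struct ether_addr being the classic clash). Note that ETHER_TYPE_VLAN is untouched here; the defines were prefixed in a separate change. Application code follows the same mechanical rename, e.g. a VLAN-aware ethertype lookup (a sketch against the headers of this era):

#include <rte_ether.h>
#include <rte_mbuf.h>
#include <rte_byteorder.h>

/* Return the inner ethertype of a frame, skipping one optional VLAN
 * tag; mirrors the parse_ethernet() logic above with the renamed
 * structs. */
static uint16_t
inner_ethertype(struct rte_mbuf *m)
{
	struct rte_ether_hdr *eth =
		rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
	uint16_t ethertype = rte_be_to_cpu_16(eth->ether_type);

	if (ethertype == ETHER_TYPE_VLAN) {
		struct rte_vlan_hdr *vlan =
			(struct rte_vlan_hdr *)(eth + 1);
		ethertype = rte_be_to_cpu_16(vlan->eth_proto);
	}

	return ethertype;
}
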
@@ -1038,12 +1064,6 @@ vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
        }
 }
 
-static __rte_always_inline void
-put_zmbuf(struct zcopy_mbuf *zmbuf)
-{
-       zmbuf->in_use = 0;
-}
-
 static __rte_always_inline int
 copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
                  struct buf_vector *buf_vec, uint16_t nr_vec,
@@ -1281,34 +1301,6 @@ again:
        return NULL;
 }
 
-static __rte_always_inline bool
-mbuf_is_consumed(struct rte_mbuf *m)
-{
-       while (m) {
-               if (rte_mbuf_refcnt_read(m) > 1)
-                       return false;
-               m = m->next;
-       }
-
-       return true;
-}
-
-static __rte_always_inline void
-restore_mbuf(struct rte_mbuf *m)
-{
-       uint32_t mbuf_size, priv_size;
-
-       while (m) {
-               priv_size = rte_pktmbuf_priv_size(m->pool);
-               mbuf_size = sizeof(struct rte_mbuf) + priv_size;
-               /* start of buffer is after mbuf structure and priv data */
-
-               m->buf_addr = (char *)m + mbuf_size;
-               m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
-               m = m->next;
-       }
-}
-
 static __rte_always_inline uint16_t
 virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
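
This hunk and the put_zmbuf() one above delete the dequeue zero-copy helpers from this file, yet the zero-copy branch is still exercised in virtio_dev_tx_packed() below, which suggests the helpers were relocated to a shared header rather than dropped (an inference from the visible context, not confirmed by this diff). Their job is central to the zero-copy path: such an mbuf lends out guest memory, so vhost must wait until no segment is still referenced elsewhere and then repoint buf_addr/buf_iova at the mbuf's own buffer before recycling it. The deleted logic again, with the intent spelled out in comments:

#include <stdbool.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

/* A zero-copy mbuf is safe to recycle only once every segment's
 * refcount has dropped back to the single reference vhost holds. */
static inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
	while (m) {
		if (rte_mbuf_refcnt_read(m) > 1)
			return false;
		m = m->next;
	}
	return true;
}

/* Zero-copy pointed buf_addr/buf_iova into guest memory; restore them
 * to the buffer that follows the mbuf header and its private area, the
 * layout rte_pktmbuf expects, before the mbuf returns to its pool. */
static inline void
restore_mbuf(struct rte_mbuf *m)
{
	uint32_t mbuf_size, priv_size;

	while (m) {
		priv_size = rte_pktmbuf_priv_size(m->pool);
		mbuf_size = sizeof(struct rte_mbuf) + priv_size;
		m->buf_addr = (char *)m + mbuf_size;
		m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
		m = m->next;
	}
}
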
@@ -1437,8 +1429,6 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
        uint16_t i;
 
-       rte_prefetch0(&vq->desc_packed[vq->last_avail_idx]);
-
        if (unlikely(dev->dequeue_zero_copy)) {
                struct zcopy_mbuf *zmbuf, *next;