diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 62f37da..751c1f3 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -107,11 +107,10 @@ flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
        }
        vq->last_used_idx += vq->shadow_used_idx;
 
-       rte_smp_wmb();
-
        vhost_log_cache_sync(dev, vq);
 
-       *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
+       __atomic_add_fetch(&vq->used->idx, vq->shadow_used_idx,
+                          __ATOMIC_RELEASE);
        vq->shadow_used_idx = 0;
        vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
                sizeof(vq->used->idx));
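
The rte_smp_wmb()/volatile-store pair above is folded into one __atomic_add_fetch() with __ATOMIC_RELEASE: the release ordering makes the shadow used-ring writes flushed just before it visible to the guest no later than the updated index, so no separate write barrier is needed. A minimal stand-alone sketch of that publish pattern, using hypothetical names (struct ring, used_idx, entries) rather than the vhost structures:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical single-producer ring: entries are written first, then
     * the index is published with release semantics. */
    struct ring {
            uint32_t entries[256];
            uint16_t used_idx;      /* read by the consumer */
    };

    static void publish(struct ring *r, const uint32_t *vals, uint16_t n)
    {
            uint16_t old = r->used_idx;
            uint16_t i;

            for (i = 0; i < n; i++)
                    r->entries[(old + i) & 255] = vals[i];

            /*
             * Release add: the entry stores above become visible before the
             * new index value does, replacing an explicit write barrier.
             */
            __atomic_add_fetch(&r->used_idx, n, __ATOMIC_RELEASE);
    }

    int main(void)
    {
            struct ring r = { .used_idx = 0 };
            uint32_t v[2] = { 1, 2 };

            publish(&r, v, 2);
            printf("used_idx=%u\n", (unsigned)r.used_idx);
            return 0;
    }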
@@ -978,13 +977,11 @@ virtio_dev_rx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        struct buf_vector buf_vec[BUF_VECTOR_MAX];
        uint16_t avail_head;
 
-       avail_head = *((volatile uint16_t *)&vq->avail->idx);
-
        /*
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
-       rte_smp_rmb();
+       avail_head = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE);
 
        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
@@ -1072,6 +1069,8 @@ virtio_dev_rx_batch_packed(struct virtio_net *dev,
                                                  VHOST_ACCESS_RW);
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               if (unlikely(!desc_addrs[i]))
+                       return -1;
                if (unlikely(lens[i] != descs[avail_idx + i].len))
                        return -1;
        }
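
The added check rejects a batch in which any descriptor address failed to translate into a host virtual address (desc_addrs[i] == 0), so the caller falls back to the scalar path before anything is copied. A compact sketch of the same guard, with a hypothetical translate() helper standing in for the vhost IOVA-to-VVA translation:

    #include <stdint.h>
    #include <stdio.h>

    #define BATCH_SIZE 4

    /* Hypothetical translation: returns 0 when the guest address has no
     * host mapping (the analogue of a failed vhost address translation). */
    static uint64_t translate(uint64_t guest_addr)
    {
            return guest_addr < 0x10000 ? 0x7f0000000000ULL + guest_addr : 0;
    }

    /* Validate the whole batch up front; any untranslatable address makes
     * the caller fall back to the per-descriptor path (return -1). */
    static int check_batch(const uint64_t *guest_addrs, uint64_t *host_addrs)
    {
            int i;

            for (i = 0; i < BATCH_SIZE; i++)
                    host_addrs[i] = translate(guest_addrs[i]);

            for (i = 0; i < BATCH_SIZE; i++)
                    if (host_addrs[i] == 0)
                            return -1;

            return 0;
    }

    int main(void)
    {
            uint64_t guest[BATCH_SIZE] = { 0x1000, 0x2000, 0x3000, 0x20000 };
            uint64_t host[BATCH_SIZE];

            printf("batch usable: %s\n", check_batch(guest, host) ? "no" : "yes");
            return 0;
    }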
@@ -1674,6 +1673,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 {
        uint16_t i;
        uint16_t free_entries;
+       uint16_t dropped = 0;
+       static bool allocerr_warned;
 
        if (unlikely(dev->dequeue_zero_copy)) {
                struct zcopy_mbuf *zmbuf, *next;
@@ -1699,16 +1700,14 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                }
        }
 
-       free_entries = *((volatile uint16_t *)&vq->avail->idx) -
-                       vq->last_avail_idx;
-       if (free_entries == 0)
-               return 0;
-
        /*
         * The ordering between avail index and
         * desc reads needs to be enforced.
         */
-       rte_smp_rmb();
+       free_entries = __atomic_load_n(&vq->avail->idx, __ATOMIC_ACQUIRE) -
+                       vq->last_avail_idx;
+       if (free_entries == 0)
+               return 0;
 
        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
 
@@ -1737,13 +1736,35 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        update_shadow_used_ring_split(vq, head_idx, 0);
 
                pkts[i] = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
-               if (unlikely(pkts[i] == NULL))
+               if (unlikely(pkts[i] == NULL)) {
+                       /*
+                        * mbuf allocation fails for jumbo packets when external
+                        * buffer allocation is not allowed and linear buffer
+                        * is required. Drop this packet.
+                        */
+                       if (!allocerr_warned) {
+                               VHOST_LOG_DATA(ERR,
+                                       "Failed mbuf alloc of size %d from %s on %s.\n",
+                                       buf_len, mbuf_pool->name, dev->ifname);
+                               allocerr_warned = true;
+                       }
+                       dropped += 1;
+                       i++;
                        break;
+               }
 
                err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, pkts[i],
                                mbuf_pool);
                if (unlikely(err)) {
                        rte_pktmbuf_free(pkts[i]);
+                       if (!allocerr_warned) {
+                               VHOST_LOG_DATA(ERR,
+                                       "Failed to copy desc to mbuf on %s.\n",
+                                       dev->ifname);
+                               allocerr_warned = true;
+                       }
+                       dropped += 1;
+                       i++;
                        break;
                }
 
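
The allocation-failure handling above combines two small patterns: a function-local static flag so the error is logged only once per process (mbuf exhaustion on jumbo frames would otherwise flood the log), and a dropped counter so a descriptor already taken off the ring is not reported to the caller as a delivered packet. A stand-alone sketch of that shape, with hypothetical alloc_pkt()/fill_pkt() helpers in place of virtio_dev_pktmbuf_alloc() and copy_desc_to_mbuf():

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins for the mbuf allocation and descriptor copy. */
    static void *alloc_pkt(uint32_t len) { return malloc(len); }
    static int fill_pkt(void *pkt) { (void)pkt; return 0; }

    static uint16_t dequeue_burst(void **pkts, uint16_t count)
    {
            static bool allocerr_warned;    /* warn only once per process */
            uint16_t dropped = 0;
            uint16_t i;

            for (i = 0; i < count; i++) {
                    pkts[i] = alloc_pkt(2048);
                    if (pkts[i] == NULL) {
                            if (!allocerr_warned) {
                                    fprintf(stderr, "pkt alloc failed\n");
                                    allocerr_warned = true;
                            }
                            /* The slot is consumed but must not be counted
                             * as a delivered packet. */
                            dropped++;
                            i++;
                            break;
                    }
                    if (fill_pkt(pkts[i])) {
                            free(pkts[i]);
                            dropped++;
                            i++;
                            break;
                    }
            }
            return i - dropped;     /* only successfully filled packets */
    }

    int main(void)
    {
            void *pkts[8];
            uint16_t n = dequeue_burst(pkts, 8);

            while (n--)
                    free(pkts[n]);
            return 0;
    }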
@@ -1753,6 +1774,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        zmbuf = get_zmbuf(vq);
                        if (!zmbuf) {
                                rte_pktmbuf_free(pkts[i]);
+                               dropped += 1;
+                               i++;
                                break;
                        }
                        zmbuf->mbuf = pkts[i];
@@ -1782,7 +1805,7 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                }
        }
 
-       return i;
+       return (i - dropped);
 }
 
 static __rte_always_inline int
@@ -1827,6 +1850,8 @@ vhost_reserve_avail_batch_packed(struct virtio_net *dev,
        }
 
        vhost_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
+               if (unlikely(!desc_addrs[i]))
+                       return -1;
                if (unlikely((lens[i] != descs[avail_idx + i].len)))
                        return -1;
        }
@@ -1914,6 +1939,7 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
        uint32_t buf_len;
        uint16_t nr_vec = 0;
        int err;
+       static bool allocerr_warned;
 
        if (unlikely(fill_vec_buf_packed(dev, vq,
                                         vq->last_avail_idx, desc_count,
@@ -1924,14 +1950,24 @@ vhost_dequeue_single_packed(struct virtio_net *dev,
 
        *pkts = virtio_dev_pktmbuf_alloc(dev, mbuf_pool, buf_len);
        if (unlikely(*pkts == NULL)) {
-               VHOST_LOG_DATA(ERR,
-                       "Failed to allocate memory for mbuf.\n");
+               if (!allocerr_warned) {
+                       VHOST_LOG_DATA(ERR,
+                               "Failed mbuf alloc of size %d from %s on %s.\n",
+                               buf_len, mbuf_pool->name, dev->ifname);
+                       allocerr_warned = true;
+               }
                return -1;
        }
 
        err = copy_desc_to_mbuf(dev, vq, buf_vec, nr_vec, *pkts,
                                mbuf_pool);
        if (unlikely(err)) {
+               if (!allocerr_warned) {
+                       VHOST_LOG_DATA(ERR,
+                               "Failed to copy desc to mbuf on %s.\n",
+                               dev->ifname);
+                       allocerr_warned = true;
+               }
                rte_pktmbuf_free(*pkts);
                return -1;
        }
@@ -1946,21 +1982,24 @@ virtio_dev_tx_single_packed(struct virtio_net *dev,
                            struct rte_mbuf **pkts)
 {
 
-       uint16_t buf_id, desc_count;
+       uint16_t buf_id, desc_count = 0;
+       int ret;
 
-       if (vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
-                                       &desc_count))
-               return -1;
+       ret = vhost_dequeue_single_packed(dev, vq, mbuf_pool, pkts, &buf_id,
+                                       &desc_count);
 
-       if (virtio_net_is_inorder(dev))
-               vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
-                                                          desc_count);
-       else
-               vhost_shadow_dequeue_single_packed(vq, buf_id, desc_count);
+       if (likely(desc_count > 0)) {
+               if (virtio_net_is_inorder(dev))
+                       vhost_shadow_dequeue_single_packed_inorder(vq, buf_id,
+                                                                  desc_count);
+               else
+                       vhost_shadow_dequeue_single_packed(vq, buf_id,
+                                       desc_count);
 
-       vq_inc_last_avail_packed(vq, desc_count);
+               vq_inc_last_avail_packed(vq, desc_count);
+       }
 
-       return 0;
+       return ret;
 }
 
 static __rte_always_inline int
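
With desc_count initialized to zero and the return value propagated, a failed dequeue now still commits whatever descriptors it walked before failing (desc_count > 0) to the used ring and advances last_avail, instead of leaking them, while the caller still sees the error. A minimal sketch of that "report the error but commit what was consumed" shape, with hypothetical dequeue_one()/mark_used() helpers:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical dequeue: reports how many descriptors it walked through
     * *desc_count even when it fails (e.g. packet allocation failed). */
    static int dequeue_one(uint16_t *desc_count)
    {
            *desc_count = 2;        /* descriptors were consumed ... */
            return -1;              /* ... but no packet was produced */
    }

    /* Record the consumed descriptors as used and advance the avail index. */
    static void mark_used(uint16_t desc_count, uint16_t *last_avail)
    {
            *last_avail += desc_count;
    }

    static int tx_single(uint16_t *last_avail)
    {
            uint16_t desc_count = 0;
            int ret;

            ret = dequeue_one(&desc_count);

            /*
             * Even on failure, descriptors the dequeue already walked must be
             * given back, otherwise they would leak; the error itself is
             * still reported to the caller.
             */
            if (desc_count > 0)
                    mark_used(desc_count, last_avail);

            return ret;
    }

    int main(void)
    {
            uint16_t last_avail = 0;
            int ret = tx_single(&last_avail);

            printf("ret=%d last_avail=%u\n", ret, (unsigned)last_avail);
            return 0;
    }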