net/virtio: fix packet corruption
diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c
index 7b50119..6517aa8 100644
--- a/drivers/net/virtio/virtio_rxtx_simple.c
+++ b/drivers/net/virtio/virtio_rxtx_simple.c
@@ -80,8 +80,9 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq,
        vq->sw_ring[desc_idx] = cookie;
 
        start_dp = vq->vq_ring.desc;
-       start_dp[desc_idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset) -
-                                 vq->hw->vtnet_hdr_size;
+       start_dp[desc_idx].addr =
+               VIRTIO_MBUF_ADDR(cookie, vq) +
+               RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
        start_dp[desc_idx].len = cookie->buf_len -
                RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
 
@@ -120,8 +121,8 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
                *(uint64_t *)p = rxvq->mbuf_initializer;
 
                start_dp[i].addr =
-                       MBUF_DATA_DMA_ADDR(sw_ring[i], vq->offset) -
-                       vq->hw->vtnet_hdr_size;
+                       VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
+                       RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
                start_dp[i].len = sw_ring[i]->buf_len -
                        RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
        }
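
The two Rx hunks above both stop deriving the descriptor address from the mbuf's current data offset and instead use the fixed RTE_PKTMBUF_HEADROOM, which is the same headroom already baked into the descriptor length. The standalone sketch below (not driver code; the header-size and headroom values are example numbers, and "buf" stands in for whatever VIRTIO_MBUF_ADDR() yields) just works that arithmetic: with the old formula, addr + len runs past the end of the mbuf buffer whenever a recycled mbuf carries a data_off larger than the headroom, which is one way the corruption named in the subject can show up.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HEADROOM	128	/* stands in for RTE_PKTMBUF_HEADROOM */
#define VTNET_HDR_SIZE	10	/* stands in for vq->hw->vtnet_hdr_size */

int main(void)
{
	uint64_t buf = 0x1000;		/* mbuf buffer start (what VIRTIO_MBUF_ADDR() resolves to here) */
	uint16_t buf_len = 2176;	/* cookie->buf_len */
	uint16_t data_off = 256;	/* stale offset left in a recycled mbuf */

	/* Descriptor length, as in both hunks: fixed headroom, not data_off. */
	uint32_t len = buf_len - HEADROOM + VTNET_HDR_SIZE;

	/* Old address: derived from the mbuf's current data_off. */
	uint64_t old_addr = buf + data_off - VTNET_HDR_SIZE;
	/* New address: derived from the same fixed headroom as the length. */
	uint64_t new_addr = buf + HEADROOM - VTNET_HDR_SIZE;

	printf("buffer end     : 0x%" PRIx64 "\n", buf + buf_len);
	printf("old addr + len : 0x%" PRIx64 " (past the end when data_off > headroom)\n",
	       old_addr + len);
	printf("new addr + len : 0x%" PRIx64 " (ends exactly at the buffer end)\n",
	       new_addr + len);
	return 0;
}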
@@ -184,8 +185,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
                return 0;
 
-       nb_used = *(volatile uint16_t *)&vq->vq_ring.used->idx -
-               vq->vq_used_cons_idx;
+       nb_used = VIRTQUEUE_NUSED(vq);
 
        rte_compiler_barrier();
 
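The hunk above is a tidy-up: VIRTQUEUE_NUSED(vq) expresses the same used-index arithmetic the removed lines spelled out, and the compiler barrier after it stays. For readers puzzling over the free-running 16-bit indices, here is a standalone model (not DPDK code, example values only) of why the subtraction stays correct across wraparound; in this model the index is re-read on every call, mirroring the device updating it asynchronously.

#include <stdint.h>
#include <stdio.h>

static uint16_t nused(volatile uint16_t *used_idx, uint16_t used_cons_idx)
{
	/* Fresh read each call; the "device" advances used_idx behind our back. */
	return (uint16_t)(*used_idx - used_cons_idx);
}

int main(void)
{
	volatile uint16_t used_idx = 3;		/* device index has wrapped past 65535 */
	uint16_t used_cons_idx = 65530;		/* driver is still 9 entries behind */

	printf("nb_used = %u\n", (unsigned int)nused(&used_idx, used_cons_idx));	/* prints 9 */
	return 0;
}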
@@ -302,7 +302,7 @@ static inline void
 virtio_xmit_cleanup(struct virtqueue *vq)
 {
        uint16_t i, desc_idx;
-       int nb_free = 0;
+       uint32_t nb_free = 0;
        struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];
 
        desc_idx = (uint16_t)(vq->vq_used_cons_idx &
@@ -320,13 +320,16 @@ virtio_xmit_cleanup(struct virtqueue *vq)
                                        free[nb_free++] = m;
                                else {
                                        rte_mempool_put_bulk(free[0]->pool,
-                                               (void **)free, nb_free);
+                                               (void **)free,
+                                               RTE_MIN(RTE_DIM(free),
+                                                       nb_free));
                                        free[0] = m;
                                        nb_free = 1;
                                }
                        }
                }
-               rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+               rte_mempool_put_bulk(free[0]->pool, (void **)free,
+                       RTE_MIN(RTE_DIM(free), nb_free));
        } else {
                for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
                        m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
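
The cleanup hunk above widens nb_free to uint32_t and clamps every count handed to rte_mempool_put_bulk() to RTE_DIM(free), so the bulk free can never be asked for more entries than the on-stack table holds. Below is a minimal, self-contained model of a bounded free table with that clamp; the mempool call is mocked, BATCH and the helper macros are invented for the sketch, and it flushes simply when the table fills rather than reproducing the driver's exact flush condition.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH		64	/* stands in for VIRTIO_TX_MAX_FREE_BUF_SZ */
#define DIM(a)		(sizeof(a) / sizeof((a)[0]))	/* like RTE_DIM() */
#define MIN2(a, b)	((a) < (b) ? (a) : (b))		/* like RTE_MIN() */

/* Mock of rte_mempool_put_bulk(): just report how many objects were freed. */
static void put_bulk(void **objs, unsigned int n)
{
	(void)objs;
	printf("freed a batch of %u objects\n", n);
}

int main(void)
{
	void *free_tbl[BATCH];
	uint32_t nb_free = 0;
	int obj[200];	/* stand-ins for the mbufs taken off the used ring */

	for (size_t i = 0; i < DIM(obj); i++) {
		if (nb_free < DIM(free_tbl)) {
			free_tbl[nb_free++] = &obj[i];
		} else {
			/* Table is full: flush it, clamped to its size. */
			put_bulk(free_tbl, MIN2(DIM(free_tbl), nb_free));
			free_tbl[0] = &obj[i];
			nb_free = 1;
		}
	}
	/* Flush the tail; again never more than the table can hold. */
	put_bulk(free_tbl, MIN2(DIM(free_tbl), nb_free));
	return 0;
}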
@@ -369,7 +372,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
                        vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
                for (i = 0; i < nb_tail; i++) {
                        start_dp[desc_idx].addr =
-                               MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
+                               VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
                        start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
                        tx_pkts++;
                        desc_idx++;
@@ -381,7 +384,7 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
                vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
        for (i = 0; i < nb_commit; i++) {
                start_dp[desc_idx].addr =
-                       MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
+                       VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
                start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
                tx_pkts++;
                desc_idx++;