net/virtio: revert forcing IOVA as VA mode for virtio-user
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 03957b2..5baac22 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -113,6 +113,25 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp,
 
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
+/**
+ * Return the IOVA (or virtual address in the case of virtio-user) of
+ * the mbuf data buffer.
+ *
+ * The address is first cast to the word size (sizeof(uintptr_t))
+ * before being cast to uint64_t. This makes it work with any
+ * combination of word size (64-bit and 32-bit) and virtio device
+ * (virtio-pci and virtio-user).
+ */
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+       ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)))
+
+/**
+ * Return the physical address (or virtual address in the case of
+ * virtio-user) of the mbuf data buffer, accounting for the data offset.
+ */
+#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
+       (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
+
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
 #define VTNET_SQ_CQ_QUEUE_IDX 2
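
The setup code that fills in mbuf_addr_offset is outside this hunk. As a
rough sketch, it would point the offset at whichever rte_mbuf field holds
an address the device can use: buf_iova for virtio-pci, buf_addr for
virtio-user. The "use_va" parameter below is an assumption made for
illustration, not necessarily the PMD's real flag name:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <rte_mbuf.h>

    /* Sketch: choose which rte_mbuf field VIRTIO_MBUF_ADDR() will read. */
    static uint16_t
    mbuf_addr_offset_for(bool use_va)
    {
            return use_va ?
                    offsetof(struct rte_mbuf, buf_addr) : /* virtio-user */
                    offsetof(struct rte_mbuf, buf_iova);  /* virtio-pci  */
    }

Either way, VIRTIO_MBUF_ADDR() does a single native-width load through a
uintptr_t, which is why the same macro works on 32-bit and 64-bit builds.
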
@@ -255,6 +274,7 @@ struct virtqueue {
 
        void *vq_ring_virt_mem;  /**< linear address of vring */
        unsigned int vq_ring_size;
+       uint16_t mbuf_addr_offset; /**< offset of mbuf buffer address field */
 
        union {
                struct virtnet_rx rxq;
@@ -739,7 +759,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
        do {
                uint16_t flags;
 
-               start_dp[idx].addr = rte_mbuf_data_iova(cookie);
+               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
                start_dp[idx].len  = cookie->data_len;
                if (prepend_header) {
                        start_dp[idx].addr -= head_size;
@@ -803,25 +823,26 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
 }
 
 static void
-virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
+virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, uint16_t num)
 {
        uint16_t used_idx, id, curr_id, free_cnt = 0;
        uint16_t size = vq->vq_nentries;
        struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
        struct vq_desc_extra *dxp;
+       int nb = num; /* signed: a chain's ndescs may overshoot num */
 
        used_idx = vq->vq_used_cons_idx;
        /* desc_is_used has a load-acquire or rte_io_rmb inside
         * and waits for a used desc in the virtqueue.
         */
-       while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
+       while (nb > 0 && desc_is_used(&desc[used_idx], vq)) {
                id = desc[used_idx].id;
                do {
                        curr_id = used_idx;
                        dxp = &vq->vq_descx[used_idx];
                        used_idx += dxp->ndescs;
                        free_cnt += dxp->ndescs;
-                       num -= dxp->ndescs;
+                       nb -= dxp->ndescs;
                        if (used_idx >= size) {
                                used_idx -= size;
                                vq->vq_packed.used_wrap_counter ^= 1;
@@ -837,7 +858,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
 }
 
 static void
-virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
+virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, uint16_t num)
 {
        uint16_t used_idx, id;
        uint16_t size = vq->vq_nentries;
@@ -867,7 +888,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
 
 /* Cleanup from completed transmits. */
 static inline void
-virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
+virtio_xmit_cleanup_packed(struct virtqueue *vq, uint16_t num, int in_order)
 {
        if (in_order)
                virtio_xmit_cleanup_inorder_packed(vq, num);