net/virtio: fix avail descriptor ID
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 3a9ce29..4f3dd31 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -12,7 +12,7 @@
 #include <rte_mempool.h>
 #include <rte_net.h>
 
-#include "virtio_pci.h"
+#include "virtio.h"
 #include "virtio_ring.h"
 #include "virtio_logs.h"
 #include "virtio_rxtx.h"
@@ -113,6 +113,25 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp,
 
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
+/**
+ * Return the IOVA (or virtual address in case of virtio-user) of the
+ * mbuf data buffer.
+ *
+ * The address is first cast to the word size (sizeof(uintptr_t))
+ * before being cast to uint64_t, so that it works with any
+ * combination of word size (64-bit and 32-bit) and virtio device
+ * (virtio-pci and virtio-user).
+ */
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+       ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)))
+
+/**
+ * Return the physical address (or virtual address in case of
+ * virtio-user) of the mbuf data buffer, accounting for the mbuf data offset.
+ */
+#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
+       (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
+
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
 #define VTNET_SQ_CQ_QUEUE_IDX 2
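
VIRTIO_MBUF_ADDR reads the buffer address through a byte offset stored in the
virtqueue (mbuf_addr_offset), so the same datapath code serves virtio-pci
(IOVA) and virtio-user (virtual address). A minimal sketch of how that offset
could be chosen at queue setup, assuming the rte_mbuf buf_iova/buf_addr
fields; the helper name is illustrative, the real initialization lives in the
queue setup code:

static inline void
virtqueue_set_mbuf_addr_offset(struct virtqueue *vq, int use_va)
{
	/* virtio-user maps buffers by virtual address, virtio-pci by IOVA */
	if (use_va)
		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_addr);
	else
		vq->mbuf_addr_offset = offsetof(struct rte_mbuf, buf_iova);
}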
@@ -217,6 +236,10 @@ struct vq_desc_extra {
        uint16_t next;
 };
 
+#define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq)
+#define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq)
+#define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq)
+
 struct virtqueue {
        struct virtio_hw  *hw; /**< virtio_hw structure pointer. */
        union {
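
The virtnet_rxq_to_vq()/virtnet_txq_to_vq()/virtnet_cq_to_vq() converters rely
on the Rx/Tx/control queue structures being embedded in the union inside
struct virtqueue, so container_of() can recover the enclosing virtqueue from
the member address instead of following a back-pointer. A generic illustration
of the pattern (my_container_of is an illustrative stand-in for the
container_of macro used above):

/* recover the address of the structure that embeds a given member */
#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* e.g. inside a Tx burst routine: */
struct virtqueue *vq = my_container_of(txvq, struct virtqueue, txq);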
@@ -240,8 +263,18 @@ struct virtqueue {
        uint16_t vq_avail_idx; /**< sync until needed */
        uint16_t vq_free_thresh; /**< free threshold */
 
+       /**
+        * Head of the free chain in the descriptor table. If
+        * there are no free descriptors, this will be set to
+        * VQ_RING_DESC_CHAIN_END.
+        */
+       uint16_t  vq_desc_head_idx;
+       uint16_t  vq_desc_tail_idx;
+       uint16_t  vq_queue_index;   /**< PCI queue index */
+
        void *vq_ring_virt_mem;  /**< linear address of vring*/
        unsigned int vq_ring_size;
+       uint16_t mbuf_addr_offset;
 
        union {
                struct virtnet_rx rxq;
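
vq_desc_head_idx/vq_desc_tail_idx track a chain of free split-ring
descriptors, linked through the descriptors' next fields and terminated by
VQ_RING_DESC_CHAIN_END. An illustrative sketch of popping one free descriptor,
using names from this header; the actual enqueue logic lives in the Rx/Tx
code:

	uint16_t idx = vq->vq_desc_head_idx;

	vq->vq_desc_head_idx = vq->vq_split.ring.desc[idx].next;
	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
		vq->vq_desc_tail_idx = VQ_RING_DESC_CHAIN_END;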
@@ -252,15 +285,6 @@ struct virtqueue {
        rte_iova_t vq_ring_mem; /**< physical address of vring,
                                 * or virtual address for virtio_user. */
 
-       /**
-        * Head of the free chain in the descriptor table. If
-        * there are no free descriptors, this will be set to
-        * VQ_RING_DESC_CHAIN_END.
-        */
-       uint16_t  vq_desc_head_idx;
-       uint16_t  vq_desc_tail_idx;
-       uint16_t  vq_queue_index;   /**< PCI queue index */
-       uint16_t offset; /**< relative offset to obtain addr in mbuf */
        uint16_t  *notify_addr;
        struct rte_mbuf **sw_ring;  /**< RX software ring. */
        struct vq_desc_extra vq_descx[0];
@@ -385,7 +409,7 @@ virtqueue_disable_intr_split(struct virtqueue *vq)
 static inline void
 virtqueue_disable_intr(struct virtqueue *vq)
 {
-       if (vtpci_packed_queue(vq->hw))
+       if (virtio_with_packed_queue(vq->hw))
                virtqueue_disable_intr_packed(vq);
        else
                virtqueue_disable_intr_split(vq);
@@ -419,7 +443,7 @@ virtqueue_enable_intr_split(struct virtqueue *vq)
 static inline void
 virtqueue_enable_intr(struct virtqueue *vq)
 {
-       if (vtpci_packed_queue(vq->hw))
+       if (virtio_with_packed_queue(vq->hw))
                virtqueue_enable_intr_packed(vq);
        else
                virtqueue_enable_intr_split(vq);
@@ -448,11 +472,11 @@ virtqueue_full(const struct virtqueue *vq)
 }
 
 static inline int
-virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
+virtio_get_queue_type(struct virtio_hw *hw, uint16_t vq_idx)
 {
-       if (vtpci_queue_idx == hw->max_queue_pairs * 2)
+       if (vq_idx == hw->max_queue_pairs * 2)
                return VTNET_CQ;
-       else if (vtpci_queue_idx % 2 == 0)
+       else if (vq_idx % 2 == 0)
                return VTNET_RQ;
        else
                return VTNET_TQ;
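
virtio uses a flat virtqueue index space: the receive and transmit queues of
pair N sit at indexes 2*N and 2*N+1, and the control queue, when present,
comes right after all data queues. For example:

/* with hw->max_queue_pairs == 2:
 *   vq_idx 0, 2 -> VTNET_RQ  (receive queues of pairs 0 and 1)
 *   vq_idx 1, 3 -> VTNET_TQ  (transmit queues of pairs 0 and 1)
 *   vq_idx 4    -> VTNET_CQ  (2 * max_queue_pairs, the control queue)
 */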
@@ -563,7 +587,7 @@ virtqueue_kick_prepare_packed(struct virtqueue *vq)
 static inline void
 virtqueue_notify(struct virtqueue *vq)
 {
-       VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
+       VIRTIO_OPS(vq->hw)->notify_queue(vq->hw, vq);
 }
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
@@ -572,7 +596,7 @@ virtqueue_notify(struct virtqueue *vq)
        used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \
                                   __ATOMIC_RELAXED); \
        nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
-       if (vtpci_packed_queue((vq)->hw)) { \
+       if (virtio_with_packed_queue((vq)->hw)) { \
                PMD_INIT_LOG(DEBUG, \
                "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
                " cached_flags=0x%x; used_wrap_counter=%d", \
@@ -613,50 +637,47 @@ virtqueue_notify(struct virtqueue *vq)
 } while (0)
 
 static inline void
-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
-                       struct rte_mbuf *cookie,
-                       uint8_t offload)
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
 {
-       if (offload) {
-               if (cookie->ol_flags & PKT_TX_TCP_SEG)
-                       cookie->ol_flags |= PKT_TX_TCP_CKSUM;
-
-               switch (cookie->ol_flags & PKT_TX_L4_MASK) {
-               case PKT_TX_UDP_CKSUM:
-                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
-                       hdr->csum_offset = offsetof(struct rte_udp_hdr,
-                               dgram_cksum);
-                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       break;
-
-               case PKT_TX_TCP_CKSUM:
-                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
-                       hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
-                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       break;
-
-               default:
-                       ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-                       break;
-               }
+       uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+       uint16_t o_l23_len = (cookie->ol_flags & PKT_TX_TUNNEL_MASK) ?
+                            cookie->outer_l2_len + cookie->outer_l3_len : 0;
+
+       if (cookie->ol_flags & PKT_TX_TCP_SEG)
+               csum_l4 |= PKT_TX_TCP_CKSUM;
+
+       switch (csum_l4) {
+       case PKT_TX_UDP_CKSUM:
+               hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
+               hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               break;
+
+       case PKT_TX_TCP_CKSUM:
+               hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
+               hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               break;
+
+       default:
+               ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+               break;
+       }
 
-               /* TCP Segmentation Offload */
-               if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-                       hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
-                               VIRTIO_NET_HDR_GSO_TCPV6 :
-                               VIRTIO_NET_HDR_GSO_TCPV4;
-                       hdr->gso_size = cookie->tso_segsz;
-                       hdr->hdr_len =
-                               cookie->l2_len +
-                               cookie->l3_len +
-                               cookie->l4_len;
-               } else {
-                       ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-               }
+       /* TCP Segmentation Offload */
+       if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+               hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+                       VIRTIO_NET_HDR_GSO_TCPV6 :
+                       VIRTIO_NET_HDR_GSO_TCPV4;
+               hdr->gso_size = cookie->tso_segsz;
+               hdr->hdr_len = o_l23_len + cookie->l2_len + cookie->l3_len +
+                              cookie->l4_len;
+       } else {
+               ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
        }
 }
 
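virtqueue_xmit_offload() fills the virtio-net header checksum and GSO fields
from the mbuf, folding the outer L2/L3 lengths into csum_start and hdr_len for
tunnelled packets. A minimal sketch of the mbuf fields a caller sets for a
plain (non-tunnelled) IPv4/TCP packet; the function name is illustrative:

static void
request_tcp_csum_offload(struct rte_mbuf *m)
{
	/* untunnelled frame: outer_l2_len/outer_l3_len stay zero */
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
	/* the helper then sets csum_start = l2_len + l3_len and
	 * csum_offset = offsetof(struct rte_tcp_hdr, cksum)
	 */
}
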
@@ -667,7 +688,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 {
        struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
        struct vq_desc_extra *dxp;
-       struct virtqueue *vq = txvq->vq;
+       struct virtqueue *vq = virtnet_txq_to_vq(txvq);
        struct vring_packed_desc *start_dp, *head_dp;
        uint16_t idx, id, head_idx, head_flags;
        int16_t head_size = vq->hw->vtnet_hdr_size;
@@ -711,6 +732,9 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                        RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
                start_dp[idx].len   = (seg_num + 1) *
                        sizeof(struct vring_packed_desc);
+               /* Packed descriptor id needs to be restored when in-order is used. */
+               if (in_order)
+                       start_dp[idx].id = idx;
                /* reset flags for indirect desc */
                head_flags = VRING_DESC_F_INDIRECT;
                head_flags |= vq->vq_packed.cached_flags;
@@ -735,12 +759,13 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                }
        }
 
-       virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+       if (vq->hw->has_tx_offload)
+               virtqueue_xmit_offload(hdr, cookie);
 
        do {
                uint16_t flags;
 
-               start_dp[idx].addr = rte_mbuf_data_iova(cookie);
+               start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
                start_dp[idx].len  = cookie->data_len;
                if (prepend_header) {
                        start_dp[idx].addr -= head_size;
@@ -804,25 +829,26 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
 }
 
 static void
-virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
+virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, uint16_t num)
 {
        uint16_t used_idx, id, curr_id, free_cnt = 0;
        uint16_t size = vq->vq_nentries;
        struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
        struct vq_desc_extra *dxp;
+       int nb = num;
 
        used_idx = vq->vq_used_cons_idx;
        /* desc_is_used has a load-acquire or rte_io_rmb inside
         * and wait for used desc in virtqueue.
         */
-       while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
+       while (nb > 0 && desc_is_used(&desc[used_idx], vq)) {
                id = desc[used_idx].id;
                do {
                        curr_id = used_idx;
                        dxp = &vq->vq_descx[used_idx];
                        used_idx += dxp->ndescs;
                        free_cnt += dxp->ndescs;
-                       num -= dxp->ndescs;
+                       nb -= dxp->ndescs;
                        if (used_idx >= size) {
                                used_idx -= size;
                                vq->vq_packed.used_wrap_counter ^= 1;
@@ -838,7 +864,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
 }
 
 static void
-virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
+virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, uint16_t num)
 {
        uint16_t used_idx, id;
        uint16_t size = vq->vq_nentries;
@@ -868,7 +894,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
 
 /* Cleanup from completed transmits. */
 static inline void
-virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
+virtio_xmit_cleanup_packed(struct virtqueue *vq, uint16_t num, int in_order)
 {
        if (in_order)
                virtio_xmit_cleanup_inorder_packed(vq, num);