net/virtio: refactor virtqueue structure
author    Tiwei Bie <tiwei.bie@intel.com>
          Tue, 19 Mar 2019 06:43:07 +0000 (14:43 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Wed, 20 Mar 2019 17:15:42 +0000 (18:15 +0100)
Put the split ring and packed ring specific fields into separate
sub-structures, and union them, since a given virtqueue only ever
uses one of the two ring layouts.
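
For quick orientation, the field moves in this patch are summarized
below (comment block for illustration only; the new paths match the
hunks that follow):

    /* Old field               ->  New field
     * vq->vq_ring             ->  vq->vq_split.ring
     * vq->ring_packed         ->  vq->vq_packed.ring
     * vq->used_wrap_counter   ->  vq->vq_packed.used_wrap_counter
     * vq->cached_flags        ->  vq->vq_packed.cached_flags
     * vq->event_flags_shadow  ->  vq->vq_packed.event_flags_shadow
     */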

Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
drivers/net/virtio/virtio_ethdev.c
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_rxtx_simple.h
drivers/net/virtio/virtio_rxtx_simple_neon.c
drivers/net/virtio/virtio_rxtx_simple_sse.c
drivers/net/virtio/virtqueue.c
drivers/net/virtio/virtqueue.h

diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c
index 9060b6b..bc91ad4 100644
@@ -147,7 +147,7 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
 {
        struct virtqueue *vq = cvq->vq;
        int head;
-       struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+       struct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;
        struct virtio_pmd_ctrl *result;
        uint16_t flags;
        int sum = 0;
@@ -161,14 +161,14 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
         * One RX packet for ACK.
         */
        head = vq->vq_avail_idx;
-       flags = vq->cached_flags;
+       flags = vq->vq_packed.cached_flags;
        desc[head].addr = cvq->virtio_net_hdr_mem;
        desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_free_cnt--;
        nb_descs++;
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
-               vq->cached_flags ^=
+               vq->vq_packed.cached_flags ^=
                        VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
        }
 
@@ -178,13 +178,13 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
                        + sizeof(ctrl->status) + sizeof(uint8_t) * sum;
                desc[vq->vq_avail_idx].len = dlen[k];
                desc[vq->vq_avail_idx].flags = VRING_DESC_F_NEXT |
-                       vq->cached_flags;
+                       vq->vq_packed.cached_flags;
                sum += dlen[k];
                vq->vq_free_cnt--;
                nb_descs++;
                if (++vq->vq_avail_idx >= vq->vq_nentries) {
                        vq->vq_avail_idx -= vq->vq_nentries;
-                       vq->cached_flags ^=
+                       vq->vq_packed.cached_flags ^=
                                VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
                }
        }
@@ -192,12 +192,13 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
        desc[vq->vq_avail_idx].addr = cvq->virtio_net_hdr_mem
                + sizeof(struct virtio_net_ctrl_hdr);
        desc[vq->vq_avail_idx].len = sizeof(ctrl->status);
-       desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE | vq->cached_flags;
+       desc[vq->vq_avail_idx].flags = VRING_DESC_F_WRITE |
+               vq->vq_packed.cached_flags;
        vq->vq_free_cnt--;
        nb_descs++;
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
-               vq->cached_flags ^=
+               vq->vq_packed.cached_flags ^=
                        VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
        }
 
@@ -218,19 +219,19 @@ virtio_send_command_packed(struct virtnet_ctl *cvq,
        vq->vq_used_cons_idx += nb_descs;
        if (vq->vq_used_cons_idx >= vq->vq_nentries) {
                vq->vq_used_cons_idx -= vq->vq_nentries;
-               vq->used_wrap_counter ^= 1;
+               vq->vq_packed.used_wrap_counter ^= 1;
        }
 
        PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\n"
                        "vq->vq_avail_idx=%d\n"
                        "vq->vq_used_cons_idx=%d\n"
-                       "vq->cached_flags=0x%x\n"
-                       "vq->used_wrap_counter=%d\n",
+                       "vq->vq_packed.cached_flags=0x%x\n"
+                       "vq->vq_packed.used_wrap_counter=%d\n",
                        vq->vq_free_cnt,
                        vq->vq_avail_idx,
                        vq->vq_used_cons_idx,
-                       vq->cached_flags,
-                       vq->used_wrap_counter);
+                       vq->vq_packed.cached_flags,
+                       vq->vq_packed.used_wrap_counter);
 
        result = cvq->virtio_net_hdr_mz->addr;
        return result;
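
Note on the wrap handling above: XORing vq_packed.cached_flags with
VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1) flips both wrap-dependent
flag bits in one step whenever vq_avail_idx wraps. A minimal sketch of
why this is equivalent to flipping the avail wrap counter, assuming the
packed-ring bit layout these macros encode (AVAIL in bit 7, USED in
bit 15):

    /* Sketch only, not driver code. With avail wrap counter w, the
     * driver publishes descriptors with AVAIL = w and USED = !w. */
    uint16_t w = 1;
    uint16_t flags = VRING_DESC_F_AVAIL(w) | VRING_DESC_F_USED(!w);
    /* On wrap-around, w flips; toggling both bits at once yields
     * exactly VRING_DESC_F_AVAIL(!w) | VRING_DESC_F_USED(w): */
    flags ^= VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);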
@@ -280,30 +281,30 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
         * At least one TX packet per argument;
         * One RX packet for ACK.
         */
-       vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT;
-       vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem;
-       vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
+       vq->vq_split.ring.desc[head].flags = VRING_DESC_F_NEXT;
+       vq->vq_split.ring.desc[head].addr = cvq->virtio_net_hdr_mem;
+       vq->vq_split.ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr);
        vq->vq_free_cnt--;
-       i = vq->vq_ring.desc[head].next;
+       i = vq->vq_split.ring.desc[head].next;
 
        for (k = 0; k < pkt_num; k++) {
-               vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT;
-               vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+               vq->vq_split.ring.desc[i].flags = VRING_DESC_F_NEXT;
+               vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
                        + sizeof(struct virtio_net_ctrl_hdr)
                        + sizeof(ctrl->status) + sizeof(uint8_t)*sum;
-               vq->vq_ring.desc[i].len = dlen[k];
+               vq->vq_split.ring.desc[i].len = dlen[k];
                sum += dlen[k];
                vq->vq_free_cnt--;
-               i = vq->vq_ring.desc[i].next;
+               i = vq->vq_split.ring.desc[i].next;
        }
 
-       vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE;
-       vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem
+       vq->vq_split.ring.desc[i].flags = VRING_DESC_F_WRITE;
+       vq->vq_split.ring.desc[i].addr = cvq->virtio_net_hdr_mem
                        + sizeof(struct virtio_net_ctrl_hdr);
-       vq->vq_ring.desc[i].len = sizeof(ctrl->status);
+       vq->vq_split.ring.desc[i].len = sizeof(ctrl->status);
        vq->vq_free_cnt--;
 
-       vq->vq_desc_head_idx = vq->vq_ring.desc[i].next;
+       vq->vq_desc_head_idx = vq->vq_split.ring.desc[i].next;
 
        vq_update_avail_ring(vq, head);
        vq_update_avail_idx(vq);
@@ -324,16 +325,17 @@ virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl,
 
                used_idx = (uint32_t)(vq->vq_used_cons_idx
                                & (vq->vq_nentries - 1));
-               uep = &vq->vq_ring.used->ring[used_idx];
+               uep = &vq->vq_split.ring.used->ring[used_idx];
                idx = (uint32_t) uep->id;
                desc_idx = idx;
 
-               while (vq->vq_ring.desc[desc_idx].flags & VRING_DESC_F_NEXT) {
-                       desc_idx = vq->vq_ring.desc[desc_idx].next;
+               while (vq->vq_split.ring.desc[desc_idx].flags &
+                               VRING_DESC_F_NEXT) {
+                       desc_idx = vq->vq_split.ring.desc[desc_idx].next;
                        vq->vq_free_cnt++;
                }
 
-               vq->vq_ring.desc[desc_idx].next = vq->vq_desc_head_idx;
+               vq->vq_split.ring.desc[desc_idx].next = vq->vq_desc_head_idx;
                vq->vq_desc_head_idx = idx;
 
                vq->vq_used_cons_idx++;
@@ -395,7 +397,6 @@ static void
 virtio_init_vring(struct virtqueue *vq)
 {
        int size = vq->vq_nentries;
-       struct vring *vr = &vq->vq_ring;
        uint8_t *ring_mem = vq->vq_ring_virt_mem;
 
        PMD_INIT_FUNC_TRACE();
@@ -409,10 +410,12 @@ virtio_init_vring(struct virtqueue *vq)
        vq->vq_free_cnt = vq->vq_nentries;
        memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
        if (vtpci_packed_queue(vq->hw)) {
-               vring_init_packed(&vq->ring_packed, ring_mem,
+               vring_init_packed(&vq->vq_packed.ring, ring_mem,
                                  VIRTIO_PCI_VRING_ALIGN, size);
                vring_desc_init_packed(vq, size);
        } else {
+               struct vring *vr = &vq->vq_split.ring;
+
                vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
                vring_desc_init_split(vr->desc, size);
        }
@@ -487,12 +490,12 @@ virtio_init_queue(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx)
        vq->hw = hw;
        vq->vq_queue_index = vtpci_queue_idx;
        vq->vq_nentries = vq_size;
-       vq->event_flags_shadow = 0;
        if (vtpci_packed_queue(hw)) {
-               vq->used_wrap_counter = 1;
-               vq->cached_flags = VRING_DESC_F_AVAIL(1);
+               vq->vq_packed.used_wrap_counter = 1;
+               vq->vq_packed.cached_flags = VRING_DESC_F_AVAIL(1);
+               vq->vq_packed.event_flags_shadow = 0;
                if (queue_type == VTNET_RQ)
-                       vq->cached_flags |= VRING_DESC_F_WRITE;
+                       vq->vq_packed.cached_flags |= VRING_DESC_F_WRITE;
        }
 
        /*
diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 3c354ba..02f8d94 100644
@@ -62,13 +62,13 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
        struct vq_desc_extra *dxp;
        uint16_t desc_idx_last = desc_idx;
 
-       dp  = &vq->vq_ring.desc[desc_idx];
+       dp  = &vq->vq_split.ring.desc[desc_idx];
        dxp = &vq->vq_descx[desc_idx];
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
        if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
                while (dp->flags & VRING_DESC_F_NEXT) {
                        desc_idx_last = dp->next;
-                       dp = &vq->vq_ring.desc[dp->next];
+                       dp = &vq->vq_split.ring.desc[dp->next];
                }
        }
        dxp->ndescs = 0;
@@ -81,7 +81,7 @@ vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
        if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
                vq->vq_desc_head_idx = desc_idx;
        } else {
-               dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
+               dp_tail = &vq->vq_split.ring.desc[vq->vq_desc_tail_idx];
                dp_tail->next = desc_idx;
        }
 
@@ -118,7 +118,7 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
        struct vring_packed_desc *desc;
        uint16_t i;
 
-       desc = vq->ring_packed.desc_packed;
+       desc = vq->vq_packed.ring.desc_packed;
 
        for (i = 0; i < num; i++) {
                used_idx = vq->vq_used_cons_idx;
@@ -141,7 +141,7 @@ virtqueue_dequeue_burst_rx_packed(struct virtqueue *vq,
                vq->vq_used_cons_idx++;
                if (vq->vq_used_cons_idx >= vq->vq_nentries) {
                        vq->vq_used_cons_idx -= vq->vq_nentries;
-                       vq->used_wrap_counter ^= 1;
+                       vq->vq_packed.used_wrap_counter ^= 1;
                }
        }
 
@@ -160,7 +160,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
        /*  Caller does the check */
        for (i = 0; i < num ; i++) {
                used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-               uep = &vq->vq_ring.used->ring[used_idx];
+               uep = &vq->vq_split.ring.used->ring[used_idx];
                desc_idx = (uint16_t) uep->id;
                len[i] = uep->len;
                cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
@@ -199,7 +199,7 @@ virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
        for (i = 0; i < num; i++) {
                used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
                /* Desc idx same as used idx */
-               uep = &vq->vq_ring.used->ring[used_idx];
+               uep = &vq->vq_split.ring.used->ring[used_idx];
                len[i] = uep->len;
                cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
 
@@ -229,7 +229,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
 {
        uint16_t used_idx, id, curr_id, free_cnt = 0;
        uint16_t size = vq->vq_nentries;
-       struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+       struct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;
        struct vq_desc_extra *dxp;
 
        used_idx = vq->vq_used_cons_idx;
@@ -244,7 +244,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
                        num -= dxp->ndescs;
                        if (used_idx >= size) {
                                used_idx -= size;
-                               vq->used_wrap_counter ^= 1;
+                               vq->vq_packed.used_wrap_counter ^= 1;
                        }
                        if (dxp->cookie != NULL) {
                                rte_pktmbuf_free(dxp->cookie);
@@ -261,7 +261,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
 {
        uint16_t used_idx, id;
        uint16_t size = vq->vq_nentries;
-       struct vring_packed_desc *desc = vq->ring_packed.desc_packed;
+       struct vring_packed_desc *desc = vq->vq_packed.ring.desc_packed;
        struct vq_desc_extra *dxp;
 
        used_idx = vq->vq_used_cons_idx;
@@ -272,7 +272,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
                vq->vq_used_cons_idx += dxp->ndescs;
                if (vq->vq_used_cons_idx >= size) {
                        vq->vq_used_cons_idx -= size;
-                       vq->used_wrap_counter ^= 1;
+                       vq->vq_packed.used_wrap_counter ^= 1;
                }
                vq_ring_free_id_packed(vq, id);
                if (dxp->cookie != NULL) {
@@ -302,7 +302,7 @@ virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
                struct vq_desc_extra *dxp;
 
                used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-               uep = &vq->vq_ring.used->ring[used_idx];
+               uep = &vq->vq_split.ring.used->ring[used_idx];
 
                desc_idx = (uint16_t) uep->id;
                dxp = &vq->vq_descx[desc_idx];
@@ -356,7 +356,7 @@ virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
                return -EMSGSIZE;
 
        head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
-       start_dp = vq->vq_ring.desc;
+       start_dp = vq->vq_split.ring.desc;
 
        while (i < num) {
                idx = head_idx & (vq->vq_nentries - 1);
@@ -389,7 +389,7 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf **cookie,
 {
        struct vq_desc_extra *dxp;
        struct virtio_hw *hw = vq->hw;
-       struct vring_desc *start_dp = vq->vq_ring.desc;
+       struct vring_desc *start_dp = vq->vq_split.ring.desc;
        uint16_t idx, i;
 
        if (unlikely(vq->vq_free_cnt == 0))
@@ -430,8 +430,8 @@ static inline int
 virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
                                     struct rte_mbuf **cookie, uint16_t num)
 {
-       struct vring_packed_desc *start_dp = vq->ring_packed.desc_packed;
-       uint16_t flags = vq->cached_flags;
+       struct vring_packed_desc *start_dp = vq->vq_packed.ring.desc_packed;
+       uint16_t flags = vq->vq_packed.cached_flags;
        struct virtio_hw *hw = vq->hw;
        struct vq_desc_extra *dxp;
        uint16_t idx;
@@ -460,9 +460,9 @@ virtqueue_enqueue_recv_refill_packed(struct virtqueue *vq,
                start_dp[idx].flags = flags;
                if (++vq->vq_avail_idx >= vq->vq_nentries) {
                        vq->vq_avail_idx -= vq->vq_nentries;
-                       vq->cached_flags ^=
+                       vq->vq_packed.cached_flags ^=
                                VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
-                       flags = vq->cached_flags;
+                       flags = vq->vq_packed.cached_flags;
                }
        }
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
@@ -589,7 +589,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
        uint16_t i = 0;
 
        idx = vq->vq_desc_head_idx;
-       start_dp = vq->vq_ring.desc;
+       start_dp = vq->vq_split.ring.desc;
 
        while (i < num) {
                idx = idx & (vq->vq_nentries - 1);
@@ -635,13 +635,13 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 
        id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
        idx = vq->vq_avail_idx;
-       dp = &vq->ring_packed.desc_packed[idx];
+       dp = &vq->vq_packed.ring.desc_packed[idx];
 
        dxp = &vq->vq_descx[id];
        dxp->ndescs = 1;
        dxp->cookie = cookie;
 
-       flags = vq->cached_flags;
+       flags = vq->vq_packed.cached_flags;
 
        /* prepend cannot fail, checked by caller */
        hdr = (struct virtio_net_hdr *)
@@ -660,7 +660,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
 
        if (++vq->vq_avail_idx >= vq->vq_nentries) {
                vq->vq_avail_idx -= vq->vq_nentries;
-               vq->cached_flags ^=
+               vq->vq_packed.cached_flags ^=
                        VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
        }
 
@@ -698,11 +698,11 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
        head_idx = vq->vq_avail_idx;
        idx = head_idx;
        prev = head_idx;
-       start_dp = vq->ring_packed.desc_packed;
+       start_dp = vq->vq_packed.ring.desc_packed;
 
-       head_dp = &vq->ring_packed.desc_packed[idx];
+       head_dp = &vq->vq_packed.ring.desc_packed[idx];
        head_flags = cookie->next ? VRING_DESC_F_NEXT : 0;
-       head_flags |= vq->cached_flags;
+       head_flags |= vq->vq_packed.cached_flags;
 
        if (can_push) {
                /* prepend cannot fail, checked by caller */
@@ -727,7 +727,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                idx++;
                if (idx >= vq->vq_nentries) {
                        idx -= vq->vq_nentries;
-                       vq->cached_flags ^=
+                       vq->vq_packed.cached_flags ^=
                                VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
                }
        }
@@ -741,14 +741,14 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                start_dp[idx].len  = cookie->data_len;
                if (likely(idx != head_idx)) {
                        flags = cookie->next ? VRING_DESC_F_NEXT : 0;
-                       flags |= vq->cached_flags;
+                       flags |= vq->vq_packed.cached_flags;
                        start_dp[idx].flags = flags;
                }
                prev = idx;
                idx++;
                if (idx >= vq->vq_nentries) {
                        idx -= vq->vq_nentries;
-                       vq->cached_flags ^=
+                       vq->vq_packed.cached_flags ^=
                                VRING_DESC_F_AVAIL(1) | VRING_DESC_F_USED(1);
                }
        } while ((cookie = cookie->next) != NULL);
@@ -791,7 +791,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
        dxp->cookie = (void *)cookie;
        dxp->ndescs = needed;
 
-       start_dp = vq->vq_ring.desc;
+       start_dp = vq->vq_split.ring.desc;
 
        if (can_push) {
                /* prepend cannot fail, checked by caller */
@@ -844,7 +844,7 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
        } while ((cookie = cookie->next) != NULL);
 
        if (use_indirect)
-               idx = vq->vq_ring.desc[head_idx].next;
+               idx = vq->vq_split.ring.desc[head_idx].next;
 
        vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
 
@@ -919,8 +919,8 @@ virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
        if (hw->use_simple_rx) {
                for (desc_idx = 0; desc_idx < vq->vq_nentries;
                     desc_idx++) {
-                       vq->vq_ring.avail->ring[desc_idx] = desc_idx;
-                       vq->vq_ring.desc[desc_idx].flags =
+                       vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
+                       vq->vq_split.ring.desc[desc_idx].flags =
                                VRING_DESC_F_WRITE;
                }
 
@@ -1050,7 +1050,7 @@ virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
 
        if (!vtpci_packed_queue(hw)) {
                if (hw->use_inorder_tx)
-                       vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
+                       vq->vq_split.ring.desc[vq->vq_nentries - 1].next = 0;
        }
 
        VIRTQUEUE_DUMP(vq);
diff --git a/drivers/net/virtio/virtio_rxtx_simple.h b/drivers/net/virtio/virtio_rxtx_simple.h
index dc97e4c..3d1296a 100644
@@ -27,7 +27,7 @@ virtio_rxq_rearm_vec(struct virtnet_rx *rxvq)
 
        desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1);
        sw_ring = &vq->sw_ring[desc_idx];
-       start_dp = &vq->vq_ring.desc[desc_idx];
+       start_dp = &vq->vq_split.ring.desc[desc_idx];
 
        ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring,
                RTE_VIRTIO_VPMD_RX_REARM_THRESH);
diff --git a/drivers/net/virtio/virtio_rxtx_simple_neon.c b/drivers/net/virtio/virtio_rxtx_simple_neon.c
index d6207d7..cdc2a4d 100644
@@ -93,7 +93,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        nb_used = RTE_MIN(nb_used, nb_pkts);
 
        desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-       rused = &vq->vq_ring.used->ring[desc_idx];
+       rused = &vq->vq_split.ring.used->ring[desc_idx];
        sw_ring  = &vq->sw_ring[desc_idx];
        sw_ring_end = &vq->sw_ring[vq->vq_nentries];
 
diff --git a/drivers/net/virtio/virtio_rxtx_simple_sse.c b/drivers/net/virtio/virtio_rxtx_simple_sse.c
index d768d07..af76708 100644
@@ -95,7 +95,7 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
        nb_used = RTE_MIN(nb_used, nb_pkts);
 
        desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-       rused = &vq->vq_ring.used->ring[desc_idx];
+       rused = &vq->vq_split.ring.used->ring[desc_idx];
        sw_ring  = &vq->sw_ring[desc_idx];
        sw_ring_end = &vq->sw_ring[vq->vq_nentries];
 
diff --git a/drivers/net/virtio/virtqueue.c b/drivers/net/virtio/virtqueue.c
index 5b03f7a..79491db 100644
@@ -61,7 +61,7 @@ virtqueue_rxvq_flush_packed(struct virtqueue *vq)
        struct vq_desc_extra *dxp;
        uint16_t i;
 
-       struct vring_packed_desc *descs = vq->ring_packed.desc_packed;
+       struct vring_packed_desc *descs = vq->vq_packed.ring.desc_packed;
        int cnt = 0;
 
        i = vq->vq_used_cons_idx;
@@ -75,7 +75,7 @@ virtqueue_rxvq_flush_packed(struct virtqueue *vq)
                vq->vq_used_cons_idx++;
                if (vq->vq_used_cons_idx >= vq->vq_nentries) {
                        vq->vq_used_cons_idx -= vq->vq_nentries;
-                       vq->used_wrap_counter ^= 1;
+                       vq->vq_packed.used_wrap_counter ^= 1;
                }
                i = vq->vq_used_cons_idx;
        }
@@ -96,7 +96,7 @@ virtqueue_rxvq_flush_split(struct virtqueue *vq)
 
        for (i = 0; i < nb_used; i++) {
                used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
-               uep = &vq->vq_ring.used->ring[used_idx];
+               uep = &vq->vq_split.ring.used->ring[used_idx];
                if (hw->use_simple_rx) {
                        desc_idx = used_idx;
                        rte_pktmbuf_free(vq->sw_ring[desc_idx]);
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 80c0c43..48b3912 100644
@@ -191,17 +191,22 @@ struct vq_desc_extra {
 
 struct virtqueue {
        struct virtio_hw  *hw; /**< virtio_hw structure pointer. */
-       struct vring vq_ring;  /**< vring keeping desc, used and avail */
-       struct vring_packed ring_packed;  /**< vring keeping descs */
-       bool used_wrap_counter;
-       uint16_t cached_flags; /**< cached flags for descs */
-       uint16_t event_flags_shadow;
+       union {
+               struct {
+                       /**< vring keeping desc, used and avail */
+                       struct vring ring;
+               } vq_split;
+
+               struct {
+                       /**< vring keeping descs and events */
+                       struct vring_packed ring;
+                       bool used_wrap_counter;
+                       uint16_t cached_flags; /**< cached flags for descs */
+                       uint16_t event_flags_shadow;
+               } vq_packed;
+       };
 
-       /**
-        * Last consumed descriptor in the used table,
-        * trails vq_ring.used->idx.
-        */
-       uint16_t vq_used_cons_idx;
+       uint16_t vq_used_cons_idx; /**< last consumed descriptor */
        uint16_t vq_nentries;  /**< vring desc numbers */
        uint16_t vq_free_cnt;  /**< num of desc available */
        uint16_t vq_avail_idx; /**< sync until needed */
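
Since a virtqueue is negotiated as split or packed once, at setup time,
the two sub-structures above can safely alias through the anonymous
union. A hedged sketch of the access discipline the driver follows
(vtpci_packed_queue() is the existing layout test used throughout this
patch; handle_packed()/handle_split() are hypothetical placeholders):

    /* Sketch: exactly one arm of the union is live per queue. */
    if (vtpci_packed_queue(vq->hw))
            handle_packed(vq->vq_packed.ring.desc_packed);
    else
            handle_split(vq->vq_split.ring.desc);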
@@ -289,7 +294,7 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
        used = !!(flags & VRING_DESC_F_USED(1));
        avail = !!(flags & VRING_DESC_F_AVAIL(1));
 
-       return avail == used && used == vq->used_wrap_counter;
+       return avail == used && used == vq->vq_packed.used_wrap_counter;
 }
 
 static inline void
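
desc_is_used() above now reads the wrap counter from the packed
sub-structure; the comparison itself is unchanged. A short worked
example of the convention (values illustrative):

    /* First pass: vq_packed.used_wrap_counter == 1. The device marks a
     * used descriptor with AVAIL == USED == 1, so avail == used ==
     * counter holds and the entry is consumed. Once vq_used_cons_idx
     * wraps, the counter flips to 0; stale flags from the previous pass
     * still read AVAIL == USED == 1, which no longer matches the
     * counter, so the entry is correctly treated as not yet used. */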
@@ -297,10 +302,10 @@ vring_desc_init_packed(struct virtqueue *vq, int n)
 {
        int i;
        for (i = 0; i < n - 1; i++) {
-               vq->ring_packed.desc_packed[i].id = i;
+               vq->vq_packed.ring.desc_packed[i].id = i;
                vq->vq_descx[i].next = i + 1;
        }
-       vq->ring_packed.desc_packed[i].id = i;
+       vq->vq_packed.ring.desc_packed[i].id = i;
        vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
 }
 
@@ -321,10 +326,10 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n)
 static inline void
 virtqueue_disable_intr_packed(struct virtqueue *vq)
 {
-       if (vq->event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
-               vq->event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
-               vq->ring_packed.driver_event->desc_event_flags =
-                       vq->event_flags_shadow;
+       if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
+               vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
+               vq->vq_packed.ring.driver_event->desc_event_flags =
+                       vq->vq_packed.event_flags_shadow;
        }
 }
 
@@ -337,7 +342,7 @@ virtqueue_disable_intr(struct virtqueue *vq)
        if (vtpci_packed_queue(vq->hw))
                virtqueue_disable_intr_packed(vq);
        else
-               vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+               vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 }
 
 /**
@@ -346,11 +351,10 @@ virtqueue_disable_intr(struct virtqueue *vq)
 static inline void
 virtqueue_enable_intr_packed(struct virtqueue *vq)
 {
-       uint16_t *event_flags = &vq->ring_packed.driver_event->desc_event_flags;
-
-       if (vq->event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
-               vq->event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
-               *event_flags = vq->event_flags_shadow;
+       if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
+               vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
+               vq->vq_packed.ring.driver_event->desc_event_flags =
+                       vq->vq_packed.event_flags_shadow;
        }
 }
 
@@ -360,7 +364,7 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)
 static inline void
 virtqueue_enable_intr_split(struct virtqueue *vq)
 {
-       vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+       vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
 }
 
 /**
@@ -404,7 +408,8 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
                return VTNET_TQ;
 }
 
-#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_split.ring.used->idx - \
+                                       (vq)->vq_used_cons_idx))
 
 void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
 void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
@@ -415,7 +420,7 @@ static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
        virtio_wmb(vq->hw->weak_barriers);
-       vq->vq_ring.avail->idx = vq->vq_avail_idx;
+       vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
 }
 
 static inline void
@@ -430,8 +435,8 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
         * descriptor.
         */
        avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
-       if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
-               vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+       if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
+               vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
        vq->vq_avail_idx++;
 }
 
@@ -443,7 +448,7 @@ virtqueue_kick_prepare(struct virtqueue *vq)
         * the used->flags.
         */
        virtio_mb(vq->hw->weak_barriers);
-       return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+       return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
 }
 
 static inline int
@@ -455,7 +460,7 @@ virtqueue_kick_prepare_packed(struct virtqueue *vq)
         * Ensure updated data is visible to vhost before reading the flags.
         */
        virtio_mb(vq->hw->weak_barriers);
-       flags = vq->ring_packed.device_event->desc_event_flags;
+       flags = vq->vq_packed.ring.device_event->desc_event_flags;
 
        return flags != RING_EVENT_FLAGS_DISABLE;
 }
@@ -473,15 +478,15 @@ virtqueue_notify(struct virtqueue *vq)
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
        uint16_t used_idx, nused; \
-       used_idx = (vq)->vq_ring.used->idx; \
+       used_idx = (vq)->vq_split.ring.used->idx; \
        nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
        if (vtpci_packed_queue((vq)->hw)) { \
                PMD_INIT_LOG(DEBUG, \
                "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
                " cached_flags=0x%x; used_wrap_counter=%d", \
                (vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
-               (vq)->vq_avail_idx, (vq)->cached_flags, \
-               (vq)->used_wrap_counter); \
+               (vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
+               (vq)->vq_packed.used_wrap_counter); \
                break; \
        } \
        PMD_INIT_LOG(DEBUG, \
@@ -489,9 +494,9 @@ virtqueue_notify(struct virtqueue *vq)
          " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
          " avail.flags=0x%x; used.flags=0x%x", \
          (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
-         (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
-         (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
-         (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+         (vq)->vq_desc_head_idx, (vq)->vq_split.ring.avail->idx, \
+         (vq)->vq_used_cons_idx, (vq)->vq_split.ring.used->idx, \
+         (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
 } while (0)
 #else
 #define VIRTQUEUE_DUMP(vq) do { } while (0)