X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtqueue.h;h=8d7f197b139cd6459293cfe6557a3eafb3e870ad;hb=5fc66630bed5db8b0e2507e7324f1c8f98e0dd9a;hp=48b3912e66294bd1401a5e3f2905a6a28c2fe29a;hpb=dfd33aa4e586e9fca964faaebbfa74ebbb342613;p=dpdk.git

diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 48b3912e66..8d7f197b13 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -54,6 +54,53 @@ virtio_wmb(uint8_t weak_barriers)
 		rte_cio_wmb();
 }
 
+static inline uint16_t
+virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
+			      uint8_t weak_barriers)
+{
+	uint16_t flags;
+
+	if (weak_barriers) {
+/* x86 prefers using rte_smp_rmb over __atomic_load_n as it shows
+ * slightly better performance (~1.5%), which comes from the branch saved
+ * by the compiler. The if and else branches are identical, as the smp
+ * and cio barriers are both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+		flags = dp->flags;
+		rte_smp_rmb();
+#else
+		flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+#endif
+	} else {
+		flags = dp->flags;
+		rte_cio_rmb();
+	}
+
+	return flags;
+}
+
+static inline void
+virtqueue_store_flags_packed(struct vring_packed_desc *dp,
+			      uint16_t flags, uint8_t weak_barriers)
+{
+	if (weak_barriers) {
+/* x86 prefers using rte_smp_wmb over __atomic_store_n as it shows
+ * slightly better performance (~1.5%), which comes from the branch saved
+ * by the compiler. The if and else branches are identical, as the smp
+ * and cio barriers are both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+		rte_smp_wmb();
+		dp->flags = flags;
+#else
+		__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+#endif
+	} else {
+		rte_cio_wmb();
+		dp->flags = flags;
+	}
+}
 #ifdef RTE_PMD_PACKET_PREFETCH
 #define rte_packet_prefetch(p) rte_prefetch1(p)
 #else
@@ -134,7 +181,7 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
  */
 struct virtio_net_ctrl_mac {
 	uint32_t entries;
-	uint8_t macs[][ETHER_ADDR_LEN];
+	uint8_t macs[][RTE_ETHER_ADDR_LEN];
 } __attribute__((__packed__));
 
 #define VIRTIO_NET_CTRL_MAC 1
@@ -277,12 +324,8 @@ struct virtio_net_hdr_mrg_rxbuf {
 #define VIRTIO_MAX_TX_INDIRECT 8
 struct virtio_tx_region {
 	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
-	union {
-		struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
-			__attribute__((__aligned__(16)));
-		struct vring_packed_desc tx_indir_pq[VIRTIO_MAX_TX_INDIRECT]
-			__attribute__((__aligned__(16)));
-	};
+	struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
+		__attribute__((__aligned__(16)));
 };
 
 static inline int
@@ -290,9 +333,9 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
 {
 	uint16_t used, avail, flags;
 
-	flags = desc->flags;
-	used = !!(flags & VRING_DESC_F_USED(1));
-	avail = !!(flags & VRING_DESC_F_AVAIL(1));
+	flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
+	used = !!(flags & VRING_PACKED_DESC_F_USED);
+	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
 
 	return avail == used && used == vq->vq_packed.used_wrap_counter;
 }
@@ -302,10 +345,10 @@ vring_desc_init_packed(struct virtqueue *vq, int n)
 {
 	int i;
 	for (i = 0; i < n - 1; i++) {
-		vq->vq_packed.ring.desc_packed[i].id = i;
+		vq->vq_packed.ring.desc[i].id = i;
 		vq->vq_descx[i].next = i + 1;
 	}
-	vq->vq_packed.ring.desc_packed[i].id = i;
+	vq->vq_packed.ring.desc[i].id = i;
 	vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
 }
 
@@ -321,18 +364,27 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n)
 }
 
 /**
- * Tell the backend not to interrupt us.
+ * Tell the backend not to interrupt us. Implementation for packed virtqueues.
  */
 static inline void
 virtqueue_disable_intr_packed(struct virtqueue *vq)
 {
 	if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
 		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
-		vq->vq_packed.ring.driver_event->desc_event_flags =
+		vq->vq_packed.ring.driver->desc_event_flags =
 			vq->vq_packed.event_flags_shadow;
 	}
 }
 
+/**
+ * Tell the backend not to interrupt us. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_disable_intr_split(struct virtqueue *vq)
+{
+	vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
 /**
  * Tell the backend not to interrupt us.
  */
@@ -342,7 +394,7 @@ virtqueue_disable_intr(struct virtqueue *vq)
 	if (vtpci_packed_queue(vq->hw))
 		virtqueue_disable_intr_packed(vq);
 	else
-		vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+		virtqueue_disable_intr_split(vq);
 }
 
 /**
@@ -353,7 +405,7 @@ virtqueue_enable_intr_packed(struct virtqueue *vq)
 {
 	if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
 		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
-		vq->vq_packed.ring.driver_event->desc_event_flags =
+		vq->vq_packed.ring.driver->desc_event_flags =
 			vq->vq_packed.event_flags_shadow;
 	}
 }
@@ -460,7 +512,7 @@ virtqueue_kick_prepare_packed(struct virtqueue *vq)
 	 * Ensure updated data is visible to vhost before reading the flags.
 	 */
 	virtio_mb(vq->hw->weak_barriers);
-	flags = vq->vq_packed.ring.device_event->desc_event_flags;
+	flags = vq->vq_packed.ring.device->desc_event_flags;
 
 	return flags != RING_EVENT_FLAGS_DISABLE;
 }
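
The helpers introduced in the first hunk pair a release store with an acquire load on the packed descriptor flags, and desc_is_used() builds its avail == used == wrap_counter test on top of the acquire side. The sketch below is a standalone illustration of that ordering contract, not part of the patch: the struct layout is reduced to the fields the sketch touches, the bit masks follow the virtio 1.1 packed-ring layout, and mark_desc_used()/poll_desc_used() are hypothetical names standing in for the backend write-back (which really lives in vhost) and the driver-side poll. Only GCC/Clang __atomic builtins are used, so it compiles without DPDK headers (e.g. gcc -O2 -o flags_sketch flags_sketch.c).

/*
 * Standalone sketch of the acquire/release pairing used by
 * virtqueue_store_flags_packed()/virtqueue_fetch_flags_packed().
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Bit positions as in the virtio 1.1 packed-ring layout. */
#define VRING_PACKED_DESC_F_AVAIL	(1 << 7)
#define VRING_PACKED_DESC_F_USED	(1 << 15)

struct vring_packed_desc {	/* reduced stand-in for the DPDK struct */
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

/*
 * Backend side: write back id/len, then publish them with a RELEASE
 * store on flags, setting AVAIL and USED equal to the wrap counter.
 * This mirrors the weak-barriers non-x86 path of
 * virtqueue_store_flags_packed().
 */
static void
mark_desc_used(struct vring_packed_desc *dp, uint16_t id, uint32_t len,
	       bool wrap_counter)
{
	uint16_t flags = wrap_counter ?
		(VRING_PACKED_DESC_F_AVAIL | VRING_PACKED_DESC_F_USED) : 0;

	dp->id = id;
	dp->len = len;
	__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
}

/*
 * Driver side: an ACQUIRE load of flags, then the same
 * avail == used == wrap_counter test as desc_is_used().  The
 * acquire/release pair guarantees id/len are only read after the
 * flag flip is observed.
 */
static bool
poll_desc_used(struct vring_packed_desc *dp, bool used_wrap_counter)
{
	uint16_t flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
	bool used = !!(flags & VRING_PACKED_DESC_F_USED);
	bool avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);

	return avail == used && used == used_wrap_counter;
}

int
main(void)
{
	struct vring_packed_desc desc = { 0 };
	bool wrap = true;

	printf("used before write-back: %d\n", poll_desc_used(&desc, wrap));
	mark_desc_used(&desc, 0, 128, wrap);
	printf("used after write-back:  %d\n", poll_desc_used(&desc, wrap));
	return 0;
}

On x86 the patch deliberately keeps plain loads/stores plus rte_smp_rmb()/rte_smp_wmb() instead of the builtins; both barriers compile down to compiler barriers there, so the ordering shown above is preserved while avoiding the extra branch the comment in the hunk refers to.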