diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 6dab7db8e0..e3d38b5a67 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -54,6 +54,53 @@ virtio_wmb(uint8_t weak_barriers)
 		rte_cio_wmb();
 }
 
+static inline uint16_t
+virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
+			     uint8_t weak_barriers)
+{
+	uint16_t flags;
+
+	if (weak_barriers) {
+/* x86 prefers rte_smp_rmb over __atomic_load_n as it reports better
+ * performance (~1.5%), which comes from the branch the compiler saves.
+ * The if and else branches are identical, with the smp and cio barriers
+ * both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+		flags = dp->flags;
+		rte_smp_rmb();
+#else
+		flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+#endif
+	} else {
+		flags = dp->flags;
+		rte_cio_rmb();
+	}
+
+	return flags;
+}
+
+static inline void
+virtqueue_store_flags_packed(struct vring_packed_desc *dp,
+			     uint16_t flags, uint8_t weak_barriers)
+{
+	if (weak_barriers) {
+/* x86 prefers rte_smp_wmb over __atomic_store_n as it reports better
+ * performance (~1.5%), which comes from the branch the compiler saves.
+ * The if and else branches are identical, with the smp and cio barriers
+ * both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+		rte_smp_wmb();
+		dp->flags = flags;
+#else
+		__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+#endif
+	} else {
+		rte_cio_wmb();
+		dp->flags = flags;
+	}
+}
 #ifdef RTE_PMD_PACKET_PREFETCH
 #define rte_packet_prefetch(p) rte_prefetch1(p)
 #else
@@ -134,8 +181,8 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
  */
 struct virtio_net_ctrl_mac {
 	uint32_t entries;
-	uint8_t macs[][ETHER_ADDR_LEN];
-} __attribute__((__packed__));
+	uint8_t macs[][RTE_ETHER_ADDR_LEN];
+} __rte_packed;
 
 #define VIRTIO_NET_CTRL_MAC 1
 #define VIRTIO_NET_CTRL_MAC_TABLE_SET 0
@@ -168,7 +215,7 @@ struct virtio_net_ctrl_mac {
 struct virtio_net_ctrl_hdr {
 	uint8_t class;
 	uint8_t cmd;
-} __attribute__((packed));
+} __rte_packed;
 
 typedef uint8_t virtio_net_ctrl_ack;
 
@@ -278,7 +325,7 @@ struct virtio_net_hdr_mrg_rxbuf {
 struct virtio_tx_region {
 	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
 	struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
-		__attribute__((__aligned__(16)));
+		__rte_aligned(16);
 };
 
 static inline int
@@ -286,9 +333,9 @@ desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
 {
 	uint16_t used, avail, flags;
 
-	flags = desc->flags;
-	used = !!(flags & VRING_DESC_F_USED(1));
-	avail = !!(flags & VRING_DESC_F_AVAIL(1));
+	flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
+	used = !!(flags & VRING_PACKED_DESC_F_USED);
+	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
 
 	return avail == used && used == vq->vq_packed.used_wrap_counter;
 }
@@ -317,7 +364,7 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n)
 }
 
 /**
- * Tell the backend not to interrupt us.
+ * Tell the backend not to interrupt us. Implementation for packed virtqueues.
  */
 static inline void
 virtqueue_disable_intr_packed(struct virtqueue *vq)
@@ -329,6 +376,15 @@ virtqueue_disable_intr_packed(struct virtqueue *vq)
 	}
 }
 
+/**
+ * Tell the backend not to interrupt us. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_disable_intr_split(struct virtqueue *vq)
+{
+	vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
 /**
  * Tell the backend not to interrupt us.
  */
@@ -338,7 +394,7 @@ virtqueue_disable_intr(struct virtqueue *vq)
 	if (vtpci_packed_queue(vq->hw))
 		virtqueue_disable_intr_packed(vq);
 	else
-		vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+		virtqueue_disable_intr_split(vq);
 }
 
 /**
@@ -387,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 /* Flush the elements in the used ring. */
 void virtqueue_rxvq_flush(struct virtqueue *vq);
 
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
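
For context on the new helpers above: on weakly ordered CPUs, virtqueue_store_flags_packed() must make a fully written descriptor visible before its flags change is, and virtqueue_fetch_flags_packed() must keep payload reads from drifting ahead of the flags check. Below is a minimal standalone sketch of that acquire/release pairing. It uses the same __atomic_load_n/__atomic_store_n builtins and memory orders as the patch; everything else (demo_desc, publisher, poller, DEMO_F_USED) is invented for illustration and is not DPDK code.

#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_F_USED (1 << 15)	/* stands in for VRING_PACKED_DESC_F_USED */

struct demo_desc {
	uint64_t addr;		/* payload, written before flags are published */
	uint16_t flags;
};

static struct demo_desc desc;

/* "Device" side: fill the descriptor, then publish it with a release store,
 * as virtqueue_store_flags_packed() does on weakly ordered targets. */
static void *publisher(void *arg)
{
	(void)arg;
	desc.addr = 0x1234;
	__atomic_store_n(&desc.flags, DEMO_F_USED, __ATOMIC_RELEASE);
	return NULL;
}

/* "Driver" side: poll the flags with an acquire load, as
 * virtqueue_fetch_flags_packed() does; once the used bit is observed,
 * the payload read below cannot be reordered ahead of the check. */
static void *poller(void *arg)
{
	(void)arg;
	while (!(__atomic_load_n(&desc.flags, __ATOMIC_ACQUIRE) & DEMO_F_USED))
		;
	printf("payload: 0x%" PRIx64 "\n", desc.addr);
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&c, NULL, poller, NULL);
	pthread_create(&p, NULL, publisher, NULL);
	pthread_join(c, NULL);
	pthread_join(p, NULL);
	return 0;
}

Build with e.g. gcc -pthread. On x86 both memory orders compile down to plain loads and stores, which is why the patch can special-case RTE_ARCH_X86_64 with compiler-only barriers and save a branch.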
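
The reworked desc_is_used() pairs the fetched flags with the ring's wrap counter. Here is a second small sketch of just that test, assuming the VIRTIO 1.1 packed-ring flag positions (AVAIL = bit 7, USED = bit 15); the helper name and the expected outputs in the comments are illustrative, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define DEMO_F_AVAIL (1 << 7)	/* VRING_PACKED_DESC_F_AVAIL per VIRTIO 1.1 */
#define DEMO_F_USED  (1 << 15)	/* VRING_PACKED_DESC_F_USED per VIRTIO 1.1 */

/* A descriptor is consumed when its AVAIL and USED bits agree and both
 * match the driver's current used wrap counter, mirroring the patched
 * desc_is_used(). */
static int demo_desc_is_used(uint16_t flags, uint16_t used_wrap_counter)
{
	uint16_t used = !!(flags & DEMO_F_USED);
	uint16_t avail = !!(flags & DEMO_F_AVAIL);

	return avail == used && used == used_wrap_counter;
}

int main(void)
{
	/* First lap over the ring: wrap counter is 1, device set both bits. */
	printf("%d\n", demo_desc_is_used(DEMO_F_AVAIL | DEMO_F_USED, 1)); /* 1 */
	/* Second lap: counter flipped to 0, so the same flags are now stale. */
	printf("%d\n", demo_desc_is_used(DEMO_F_AVAIL | DEMO_F_USED, 0)); /* 0 */
	return 0;
}

The wrap counter is what lets the driver distinguish a descriptor used on the current lap of the ring from one left over from the previous lap, without ever clearing the flags field.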