diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 14364f3564..d8cf5d4e2f 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -19,16 +19,88 @@ struct rte_mbuf;
 
 /*
- * Per virtio_config.h in Linux.
+ * Per virtio_ring.h in Linux.
  * For virtio_pci on SMP, we don't need to order with respect to MMIO
  * accesses through relaxed memory I/O windows, so smp_mb() et al are
  * sufficient.
  *
+ * For using virtio to talk to real devices (e.g. vDPA) we do need real
+ * barriers.
  */
-#define virtio_mb()	rte_smp_mb()
-#define virtio_rmb()	rte_smp_rmb()
-#define virtio_wmb()	rte_smp_wmb()
+static inline void
+virtio_mb(uint8_t weak_barriers)
+{
+	if (weak_barriers)
+		rte_smp_mb();
+	else
+		rte_mb();
+}
+
+static inline void
+virtio_rmb(uint8_t weak_barriers)
+{
+	if (weak_barriers)
+		rte_smp_rmb();
+	else
+		rte_cio_rmb();
+}
+
+static inline void
+virtio_wmb(uint8_t weak_barriers)
+{
+	if (weak_barriers)
+		rte_smp_wmb();
+	else
+		rte_cio_wmb();
+}
+
+static inline uint16_t
+virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
+			     uint8_t weak_barriers)
+{
+	uint16_t flags;
+
+	if (weak_barriers) {
+/* x86 prefers using rte_smp_rmb over __atomic_load_n as it reports better
+ * performance (~1.5%), which comes from the branch saved by the compiler.
+ * The if and else branches are identical, with the smp and cio barriers
+ * both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+		flags = dp->flags;
+		rte_smp_rmb();
+#else
+		flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE);
+#endif
+	} else {
+		flags = dp->flags;
+		rte_cio_rmb();
+	}
+
+	return flags;
+}
+
+static inline void
+virtqueue_store_flags_packed(struct vring_packed_desc *dp,
+			     uint16_t flags, uint8_t weak_barriers)
+{
+	if (weak_barriers) {
+/* x86 prefers using rte_smp_wmb over __atomic_store_n as it reports better
+ * performance (~1.5%), which comes from the branch saved by the compiler.
+ * The if and else branches are identical, with the smp and cio barriers
+ * both defined as compiler barriers on x86.
+ */
+#ifdef RTE_ARCH_X86_64
+		rte_smp_wmb();
+		dp->flags = flags;
+#else
+		__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
+#endif
+	} else {
+		rte_cio_wmb();
+		dp->flags = flags;
+	}
+}
+
 #ifdef RTE_PMD_PACKET_PREFETCH
 #define rte_packet_prefetch(p)  rte_prefetch1(p)
 #else
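
For context, a minimal hypothetical producer-side sketch of how these two helpers are meant to pair up (not part of the patch itself; the example_* name, and treating vq_packed.cached_flags from the virtqueue changes further down as the avail/used bits for the current wrap, are assumptions for illustration). The descriptor body is written first, and virtqueue_store_flags_packed() is the release point that publishes it to the device, matching the acquire in virtqueue_fetch_flags_packed() on the consumer side:

	/* Hypothetical sketch: publish one packed descriptor to the device. */
	static void
	example_make_desc_avail(struct virtqueue *vq, uint16_t idx,
				uint64_t addr, uint32_t len)
	{
		struct vring_packed_desc *dp = &vq->vq_packed.ring.desc[idx];

		dp->addr = addr;	/* descriptor body must be written first */
		dp->len = len;
		dp->id = idx;
		/* Release store: hands the descriptor over to the device.
		 * cached_flags is assumed to carry the avail/used bits for
		 * the current wrap. */
		virtqueue_store_flags_packed(dp, vq->vq_packed.cached_flags,
					     vq->hw->weak_barriers);
	}
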
@@ -109,7 +181,7 @@ enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
  */
 struct virtio_net_ctrl_mac {
 	uint32_t entries;
-	uint8_t macs[][ETHER_ADDR_LEN];
+	uint8_t macs[][RTE_ETHER_ADDR_LEN];
 } __attribute__((__packed__));
 
 #define VIRTIO_NET_CTRL_MAC 1
@@ -161,16 +233,27 @@ struct virtio_pmd_ctrl {
 struct vq_desc_extra {
 	void *cookie;
 	uint16_t ndescs;
+	uint16_t next;
 };
 
 struct virtqueue {
 	struct virtio_hw *hw; /**< virtio_hw structure pointer. */
-	struct vring vq_ring; /**< vring keeping desc, used and avail */
-	/**
-	 * Last consumed descriptor in the used table,
-	 * trails vq_ring.used->idx.
-	 */
-	uint16_t vq_used_cons_idx;
+	union {
+		struct {
+			/**< vring keeping desc, used and avail */
+			struct vring ring;
+		} vq_split;
+
+		struct {
+			/**< vring keeping descs and events */
+			struct vring_packed ring;
+			bool used_wrap_counter;
+			uint16_t cached_flags; /**< cached flags for descs */
+			uint16_t event_flags_shadow;
+		} vq_packed;
+	};
+
+	uint16_t vq_used_cons_idx; /**< last consumed descriptor */
 	uint16_t vq_nentries;  /**< vring desc numbers */
 	uint16_t vq_free_cnt;  /**< num of desc available */
 	uint16_t vq_avail_idx; /**< sync until needed */
@@ -242,12 +325,36 @@ struct virtio_net_hdr_mrg_rxbuf {
 struct virtio_tx_region {
 	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
 	struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
-			__attribute__((__aligned__(16)));
+		__rte_aligned(16);
 };
 
+static inline int
+desc_is_used(struct vring_packed_desc *desc, struct virtqueue *vq)
+{
+	uint16_t used, avail, flags;
+
+	flags = virtqueue_fetch_flags_packed(desc, vq->hw->weak_barriers);
+	used = !!(flags & VRING_PACKED_DESC_F_USED);
+	avail = !!(flags & VRING_PACKED_DESC_F_AVAIL);
+
+	return avail == used && used == vq->vq_packed.used_wrap_counter;
+}
+
+static inline void
+vring_desc_init_packed(struct virtqueue *vq, int n)
+{
+	int i;
+	for (i = 0; i < n - 1; i++) {
+		vq->vq_packed.ring.desc[i].id = i;
+		vq->vq_descx[i].next = i + 1;
+	}
+	vq->vq_packed.ring.desc[i].id = i;
+	vq->vq_descx[i].next = VQ_RING_DESC_CHAIN_END;
+}
+
 /* Chain all the descriptors in the ring with an END */
 static inline void
-vring_desc_init(struct vring_desc *dp, uint16_t n)
+vring_desc_init_split(struct vring_desc *dp, uint16_t n)
 {
 	uint16_t i;
 
@@ -256,13 +363,60 @@ vring_desc_init(struct vring_desc *dp, uint16_t n)
 	dp[i].next = VQ_RING_DESC_CHAIN_END;
 }
 
+/**
+ * Tell the backend not to interrupt us. Implementation for packed virtqueues.
+ */
+static inline void
+virtqueue_disable_intr_packed(struct virtqueue *vq)
+{
+	if (vq->vq_packed.event_flags_shadow != RING_EVENT_FLAGS_DISABLE) {
+		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_DISABLE;
+		vq->vq_packed.ring.driver->desc_event_flags =
+			vq->vq_packed.event_flags_shadow;
+	}
+}
+
+/**
+ * Tell the backend not to interrupt us. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_disable_intr_split(struct virtqueue *vq)
+{
+	vq->vq_split.ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
 /**
  * Tell the backend not to interrupt us.
  */
 static inline void
 virtqueue_disable_intr(struct virtqueue *vq)
 {
-	vq->vq_ring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+	if (vtpci_packed_queue(vq->hw))
+		virtqueue_disable_intr_packed(vq);
+	else
+		virtqueue_disable_intr_split(vq);
+}
+
+/**
+ * Tell the backend to interrupt. Implementation for packed virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_packed(struct virtqueue *vq)
+{
+	if (vq->vq_packed.event_flags_shadow == RING_EVENT_FLAGS_DISABLE) {
+		vq->vq_packed.event_flags_shadow = RING_EVENT_FLAGS_ENABLE;
+		vq->vq_packed.ring.driver->desc_event_flags =
+			vq->vq_packed.event_flags_shadow;
+	}
+}
+
+/**
+ * Tell the backend to interrupt. Implementation for split virtqueues.
+ */
+static inline void
+virtqueue_enable_intr_split(struct virtqueue *vq)
+{
+	vq->vq_split.ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
 }
 
 /**
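
To illustrate how desc_is_used() and used_wrap_counter cooperate, here is a hypothetical consumer-side poll sketch (the example_* name is illustrative; a real RX/TX path would also reclaim the cookie stashed in vq_descx and update vq_free_cnt). desc_is_used() performs the acquire on the flags, and the wrap counter flips whenever the consumer index wraps past the ring end:

	/* Hypothetical sketch: consume one used packed descriptor, if any. */
	static int
	example_poll_used(struct virtqueue *vq)
	{
		uint16_t idx = vq->vq_used_cons_idx;
		struct vring_packed_desc *desc = &vq->vq_packed.ring.desc[idx];

		if (!desc_is_used(desc, vq))
			return -1;	/* device has not completed it yet */

		/* ... process vq->vq_descx[desc->id].cookie here ... */

		if (++vq->vq_used_cons_idx >= vq->vq_nentries) {
			vq->vq_used_cons_idx = 0;
			vq->vq_packed.used_wrap_counter ^= 1;
		}
		return 0;
	}
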
@@ -271,7 +425,10 @@ virtqueue_disable_intr(struct virtqueue *vq)
 static inline void
 virtqueue_enable_intr(struct virtqueue *vq)
 {
-	vq->vq_ring.avail->flags &= (~VRING_AVAIL_F_NO_INTERRUPT);
+	if (vtpci_packed_queue(vq->hw))
+		virtqueue_enable_intr_packed(vq);
+	else
+		virtqueue_enable_intr_split(vq);
 }
 
 /**
@@ -286,6 +443,10 @@ struct rte_mbuf *virtqueue_detach_unused(struct virtqueue *vq);
 /* Flush the elements in the used ring. */
 void virtqueue_rxvq_flush(struct virtqueue *vq);
 
+int virtqueue_rxvq_reset_packed(struct virtqueue *vq);
+
+int virtqueue_txvq_reset_packed(struct virtqueue *vq);
+
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
@@ -303,15 +464,19 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
 		return VTNET_TQ;
 }
 
-#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
+#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_split.ring.used->idx - \
+					(vq)->vq_used_cons_idx))
 
 void vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx);
+void vq_ring_free_chain_packed(struct virtqueue *vq, uint16_t used_idx);
+void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
+			  uint16_t num);
 
 static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
-	virtio_wmb();
-	vq->vq_ring.avail->idx = vq->vq_avail_idx;
+	virtio_wmb(vq->hw->weak_barriers);
+	vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
 }
 
 static inline void
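
The split-ring helpers above and below are typically combined as in this hypothetical sketch (vq_update_avail_ring(), virtqueue_kick_prepare() and virtqueue_notify() appear in the next hunk; the example_* name is illustrative). The virtio_wmb() inside vq_update_avail_idx() publishes the descriptors before the index, and the virtio_mb() inside virtqueue_kick_prepare() orders that index store against the used->flags read:

	/* Hypothetical sketch: expose a split-ring chain head and kick. */
	static void
	example_submit_split(struct virtqueue *vq, uint16_t head_idx)
	{
		vq_update_avail_ring(vq, head_idx);	/* head into avail ring */
		vq_update_avail_idx(vq);	/* wmb, then publish avail->idx */

		if (virtqueue_kick_prepare(vq))	/* mb, then check NO_NOTIFY */
			virtqueue_notify(vq);	/* doorbell via VTPCI_OPS */
	}
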
@@ -326,41 +491,68 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
 	 * descriptor.
 	 */
 	avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1));
-	if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx))
-		vq->vq_ring.avail->ring[avail_idx] = desc_idx;
+	if (unlikely(vq->vq_split.ring.avail->ring[avail_idx] != desc_idx))
+		vq->vq_split.ring.avail->ring[avail_idx] = desc_idx;
 	vq->vq_avail_idx++;
 }
 
 static inline int
 virtqueue_kick_prepare(struct virtqueue *vq)
 {
-	return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
+	/*
+	 * Ensure updated avail->idx is visible to vhost before reading
+	 * the used->flags.
+	 */
+	virtio_mb(vq->hw->weak_barriers);
+	return !(vq->vq_split.ring.used->flags & VRING_USED_F_NO_NOTIFY);
 }
 
-static inline void
-virtqueue_notify(struct virtqueue *vq)
+static inline int
+virtqueue_kick_prepare_packed(struct virtqueue *vq)
 {
+	uint16_t flags;
+
 	/*
-	 * Ensure updated avail->idx is visible to host.
-	 * For virtio on IA, the notificaiton is through io port operation
-	 * which is a serialization instruction itself.
+	 * Ensure updated data is visible to vhost before reading the flags.
 	 */
+	virtio_mb(vq->hw->weak_barriers);
+	flags = vq->vq_packed.ring.device->desc_event_flags;
+
+	return flags != RING_EVENT_FLAGS_DISABLE;
+}
+
+/*
+ * virtqueue_kick_prepare*() or virtio_wmb() should be called before this
+ * function to make sure that all the data is visible to vhost.
+ */
+static inline void
+virtqueue_notify(struct virtqueue *vq)
+{
 	VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq);
 }
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
 	uint16_t used_idx, nused; \
-	used_idx = (vq)->vq_ring.used->idx; \
+	used_idx = (vq)->vq_split.ring.used->idx; \
 	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
+	if (vtpci_packed_queue((vq)->hw)) { \
+		PMD_INIT_LOG(DEBUG, \
+		"VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \
+		" cached_flags=0x%x; used_wrap_counter=%d", \
+		(vq)->vq_nentries, (vq)->vq_free_cnt, (vq)->vq_used_cons_idx, \
+		(vq)->vq_avail_idx, (vq)->vq_packed.cached_flags, \
+		(vq)->vq_packed.used_wrap_counter); \
+		break; \
+	} \
 	PMD_INIT_LOG(DEBUG, \
 	  "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
 	  " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
 	  " avail.flags=0x%x; used.flags=0x%x", \
 	  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
-	  (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
-	  (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
-	  (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
+	  (vq)->vq_desc_head_idx, (vq)->vq_split.ring.avail->idx, \
+	  (vq)->vq_used_cons_idx, (vq)->vq_split.ring.used->idx, \
+	  (vq)->vq_split.ring.avail->flags, (vq)->vq_split.ring.used->flags); \
 } while (0)
 #else
 #define VIRTQUEUE_DUMP(vq) do { } while (0)
 #endif
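
Finally, a hypothetical usage note for the dump macro (the example_* name is illustrative): with RTE_LIBRTE_VIRTIO_DEBUG_DUMP defined at build time, VIRTQUEUE_DUMP() picks the packed or split format at run time, and it compiles to a no-op otherwise, so it is cheap to leave in bring-up paths. VIRTQUEUE_NUSED() reads the split ring only, so it is guarded here:

	/* Hypothetical sketch: dump queue state while debugging. */
	static void
	example_debug_queue(struct virtqueue *vq)
	{
		VIRTQUEUE_DUMP(vq);	/* no-op without the debug option */
		if (!vtpci_packed_queue(vq->hw))
			PMD_INIT_LOG(DEBUG, "split ring: %d used entries pending",
				     VIRTQUEUE_NUSED(vq));
	}
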