X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtqueue.h;h=42c4c9882f8f6001f8b251f8bd89fd9fc0992e7d;hb=bd885ab120e2335f978a28ee0aa4303017390e15;hp=4f6956d8458395330f288bb6e3876e9c7a8a15e2;hpb=ea5207c158edb00d7e5da17369ea45a462a40dcc;p=dpdk.git

diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 4f6956d845..42c4c9882f 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -47,7 +47,7 @@ virtio_rmb(uint8_t weak_barriers)
 	if (weak_barriers)
 		rte_smp_rmb();
 	else
-		rte_cio_rmb();
+		rte_io_rmb();
 }
 
 static inline void
@@ -56,7 +56,7 @@ virtio_wmb(uint8_t weak_barriers)
 	if (weak_barriers)
 		rte_smp_wmb();
 	else
-		rte_cio_wmb();
+		rte_io_wmb();
 }
 
 static inline uint16_t
@@ -68,7 +68,7 @@ virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
 	if (weak_barriers) {
 	/* x86 prefers to using rte_smp_rmb over __atomic_load_n as it reports
 	 * a better perf(~1.5%), which comes from the saved branch by the compiler.
-	 * The if and else branch are identical with the smp and cio barriers both
+	 * The if and else branch are identical with the smp and io barriers both
 	 * defined as compiler barriers on x86.
 	 */
 #ifdef RTE_ARCH_X86_64
@@ -79,7 +79,7 @@ virtqueue_fetch_flags_packed(struct vring_packed_desc *dp,
 #endif
 	} else {
 		flags = dp->flags;
-		rte_cio_rmb();
+		rte_io_rmb();
 	}
 
 	return flags;
@@ -92,7 +92,7 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp,
 	if (weak_barriers) {
 	/* x86 prefers to using rte_smp_wmb over __atomic_store_n as it reports
 	 * a better perf(~1.5%), which comes from the saved branch by the compiler.
-	 * The if and else branch are identical with the smp and cio barriers both
+	 * The if and else branch are identical with the smp and io barriers both
 	 * defined as compiler barriers on x86.
 	 */
 #ifdef RTE_ARCH_X86_64
@@ -102,7 +102,7 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp,
 		__atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE);
 #endif
 	} else {
-		rte_cio_wmb();
+		rte_io_wmb();
 		dp->flags = flags;
 	}
 }
@@ -329,8 +329,11 @@ struct virtio_net_hdr_mrg_rxbuf {
 #define VIRTIO_MAX_TX_INDIRECT 8
 struct virtio_tx_region {
 	struct virtio_net_hdr_mrg_rxbuf tx_hdr;
-	struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]
-		__rte_aligned(16);
+	union {
+		struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT];
+		struct vring_packed_desc
+			tx_packed_indir[VIRTIO_MAX_TX_INDIRECT];
+	} __rte_aligned(16);
 };
 
 static inline int
@@ -368,6 +371,16 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n)
 	dp[i].next = VQ_RING_DESC_CHAIN_END;
 }
 
+static inline void
+vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n)
+{
+	int i;
+	for (i = 0; i < n; i++) {
+		dp[i].id = (uint16_t)i;
+		dp[i].flags = VRING_DESC_F_WRITE;
+	}
+}
+
 /**
  * Tell the backend not to interrupt us. Implementation for packed virtqueues.
  */
@@ -469,7 +482,7 @@ virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx)
 		return VTNET_TQ;
 }
 
-/* virtqueue_nused has load-acquire or rte_cio_rmb insed */
+/* virtqueue_nused has load-acquire or rte_io_rmb insed */
 static inline uint16_t
 virtqueue_nused(const struct virtqueue *vq)
 {
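
The union added to struct virtio_tx_region above lets the same 16-byte-aligned reserved area back either a split-ring indirect table (vring_desc) or a packed-ring one (vring_packed_desc), and the new vring_desc_init_indirect_packed() helper pre-assigns each entry's id and write flag. Below is a standalone sketch, not part of the patch, of priming one such table; the struct definition is a trimmed, assumed copy of the virtio layout and main() is only an illustrative driver.

/* Standalone sketch (not part of the patch): prime one packed-ring
 * indirect table the way the new helper does at queue setup.
 */
#include <stdint.h>
#include <stdio.h>

#define VRING_DESC_F_WRITE	2	/* value taken from the virtio spec */
#define VIRTIO_MAX_TX_INDIRECT	8

struct vring_packed_desc {		/* trimmed copy for illustration */
	uint64_t addr;
	uint32_t len;
	uint16_t id;
	uint16_t flags;
};

static inline void
vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		dp[i].id = (uint16_t)i;
		dp[i].flags = VRING_DESC_F_WRITE;
	}
}

int
main(void)
{
	struct vring_packed_desc tx_packed_indir[VIRTIO_MAX_TX_INDIRECT];
	int i;

	/* the driver would run this once per slot of the reserved header
	 * region; here a single table is initialized and dumped
	 */
	vring_desc_init_indirect_packed(tx_packed_indir, VIRTIO_MAX_TX_INDIRECT);

	for (i = 0; i < VIRTIO_MAX_TX_INDIRECT; i++)
		printf("desc %d: id=%u flags=0x%x\n",
		       i, tx_packed_indir[i].id, tx_packed_indir[i].flags);
	return 0;
}
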
@@ -480,7 +493,7 @@ virtqueue_nused(const struct virtqueue *vq)
 	 * x86 prefers to using rte_smp_rmb over __atomic_load_n as it
 	 * reports a slightly better perf, which comes from the saved
 	 * branch by the compiler.
-	 * The if and else branches are identical with the smp and cio
+	 * The if and else branches are identical with the smp and io
 	 * barriers both defined as compiler barriers on x86.
 	 */
 #ifdef RTE_ARCH_X86_64
@@ -492,7 +505,7 @@ virtqueue_nused(const struct virtqueue *vq)
 #endif
 	} else {
 		idx = vq->vq_split.ring.used->idx;
-		rte_cio_rmb();
+		rte_io_rmb();
 	}
 	return idx - vq->vq_used_cons_idx;
 }
@@ -505,8 +518,24 @@ void vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx,
 static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
-	virtio_wmb(vq->hw->weak_barriers);
-	vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+	if (vq->hw->weak_barriers) {
+	/* x86 prefers to using rte_smp_wmb over __atomic_store_n as
+	 * it reports a slightly better perf, which comes from the
+	 * saved branch by the compiler.
+	 * The if and else branches are identical with the smp and
+	 * io barriers both defined as compiler barriers on x86.
+	 */
+#ifdef RTE_ARCH_X86_64
+		rte_smp_wmb();
+		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+#else
+		__atomic_store_n(&vq->vq_split.ring.avail->idx,
+				 vq->vq_avail_idx, __ATOMIC_RELEASE);
+#endif
+	} else {
+		rte_io_wmb();
+		vq->vq_split.ring.avail->idx = vq->vq_avail_idx;
+	}
 }
 
 static inline void
@@ -591,10 +620,10 @@ virtqueue_notify(struct virtqueue *vq)
 
 /* avoid write operation when necessary, to lessen cache issues */
 #define ASSIGN_UNLESS_EQUAL(var, val) do {	\
-	typeof(var) var_ = (var);		\
-	typeof(val) val_ = (val);		\
-	if ((var_) != (val_))			\
-		(var_) = (val_);		\
+	typeof(var) *const var_ = &(var);	\
+	typeof(val) const val_ = (val);		\
+	if (*var_ != val_)			\
+		*var_ = val_;			\
 } while (0)
 
 #define virtqueue_clear_net_hdr(hdr) do {		\
@@ -657,7 +686,8 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
 
 static inline void
 virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
-			      uint16_t needed, int can_push, int in_order)
+			      uint16_t needed, int use_indirect, int can_push,
+			      int in_order)
 {
 	struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
 	struct vq_desc_extra *dxp;
@@ -668,6 +698,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 	struct virtio_net_hdr *hdr;
 	uint16_t prev;
 	bool prepend_header = false;
+	uint16_t seg_num = cookie->nb_segs;
 
 	id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx;
 
@@ -693,6 +724,25 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		/* if offload disabled, it is not zeroed below, do it now */
 		if (!vq->hw->has_tx_offload)
 			virtqueue_clear_net_hdr(hdr);
+	} else if (use_indirect) {
+		/* setup tx ring slot to point to indirect
+		 * descriptor list stored in reserved region.
+		 *
+		 * the first slot in indirect ring is already preset
+		 * to point to the header in reserved region
+		 */
+		start_dp[idx].addr = txvq->virtio_net_hdr_mem +
+			RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
+		start_dp[idx].len = (seg_num + 1) *
+			sizeof(struct vring_packed_desc);
+		/* reset flags for indirect desc */
+		head_flags = VRING_DESC_F_INDIRECT;
+		head_flags |= vq->vq_packed.cached_flags;
+		hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
+
+		/* loop below will fill in rest of the indirect elements */
+		start_dp = txr[idx].tx_packed_indir;
+		idx = 1;
 	} else {
 		/* setup first tx ring slot to point to header
 		 * stored in reserved region.
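
The ASSIGN_UNLESS_EQUAL() hunk above changes the macro from copying var into a local, where the conditional store could only ever update the copy, to taking var's address and writing through it, while still evaluating var and val exactly once. A standalone sketch contrasting the two forms (the _OLD/_NEW suffixes exist only in this sketch, not in the patch):

/* Standalone sketch (not part of the patch): with the old macro the
 * caller's field keeps its value; with the new one it is updated.
 * Requires GCC/Clang for typeof, as in the original.
 */
#include <stdint.h>
#include <stdio.h>

#define ASSIGN_UNLESS_EQUAL_OLD(var, val) do {	\
	typeof(var) var_ = (var);		\
	typeof(val) val_ = (val);		\
	if ((var_) != (val_))			\
		(var_) = (val_);		\
} while (0)

#define ASSIGN_UNLESS_EQUAL_NEW(var, val) do {	\
	typeof(var) *const var_ = &(var);	\
	typeof(val) const val_ = (val);		\
	if (*var_ != val_)			\
		*var_ = val_;			\
} while (0)

int
main(void)
{
	uint16_t csum_start = 42;	/* stand-in for a net header field */

	ASSIGN_UNLESS_EQUAL_OLD(csum_start, 0);
	printf("old macro: csum_start = %u\n", csum_start);	/* prints 42 */

	ASSIGN_UNLESS_EQUAL_NEW(csum_start, 0);
	printf("new macro: csum_start = %u\n", csum_start);	/* prints 0 */

	return 0;
}
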
@@ -738,6 +788,15 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 
 	start_dp[prev].id = id;
 
+	if (use_indirect) {
+		idx = head_idx;
+		if (++idx >= vq->vq_nentries) {
+			idx -= vq->vq_nentries;
+			vq->vq_packed.cached_flags ^=
+				VRING_PACKED_DESC_F_AVAIL_USED;
+		}
+	}
+
 	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
 	vq->vq_avail_idx = idx;
 
@@ -777,7 +836,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
 	struct vq_desc_extra *dxp;
 
 	used_idx = vq->vq_used_cons_idx;
-	/* desc_is_used has a load-acquire or rte_cio_rmb inside
+	/* desc_is_used has a load-acquire or rte_io_rmb inside
 	 * and wait for used desc in virtqueue.
 	 */
 	while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
@@ -811,7 +870,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
 	struct vq_desc_extra *dxp;
 
 	used_idx = vq->vq_used_cons_idx;
-	/* desc_is_used has a load-acquire or rte_cio_rmb inside
+	/* desc_is_used has a load-acquire or rte_io_rmb inside
 	 * and wait for used desc in virtqueue.
 	 */
 	while (num-- && desc_is_used(&desc[used_idx], vq)) {
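
The use_indirect handling above reflects that an indirect transmit consumes only the head slot of the packed ring, so after the indirect table is filled the avail index is recomputed from head_idx, and stepping past the last entry both wraps the index and toggles the cached avail/used wrap flags used for future descriptors. A standalone sketch of that single-slot advance (struct toy_vq and advance_one_slot() are stand-ins for the driver's vq state, not part of the patch):

/* Standalone sketch (not part of the patch): single-slot advance with
 * packed-ring wrap handling.  Flag values follow the virtio spec.
 */
#include <stdint.h>
#include <stdio.h>

#define VRING_PACKED_DESC_F_AVAIL	(1 << 7)
#define VRING_PACKED_DESC_F_USED	(1 << 15)
#define VRING_PACKED_DESC_F_AVAIL_USED	\
	(VRING_PACKED_DESC_F_AVAIL | VRING_PACKED_DESC_F_USED)

struct toy_vq {
	uint16_t vq_nentries;
	uint16_t vq_avail_idx;
	uint16_t cached_flags;
};

static void
advance_one_slot(struct toy_vq *vq, uint16_t head_idx)
{
	uint16_t idx = head_idx;

	/* same arithmetic as the use_indirect branch in the patch */
	if (++idx >= vq->vq_nentries) {
		idx -= vq->vq_nentries;
		vq->cached_flags ^= VRING_PACKED_DESC_F_AVAIL_USED;
	}
	vq->vq_avail_idx = idx;
}

int
main(void)
{
	struct toy_vq vq = {
		.vq_nentries = 256,
		.vq_avail_idx = 0,
		.cached_flags = VRING_PACKED_DESC_F_AVAIL,
	};

	advance_one_slot(&vq, 10);	/* mid-ring: idx=11, flags unchanged */
	printf("idx=%u flags=0x%x\n", vq.vq_avail_idx, vq.cached_flags);

	advance_one_slot(&vq, 255);	/* last slot: idx wraps to 0, flags flip */
	printf("idx=%u flags=0x%x\n", vq.vq_avail_idx, vq.cached_flags);

	return 0;
}
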