X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtqueue.h;h=e9992b745d86dc5406128b13bee87c892c84ed54;hb=55c1238246d53d196420fd2768dd3d4210ab654b;hp=6ed50648c4974a5703d5574a73c4672e4d0f8efe;hpb=a1412e05caa2678757156d4de2755ab5140ecc24;p=dpdk.git diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h index 6ed50648c4..e9992b745d 100644 --- a/drivers/net/virtio/virtqueue.h +++ b/drivers/net/virtio/virtqueue.h @@ -12,7 +12,7 @@ #include #include -#include "virtio_pci.h" +#include "virtio.h" #include "virtio_ring.h" #include "virtio_logs.h" #include "virtio_rxtx.h" @@ -26,7 +26,7 @@ struct rte_mbuf; /* * Per virtio_ring.h in Linux. * For virtio_pci on SMP, we don't need to order with respect to MMIO - * accesses through relaxed memory I/O windows, so smp_mb() et al are + * accesses through relaxed memory I/O windows, so thread_fence is * sufficient. * * For using virtio to talk to real devices (eg. vDPA) we do need real @@ -36,7 +36,7 @@ static inline void virtio_mb(uint8_t weak_barriers) { if (weak_barriers) - rte_smp_mb(); + rte_atomic_thread_fence(__ATOMIC_SEQ_CST); else rte_mb(); } @@ -45,18 +45,18 @@ static inline void virtio_rmb(uint8_t weak_barriers) { if (weak_barriers) - rte_smp_rmb(); + rte_atomic_thread_fence(__ATOMIC_ACQUIRE); else - rte_cio_rmb(); + rte_io_rmb(); } static inline void virtio_wmb(uint8_t weak_barriers) { if (weak_barriers) - rte_smp_wmb(); + rte_atomic_thread_fence(__ATOMIC_RELEASE); else - rte_cio_wmb(); + rte_io_wmb(); } static inline uint16_t @@ -66,20 +66,19 @@ virtqueue_fetch_flags_packed(struct vring_packed_desc *dp, uint16_t flags; if (weak_barriers) { -/* x86 prefers to using rte_smp_rmb over __atomic_load_n as it reports +/* x86 prefers to using rte_io_rmb over __atomic_load_n as it reports * a better perf(~1.5%), which comes from the saved branch by the compiler. - * The if and else branch are identical with the smp and cio barriers both - * defined as compiler barriers on x86. + * The if and else branch are identical on the platforms except Arm. */ -#ifdef RTE_ARCH_X86_64 - flags = dp->flags; - rte_smp_rmb(); -#else +#ifdef RTE_ARCH_ARM flags = __atomic_load_n(&dp->flags, __ATOMIC_ACQUIRE); +#else + flags = dp->flags; + rte_io_rmb(); #endif } else { flags = dp->flags; - rte_cio_rmb(); + rte_io_rmb(); } return flags; @@ -90,22 +89,22 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp, uint16_t flags, uint8_t weak_barriers) { if (weak_barriers) { -/* x86 prefers to using rte_smp_wmb over __atomic_store_n as it reports +/* x86 prefers to using rte_io_wmb over __atomic_store_n as it reports * a better perf(~1.5%), which comes from the saved branch by the compiler. - * The if and else branch are identical with the smp and cio barriers both - * defined as compiler barriers on x86. + * The if and else branch are identical on the platforms except Arm. */ -#ifdef RTE_ARCH_X86_64 - rte_smp_wmb(); - dp->flags = flags; -#else +#ifdef RTE_ARCH_ARM __atomic_store_n(&dp->flags, flags, __ATOMIC_RELEASE); +#else + rte_io_wmb(); + dp->flags = flags; #endif } else { - rte_cio_wmb(); + rte_io_wmb(); dp->flags = flags; } } + #ifdef RTE_PMD_PACKET_PREFETCH #define rte_packet_prefetch(p) rte_prefetch1(p) #else @@ -114,29 +113,6 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp, #define VIRTQUEUE_MAX_NAME_SZ 32 -#ifdef RTE_VIRTIO_USER -/** - * Return the physical address (or virtual address in case of - * virtio-user) of mbuf data buffer. 
- * - * The address is firstly casted to the word size (sizeof(uintptr_t)) - * before casting it to uint64_t. This is to make it work with different - * combination of word size (64 bit and 32 bit) and virtio device - * (virtio-pci and virtio-user). - */ -#define VIRTIO_MBUF_ADDR(mb, vq) \ - ((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset))) -#else -#define VIRTIO_MBUF_ADDR(mb, vq) ((mb)->buf_iova) -#endif - -/** - * Return the physical address (or virtual address in case of - * virtio-user) of mbuf data buffer, taking care of mbuf data offset - */ -#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \ - (VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off) - #define VTNET_SQ_RQ_QUEUE_IDX 0 #define VTNET_SQ_TQ_QUEUE_IDX 1 #define VTNET_SQ_CQ_QUEUE_IDX 2 @@ -241,6 +217,10 @@ struct vq_desc_extra { uint16_t next; }; +#define virtnet_rxq_to_vq(rxvq) container_of(rxvq, struct virtqueue, rxq) +#define virtnet_txq_to_vq(txvq) container_of(txvq, struct virtqueue, txq) +#define virtnet_cq_to_vq(cvq) container_of(cvq, struct virtqueue, cq) + struct virtqueue { struct virtio_hw *hw; /**< virtio_hw structure pointer. */ union { @@ -264,6 +244,15 @@ struct virtqueue { uint16_t vq_avail_idx; /**< sync until needed */ uint16_t vq_free_thresh; /**< free threshold */ + /** + * Head of the free chain in the descriptor table. If + * there are no free descriptors, this will be set to + * VQ_RING_DESC_CHAIN_END. + */ + uint16_t vq_desc_head_idx; + uint16_t vq_desc_tail_idx; + uint16_t vq_queue_index; /**< PCI queue index */ + void *vq_ring_virt_mem; /**< linear address of vring*/ unsigned int vq_ring_size; @@ -276,15 +265,6 @@ struct virtqueue { rte_iova_t vq_ring_mem; /**< physical address of vring, * or virtual address for virtio_user. */ - /** - * Head of the free chain in the descriptor table. If - * there are no free descriptors, this will be set to - * VQ_RING_DESC_CHAIN_END. - */ - uint16_t vq_desc_head_idx; - uint16_t vq_desc_tail_idx; - uint16_t vq_queue_index; /**< PCI queue index */ - uint16_t offset; /**< relative offset to obtain addr in mbuf */ uint16_t *notify_addr; struct rte_mbuf **sw_ring; /**< RX software ring. */ struct vq_desc_extra vq_descx[0]; @@ -329,8 +309,11 @@ struct virtio_net_hdr_mrg_rxbuf { #define VIRTIO_MAX_TX_INDIRECT 8 struct virtio_tx_region { struct virtio_net_hdr_mrg_rxbuf tx_hdr; - struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT] - __rte_aligned(16); + union { + struct vring_desc tx_indir[VIRTIO_MAX_TX_INDIRECT]; + struct vring_packed_desc + tx_packed_indir[VIRTIO_MAX_TX_INDIRECT]; + } __rte_aligned(16); }; static inline int @@ -368,6 +351,16 @@ vring_desc_init_split(struct vring_desc *dp, uint16_t n) dp[i].next = VQ_RING_DESC_CHAIN_END; } +static inline void +vring_desc_init_indirect_packed(struct vring_packed_desc *dp, int n) +{ + int i; + for (i = 0; i < n; i++) { + dp[i].id = (uint16_t)i; + dp[i].flags = VRING_DESC_F_WRITE; + } +} + /** * Tell the backend not to interrupt us. Implementation for packed virtqueues. 
*/ @@ -396,7 +389,7 @@ virtqueue_disable_intr_split(struct virtqueue *vq) static inline void virtqueue_disable_intr(struct virtqueue *vq) { - if (vtpci_packed_queue(vq->hw)) + if (virtio_with_packed_queue(vq->hw)) virtqueue_disable_intr_packed(vq); else virtqueue_disable_intr_split(vq); @@ -430,7 +423,7 @@ virtqueue_enable_intr_split(struct virtqueue *vq) static inline void virtqueue_enable_intr(struct virtqueue *vq) { - if (vtpci_packed_queue(vq->hw)) + if (virtio_with_packed_queue(vq->hw)) virtqueue_enable_intr_packed(vq); else virtqueue_enable_intr_split(vq); @@ -459,17 +452,17 @@ virtqueue_full(const struct virtqueue *vq) } static inline int -virtio_get_queue_type(struct virtio_hw *hw, uint16_t vtpci_queue_idx) +virtio_get_queue_type(struct virtio_hw *hw, uint16_t vq_idx) { - if (vtpci_queue_idx == hw->max_queue_pairs * 2) + if (vq_idx == hw->max_queue_pairs * 2) return VTNET_CQ; - else if (vtpci_queue_idx % 2 == 0) + else if (vq_idx % 2 == 0) return VTNET_RQ; else return VTNET_TQ; } -/* virtqueue_nused has load-acquire or rte_cio_rmb insed */ +/* virtqueue_nused has load-acquire or rte_io_rmb insed */ static inline uint16_t virtqueue_nused(const struct virtqueue *vq) { @@ -480,7 +473,7 @@ virtqueue_nused(const struct virtqueue *vq) * x86 prefers to using rte_smp_rmb over __atomic_load_n as it * reports a slightly better perf, which comes from the saved * branch by the compiler. - * The if and else branches are identical with the smp and cio + * The if and else branches are identical with the smp and io * barriers both defined as compiler barriers on x86. */ #ifdef RTE_ARCH_X86_64 @@ -492,7 +485,7 @@ virtqueue_nused(const struct virtqueue *vq) #endif } else { idx = vq->vq_split.ring.used->idx; - rte_cio_rmb(); + rte_io_rmb(); } return idx - vq->vq_used_cons_idx; } @@ -510,7 +503,7 @@ vq_update_avail_idx(struct virtqueue *vq) * it reports a slightly better perf, which comes from the * saved branch by the compiler. * The if and else branches are identical with the smp and - * cio barriers both defined as compiler barriers on x86. + * io barriers both defined as compiler barriers on x86. 
*/ #ifdef RTE_ARCH_X86_64 rte_smp_wmb(); @@ -520,7 +513,7 @@ vq_update_avail_idx(struct virtqueue *vq) vq->vq_avail_idx, __ATOMIC_RELEASE); #endif } else { - rte_cio_wmb(); + rte_io_wmb(); vq->vq_split.ring.avail->idx = vq->vq_avail_idx; } } @@ -574,7 +567,7 @@ virtqueue_kick_prepare_packed(struct virtqueue *vq) static inline void virtqueue_notify(struct virtqueue *vq) { - VTPCI_OPS(vq->hw)->notify_queue(vq->hw, vq); + VIRTIO_OPS(vq->hw)->notify_queue(vq->hw, vq); } #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP @@ -583,7 +576,7 @@ virtqueue_notify(struct virtqueue *vq) used_idx = __atomic_load_n(&(vq)->vq_split.ring.used->idx, \ __ATOMIC_RELAXED); \ nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \ - if (vtpci_packed_queue((vq)->hw)) { \ + if (virtio_with_packed_queue((vq)->hw)) { \ PMD_INIT_LOG(DEBUG, \ "VQ: - size=%d; free=%d; used_cons_idx=%d; avail_idx=%d;" \ " cached_flags=0x%x; used_wrap_counter=%d", \ @@ -626,7 +619,7 @@ virtqueue_notify(struct virtqueue *vq) static inline void virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie, - bool offload) + uint8_t offload) { if (offload) { if (cookie->ol_flags & PKT_TX_TCP_SEG) @@ -673,17 +666,19 @@ virtqueue_xmit_offload(struct virtio_net_hdr *hdr, static inline void virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, - uint16_t needed, int can_push, int in_order) + uint16_t needed, int use_indirect, int can_push, + int in_order) { struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr; struct vq_desc_extra *dxp; - struct virtqueue *vq = txvq->vq; + struct virtqueue *vq = virtnet_txq_to_vq(txvq); struct vring_packed_desc *start_dp, *head_dp; uint16_t idx, id, head_idx, head_flags; int16_t head_size = vq->hw->vtnet_hdr_size; struct virtio_net_hdr *hdr; uint16_t prev; bool prepend_header = false; + uint16_t seg_num = cookie->nb_segs; id = in_order ? vq->vq_avail_idx : vq->vq_desc_head_idx; @@ -709,6 +704,25 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, /* if offload disabled, it is not zeroed below, do it now */ if (!vq->hw->has_tx_offload) virtqueue_clear_net_hdr(hdr); + } else if (use_indirect) { + /* setup tx ring slot to point to indirect + * descriptor list stored in reserved region. + * + * the first slot in indirect ring is already preset + * to point to the header in reserved region + */ + start_dp[idx].addr = txvq->virtio_net_hdr_mem + + RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr); + start_dp[idx].len = (seg_num + 1) * + sizeof(struct vring_packed_desc); + /* reset flags for indirect desc */ + head_flags = VRING_DESC_F_INDIRECT; + head_flags |= vq->vq_packed.cached_flags; + hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr; + + /* loop below will fill in rest of the indirect elements */ + start_dp = txr[idx].tx_packed_indir; + idx = 1; } else { /* setup first tx ring slot to point to header * stored in reserved region. 
@@ -730,7 +744,7 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, do { uint16_t flags; - start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq); + start_dp[idx].addr = rte_mbuf_data_iova(cookie); start_dp[idx].len = cookie->data_len; if (prepend_header) { start_dp[idx].addr -= head_size; @@ -754,6 +768,15 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie, start_dp[prev].id = id; + if (use_indirect) { + idx = head_idx; + if (++idx >= vq->vq_nentries) { + idx -= vq->vq_nentries; + vq->vq_packed.cached_flags ^= + VRING_PACKED_DESC_F_AVAIL_USED; + } + } + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); vq->vq_avail_idx = idx; @@ -793,7 +816,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num) struct vq_desc_extra *dxp; used_idx = vq->vq_used_cons_idx; - /* desc_is_used has a load-acquire or rte_cio_rmb inside + /* desc_is_used has a load-acquire or rte_io_rmb inside * and wait for used desc in virtqueue. */ while (num > 0 && desc_is_used(&desc[used_idx], vq)) { @@ -827,7 +850,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num) struct vq_desc_extra *dxp; used_idx = vq->vq_used_cons_idx; - /* desc_is_used has a load-acquire or rte_cio_rmb inside + /* desc_is_used has a load-acquire or rte_io_rmb inside * and wait for used desc in virtqueue. */ while (num-- && desc_is_used(&desc[used_idx], vq)) {