X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtqueue.h;h=5baac221f7923fb40b5548db3dee9e09ac71438b;hb=4382a7ccf7815c139c9507618fd8e046fa4d927e;hp=17e76f0e8c928f833ef27fc4d8c682aac749a863;hpb=3169550f03c121616cf8995521b956701b837175;p=dpdk.git

diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index 17e76f0e8c..5baac221f7 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -113,6 +113,25 @@ virtqueue_store_flags_packed(struct vring_packed_desc *dp,
 
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
+/**
+ * Return the IOVA (or virtual address in case of virtio-user) of mbuf
+ * data buffer.
+ *
+ * The address is firstly casted to the word size (sizeof(uintptr_t))
+ * before casting it to uint64_t. This is to make it work with different
+ * combination of word size (64 bit and 32 bit) and virtio device
+ * (virtio-pci and virtio-user).
+ */
+#define VIRTIO_MBUF_ADDR(mb, vq) \
+	((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->mbuf_addr_offset)))
+
+/**
+ * Return the physical address (or virtual address in case of
+ * virtio-user) of mbuf data buffer, taking care of mbuf data offset
+ */
+#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
+	(VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)
+
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
 #define VTNET_SQ_CQ_QUEUE_IDX 2
@@ -244,8 +263,18 @@ struct virtqueue {
 	uint16_t vq_avail_idx; /**< sync until needed */
 	uint16_t vq_free_thresh; /**< free threshold */
 
+	/**
+	 * Head of the free chain in the descriptor table. If
+	 * there are no free descriptors, this will be set to
+	 * VQ_RING_DESC_CHAIN_END.
+	 */
+	uint16_t vq_desc_head_idx;
+	uint16_t vq_desc_tail_idx;
+	uint16_t vq_queue_index;   /**< PCI queue index */
+
 	void *vq_ring_virt_mem;  /**< linear address of vring*/
 	unsigned int vq_ring_size;
+	uint16_t mbuf_addr_offset;
 
 	union {
 		struct virtnet_rx rxq;
@@ -256,15 +285,6 @@ struct virtqueue {
 	rte_iova_t vq_ring_mem; /**< physical address of vring,
 	                         * or virtual address for virtio_user. */
 
-	/**
-	 * Head of the free chain in the descriptor table. If
-	 * there are no free descriptors, this will be set to
-	 * VQ_RING_DESC_CHAIN_END.
-	 */
-	uint16_t vq_desc_head_idx;
-	uint16_t vq_desc_tail_idx;
-	uint16_t vq_queue_index;
-	uint16_t offset; /**< relative offset to obtain addr in mbuf */
 	uint16_t *notify_addr;
 	struct rte_mbuf **sw_ring;  /**< RX software ring. */
 	struct vq_desc_extra vq_descx[0];
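Editor's note: the sketch below is not part of the patch; it only illustrates the intent of VIRTIO_MBUF_ADDR and the new mbuf_addr_offset field. The assumption (based on how the removed "offset" field was used) is that mbuf_addr_offset is initialized at queue setup to the offset of either the mbuf's buf_iova field (virtio-pci) or its buf_addr field (virtio-user), so the same macro yields an IOVA or a virtual address depending on the device type. example_set_mbuf_addr_offset() and example_mbuf_addr() are hypothetical helpers.

/* Illustrative only -- not functions from the patch. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <rte_mbuf.h>

static inline uint16_t
example_set_mbuf_addr_offset(bool use_virtual_addr)
{
	/* Assumed setup-time choice: point the macro at buf_addr for
	 * virtio-user (virtual addresses), at buf_iova for virtio-pci. */
	return use_virtual_addr ? offsetof(struct rte_mbuf, buf_addr) :
				  offsetof(struct rte_mbuf, buf_iova);
}

static inline uint64_t
example_mbuf_addr(const struct rte_mbuf *mb, uint16_t mbuf_addr_offset)
{
	/* Same pattern as VIRTIO_MBUF_ADDR: read a word-sized field at the
	 * configured offset, then widen it to 64 bits, so the expression
	 * works for both 32-bit and 64-bit builds. */
	return (uint64_t)(*(const uintptr_t *)
			  ((uintptr_t)mb + mbuf_addr_offset));
}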
@@ -617,50 +637,44 @@ virtqueue_notify(struct virtqueue *vq)
 } while (0)
 
 static inline void
-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
-			struct rte_mbuf *cookie,
-			uint8_t offload)
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
 {
-	if (offload) {
-		if (cookie->ol_flags & PKT_TX_TCP_SEG)
-			cookie->ol_flags |= PKT_TX_TCP_CKSUM;
-
-		switch (cookie->ol_flags & PKT_TX_L4_MASK) {
-		case PKT_TX_UDP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct rte_udp_hdr,
-				dgram_cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
-
-		case PKT_TX_TCP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
-
-		default:
-			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-			break;
-		}
+	uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+
+	if (cookie->ol_flags & PKT_TX_TCP_SEG)
+		csum_l4 |= PKT_TX_TCP_CKSUM;
+
+	switch (csum_l4) {
+	case PKT_TX_UDP_CKSUM:
+		hdr->csum_start = cookie->l2_len + cookie->l3_len;
+		hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
+		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		break;
+
+	case PKT_TX_TCP_CKSUM:
+		hdr->csum_start = cookie->l2_len + cookie->l3_len;
+		hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		break;
+
+	default:
+		ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+		break;
+	}
 
-		/* TCP Segmentation Offload */
-		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
-				VIRTIO_NET_HDR_GSO_TCPV6 :
-				VIRTIO_NET_HDR_GSO_TCPV4;
-			hdr->gso_size = cookie->tso_segsz;
-			hdr->hdr_len =
-				cookie->l2_len +
-				cookie->l3_len +
-				cookie->l4_len;
-		} else {
-			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-		}
+	/* TCP Segmentation Offload */
+	if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+		hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+			VIRTIO_NET_HDR_GSO_TCPV6 :
+			VIRTIO_NET_HDR_GSO_TCPV4;
+		hdr->gso_size = cookie->tso_segsz;
+		hdr->hdr_len = cookie->l2_len + cookie->l3_len + cookie->l4_len;
+	} else {
+		ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
 	}
 }
 
@@ -739,12 +753,13 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		}
 	}
 
-	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+	if (vq->hw->has_tx_offload)
+		virtqueue_xmit_offload(hdr, cookie);
 
 	do {
 		uint16_t flags;
 
-		start_dp[idx].addr = rte_mbuf_data_iova(cookie);
+		start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
 		start_dp[idx].len = cookie->data_len;
 		if (prepend_header) {
 			start_dp[idx].addr -= head_size;
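Editor's note: a minimal usage sketch, not taken from the patch. It shows how a caller-prepared mbuf maps onto the virtio_net_hdr fields that the reworked virtqueue_xmit_offload() fills; example_request_tcp_cksum() is a hypothetical helper, and the has_tx_offload check now lives at the call site (see the @@ -739 hunk above).

/* Hypothetical illustration; flag and field names are the standard DPDK
 * mbuf ones used by this code (PKT_TX_*). */
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

static void
example_request_tcp_cksum(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);
	m->l3_len = sizeof(struct rte_ipv4_hdr);
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
	/* For such an mbuf, virtqueue_xmit_offload() sets:
	 *   hdr->csum_start  = m->l2_len + m->l3_len
	 *   hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum)
	 *   hdr->flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM
	 * and leaves the GSO fields zeroed because PKT_TX_TCP_SEG is unset.
	 */
}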
@@ -808,25 +823,26 @@ vq_ring_free_id_packed(struct virtqueue *vq, uint16_t id)
 }
 
 static void
-virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
+virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, uint16_t num)
 {
 	uint16_t used_idx, id, curr_id, free_cnt = 0;
 	uint16_t size = vq->vq_nentries;
 	struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
 	struct vq_desc_extra *dxp;
+	int nb = num;
 
 	used_idx = vq->vq_used_cons_idx;
 	/* desc_is_used has a load-acquire or rte_io_rmb inside
 	 * and wait for used desc in virtqueue.
 	 */
-	while (num > 0 && desc_is_used(&desc[used_idx], vq)) {
+	while (nb > 0 && desc_is_used(&desc[used_idx], vq)) {
 		id = desc[used_idx].id;
 		do {
 			curr_id = used_idx;
 			dxp = &vq->vq_descx[used_idx];
 			used_idx += dxp->ndescs;
 			free_cnt += dxp->ndescs;
-			num -= dxp->ndescs;
+			nb -= dxp->ndescs;
 			if (used_idx >= size) {
 				used_idx -= size;
 				vq->vq_packed.used_wrap_counter ^= 1;
@@ -842,7 +858,7 @@ virtio_xmit_cleanup_inorder_packed(struct virtqueue *vq, int num)
 }
 
 static void
-virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
+virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, uint16_t num)
 {
 	uint16_t used_idx, id;
 	uint16_t size = vq->vq_nentries;
@@ -872,7 +888,7 @@ virtio_xmit_cleanup_normal_packed(struct virtqueue *vq, int num)
 
 /* Cleanup from completed transmits. */
 static inline void
-virtio_xmit_cleanup_packed(struct virtqueue *vq, int num, int in_order)
+virtio_xmit_cleanup_packed(struct virtqueue *vq, uint16_t num, int in_order)
 {
 	if (in_order)
 		virtio_xmit_cleanup_inorder_packed(vq, num);
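Editor's note: a small sketch (not from the patch) of the used-index bookkeeping that virtio_xmit_cleanup_inorder_packed() performs while walking completed descriptors; example_advance_used_idx() is a hypothetical helper.

#include <stdbool.h>
#include <stdint.h>

/* Advance the used index by the number of descriptors a completed entry
 * occupied; when it runs past the end of the ring, wrap it around and flip
 * the packed-ring wrap counter, as the cleanup loop above does. */
static inline uint16_t
example_advance_used_idx(uint16_t used_idx, uint16_t ndescs,
		uint16_t ring_size, bool *wrap_counter)
{
	used_idx += ndescs;
	if (used_idx >= ring_size) {
		used_idx -= ring_size;
		*wrap_counter = !*wrap_counter;
	}
	return used_idx;
}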