X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_virtio%2Fvirtqueue.h;h=fdee0547dd82c9b5b84a126d9bf617087b483315;hb=62945e029e86bba5fb833e969c3c22f84d58c1cb;hp=48b5092c94c8c288acd1117f67ca76a4b2c57e73;hpb=3031749c2df04a63cdcef186dcce3781e61436e8;p=dpdk.git

diff --git a/lib/librte_pmd_virtio/virtqueue.h b/lib/librte_pmd_virtio/virtqueue.h
index 48b5092c94..fdee0547dd 100644
--- a/lib/librte_pmd_virtio/virtqueue.h
+++ b/lib/librte_pmd_virtio/virtqueue.h
@@ -59,8 +59,7 @@
 #define VIRTQUEUE_MAX_NAME_SZ 32
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
-	(uint64_t) ((mb)->buf_physaddr + (uint64_t)((char *)((mb)->pkt.data) - \
-	(char *)(mb)->buf_addr))
+	(uint64_t) ((mb)->buf_physaddr + (mb)->data_off)
 
 #define VTNET_SQ_RQ_QUEUE_IDX 0
 #define VTNET_SQ_TQ_QUEUE_IDX 1
@@ -122,7 +121,6 @@ struct virtio_pmd_ctrl {
 };
 
 struct virtqueue {
-	char vq_name[VIRTQUEUE_MAX_NAME_SZ];
 	struct virtio_hw *hw; /**< virtio_hw structure pointer. */
 	const struct rte_memzone *mz; /**< mem zone to populate RX ring. */
 	const struct rte_memzone *virtio_net_hdr_mz; /**< memzone to populate hdr. */
@@ -152,7 +150,12 @@ struct virtqueue {
 	 */
 	uint16_t vq_used_cons_idx;
 	uint16_t vq_avail_idx;
-	void *virtio_net_hdr_mem; /**< hdr for each xmit packet */
+	phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */
+
+	/* Statistics */
+	uint64_t packets;
+	uint64_t bytes;
+	uint64_t errors;
 
 	struct vq_desc_extra {
 		void *cookie;
@@ -209,24 +212,24 @@ void virtqueue_dump(struct virtqueue *vq);
 /**
  * Get all mbufs to be freed.
  */
-struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
 
 static inline int
 virtqueue_full(const struct virtqueue *vq)
 {
-	return (vq->vq_free_cnt == 0);
+	return vq->vq_free_cnt == 0;
 }
 
 #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
 
-static inline void __attribute__((always_inline))
+static inline void
 vq_update_avail_idx(struct virtqueue *vq)
 {
 	rte_compiler_barrier();
 	vq->vq_ring.avail->idx = vq->vq_avail_idx;
 }
 
-static inline void __attribute__((always_inline))
+static inline void
 vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
 {
 	uint16_t avail_idx;
@@ -242,13 +245,13 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx)
 	vq->vq_avail_idx++;
 }
 
-static inline int __attribute__((always_inline))
-virtqueue_kick_prepare(struct virtqueue * vq)
+static inline int
+virtqueue_kick_prepare(struct virtqueue *vq)
 {
 	return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
 }
 
-static inline void __attribute__((always_inline))
+static inline void
 virtqueue_notify(struct virtqueue *vq)
 {
 	/*
@@ -259,177 +262,16 @@ virtqueue_notify(struct virtqueue *vq)
 	VIRTIO_WRITE_REG_2(vq->hw, VIRTIO_PCI_QUEUE_NOTIFY, vq->vq_queue_index);
 }
 
-static inline void __attribute__((always_inline))
-vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
-{
-	struct vring_desc *dp, *dp_tail;
-	struct vq_desc_extra *dxp;
-	uint16_t desc_idx_last = desc_idx;
-
-	dp = &vq->vq_ring.desc[desc_idx];
-	dxp = &vq->vq_descx[desc_idx];
-	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
-	if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
-		while (dp->flags & VRING_DESC_F_NEXT) {
-			desc_idx_last = dp->next;
-			dp = &vq->vq_ring.desc[dp->next];
-		}
-	}
-	dxp->ndescs = 0;
-
-	/*
-	 * We must append the existing free chain, if any, to the end of
-	 * newly freed chain. If the virtqueue was completely used, then
-	 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
-	 */
-	if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
-		vq->vq_desc_head_idx = desc_idx;
-	} else {
-		dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
-		dp_tail->next = desc_idx;
-	}
-
-	vq->vq_desc_tail_idx = desc_idx_last;
-	dp->next = VQ_RING_DESC_CHAIN_END;
-}
-
-static inline int __attribute__((always_inline))
-virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
-{
-	struct vq_desc_extra *dxp;
-	struct vring_desc *start_dp;
-	uint16_t needed;
-	uint16_t head_idx, idx;
-	needed = 1;
-
-	if (unlikely(vq->vq_free_cnt == 0))
-		return (-ENOSPC);
-	if (unlikely(vq->vq_free_cnt < needed))
-		return (-EMSGSIZE);
-
-	head_idx = vq->vq_desc_head_idx;
-	if (unlikely(head_idx >= vq->vq_nentries))
-		return (-EFAULT);
-
-	idx = head_idx;
-	dxp = &vq->vq_descx[idx];
-	dxp->cookie = (void *)cookie;
-	dxp->ndescs = needed;
-
-	start_dp = vq->vq_ring.desc;
-	start_dp[idx].addr =
-		(uint64_t) (cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
-	start_dp[idx].len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
-	start_dp[idx].flags = VRING_DESC_F_WRITE;
-	idx = start_dp[idx].next;
-	vq->vq_desc_head_idx = idx;
-	if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-		vq->vq_desc_tail_idx = idx;
-	vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
-	vq_update_avail_ring(vq, head_idx);
-
-	return (0);
-}
-
-static inline int __attribute__((always_inline))
-virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
-{
-	struct vq_desc_extra *dxp;
-	struct vring_desc *start_dp;
-	uint16_t needed;
-	uint16_t head_idx, idx;
-	needed = 2;
-	if (unlikely(txvq->vq_free_cnt == 0))
-		return (-ENOSPC);
-	if (unlikely(txvq->vq_free_cnt < needed))
-		return (-EMSGSIZE);
-	head_idx = txvq->vq_desc_head_idx;
-	if (unlikely(head_idx >= txvq->vq_nentries))
-		return (-EFAULT);
-
-	idx = head_idx;
-	dxp = &txvq->vq_descx[idx];
-	if (dxp->cookie != NULL)
-		rte_pktmbuf_free_seg(dxp->cookie);
-	dxp->cookie = (void *)cookie;
-	dxp->ndescs = needed;
-
-	start_dp = txvq->vq_ring.desc;
-	start_dp[idx].addr = (uint64_t)(uintptr_t)txvq->virtio_net_hdr_mem + idx * sizeof(struct virtio_net_hdr);
-	start_dp[idx].len = sizeof(struct virtio_net_hdr);
-	start_dp[idx].flags = VRING_DESC_F_NEXT;
-	idx = start_dp[idx].next;
-	start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
-	start_dp[idx].len = cookie->pkt.data_len;
-	start_dp[idx].flags = 0;
-	idx = start_dp[idx].next;
-	txvq->vq_desc_head_idx = idx;
-	if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
-		txvq->vq_desc_tail_idx = idx;
-	txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
-	vq_update_avail_ring(txvq, head_idx);
-
-	return (0);
-}
-
-static inline uint16_t __attribute__((always_inline))
-virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
-{
-	struct vring_used_elem *uep;
-	struct rte_mbuf *cookie;
-	uint16_t used_idx, desc_idx;
-	uint16_t i;
-
-	/* Caller does the check */
-	for (i = 0; i < num ; i ++) {
-		used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-		uep = &vq->vq_ring.used->ring[used_idx];
-		desc_idx = (uint16_t) uep->id;
-		len[i] = uep->len;
-		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
-
-		if (unlikely(cookie == NULL)) {
-			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
-				vq->vq_used_cons_idx);
-			break;
-		}
-
-		rte_prefetch0(cookie);
-		rte_packet_prefetch(cookie->pkt.data);
-		rx_pkts[i] = cookie;
-		vq->vq_used_cons_idx++;
-		vq_ring_free_chain(vq, desc_idx);
-		vq->vq_descx[desc_idx].cookie = NULL;
-	}
-
-	return (i);
-}
-
-static inline uint16_t __attribute__((always_inline))
-virtqueue_dequeue_pkt_tx(struct virtqueue *vq)
-{
-	struct vring_used_elem *uep;
-	uint16_t used_idx, desc_idx;
-
-	used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
-	uep = &vq->vq_ring.used->ring[used_idx];
-	desc_idx = (uint16_t) uep->id;
-	vq->vq_used_cons_idx++;
-	vq_ring_free_chain(vq, desc_idx);
-
-	return 0;
-}
-
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP 
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTQUEUE_DUMP(vq) do { \
 	uint16_t used_idx, nused; \
 	used_idx = (vq)->vq_ring.used->idx; \
 	nused = (uint16_t)(used_idx - (vq)->vq_used_cons_idx); \
 	PMD_INIT_LOG(DEBUG, \
-	  "VQ: %s - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
+	  "VQ: - size=%d; free=%d; used=%d; desc_head_idx=%d;" \
 	  " avail.idx=%d; used_cons_idx=%d; used.idx=%d;" \
-	  " avail.flags=0x%x; used.flags=0x%x\n", \
-	  (vq)->vq_name, (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
+	  " avail.flags=0x%x; used.flags=0x%x", \
+	  (vq)->vq_nentries, (vq)->vq_free_cnt, nused, \
 	  (vq)->vq_desc_head_idx, (vq)->vq_ring.avail->idx, \
 	  (vq)->vq_used_cons_idx, (vq)->vq_ring.used->idx, \
 	  (vq)->vq_ring.avail->flags, (vq)->vq_ring.used->flags); \
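
For readers cross-checking the first hunk: the rewritten RTE_MBUF_DATA_DMA_ADDR relies on the reworked mbuf layout, where data_off stores the offset of the packet data from buf_addr, so buf_physaddr + data_off equals the old "physaddr + (data pointer - buffer start)" arithmetic. Below is a minimal sketch of that equivalence; toy_mbuf and the two helpers are hypothetical names for illustration only, not the real struct rte_mbuf or any DPDK API.

#include <stdint.h>

/* Hypothetical trimmed-down mbuf: only the fields the macro touches. */
struct toy_mbuf {
	void     *buf_addr;     /* virtual address of the data buffer */
	uint64_t  buf_physaddr; /* physical address of the same buffer */
	uint16_t  data_off;     /* start of packet data within the buffer */
};

/* Old form: physical address plus (data pointer - buffer start). */
static inline uint64_t
dma_addr_old(const struct toy_mbuf *mb, const char *pkt_data)
{
	return mb->buf_physaddr +
		(uint64_t)(pkt_data - (const char *)mb->buf_addr);
}

/* New form: physical address plus data_off; identical to the old form
 * whenever pkt_data == (char *)mb->buf_addr + mb->data_off. */
static inline uint64_t
dma_addr_new(const struct toy_mbuf *mb)
{
	return mb->buf_physaddr + mb->data_off;
}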