/* The alignment to use between consumer and producer parts of vring. */
#define VIRTIO_PCI_VRING_ALIGN 4096
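For reference, this constant feeds the ring sizing: a minimal sketch, assuming the vring_size() helper from virtio_ring.h, which pads between the avail and used rings so the host-written used ring starts on its own 4 KB page:

	/* Sketch: compute the memory footprint of one vring. */
	size_t sz = vring_size(vq->vq_nentries, VIRTIO_PCI_VRING_ALIGN);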
-/*
- * Address translatio is between gva<->hva,
- * rather than gpa<->hva in virito spec.
- */
-#define RTE_MBUF_DATA_DMA_ADDR(mb) \
- rte_pktmbuf_mtod(mb, uint64_t)
-
enum { VTNET_RQ = 0, VTNET_TQ = 1, VTNET_CQ = 2 };
/**
 * Detach and return one mbuf still held by the virtqueue's descriptors,
 * or NULL when none remain.
 */
struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
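Callers are expected to drain this when a queue is torn down; a hedged usage sketch (assuming the stored cookies are driver-owned mbufs):

	/* Sketch: reclaim every mbuf still parked in the ring, e.g. on stop. */
	struct rte_mbuf *m;

	while ((m = virtqueue_detatch_unused(vq)) != NULL)
		rte_pktmbuf_free(m);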
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtqueue_full(const struct virtqueue *vq)
{
- return (vq->vq_free_cnt == 0);
+ return vq->vq_free_cnt == 0;
}
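A hedged sketch of the intended guard; virtqueue_try_xmit() is a hypothetical wrapper, not part of this header:

	/* Sketch: refuse an enqueue when no descriptors are free. */
	static inline int
	virtqueue_try_xmit(struct virtqueue *vq, struct rte_mbuf *m)
	{
		if (virtqueue_full(vq))
			return -ENOSPC;	/* from <errno.h> */
		return virtqueue_enqueue_xmit(vq, m);
	}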
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
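The uint16_t cast keeps the count correct across wrap-around, since both indices are free-running counters modulo 2^16; a worked example:

	/* Sketch: the host's used->idx has wrapped past 2^16. */
	uint16_t used_idx = 2;		/* producer, post-wrap */
	uint16_t cons_idx = 65535;	/* consumer, pre-wrap */
	uint16_t nused = (uint16_t)(used_idx - cons_idx);	/* == 3 */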
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vq_ring_update_avail(struct virtqueue *vq, uint16_t desc_idx)
{
uint16_t avail_idx;
/*
 * Place the head of the descriptor chain into the next avail slot and
 * expose it to the host.
 */
avail_idx = (uint16_t)(vq->vq_ring.avail->idx & (vq->vq_nentries - 1));
vq->vq_ring.avail->ring[avail_idx] = desc_idx;
- rte_compiler_barrier(); /* wmb , for IA memory model barrier is enough*/
+ rte_smp_wmb();
vq->vq_ring.avail->idx++;
}
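For context, rte_smp_wmb() orders the descriptor and ring-slot stores before the index bump that publishes them to the host; a producer-side sketch (virtqueue_notify() is assumed from elsewhere in this header, desc_head is illustrative):

	/* Sketch: fill, publish, then kick the host. */
	uint16_t desc_head = vq->vq_desc_head_idx;
	/* ... write the descriptor chain rooted at desc_head ... */
	vq_ring_update_avail(vq, desc_head);	/* wmb + avail->idx++ inside */
	virtqueue_notify(vq);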
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
{
struct vring_desc *dp;
vq->vq_desc_head_idx = desc_idx;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtqueue_enqueue_recv_refill(struct virtqueue *rxvq, struct rte_mbuf *cookie)
{
const uint16_t needed = 1;
dxp->ndescs = needed;
start_dp[head_idx].addr =
- (uint64_t) ((uint64_t)cookie->buf_addr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
+ (uint64_t) ((uintptr_t)cookie->buf_addr + RTE_PKTMBUF_HEADROOM - sizeof(struct virtio_net_hdr));
start_dp[head_idx].len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + sizeof(struct virtio_net_hdr);
start_dp[head_idx].flags = VRING_DESC_F_WRITE;
rxvq->vq_desc_head_idx = start_dp[head_idx].next;
return 0;
}
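A hedged sketch of the RX refill loop this serves (the mpool field and the rte_pktmbuf_alloc() call are assumptions about the surrounding driver):

	/* Sketch: top up the RX ring until it is full or the pool runs dry. */
	while (!virtqueue_full(rxvq)) {
		struct rte_mbuf *m = rte_pktmbuf_alloc(rxvq->mpool);

		if (m == NULL)
			break;
		if (virtqueue_enqueue_recv_refill(rxvq, m) != 0) {
			rte_pktmbuf_free(m);
			break;
		}
	}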
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie)
{
start_dp[idx].flags = VRING_DESC_F_NEXT;
start_dp[idx].addr = (uintptr_t)NULL;
idx = start_dp[idx].next;
- start_dp[idx].addr = RTE_MBUF_DATA_DMA_ADDR(cookie);
+ start_dp[idx].addr = (uint64_t)rte_pktmbuf_mtod(cookie, uintptr_t);
start_dp[idx].len = cookie->data_len;
start_dp[idx].flags = 0;
idx = start_dp[idx].next;
return 0;
}
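And a hedged transmit-burst sketch layered on top (it assumes the elided parts of the enqueue publish the chain to the avail ring; virtio_xmit_sketch() is hypothetical):

	/* Sketch: enqueue a burst, then notify the host once. */
	static uint16_t
	virtio_xmit_sketch(struct virtqueue *txvq, struct rte_mbuf **pkts,
			   uint16_t nb_pkts)
	{
		uint16_t nb_tx;

		for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
			if (virtqueue_full(txvq))
				break;	/* out of free descriptors */
			if (virtqueue_enqueue_xmit(txvq, pkts[nb_tx]) != 0)
				break;
		}
		if (nb_tx > 0)
			virtqueue_notify(txvq);
		return nb_tx;
	}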
-static inline uint16_t __attribute__((always_inline))
+static __rte_always_inline uint16_t
virtqueue_dequeue_burst(struct virtqueue *vq, struct rte_mbuf **rx_pkts, uint32_t *len, uint16_t num)
{
struct vring_used_elem *uep;