return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t to, uint16_t from, uint16_t size)
{
size * sizeof(struct vring_used_elem));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
sizeof(vq->used->idx));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_shadow_used_ring(struct vhost_virtqueue *vq,
uint16_t desc_idx, uint16_t len)
{
vq->shadow_used_ring[i].len = len;
}
+/* skip the write when the value is unchanged, to avoid dirtying the cache line */
+#define ASSIGN_UNLESS_EQUAL(var, val) do { \
+ if ((var) != (val)) \
+ (var) = (val); \
+} while (0)
+
static void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
cksum));
break;
}
+ } else {
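+ /* no checksum offload requested: clear any stale csum fields */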
+ ASSIGN_UNLESS_EQUAL(net_hdr->csum_start, 0);
+ ASSIGN_UNLESS_EQUAL(net_hdr->csum_offset, 0);
+ ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0);
}
if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
net_hdr->gso_size = m_buf->tso_segsz;
net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
+ m_buf->l4_len;
+ } else {
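+ /* not a TSO packet: clear any stale GSO fields */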
+ ASSIGN_UNLESS_EQUAL(net_hdr->gso_type, 0);
+ ASSIGN_UNLESS_EQUAL(net_hdr->gso_size, 0);
+ ASSIGN_UNLESS_EQUAL(net_hdr->hdr_len, 0);
}
}
-static inline void
-copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
- struct virtio_net_hdr_mrg_rxbuf hdr)
-{
- if (dev->vhost_hlen == sizeof(struct virtio_net_hdr_mrg_rxbuf))
- *(struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)desc_addr = hdr;
- else
- *(struct virtio_net_hdr *)(uintptr_t)desc_addr = hdr.hdr;
-}
-
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
{
uint32_t cpy_len;
struct vring_desc *desc;
uint64_t desc_addr;
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
/* A counter to guard against a looped desc chain */
uint16_t nr_desc = 1;
rte_prefetch0((void *)(uintptr_t)desc_addr);
- virtio_enqueue_offload(m, &virtio_hdr.hdr);
- copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
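+ /* fill the net header in place in the desc buffer; no stack copy needed */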
+ virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr);
vhost_log_write(dev, desc->addr, dev->vhost_hlen);
PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
* added to the RX queue. This function works when the mbuf is scattered, but
* it doesn't support the mergeable feature.
*/
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
return count;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t avail_idx, uint32_t *vec_idx,
struct buf_vector *buf_vec, uint16_t *desc_chain_head,
return 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
struct buf_vector *buf_vec, uint16_t num_buffers)
{
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
uint32_t vec_idx = 0;
uint64_t desc_addr;
uint32_t mbuf_offset, mbuf_avail;
hdr_phys_addr = buf_vec[vec_idx].buf_addr;
rte_prefetch0((void *)(uintptr_t)hdr_addr);
- virtio_hdr.num_buffers = num_buffers;
LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
dev->vid, num_buffers);
}
if (hdr_addr) {
- virtio_enqueue_offload(hdr_mbuf, &virtio_hdr.hdr);
- copy_virtio_net_hdr(dev, hdr_addr, virtio_hdr);
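+ /* fill the net header in place; write num_buffers only when it changes */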
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
+
+ hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)
+ hdr_addr;
+ virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
+ ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers);
+
vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
PRINT_PACKET(dev, (uintptr_t)hdr_addr,
dev->vhost_hlen, 0);
return 0;
}
-static inline uint32_t __attribute__((always_inline))
+static __rte_always_inline uint32_t
virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint32_t count)
{
}
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
{
uint16_t l4_proto = 0;
return 0;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
put_zmbuf(struct zcopy_mbuf *zmbuf)
{
zmbuf->in_use = 0;
}
-static inline int __attribute__((always_inline))
+static __rte_always_inline int
copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
struct rte_mempool *mbuf_pool)
"allocate memory for mbuf.\n");
return -1;
}
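+ /* zero copy: hold an extra reference so mbuf_is_consumed() can tell when the app has freed the mbuf */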
+ if (unlikely(dev->dequeue_zero_copy))
+ rte_mbuf_refcnt_update(cur, 1);
prev->next = cur;
prev->data_len = mbuf_offset;
return 0;
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t used_idx, uint32_t desc_idx)
{
sizeof(vq->used->ring[used_idx]));
}
-static inline void __attribute__((always_inline))
+static __rte_always_inline void
update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t count)
{
eventfd_write(vq->callfd, (eventfd_t)1);
}
-static inline struct zcopy_mbuf *__attribute__((always_inline))
+static __rte_always_inline struct zcopy_mbuf *
get_zmbuf(struct vhost_virtqueue *vq)
{
uint16_t i;
return NULL;
}
-static inline bool __attribute__((always_inline))
+static __rte_always_inline bool
mbuf_is_consumed(struct rte_mbuf *m)
{
while (m) {