	vq->sw_ring[desc_idx] = cookie;
	start_dp = vq->vq_ring.desc;
-	start_dp[desc_idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset) -
-		vq->hw->vtnet_hdr_size;
+	start_dp[desc_idx].addr =
+		VIRTIO_MBUF_ADDR(cookie, vq) +
+		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
	start_dp[desc_idx].len = cookie->buf_len -
		RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
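The removed expression resolved the descriptor address through the mbuf's current data_off (MBUF_DATA_DMA_ADDR() adds it on top of the field selected by vq->offset); the replacement starts from the buffer base plus the constant RTE_PKTMBUF_HEADROOM, which is equivalent whenever data_off holds its default value but no longer depends on data_off being initialized. A sketch of the new helper, assuming the definition in virtqueue.h reads roughly:

	/* Dereference the mbuf field chosen at queue-setup time: vq->offset
	 * points at buf_addr when the ring takes virtual addresses and at
	 * buf_physaddr when it takes physical ones (an assumption about the
	 * surrounding series, not shown in this hunk). */
	#define VIRTIO_MBUF_ADDR(mb, vq) \
		((uint64_t)(*(uintptr_t *)((uintptr_t)(mb) + (vq)->offset)))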
		*(uint64_t *)p = rxvq->mbuf_initializer;
		start_dp[i].addr =
-			MBUF_DATA_DMA_ADDR(sw_ring[i], vq->offset) -
-			vq->hw->vtnet_hdr_size;
+			VIRTIO_MBUF_ADDR(sw_ring[i], vq) +
+			RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;
		start_dp[i].len = sw_ring[i]->buf_len -
			RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size;
	}
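The vectorized rearm loop gets the same computation; the mbuf_initializer write just above it is what stamps the default rearm data (including data_off) on the raw mbufs. With hdr_size standing in for vq->hw->vtnet_hdr_size, the addr/len pair carves the virtio-net header out of the tail of the headroom; a sketch of the layout (addr + len lands exactly at the end of the buffer):

	/*
	 * buf_addr        addr = buf_addr + RTE_PKTMBUF_HEADROOM - hdr_size
	 * |               |
	 * v               v
	 * +---------------+----------+---------------------------------+
	 * | headroom      | vnet hdr | packet data ...                 |
	 * +---------------+----------+---------------------------------+
	 *                 |<--- len = buf_len - RTE_PKTMBUF_HEADROOM
	 *                                               + hdr_size --->|
	 */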
	if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP))
		return 0;
-	nb_used = *(volatile uint16_t *)&vq->vq_ring.used->idx -
-		vq->vq_used_cons_idx;
+	nb_used = VIRTQUEUE_NUSED(vq);
	rte_compiler_barrier();
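The open-coded used-index arithmetic is folded into the existing VIRTQUEUE_NUSED() helper. Assuming the macro in virtqueue.h reads roughly as below, the behavior is unchanged: both indices are free-running uint16_t counters, so the subtraction is wrap-around safe, and the rte_compiler_barrier() kept after it still orders the index read against the subsequent reads of the used ring (the sketch omits the explicit volatile cast of the removed version).

	/* Descriptors the device has marked used but the driver has not
	 * yet consumed; uint16_t arithmetic handles index wrap-around. */
	#define VIRTQUEUE_NUSED(vq) \
		((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))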
virtio_xmit_cleanup(struct virtqueue *vq)
{
	uint16_t i, desc_idx;
-	int nb_free = 0;
+	uint32_t nb_free = 0;
	struct rte_mbuf *m, *free[VIRTIO_TX_MAX_FREE_BUF_SZ];
	desc_idx = (uint16_t)(vq->vq_used_cons_idx &
					free[nb_free++] = m;
				else {
					rte_mempool_put_bulk(free[0]->pool,
-						(void **)free, nb_free);
+						(void **)free,
+						RTE_MIN(RTE_DIM(free),
+							nb_free));
					free[0] = m;
					nb_free = 1;
				}
			}
		}
-		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+		rte_mempool_put_bulk(free[0]->pool, (void **)free,
+				RTE_MIN(RTE_DIM(free), nb_free));
	} else {
		for (i = 1; i < VIRTIO_TX_FREE_NR; i++) {
			m = (struct rte_mbuf *)vq->vq_descx[desc_idx++].cookie;
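Two defensive changes in virtio_xmit_cleanup(): nb_free is widened to uint32_t, matching the unsigned count that rte_mempool_put_bulk() takes and keeping the RTE_MIN() comparison unsigned on both sides, and the count is clamped so the bulk free can never hand the pool more pointers than free[] holds. RTE_DIM() and RTE_MIN() are the stock rte_common.h macros, roughly:

	/* Element count of a statically sized array. */
	#define RTE_DIM(a)	(sizeof(a) / sizeof((a)[0]))

	/* Type-matched minimum that evaluates each argument once. */
	#define RTE_MIN(a, b) ({ \
			typeof(a) _a = (a); \
			typeof(b) _b = (b); \
			_a < _b ? _a : _b; \
		})

Since free[] is declared with VIRTIO_TX_MAX_FREE_BUF_SZ entries, each bulk free is bounded at that many mbufs per call.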
		vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
		for (i = 0; i < nb_tail; i++) {
			start_dp[desc_idx].addr =
-				MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
+				VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
			start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
			tx_pkts++;
			desc_idx++;
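On the Tx side the descriptor must cover the packet data itself, so the substitution keeps the data_off term; presumably the companion helper sits next to VIRTIO_MBUF_ADDR() and reads roughly:

	/* Address of the packet data: selected base address plus the
	 * mbuf's current data offset. */
	#define VIRTIO_MBUF_DATA_DMA_ADDR(mb, vq) \
		(VIRTIO_MBUF_ADDR(mb, vq) + (mb)->data_off)

The identical substitution follows in the nb_commit loop below.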
	vq->vq_descx[desc_idx + i].cookie = tx_pkts[i];
	for (i = 0; i < nb_commit; i++) {
		start_dp[desc_idx].addr =
-			MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset);
+			VIRTIO_MBUF_DATA_DMA_ADDR(*tx_pkts, vq);
		start_dp[desc_idx].len = (*tx_pkts)->pkt_len;
		tx_pkts++;
		desc_idx++;
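Recapping the address arithmetic the patch converges on (m and addr are illustrative names, not identifiers from the patch):

	/* Rx refill/rearm: start hdr_size bytes before the default data
	 * offset, so the virtio-net header lands in the mbuf headroom. */
	addr = VIRTIO_MBUF_ADDR(m, vq) +
		RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size;

	/* Tx commit: cover the packet data only. */
	addr = VIRTIO_MBUF_DATA_DMA_ADDR(m, vq);	/* base + m->data_off */

Both helpers go through vq->offset, so the same vectorized Rx/Tx paths work whichever mbuf address field (virtual or physical) the queue was set up to use.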