#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
#endif
-int
-virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
-{
- struct virtnet_rx *rxvq = rxq;
- struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
-
- return virtqueue_nused(vq) >= offset;
-}
-
void
vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
{
dxp->cookie = (void *)cookies[i];
dxp->ndescs = 1;
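+ /* VIRTIO_MBUF_ADDR() resolves to the mbuf VA or IOVA depending on
+ * the device: virtio-user may have to use virtual addresses. The
+ * descriptor starts vtnet_hdr_size bytes inside the headroom so the
+ * device writes the virtio-net header right before the packet data.
+ */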
- start_dp[idx].addr = cookies[i]->buf_iova +
+ start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookies[i], vq) +
RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
start_dp[idx].len = cookies[i]->buf_len -
RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
dxp->cookie = (void *)cookie[i];
dxp->ndescs = 1;
- start_dp[idx].addr = cookie[i]->buf_iova +
+ start_dp[idx].addr = VIRTIO_MBUF_ADDR(cookie[i], vq) +
RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
- start_dp[idx].len = cookie[i]->buf_len -
- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+ start_dp[idx].len = cookie[i]->buf_len - RTE_PKTMBUF_HEADROOM +
+ hw->vtnet_hdr_size;
start_dp[idx].flags = VRING_DESC_F_WRITE;
vq->vq_desc_head_idx = start_dp[idx].next;
vq_update_avail_ring(vq, idx);
uint16_t flags = vq->vq_packed.cached_flags;
struct virtio_hw *hw = vq->hw;
- dp->addr = cookie->buf_iova +
- RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
- dp->len = cookie->buf_len -
- RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
+ dp->addr = VIRTIO_MBUF_ADDR(cookie, vq) + RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
+ dp->len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
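+ /* Fill addr/len first; virtqueue_store_flags_packed() makes the
+ * descriptor visible by writing the flags last, using a release
+ * (smp) barrier for software peers when weak_barriers is set and a
+ * stronger I/O barrier for hardware otherwise.
+ */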
- virtqueue_store_flags_packed(dp, flags,
- hw->weak_barriers);
+ virtqueue_store_flags_packed(dp, flags, hw->weak_barriers);
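+ /* Packed ring wrap-around: past the last entry, restart at zero and
+ * flip the AVAIL/USED polarity kept in cached_flags.
+ */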
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
else
virtqueue_xmit_offload(hdr, cookies[i]);
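+ /* can_push case: the virtio-net header was built in the mbuf
+ * headroom, so extend the descriptor head_size bytes backwards to
+ * cover it.
+ */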
- start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
- start_dp[idx].len = cookies[i]->data_len + head_size;
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq) - head_size;
+ start_dp[idx].len = cookies[i]->data_len + head_size;
start_dp[idx].flags = 0;
else
virtqueue_xmit_offload(hdr, cookie);
- dp->addr = rte_mbuf_data_iova(cookie) - head_size;
- dp->len = cookie->data_len + head_size;
- dp->id = id;
+ dp->addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq) - head_size;
+ dp->len = cookie->data_len + head_size;
+ dp->id = id;
if (++vq->vq_avail_idx >= vq->vq_nentries) {
vq->vq_avail_idx -= vq->vq_nentries;
virtqueue_xmit_offload(hdr, cookie);
do {
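+ /* Walk the mbuf chain, one descriptor per segment. */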
- start_dp[idx].addr = rte_mbuf_data_iova(cookie);
- start_dp[idx].len = cookie->data_len;
+ start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
+ start_dp[idx].len = cookie->data_len;
if (prepend_header) {
start_dp[idx].addr -= head_size;
start_dp[idx].len += head_size;
RTE_MIN(vq->vq_nentries / 4, DEFAULT_RX_FREE_THRESH);
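+ /* rx_free_thresh is the number of used descriptors to accumulate
+ * before refilling the ring; it is kept a multiple of four,
+ * presumably so batched (vector) refills never split a group.
+ */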
if (rx_free_thresh & 0x3) {
- RTE_LOG(ERR, PMD, "rx_free_thresh must be multiples of four."
- " (rx_free_thresh=%u port=%u queue=%u)\n",
+ PMD_INIT_LOG(ERR, "rx_free_thresh must be multiples of four."
+ " (rx_free_thresh=%u port=%u queue=%u)",
rx_free_thresh, dev->data->port_id, queue_idx);
return -EINVAL;
}
if (rx_free_thresh >= vq->vq_nentries) {
- RTE_LOG(ERR, PMD, "rx_free_thresh must be less than the "
+ PMD_INIT_LOG(ERR, "rx_free_thresh must be less than the "
"number of RX entries (%u)."
- " (rx_free_thresh=%u port=%u queue=%u)\n",
+ " (rx_free_thresh=%u port=%u queue=%u)",
vq->vq_nentries,
rx_free_thresh, dev->data->port_id, queue_idx);
return -EINVAL;
if (tx_free_thresh >= (vq->vq_nentries - 3)) {
PMD_DRV_LOG(ERR, "tx_free_thresh must be less than the "
"number of TX entries minus 3 (%u)."
- " (tx_free_thresh=%u port=%u queue=%u)\n",
+ " (tx_free_thresh=%u port=%u queue=%u)",
vq->vq_nentries - 3,
tx_free_thresh, dev->data->port_id, queue_idx);
return -EINVAL;
if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
return 0;
- m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
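+ /* Parse the headers once: the l2/l3/l4 lengths are needed to
+ * validate csum_start below.
+ */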
ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
m->packet_type = ptype;
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
if (hdr->csum_start <= hdrlen && l4_supported) {
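+ /* NEEDS_CSUM means the device left the checksum field unfinished;
+ * RTE_MBUF_F_RX_L4_CKSUM_NONE tells the application the L4 data is
+ * valid but the checksum in the packet is not.
+ */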
- m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
} else {
/* Unknown proto or tunnel, do sw cksum. We can assume
* the cksum field is in the first segment since the
off) = csum;
}
} else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
- m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
}
/* GSO request, save required information in mbuf */
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_TCPV6:
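+ /* Coalesced receive: the host merged TCP segments, so flag the mbuf
+ * as LRO and leave the checksum unvalidated.
+ */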
- m->ol_flags |= PKT_RX_LRO | \
- PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_LRO |
+ RTE_MBUF_F_RX_L4_CKSUM_NONE;
break;
default:
return -EINVAL;
#endif
/* Do VLAN tag insertion */
- if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+ if (unlikely(m->ol_flags & RTE_MBUF_F_TX_VLAN)) {
error = rte_vlan_insert(&m);
/* rte_vlan_insert() may change pointer
* even in the case of failure
break;
}
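+ /* Unlike the mbuf API convention, virtio expects the IP payload
+ * length included in the TSO pseudo-header checksum, so fix it up.
+ */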
- if (m->ol_flags & PKT_TX_TCP_SEG)
+ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
virtio_tso_fix_cksum(m);
}