static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
{
- uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+ uint64_t csum_l4 = cookie->ol_flags & RTE_MBUF_F_TX_L4_MASK;
+ uint16_t o_l23_len = (cookie->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) ?
+ cookie->outer_l2_len + cookie->outer_l3_len : 0;
- if (cookie->ol_flags & PKT_TX_TCP_SEG)
- csum_l4 |= PKT_TX_TCP_CKSUM;
+ if (cookie->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+ csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
switch (csum_l4) {
- case PKT_TX_UDP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ case RTE_MBUF_F_TX_UDP_CKSUM:
+ hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
- case PKT_TX_TCP_CKSUM:
- hdr->csum_start = cookie->l2_len + cookie->l3_len;
+ case RTE_MBUF_F_TX_TCP_CKSUM:
+ hdr->csum_start = o_l23_len + cookie->l2_len + cookie->l3_len;
hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
break;
}
/* TCP Segmentation Offload */
- if (cookie->ol_flags & PKT_TX_TCP_SEG) {
- hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+ if (cookie->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+ hdr->gso_type = (cookie->ol_flags & RTE_MBUF_F_TX_IPV6) ?
VIRTIO_NET_HDR_GSO_TCPV6 :
VIRTIO_NET_HDR_GSO_TCPV4;
hdr->gso_size = cookie->tso_segsz;
- hdr->hdr_len = cookie->l2_len + cookie->l3_len + cookie->l4_len;
+ hdr->hdr_len = o_l23_len + cookie->l2_len + cookie->l3_len +
+ cookie->l4_len;
} else {
ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
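For the csum_start and hdr_len values above to land on the inner L4 header, the application has to fill in the mbuf Tx offload metadata this helper reads. The sketch below shows what a caller might set for a VXLAN-encapsulated TCP packet with TSO; the helper name and the header lengths (plain Ethernet/IPv4, no VLAN tags or IP options) are illustrative assumptions, not part of the patch.

    #include <rte_mbuf.h>

    /* Illustrative only: fill the Tx offload fields consumed by
     * virtqueue_xmit_offload() for outer Ether/IPv4 + UDP/VXLAN +
     * inner Ether/IPv4/TCP, segmented by TSO.
     */
    static void
    prepare_tunnel_tso_mbuf(struct rte_mbuf *m)
    {
        m->outer_l2_len = 14;        /* outer Ethernet */
        m->outer_l3_len = 20;        /* outer IPv4 */
        m->l2_len = 8 + 8 + 14;      /* outer UDP + VXLAN + inner Ethernet */
        m->l3_len = 20;              /* inner IPv4 */
        m->l4_len = 20;              /* inner TCP */
        m->tso_segsz = 1400;         /* inner payload bytes per segment */

        m->ol_flags |= RTE_MBUF_F_TX_TUNNEL_VXLAN |
                       RTE_MBUF_F_TX_OUTER_IPV4 |
                       RTE_MBUF_F_TX_IPV4 |
                       RTE_MBUF_F_TX_IP_CKSUM |
                       RTE_MBUF_F_TX_TCP_CKSUM |
                       RTE_MBUF_F_TX_TCP_SEG;
    }

With these lengths o_l23_len is 34, so csum_start resolves to 84 (14 + 20 + 8 + 8 + 14 + 20, the offset of the inner TCP header) and hdr_len to 104 (through the end of the inner TCP header), which is what VIRTIO_NET_HDR_F_NEEDS_CSUM and the GSO fields expect.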
@@ ... @@ virtqueue_enqueue_xmit_packed
RTE_PTR_DIFF(&txr[idx].tx_packed_indir, txr);
start_dp[idx].len = (seg_num + 1) *
sizeof(struct vring_packed_desc);
+ /* Packed descriptor id needs to be restored when inorder. */
+ if (in_order)
+ start_dp[idx].id = idx;
/* reset flags for indirect desc */
head_flags = VRING_DESC_F_INDIRECT;
head_flags |= vq->vq_packed.cached_flags;
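For reference, each slot that start_dp[] indexes is a packed-ring descriptor as laid out by the VIRTIO 1.1 specification. The sketch below reproduces that layout (it is not copied from the patch) to show which fields the hunk above programs: addr/len point at the indirect descriptor table, flags gets VRING_DESC_F_INDIRECT plus the cached avail/used flag bits, and id is the buffer id that the new lines reset to the slot index when the in-order feature is negotiated.

    #include <stdint.h>

    /* VIRTIO 1.1 packed virtqueue descriptor ("struct pvirtq_desc"). */
    struct vring_packed_desc {
        uint64_t addr;   /* buffer IOVA: here, the indirect descriptor table */
        uint32_t len;    /* buffer length in bytes: here, (seg_num + 1) descriptors */
        uint16_t id;     /* buffer id; restored to idx above for in-order */
        uint16_t flags;  /* VRING_DESC_F_INDIRECT | avail/used wrap bits */
    };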