		if (!vq->hw->has_tx_offload)
			virtqueue_clear_net_hdr(hdr);
		else
-			virtqueue_xmit_offload(hdr, cookies[i], true);
+			virtqueue_xmit_offload(hdr, cookies[i]);

		start_dp[idx].addr = rte_mbuf_data_iova(cookies[i]) - head_size;
		start_dp[idx].len = cookies[i]->data_len + head_size;

	if (!vq->hw->has_tx_offload)
		virtqueue_clear_net_hdr(hdr);
	else
-		virtqueue_xmit_offload(hdr, cookie, true);
+		virtqueue_xmit_offload(hdr, cookie);

	dp->addr = rte_mbuf_data_iova(cookie) - head_size;
	dp->len = cookie->data_len + head_size;

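In both hunks above, the call site already branches on vq->hw->has_tx_offload, so the true literal passed as the old third argument carried no information; the patch simply drops the parameter. The other side of the branch, virtqueue_clear_net_hdr(), zeroes every field of the virtio net header so the device sees no checksum or GSO request. A minimal sketch of its effect (the upstream helper is a macro built on ASSIGN_UNLESS_EQUAL; the function name below is hypothetical):

	/* Hypothetical sketch of virtqueue_clear_net_hdr's effect: zero
	 * all virtio_net_hdr fields so the device performs no checksum
	 * or segmentation offload for this packet.
	 */
	static inline void
	clear_net_hdr_sketch(struct virtio_net_hdr *hdr)
	{
		hdr->flags = 0;
		hdr->gso_type = 0;	/* VIRTIO_NET_HDR_GSO_NONE */
		hdr->hdr_len = 0;
		hdr->gso_size = 0;
		hdr->csum_start = 0;
		hdr->csum_offset = 0;
	}
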
		idx = start_dp[idx].next;
	}

-	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+	if (vq->hw->has_tx_offload)
+		virtqueue_xmit_offload(hdr, cookie);

	do {
		start_dp[idx].addr = rte_mbuf_data_iova(cookie);

} while (0)

 static inline void
-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
-			struct rte_mbuf *cookie,
-			uint8_t offload)
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
 {
-	if (offload) {
-		uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
-
-		if (cookie->ol_flags & PKT_TX_TCP_SEG)
-			csum_l4 |= PKT_TX_TCP_CKSUM;
-
-		switch (csum_l4) {
-		case PKT_TX_UDP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct rte_udp_hdr,
-				dgram_cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
-
-		case PKT_TX_TCP_CKSUM:
-			hdr->csum_start = cookie->l2_len + cookie->l3_len;
-			hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
-			hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-			break;
-
-		default:
-			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-			break;
-		}
+	uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+
+	if (cookie->ol_flags & PKT_TX_TCP_SEG)
+		csum_l4 |= PKT_TX_TCP_CKSUM;
+
+	switch (csum_l4) {
+	case PKT_TX_UDP_CKSUM:
+		hdr->csum_start = cookie->l2_len + cookie->l3_len;
+		hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
+		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		break;
+
+	case PKT_TX_TCP_CKSUM:
+		hdr->csum_start = cookie->l2_len + cookie->l3_len;
+		hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		break;
+
+	default:
+		ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+		break;
+	}

-		/* TCP Segmentation Offload */
-		if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-			hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
-				VIRTIO_NET_HDR_GSO_TCPV6 :
-				VIRTIO_NET_HDR_GSO_TCPV4;
-			hdr->gso_size = cookie->tso_segsz;
-			hdr->hdr_len =
-				cookie->l2_len +
-				cookie->l3_len +
-				cookie->l4_len;
-		} else {
-			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-		}
+	/* TCP Segmentation Offload */
+	if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+		hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+			VIRTIO_NET_HDR_GSO_TCPV6 :
+			VIRTIO_NET_HDR_GSO_TCPV4;
+		hdr->gso_size = cookie->tso_segsz;
+		hdr->hdr_len = cookie->l2_len + cookie->l3_len + cookie->l4_len;
+	} else {
+		ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+		ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
 	}
 }

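The refactor hoists the old if (offload) guard out of the helper: the function now assumes the caller checked has_tx_offload, and the whole body loses one indentation level with no behavioral change. The ASSIGN_UNLESS_EQUAL macro it keeps using is defined earlier in virtqueue.h, next to the } while (0) visible as leading context above; its purpose is to skip redundant stores into the header. A rough equivalent, for reading purposes only (not the exact upstream definition):

	/* Rough equivalent of ASSIGN_UNLESS_EQUAL: only write the field
	 * when the value actually changes, to avoid dirtying the cache
	 * line holding the virtio net header.
	 */
	#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
		if ((var) != (val))			\
			(var) = (val);			\
	} while (0)
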
		}
	}

-	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+	if (vq->hw->has_tx_offload)
+		virtqueue_xmit_offload(hdr, cookie);

	do {
		uint16_t flags;
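
For reference, the header fields programmed by virtqueue_xmit_offload() follow the virtio specification: when VIRTIO_NET_HDR_F_NEEDS_CSUM is set, the device checksums the packet starting at csum_start and stores the result csum_offset bytes later, and the GSO fields describe how to segment a TSO packet. The relevant layout per the spec (endianness handling omitted):

	struct virtio_net_hdr {
		uint8_t flags;		/* VIRTIO_NET_HDR_F_NEEDS_CSUM, ... */
		uint8_t gso_type;	/* VIRTIO_NET_HDR_GSO_NONE/TCPV4/TCPV6 */
		uint16_t hdr_len;	/* l2_len + l3_len + l4_len */
		uint16_t gso_size;	/* payload bytes per segment (MSS) */
		uint16_t csum_start;	/* checksum coverage starts here */
		uint16_t csum_offset;	/* result at csum_start + csum_offset */
	};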