net/virtio: refactor Tx offload helper
author	David Marchand <david.marchand@redhat.com>
Mon, 3 May 2021 16:43:43 +0000 (18:43 +0200)
committer	Maxime Coquelin <maxime.coquelin@redhat.com>
Tue, 4 May 2021 08:22:17 +0000 (10:22 +0200)
Purely cosmetic, but it is rather odd to have an "offload" helper that
first checks whether it actually has anything to do.
Most callers already perform the same check, so move this branch into
them.
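
Concretely, as the diff below shows, each call site goes from passing
the flag down into the helper:

	virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);

to doing the check itself and calling the slimmed-down helper:

	if (vq->hw->has_tx_offload)
		virtqueue_xmit_offload(hdr, cookie);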

Signed-off-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Flavio Leitner <fbl@sysclose.org>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_rxtx_packed_avx.h
drivers/net/virtio/virtio_rxtx_packed_neon.h
drivers/net/virtio/virtqueue.h

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 8df913b..34108fb 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -448,7 +448,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
                if (!vq->hw->has_tx_offload)
                        virtqueue_clear_net_hdr(hdr);
                else
-                       virtqueue_xmit_offload(hdr, cookies[i], true);
+                       virtqueue_xmit_offload(hdr, cookies[i]);
 
                start_dp[idx].addr  = rte_mbuf_data_iova(cookies[i]) - head_size;
                start_dp[idx].len   = cookies[i]->data_len + head_size;
@@ -495,7 +495,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
        if (!vq->hw->has_tx_offload)
                virtqueue_clear_net_hdr(hdr);
        else
-               virtqueue_xmit_offload(hdr, cookie, true);
+               virtqueue_xmit_offload(hdr, cookie);
 
        dp->addr = rte_mbuf_data_iova(cookie) - head_size;
        dp->len  = cookie->data_len + head_size;
@@ -581,7 +581,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                idx = start_dp[idx].next;
        }
 
-       virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+       if (vq->hw->has_tx_offload)
+               virtqueue_xmit_offload(hdr, cookie);
 
        do {
                start_dp[idx].addr  = rte_mbuf_data_iova(cookie);
diff --git a/drivers/net/virtio/virtio_rxtx_packed_avx.h b/drivers/net/virtio/virtio_rxtx_packed_avx.h
index 228cf54..c819d2e 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_avx.h
+++ b/drivers/net/virtio/virtio_rxtx_packed_avx.h
@@ -115,7 +115,7 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
                virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                        hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
                                        struct virtio_net_hdr *, -head_size);
-                       virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+                       virtqueue_xmit_offload(hdr, tx_pkts[i]);
                }
        }
 
diff --git a/drivers/net/virtio/virtio_rxtx_packed_neon.h b/drivers/net/virtio/virtio_rxtx_packed_neon.h
index d4257e6..f19e618 100644
--- a/drivers/net/virtio/virtio_rxtx_packed_neon.h
+++ b/drivers/net/virtio/virtio_rxtx_packed_neon.h
@@ -134,7 +134,7 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
                virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                        hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
                                        struct virtio_net_hdr *, -head_size);
-                       virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+                       virtqueue_xmit_offload(hdr, tx_pkts[i]);
                }
        }
 
diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h
index ed3b850..03957b2 100644
--- a/drivers/net/virtio/virtqueue.h
+++ b/drivers/net/virtio/virtqueue.h
@@ -617,52 +617,44 @@ virtqueue_notify(struct virtqueue *vq)
 } while (0)
 
 static inline void
-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
-                       struct rte_mbuf *cookie,
-                       uint8_t offload)
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
 {
-       if (offload) {
-               uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
-
-               if (cookie->ol_flags & PKT_TX_TCP_SEG)
-                       csum_l4 |= PKT_TX_TCP_CKSUM;
-
-               switch (csum_l4) {
-               case PKT_TX_UDP_CKSUM:
-                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
-                       hdr->csum_offset = offsetof(struct rte_udp_hdr,
-                               dgram_cksum);
-                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       break;
-
-               case PKT_TX_TCP_CKSUM:
-                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
-                       hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
-                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       break;
-
-               default:
-                       ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-                       break;
-               }
+       uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+
+       if (cookie->ol_flags & PKT_TX_TCP_SEG)
+               csum_l4 |= PKT_TX_TCP_CKSUM;
+
+       switch (csum_l4) {
+       case PKT_TX_UDP_CKSUM:
+               hdr->csum_start = cookie->l2_len + cookie->l3_len;
+               hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               break;
+
+       case PKT_TX_TCP_CKSUM:
+               hdr->csum_start = cookie->l2_len + cookie->l3_len;
+               hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               break;
+
+       default:
+               ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+               break;
+       }
 
-               /* TCP Segmentation Offload */
-               if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-                       hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
-                               VIRTIO_NET_HDR_GSO_TCPV6 :
-                               VIRTIO_NET_HDR_GSO_TCPV4;
-                       hdr->gso_size = cookie->tso_segsz;
-                       hdr->hdr_len =
-                               cookie->l2_len +
-                               cookie->l3_len +
-                               cookie->l4_len;
-               } else {
-                       ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-               }
+       /* TCP Segmentation Offload */
+       if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+               hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+                       VIRTIO_NET_HDR_GSO_TCPV6 :
+                       VIRTIO_NET_HDR_GSO_TCPV4;
+               hdr->gso_size = cookie->tso_segsz;
+               hdr->hdr_len = cookie->l2_len + cookie->l3_len + cookie->l4_len;
+       } else {
+               ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
        }
 }
 
@@ -741,7 +733,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                }
        }
 
-       virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+       if (vq->hw->has_tx_offload)
+               virtqueue_xmit_offload(hdr, cookie);
 
        do {
                uint16_t flags;