net/virtio: refactor Tx offload helper
author    David Marchand <david.marchand@redhat.com>
          Mon, 3 May 2021 16:43:43 +0000 (18:43 +0200)
committer Maxime Coquelin <maxime.coquelin@redhat.com>
          Tue, 4 May 2021 08:22:17 +0000 (10:22 +0200)
Purely cosmetic, but it is rather odd to have an "offload" helper that
checks whether it actually must do something.
We already have the same check in most callers, so move this branch
into them.

Signed-off-by: David Marchand <david.marchand@redhat.com>
Reviewed-by: Flavio Leitner <fbl@sysclose.org>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
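
The shape of the change, reduced to a minimal standalone C sketch (the names
here are illustrative stand-ins, not the driver code itself): the "must I do
anything?" flag is dropped from the helper, and the caller, which already
knows the answer, takes over the branch.

    #include <stdbool.h>
    #include <stdio.h>

    /* Before: the helper receives a flag and tests it itself. */
    static void fill_offload_old(int *hdr, bool offload)
    {
            if (!offload)
                    return;
            *hdr = 42;      /* stand-in for filling the offload fields */
    }

    /* After: the helper does its job unconditionally... */
    static void fill_offload_new(int *hdr)
    {
            *hdr = 42;
    }

    int main(void)
    {
            int hdr_old = 0, hdr_new = 0;
            bool has_tx_offload = true; /* stands in for vq->hw->has_tx_offload */

            fill_offload_old(&hdr_old, has_tx_offload);     /* before */
            if (has_tx_offload)                             /* after */
                    fill_offload_new(&hdr_new);

            printf("old=%d new=%d\n", hdr_old, hdr_new);    /* old=42 new=42 */
            return 0;
    }

Both variants end up doing the same work; the difference is purely where the
predicate lives, as the call sites in the diffs below show.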
drivers/net/virtio/virtio_rxtx.c
drivers/net/virtio/virtio_rxtx_packed_avx.h
drivers/net/virtio/virtio_rxtx_packed_neon.h
drivers/net/virtio/virtqueue.h

drivers/net/virtio/virtio_rxtx.c
index 8df913b0ba992d55621a61b3a8dff61c9635e435..34108fb9463da8124c6372298ad31c9bf0328f86 100644
@@ -448,7 +448,7 @@ virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
                if (!vq->hw->has_tx_offload)
                        virtqueue_clear_net_hdr(hdr);
                else
-                       virtqueue_xmit_offload(hdr, cookies[i], true);
+                       virtqueue_xmit_offload(hdr, cookies[i]);
 
                start_dp[idx].addr  = rte_mbuf_data_iova(cookies[i]) - head_size;
                start_dp[idx].len   = cookies[i]->data_len + head_size;
@@ -495,7 +495,7 @@ virtqueue_enqueue_xmit_packed_fast(struct virtnet_tx *txvq,
        if (!vq->hw->has_tx_offload)
                virtqueue_clear_net_hdr(hdr);
        else
-               virtqueue_xmit_offload(hdr, cookie, true);
+               virtqueue_xmit_offload(hdr, cookie);
 
        dp->addr = rte_mbuf_data_iova(cookie) - head_size;
        dp->len  = cookie->data_len + head_size;
@@ -581,7 +581,8 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                idx = start_dp[idx].next;
        }
 
-       virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+       if (vq->hw->has_tx_offload)
+               virtqueue_xmit_offload(hdr, cookie);
 
        do {
                start_dp[idx].addr  = rte_mbuf_data_iova(cookie);
drivers/net/virtio/virtio_rxtx_packed_avx.h
index 228cf5437ba8ff6c8ffe331fa0f5a64655900add..c819d2e4f2825caa6154900e75f3a2d66ed891b7 100644
@@ -115,7 +115,7 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
                virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                        hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
                                        struct virtio_net_hdr *, -head_size);
-                       virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+                       virtqueue_xmit_offload(hdr, tx_pkts[i]);
                }
        }
 
drivers/net/virtio/virtio_rxtx_packed_neon.h
index d4257e68f03a030d9088c1b9d2b55337f3236d1f..f19e6186357a6844f6cac61f9542dfd5e890c915 100644
@@ -134,7 +134,7 @@ virtqueue_enqueue_batch_packed_vec(struct virtnet_tx *txvq,
                virtio_for_each_try_unroll(i, 0, PACKED_BATCH_SIZE) {
                        hdr = rte_pktmbuf_mtod_offset(tx_pkts[i],
                                        struct virtio_net_hdr *, -head_size);
-                       virtqueue_xmit_offload(hdr, tx_pkts[i], true);
+                       virtqueue_xmit_offload(hdr, tx_pkts[i]);
                }
        }
 
drivers/net/virtio/virtqueue.h
index ed3b85080ebaf51de9ac460d61e5bdcc5a99acf6..03957b2bd0debd6f93d347accc909a558909289a 100644
@@ -617,52 +617,44 @@ virtqueue_notify(struct virtqueue *vq)
 } while (0)
 
 static inline void
-virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
-                       struct rte_mbuf *cookie,
-                       uint8_t offload)
+virtqueue_xmit_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *cookie)
 {
-       if (offload) {
-               uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
-
-               if (cookie->ol_flags & PKT_TX_TCP_SEG)
-                       csum_l4 |= PKT_TX_TCP_CKSUM;
-
-               switch (csum_l4) {
-               case PKT_TX_UDP_CKSUM:
-                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
-                       hdr->csum_offset = offsetof(struct rte_udp_hdr,
-                               dgram_cksum);
-                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       break;
-
-               case PKT_TX_TCP_CKSUM:
-                       hdr->csum_start = cookie->l2_len + cookie->l3_len;
-                       hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
-                       hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-                       break;
-
-               default:
-                       ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
-                       break;
-               }
+       uint64_t csum_l4 = cookie->ol_flags & PKT_TX_L4_MASK;
+
+       if (cookie->ol_flags & PKT_TX_TCP_SEG)
+               csum_l4 |= PKT_TX_TCP_CKSUM;
+
+       switch (csum_l4) {
+       case PKT_TX_UDP_CKSUM:
+               hdr->csum_start = cookie->l2_len + cookie->l3_len;
+               hdr->csum_offset = offsetof(struct rte_udp_hdr, dgram_cksum);
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               break;
+
+       case PKT_TX_TCP_CKSUM:
+               hdr->csum_start = cookie->l2_len + cookie->l3_len;
+               hdr->csum_offset = offsetof(struct rte_tcp_hdr, cksum);
+               hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+               break;
+
+       default:
+               ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+               break;
+       }
 
-               /* TCP Segmentation Offload */
-               if (cookie->ol_flags & PKT_TX_TCP_SEG) {
-                       hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
-                               VIRTIO_NET_HDR_GSO_TCPV6 :
-                               VIRTIO_NET_HDR_GSO_TCPV4;
-                       hdr->gso_size = cookie->tso_segsz;
-                       hdr->hdr_len =
-                               cookie->l2_len +
-                               cookie->l3_len +
-                               cookie->l4_len;
-               } else {
-                       ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
-                       ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
-               }
+       /* TCP Segmentation Offload */
+       if (cookie->ol_flags & PKT_TX_TCP_SEG) {
+               hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
+                       VIRTIO_NET_HDR_GSO_TCPV6 :
+                       VIRTIO_NET_HDR_GSO_TCPV4;
+               hdr->gso_size = cookie->tso_segsz;
+               hdr->hdr_len = cookie->l2_len + cookie->l3_len + cookie->l4_len;
+       } else {
+               ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+               ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
        }
 }
 
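To make the checksum branch concrete: for an Ethernet/IPv4/TCP packet
requesting PKT_TX_TCP_CKSUM, the header tells the device where the
checksummed region starts and where to store the result. A standalone
computation of those two values follows; the struct below is a simplified
stand-in whose layout matches the on-wire TCP header (which is exactly what
offsetof() measures here), not the rte_tcp_hdr definition itself.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in matching the on-wire TCP header layout. */
    struct tcp_hdr {
            uint16_t src_port;
            uint16_t dst_port;
            uint32_t sent_seq;
            uint32_t recv_ack;
            uint8_t  data_off;
            uint8_t  tcp_flags;
            uint16_t rx_win;
            uint16_t cksum;
            uint16_t tcp_urp;
    };

    int main(void)
    {
            unsigned l2_len = 14;   /* Ethernet header, as set in the mbuf */
            unsigned l3_len = 20;   /* IPv4 header without options */

            /* What the PKT_TX_TCP_CKSUM case above stores: */
            printf("csum_start  = %u\n", l2_len + l3_len);                  /* 34 */
            printf("csum_offset = %zu\n", offsetof(struct tcp_hdr, cksum)); /* 16 */
            return 0;
    }

With VIRTIO_NET_HDR_F_NEEDS_CSUM set, the device checksums the packet from
csum_start onward and stores the result at csum_start + csum_offset, per the
virtio specification.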
@@ -741,7 +733,8 @@ virtqueue_enqueue_xmit_packed(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
                }
        }
 
-       virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
+       if (vq->hw->has_tx_offload)
+               virtqueue_xmit_offload(hdr, cookie);
 
        do {
                uint16_t flags;
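
The ASSIGN_UNLESS_EQUAL macro used in the default/else paths above (its
"} while (0)" tail is visible in the hunk context) is a compare-before-write
guard. A minimal sketch of the idea, assuming the usual shape of such a macro
rather than quoting the DPDK definition verbatim:

    #include <stdio.h>

    /* Compare before writing: skip the store (and the dirtied cache line)
     * when the header field already holds the wanted value. */
    #define ASSIGN_UNLESS_EQUAL(var, val) do { \
            if ((var) != (val))                \
                    (var) = (val);             \
    } while (0)

    int main(void)
    {
            int flags = 0;

            ASSIGN_UNLESS_EQUAL(flags, 0);  /* already equal: no store */
            ASSIGN_UNLESS_EQUAL(flags, 1);  /* differs: store performed */
            printf("flags = %d\n", flags);  /* flags = 1 */
            return 0;
    }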