app/testpmd: batch MAC swap for performance on x86
author     Qi Zhang <qi.z.zhang@intel.com>
           Sun, 16 Dec 2018 00:58:35 +0000 (08:58 +0800)
committer  Ferruh Yigit <ferruh.yigit@intel.com>
           Fri, 21 Dec 2018 15:22:40 +0000 (16:22 +0100)
Do the MAC swap for four packets in the same loop iteration to
squeeze out more CPU cycles.

Signed-off-by: Qi Zhang <qi.z.zhang@intel.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Bernard Iremonger <bernard.iremonger@intel.com>
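
For reference, the pattern reads as follows in a minimal standalone
sketch (an illustration only, not the testpmd code itself): it operates
on plain 16-byte-readable header buffers instead of rte_mbuf, and the
helper name macswap_batch4 is hypothetical. The main loop issues four
independent load/shuffle/store chains per iteration, giving the
out-of-order core more instruction-level parallelism; a scalar tail
loop handles the remaining nb % 4 packets.

#include <stdint.h>
#include <tmmintrin.h>	/* SSSE3: _mm_shuffle_epi8 */

/* Swap dst MAC (bytes 0-5) with src MAC (bytes 6-11); bytes 12-15
 * (EtherType plus two payload bytes) stay in place. Each buffer in
 * hdrs[] must be at least 16 bytes readable and writable.
 */
static void
macswap_batch4(uint8_t *hdrs[], int nb)
{
	const __m128i shfl_msk = _mm_set_epi8(15, 14, 13, 12,
			5, 4, 3, 2, 1, 0,
			11, 10, 9, 8, 7, 6);
	int i = 0;

	/* Four packets per iteration: independent load/shuffle/store
	 * chains keep more work in flight per loop trip. */
	while (nb - i >= 4) {
		__m128i a0 = _mm_loadu_si128((const __m128i *)hdrs[i]);
		__m128i a1 = _mm_loadu_si128((const __m128i *)hdrs[i + 1]);
		__m128i a2 = _mm_loadu_si128((const __m128i *)hdrs[i + 2]);
		__m128i a3 = _mm_loadu_si128((const __m128i *)hdrs[i + 3]);

		a0 = _mm_shuffle_epi8(a0, shfl_msk);
		a1 = _mm_shuffle_epi8(a1, shfl_msk);
		a2 = _mm_shuffle_epi8(a2, shfl_msk);
		a3 = _mm_shuffle_epi8(a3, shfl_msk);

		_mm_storeu_si128((__m128i *)hdrs[i], a0);
		_mm_storeu_si128((__m128i *)hdrs[i + 1], a1);
		_mm_storeu_si128((__m128i *)hdrs[i + 2], a2);
		_mm_storeu_si128((__m128i *)hdrs[i + 3], a3);
		i += 4;
	}

	/* Tail: the last nb % 4 packets, one at a time. */
	for (; i < nb; i++) {
		__m128i a = _mm_loadu_si128((const __m128i *)hdrs[i]);
		a = _mm_shuffle_epi8(a, shfl_msk);
		_mm_storeu_si128((__m128i *)hdrs[i], a);
	}
}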
app/test-pmd/macswap_sse.h

index 79c7e98..7d268bf 100644
@@ -11,11 +11,12 @@ static inline void
 do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
                struct rte_port *txp)
 {
-       struct ether_hdr *eth_hdr;
-       struct rte_mbuf *mb;
+       struct ether_hdr *eth_hdr[4];
+       struct rte_mbuf *mb[4];
        uint64_t ol_flags;
        int i;
-       __m128i addr;
+       int r;
+       __m128i addr0, addr1, addr2, addr3;
        /**
         * Shuffle mask used to shuffle the 16 bytes:
         * bytes 0-5 will be swapped with bytes 6-11.
@@ -30,19 +31,56 @@ do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
        vlan_qinq_set(pkts, nb, ol_flags,
                        txp->tx_vlan_id, txp->tx_vlan_id_outer);
 
-       for (i = 0; i < nb; i++) {
-               if (likely(i < nb - 1))
-                       rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1], void *));
-               mb = pkts[i];
+       i = 0;
+       r = nb;
+
+       while (r >= 4) {
+               mb[0] = pkts[i++];
+               eth_hdr[0] = rte_pktmbuf_mtod(mb[0], struct ether_hdr *);
+               addr0 = _mm_loadu_si128((__m128i *)eth_hdr[0]);
+
+               mb[1] = pkts[i++];
+               eth_hdr[1] = rte_pktmbuf_mtod(mb[1], struct ether_hdr *);
+               addr1 = _mm_loadu_si128((__m128i *)eth_hdr[1]);
+
+               mb[2] = pkts[i++];
+               eth_hdr[2] = rte_pktmbuf_mtod(mb[2], struct ether_hdr *);
+               addr2 = _mm_loadu_si128((__m128i *)eth_hdr[2]);
+
+               mb[3] = pkts[i++];
+               eth_hdr[3] = rte_pktmbuf_mtod(mb[3], struct ether_hdr *);
+               addr3 = _mm_loadu_si128((__m128i *)eth_hdr[3]);
 
-               eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
+               addr0 = _mm_shuffle_epi8(addr0, shfl_msk);
+               addr1 = _mm_shuffle_epi8(addr1, shfl_msk);
+               addr2 = _mm_shuffle_epi8(addr2, shfl_msk);
+               addr3 = _mm_shuffle_epi8(addr3, shfl_msk);
+
+               _mm_storeu_si128((__m128i *)eth_hdr[0], addr0);
+               _mm_storeu_si128((__m128i *)eth_hdr[1], addr1);
+               _mm_storeu_si128((__m128i *)eth_hdr[2], addr2);
+               _mm_storeu_si128((__m128i *)eth_hdr[3], addr3);
+
+               mbuf_field_set(mb[0], ol_flags);
+               mbuf_field_set(mb[1], ol_flags);
+               mbuf_field_set(mb[2], ol_flags);
+               mbuf_field_set(mb[3], ol_flags);
+               r -= 4;
+       }
+
+       for ( ; i < nb; i++) {
+               if (i < nb - 1)
+                       rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1], void *));
+               mb[0] = pkts[i];
+               eth_hdr[0] = rte_pktmbuf_mtod(mb[0], struct ether_hdr *);
 
                /* Swap dest and src mac addresses. */
-               addr = _mm_loadu_si128((__m128i *)eth_hdr);
-               addr = _mm_shuffle_epi8(addr, shfl_msk);
-               _mm_storeu_si128((__m128i *)eth_hdr, addr);
+               addr0 = _mm_loadu_si128((__m128i *)eth_hdr[0]);
+               addr0 = _mm_shuffle_epi8(addr0, shfl_msk);
+               _mm_storeu_si128((__m128i *)eth_hdr[0], addr0);
 
-               mbuf_field_set(mb, ol_flags);
+               mbuf_field_set(mb[0], ol_flags);
        }
 }
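
The shfl_msk referenced in the hunk above is the 16-byte control
operand of _mm_shuffle_epi8: byte i of the result takes its value from
source byte mask[i]. A small self-contained check of the swap follows;
the mask value is an assumption reconstructed from the comment "bytes
0-5 will be swapped with bytes 6-11" (build with e.g. gcc -mssse3):

#include <stdio.h>
#include <stdint.h>
#include <tmmintrin.h>

int
main(void)
{
	/* dst MAC, src MAC, EtherType 0x0800, two payload bytes */
	uint8_t hdr[16] = {
		0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa,
		0xbb, 0xbb, 0xbb, 0xbb, 0xbb, 0xbb,
		0x08, 0x00, 0x01, 0x02
	};
	/* _mm_set_epi8 lists bytes from lane 15 down to lane 0 */
	const __m128i shfl_msk = _mm_set_epi8(15, 14, 13, 12,
			5, 4, 3, 2, 1, 0,
			11, 10, 9, 8, 7, 6);
	__m128i v = _mm_loadu_si128((const __m128i *)hdr);

	v = _mm_shuffle_epi8(v, shfl_msk);
	_mm_storeu_si128((__m128i *)hdr, v);

	for (int i = 0; i < 16; i++)
		printf("%02x ", hdr[i]);
	printf("\n");	/* expect: bb x6, aa x6, 08 00 01 02 */
	return 0;
}

Note the 16-byte load and store intentionally touch two bytes past the
14-byte Ethernet header; in the mbuf case those bytes belong to the
packet payload and are written back unchanged.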