#include <rte_flow.h>
#include "testpmd.h"
+#include "macswap.h"
/*
* MAC swap forwarding mode: Swap the source and the destination Ethernet
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_port *txp;
- struct rte_mbuf *mb;
- struct ether_hdr *eth_hdr;
- struct ether_addr addr;
uint16_t nb_rx;
uint16_t nb_tx;
- uint16_t i;
uint32_t retry;
- uint64_t ol_flags = 0;
- uint64_t tx_offloads;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t start_tsc;
uint64_t end_tsc;
#endif
fs->rx_packets += nb_rx;
txp = &ports[fs->tx_port];
- tx_offloads = txp->dev_conf.txmode.offloads;
- if (tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
- ol_flags = PKT_TX_VLAN_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_QINQ_INSERT)
- ol_flags |= PKT_TX_QINQ_PKT;
- if (tx_offloads & DEV_TX_OFFLOAD_MACSEC_INSERT)
- ol_flags |= PKT_TX_MACSEC;
- for (i = 0; i < nb_rx; i++) {
- if (likely(i < nb_rx - 1))
- rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
- void *));
- mb = pkts_burst[i];
- eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
- /* Swap dest and src mac addresses. */
- ether_addr_copy(ð_hdr->d_addr, &addr);
- ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr);
- ether_addr_copy(&addr, ð_hdr->s_addr);
+ do_macswap(pkts_burst, nb_rx, txp);
- mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
- mb->ol_flags |= ol_flags;
- mb->l2_len = sizeof(struct ether_hdr);
- mb->l3_len = sizeof(struct ipv4_hdr);
- mb->vlan_tci = txp->tx_vlan_id;
- mb->vlan_tci_outer = txp->tx_vlan_id_outer;
- }
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
/*
* Retry if necessary
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _MACSWAP_H_
+#define _MACSWAP_H_
+
+#include "macswap_common.h"
+
+/*
+ * Swap the source and destination Ethernet addresses of every packet in
+ * the burst, in place, and stamp each mbuf with the TX offload flags and
+ * VLAN tags derived from the TX port configuration.
+ *
+ * pkts: burst of mbufs to process in place.
+ * nb:   number of packets in pkts.
+ * txp:  TX port whose offload config (VLAN/QinQ/MACsec insert) and
+ *       VLAN TCI values are applied to the packets.
+ */
+static inline void
+do_macswap(struct rte_mbuf *pkts[], uint16_t nb,
+		struct rte_port *txp)
+{
+	struct ether_hdr *eth_hdr;
+	struct rte_mbuf *mb;
+	struct ether_addr addr;
+	uint64_t ol_flags;
+	int i;
+
+	/* Derive per-mbuf TX flags once, then tag VLAN/QinQ TCIs in bulk. */
+	ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads);
+	vlan_qinq_set(pkts, nb, ol_flags,
+			txp->tx_vlan_id, txp->tx_vlan_id_outer);
+
+	for (i = 0; i < nb; i++) {
+		/* Prefetch the next packet's header while handling this one. */
+		if (likely(i < nb - 1))
+			rte_prefetch0(rte_pktmbuf_mtod(pkts[i+1], void *));
+		mb = pkts[i];
+
+		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
+
+		/* Swap dest and src mac addresses. */
+		ether_addr_copy(&eth_hdr->d_addr, &addr);
+		ether_addr_copy(&eth_hdr->s_addr, &eth_hdr->d_addr);
+		ether_addr_copy(&addr, &eth_hdr->s_addr);
+
+		/* Reset RX flags and set TX offload flags/header lengths. */
+		mbuf_field_set(mb, ol_flags);
+	}
+}
+
+#endif /* _MACSWAP_H_ */
--- /dev/null
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#ifndef _MACSWAP_COMMON_H_
+#define _MACSWAP_COMMON_H_
+
+/*
+ * Translate a TX port's offload configuration bitmask into the
+ * corresponding per-mbuf PKT_TX_* flags.
+ *
+ * tx_offload: DEV_TX_OFFLOAD_* bitmask from the port's txmode config.
+ * Returns the PKT_TX_* flags to set on each transmitted mbuf.
+ */
+static inline uint64_t
+ol_flags_init(uint64_t tx_offload)
+{
+	uint64_t ol_flags = 0;
+
+	/* Each port-level insert offload maps to one mbuf TX flag. */
+	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_VLAN_INSERT) ?
+			PKT_TX_VLAN : 0;
+	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_QINQ_INSERT) ?
+			PKT_TX_QINQ : 0;
+	ol_flags |= (tx_offload & DEV_TX_OFFLOAD_MACSEC_INSERT) ?
+			PKT_TX_MACSEC : 0;
+
+	return ol_flags;
+}
+
+/*
+ * Set the inner and/or outer VLAN TCI on every mbuf of the burst, but
+ * only when the corresponding PKT_TX_VLAN / PKT_TX_QINQ flag is enabled
+ * (i.e. the port has the matching insert offload configured).
+ *
+ * pkts:       burst of mbufs to tag.
+ * nb:         number of packets in pkts.
+ * ol_flags:   flags returned by ol_flags_init(); gate which TCIs are set.
+ * vlan:       inner VLAN TCI to apply.
+ * outer_vlan: outer (QinQ) VLAN TCI to apply.
+ */
+static inline void
+vlan_qinq_set(struct rte_mbuf *pkts[], uint16_t nb,
+		uint64_t ol_flags, uint16_t vlan, uint16_t outer_vlan)
+{
+	int i;
+
+	if (ol_flags & PKT_TX_VLAN)
+		for (i = 0; i < nb; i++)
+			pkts[i]->vlan_tci = vlan;
+	if (ol_flags & PKT_TX_QINQ)
+		for (i = 0; i < nb; i++)
+			pkts[i]->vlan_tci_outer = outer_vlan;
+}
+
+/*
+ * Prepare an mbuf for transmission: replace its RX flags with the TX
+ * offload flags and fill in the L2/L3 header lengths the offloads need.
+ *
+ * mb:       mbuf to update.
+ * ol_flags: PKT_TX_* flags from ol_flags_init() to set on the mbuf.
+ */
+static inline void
+mbuf_field_set(struct rte_mbuf *mb, uint64_t ol_flags)
+{
+	/* Keep only the attachment flags; clear all stale RX flags. */
+	mb->ol_flags &= IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF;
+	mb->ol_flags |= ol_flags;
+	/* Header lengths consumed by the VLAN/QinQ insert offloads. */
+	mb->l2_len = sizeof(struct ether_hdr);
+	mb->l3_len = sizeof(struct ipv4_hdr);
+}
+
+#endif /* _MACSWAP_COMMON_H_ */