/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>

#include "testpmd.h"

/*
 * Forwarding of packets in MAC mode.
 * Change the source and the destination Ethernet addresses of packets
 * before forwarding them.
 */
static void
pkt_burst_mac_forward(struct fwd_stream *fs)
{
	struct rte_mbuf  *pkts_burst[MAX_PKT_BURST];
	struct rte_port  *txp;
	struct rte_mbuf  *mb;
	struct ether_hdr *eth_hdr;
	uint32_t retry;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint64_t ol_flags = 0;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

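	/* Optional per-burst cycle accounting, compiled in with RTE_TEST_PMD_RECORD_CORE_CYCLES. */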
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/*
	 * Receive a burst of packets and forward them.
	 */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	txp = &ports[fs->tx_port];
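	/*
	 * Map the Tx port's configured offloads (VLAN/QinQ insertion, MACsec)
	 * onto the mbuf ol_flags applied to every packet of the burst.
	 */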
	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_VLAN)
		ol_flags = PKT_TX_VLAN_PKT;
	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_INSERT_QINQ)
		ol_flags |= PKT_TX_QINQ_PKT;
	if (txp->tx_ol_flags & TESTPMD_TX_OFFLOAD_MACSEC)
		ol_flags |= PKT_TX_MACSEC;
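	/*
	 * Rewrite each Ethernet header: the destination becomes the configured
	 * peer address and the source becomes the Tx port's own MAC address.
	 */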
	for (i = 0; i < nb_rx; i++) {
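		/* Prefetch the next packet's data so its header is warm when edited. */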
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));
		mb = pkts_burst[i];
		eth_hdr = rte_pktmbuf_mtod(mb, struct ether_hdr *);
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				&eth_hdr->d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr,
				&eth_hdr->s_addr);
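		/* Per-mbuf Tx metadata consumed by the PMD for the requested offloads. */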
		mb->ol_flags = ol_flags;
		mb->l2_len = sizeof(struct ether_hdr);
		mb->l3_len = sizeof(struct ipv4_hdr);
		mb->vlan_tci = txp->tx_vlan_id;
		mb->vlan_tci_outer = txp->tx_vlan_id_outer;
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
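	/*
	 * Retry if necessary: the Tx queue may be transiently full.
	 */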
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}

	fs->tx_packets += nb_tx;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
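	/* Free the mbufs that could not be transmitted and count them as dropped. */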
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}

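/*
 * Engine descriptor for MAC forwarding mode; selected in the testpmd CLI
 * with "set fwd mac".
 */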
struct fwd_engine mac_fwd_engine = {
	.fwd_mode_name  = "mac",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_mac_forward,
};