1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2014-2020 Mellanox Technologies, Ltd
11 #include <sys/queue.h>
14 #include <rte_common.h>
15 #include <rte_ether.h>
16 #include <rte_ethdev.h>
20 #include "macswap_common.h"
25 swap_mac(struct rte_ether_hdr *eth_hdr)
27 struct rte_ether_addr addr;
29 /* Swap dest and src mac addresses. */
30 rte_ether_addr_copy(ð_hdr->d_addr, &addr);
31 rte_ether_addr_copy(ð_hdr->s_addr, ð_hdr->d_addr);
32 rte_ether_addr_copy(&addr, ð_hdr->s_addr);
36 swap_ipv4(struct rte_ipv4_hdr *ipv4_hdr)
40 /* Swap dest and src ipv4 addresses. */
41 addr = ipv4_hdr->src_addr;
42 ipv4_hdr->src_addr = ipv4_hdr->dst_addr;
43 ipv4_hdr->dst_addr = addr;
47 swap_ipv6(struct rte_ipv6_hdr *ipv6_hdr)
51 /* Swap dest and src ipv6 addresses. */
52 memcpy(&addr, &ipv6_hdr->src_addr, 16);
53 memcpy(&ipv6_hdr->src_addr, &ipv6_hdr->dst_addr, 16);
54 memcpy(&ipv6_hdr->dst_addr, &addr, 16);
58 swap_tcp(struct rte_tcp_hdr *tcp_hdr)
62 /* Swap dest and src tcp port. */
63 port = tcp_hdr->src_port;
64 tcp_hdr->src_port = tcp_hdr->dst_port;
65 tcp_hdr->dst_port = port;
69 swap_udp(struct rte_udp_hdr *udp_hdr)
73 /* Swap dest and src udp port */
74 port = udp_hdr->src_port;
75 udp_hdr->src_port = udp_hdr->dst_port;
76 udp_hdr->dst_port = port;
80 * 5 tuple swap forwarding mode: Swap the source and the destination of layers
81 * 2,3,4. Swaps source and destination for MAC, IPv4/IPv6, UDP/TCP.
82 * Parses each layer and swaps it. When the next layer doesn't match it stops.
85 pkt_burst_5tuple_swap(struct fwd_stream *fs)
/* Burst buffer for mbufs received from the RX queue. */
87 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
/* NOTE(review): this chunk elides several lines, including declarations of
 * nb_rx/nb_tx, txp, mb, ol_flags, proto, next_proto, retry, and the
 * header-cursor union `h` (members eth/vlan/ipv4/ipv6/tcp/udp/byte below
 * imply it overlays typed pointers with a byte pointer) — confirm against
 * the full file. */
99 struct rte_ether_hdr *eth;
100 struct rte_vlan_hdr *vlan;
101 struct rte_ipv4_hdr *ipv4;
102 struct rte_ipv6_hdr *ipv6;
103 struct rte_tcp_hdr *tcp;
104 struct rte_udp_hdr *udp;
/* TSC snapshot for per-stream cycle accounting. */
108 uint64_t start_tsc = 0;
110 get_start_cycles(&start_tsc);
113 * Receive a burst of packets and forward them.
115 nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
/* Nothing received: return without touching the forward stats. */
117 if (unlikely(nb_rx == 0))
120 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
121 fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
124 fs->rx_packets += nb_rx;
125 txp = &ports[fs->tx_port];
/* Derive TX offload flags from the TX port's configured offloads, then
 * apply VLAN/QinQ tags to the whole burst up front. */
126 ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads);
127 vlan_qinq_set(pkts_burst, nb_rx, ol_flags,
128 txp->tx_vlan_id, txp->tx_vlan_id_outer);
129 for (i = 0; i < nb_rx; i++) {
/* Prefetch the next packet's header while this one is processed. */
130 if (likely(i < nb_rx - 1))
131 rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i+1],
/* L2: swap MACs, then walk past any VLAN/QinQ tags, growing l2_len. */
134 h.eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
135 proto = h.eth->ether_type;
137 mb->l2_len = sizeof(struct rte_ether_hdr);
139 while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
140 proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
141 proto = h.vlan->eth_proto;
143 mb->l2_len += sizeof(struct rte_vlan_hdr);
/* L3: swap IPv4/IPv6 addresses; a non-IP ethertype stops the parse. */
145 if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
147 next_proto = h.ipv4->next_proto_id;
/* IHL (low nibble of version_ihl) counts 32-bit words, hence * 4. */
148 mb->l3_len = (h.ipv4->version_ihl & 0x0f) * 4;
149 h.byte += mb->l3_len;
150 } else if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV6)) {
152 next_proto = h.ipv6->proto;
/* IPv6 has a fixed 40-byte base header; extension headers are not
 * walked here. */
154 mb->l3_len = sizeof(struct rte_ipv6_hdr);
/* Non-IP packet: set offload fields and continue with the next one. */
156 mbuf_field_set(mb, ol_flags);
/* L4: swap UDP/TCP ports; any other protocol stops the parse. */
159 if (next_proto == IPPROTO_UDP) {
161 mb->l4_len = sizeof(struct rte_udp_hdr);
162 } else if (next_proto == IPPROTO_TCP) {
/* data_off upper nibble is TCP header length in 32-bit words:
 * (x & 0xf0) >> 2 == high nibble * 4 bytes. */
164 mb->l4_len = (h.tcp->data_off & 0xf0) >> 2;
166 mbuf_field_set(mb, ol_flags);
/* Transmit the burst; on partial TX, retry the remainder if enabled. */
168 nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
172 if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
174 while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
175 rte_delay_us(burst_tx_delay_time);
176 nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
177 &pkts_burst[nb_tx], nb_rx - nb_tx);
180 fs->tx_packets += nb_tx;
181 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
182 fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
/* Account for and free any mbufs the driver refused to queue. */
184 if (unlikely(nb_tx < nb_rx)) {
185 fs->fwd_dropped += (nb_rx - nb_tx);
187 rte_pktmbuf_free(pkts_burst[nb_tx]);
188 } while (++nb_tx < nb_rx);
190 get_end_cycles(fs, start_tsc);
/* Forwarding engine descriptor registering pkt_burst_5tuple_swap as the
 * "5tswap" testpmd forwarding mode; no per-port begin/end hooks. */
193 struct fwd_engine five_tuple_swap_fwd_engine = {
194 .fwd_mode_name = "5tswap",
195 .port_fwd_begin = NULL,
196 .port_fwd_end = NULL,
197 .packet_fwd = pkt_burst_5tuple_swap,