/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2014-2020 Mellanox Technologies, Ltd
 */

#include <string.h>
#include <stdint.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_branch_prediction.h>
#include <rte_prefetch.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>

#include "macswap_common.h"
#include "testpmd.h"

static inline void
swap_mac(struct rte_ether_hdr *eth_hdr)
{
	struct rte_ether_addr addr;
	/* Swap dest and src mac addresses. */
	rte_ether_addr_copy(&eth_hdr->dst_addr, &addr);
	rte_ether_addr_copy(&eth_hdr->src_addr, &eth_hdr->dst_addr);
	rte_ether_addr_copy(&addr, &eth_hdr->src_addr);
}

static inline void
swap_ipv4(struct rte_ipv4_hdr *ipv4_hdr)
{
	rte_be32_t addr;
	/* Swap dest and src ipv4 addresses. */
	addr = ipv4_hdr->src_addr;
	ipv4_hdr->src_addr = ipv4_hdr->dst_addr;
	ipv4_hdr->dst_addr = addr;
}

static inline void
swap_ipv6(struct rte_ipv6_hdr *ipv6_hdr)
{
	uint8_t addr[16];
	/* Swap dest and src ipv6 addresses. */
	memcpy(&addr, &ipv6_hdr->src_addr, 16);
	memcpy(&ipv6_hdr->src_addr, &ipv6_hdr->dst_addr, 16);
	memcpy(&ipv6_hdr->dst_addr, &addr, 16);
}

static inline void
swap_tcp(struct rte_tcp_hdr *tcp_hdr)
{
	rte_be16_t port;
	/* Swap dest and src tcp port. */
	port = tcp_hdr->src_port;
	tcp_hdr->src_port = tcp_hdr->dst_port;
	tcp_hdr->dst_port = port;
}

static inline void
swap_udp(struct rte_udp_hdr *udp_hdr)
{
	rte_be16_t port;
	/* Swap dest and src udp port. */
	port = udp_hdr->src_port;
	udp_hdr->src_port = udp_hdr->dst_port;
	udp_hdr->dst_port = port;
}

/*
 * 5 tuple swap forwarding mode: swap source and destination at layers
 * 2, 3 and 4, i.e. the MAC addresses, the IPv4/IPv6 addresses and the
 * UDP/TCP ports. Each layer is parsed and swapped in turn; parsing
 * stops as soon as the next layer does not match.
 */
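/* Selected at runtime with the testpmd CLI, e.g. "set fwd 5tswap". */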
static void
pkt_burst_5tuple_swap(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *mb;
	uint16_t next_proto;
	uint64_t ol_flags;
	uint16_t proto;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint32_t retry;

	int i;
	union {
		struct rte_ether_hdr *eth;
		struct rte_vlan_hdr *vlan;
		struct rte_ipv4_hdr *ipv4;
		struct rte_ipv6_hdr *ipv6;
		struct rte_tcp_hdr *tcp;
		struct rte_udp_hdr *udp;
		uint8_t *byte;
	} h;

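	/*
	 * h is used as a cursor over the packet: the L2/L3 parsing steps
	 * below advance it past each header they have swapped,
	 * reinterpreting the same address as the next layer's header type.
	 */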
	uint64_t start_tsc = 0;

	get_start_cycles(&start_tsc);

	/*
	 * Receive a burst of packets and forward them.
	 */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	inc_rx_burst_stats(fs, nb_rx);
	if (unlikely(nb_rx == 0))
		return;

	fs->rx_packets += nb_rx;
	txp = &ports[fs->tx_port];
	ol_flags = ol_flags_init(txp->dev_conf.txmode.offloads);
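	/* Apply any TX-side VLAN/QinQ insertion configured for the port. */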
	vlan_qinq_set(pkts_burst, nb_rx, ol_flags,
			txp->tx_vlan_id, txp->tx_vlan_id_outer);
	for (i = 0; i < nb_rx; i++) {
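		/* Prefetch the next packet's header while handling this one. */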
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i+1],
						       void *));
		mb = pkts_burst[i];
		h.eth = rte_pktmbuf_mtod(mb, struct rte_ether_hdr *);
		proto = h.eth->ether_type;
		swap_mac(h.eth);
		mb->l2_len = sizeof(struct rte_ether_hdr);
		h.eth++;
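		/* Walk past any VLAN/QinQ tags to find the L3 protocol. */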
		while (proto == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
		       proto == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
			proto = h.vlan->eth_proto;
			h.vlan++;
			mb->l2_len += sizeof(struct rte_vlan_hdr);
		}
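		/*
		 * Layer 3: swap IPv4 or IPv6 addresses; other protocols are
		 * forwarded with only the MAC addresses swapped.
		 */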
		if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV4)) {
			swap_ipv4(h.ipv4);
			next_proto = h.ipv4->next_proto_id;
			mb->l3_len = rte_ipv4_hdr_len(h.ipv4);
			h.byte += mb->l3_len;
		} else if (proto == RTE_BE16(RTE_ETHER_TYPE_IPV6)) {
			swap_ipv6(h.ipv6);
			next_proto = h.ipv6->proto;
			h.ipv6++;
			mb->l3_len = sizeof(struct rte_ipv6_hdr);
		} else {
			mbuf_field_set(mb, ol_flags);
			continue;
		}
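		/* Layer 4: swap UDP or TCP ports. */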
		if (next_proto == IPPROTO_UDP) {
			swap_udp(h.udp);
			mb->l4_len = sizeof(struct rte_udp_hdr);
		} else if (next_proto == IPPROTO_TCP) {
			swap_tcp(h.tcp);
			/* Upper nibble of data_off is the TCP header length
			 * in 32-bit words.
			 */
			mb->l4_len = (h.tcp->data_off & 0xf0) >> 2;
		}
		mbuf_field_set(mb, ol_flags);
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;
	inc_tx_burst_stats(fs, nb_tx);
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
	get_end_cycles(fs, start_tsc);
}

struct fwd_engine five_tuple_swap_fwd_engine = {
	.fwd_mode_name = "5tswap",
	.port_fwd_begin = NULL,
	.port_fwd_end = NULL,
	.packet_fwd = pkt_burst_5tuple_swap,
};
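/* Registered in testpmd's fwd_engines[] table under the name "5tswap". */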