/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdarg.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <sys/queue.h>
#include <sys/stat.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include "testpmd.h"

#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)

/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif
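
/* Return the pseudo-header checksum of an IPv4 or IPv6 header. When the
 * L4 checksum is offloaded, hardware expects the checksum field to be
 * pre-filled with this value. */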
static uint16_t
get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}
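
/* Compute the full UDP or TCP checksum in software (pseudo-header plus
 * L4 header and payload). */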
static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
}

/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with one optional vlan
 * header. The l4_len argument is only set in case of TCP (useful for TSO).
 */
static void
parse_ethernet(struct ether_hdr *eth_hdr, uint16_t *ethertype, uint16_t *l2_len,
	uint16_t *l3_len, uint8_t *l4_proto, uint16_t *l4_len)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	struct tcp_hdr *tcp_hdr;

	*l2_len = sizeof(struct ether_hdr);
	*ethertype = eth_hdr->ether_type;

	if (*ethertype == _htons(ETHER_TYPE_VLAN)) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		*l2_len += sizeof(struct vlan_hdr);
		*ethertype = vlan_hdr->eth_proto;
	}

	switch (*ethertype) {
	case _htons(ETHER_TYPE_IPv4):
		ipv4_hdr = (struct ipv4_hdr *)((char *)eth_hdr + *l2_len);
		*l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
		*l4_proto = ipv4_hdr->next_proto_id;
		break;
	case _htons(ETHER_TYPE_IPv6):
		ipv6_hdr = (struct ipv6_hdr *)((char *)eth_hdr + *l2_len);
		*l3_len = sizeof(struct ipv6_hdr);
		*l4_proto = ipv6_hdr->proto;
		break;
	default:
		*l3_len = 0;
		*l4_proto = 0;
		break;
	}

	if (*l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)eth_hdr +
			*l2_len + *l3_len);
		*l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else
		*l4_len = 0;
}

/* modify the IPv4 or IPv6 source address of a packet */
static void
change_ip_addresses(void *l3_hdr, uint16_t ethertype)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct ipv6_hdr *ipv6_hdr = l3_hdr;

	if (ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->src_addr =
			rte_cpu_to_be_32(rte_be_to_cpu_32(ipv4_hdr->src_addr) + 1);
	} else if (ethertype == _htons(ETHER_TYPE_IPv6)) {
		ipv6_hdr->src_addr[15] = ipv6_hdr->src_addr[15] + 1;
	}
}

/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, uint16_t ethertype, uint16_t l3_len,
	uint8_t l4_proto, uint16_t tso_segsz, uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct udp_hdr *udp_hdr;
	struct tcp_hdr *tcp_hdr;
	struct sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;

	if (ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;

		if (tso_segsz != 0 && l4_proto == IPPROTO_TCP) {
			ol_flags |= PKT_TX_IP_CKSUM;
		} else {
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
				ol_flags |= PKT_TX_IP_CKSUM;
			else
				ipv4_hdr->hdr_checksum =
					rte_ipv4_cksum(ipv4_hdr);
		}
		ol_flags |= PKT_TX_IPV4;
	} else if (ethertype == _htons(ETHER_TYPE_IPv6))
		ol_flags |= PKT_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */
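
	/* process the L4 checksum: either flag the mbuf for hardware
	 * offload and seed the checksum field with the pseudo-header sum,
	 * or compute the complete checksum in software */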
	if (l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct udp_hdr *)((char *)l3_hdr + l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			udp_hdr->dgram_cksum = 0;
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
				ol_flags |= PKT_TX_UDP_CKSUM;
				udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
					ethertype, ol_flags);
			} else {
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(l3_hdr, udp_hdr,
						ethertype);
			}
		}
	} else if (l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + l3_len);
		tcp_hdr->cksum = 0;
		if (tso_segsz != 0) {
			ol_flags |= PKT_TX_TCP_SEG;
			tcp_hdr->cksum = get_psd_sum(l3_hdr, ethertype,
				ol_flags);
		} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
			ol_flags |= PKT_TX_TCP_CKSUM;
			tcp_hdr->cksum = get_psd_sum(l3_hdr, ethertype,
				ol_flags);
		} else {
			tcp_hdr->cksum =
				get_udptcp_checksum(l3_hdr, tcp_hdr, ethertype);
		}
	} else if (l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + l3_len);
		sctp_hdr->cksum = 0;
		/* sctp payload must be a multiple of 4 to be offloaded */
		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
				((ipv4_hdr->total_length & 0x3) == 0)) {
			ol_flags |= PKT_TX_SCTP_CKSUM;
		} else {
			/* XXX implement CRC32c, example available in RFC3309 */
		}
	}

	return ol_flags;
}

/* Calculate the checksum of outer header (only vxlan is supported,
 * meaning IP + UDP). The caller already checked that it's a vxlan
 * packet. */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, uint16_t outer_ethertype,
	uint16_t outer_l3_len, uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
	struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
	struct udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM)
		ol_flags |= PKT_TX_UDP_TUNNEL_PKT;

	if (outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;

		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM) == 0)
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	}
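
	/* a zero UDP checksum means "no checksum", which RFC 7348 allows
	 * for vxlan, so it is preserved rather than recomputed */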
	udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + outer_l3_len);
	/* do not recalculate udp cksum if it was 0 */
	if (udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM) == 0) {
			if (outer_ethertype == _htons(ETHER_TYPE_IPv4))
				udp_hdr->dgram_cksum =
					rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
			else
				udp_hdr->dgram_cksum =
					rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
		}
	}

	return ol_flags;
}

/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *  - modify the IPs in inner headers and in outer headers if any
 *  - reprocess the checksum of all supported layers. This is done in SW
 *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *    segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * VxLAN flag concerns the outer IP and UDP layer (if the packet is
 * recognized as a vxlan packet).
 */
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *m;
	struct ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	struct udp_hdr *udp_hdr;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint64_t ol_flags;
	uint16_t testpmd_ol_flags;
	uint8_t l4_proto;
	uint16_t ethertype = 0, outer_ethertype = 0;
	uint16_t l2_len = 0, l3_len = 0, l4_len = 0;
	uint16_t outer_l2_len = 0, outer_l3_len = 0;
	int tunnel = 0;
	uint16_t tso_segsz;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* receive a burst of packet */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;

	txp = &ports[fs->tx_port];
	testpmd_ol_flags = txp->tx_ol_flags;
	tso_segsz = txp->tso_segsz;
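
	/* process each received packet, following the four steps described
	 * in the comment above the function */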
	for (i = 0; i < nb_rx; i++) {

		ol_flags = 0;
		tunnel = 0;
		m = pkts_burst[i];

		/* Update the L3/L4 checksum error packet statistics */
		rx_bad_ip_csum += ((m->ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
		rx_bad_l4_csum += ((m->ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
		parse_ethernet(eth_hdr, &ethertype, &l2_len, &l3_len,
			&l4_proto, &l4_len);
		l3_hdr = (char *)eth_hdr + l2_len;

		/* check if it's a supported tunnel (only vxlan for now) */
		if (l4_proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)((char *)l3_hdr + l3_len);

			/* currently, this flag is set by i40e only if the
			 * packet is vxlan */
			if (((m->ol_flags & PKT_RX_TUNNEL_IPV4_HDR) ||
					(m->ol_flags & PKT_RX_TUNNEL_IPV6_HDR)))
				tunnel = 1;
			/* else check udp destination port, 4789 is the default
			 * vxlan port (rfc7348) */
			else if (udp_hdr->dst_port == _htons(4789))
				tunnel = 1;

			if (tunnel == 1) {
				outer_ethertype = ethertype;
				outer_l2_len = l2_len;
				outer_l3_len = l3_len;
				outer_l3_hdr = l3_hdr;
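
				/* the inner frame starts right after the
				 * outer UDP and vxlan headers */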
				eth_hdr = (struct ether_hdr *)((char *)udp_hdr +
					sizeof(struct udp_hdr) +
					sizeof(struct vxlan_hdr));

				parse_ethernet(eth_hdr, &ethertype, &l2_len,
					&l3_len, &l4_proto, &l4_len);
				l3_hdr = (char *)eth_hdr + l2_len;
			}
		}

		/* step 2: change all source IPs (v4 or v6) so we need
		 * to recompute the checksums even if they were correct */

		change_ip_addresses(l3_hdr, ethertype);
		if (tunnel == 1)
			change_ip_addresses(outer_l3_hdr, outer_ethertype);

		/* step 3: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		ol_flags |= process_inner_cksums(l3_hdr, ethertype,
			l3_len, l4_proto, tso_segsz, testpmd_ol_flags);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (tunnel == 1) {
			ol_flags |= process_outer_cksums(outer_l3_hdr,
				outer_ethertype, outer_l3_len,
				testpmd_ol_flags);
		}

		/* step 4: fill the mbuf meta data (flags and header lengths) */

		if (tunnel == 1) {
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM) {
				m->l2_len = outer_l2_len;
				m->l3_len = outer_l3_len;
				m->inner_l2_len = l2_len;
				m->inner_l3_len = l3_len;
			} else {
				/* if we don't do vxlan cksum in hw,
				 * outer checksum will be wrong because
				 * we changed the ip, but it shows that
				 * we can process the inner header cksum
				 * in the hw */
				m->l2_len = outer_l2_len + outer_l3_len +
					sizeof(struct udp_hdr) +
					sizeof(struct vxlan_hdr) + l2_len;
				m->l3_len = l3_len;
				m->l4_len = l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = l2_len;
			m->l3_len = l3_len;
			m->l4_len = l4_len;
		}
		m->tso_segsz = tso_segsz;
		m->ol_flags = ol_flags;
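
		/* the header length fields must stay consistent with
		 * ol_flags: PMDs rely on them to locate the checksum
		 * fields to fill in hardware */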

		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			struct {
				uint64_t flag;
				uint64_t mask;
			} tx_flags[] = {
				{ PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM },
				{ PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK },
				{ PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK },
				{ PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK },
				{ PKT_TX_UDP_TUNNEL_PKT, PKT_TX_UDP_TUNNEL_PKT },
				{ PKT_TX_IPV4, PKT_TX_IPV4 },
				{ PKT_TX_IPV6, PKT_TX_IPV6 },
				{ PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM },
				{ PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4 },
				{ PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6 },
				{ PKT_TX_TCP_SEG, PKT_TX_TCP_SEG },
			};
			unsigned j;
			const char *name;

			printf("-----------------\n");
			/* dump rx parsed packet info */
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
				"l4_proto=%d l4_len=%d\n",
				l2_len, rte_be_to_cpu_16(ethertype),
				l3_len, l4_proto, l4_len);
			if (tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
					"outer_l3_len=%d\n", outer_l2_len,
					rte_be_to_cpu_16(outer_ethertype),
					outer_l3_len);
			/* dump tx packet info */
			if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM |
					TESTPMD_TX_OFFLOAD_UDP_CKSUM |
					TESTPMD_TX_OFFLOAD_TCP_CKSUM |
					TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) ||
					tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
					"m->l4_len=%d\n",
					m->l2_len, m->l3_len, m->l4_len);
			if ((tunnel == 1) &&
				(testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM))
				printf("tx: m->inner_l2_len=%d m->inner_l3_len=%d\n",
					m->inner_l2_len, m->inner_l3_len);
			if (tso_segsz != 0)
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			printf("tx: flags=");
			for (j = 0; j < sizeof(tx_flags)/sizeof(*tx_flags); j++) {
				name = rte_get_tx_ol_flag_name(tx_flags[j].flag);
				if ((m->ol_flags & tx_flags[j].mask) ==
						tx_flags[j].flag)
					printf("%s ", name);
			}
			printf("\n");
		}
	}
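
	/* send the burst; packets not accepted by the tx queue are
	 * dropped and freed below */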
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}

struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};
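
/* This engine is selected with the testpmd command "set fwd csum"; it is
 * registered in the fwd_engines[] array in testpmd.c. */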