 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * Copyright 2014 6WIND S.A.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <netinet/in.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include "testpmd.h"
#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
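/*
 * IP_VHL_DEF combines the two 4-bit fields of the first IPv4 header byte:
 * version 4 (0x40) and a header length of five 32-bit words (0x05), giving
 * 0x45, i.e. a standard 20-byte IPv4 header with no options.
 */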
/* we cannot use htons() from arpa/inet.h due to name conflicts, and we
 * cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif
static inline uint16_t
get_16b_sum(uint16_t *ptr16, uint32_t nr)
{
	uint32_t sum = 0;
	while (nr > 1) {
		sum += *ptr16++;
		nr -= sizeof(uint16_t);
		if (sum > UINT16_MAX)
			sum -= UINT16_MAX;
	}
	/* If length is in odd bytes */
	if (nr)
		sum += *((uint8_t *)ptr16);
	sum = ((sum & 0xffff0000) >> 16) + (sum & 0xffff);
	return (uint16_t)sum;
}
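/*
 * Note: get_16b_sum() computes the one's complement sum used by the IP,
 * UDP and TCP checksums (RFC 1071). The last line folds any carry out of
 * the low 16 bits back in: for example, a 32-bit partial sum of 0x0002fffd
 * folds to 0x0002 + 0xfffd = 0xffff.
 */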
static inline uint16_t
get_ipv4_cksum(struct ipv4_hdr *ipv4_hdr)
{
	uint16_t cksum;
	cksum = get_16b_sum((uint16_t *)ipv4_hdr, sizeof(struct ipv4_hdr));
	return (uint16_t)((cksum == 0xffff) ? cksum : ~cksum);
}
static inline uint16_t
get_ipv4_psd_sum(struct ipv4_hdr *ip_hdr)
{
	/* Pseudo Header for IPv4/UDP/TCP checksum */
	union ipv4_psd_header {
		struct {
			uint32_t src_addr; /* IP address of source host. */
			uint32_t dst_addr; /* IP address of destination host(s). */
			uint8_t  zero;     /* zero. */
			uint8_t  proto;    /* L4 protocol type. */
			uint16_t len;      /* L4 length. */
		} __attribute__((__packed__));
		uint16_t u16_arr[0];
	} psd_hdr;

	psd_hdr.src_addr = ip_hdr->src_addr;
	psd_hdr.dst_addr = ip_hdr->dst_addr;
	psd_hdr.zero = 0;
	psd_hdr.proto = ip_hdr->next_proto_id;
	psd_hdr.len = rte_cpu_to_be_16((uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length)
		- sizeof(struct ipv4_hdr)));
	return get_16b_sum(psd_hdr.u16_arr, sizeof(psd_hdr));
}
static inline uint16_t
get_ipv6_psd_sum(struct ipv6_hdr *ip_hdr)
{
	/* Pseudo Header for IPv6/UDP/TCP checksum */
	union ipv6_psd_header {
		struct {
			uint8_t  src_addr[16]; /* IP address of source host. */
			uint8_t  dst_addr[16]; /* IP address of destination host(s). */
			uint32_t len;          /* L4 length. */
			uint32_t proto;        /* L4 protocol - top 3 bytes must be zero */
		} __attribute__((__packed__));
		uint16_t u16_arr[0]; /* allow use as 16-bit values with safe aliasing */
	} psd_hdr;

	rte_memcpy(&psd_hdr.src_addr, ip_hdr->src_addr,
		sizeof(ip_hdr->src_addr) + sizeof(ip_hdr->dst_addr));
	psd_hdr.len = ip_hdr->payload_len;
	psd_hdr.proto = (ip_hdr->proto << 24);

	return get_16b_sum(psd_hdr.u16_arr, sizeof(psd_hdr));
}
static uint16_t
get_psd_sum(void *l3_hdr, uint16_t ethertype)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return get_ipv4_psd_sum(l3_hdr);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return get_ipv6_psd_sum(l3_hdr);
}
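/*
 * When a TX L4 checksum offload flag (PKT_TX_UDP_CKSUM or PKT_TX_TCP_CKSUM)
 * is set on the mbuf, the mbuf API of this DPDK version expects the L4
 * checksum field to be pre-seeded with this pseudo-header sum so that the
 * NIC can finish the computation over the L4 header and payload;
 * process_inner_cksums() below relies on this.
 */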
static inline uint16_t
get_ipv4_udptcp_checksum(struct ipv4_hdr *ipv4_hdr, uint16_t *l4_hdr)
{
	uint32_t cksum;
	uint32_t l4_len;

	l4_len = rte_be_to_cpu_16(ipv4_hdr->total_length) - sizeof(struct ipv4_hdr);
	cksum = get_16b_sum(l4_hdr, l4_len);
	cksum += get_ipv4_psd_sum(ipv4_hdr);
	cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
	cksum = (~cksum) & 0xffff;
	if (cksum == 0)
		cksum = 0xffff;
	return (uint16_t)cksum;
}
static inline uint16_t
get_ipv6_udptcp_checksum(struct ipv6_hdr *ipv6_hdr, uint16_t *l4_hdr)
{
	uint32_t cksum;
	uint32_t l4_len;

	l4_len = rte_be_to_cpu_16(ipv6_hdr->payload_len);
	cksum = get_16b_sum(l4_hdr, l4_len);
	cksum += get_ipv6_psd_sum(ipv6_hdr);
	cksum = ((cksum & 0xffff0000) >> 16) + (cksum & 0xffff);
	cksum = (~cksum) & 0xffff;
	if (cksum == 0)
		cksum = 0xffff;
	return (uint16_t)cksum;
}
static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return get_ipv4_udptcp_checksum(l3_hdr, l4_hdr);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return get_ipv6_udptcp_checksum(l3_hdr, l4_hdr);
}
/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with one optional vlan
 * header.
 */
static void
parse_ethernet(struct ether_hdr *eth_hdr, uint16_t *ethertype, uint16_t *l2_len,
	uint16_t *l3_len, uint8_t *l4_proto)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	*l2_len = sizeof(struct ether_hdr);
	*ethertype = eth_hdr->ether_type;

	if (*ethertype == _htons(ETHER_TYPE_VLAN)) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		*l2_len += sizeof(struct vlan_hdr);
		*ethertype = vlan_hdr->eth_proto;
	}

	switch (*ethertype) {
	case _htons(ETHER_TYPE_IPv4):
		ipv4_hdr = (struct ipv4_hdr *)((char *)eth_hdr + *l2_len);
		*l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
		*l4_proto = ipv4_hdr->next_proto_id;
		break;
	case _htons(ETHER_TYPE_IPv6):
		ipv6_hdr = (struct ipv6_hdr *)((char *)eth_hdr + *l2_len);
		*l3_len = sizeof(struct ipv6_hdr);
		*l4_proto = ipv6_hdr->proto;
		break;
	default:
		*l3_len = 0;
		*l4_proto = 0;
		break;
	}
}
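/*
 * Note: parse_ethernet() handles at most one VLAN tag and does not walk
 * IPv6 extension headers; for any other ethertype it reports l3_len == 0
 * and l4_proto == 0, which the callers treat as an unsupported packet.
 */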
/* modify the IPv4 or IPv6 source address of a packet */
static void
change_ip_addresses(void *l3_hdr, uint16_t ethertype)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct ipv6_hdr *ipv6_hdr = l3_hdr;

	if (ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->src_addr =
			rte_cpu_to_be_32(rte_be_to_cpu_32(ipv4_hdr->src_addr) + 1);
	} else if (ethertype == _htons(ETHER_TYPE_IPv6)) {
		ipv6_hdr->src_addr[15] = ipv6_hdr->src_addr[15] + 1;
	}
}
/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, uint16_t ethertype, uint16_t l3_len,
	uint8_t l4_proto, uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct udp_hdr *udp_hdr;
	struct tcp_hdr *tcp_hdr;
	struct sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;

	if (ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;

		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
			ol_flags |= PKT_TX_IP_CKSUM;
		else
			ipv4_hdr->hdr_checksum = get_ipv4_cksum(ipv4_hdr);

		ol_flags |= PKT_TX_IPV4;
	} else if (ethertype == _htons(ETHER_TYPE_IPv6))
		ol_flags |= PKT_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */

	if (l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct udp_hdr *)((char *)l3_hdr + l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			udp_hdr->dgram_cksum = 0;
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
				ol_flags |= PKT_TX_UDP_CKSUM;
				udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
					ethertype);
			} else {
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(l3_hdr, udp_hdr,
						ethertype);
			}
		}
	} else if (l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + l3_len);
		tcp_hdr->cksum = 0;
		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
			ol_flags |= PKT_TX_TCP_CKSUM;
			tcp_hdr->cksum = get_psd_sum(l3_hdr, ethertype);
		} else {
			tcp_hdr->cksum =
				get_udptcp_checksum(l3_hdr, tcp_hdr, ethertype);
		}
	} else if (l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + l3_len);
		sctp_hdr->cksum = 0;
		/* sctp payload must be a multiple of 4 to be
		 * offloaded */
		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
			((ipv4_hdr->total_length & 0x3) == 0)) {
			ol_flags |= PKT_TX_SCTP_CKSUM;
		} else {
			/* XXX implement CRC32c, example available in
			 * RFC3309 */
		}
	}

	return ol_flags;
}
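/*
 * The flags returned above are OR'ed into m->ol_flags by the caller,
 * together with the l2_len/l3_len header lengths the PMD needs to locate
 * the fields it must fill in hardware. A UDP checksum of 0 is left alone
 * because RFC 768 defines 0 as "no checksum computed".
 */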
/* Calculate the checksum of outer header (only vxlan is supported,
 * meaning IP + UDP). The caller already checked that it's a vxlan
 * packet */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, uint16_t outer_ethertype,
	uint16_t outer_l3_len, uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
	struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
	struct udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM)
		ol_flags |= PKT_TX_VXLAN_CKSUM;

	if (outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;

		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM) == 0)
			ipv4_hdr->hdr_checksum = get_ipv4_cksum(ipv4_hdr);
	}

	udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + outer_l3_len);
	/* do not recalculate udp cksum if it was 0 */
	if (udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM) == 0) {
			if (outer_ethertype == _htons(ETHER_TYPE_IPv4))
				udp_hdr->dgram_cksum =
					get_ipv4_udptcp_checksum(ipv4_hdr,
						(uint16_t *)udp_hdr);
			else
				udp_hdr->dgram_cksum =
					get_ipv6_udptcp_checksum(ipv6_hdr,
						(uint16_t *)udp_hdr);
		}
	}

	return ol_flags;
}
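/*
 * Note that the outer UDP checksum of a vxlan packet is only recomputed
 * when it was non-zero on receive: RFC 7348 allows the encapsulating UDP
 * checksum to be zero, so a zero value is preserved as-is.
 */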
/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *    - modify the IPs in inner headers and in outer headers if any
 *    - reprocess the checksum of all supported layers. This is done in SW
 *      or HW, depending on testpmd command line configuration
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * VxLAN flag concerns the outer IP and UDP layer (if the packet is
 * recognized as a vxlan packet).
 */
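/*
 * Illustrative usage note (assumes the standard testpmd CLI): this engine
 * is selected at the testpmd prompt with "set fwd csum" followed by
 * "start"; the per-port TESTPMD_TX_OFFLOAD_* flags referenced above are
 * set through testpmd's tx checksum configuration commands.
 */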
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *m;
	struct ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	struct udp_hdr *udp_hdr;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint64_t ol_flags;
	uint16_t testpmd_ol_flags;
	uint8_t l4_proto;
	uint16_t ethertype = 0, outer_ethertype = 0;
	uint16_t l2_len = 0, l3_len = 0, outer_l2_len = 0, outer_l3_len = 0;
	int tunnel = 0;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* receive a burst of packets */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;

	txp = &ports[fs->tx_port];
	testpmd_ol_flags = txp->tx_ol_flags;
	for (i = 0; i < nb_rx; i++) {

		ol_flags = 0;
		tunnel = 0;
		m = pkts_burst[i];

		/* Update the L3/L4 checksum error packet statistics */
		rx_bad_ip_csum += ((m->ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
		rx_bad_l4_csum += ((m->ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
		parse_ethernet(eth_hdr, &ethertype, &l2_len, &l3_len, &l4_proto);
		l3_hdr = (char *)eth_hdr + l2_len;

		/* check if it's a supported tunnel (only vxlan for now) */
		if (l4_proto == IPPROTO_UDP) {
			udp_hdr = (struct udp_hdr *)((char *)l3_hdr + l3_len);

			/* currently, this flag is set by i40e only if the
			 * packet is vxlan */
			if (((m->ol_flags & PKT_RX_TUNNEL_IPV4_HDR) ||
					(m->ol_flags & PKT_RX_TUNNEL_IPV6_HDR)))
				tunnel = 1;
			/* else check udp destination port, 4789 is the default
			 * vxlan port (rfc7348) */
			else if (udp_hdr->dst_port == _htons(4789))
				tunnel = 1;

			if (tunnel == 1) {
				/* save the outer header and parse the inner
				 * frame that follows the vxlan header */
				outer_ethertype = ethertype;
				outer_l2_len = l2_len;
				outer_l3_len = l3_len;
				outer_l3_hdr = l3_hdr;

				eth_hdr = (struct ether_hdr *)((char *)udp_hdr +
					sizeof(struct udp_hdr) +
					sizeof(struct vxlan_hdr));

				parse_ethernet(eth_hdr, &ethertype, &l2_len,
					&l3_len, &l4_proto);
				l3_hdr = (char *)eth_hdr + l2_len;
			}
		}
		/* step 2: change all source IPs (v4 or v6) so we need
		 * to recompute the chksums even if they were correct */

		change_ip_addresses(l3_hdr, ethertype);
		if (tunnel == 1)
			change_ip_addresses(outer_l3_hdr, outer_ethertype);

		/* step 3: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC */

		/* process checksums of inner headers first */
		ol_flags |= process_inner_cksums(l3_hdr, ethertype,
			l3_len, l4_proto, testpmd_ol_flags);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (tunnel == 1) {
			ol_flags |= process_outer_cksums(outer_l3_hdr,
				outer_ethertype, outer_l3_len, testpmd_ol_flags);
		}
		/* step 4: fill the mbuf meta data (flags and header lengths) */

		if (tunnel == 1) {
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_VXLAN_CKSUM) {
				m->l2_len = outer_l2_len;
				m->l3_len = outer_l3_len;
				m->inner_l2_len = l2_len;
				m->inner_l3_len = l3_len;
			} else {
				/* if we don't do vxlan cksum in hw,
				   outer checksum will be wrong because
				   we changed the ip, but it shows that
				   we can process the inner header cksum
				   in the hw */
				m->l2_len = outer_l2_len + outer_l3_len +
					sizeof(struct udp_hdr) +
					sizeof(struct vxlan_hdr) + l2_len;
				m->l3_len = l3_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = l2_len;
			m->l3_len = l3_len;
		}
		m->ol_flags = ol_flags;
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};
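/*
 * Note (assumption about the surrounding application): csum_fwd_engine is
 * expected to be referenced from the fwd_engines[] table in testpmd.c so
 * that "set fwd csum" can select it at runtime.
 */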