/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   Copyright 2014 6WIND S.A.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include "testpmd.h"
#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bits words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)

#define GRE_KEY_PRESENT 0x2000
#define GRE_KEY_LEN     4
#define GRE_SUPPORTED_FIELDS GRE_KEY_PRESENT
/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif
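
/*
 * Being a pure constant expression, _htons() is usable where a case label
 * requires one, e.g. "case _htons(ETHER_TYPE_IPv4):" below. On a
 * little-endian CPU this expands to the byte-swapped constant (0x0800
 * becomes 0x0008); on big-endian it is the identity.
 */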
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
	uint16_t ethertype;
	uint16_t l2_len;
	uint16_t l3_len;
	uint16_t l4_len;
	uint8_t l4_proto;
	uint8_t is_tunnel;
	uint16_t outer_ethertype;
	uint16_t outer_l2_len;
	uint16_t outer_l3_len;
	uint8_t outer_l4_proto;
	uint16_t tso_segsz;
};

/* simplified GRE header */
struct simple_gre_hdr {
	uint16_t flags;
	uint16_t proto;
} __attribute__((__packed__));
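
/*
 * simple_gre_hdr covers only the first 4 bytes of a GRE header (RFC 2784):
 * the 16-bit flags/version word and the encapsulated protocol's ethertype.
 * The optional 4-byte key field (RFC 2890) is accounted for in parse_gre()
 * via GRE_KEY_LEN but is not represented in the structure itself.
 */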
static uint16_t
get_psd_sum(void *l3_hdr, uint16_t ethertype, uint64_t ol_flags)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
}

static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
}
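
/*
 * Note on the two helpers above: when the L4 checksum is offloaded to the
 * NIC, the checksum field must be seeded with the pseudo-header sum only
 * (get_psd_sum); the hardware then folds in the L4 header and payload.
 * On the pure software path, get_udptcp_checksum() computes the complete
 * checksum over pseudo-header, L4 header and payload.
 */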
/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
	info->l4_proto = ipv4_hdr->next_proto_id;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else
		info->l4_len = 0;
}
/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = sizeof(struct ipv6_hdr);
	info->l4_proto = ipv6_hdr->proto;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else
		info->l4_len = 0;
}
/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with one optional vlan
 * header. The l4_len argument is only set in case of TCP (useful for TSO).
 */
static void
parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	info->l2_len = sizeof(struct ether_hdr);
	info->ethertype = eth_hdr->ether_type;

	if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		info->l2_len += sizeof(struct vlan_hdr);
		info->ethertype = vlan_hdr->eth_proto;
	}

	switch (info->ethertype) {
	case _htons(ETHER_TYPE_IPv4):
		ipv4_hdr = (struct ipv4_hdr *) ((char *)eth_hdr + info->l2_len);
		parse_ipv4(ipv4_hdr, info);
		break;
	case _htons(ETHER_TYPE_IPv6):
		ipv6_hdr = (struct ipv6_hdr *) ((char *)eth_hdr + info->l2_len);
		parse_ipv6(ipv6_hdr, info);
		break;
	default:
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
		break;
	}
}
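
/*
 * parse_ethernet() is also re-entered by the tunnel parsers below
 * (parse_vxlan, and parse_gre when the payload is an Ethernet frame) to
 * dissect the inner frame after the outer_* fields have been saved.
 */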
/* Parse a vxlan header */
static void
parse_vxlan(struct udp_hdr *udp_hdr, struct testpmd_offload_info *info,
	uint64_t mbuf_olflags)
{
	struct ether_hdr *eth_hdr;

	/* check udp destination port, 4789 is the default vxlan port
	 * (rfc7348) or that the rx offload flag is set (i40e only
	 * currently) */
	if (udp_hdr->dst_port != _htons(4789) &&
		(mbuf_olflags & (PKT_RX_TUNNEL_IPV4_HDR |
			PKT_RX_TUNNEL_IPV6_HDR)) == 0)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;

	eth_hdr = (struct ether_hdr *)((char *)udp_hdr +
		sizeof(struct udp_hdr) +
		sizeof(struct vxlan_hdr));

	parse_ethernet(eth_hdr, info);
	info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
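
/*
 * Layout handled above (RFC 7348):
 *   outer Ether / (vlan) / outer IP / UDP (dport 4789) / VXLAN (8 bytes) /
 *   inner Ether / ...
 * so after the inner frame is parsed, l2_len grows by ETHER_VXLAN_HLEN,
 * i.e. the UDP header plus the VXLAN header on top of the inner Ethernet
 * header.
 */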
/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	uint8_t gre_len = 0;

	/* check which fields are supported */
	if ((gre_hdr->flags & _htons(~GRE_SUPPORTED_FIELDS)) != 0)
		return;

	gre_len += sizeof(struct simple_gre_hdr);

	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
		gre_len += GRE_KEY_LEN;

	if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);

		info->ethertype = _htons(ETHER_TYPE_IPv6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {
		info->is_tunnel = 1;
		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		eth_hdr = (struct ether_hdr *)((char *)gre_hdr + gre_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += gre_len;
}
/* Parse an encapsulated ip or ipv6 header */
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr = encap_ip;
	struct ipv6_hdr *ipv6_hdr = encap_ip;
	uint8_t ip_version;

	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;

	if (ip_version != 4 && ip_version != 6)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;

	if (ip_version == 4) {
		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
	} else {
		parse_ipv6(ipv6_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv6);
	}
	info->l2_len = 0;
}
/* modify the IPv4 or IPv6 source address of a packet */
static void
change_ip_addresses(void *l3_hdr, uint16_t ethertype)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct ipv6_hdr *ipv6_hdr = l3_hdr;

	if (ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->src_addr =
			rte_cpu_to_be_32(rte_be_to_cpu_32(ipv4_hdr->src_addr) + 1);
	} else if (ethertype == _htons(ETHER_TYPE_IPv6)) {
		ipv6_hdr->src_addr[15] = ipv6_hdr->src_addr[15] + 1;
	}
}
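
/*
 * Incrementing the source address guarantees that every received checksum
 * becomes stale, so the recomputation performed below is always exercised
 * and can be verified with a capture on the peer (see step 2 of the
 * forward loop).
 */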
/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
	uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct udp_hdr *udp_hdr;
	struct tcp_hdr *tcp_hdr;
	struct sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;

	if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;

		ol_flags |= PKT_TX_IPV4;
		if (info->tso_segsz != 0 && info->l4_proto == IPPROTO_TCP) {
			ol_flags |= PKT_TX_IP_CKSUM;
		} else {
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_IP_CKSUM)
				ol_flags |= PKT_TX_IP_CKSUM;
			else
				ipv4_hdr->hdr_checksum =
					rte_ipv4_cksum(ipv4_hdr);
		}
	} else if (info->ethertype == _htons(ETHER_TYPE_IPv6))
		ol_flags |= PKT_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */

	if (info->l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			udp_hdr->dgram_cksum = 0;
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_UDP_CKSUM) {
				ol_flags |= PKT_TX_UDP_CKSUM;
				udp_hdr->dgram_cksum = get_psd_sum(l3_hdr,
					info->ethertype, ol_flags);
			} else {
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(l3_hdr, udp_hdr,
						info->ethertype);
			}
		}
	} else if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
		tcp_hdr->cksum = 0;
		if (info->tso_segsz != 0) {
			ol_flags |= PKT_TX_TCP_SEG;
			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
				ol_flags);
		} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_TCP_CKSUM) {
			ol_flags |= PKT_TX_TCP_CKSUM;
			tcp_hdr->cksum = get_psd_sum(l3_hdr, info->ethertype,
				ol_flags);
		} else {
			tcp_hdr->cksum =
				get_udptcp_checksum(l3_hdr, tcp_hdr,
					info->ethertype);
		}
	} else if (info->l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len);
		sctp_hdr->cksum = 0;
		/* sctp payload must be a multiple of 4 to be
		 * offloaded */
		if ((testpmd_ol_flags & TESTPMD_TX_OFFLOAD_SCTP_CKSUM) &&
			((ipv4_hdr->total_length & 0x3) == 0)) {
			ol_flags |= PKT_TX_SCTP_CKSUM;
		} else {
			/* XXX implement CRC32c, example available in
			 * RFC3309 */
		}
	}

	return ol_flags;
}
/* Calculate the checksums of the outer header (outer IP, plus outer UDP
 * for vxlan packets; for GRE and IPIP tunnels there is no outer UDP, so
 * only the outer IP checksum is processed, see the outer_l4_proto check
 * below) */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
	uint16_t testpmd_ol_flags)
{
	struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
	struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
	struct udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;
		ol_flags |= PKT_TX_OUTER_IPV4;

		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
		else
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM)
		ol_flags |= PKT_TX_OUTER_IPV6;

	if (info->outer_l4_proto != IPPROTO_UDP)
		return ol_flags;

	/* outer UDP checksum is always done in software as we have no
	 * hardware supporting it today, and no API for it. */

	udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);
	/* do not recalculate udp cksum if it was 0 */
	if (udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4))
			udp_hdr->dgram_cksum =
				rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
		else
			udp_hdr->dgram_cksum =
				rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
	}

	return ol_flags;
}
/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *  - modify the IPs in inner headers and in outer headers if any
 *  - reprocess the checksum of all supported layers. This is done in SW
 *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *    segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * OUTER_IP flag is only useful for tunnel packets.
 */
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_port *txp;
	struct rte_mbuf *m;
	struct ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t i;
	uint64_t ol_flags;
	uint16_t testpmd_ol_flags;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
	struct testpmd_offload_info info;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* receive a burst of packets */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;

	txp = &ports[fs->tx_port];
	testpmd_ol_flags = txp->tx_ol_flags;
	memset(&info, 0, sizeof(info));
	info.tso_segsz = txp->tso_segsz;
	for (i = 0; i < nb_rx; i++) {

		ol_flags = 0;
		info.is_tunnel = 0;
		m = pkts_burst[i];

		/* Update the L3/L4 checksum error packet statistics */
		rx_bad_ip_csum += ((m->ol_flags & PKT_RX_IP_CKSUM_BAD) != 0);
		rx_bad_l4_csum += ((m->ol_flags & PKT_RX_L4_CKSUM_BAD) != 0);

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
		parse_ethernet(eth_hdr, &info);
		l3_hdr = (char *)eth_hdr + info.l2_len;

		/* check if it's a supported tunnel */
		if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_PARSE_TUNNEL) {
			if (info.l4_proto == IPPROTO_UDP) {
				struct udp_hdr *udp_hdr;

				udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
					info.l3_len);
				parse_vxlan(udp_hdr, &info, m->ol_flags);
			} else if (info.l4_proto == IPPROTO_GRE) {
				struct simple_gre_hdr *gre_hdr;

				gre_hdr = (struct simple_gre_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gre(gre_hdr, &info);
			} else if (info.l4_proto == IPPROTO_IPIP) {
				void *encap_ip_hdr;

				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
				parse_encap_ip(encap_ip_hdr, &info);
			}
		}

		/* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
		if (info.is_tunnel) {
			outer_l3_hdr = l3_hdr;
			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
		}
		/* step 2: change all source IPs (v4 or v6) so we need
		 * to recompute the chksums even if they were correct */

		change_ip_addresses(l3_hdr, info.ethertype);
		if (info.is_tunnel == 1)
			change_ip_addresses(outer_l3_hdr, info.outer_ethertype);

		/* step 3: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		ol_flags |= process_inner_cksums(l3_hdr, &info, testpmd_ol_flags);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (info.is_tunnel == 1) {
			ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
				testpmd_ol_flags);
		}
		/* step 4: fill the mbuf meta data (flags and header lengths) */

		if (info.is_tunnel == 1) {
			if (testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM) {
				m->outer_l2_len = info.outer_l2_len;
				m->outer_l3_len = info.outer_l3_len;
				m->l2_len = info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			} else {
				/* if there is an outer UDP cksum
				 * processed in sw and the inner in hw,
				 * the outer checksum will be wrong as
				 * the payload will be modified by the
				 * hardware */
				m->l2_len = info.outer_l2_len +
					info.outer_l3_len + info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = info.l2_len;
			m->l3_len = info.l3_len;
			m->l4_len = info.l4_len;
		}
		m->tso_segsz = info.tso_segsz;
		m->ol_flags = ol_flags;
		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			struct {
				uint64_t flag;
				uint64_t mask;
			} tx_flags[] = {
				{ PKT_TX_IP_CKSUM, PKT_TX_IP_CKSUM },
				{ PKT_TX_UDP_CKSUM, PKT_TX_L4_MASK },
				{ PKT_TX_TCP_CKSUM, PKT_TX_L4_MASK },
				{ PKT_TX_SCTP_CKSUM, PKT_TX_L4_MASK },
				{ PKT_TX_IPV4, PKT_TX_IPV4 },
				{ PKT_TX_IPV6, PKT_TX_IPV6 },
				{ PKT_TX_OUTER_IP_CKSUM, PKT_TX_OUTER_IP_CKSUM },
				{ PKT_TX_OUTER_IPV4, PKT_TX_OUTER_IPV4 },
				{ PKT_TX_OUTER_IPV6, PKT_TX_OUTER_IPV6 },
				{ PKT_TX_TCP_SEG, PKT_TX_TCP_SEG },
			};
			unsigned j;
			const char *name;

			printf("-----------------\n");
			/* dump rx parsed packet info */
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
				"l4_proto=%d l4_len=%d\n",
				info.l2_len, rte_be_to_cpu_16(info.ethertype),
				info.l3_len, info.l4_proto, info.l4_len);
			if (info.is_tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
					"outer_l3_len=%d\n", info.outer_l2_len,
					rte_be_to_cpu_16(info.outer_ethertype),
					info.outer_l3_len);
			/* dump tx packet info */
			if ((testpmd_ol_flags & (TESTPMD_TX_OFFLOAD_IP_CKSUM |
						TESTPMD_TX_OFFLOAD_UDP_CKSUM |
						TESTPMD_TX_OFFLOAD_TCP_CKSUM |
						TESTPMD_TX_OFFLOAD_SCTP_CKSUM)) ||
					info.tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
					"m->l4_len=%d\n",
					m->l2_len, m->l3_len, m->l4_len);
			if ((info.is_tunnel == 1) &&
				(testpmd_ol_flags & TESTPMD_TX_OFFLOAD_OUTER_IP_CKSUM))
				printf("tx: m->outer_l2_len=%d m->outer_l3_len=%d\n",
					m->outer_l2_len, m->outer_l3_len);
			if (info.tso_segsz != 0)
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			printf("tx: flags=");
			for (j = 0; j < sizeof(tx_flags)/sizeof(*tx_flags); j++) {
				name = rte_get_tx_ol_flag_name(tx_flags[j].flag);
				if ((m->ol_flags & tx_flags[j].mask) ==
						tx_flags[j].flag)
					printf("%s ", name);
			}
			printf("\n");
		}
	}
	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}
struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};
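
/*
 * This engine is selected at runtime with testpmd's "set fwd csum"
 * command; the TESTPMD_TX_OFFLOAD_* bits tested above are driven by the
 * companion "csum set ..." commands (see the testpmd command line
 * documentation for the exact syntax).
 */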