/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_gro.h>
#include <rte_gso.h>

#include "testpmd.h"

#define IP_DEFTTL  64   /* from RFC 1340. */
#define IP_VERSION 0x40
#define IP_HDRLEN  0x05 /* default IP header length == five 32-bit words. */
#define IP_VHL_DEF (IP_VERSION | IP_HDRLEN)
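
/* IP_VHL_DEF packs the version and header-length nibbles into the first
 * byte of the IPv4 header: 0x45 = version 4, five 32-bit words (20 bytes). */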

#define GRE_CHECKSUM_PRESENT	0x8000
#define GRE_KEY_PRESENT		0x2000
#define GRE_SEQUENCE_PRESENT	0x1000
#define GRE_EXT_LEN		4
#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
				 GRE_SEQUENCE_PRESENT)
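
/* Each optional GRE field advertised by the flag bits above (checksum,
 * key, sequence number) occupies GRE_EXT_LEN (4) bytes in the header;
 * parse_gre() below sums them up to locate the encapsulated packet. */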

/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif
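
/* Because _htons() expands to an integer constant expression, it is valid
 * in a case label, e.g.:
 *
 *	switch (info->ethertype) {
 *	case _htons(ETHER_TYPE_IPv4):
 *		...
 *	}
 *
 * as done in parse_ethernet() below, where a call to rte_cpu_to_be_16()
 * would not compile.
 */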

uint16_t vxlan_gpe_udp_port = 4790;

/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
	uint16_t ethertype;
	uint8_t gso_enable;
	uint16_t l2_len;
	uint16_t l3_len;
	uint16_t l4_len;
	uint8_t l4_proto;
	uint8_t is_tunnel;
	uint16_t outer_ethertype;
	uint16_t outer_l2_len;
	uint16_t outer_l3_len;
	uint8_t outer_l4_proto;
	uint16_t tso_segsz;
	uint16_t tunnel_tso_segsz;
	uint32_t pkt_len;
};

/* simplified GRE header */
struct simple_gre_hdr {
	uint16_t flags;
	uint16_t proto;
} __attribute__((__packed__));

static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
	if (ethertype == _htons(ETHER_TYPE_IPv4))
		return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
	else /* assume ethertype == ETHER_TYPE_IPv6 */
		return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
}
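
/* Note: rte_ipv4_udptcp_cksum() and rte_ipv6_udptcp_cksum() checksum the
 * L4 header and payload as they currently are, so callers below first zero
 * the checksum field to obtain a valid result. */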

/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv4(struct ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
	info->l4_proto = ipv4_hdr->next_proto_id;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + info->l3_len);
		/* data_off keeps the header length in 32-bit words in its
		 * high nibble: (data_off >> 4) * 4 == (data_off & 0xf0) >> 2 */
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else if (info->l4_proto == IPPROTO_UDP)
		info->l4_len = sizeof(struct udp_hdr);
	else
		info->l4_len = 0;
}

/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv6(struct ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
	struct tcp_hdr *tcp_hdr;

	info->l3_len = sizeof(struct ipv6_hdr);
	info->l4_proto = ipv6_hdr->proto;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)ipv6_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else if (info->l4_proto == IPPROTO_UDP)
		info->l4_len = sizeof(struct udp_hdr);
	else
		info->l4_len = 0;
}

/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with one optional
 * vlan header. The l4_len field is only filled for TCP (useful for TSO).
 */
static void
parse_ethernet(struct ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;

	info->l2_len = sizeof(struct ether_hdr);
	info->ethertype = eth_hdr->ether_type;

	if (info->ethertype == _htons(ETHER_TYPE_VLAN)) {
		struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);

		info->l2_len += sizeof(struct vlan_hdr);
		info->ethertype = vlan_hdr->eth_proto;
	}

	switch (info->ethertype) {
	case _htons(ETHER_TYPE_IPv4):
		ipv4_hdr = (struct ipv4_hdr *)((char *)eth_hdr + info->l2_len);
		parse_ipv4(ipv4_hdr, info);
		break;
	case _htons(ETHER_TYPE_IPv6):
		ipv6_hdr = (struct ipv6_hdr *)((char *)eth_hdr + info->l2_len);
		parse_ipv6(ipv6_hdr, info);
		break;
	default:
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
		break;
	}
}

/* Parse a vxlan header */
static void
parse_vxlan(struct udp_hdr *udp_hdr,
	    struct testpmd_offload_info *info,
	    uint32_t pkt_type)
{
	struct ether_hdr *eth_hdr;

	/* check udp destination port, 4789 is the default vxlan port
	 * (rfc7348) or that the rx offload flag is set (i40e only
	 * currently) */
	if (udp_hdr->dst_port != _htons(4789) &&
		RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;

	eth_hdr = (struct ether_hdr *)((char *)udp_hdr +
		sizeof(struct udp_hdr) +
		sizeof(struct vxlan_hdr));

	parse_ethernet(eth_hdr, info);
	info->l2_len += ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
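
/* ETHER_VXLAN_HLEN is defined in rte_ether.h as sizeof(struct udp_hdr) +
 * sizeof(struct vxlan_hdr) (8 + 8 bytes), so the outer UDP and VXLAN
 * headers end up accounted in the inner l2_len above. */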

/* Parse a vxlan-gpe header */
static void
parse_vxlan_gpe(struct udp_hdr *udp_hdr,
		struct testpmd_offload_info *info)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	struct vxlan_gpe_hdr *vxlan_gpe_hdr;
	uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);

	/* Check udp destination port. */
	if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port))
		return;

	vxlan_gpe_hdr = (struct vxlan_gpe_hdr *)((char *)udp_hdr +
				sizeof(struct udp_hdr));

	if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
	    VXLAN_GPE_TYPE_IPV4) {
		info->is_tunnel = 1;

		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv4_hdr = (struct ipv4_hdr *)((char *)vxlan_gpe_hdr +
			   vxlan_gpe_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
		info->l2_len = 0;

	} else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_IPV6) {
		info->is_tunnel = 1;

		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv6_hdr = (struct ipv6_hdr *)((char *)vxlan_gpe_hdr +
			   vxlan_gpe_len);

		info->ethertype = _htons(ETHER_TYPE_IPv6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (vxlan_gpe_hdr->proto == VXLAN_GPE_TYPE_ETH) {
		info->is_tunnel = 1;

		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		eth_hdr = (struct ether_hdr *)((char *)vxlan_gpe_hdr +
			  vxlan_gpe_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += ETHER_VXLAN_GPE_HLEN;
}

/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
	struct ether_hdr *eth_hdr;
	struct ipv4_hdr *ipv4_hdr;
	struct ipv6_hdr *ipv6_hdr;
	uint8_t gre_len = 0;

	gre_len += sizeof(struct simple_gre_hdr);

	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
		gre_len += GRE_EXT_LEN;
	if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT))
		gre_len += GRE_EXT_LEN;
	if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
		gre_len += GRE_EXT_LEN;

	if (gre_hdr->proto == _htons(ETHER_TYPE_IPv4)) {
		info->is_tunnel = 1;

		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv4_hdr = (struct ipv4_hdr *)((char *)gre_hdr + gre_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_IPv6)) {
		info->is_tunnel = 1;

		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv6_hdr = (struct ipv6_hdr *)((char *)gre_hdr + gre_len);

		info->ethertype = _htons(ETHER_TYPE_IPv6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(ETHER_TYPE_TEB)) {
		info->is_tunnel = 1;

		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		eth_hdr = (struct ether_hdr *)((char *)gre_hdr + gre_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += gre_len;
}

/* Parse an encapsulated ip or ipv6 header */
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
	struct ipv4_hdr *ipv4_hdr = encap_ip;
	struct ipv6_hdr *ipv6_hdr = encap_ip;
	uint8_t ip_version;

	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;

	if (ip_version != 4 && ip_version != 6)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;

	if (ip_version == 4) {
		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv4);
	} else {
		parse_ipv6(ipv6_hdr, info);
		info->ethertype = _htons(ETHER_TYPE_IPv6);
	}
	info->l2_len = 0;
}

/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
	uint64_t tx_offloads)
{
	struct ipv4_hdr *ipv4_hdr = l3_hdr;
	struct udp_hdr *udp_hdr;
	struct tcp_hdr *tcp_hdr;
	struct sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;
	uint32_t max_pkt_len, tso_segsz = 0;

	/* ensure packet is large enough to require tso */
	if (!info->is_tunnel) {
		max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
			info->tso_segsz;
		if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tso_segsz;
	} else {
		max_pkt_len = info->outer_l2_len + info->outer_l3_len +
			info->l2_len + info->l3_len + info->l4_len +
			info->tunnel_tso_segsz;
		if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tunnel_tso_segsz;
	}

	if (info->ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;

		ol_flags |= PKT_TX_IPV4;
		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
			ol_flags |= PKT_TX_IP_CKSUM;
		} else {
			if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
				ol_flags |= PKT_TX_IP_CKSUM;
			else
				ipv4_hdr->hdr_checksum =
					rte_ipv4_cksum(ipv4_hdr);
		}
	} else if (info->ethertype == _htons(ETHER_TYPE_IPv6))
		ol_flags |= PKT_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */

	if (info->l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct udp_hdr *)((char *)l3_hdr + info->l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			udp_hdr->dgram_cksum = 0;
			if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
				ol_flags |= PKT_TX_UDP_CKSUM;
			else
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(l3_hdr, udp_hdr,
						info->ethertype);
		}
		if (info->gso_enable)
			ol_flags |= PKT_TX_UDP_SEG;
	} else if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + info->l3_len);
		tcp_hdr->cksum = 0;
		if (tso_segsz)
			ol_flags |= PKT_TX_TCP_SEG;
		else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
			ol_flags |= PKT_TX_TCP_CKSUM;
		else
			tcp_hdr->cksum =
				get_udptcp_checksum(l3_hdr, tcp_hdr,
					info->ethertype);
		if (info->gso_enable)
			ol_flags |= PKT_TX_TCP_SEG;
	} else if (info->l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct sctp_hdr *)((char *)l3_hdr + info->l3_len);
		sctp_hdr->cksum = 0;
		/* sctp payload must be a multiple of 4 to be
		 * offloaded */
		if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
			((ipv4_hdr->total_length & 0x3) == 0)) {
			ol_flags |= PKT_TX_SCTP_CKSUM;
		} else {
			/* XXX implement CRC32c, example available in
			 * RFC3309 */
		}
	}

	return ol_flags;
}

/* Calculate the checksum of outer header */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
	uint64_t tx_offloads, int tso_enabled)
{
	struct ipv4_hdr *ipv4_hdr = outer_l3_hdr;
	struct ipv6_hdr *ipv6_hdr = outer_l3_hdr;
	struct udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4)) {
		ipv4_hdr->hdr_checksum = 0;
		ol_flags |= PKT_TX_OUTER_IPV4;

		if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
		else
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else
		ol_flags |= PKT_TX_OUTER_IPV6;

	if (info->outer_l4_proto != IPPROTO_UDP)
		return ol_flags;

	/* Skip SW outer UDP checksum generation if HW supports it */
	if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
		ol_flags |= PKT_TX_OUTER_UDP_CKSUM;
		return ol_flags;
	}

	udp_hdr = (struct udp_hdr *)((char *)outer_l3_hdr + info->outer_l3_len);

	/* outer UDP checksum is done in software. On the receive side, for
	 * UDP tunneling such as VXLAN or Geneve, an outer UDP checksum of
	 * zero is accepted as "checksum not present".
	 *
	 * If a packet will be TSOed into small packets by the NIC, we cannot
	 * set/calculate a non-zero checksum, because it would be a wrong
	 * value after the packet is split into several small packets.
	 */
	if (tso_enabled)
		udp_hdr->dgram_cksum = 0;

	/* do not recalculate udp cksum if it was 0 */
	if (udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		if (info->outer_ethertype == _htons(ETHER_TYPE_IPv4))
			udp_hdr->dgram_cksum =
				rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
		else
			udp_hdr->dgram_cksum =
				rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
	}

	return ol_flags;
}

/*
 * Performs actual copying.
 * Returns number of segments in the destination mbuf on success,
 * or negative error code on failure.
 */
static int
mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
	uint16_t seglen[], uint8_t nb_seg)
{
	uint32_t dlen, slen, tlen;
	uint32_t i, len;
	const struct rte_mbuf *m;
	const uint8_t *src;
	uint8_t *dst;

	dst = NULL;
	dlen = 0;
	slen = 0;
	tlen = 0;

	i = 0;
	m = ms;
	while (ms != NULL && i != nb_seg) {

		if (slen == 0) {
			slen = rte_pktmbuf_data_len(ms);
			src = rte_pktmbuf_mtod(ms, const uint8_t *);
		}

		if (dlen == 0) {
			dlen = RTE_MIN(seglen[i], slen);
			md[i]->data_len = dlen;
			md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
			dst = rte_pktmbuf_mtod(md[i], uint8_t *);
		}

		len = RTE_MIN(slen, dlen);
		memcpy(dst, src, len);
		tlen += len;
		slen -= len;
		dlen -= len;
		dst += len;
		src += len;

		if (slen == 0)
			ms = ms->next;
		if (dlen == 0)
			i++;
	}

	if (i != nb_seg)
		return -ENOBUFS;
	else if (tlen != m->pkt_len)
		return -EINVAL;

	md[0]->nb_segs = nb_seg;
	md[0]->pkt_len = tlen;
	md[0]->vlan_tci = m->vlan_tci;
	md[0]->vlan_tci_outer = m->vlan_tci_outer;
	md[0]->ol_flags = m->ol_flags;
	md[0]->tx_offload = m->tx_offload;

	return nb_seg;
}

/*
 * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
 * Copy packet contents and offload information into the new segmented mbuf.
 */
static struct rte_mbuf *
pkt_copy_split(const struct rte_mbuf *pkt)
{
	int32_t n, rc;
	uint32_t i, len, nb_seg;
	struct rte_mempool *mp;
	uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
	struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];

	mp = current_fwd_lcore()->mbp;

	if (tx_pkt_split == TX_PKT_SPLIT_RND)
		nb_seg = random() % tx_pkt_nb_segs + 1;
	else
		nb_seg = tx_pkt_nb_segs;

	memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));

	/* calculate number of segments to use and their length. */
	len = 0;
	for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
		len += seglen[i];
		md[i] = NULL;
	}

	n = pkt->pkt_len - len;

	/* update size of the last segment to fit rest of the packet */
	if (n >= 0) {
		seglen[i - 1] += n;
		len += n;
	}

	nb_seg = i;
	while (i != 0) {
		p = rte_pktmbuf_alloc(mp);
		if (p == NULL) {
			TESTPMD_LOG(ERR,
				"failed to allocate %u-th of %u mbuf "
				"from mempool: %s\n",
				nb_seg - i, nb_seg, mp->name);
			break;
		}

		md[--i] = p;
		if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
			TESTPMD_LOG(ERR, "mempool %s, %u-th segment: "
				"expected seglen: %u, "
				"actual mbuf tailroom: %u\n",
				mp->name, i, seglen[i],
				rte_pktmbuf_tailroom(md[i]));
			break;
		}
	}

	/* all mbufs successfully allocated, do copy */
	if (i == 0) {
		rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
		if (rc < 0)
			TESTPMD_LOG(ERR,
				"mbuf_copy_split for %p(len=%u, nb_seg=%u) "
				"into %u segments failed with error code: %d\n",
				pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);

		/* figure out how many mbufs to free:
		 * all of them on failure, none on success */
		i = RTE_MAX(rc, 0);
	}

	/* free unused mbufs */
	for (; i != nb_seg; i++) {
		rte_pktmbuf_free_seg(md[i]);
		md[i] = NULL;
	}

	return md[0];
}

/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *  - reprocess the checksum of all supported layers. This is done in SW
 *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *    segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * OUTER_IP is only useful for tunnel packets.
 */
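
/* A typical testpmd session exercising this engine (illustrative only;
 * the exact command set depends on the testpmd version and on the
 * offload capabilities of the port):
 *
 *	testpmd> set fwd csum
 *	testpmd> csum set ip hw 0
 *	testpmd> csum set udp hw 0
 *	testpmd> csum parse-tunnel on 0
 *	testpmd> start
 */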
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
	struct rte_gso_ctx *gso_ctx;
	struct rte_mbuf **tx_pkts_burst;
	struct rte_port *txp;
	struct rte_mbuf *m, *p;
	struct ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	void *gro_ctx;
	uint16_t gro_pkts_num;
	uint8_t gro_enable;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t nb_prep;
	uint16_t i;
	uint64_t rx_ol_flags, tx_ol_flags;
	uint64_t tx_offloads;
	uint32_t retry;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
	uint32_t rx_bad_outer_l4_csum;
	struct testpmd_offload_info info;
	uint16_t nb_segments = 0;
	int ret;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t start_tsc;
	uint64_t end_tsc;
	uint64_t core_cycles;
#endif

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	start_tsc = rte_rdtsc();
#endif

	/* receive a burst of packet */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	if (unlikely(nb_rx == 0))
		return;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
#endif
	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;
	rx_bad_outer_l4_csum = 0;
	gro_enable = gro_ports[fs->rx_port].enable;

	txp = &ports[fs->tx_port];
	tx_offloads = txp->dev_conf.txmode.offloads;
	memset(&info, 0, sizeof(info));
	info.tso_segsz = txp->tso_segsz;
	info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
	if (gso_ports[fs->tx_port].enable)
		info.gso_enable = 1;

	for (i = 0; i < nb_rx; i++) {
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));

		m = pkts_burst[i];
		info.is_tunnel = 0;
		info.pkt_len = rte_pktmbuf_pkt_len(m);
		tx_ol_flags = m->ol_flags &
			      (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF);
		rx_ol_flags = m->ol_flags;

		/* Update the L3/L4 checksum error packet statistics */
		if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
			rx_bad_ip_csum += 1;
		if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
			rx_bad_l4_csum += 1;
		if (rx_ol_flags & PKT_RX_OUTER_L4_CKSUM_BAD)
			rx_bad_outer_l4_csum += 1;

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
		ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				&eth_hdr->d_addr);
		ether_addr_copy(&ports[fs->tx_port].eth_addr,
				&eth_hdr->s_addr);
		parse_ethernet(eth_hdr, &info);
		l3_hdr = (char *)eth_hdr + info.l2_len;

		/* check if it's a supported tunnel */
		if (txp->parse_tunnel) {
			if (info.l4_proto == IPPROTO_UDP) {
				struct udp_hdr *udp_hdr;

				udp_hdr = (struct udp_hdr *)((char *)l3_hdr +
					info.l3_len);
				parse_vxlan_gpe(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |= PKT_TX_TUNNEL_VXLAN_GPE;
				} else {
					parse_vxlan(udp_hdr, &info,
						    m->packet_type);
					if (info.is_tunnel)
						tx_ol_flags |=
							PKT_TX_TUNNEL_VXLAN;
				}
			} else if (info.l4_proto == IPPROTO_GRE) {
				struct simple_gre_hdr *gre_hdr;

				gre_hdr = (struct simple_gre_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gre(gre_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_GRE;
			} else if (info.l4_proto == IPPROTO_IPIP) {
				void *encap_ip_hdr;

				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
				parse_encap_ip(encap_ip_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
			}
		}

		/* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
		if (info.is_tunnel) {
			outer_l3_hdr = l3_hdr;
			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
		}

		/* step 2: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
			tx_offloads);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (info.is_tunnel == 1) {
			tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
					tx_offloads,
					!!(tx_ol_flags & PKT_TX_TCP_SEG));
		}

		/* step 3: fill the mbuf meta data (flags and header lengths) */

		m->tx_offload = 0;
		if (info.is_tunnel == 1) {
			if (info.tunnel_tso_segsz ||
			    (tx_offloads &
			     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
			    (tx_offloads &
			     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
			    (tx_ol_flags & PKT_TX_OUTER_IPV6)) {
				m->outer_l2_len = info.outer_l2_len;
				m->outer_l3_len = info.outer_l3_len;
				m->l2_len = info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
				m->tso_segsz = info.tunnel_tso_segsz;
			} else {
				/* if there is an outer UDP cksum
				   processed in sw and the inner in hw,
				   the outer checksum will be wrong as
				   the payload will be modified by the
				   hardware */
				m->l2_len = info.outer_l2_len +
					info.outer_l3_len + info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = info.l2_len;
			m->l3_len = info.l3_len;
			m->l4_len = info.l4_len;
			m->tso_segsz = info.tso_segsz;
		}
		m->ol_flags = tx_ol_flags;

		/* Do split & copy for the packet. */
		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
			p = pkt_copy_split(m);
			if (p != NULL) {
				rte_pktmbuf_free(m);
				m = p;
				pkts_burst[i] = m;
			}
		}

		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			char buf[256];

			printf("-----------------\n");
			printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%u:\n",
			       fs->rx_port, m, m->pkt_len, m->nb_segs);
			/* dump rx parsed packet info */
			rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
			       "l4_proto=%d l4_len=%d flags=%s\n",
			       info.l2_len, rte_be_to_cpu_16(info.ethertype),
			       info.l3_len, info.l4_proto, info.l4_len, buf);
			if (rx_ol_flags & PKT_RX_LRO)
				printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
			if (info.is_tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
				       "outer_l3_len=%d\n", info.outer_l2_len,
				       rte_be_to_cpu_16(info.outer_ethertype),
				       info.outer_l3_len);
			/* dump tx packet info */
			if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
					    DEV_TX_OFFLOAD_UDP_CKSUM |
					    DEV_TX_OFFLOAD_TCP_CKSUM |
					    DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
			    info.tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
				       "m->l4_len=%d\n",
				       m->l2_len, m->l3_len, m->l4_len);
			if (info.is_tunnel == 1) {
				if ((tx_offloads &
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
				    (tx_offloads &
				    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
				    (tx_ol_flags & PKT_TX_OUTER_IPV6))
					printf("tx: m->outer_l2_len=%d "
					       "m->outer_l3_len=%d\n",
					       m->outer_l2_len,
					       m->outer_l3_len);
				if (info.tunnel_tso_segsz != 0 &&
				    (m->ol_flags & PKT_TX_TCP_SEG))
					printf("tx: m->tso_segsz=%d\n",
					       m->tso_segsz);
			} else if (info.tso_segsz != 0 &&
				   (m->ol_flags & PKT_TX_TCP_SEG))
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
			printf("tx: flags=%s", buf);
			printf("\n");
		}
	}

	if (unlikely(gro_enable)) {
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
					&(gro_ports[fs->rx_port].param));
		} else {
			gro_ctx = current_fwd_lcore()->gro_ctx;
			nb_rx = rte_gro_reassemble(pkts_burst, nb_rx, gro_ctx);

			if (++fs->gro_times >= gro_flush_cycles) {
				gro_pkts_num = rte_gro_get_pkt_count(gro_ctx);
				if (gro_pkts_num > MAX_PKT_BURST - nb_rx)
					gro_pkts_num = MAX_PKT_BURST - nb_rx;

				nb_rx += rte_gro_timeout_flush(gro_ctx, 0,
						RTE_GRO_TCP_IPV4,
						&pkts_burst[nb_rx],
						gro_pkts_num);
				fs->gro_times = 0;
			}
		}
	}

	if (gso_ports[fs->tx_port].enable == 0)
		tx_pkts_burst = pkts_burst;
	else {
		gso_ctx = &(current_fwd_lcore()->gso_ctx);
		gso_ctx->gso_size = gso_max_segment_size;
		for (i = 0; i < nb_rx; i++) {
			ret = rte_gso_segment(pkts_burst[i], gso_ctx,
					&gso_segments[nb_segments],
					GSO_MAX_PKT_BURST - nb_segments);
			if (ret >= 0)
				nb_segments += ret;
			else {
				TESTPMD_LOG(DEBUG, "Unable to segment packet");
				rte_pktmbuf_free(pkts_burst[i]);
			}
		}

		tx_pkts_burst = gso_segments;
		nb_rx = nb_segments;
	}

	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
			tx_pkts_burst, nb_rx);
	if (nb_prep != nb_rx)
		printf("Preparing packet burst to transmit failed: %s\n",
				rte_strerror(rte_errno));

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
			nb_rx);

	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&tx_pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;
	fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	fs->tx_burst_stats.pkt_burst_spread[nb_tx]++;
#endif
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	end_tsc = rte_rdtsc();
	core_cycles = (end_tsc - start_tsc);
	fs->core_cycles = (uint64_t) (fs->core_cycles + core_cycles);
#endif
}

struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};