/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_vxlan.h>
#include <rte_sctp.h>
#include <rte_gtp.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_gro.h>
#include <rte_gso.h>
#include <rte_geneve.h>
#include <rte_errno.h>

#include "testpmd.h"
#define IP_DEFTTL 64 /* from RFC 1340. */

#define GRE_CHECKSUM_PRESENT	0x8000
#define GRE_KEY_PRESENT		0x2000
#define GRE_SEQUENCE_PRESENT	0x1000
#define GRE_EXT_LEN		4
#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
				 GRE_SEQUENCE_PRESENT)
/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif
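/*
 * For example, _htons() keeps case labels constant expressions:
 *
 *     switch (eth_type) {
 *     case _htons(RTE_ETHER_TYPE_IPV4):
 *             ...
 *     }
 *
 * whereas a call such as rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4) is an
 * inline function and would not be a valid case label on little-endian
 * targets.
 */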
uint16_t vxlan_gpe_udp_port = RTE_VXLAN_GPE_DEFAULT_PORT;
uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT;
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
	uint16_t ethertype;
	uint8_t gso_enable;
	uint16_t l2_len;
	uint16_t l3_len;
	uint16_t l4_len;
	uint8_t l4_proto;
	uint8_t is_tunnel;
	uint16_t outer_ethertype;
	uint16_t outer_l2_len;
	uint16_t outer_l3_len;
	uint8_t outer_l4_proto;
	uint16_t tso_segsz;
	uint16_t tunnel_tso_segsz;
	uint32_t pkt_len;
};
/* simplified GRE header */
struct simple_gre_hdr {
	uint16_t flags;
	uint16_t proto;
} __rte_packed;
static uint16_t
get_udptcp_checksum(struct rte_mbuf *m, void *l3_hdr, uint16_t l4_off,
		    uint16_t ethertype)
{
	if (ethertype == _htons(RTE_ETHER_TYPE_IPV4))
		return rte_ipv4_udptcp_cksum_mbuf(m, l3_hdr, l4_off);
	else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
		return rte_ipv6_udptcp_cksum_mbuf(m, l3_hdr, l4_off);
}
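/*
 * Note: the _mbuf variants of the L4 checksum helpers walk the mbuf
 * segment chain starting at l4_off, so a multi-segment packet does not
 * need to be linearized first (unlike rte_ipv4_udptcp_cksum(), which
 * assumes the headers and payload are contiguous in memory).
 */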
/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
	struct rte_tcp_hdr *tcp_hdr;

	info->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
	info->l4_proto = ipv4_hdr->next_proto_id;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct rte_tcp_hdr *)
			((char *)ipv4_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else if (info->l4_proto == IPPROTO_UDP)
		info->l4_len = sizeof(struct rte_udp_hdr);
	else
		info->l4_len = 0;
}
/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
	struct rte_tcp_hdr *tcp_hdr;

	info->l3_len = sizeof(struct rte_ipv6_hdr);
	info->l4_proto = ipv6_hdr->proto;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct rte_tcp_hdr *)
			((char *)ipv6_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else if (info->l4_proto == IPPROTO_UDP)
		info->l4_len = sizeof(struct rte_udp_hdr);
	else
		info->l4_len = 0;
}
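/*
 * In both parsers above, the TCP header length comes from the high
 * nibble of tcp_hdr->data_off (header length in 32-bit words), hence
 * the mask and the ">> 2" (i.e. nibble * 4 bytes). Note also that
 * parse_ipv6() does not walk IPv6 extension headers: l3_len is always
 * the fixed 40-byte base header, a simplification acceptable for this
 * test application.
 */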
/*
 * Parse an Ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with optional VLAN
 * headers. The l4_len field is only set in case of TCP (useful for TSO).
 */
static void
parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_vlan_hdr *vlan_hdr;

	info->l2_len = sizeof(struct rte_ether_hdr);
	info->ethertype = eth_hdr->ether_type;

	while (info->ethertype == _htons(RTE_ETHER_TYPE_VLAN) ||
	       info->ethertype == _htons(RTE_ETHER_TYPE_QINQ)) {
		vlan_hdr = (struct rte_vlan_hdr *)
			((char *)eth_hdr + info->l2_len);
		info->l2_len += sizeof(struct rte_vlan_hdr);
		info->ethertype = vlan_hdr->eth_proto;
	}

	switch (info->ethertype) {
	case _htons(RTE_ETHER_TYPE_IPV4):
		ipv4_hdr = (struct rte_ipv4_hdr *)
			((char *)eth_hdr + info->l2_len);
		parse_ipv4(ipv4_hdr, info);
		break;
	case _htons(RTE_ETHER_TYPE_IPV6):
		ipv6_hdr = (struct rte_ipv6_hdr *)
			((char *)eth_hdr + info->l2_len);
		parse_ipv6(ipv6_hdr, info);
		break;
	default:
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
		break;
	}
}
/* Fill in outer layers length */
static void
update_tunnel_outer(struct testpmd_offload_info *info)
{
	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;
}
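/*
 * After update_tunnel_outer(), the ethertype/l2_len/l3_len/l4_proto
 * fields are reused by the tunnel parsers below to describe the *inner*
 * headers, while the snapshot taken above describes the outer ones.
 */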
/*
 * Parse a GTP protocol header.
 * Optional fields and the next-extension-header type are not parsed.
 */
static void
parse_gtp(struct rte_udp_hdr *udp_hdr,
	  struct testpmd_offload_info *info)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_gtp_hdr *gtp_hdr;
	uint8_t gtp_len = sizeof(*gtp_hdr);
	uint8_t ip_ver;

	/* Check udp destination port. */
	if (udp_hdr->dst_port != _htons(RTE_GTPC_UDP_PORT) &&
	    udp_hdr->src_port != _htons(RTE_GTPC_UDP_PORT) &&
	    udp_hdr->dst_port != _htons(RTE_GTPU_UDP_PORT))
		return;

	update_tunnel_outer(info);
	info->l2_len = 0;

	gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr +
		  sizeof(struct rte_udp_hdr));

	/*
	 * Check the message type. If the message type is 0xff, it is
	 * a GTP data packet; otherwise it is a GTP control packet.
	 */
	if (gtp_hdr->msg_type == 0xff) {
		ip_ver = *(uint8_t *)((char *)udp_hdr +
			 sizeof(struct rte_udp_hdr) +
			 sizeof(struct rte_gtp_hdr));
		ip_ver &= 0xf0;

		if (ip_ver == RTE_GTP_TYPE_IPV4) {
			ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr +
				   gtp_len);
			info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
			parse_ipv4(ipv4_hdr, info);
		} else if (ip_ver == RTE_GTP_TYPE_IPV6) {
			ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr +
				   gtp_len);
			info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
			parse_ipv6(ipv6_hdr, info);
		}
	} else {
		/* GTP control packet: nothing more to parse */
		info->ethertype = 0;
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
	}

	info->l2_len += RTE_ETHER_GTP_HLEN;
}
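/*
 * GTP-U carries a bare IP payload (no inner Ethernet header), which is
 * why info->l2_len is reset to 0 above and then only accounts for the
 * UDP + GTP encapsulation (RTE_ETHER_GTP_HLEN).
 */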
/* Parse a vxlan header */
static void
parse_vxlan(struct rte_udp_hdr *udp_hdr,
	    struct testpmd_offload_info *info)
{
	struct rte_ether_hdr *eth_hdr;

	/* check udp destination port: RTE_VXLAN_DEFAULT_PORT (4789) is the
	 * default vxlan port (rfc7348)
	 */
	if (udp_hdr->dst_port != _htons(RTE_VXLAN_DEFAULT_PORT))
		return;

	update_tunnel_outer(info);

	eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
		sizeof(struct rte_udp_hdr) +
		sizeof(struct rte_vxlan_hdr));

	parse_ethernet(eth_hdr, info);
	info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
/* Parse a vxlan-gpe header */
static void
parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
		struct testpmd_offload_info *info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
	uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);

	/* Check udp destination port. */
	if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port))
		return;

	vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
				sizeof(struct rte_udp_hdr));

	if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
	    RTE_VXLAN_GPE_TYPE_IPV4) {
		update_tunnel_outer(info);

		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
			   vxlan_gpe_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
		info->l2_len = 0;

	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
		update_tunnel_outer(info);

		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
			   vxlan_gpe_len);

		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
		update_tunnel_outer(info);

		eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr +
			  vxlan_gpe_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
}
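/*
 * Unlike plain VXLAN, which always encapsulates an Ethernet frame,
 * VXLAN-GPE and Geneve carry a next-protocol field, so the payload may
 * be bare IPv4/IPv6 (in which case the inner l2_len is reset to 0) or
 * a full Ethernet frame.
 */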
/* Parse a geneve header */
static void
parse_geneve(struct rte_udp_hdr *udp_hdr,
	     struct testpmd_offload_info *info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_geneve_hdr *geneve_hdr;
	uint16_t geneve_len;

	/* Check udp destination port. */
	if (udp_hdr->dst_port != _htons(geneve_udp_port))
		return;

	geneve_hdr = (struct rte_geneve_hdr *)((char *)udp_hdr +
		     sizeof(struct rte_udp_hdr));
	/* Geneve options are counted in 4-byte words */
	geneve_len = sizeof(struct rte_geneve_hdr) + geneve_hdr->opt_len * 4;
	if (!geneve_hdr->proto || geneve_hdr->proto ==
	    _htons(RTE_ETHER_TYPE_IPV4)) {
		update_tunnel_outer(info);
		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)geneve_hdr +
			   geneve_len);
		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
		info->l2_len = 0;
	} else if (geneve_hdr->proto == _htons(RTE_ETHER_TYPE_IPV6)) {
		update_tunnel_outer(info);
		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)geneve_hdr +
			   geneve_len);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;
	} else if (geneve_hdr->proto == _htons(RTE_GENEVE_TYPE_ETH)) {
		update_tunnel_outer(info);
		eth_hdr = (struct rte_ether_hdr *)((char *)geneve_hdr +
			  geneve_len);
		parse_ethernet(eth_hdr, info);
	} else
		return;

	/* add udp + geneve (including options) */
	info->l2_len += sizeof(struct rte_udp_hdr) + geneve_len;
}
/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	uint8_t gre_len = 0;

	gre_len += sizeof(struct simple_gre_hdr);

	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
		gre_len += GRE_EXT_LEN;
	if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT))
		gre_len += GRE_EXT_LEN;
	if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
		gre_len += GRE_EXT_LEN;

	if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV4)) {
		update_tunnel_outer(info);

		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV6)) {
		update_tunnel_outer(info);

		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);

		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_TEB)) {
		update_tunnel_outer(info);

		eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += gre_len;
}
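/*
 * Each optional GRE field (checksum, key, sequence number) is 32 bits,
 * hence the fixed GRE_EXT_LEN increment per flag above. The optional
 * GRE checksum itself is not validated here; the header is only
 * skipped to reach the encapsulated payload.
 */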
/* Parse an encapsulated ip or ipv6 header */
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
	struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
	struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
	uint8_t ip_version;

	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;

	if (ip_version != 4 && ip_version != 6)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;

	if (ip_version == 4) {
		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
	} else {
		parse_ipv6(ipv6_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
	}
	info->l2_len = 0;
}
/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
	uint64_t tx_offloads, struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ipv4_hdr = l3_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;
	uint32_t max_pkt_len, tso_segsz = 0;
	uint16_t l4_off;

	/* ensure packet is large enough to require tso */
	if (!info->is_tunnel) {
		max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
			info->tso_segsz;
		if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tso_segsz;
	} else {
		max_pkt_len = info->outer_l2_len + info->outer_l3_len +
			info->l2_len + info->l3_len + info->l4_len +
			info->tunnel_tso_segsz;
		if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tunnel_tso_segsz;
	}

	if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
		ol_flags |= RTE_MBUF_F_TX_IPV4;
		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
			ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
		} else {
			if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
				ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
			} else {
				ipv4_hdr->hdr_checksum = 0;
				ipv4_hdr->hdr_checksum =
					rte_ipv4_cksum(ipv4_hdr);
			}
		}
	} else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV6))
		ol_flags |= RTE_MBUF_F_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */

	if (info->l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
				ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
			} else {
				/* inner L4 offset from the packet start */
				if (info->is_tunnel)
					l4_off = info->outer_l2_len +
						info->outer_l3_len +
						info->l2_len + info->l3_len;
				else
					l4_off = info->l2_len + info->l3_len;
				udp_hdr->dgram_cksum = 0;
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(m, l3_hdr, l4_off,
						info->ethertype);
			}
		}
		if (info->gso_enable)
			ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
	} else if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
		if (tso_segsz)
			ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
		else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
			ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
		} else {
			/* inner L4 offset from the packet start */
			if (info->is_tunnel)
				l4_off = info->outer_l2_len +
					info->outer_l3_len +
					info->l2_len + info->l3_len;
			else
				l4_off = info->l2_len + info->l3_len;
			tcp_hdr->cksum = 0;
			tcp_hdr->cksum =
				get_udptcp_checksum(m, l3_hdr, l4_off,
					info->ethertype);
		}
		if (info->gso_enable)
			ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
	} else if (info->l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct rte_sctp_hdr *)
			((char *)l3_hdr + info->l3_len);
		/* sctp payload must be a multiple of 4 to be
		 * offloaded */
		if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
			((rte_be_to_cpu_16(ipv4_hdr->total_length) & 0x3)
				== 0)) {
			ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
		} else {
			sctp_hdr->cksum = 0;
			/* XXX implement CRC32c, example available in
			 * RFC3309 */
		}
	}

	return ol_flags;
}
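/*
 * A rough sketch of the mbuf contract assumed above: when a checksum
 * is left to hardware, the caller sets the matching RTE_MBUF_F_TX_*
 * flag and fills m->l2_len/l3_len (plus l4_len and tso_segsz for TSO)
 * so the NIC can locate each header; when done in software, the value
 * is written straight into the packet and no offload flag is set.
 */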
/* Calculate the checksum of outer header */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
	uint64_t tx_offloads, int tso_enabled, struct rte_mbuf *m)
{
	struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
	struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
		ipv4_hdr->hdr_checksum = 0;
		ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;

		if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
		else
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else
		ol_flags |= RTE_MBUF_F_TX_OUTER_IPV6;

	if (info->outer_l4_proto != IPPROTO_UDP)
		return ol_flags;

	udp_hdr = (struct rte_udp_hdr *)
		((char *)outer_l3_hdr + info->outer_l3_len);

	if (tso_enabled)
		ol_flags |= RTE_MBUF_F_TX_TCP_SEG;

	/* Skip SW outer UDP checksum generation if HW supports it */
	if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
			udp_hdr->dgram_cksum
				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
		else
			udp_hdr->dgram_cksum
				= rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);

		ol_flags |= RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
		return ol_flags;
	}

	/* The outer UDP checksum is computed in software. Note that for
	 * UDP tunnels such as VXLAN or Geneve, a zero outer UDP checksum
	 * is legitimate and means "not computed".
	 *
	 * If the NIC will split the packet with TSO, we cannot set a
	 * non-zero checksum here: it would be wrong for every segment
	 * produced after the split, so leave it at zero.
	 */
	if (tso_enabled)
		udp_hdr->dgram_cksum = 0;

	/* do not recalculate udp cksum if it was 0 */
	if (udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		udp_hdr->dgram_cksum = get_udptcp_checksum(m, outer_l3_hdr,
					info->outer_l2_len + info->outer_l3_len,
					info->outer_ethertype);
	}

	return ol_flags;
}
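/*
 * Why the pseudo-header checksum above: when the outer UDP checksum is
 * offloaded, NICs generally expect the checksum field to be pre-seeded
 * with the IP pseudo-header sum (rte_ipv4_phdr_cksum() or
 * rte_ipv6_phdr_cksum()) and then complete the calculation over the
 * payload in hardware.
 */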
/*
 * Helper function.
 * Performs actual copying.
 * Returns number of segments in the destination mbuf on success,
 * or negative error code on failure.
 */
static int
mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
	uint16_t seglen[], uint8_t nb_seg)
{
	uint32_t dlen, slen, tlen;
	uint32_t i, len;
	const struct rte_mbuf *m;
	const uint8_t *src;
	uint8_t *dst;

	dlen = 0;
	slen = 0;
	tlen = 0;

	dst = NULL;
	src = NULL;

	m = ms;
	i = 0;
	while (ms != NULL && i != nb_seg) {

		/* start of a new source segment */
		if (slen == 0) {
			slen = rte_pktmbuf_data_len(ms);
			src = rte_pktmbuf_mtod(ms, const uint8_t *);
		}

		/* start of a new destination segment */
		if (dlen == 0) {
			dlen = RTE_MIN(seglen[i], slen);
			md[i]->data_len = dlen;
			md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
			dst = rte_pktmbuf_mtod(md[i], uint8_t *);
		}

		len = RTE_MIN(slen, dlen);
		memcpy(dst, src, len);
		tlen += len;
		slen -= len;
		dlen -= len;
		dst += len;
		src += len;

		if (slen == 0)
			ms = ms->next;
		if (dlen == 0)
			i++;
	}

	if (i != nb_seg)
		return -ENOBUFS;
	else if (tlen != m->pkt_len)
		return -EINVAL;

	md[0]->nb_segs = nb_seg;
	md[0]->pkt_len = tlen;
	md[0]->vlan_tci = m->vlan_tci;
	md[0]->vlan_tci_outer = m->vlan_tci_outer;
	md[0]->ol_flags = m->ol_flags;
	md[0]->tx_offload = m->tx_offload;

	return nb_seg;
}
/*
 * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
 * Copy packet contents and offload information into the new segmented mbuf.
 */
static struct rte_mbuf *
pkt_copy_split(const struct rte_mbuf *pkt)
{
	int32_t n, rc;
	uint32_t i, len, nb_seg;
	struct rte_mempool *mp;
	uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
	struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];

	mp = current_fwd_lcore()->mbp;

	if (tx_pkt_split == TX_PKT_SPLIT_RND)
		nb_seg = rte_rand() % tx_pkt_nb_segs + 1;
	else
		nb_seg = tx_pkt_nb_segs;

	memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));

	/* calculate number of segments to use and their length. */
	len = 0;
	for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
		len += seglen[i];
		md[i] = NULL;
	}

	n = pkt->pkt_len - len;

	/* update size of the last segment to fit rest of the packet */
	if (n >= 0) {
		seglen[i - 1] += n;
		len += n;
	}

	nb_seg = i;
	while (i != 0) {
		p = rte_pktmbuf_alloc(mp);
		if (p == NULL) {
			TESTPMD_LOG(ERR,
				"failed to allocate %u-th of %u mbuf "
				"from mempool: %s\n",
				nb_seg - i, nb_seg, mp->name);
			break;
		}

		md[--i] = p;
		if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
			TESTPMD_LOG(ERR, "mempool %s, %u-th segment: "
				"expected seglen: %u, "
				"actual mbuf tailroom: %u\n",
				mp->name, i, seglen[i],
				rte_pktmbuf_tailroom(md[i]));
			break;
		}
	}

	/* all mbufs successfully allocated, do copy */
	if (i == 0) {
		rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
		if (rc < 0)
			TESTPMD_LOG(ERR,
				"mbuf_copy_split for %p(len=%u, nb_seg=%u) "
				"into %u segments failed with error code: %d\n",
				pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);

		/* figure out how many mbufs to free. */
		i = RTE_MAX(rc, 0);
	}

	/* free unused mbufs */
	for (; i != nb_seg; i++) {
		rte_pktmbuf_free_seg(md[i]);
		md[i] = NULL;
	}

	return md[0];
}
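/*
 * The split sizes come from the testpmd configuration: "set txpkts"
 * fills tx_pkt_seg_lengths/tx_pkt_nb_segs and "set txsplit off|on|rand"
 * selects tx_pkt_split, with "rand" picking a random segment count per
 * packet as seen above.
 */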
/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *  - reprocess the checksum of all supported layers. This is done in SW
 *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *    segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP / outer UDP / GTP / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * OUTER_IP is only useful for tunnel packets.
 */
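/*
 * Typical interactive testpmd session exercising this engine (one
 * possible configuration, assuming port 0 and ports stopped while
 * changing csum settings):
 *
 *   testpmd> set fwd csum
 *   testpmd> csum set ip hw 0
 *   testpmd> csum set udp hw 0
 *   testpmd> csum parse-tunnel on 0
 *   testpmd> start
 */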
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
	struct rte_gso_ctx *gso_ctx;
	struct rte_mbuf **tx_pkts_burst;
	struct rte_port *txp;
	struct rte_mbuf *m, *p;
	struct rte_ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	void *gro_ctx;
	uint16_t gro_pkts_num;
	uint8_t gro_enable;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t nb_prep;
	uint16_t i;
	int ret;
	uint64_t rx_ol_flags, tx_ol_flags;
	uint64_t tx_offloads;
	uint32_t retry;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
	uint32_t rx_bad_outer_l4_csum;
	uint32_t rx_bad_outer_ip_csum;
	struct testpmd_offload_info info;

	uint64_t start_tsc = 0;

	get_start_cycles(&start_tsc);

	/* receive a burst of packet */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	inc_rx_burst_stats(fs, nb_rx);
	if (unlikely(nb_rx == 0))
		return;

	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;
	rx_bad_outer_l4_csum = 0;
	rx_bad_outer_ip_csum = 0;
	gro_enable = gro_ports[fs->rx_port].enable;

	txp = &ports[fs->tx_port];
	tx_offloads = txp->dev_conf.txmode.offloads;
	memset(&info, 0, sizeof(info));
	info.tso_segsz = txp->tso_segsz;
	info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
	if (gso_ports[fs->tx_port].enable)
		info.gso_enable = 1;
	for (i = 0; i < nb_rx; i++) {
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));

		m = pkts_burst[i];
		info.is_tunnel = 0;
		info.pkt_len = rte_pktmbuf_pkt_len(m);
		tx_ol_flags = m->ol_flags &
			      (RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL);
		rx_ol_flags = m->ol_flags;

		/* Update the L3/L4 checksum error packet statistics */
		if ((rx_ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) == RTE_MBUF_F_RX_IP_CKSUM_BAD)
			rx_bad_ip_csum += 1;
		if ((rx_ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) == RTE_MBUF_F_RX_L4_CKSUM_BAD)
			rx_bad_l4_csum += 1;
		if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD)
			rx_bad_outer_l4_csum += 1;
		if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD)
			rx_bad_outer_ip_csum += 1;

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
		rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				    &eth_hdr->dst_addr);
		rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
				    &eth_hdr->src_addr);
		parse_ethernet(eth_hdr, &info);
		l3_hdr = (char *)eth_hdr + info.l2_len;

		/* check if it's a supported tunnel */
		if (txp->parse_tunnel) {
			if (info.l4_proto == IPPROTO_UDP) {
				struct rte_udp_hdr *udp_hdr;

				udp_hdr = (struct rte_udp_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gtp(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GTP;
					goto tunnel_update;
				}
				parse_vxlan_gpe(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |=
						RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE;
					goto tunnel_update;
				}
				parse_vxlan(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |=
						RTE_MBUF_F_TX_TUNNEL_VXLAN;
					goto tunnel_update;
				}
				parse_geneve(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |=
						RTE_MBUF_F_TX_TUNNEL_GENEVE;
					goto tunnel_update;
				}
				/* Always keep last. */
				if (unlikely(RTE_ETH_IS_TUNNEL_PKT(
							m->packet_type) != 0)) {
					TESTPMD_LOG(DEBUG, "Unknown tunnel packet. UDP dst port: %hu",
						udp_hdr->dst_port);
				}
			} else if (info.l4_proto == IPPROTO_GRE) {
				struct simple_gre_hdr *gre_hdr;

				gre_hdr = (struct simple_gre_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gre(gre_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GRE;
			} else if (info.l4_proto == IPPROTO_IPIP) {
				void *encap_ip_hdr;

				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
				parse_encap_ip(encap_ip_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_IPIP;
			}
		}

tunnel_update:
		/* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
		if (info.is_tunnel) {
			outer_l3_hdr = l3_hdr;
			l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
		}
		/* step 2: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
			tx_offloads, m);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (info.is_tunnel == 1) {
			tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
					tx_offloads,
					!!(tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG),
					m);
		}

		/* step 3: fill the mbuf meta data (flags and header lengths) */

		if (info.is_tunnel == 1) {
			if (info.tunnel_tso_segsz ||
			    (tx_offloads &
			     RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
			    (tx_offloads &
			     RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
				m->outer_l2_len = info.outer_l2_len;
				m->outer_l3_len = info.outer_l3_len;
				m->l2_len = info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
				m->tso_segsz = info.tunnel_tso_segsz;
			} else {
				/* if there is an outer UDP cksum
				 * processed in sw and the inner in hw,
				 * the outer checksum will be wrong as
				 * the payload will be modified by the
				 * hardware */
				m->l2_len = info.outer_l2_len +
					info.outer_l3_len + info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = info.l2_len;
			m->l3_len = info.l3_len;
			m->l4_len = info.l4_len;
			m->tso_segsz = info.tso_segsz;
		}
		m->ol_flags = tx_ol_flags;

		/* Do split & copy for the packet. */
		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
			p = pkt_copy_split(m);
			if (p != NULL) {
				rte_pktmbuf_free(m);
				m = p;
				pkts_burst[i] = m;
			}
		}
		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			char buf[256];

			printf("-----------------\n");
			printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%u:\n",
				fs->rx_port, m, m->pkt_len, m->nb_segs);
			/* dump rx parsed packet info */
			rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
				"l4_proto=%d l4_len=%d flags=%s\n",
				info.l2_len, rte_be_to_cpu_16(info.ethertype),
				info.l3_len, info.l4_proto, info.l4_len, buf);
			if (rx_ol_flags & RTE_MBUF_F_RX_LRO)
				printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
			if (info.is_tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
					"outer_l3_len=%d\n", info.outer_l2_len,
					rte_be_to_cpu_16(info.outer_ethertype),
					info.outer_l3_len);
			/* dump tx packet info */
			if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
					    RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
					    RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
					    RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
				info.tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
					"m->l4_len=%d\n",
					m->l2_len, m->l3_len, m->l4_len);
			if (info.is_tunnel == 1) {
				if ((tx_offloads &
				    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
				    (tx_offloads &
				    RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
				    (tx_ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))
					printf("tx: m->outer_l2_len=%d "
						"m->outer_l3_len=%d\n",
						m->outer_l2_len,
						m->outer_l3_len);
				if (info.tunnel_tso_segsz != 0 &&
					(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
					printf("tx: m->tso_segsz=%d\n",
						m->tso_segsz);
			} else if (info.tso_segsz != 0 &&
					(m->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
			printf("tx: flags=%s", buf);
			printf("\n");
		}
	}
	if (unlikely(gro_enable)) {
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
					&(gro_ports[fs->rx_port].param));
		} else {
			gro_ctx = current_fwd_lcore()->gro_ctx;
			nb_rx = rte_gro_reassemble(pkts_burst, nb_rx, gro_ctx);

			if (++fs->gro_times >= gro_flush_cycles) {
				gro_pkts_num = rte_gro_get_pkt_count(gro_ctx);
				if (gro_pkts_num > MAX_PKT_BURST - nb_rx)
					gro_pkts_num = MAX_PKT_BURST - nb_rx;

				nb_rx += rte_gro_timeout_flush(gro_ctx, 0,
						RTE_GRO_TCP_IPV4,
						&pkts_burst[nb_rx],
						gro_pkts_num);
				fs->gro_times = 0;
			}
		}
	}
	if (gso_ports[fs->tx_port].enable != 0) {
		uint16_t nb_segments = 0;

		gso_ctx = &(current_fwd_lcore()->gso_ctx);
		gso_ctx->gso_size = gso_max_segment_size;
		for (i = 0; i < nb_rx; i++) {
			ret = rte_gso_segment(pkts_burst[i], gso_ctx,
					&gso_segments[nb_segments],
					GSO_MAX_PKT_BURST - nb_segments);
			if (ret >= 1) {
				/* pkts_burst[i] can be freed safely here. */
				rte_pktmbuf_free(pkts_burst[i]);
				nb_segments += ret;
			} else if (ret == 0) {
				/* 0 means it can be transmitted directly
				 * without gso.
				 */
				gso_segments[nb_segments] = pkts_burst[i];
				nb_segments += 1;
			} else {
				TESTPMD_LOG(DEBUG, "Unable to segment packet");
				rte_pktmbuf_free(pkts_burst[i]);
			}
		}

		tx_pkts_burst = gso_segments;
		nb_rx = nb_segments;
	} else
		tx_pkts_burst = pkts_burst;
	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
			tx_pkts_burst, nb_rx);
	if (nb_prep != nb_rx)
		fprintf(stderr,
			"Preparing packet burst to transmit failed: %s\n",
			rte_strerror(rte_errno));

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
			nb_prep);

	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&tx_pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;
	fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum;
	fs->rx_bad_outer_ip_csum += rx_bad_outer_ip_csum;

	inc_tx_burst_stats(fs, nb_tx);
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}

	get_end_cycles(fs, start_tsc);
}
struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};