/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_gtp.h>
#include <rte_vxlan.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_gro.h>
#include <rte_gso.h>
#include <rte_geneve.h>
#include <rte_errno.h>

#include "testpmd.h"
#define IP_DEFTTL 64 /* from RFC 1340. */

#define GRE_CHECKSUM_PRESENT 0x8000
#define GRE_KEY_PRESENT 0x2000
#define GRE_SEQUENCE_PRESENT 0x1000
/* each optional GRE field (checksum, key, sequence number) is 4 bytes */
#define GRE_EXT_LEN 4
#define GRE_SUPPORTED_FIELDS (GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT |\
                              GRE_SEQUENCE_PRESENT)
/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif
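/*
 * Illustration: _htons() expands to an integer constant expression, so it
 * is valid as a case label, as in parse_ethernet() below:
 *
 *     switch (eth_hdr->ether_type) {
 *     case _htons(RTE_ETHER_TYPE_IPV4):
 *             ...
 *     }
 *
 * By contrast, rte_cpu_to_be_16() may expand to a byte-swap intrinsic that
 * is not guaranteed to be a constant expression, and would be rejected here.
 */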
uint16_t vxlan_gpe_udp_port = RTE_VXLAN_GPE_DEFAULT_PORT;
uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT;
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
        uint16_t ethertype;
        uint8_t gso_enable;
        uint16_t l2_len;
        uint16_t l3_len;
        uint16_t l4_len;
        uint8_t l4_proto;
        uint8_t is_tunnel;
        uint16_t outer_ethertype;
        uint16_t outer_l2_len;
        uint16_t outer_l3_len;
        uint8_t outer_l4_proto;
        uint16_t tso_segsz;
        uint16_t tunnel_tso_segsz;
        uint32_t pkt_len;
};
/* simplified GRE header */
struct simple_gre_hdr {
        uint16_t flags;
        uint16_t proto;
} __rte_packed;
static uint16_t
get_udptcp_checksum(struct rte_mbuf *m, void *l3_hdr, uint16_t l4_off,
                    uint16_t ethertype)
{
        if (ethertype == _htons(RTE_ETHER_TYPE_IPV4))
                return rte_ipv4_udptcp_cksum_mbuf(m, l3_hdr, l4_off);
        else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
                return rte_ipv6_udptcp_cksum_mbuf(m, l3_hdr, l4_off);
}
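/*
 * Note: the _mbuf variants of the checksum helpers walk the mbuf segment
 * chain starting at l4_off, so the L4 checksum stays correct even when the
 * L4 header or payload spans several segments (e.g. after pkt_copy_split()).
 */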
/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
        struct rte_tcp_hdr *tcp_hdr;

        info->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
        info->l4_proto = ipv4_hdr->next_proto_id;

        /* only fill l4_len for TCP, it's useful for TSO */
        if (info->l4_proto == IPPROTO_TCP) {
                tcp_hdr = (struct rte_tcp_hdr *)
                        ((char *)ipv4_hdr + info->l3_len);
                /* data_off holds the header length in 32-bit words in its
                 * top nibble: (x & 0xf0) >> 2 == ((x >> 4) * 4) bytes */
                info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
        } else if (info->l4_proto == IPPROTO_UDP)
                info->l4_len = sizeof(struct rte_udp_hdr);
        else
                info->l4_len = 0;
}
/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
        struct rte_tcp_hdr *tcp_hdr;

        info->l3_len = sizeof(struct rte_ipv6_hdr);
        info->l4_proto = ipv6_hdr->proto;

        /* only fill l4_len for TCP, it's useful for TSO */
        if (info->l4_proto == IPPROTO_TCP) {
                tcp_hdr = (struct rte_tcp_hdr *)
                        ((char *)ipv6_hdr + info->l3_len);
                info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
        } else if (info->l4_proto == IPPROTO_UDP)
                info->l4_len = sizeof(struct rte_udp_hdr);
        else
                info->l4_len = 0;
}
/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with optional VLAN
 * headers. The l4_len field is only set for TCP and UDP (useful for TSO).
 */
static void
parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_vlan_hdr *vlan_hdr;

        info->l2_len = sizeof(struct rte_ether_hdr);
        info->ethertype = eth_hdr->ether_type;

        /* strip any number of stacked VLAN/QinQ tags */
        while (info->ethertype == _htons(RTE_ETHER_TYPE_VLAN) ||
               info->ethertype == _htons(RTE_ETHER_TYPE_QINQ)) {
                vlan_hdr = (struct rte_vlan_hdr *)
                        ((char *)eth_hdr + info->l2_len);
                info->l2_len += sizeof(struct rte_vlan_hdr);
                info->ethertype = vlan_hdr->eth_proto;
        }

        switch (info->ethertype) {
        case _htons(RTE_ETHER_TYPE_IPV4):
                ipv4_hdr = (struct rte_ipv4_hdr *)
                        ((char *)eth_hdr + info->l2_len);
                parse_ipv4(ipv4_hdr, info);
                break;
        case _htons(RTE_ETHER_TYPE_IPV6):
                ipv6_hdr = (struct rte_ipv6_hdr *)
                        ((char *)eth_hdr + info->l2_len);
                parse_ipv6(ipv6_hdr, info);
                break;
        default:
                info->l4_len = 0;
                info->l3_len = 0;
                info->l4_proto = 0;
                break;
        }
}
/* Fill in outer layers lengths and mark the packet as a tunnel packet */
static void
update_tunnel_outer(struct testpmd_offload_info *info)
{
        info->is_tunnel = 1;
        info->outer_ethertype = info->ethertype;
        info->outer_l2_len = info->l2_len;
        info->outer_l3_len = info->l3_len;
        info->outer_l4_proto = info->l4_proto;
}
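/*
 * Sketch of how the parse helpers below use update_tunnel_outer(): on entry,
 * info->{ethertype,l2_len,l3_len,l4_proto} describe the headers parsed so
 * far (the outer layers). update_tunnel_outer() snapshots them into the
 * outer_* fields, and the same fields are then reused for the inner headers:
 *
 *     parse_ethernet(eth_hdr, &info);   // l2/l3/l4 = outer headers
 *     parse_vxlan(udp_hdr, &info);      // outer_* = old l2/l3/l4,
 *                                       // l2/l3/l4 = inner headers
 */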
/*
 * Parse a GTP protocol header.
 * Optional fields and the next extension header type are not parsed.
 */
static void
parse_gtp(struct rte_udp_hdr *udp_hdr,
          struct testpmd_offload_info *info)
{
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_gtp_hdr *gtp_hdr;
        uint8_t gtp_len = sizeof(*gtp_hdr);
        uint8_t ip_ver;

        /* Check udp destination port. */
        if (udp_hdr->dst_port != _htons(RTE_GTPC_UDP_PORT) &&
            udp_hdr->src_port != _htons(RTE_GTPC_UDP_PORT) &&
            udp_hdr->dst_port != _htons(RTE_GTPU_UDP_PORT))
                return;

        update_tunnel_outer(info);
        info->l2_len = 0;

        gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr +
                  sizeof(struct rte_udp_hdr));
        if (gtp_hdr->e || gtp_hdr->s || gtp_hdr->pn)
                gtp_len += sizeof(struct rte_gtp_hdr_ext_word);
        /*
         * Check message type. If message type is 0xff, it is
         * a GTP data packet. If not, it is a GTP control packet
         */
        if (gtp_hdr->msg_type == 0xff) {
                ip_ver = *(uint8_t *)((char *)gtp_hdr + gtp_len);
                ip_ver = (ip_ver) & 0xf0;

                if (ip_ver == RTE_GTP_TYPE_IPV4) {
                        ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr +
                                   gtp_len);
                        info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
                        parse_ipv4(ipv4_hdr, info);
                } else if (ip_ver == RTE_GTP_TYPE_IPV6) {
                        ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr +
                                   gtp_len);
                        info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
                        parse_ipv6(ipv6_hdr, info);
                }
        } else {
                /* GTP control packet: no inner L3/L4 to report */
                info->ethertype = 0;
                info->l3_len = 0;
                info->l4_proto = 0;
        }
        info->l2_len += RTE_ETHER_GTP_HLEN;
}
/* Parse a vxlan header */
static void
parse_vxlan(struct rte_udp_hdr *udp_hdr,
            struct testpmd_offload_info *info)
{
        struct rte_ether_hdr *eth_hdr;

        /* check udp destination port: RTE_VXLAN_DEFAULT_PORT (4789) is the
         * default vxlan port (rfc7348)
         */
        if (udp_hdr->dst_port != _htons(RTE_VXLAN_DEFAULT_PORT))
                return;

        update_tunnel_outer(info);

        eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
                sizeof(struct rte_udp_hdr) +
                sizeof(struct rte_vxlan_hdr));

        parse_ethernet(eth_hdr, info);
        info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
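/*
 * Example layout after parse_vxlan() on Ether/IPv4/UDP/VXLAN/Ether/IPv4/TCP:
 * outer_l2_len = 14 (Ether), outer_l3_len = 20 (IPv4), and l2_len = inner
 * Ether (14) + RTE_ETHER_VXLAN_HLEN (8-byte UDP + 8-byte VXLAN = 16), so the
 * inner L3 header starts at outer_l2_len + outer_l3_len + l2_len.
 */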
/* Parse a vxlan-gpe header */
static void
parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
                struct testpmd_offload_info *info)
{
        struct rte_ether_hdr *eth_hdr;
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
        uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);

        /* Check udp destination port. */
        if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port))
                return;

        vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
                        sizeof(struct rte_udp_hdr));

        if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
            RTE_VXLAN_GPE_TYPE_IPV4) {
                update_tunnel_outer(info);

                ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
                           vxlan_gpe_len);

                parse_ipv4(ipv4_hdr, info);
                info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
                info->l2_len = 0;

        } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
                update_tunnel_outer(info);

                ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
                           vxlan_gpe_len);

                info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
                parse_ipv6(ipv6_hdr, info);
                info->l2_len = 0;

        } else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
                update_tunnel_outer(info);

                eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr +
                          vxlan_gpe_len);

                parse_ethernet(eth_hdr, info);
        } else
                return;

        info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
}
/* Parse a geneve header */
static void
parse_geneve(struct rte_udp_hdr *udp_hdr,
             struct testpmd_offload_info *info)
{
        struct rte_ether_hdr *eth_hdr;
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_ipv6_hdr *ipv6_hdr;
        struct rte_geneve_hdr *geneve_hdr;
        uint16_t geneve_len;

        /* Check udp destination port. */
        if (udp_hdr->dst_port != _htons(geneve_udp_port))
                return;

        geneve_hdr = (struct rte_geneve_hdr *)((char *)udp_hdr +
                     sizeof(struct rte_udp_hdr));
        geneve_len = sizeof(struct rte_geneve_hdr) + geneve_hdr->opt_len * 4;
        if (!geneve_hdr->proto || geneve_hdr->proto ==
            _htons(RTE_ETHER_TYPE_IPV4)) {
                update_tunnel_outer(info);
                ipv4_hdr = (struct rte_ipv4_hdr *)((char *)geneve_hdr +
                           geneve_len);
                parse_ipv4(ipv4_hdr, info);
                info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
                info->l2_len = 0;
        } else if (geneve_hdr->proto == _htons(RTE_ETHER_TYPE_IPV6)) {
                update_tunnel_outer(info);
                ipv6_hdr = (struct rte_ipv6_hdr *)((char *)geneve_hdr +
                           geneve_len);
                info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
                parse_ipv6(ipv6_hdr, info);
                info->l2_len = 0;
        } else if (geneve_hdr->proto == _htons(RTE_GENEVE_TYPE_ETH)) {
                update_tunnel_outer(info);
                eth_hdr = (struct rte_ether_hdr *)((char *)geneve_hdr +
                          geneve_len);
                parse_ethernet(eth_hdr, info);
        } else
                return;

        info->l2_len +=
                (sizeof(struct rte_udp_hdr) + sizeof(struct rte_geneve_hdr) +
                ((struct rte_geneve_hdr *)geneve_hdr)->opt_len * 4);
}
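/*
 * Geneve carries a variable-length option block: opt_len is counted in
 * 4-byte units, so the full encapsulation added to l2_len is
 * sizeof(udp) + sizeof(geneve) + opt_len * 4 bytes.
 */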
/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
        struct rte_ether_hdr *eth_hdr;
        struct rte_ipv4_hdr *ipv4_hdr;
        struct rte_ipv6_hdr *ipv6_hdr;
        uint8_t gre_len = 0;

        gre_len += sizeof(struct simple_gre_hdr);

        if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
                gre_len += GRE_EXT_LEN;
        if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT))
                gre_len += GRE_EXT_LEN;
        if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
                gre_len += GRE_EXT_LEN;

        if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV4)) {
                update_tunnel_outer(info);

                ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);

                parse_ipv4(ipv4_hdr, info);
                info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
                info->l2_len = 0;

        } else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV6)) {
                update_tunnel_outer(info);

                ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);

                info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
                parse_ipv6(ipv6_hdr, info);
                info->l2_len = 0;

        } else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_TEB)) {
                update_tunnel_outer(info);

                eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len);

                parse_ethernet(eth_hdr, info);
        } else
                return;

        info->l2_len += gre_len;
}
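/*
 * Example: a GRE header with the key and sequence bits set yields
 * gre_len = 4 (base header) + GRE_EXT_LEN (key) + GRE_EXT_LEN (sequence)
 * = 12 bytes; each optional field present adds a fixed 4 bytes.
 */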
/* Parse an encapsulated ip or ipv6 header */
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
        struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
        struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
        uint8_t ip_version;

        ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;

        if (ip_version != 4 && ip_version != 6)
                return;

        info->is_tunnel = 1;
        info->outer_ethertype = info->ethertype;
        info->outer_l2_len = info->l2_len;
        info->outer_l3_len = info->l3_len;

        if (ip_version == 4) {
                parse_ipv4(ipv4_hdr, info);
                info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
        } else {
                parse_ipv6(ipv6_hdr, info);
                info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
        }
        info->l2_len = 0;
}
/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
        uint64_t tx_offloads, struct rte_mbuf *m)
{
        struct rte_ipv4_hdr *ipv4_hdr = l3_hdr;
        struct rte_udp_hdr *udp_hdr;
        struct rte_tcp_hdr *tcp_hdr;
        struct rte_sctp_hdr *sctp_hdr;
        uint64_t ol_flags = 0;
        uint32_t max_pkt_len, tso_segsz = 0;
        uint16_t l4_off;

        /* ensure packet is large enough to require tso */
        if (!info->is_tunnel) {
                max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
                        info->tso_segsz;
                if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
                        tso_segsz = info->tso_segsz;
        } else {
                max_pkt_len = info->outer_l2_len + info->outer_l3_len +
                        info->l2_len + info->l3_len + info->l4_len +
                        info->tunnel_tso_segsz;
                if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
                        tso_segsz = info->tunnel_tso_segsz;
        }

        if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
                ol_flags |= RTE_MBUF_F_TX_IPV4;
                if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
                        ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
                } else {
                        if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) {
                                ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
                        } else {
                                ipv4_hdr->hdr_checksum = 0;
                                ipv4_hdr->hdr_checksum =
                                        rte_ipv4_cksum(ipv4_hdr);
                        }
                }
        } else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV6))
                ol_flags |= RTE_MBUF_F_TX_IPV6;
        else
                return 0; /* packet type not supported, nothing to do */

        if (info->l4_proto == IPPROTO_UDP) {
                udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
                /* do not recalculate udp cksum if it was 0 */
                if (udp_hdr->dgram_cksum != 0) {
                        if (tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) {
                                ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
                        } else {
                                if (info->is_tunnel)
                                        l4_off = info->outer_l2_len +
                                                info->outer_l3_len +
                                                info->l2_len + info->l3_len;
                                else
                                        l4_off = info->l2_len + info->l3_len;
                                udp_hdr->dgram_cksum = 0;
                                udp_hdr->dgram_cksum =
                                        get_udptcp_checksum(m, l3_hdr, l4_off,
                                                info->ethertype);
                        }
                }
                if (info->gso_enable)
                        ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
        } else if (info->l4_proto == IPPROTO_TCP) {
                tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
                if (tso_segsz)
                        ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
                else if (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) {
                        ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
                } else {
                        if (info->is_tunnel)
                                l4_off = info->outer_l2_len +
                                        info->outer_l3_len +
                                        info->l2_len + info->l3_len;
                        else
                                l4_off = info->l2_len + info->l3_len;
                        tcp_hdr->cksum = 0;
                        tcp_hdr->cksum =
                                get_udptcp_checksum(m, l3_hdr, l4_off,
                                        info->ethertype);
                }
                if (info->gso_enable)
                        ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
        } else if (info->l4_proto == IPPROTO_SCTP) {
                sctp_hdr = (struct rte_sctp_hdr *)
                        ((char *)l3_hdr + info->l3_len);
                /* sctp payload must be a multiple of 4 to be
                 * offloaded */
                if ((tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM) &&
                    ((ipv4_hdr->total_length & 0x3) == 0)) {
                        ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
                } else {
                        sctp_hdr->cksum = 0;
                        /* XXX implement CRC32c, example available in
                         * RFC3309 */
                }
        }

        return ol_flags;
}
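/*
 * Summary of the inner-checksum policy above: TSO wins (the NIC rewrites
 * the checksum per segment), otherwise a HW offload flag from the command
 * line maps to an RTE_MBUF_F_TX_*_CKSUM mbuf flag, otherwise the checksum
 * is computed in SW right here. A UDP checksum of 0 (meaning "no checksum"
 * over IPv4) is left untouched.
 */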
/* Calculate the checksum of outer header */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
        uint64_t tx_offloads, int tso_enabled, struct rte_mbuf *m)
{
        struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
        struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr;
        struct rte_udp_hdr *udp_hdr;
        uint64_t ol_flags = 0;

        if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
                ipv4_hdr->hdr_checksum = 0;
                ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;

                if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM)
                        ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
                else
                        ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
        } else
                ol_flags |= RTE_MBUF_F_TX_OUTER_IPV6;

        if (info->outer_l4_proto != IPPROTO_UDP)
                return ol_flags;

        udp_hdr = (struct rte_udp_hdr *)
                ((char *)outer_l3_hdr + info->outer_l3_len);

        if (tso_enabled)
                ol_flags |= RTE_MBUF_F_TX_TCP_SEG;

        /* Skip SW outer UDP checksum generation if HW supports it */
        if (tx_offloads & RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) {
                if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
                        udp_hdr->dgram_cksum
                                = rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
                else
                        udp_hdr->dgram_cksum
                                = rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);

                ol_flags |= RTE_MBUF_F_TX_OUTER_UDP_CKSUM;
                return ol_flags;
        }

        /* The outer UDP checksum is done in software. On the other hand,
         * for UDP tunneling like VXLAN or Geneve, the outer UDP checksum
         * may be set to zero.
         *
         * If a packet will be TSOed into small packets by the NIC, we cannot
         * set/calculate a non-zero checksum, because it would be a wrong
         * value after the packet is split into several small packets.
         */
        if (tso_enabled)
                udp_hdr->dgram_cksum = 0;

        /* do not recalculate udp cksum if it was 0 */
        if (udp_hdr->dgram_cksum != 0) {
                udp_hdr->dgram_cksum = 0;
                udp_hdr->dgram_cksum = get_udptcp_checksum(m, outer_l3_hdr,
                                info->outer_l2_len + info->outer_l3_len,
                                info->outer_ethertype);
        }

        return ol_flags;
}
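/*
 * Note on the HW path above: when RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM is
 * used, the UDP checksum field must be pre-seeded with the pseudo-header
 * checksum (rte_ipv4_phdr_cksum()/rte_ipv6_phdr_cksum()); the NIC then
 * folds the payload into it while transmitting.
 */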
/*
 * Performs actual copying.
 * Returns number of segments in the destination mbuf on success,
 * or negative error code on failure.
 */
static int
mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
        uint16_t seglen[], uint8_t nb_seg)
{
        uint32_t dlen, slen, tlen;
        uint32_t i, len;
        const struct rte_mbuf *m;
        const uint8_t *src;
        uint8_t *dst;

        dst = NULL;
        dlen = 0;
        slen = 0;
        tlen = 0;

        i = 0;
        m = ms;
        while (ms != NULL && i != nb_seg) {

                /* start of a new source segment */
                if (slen == 0) {
                        slen = rte_pktmbuf_data_len(ms);
                        src = rte_pktmbuf_mtod(ms, const uint8_t *);
                }

                /* start of a new destination segment */
                if (dlen == 0) {
                        dlen = RTE_MIN(seglen[i], slen);
                        md[i]->data_len = dlen;
                        md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
                        dst = rte_pktmbuf_mtod(md[i], uint8_t *);
                }

                len = RTE_MIN(slen, dlen);
                memcpy(dst, src, len);
                tlen += len;
                slen -= len;
                dlen -= len;
                src += len;
                dst += len;

                if (slen == 0)
                        ms = ms->next;
                if (dlen == 0)
                        i++;
        }

        if (ms != NULL)
                return -ENOBUFS;
        else if (tlen != m->pkt_len)
                return -EINVAL;

        md[0]->nb_segs = nb_seg;
        md[0]->pkt_len = tlen;
        md[0]->vlan_tci = m->vlan_tci;
        md[0]->vlan_tci_outer = m->vlan_tci_outer;
        md[0]->ol_flags = m->ol_flags;
        md[0]->tx_offload = m->tx_offload;

        return nb_seg;
}
/*
 * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
 * Copy packet contents and offload information into the new segmented mbuf.
 */
static struct rte_mbuf *
pkt_copy_split(const struct rte_mbuf *pkt)
{
        int32_t n, rc;
        uint32_t i, len, nb_seg;
        struct rte_mempool *mp;
        uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
        struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];

        mp = current_fwd_lcore()->mbp;

        if (tx_pkt_split == TX_PKT_SPLIT_RND)
                nb_seg = rte_rand() % tx_pkt_nb_segs + 1;
        else
                nb_seg = tx_pkt_nb_segs;

        memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));

        /* calculate number of segments to use and their length. */
        len = 0;
        for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
                len += seglen[i];
                md[i] = NULL;
        }

        n = pkt->pkt_len - len;

        /* update size of the last segment to fit rest of the packet */
        if (n >= 0) {
                seglen[i - 1] += n;
                len += n;
        }

        nb_seg = i;
        while (i != 0) {
                p = rte_pktmbuf_alloc(mp);
                if (p == NULL) {
                        TESTPMD_LOG(ERR,
                                "failed to allocate %u-th of %u mbuf "
                                "from mempool: %s\n",
                                nb_seg - i, nb_seg, mp->name);
                        break;
                }

                md[--i] = p;
                if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
                        TESTPMD_LOG(ERR, "mempool %s, %u-th segment: "
                                "expected seglen: %u, "
                                "actual mbuf tailroom: %u\n",
                                mp->name, i, seglen[i],
                                rte_pktmbuf_tailroom(md[i]));
                        break;
                }
        }

        /* all mbufs successfully allocated, do copy */
        if (i == 0) {
                rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
                if (rc < 0)
                        TESTPMD_LOG(ERR,
                                "mbuf_copy_split for %p(len=%u, nb_seg=%u) "
                                "into %u segments failed with error code: %d\n",
                                pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);

                /* figure out how many mbufs to free. */
                i = (rc >= 0) ? nb_seg : 0;
        }

        /* free unused mbufs */
        for (; i != nb_seg; i++) {
                rte_pktmbuf_free_seg(md[i]);
                md[i] = NULL;
        }

        return md[0];
}
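/*
 * Example: with tx_pkt_nb_segs = 3 and tx_pkt_seg_lengths = {64, 64, 64},
 * a 200-byte packet is copied into segments of 64, 64 and 72 bytes (the
 * last planned segment is grown to absorb the remainder). With
 * TX_PKT_SPLIT_RND the segment count is drawn randomly from
 * [1, tx_pkt_nb_segs].
 */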
#if defined(RTE_LIB_GRO) || defined(RTE_LIB_GSO)
/*
 * Re-calculate IP checksum for merged/fragmented packets.
 */
static void
pkts_ip_csum_recalc(struct rte_mbuf **pkts_burst, const uint16_t nb_pkts, uint64_t tx_offloads)
{
        int i;
        struct rte_ipv4_hdr *ipv4_hdr;

        for (i = 0; i < nb_pkts; i++) {
                if ((pkts_burst[i]->ol_flags & RTE_MBUF_F_TX_IPV4) &&
                    (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) == 0) {
                        ipv4_hdr = rte_pktmbuf_mtod_offset(pkts_burst[i],
                                        struct rte_ipv4_hdr *,
                                        pkts_burst[i]->l2_len);
                        ipv4_hdr->hdr_checksum = 0;
                        ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
                }
        }
}
#endif
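/*
 * Rationale: GRO merging and GSO splitting rewrite IPv4 header fields such
 * as total_length, which invalidates any checksum computed earlier in SW,
 * so it is redone here unless the NIC will recompute it anyway via
 * RTE_ETH_TX_OFFLOAD_IPV4_CKSUM.
 */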
/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *  - reprocess the checksum of all supported layers. This is done in SW
 *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *    segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP / outer UDP / GTP / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * OUTER_IP flag is only useful for tunnel packets.
 */
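/*
 * Typical testpmd configuration for this engine (illustrative command
 * sequence; see the testpmd user guide for the authoritative syntax):
 *
 *     testpmd> set fwd csum
 *     testpmd> csum set ip hw 0
 *     testpmd> csum set udp hw 0
 *     testpmd> csum parse-tunnel on 0
 *     testpmd> tso set 800 0
 *     testpmd> start
 */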
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
        struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
#ifdef RTE_LIB_GSO
        struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
        struct rte_gso_ctx *gso_ctx;
#endif
        struct rte_mbuf **tx_pkts_burst;
        struct rte_port *txp;
        struct rte_mbuf *m, *p;
        struct rte_ether_hdr *eth_hdr;
        void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
#ifdef RTE_LIB_GRO
        void *gro_ctx;
        uint16_t gro_pkts_num;
        uint8_t gro_enable;
#endif
        uint16_t nb_rx;
        uint16_t nb_tx;
        uint16_t nb_prep;
        uint16_t i;
        uint64_t rx_ol_flags, tx_ol_flags;
        uint64_t tx_offloads;
        uint32_t retry;
        uint32_t rx_bad_ip_csum;
        uint32_t rx_bad_l4_csum;
        uint32_t rx_bad_outer_l4_csum;
        uint32_t rx_bad_outer_ip_csum;
        struct testpmd_offload_info info;

        uint64_t start_tsc = 0;

        get_start_cycles(&start_tsc);

        /* receive a burst of packet */
        nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
                        nb_pkt_per_burst);
        inc_rx_burst_stats(fs, nb_rx);
        if (unlikely(nb_rx == 0))
                return;

        fs->rx_packets += nb_rx;
        rx_bad_ip_csum = 0;
        rx_bad_l4_csum = 0;
        rx_bad_outer_l4_csum = 0;
        rx_bad_outer_ip_csum = 0;
#ifdef RTE_LIB_GRO
        gro_enable = gro_ports[fs->rx_port].enable;
#endif

        txp = &ports[fs->tx_port];
        tx_offloads = txp->dev_conf.txmode.offloads;
        memset(&info, 0, sizeof(info));
        info.tso_segsz = txp->tso_segsz;
        info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
#ifdef RTE_LIB_GSO
        if (gso_ports[fs->tx_port].enable)
                info.gso_enable = 1;
#endif
        for (i = 0; i < nb_rx; i++) {
                if (likely(i < nb_rx - 1))
                        rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
                                                       void *));

                m = pkts_burst[i];
                info.is_tunnel = 0;
                info.pkt_len = rte_pktmbuf_pkt_len(m);
                tx_ol_flags = m->ol_flags &
                              (RTE_MBUF_F_INDIRECT | RTE_MBUF_F_EXTERNAL);
                rx_ol_flags = m->ol_flags;

                /* Update the L3/L4 checksum error packet statistics */
                if ((rx_ol_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) == RTE_MBUF_F_RX_IP_CKSUM_BAD)
                        rx_bad_ip_csum += 1;
                if ((rx_ol_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) == RTE_MBUF_F_RX_L4_CKSUM_BAD)
                        rx_bad_l4_csum += 1;
                if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD)
                        rx_bad_outer_l4_csum += 1;
                if (rx_ol_flags & RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD)
                        rx_bad_outer_ip_csum += 1;

                /* step 1: dissect packet, parsing optional vlan, ip4/ip6,
                 * vxlan and inner headers */

                eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
                rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
                                    &eth_hdr->dst_addr);
                rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
                                    &eth_hdr->src_addr);
                parse_ethernet(eth_hdr, &info);
                l3_hdr = (char *)eth_hdr + info.l2_len;

                /* check if it's a supported tunnel */
                if (txp->parse_tunnel) {
                        if (info.l4_proto == IPPROTO_UDP) {
                                struct rte_udp_hdr *udp_hdr;

                                udp_hdr = (struct rte_udp_hdr *)
                                        ((char *)l3_hdr + info.l3_len);
                                parse_gtp(udp_hdr, &info);
                                if (info.is_tunnel) {
                                        tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GTP;
                                        goto tunnel_update;
                                }
                                parse_vxlan_gpe(udp_hdr, &info);
                                if (info.is_tunnel) {
                                        tx_ol_flags |=
                                                RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE;
                                        goto tunnel_update;
                                }
                                parse_vxlan(udp_hdr, &info);
                                if (info.is_tunnel) {
                                        tx_ol_flags |=
                                                RTE_MBUF_F_TX_TUNNEL_VXLAN;
                                        goto tunnel_update;
                                }
                                parse_geneve(udp_hdr, &info);
                                if (info.is_tunnel) {
                                        tx_ol_flags |=
                                                RTE_MBUF_F_TX_TUNNEL_GENEVE;
                                        goto tunnel_update;
                                }
                                /* Always keep last. */
                                if (unlikely(RTE_ETH_IS_TUNNEL_PKT(
                                                m->packet_type) != 0)) {
                                        TESTPMD_LOG(DEBUG, "Unknown tunnel packet. UDP dst port: %hu",
                                                udp_hdr->dst_port);
                                }
                        } else if (info.l4_proto == IPPROTO_GRE) {
                                struct simple_gre_hdr *gre_hdr;

                                gre_hdr = (struct simple_gre_hdr *)
                                        ((char *)l3_hdr + info.l3_len);
                                parse_gre(gre_hdr, &info);
                                if (info.is_tunnel)
                                        tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_GRE;
                        } else if (info.l4_proto == IPPROTO_IPIP) {
                                void *encap_ip_hdr;

                                encap_ip_hdr = (char *)l3_hdr + info.l3_len;
                                parse_encap_ip(encap_ip_hdr, &info);
                                if (info.is_tunnel)
                                        tx_ol_flags |= RTE_MBUF_F_TX_TUNNEL_IPIP;
                        }
                }

tunnel_update:
                /* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
                if (info.is_tunnel) {
                        outer_l3_hdr = l3_hdr;
                        l3_hdr = (char *)l3_hdr + info.outer_l3_len + info.l2_len;
                }
                /* step 2: depending on user command line configuration,
                 * recompute checksum either in software or flag the
                 * mbuf to offload the calculation to the NIC. If TSO
                 * is configured, prepare the mbuf for TCP segmentation. */

                /* process checksums of inner headers first */
                tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
                        tx_offloads, m);

                /* Then process outer headers if any. Note that the software
                 * checksum will be wrong if one of the inner checksums is
                 * processed in hardware. */
                if (info.is_tunnel == 1) {
                        tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
                                        tx_offloads,
                                        !!(tx_ol_flags & RTE_MBUF_F_TX_TCP_SEG),
                                        m);
                }

                /* step 3: fill the mbuf meta data (flags and header lengths) */

                m->tx_offload = 0;
                if (info.is_tunnel == 1) {
                        if (info.tunnel_tso_segsz ||
                            (tx_offloads &
                             RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
                            (tx_offloads &
                             RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM)) {
                                m->outer_l2_len = info.outer_l2_len;
                                m->outer_l3_len = info.outer_l3_len;
                                m->l2_len = info.l2_len;
                                m->l3_len = info.l3_len;
                                m->l4_len = info.l4_len;
                                m->tso_segsz = info.tunnel_tso_segsz;
                        } else {
                                /* if there is an outer UDP cksum
                                 * processed in sw and the inner in hw,
                                 * the outer checksum will be wrong as
                                 * the payload will be modified by the
                                 * hardware */
                                m->l2_len = info.outer_l2_len +
                                        info.outer_l3_len + info.l2_len;
                                m->l3_len = info.l3_len;
                                m->l4_len = info.l4_len;
                        }
                } else {
                        /* this is only useful if an offload flag is
                         * set, but it does not hurt to fill it in any
                         * case */
                        m->l2_len = info.l2_len;
                        m->l3_len = info.l3_len;
                        m->l4_len = info.l4_len;
                        m->tso_segsz = info.tso_segsz;
                }
                m->ol_flags = tx_ol_flags;
                /* Do split & copy for the packet. */
                if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
                        p = pkt_copy_split(m);
                        if (p != NULL) {
                                rte_pktmbuf_free(m);
                                m = p;
                                pkts_burst[i] = m;
                        }
                }

                /* if verbose mode is enabled, dump debug info */
                if (verbose_level > 0) {
                        char buf[256];

                        printf("-----------------\n");
                        printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%u:\n",
                                fs->rx_port, m, m->pkt_len, m->nb_segs);
                        /* dump rx parsed packet info */
                        rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
                        printf("rx: l2_len=%d ethertype=%x l3_len=%d "
                                "l4_proto=%d l4_len=%d flags=%s\n",
                                info.l2_len, rte_be_to_cpu_16(info.ethertype),
                                info.l3_len, info.l4_proto, info.l4_len, buf);
                        if (rx_ol_flags & RTE_MBUF_F_RX_LRO)
                                printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
                        if (info.is_tunnel == 1)
                                printf("rx: outer_l2_len=%d outer_ethertype=%x "
                                        "outer_l3_len=%d\n", info.outer_l2_len,
                                        rte_be_to_cpu_16(info.outer_ethertype),
                                        info.outer_l3_len);
                        /* dump tx packet info */
                        if ((tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
                                            RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
                                            RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
                                            RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) ||
                                        info.tso_segsz != 0)
                                printf("tx: m->l2_len=%d m->l3_len=%d "
                                        "m->l4_len=%d\n",
                                        m->l2_len, m->l3_len, m->l4_len);
                        if (info.is_tunnel == 1) {
                                if ((tx_offloads &
                                    RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
                                    (tx_offloads &
                                    RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
                                    (tx_ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))
                                        printf("tx: m->outer_l2_len=%d "
                                                "m->outer_l3_len=%d\n",
                                                m->outer_l2_len,
                                                m->outer_l3_len);
                                if (info.tunnel_tso_segsz != 0 &&
                                    (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
                                        printf("tx: m->tso_segsz=%d\n",
                                                m->tso_segsz);
                        } else if (info.tso_segsz != 0 &&
                                   (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG))
                                printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
                        rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
                        printf("tx: flags=%s", buf);
                        printf("\n");
                }
        }
#ifdef RTE_LIB_GRO
        if (unlikely(gro_enable)) {
                if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
                        nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
                                        &(gro_ports[fs->rx_port].param));
                } else {
                        gro_ctx = current_fwd_lcore()->gro_ctx;
                        nb_rx = rte_gro_reassemble(pkts_burst, nb_rx, gro_ctx);

                        if (++fs->gro_times >= gro_flush_cycles) {
                                gro_pkts_num = rte_gro_get_pkt_count(gro_ctx);
                                if (gro_pkts_num > MAX_PKT_BURST - nb_rx)
                                        gro_pkts_num = MAX_PKT_BURST - nb_rx;

                                nb_rx += rte_gro_timeout_flush(gro_ctx, 0,
                                                RTE_GRO_TCP_IPV4,
                                                &pkts_burst[nb_rx],
                                                gro_pkts_num);
                                fs->gro_times = 0;
                        }
                }

                pkts_ip_csum_recalc(pkts_burst, nb_rx, tx_offloads);
        }
#endif

#ifdef RTE_LIB_GSO
        if (gso_ports[fs->tx_port].enable != 0) {
                uint16_t nb_segments = 0;

                gso_ctx = &(current_fwd_lcore()->gso_ctx);
                gso_ctx->gso_size = gso_max_segment_size;
                for (i = 0; i < nb_rx; i++) {
                        int ret;

                        ret = rte_gso_segment(pkts_burst[i], gso_ctx,
                                        &gso_segments[nb_segments],
                                        GSO_MAX_PKT_BURST - nb_segments);
                        if (ret >= 1) {
                                /* pkts_burst[i] can be freed safely here. */
                                rte_pktmbuf_free(pkts_burst[i]);
                                nb_segments += ret;
                        } else if (ret == 0) {
                                /* 0 means it can be transmitted directly
                                 * without gso.
                                 */
                                gso_segments[nb_segments] = pkts_burst[i];
                                nb_segments += 1;
                        } else {
                                TESTPMD_LOG(DEBUG, "Unable to segment packet");
                                rte_pktmbuf_free(pkts_burst[i]);
                        }
                }

                tx_pkts_burst = gso_segments;
                nb_rx = nb_segments;

                pkts_ip_csum_recalc(tx_pkts_burst, nb_rx, tx_offloads);
        } else
#endif
                tx_pkts_burst = pkts_burst;

        nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
                        tx_pkts_burst, nb_rx);
        if (nb_prep != nb_rx)
                fprintf(stderr,
                        "Preparing packet burst to transmit failed: %s\n",
                        rte_strerror(rte_errno));

        nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
                        nb_prep);

        /*
         * Retry if necessary
         */
        if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
                retry = 0;
                while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
                        rte_delay_us(burst_tx_delay_time);
                        nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
                                        &tx_pkts_burst[nb_tx], nb_rx - nb_tx);
                }
        }
        fs->tx_packets += nb_tx;
        fs->rx_bad_ip_csum += rx_bad_ip_csum;
        fs->rx_bad_l4_csum += rx_bad_l4_csum;
        fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum;
        fs->rx_bad_outer_ip_csum += rx_bad_outer_ip_csum;

        inc_tx_burst_stats(fs, nb_tx);
        if (unlikely(nb_tx < nb_rx)) {
                fs->fwd_dropped += (nb_rx - nb_tx);
                do {
                        rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
                } while (++nb_tx < nb_rx);
        }

        get_end_cycles(fs, start_tsc);
}
static void
stream_init_checksum_forward(struct fwd_stream *fs)
{
        bool rx_stopped, tx_stopped;

        rx_stopped = ports[fs->rx_port].rxq[fs->rx_queue].state ==
                     RTE_ETH_QUEUE_STATE_STOPPED;
        tx_stopped = ports[fs->tx_port].txq[fs->tx_queue].state ==
                     RTE_ETH_QUEUE_STATE_STOPPED;
        fs->disabled = rx_stopped || tx_stopped;
}
struct fwd_engine csum_fwd_engine = {
        .fwd_mode_name = "csum",
        .port_fwd_begin = NULL,
        .port_fwd_end = NULL,
        .stream_init = stream_init_checksum_forward,
        .packet_fwd = pkt_burst_checksum_forward,
};