/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#include <sys/queue.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_gtp.h>
#include <rte_vxlan.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_gro.h>
#include <rte_gso.h>
#include <rte_geneve.h>

#include "testpmd.h"
#define IP_DEFTTL  64   /* from RFC 1340. */

#define GRE_CHECKSUM_PRESENT	0x8000
#define GRE_KEY_PRESENT		0x2000
#define GRE_SEQUENCE_PRESENT	0x1000
#define GRE_EXT_LEN		4	/* each optional GRE field is 4 bytes */
#define GRE_SUPPORTED_FIELDS	(GRE_CHECKSUM_PRESENT | GRE_KEY_PRESENT | \
				 GRE_SEQUENCE_PRESENT)
/* We cannot use rte_cpu_to_be_16() on a constant in a switch/case */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
#define _htons(x) ((uint16_t)((((x) & 0x00ffU) << 8) | (((x) & 0xff00U) >> 8)))
#else
#define _htons(x) (x)
#endif
uint16_t vxlan_gpe_udp_port = 4790;
uint16_t geneve_udp_port = RTE_GENEVE_DEFAULT_PORT;
/* structure that caches offload info for the current packet */
struct testpmd_offload_info {
	uint16_t ethertype;
	uint8_t gso_enable;
	uint16_t l2_len;
	uint16_t l3_len;
	uint16_t l4_len;
	uint8_t l4_proto;
	uint8_t is_tunnel;
	uint16_t outer_ethertype;
	uint16_t outer_l2_len;
	uint16_t outer_l3_len;
	uint8_t outer_l4_proto;
	uint16_t tso_segsz;
	uint16_t tunnel_tso_segsz;
	uint32_t pkt_len;
};
/* simplified GRE header */
struct simple_gre_hdr {
	uint16_t flags;
	uint16_t proto;
} __rte_packed;
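/*
 * Layout note (RFC 2784/2890): the base GRE header is 2 bytes of
 * flags/version followed by a 2-byte protocol type; the checksum, key
 * and sequence fields are each optional 4-byte extensions, which is why
 * parse_gre() below adds GRE_EXT_LEN once per flag bit that is set.
 */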
static uint16_t
get_udptcp_checksum(void *l3_hdr, void *l4_hdr, uint16_t ethertype)
{
	if (ethertype == _htons(RTE_ETHER_TYPE_IPV4))
		return rte_ipv4_udptcp_cksum(l3_hdr, l4_hdr);
	else /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
		return rte_ipv6_udptcp_cksum(l3_hdr, l4_hdr);
}
/* Parse an IPv4 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv4(struct rte_ipv4_hdr *ipv4_hdr, struct testpmd_offload_info *info)
{
	struct rte_tcp_hdr *tcp_hdr;

	info->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
	info->l4_proto = ipv4_hdr->next_proto_id;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct rte_tcp_hdr *)
			((char *)ipv4_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else if (info->l4_proto == IPPROTO_UDP)
		info->l4_len = sizeof(struct rte_udp_hdr);
	else
		info->l4_len = 0;
}
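/*
 * Note on the l4_len math above: data_off keeps the TCP data offset in
 * its high nibble, counted in 32-bit words. Masking with 0xf0 and
 * shifting right by 2 multiplies that word count by 4, e.g. a data
 * offset of 5 (0x50 after masking) yields the minimal 20-byte header.
 */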
/* Parse an IPv6 header to fill l3_len, l4_len, and l4_proto */
static void
parse_ipv6(struct rte_ipv6_hdr *ipv6_hdr, struct testpmd_offload_info *info)
{
	struct rte_tcp_hdr *tcp_hdr;

	info->l3_len = sizeof(struct rte_ipv6_hdr);
	info->l4_proto = ipv6_hdr->proto;

	/* only fill l4_len for TCP, it's useful for TSO */
	if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct rte_tcp_hdr *)
			((char *)ipv6_hdr + info->l3_len);
		info->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
	} else if (info->l4_proto == IPPROTO_UDP)
		info->l4_len = sizeof(struct rte_udp_hdr);
	else
		info->l4_len = 0;
}
/*
 * Parse an ethernet header to fill the ethertype, l2_len, l3_len and
 * ipproto. This function is able to recognize IPv4/IPv6 with optional VLAN
 * headers. The l4_len argument is only set in case of TCP (useful for TSO).
 */
static void
parse_ethernet(struct rte_ether_hdr *eth_hdr, struct testpmd_offload_info *info)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_vlan_hdr *vlan_hdr;

	info->l2_len = sizeof(struct rte_ether_hdr);
	info->ethertype = eth_hdr->ether_type;

	while (info->ethertype == _htons(RTE_ETHER_TYPE_VLAN) ||
	       info->ethertype == _htons(RTE_ETHER_TYPE_QINQ)) {
		vlan_hdr = (struct rte_vlan_hdr *)
			((char *)eth_hdr + info->l2_len);
		info->l2_len += sizeof(struct rte_vlan_hdr);
		info->ethertype = vlan_hdr->eth_proto;
	}

	switch (info->ethertype) {
	case _htons(RTE_ETHER_TYPE_IPV4):
		ipv4_hdr = (struct rte_ipv4_hdr *)
			((char *)eth_hdr + info->l2_len);
		parse_ipv4(ipv4_hdr, info);
		break;
	case _htons(RTE_ETHER_TYPE_IPV6):
		ipv6_hdr = (struct rte_ipv6_hdr *)
			((char *)eth_hdr + info->l2_len);
		parse_ipv6(ipv6_hdr, info);
		break;
	default:
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
		break;
	}
}
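/*
 * The while loop above walks any stack of 802.1Q/802.1ad tags, so a
 * QinQ frame (outer 0x88a8, inner 0x8100) ends up with l2_len equal to
 * 14 + 8 bytes and ethertype set to the innermost protocol.
 */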
/*
 * Parse a GTP protocol header.
 * Optional fields and the next-extension-header type are assumed absent.
 */
static void
parse_gtp(struct rte_udp_hdr *udp_hdr,
	  struct testpmd_offload_info *info)
{
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_gtp_hdr *gtp_hdr;
	uint8_t gtp_len = sizeof(*gtp_hdr);
	uint8_t ip_ver;

	/* Check udp destination port. */
	if (udp_hdr->dst_port != _htons(RTE_GTPC_UDP_PORT) &&
	    udp_hdr->src_port != _htons(RTE_GTPC_UDP_PORT) &&
	    udp_hdr->dst_port != _htons(RTE_GTPU_UDP_PORT))
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;
	info->l2_len = 0;

	gtp_hdr = (struct rte_gtp_hdr *)((char *)udp_hdr +
		  sizeof(struct rte_udp_hdr));

	/*
	 * Check message type. If message type is 0xff, it is
	 * a GTP data packet. If not, it is a GTP control packet.
	 */
	if (gtp_hdr->msg_type == 0xff) {
		ip_ver = *(uint8_t *)((char *)udp_hdr +
			 sizeof(struct rte_udp_hdr) +
			 sizeof(struct rte_gtp_hdr));
		ip_ver = (ip_ver) & 0xf0;

		if (ip_ver == RTE_GTP_TYPE_IPV4) {
			ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gtp_hdr +
				   gtp_len);
			info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
			parse_ipv4(ipv4_hdr, info);
		} else if (ip_ver == RTE_GTP_TYPE_IPV6) {
			ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gtp_hdr +
				   gtp_len);
			info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
			parse_ipv6(ipv6_hdr, info);
		}
	} else {
		info->ethertype = 0;
		info->l4_len = 0;
		info->l3_len = 0;
		info->l4_proto = 0;
	}

	info->l2_len += RTE_ETHER_GTP_HLEN;
}
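/*
 * RTE_ETHER_GTP_HLEN covers the UDP header plus the 8-byte mandatory
 * GTP-U header; since the encapsulated payload is raw IP, the inner
 * l2_len consists of the tunnel overhead only.
 */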
/* Parse a vxlan header */
static void
parse_vxlan(struct rte_udp_hdr *udp_hdr,
	    struct testpmd_offload_info *info,
	    uint32_t pkt_type)
{
	struct rte_ether_hdr *eth_hdr;

	/* check udp destination port, 4789 is the default vxlan port
	 * (rfc7348) or that the rx offload flag is set (i40e only
	 * currently) */
	if (udp_hdr->dst_port != _htons(4789) &&
	    RTE_ETH_IS_TUNNEL_PKT(pkt_type) == 0)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;

	eth_hdr = (struct rte_ether_hdr *)((char *)udp_hdr +
		sizeof(struct rte_udp_hdr) +
		sizeof(struct rte_vxlan_hdr));

	parse_ethernet(eth_hdr, info);
	info->l2_len += RTE_ETHER_VXLAN_HLEN; /* add udp + vxlan */
}
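/*
 * RTE_ETHER_VXLAN_HLEN is sizeof(struct rte_udp_hdr) plus
 * sizeof(struct rte_vxlan_hdr), i.e. 8 + 8 = 16 bytes of tunnel
 * overhead accounted into the inner l2_len.
 */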
/* Parse a vxlan-gpe header */
static void
parse_vxlan_gpe(struct rte_udp_hdr *udp_hdr,
		struct testpmd_offload_info *info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_vxlan_gpe_hdr *vxlan_gpe_hdr;
	uint8_t vxlan_gpe_len = sizeof(*vxlan_gpe_hdr);

	/* Check udp destination port. */
	if (udp_hdr->dst_port != _htons(vxlan_gpe_udp_port))
		return;

	vxlan_gpe_hdr = (struct rte_vxlan_gpe_hdr *)((char *)udp_hdr +
				sizeof(struct rte_udp_hdr));

	if (!vxlan_gpe_hdr->proto || vxlan_gpe_hdr->proto ==
	    RTE_VXLAN_GPE_TYPE_IPV4) {
		info->is_tunnel = 1;

		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)vxlan_gpe_hdr +
			   vxlan_gpe_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
		info->l2_len = 0;

	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_IPV6) {
		info->is_tunnel = 1;

		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)vxlan_gpe_hdr +
			   vxlan_gpe_len);

		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (vxlan_gpe_hdr->proto == RTE_VXLAN_GPE_TYPE_ETH) {
		info->is_tunnel = 1;

		info->outer_ethertype = info->ethertype;
		info->outer_l2_len = info->l2_len;
		info->outer_l3_len = info->l3_len;
		info->outer_l4_proto = info->l4_proto;

		eth_hdr = (struct rte_ether_hdr *)((char *)vxlan_gpe_hdr +
			  vxlan_gpe_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += RTE_ETHER_VXLAN_GPE_HLEN;
}
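/*
 * For the IPv4/IPv6 payload cases above there is no inner Ethernet
 * header, so l2_len is reset to zero before the VXLAN-GPE overhead
 * (RTE_ETHER_VXLAN_GPE_HLEN) is added back in.
 */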
/* Fill in outer layers length */
static void
update_tunnel_outer(struct testpmd_offload_info *info)
{
	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;
	info->outer_l4_proto = info->l4_proto;
}
/* Parse a geneve header */
static void
parse_geneve(struct rte_udp_hdr *udp_hdr,
	     struct testpmd_offload_info *info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	struct rte_geneve_hdr *geneve_hdr;
	uint16_t geneve_len;

	/* Check udp destination port. */
	if (udp_hdr->dst_port != _htons(geneve_udp_port))
		return;

	geneve_hdr = (struct rte_geneve_hdr *)((char *)udp_hdr +
				sizeof(struct rte_udp_hdr));
	geneve_len = sizeof(struct rte_geneve_hdr) + geneve_hdr->opt_len * 4;
	if (!geneve_hdr->proto || geneve_hdr->proto ==
	    _htons(RTE_ETHER_TYPE_IPV4)) {
		update_tunnel_outer(info);
		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)geneve_hdr +
			   geneve_len);
		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
		info->l2_len = 0;
	} else if (geneve_hdr->proto == _htons(RTE_ETHER_TYPE_IPV6)) {
		update_tunnel_outer(info);
		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)geneve_hdr +
			   geneve_len);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;
	} else if (geneve_hdr->proto == _htons(RTE_GENEVE_TYPE_ETH)) {
		update_tunnel_outer(info);
		eth_hdr = (struct rte_ether_hdr *)((char *)geneve_hdr +
			  geneve_len);
		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += sizeof(struct rte_udp_hdr) + geneve_len;
}
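/*
 * Geneve options are encoded in opt_len as a count of 4-byte words, so
 * geneve_len already includes the variable options area; adding the UDP
 * header on top gives the full tunnel overhead for the inner l2_len.
 */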
/* Parse a gre header */
static void
parse_gre(struct simple_gre_hdr *gre_hdr, struct testpmd_offload_info *info)
{
	struct rte_ether_hdr *eth_hdr;
	struct rte_ipv4_hdr *ipv4_hdr;
	struct rte_ipv6_hdr *ipv6_hdr;
	uint8_t gre_len = 0;

	gre_len += sizeof(struct simple_gre_hdr);

	if (gre_hdr->flags & _htons(GRE_KEY_PRESENT))
		gre_len += GRE_EXT_LEN;
	if (gre_hdr->flags & _htons(GRE_SEQUENCE_PRESENT))
		gre_len += GRE_EXT_LEN;
	if (gre_hdr->flags & _htons(GRE_CHECKSUM_PRESENT))
		gre_len += GRE_EXT_LEN;

	if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV4)) {
		update_tunnel_outer(info);

		ipv4_hdr = (struct rte_ipv4_hdr *)((char *)gre_hdr + gre_len);

		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_IPV6)) {
		update_tunnel_outer(info);

		ipv6_hdr = (struct rte_ipv6_hdr *)((char *)gre_hdr + gre_len);

		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
		parse_ipv6(ipv6_hdr, info);
		info->l2_len = 0;

	} else if (gre_hdr->proto == _htons(RTE_ETHER_TYPE_TEB)) {
		update_tunnel_outer(info);

		eth_hdr = (struct rte_ether_hdr *)((char *)gre_hdr + gre_len);

		parse_ethernet(eth_hdr, info);
	} else
		return;

	info->l2_len += gre_len;
}
/* Parse an encapsulated ip or ipv6 header */
static void
parse_encap_ip(void *encap_ip, struct testpmd_offload_info *info)
{
	struct rte_ipv4_hdr *ipv4_hdr = encap_ip;
	struct rte_ipv6_hdr *ipv6_hdr = encap_ip;
	uint8_t ip_version;

	ip_version = (ipv4_hdr->version_ihl & 0xf0) >> 4;

	if (ip_version != 4 && ip_version != 6)
		return;

	info->is_tunnel = 1;
	info->outer_ethertype = info->ethertype;
	info->outer_l2_len = info->l2_len;
	info->outer_l3_len = info->l3_len;

	if (ip_version == 4) {
		parse_ipv4(ipv4_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV4);
	} else {
		parse_ipv6(ipv6_hdr, info);
		info->ethertype = _htons(RTE_ETHER_TYPE_IPV6);
	}
	info->l2_len = 0;
}
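/*
 * Reading version_ihl through the IPv4 view is safe for both address
 * families: the version field occupies the top four bits of the first
 * byte in IPv4 and IPv6 alike, which is all the dispatch above needs.
 */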
/* if possible, calculate the checksum of a packet in hw or sw,
 * depending on the testpmd command line configuration */
static uint64_t
process_inner_cksums(void *l3_hdr, const struct testpmd_offload_info *info,
	uint64_t tx_offloads)
{
	struct rte_ipv4_hdr *ipv4_hdr = l3_hdr;
	struct rte_udp_hdr *udp_hdr;
	struct rte_tcp_hdr *tcp_hdr;
	struct rte_sctp_hdr *sctp_hdr;
	uint64_t ol_flags = 0;
	uint32_t max_pkt_len, tso_segsz = 0;

	/* ensure packet is large enough to require tso */
	if (!info->is_tunnel) {
		max_pkt_len = info->l2_len + info->l3_len + info->l4_len +
			info->tso_segsz;
		if (info->tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tso_segsz;
	} else {
		max_pkt_len = info->outer_l2_len + info->outer_l3_len +
			info->l2_len + info->l3_len + info->l4_len +
			info->tunnel_tso_segsz;
		if (info->tunnel_tso_segsz != 0 && info->pkt_len > max_pkt_len)
			tso_segsz = info->tunnel_tso_segsz;
	}

	if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
		ipv4_hdr->hdr_checksum = 0;

		ol_flags |= PKT_TX_IPV4;
		if (info->l4_proto == IPPROTO_TCP && tso_segsz) {
			ol_flags |= PKT_TX_IP_CKSUM;
		} else {
			if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
				ol_flags |= PKT_TX_IP_CKSUM;
			else
				ipv4_hdr->hdr_checksum =
					rte_ipv4_cksum(ipv4_hdr);
		}
	} else if (info->ethertype == _htons(RTE_ETHER_TYPE_IPV6))
		ol_flags |= PKT_TX_IPV6;
	else
		return 0; /* packet type not supported, nothing to do */

	if (info->l4_proto == IPPROTO_UDP) {
		udp_hdr = (struct rte_udp_hdr *)((char *)l3_hdr + info->l3_len);
		/* do not recalculate udp cksum if it was 0 */
		if (udp_hdr->dgram_cksum != 0) {
			udp_hdr->dgram_cksum = 0;
			if (tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
				ol_flags |= PKT_TX_UDP_CKSUM;
			else
				udp_hdr->dgram_cksum =
					get_udptcp_checksum(l3_hdr, udp_hdr,
						info->ethertype);
		}
		if (info->gso_enable)
			ol_flags |= PKT_TX_UDP_SEG;
	} else if (info->l4_proto == IPPROTO_TCP) {
		tcp_hdr = (struct rte_tcp_hdr *)((char *)l3_hdr + info->l3_len);
		tcp_hdr->cksum = 0;
		if (tso_segsz)
			ol_flags |= PKT_TX_TCP_SEG;
		else if (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
			ol_flags |= PKT_TX_TCP_CKSUM;
		else
			tcp_hdr->cksum =
				get_udptcp_checksum(l3_hdr, tcp_hdr,
					info->ethertype);
		if (info->gso_enable)
			ol_flags |= PKT_TX_TCP_SEG;
	} else if (info->l4_proto == IPPROTO_SCTP) {
		sctp_hdr = (struct rte_sctp_hdr *)
			((char *)l3_hdr + info->l3_len);
		sctp_hdr->cksum = 0;
		/* sctp payload must be a multiple of 4 to be
		 * offloaded */
		if ((tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM) &&
		    ((ipv4_hdr->total_length & 0x3) == 0)) {
			ol_flags |= PKT_TX_SCTP_CKSUM;
		} else {
			/* XXX implement CRC32c, example available in
			 * RFC3309 */
		}
	}

	return ol_flags;
}
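/*
 * Example: for an inner TCP/IPv4 packet with DEV_TX_OFFLOAD_IPV4_CKSUM
 * and DEV_TX_OFFLOAD_TCP_CKSUM both enabled and TSO off, the function
 * returns PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM and leaves
 * both checksum fields zeroed for the NIC to fill.
 */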
/* Calculate the checksum of outer header */
static uint64_t
process_outer_cksums(void *outer_l3_hdr, struct testpmd_offload_info *info,
	uint64_t tx_offloads, int tso_enabled)
{
	struct rte_ipv4_hdr *ipv4_hdr = outer_l3_hdr;
	struct rte_ipv6_hdr *ipv6_hdr = outer_l3_hdr;
	struct rte_udp_hdr *udp_hdr;
	uint64_t ol_flags = 0;

	if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4)) {
		ipv4_hdr->hdr_checksum = 0;
		ol_flags |= PKT_TX_OUTER_IPV4;

		if (tx_offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
			ol_flags |= PKT_TX_OUTER_IP_CKSUM;
		else
			ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
	} else
		ol_flags |= PKT_TX_OUTER_IPV6;

	if (info->outer_l4_proto != IPPROTO_UDP)
		return ol_flags;

	udp_hdr = (struct rte_udp_hdr *)
		((char *)outer_l3_hdr + info->outer_l3_len);

	if (tso_enabled)
		ol_flags |= PKT_TX_TCP_SEG;

	/* Skip SW outer UDP checksum generation if HW supports it */
	if (tx_offloads & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) {
		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
			udp_hdr->dgram_cksum
				= rte_ipv4_phdr_cksum(ipv4_hdr, ol_flags);
		else
			udp_hdr->dgram_cksum
				= rte_ipv6_phdr_cksum(ipv6_hdr, ol_flags);

		ol_flags |= PKT_TX_OUTER_UDP_CKSUM;
		return ol_flags;
	}

	/* The outer UDP checksum is done in software. On the other hand,
	 * for UDP tunneling such as VXLAN or Geneve, the outer UDP checksum
	 * may legitimately be set to zero.
	 *
	 * If a packet will be TSOed into small packets by the NIC, we cannot
	 * set/calculate a non-zero checksum, because it would be a wrong
	 * value after the packet is split into several small packets.
	 */
	if (tso_enabled)
		udp_hdr->dgram_cksum = 0;

	/* do not recalculate udp cksum if it was 0 */
	if (udp_hdr->dgram_cksum != 0) {
		udp_hdr->dgram_cksum = 0;
		if (info->outer_ethertype == _htons(RTE_ETHER_TYPE_IPV4))
			udp_hdr->dgram_cksum =
				rte_ipv4_udptcp_cksum(ipv4_hdr, udp_hdr);
		else
			udp_hdr->dgram_cksum =
				rte_ipv6_udptcp_cksum(ipv6_hdr, udp_hdr);
	}

	return ol_flags;
}
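/*
 * Note that the pseudo-header checksum written above for the HW-offload
 * case is the seed value checksum-offload NICs expect: the hardware
 * folds the UDP payload into it and overwrites dgram_cksum with the
 * final checksum.
 */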
/*
 * Helper function.
 * Performs actual copying.
 * Returns number of segments in the destination mbuf on success,
 * or negative error code on failure.
 */
static int
mbuf_copy_split(const struct rte_mbuf *ms, struct rte_mbuf *md[],
	uint16_t seglen[], uint8_t nb_seg)
{
	uint32_t dlen, slen, tlen;
	uint32_t i, len;
	const struct rte_mbuf *m;
	const uint8_t *src;
	uint8_t *dst;

	dlen = 0;
	slen = 0;
	tlen = 0;

	dst = NULL;
	src = NULL;

	m = ms;
	i = 0;
	while (ms != NULL && i != nb_seg) {

		if (slen == 0) {
			slen = rte_pktmbuf_data_len(ms);
			src = rte_pktmbuf_mtod(ms, const uint8_t *);
		}

		if (dlen == 0) {
			dlen = RTE_MIN(seglen[i], slen);
			md[i]->data_len = dlen;
			md[i]->next = (i + 1 == nb_seg) ? NULL : md[i + 1];
			dst = rte_pktmbuf_mtod(md[i], uint8_t *);
		}

		len = RTE_MIN(slen, dlen);
		memcpy(dst, src, len);
		tlen += len;
		slen -= len;
		dlen -= len;
		src += len;
		dst += len;

		if (slen == 0)
			ms = ms->next;
		if (dlen == 0)
			i++;
	}

	if (ms != NULL)
		return -ENOBUFS;
	else if (tlen != m->pkt_len)
		return -EINVAL;

	md[0]->nb_segs = nb_seg;
	md[0]->pkt_len = tlen;
	md[0]->vlan_tci = m->vlan_tci;
	md[0]->vlan_tci_outer = m->vlan_tci_outer;
	md[0]->ol_flags = m->ol_flags;
	md[0]->tx_offload = m->tx_offload;

	return nb_seg;
}
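/*
 * The copy loop advances two cursors at once: slen tracks what is left
 * in the current source segment, dlen what is still free in the current
 * destination segment, and whichever hits zero first moves its side
 * forward. tlen accumulates the total so it can be cross-checked
 * against the original pkt_len.
 */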
/*
 * Allocate a new mbuf with up to tx_pkt_nb_segs segments.
 * Copy packet contents and offload information into the new segmented mbuf.
 */
static struct rte_mbuf *
pkt_copy_split(const struct rte_mbuf *pkt)
{
	int32_t n, rc;
	uint32_t i, len, nb_seg;
	struct rte_mempool *mp;
	uint16_t seglen[RTE_MAX_SEGS_PER_PKT];
	struct rte_mbuf *p, *md[RTE_MAX_SEGS_PER_PKT];

	mp = current_fwd_lcore()->mbp;

	if (tx_pkt_split == TX_PKT_SPLIT_RND)
		nb_seg = random() % tx_pkt_nb_segs + 1;
	else
		nb_seg = tx_pkt_nb_segs;

	memcpy(seglen, tx_pkt_seg_lengths, nb_seg * sizeof(seglen[0]));

	/* calculate number of segments to use and their length. */
	len = 0;
	for (i = 0; i != nb_seg && len < pkt->pkt_len; i++) {
		len += seglen[i];
		md[i] = NULL;
	}

	n = pkt->pkt_len - len;

	/* update size of the last segment to fit rest of the packet */
	if (n >= 0) {
		seglen[i - 1] += n;
		len += n;
	}

	nb_seg = i;
	for (i = 0; i != nb_seg; i++) {
		p = rte_pktmbuf_alloc(mp);
		if (p == NULL) {
			TESTPMD_LOG(ERR,
				"failed to allocate %u-th of %u mbuf "
				"from mempool: %s\n",
				nb_seg - i, nb_seg, mp->name);
			break;
		}

		md[i] = p;
		if (rte_pktmbuf_tailroom(md[i]) < seglen[i]) {
			TESTPMD_LOG(ERR, "mempool %s, %u-th segment: "
				"expected seglen: %u, "
				"actual mbuf tailroom: %u\n",
				mp->name, i, seglen[i],
				rte_pktmbuf_tailroom(md[i]));
			break;
		}
	}

	/* all mbufs successfully allocated, do copy */
	if (i == nb_seg) {
		rc = mbuf_copy_split(pkt, md, seglen, nb_seg);
		if (rc < 0)
			TESTPMD_LOG(ERR,
				"mbuf_copy_split for %p(len=%u, nb_seg=%u) "
				"into %u segments failed with error code: %d\n",
				pkt, pkt->pkt_len, pkt->nb_segs, nb_seg, rc);

		/* figure out how many mbufs to free. */
		i = (rc < 0) ? 0 : nb_seg;
	}

	/* free unused mbufs */
	for (; i != nb_seg; i++) {
		rte_pktmbuf_free_seg(md[i]);
		md[i] = NULL;
	}

	return md[0];
}
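/*
 * This split/copy path exercises multi-segment transmit: the rebuilt
 * chain carries the same payload and offload metadata as the original
 * mbuf, so checksum offloads can be validated on segmented packets too.
 */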
/*
 * Receive a burst of packets, and for each packet:
 *  - parse packet, and try to recognize a supported packet type (1)
 *  - if it's not a supported packet type, don't touch the packet, else:
 *  - reprocess the checksum of all supported layers. This is done in SW
 *    or HW, depending on testpmd command line configuration
 *  - if TSO is enabled in testpmd command line, also flag the mbuf for TCP
 *    segmentation offload (this implies HW TCP checksum)
 * Then transmit packets on the output port.
 *
 * (1) Supported packets are:
 *   Ether / (vlan) / IP|IP6 / UDP|TCP|SCTP .
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VxLAN / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / Ether / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / outer UDP / VXLAN-GPE / IP|IP6 /
 *           UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP / outer UDP / GTP / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / Ether / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / GRE / IP|IP6 / UDP|TCP|SCTP
 *   Ether / (vlan) / outer IP|IP6 / IP|IP6 / UDP|TCP|SCTP
 *
 * The testpmd command line for this forward engine sets the flags
 * TESTPMD_TX_OFFLOAD_* in ports[tx_port].tx_ol_flags. They control
 * whether a checksum must be calculated in software or in hardware. The
 * IP, UDP, TCP and SCTP flags always concern the inner layer. The
 * OUTER_IP is only useful for tunnel packets.
 */
static void
pkt_burst_checksum_forward(struct fwd_stream *fs)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	struct rte_mbuf *gso_segments[GSO_MAX_PKT_BURST];
	struct rte_gso_ctx *gso_ctx;
	struct rte_mbuf **tx_pkts_burst;
	struct rte_port *txp;
	struct rte_mbuf *m, *p;
	struct rte_ether_hdr *eth_hdr;
	void *l3_hdr = NULL, *outer_l3_hdr = NULL; /* can be IPv4 or IPv6 */
	void *gro_ctx;
	uint16_t gro_pkts_num;
	uint8_t gro_enable;
	uint16_t nb_rx;
	uint16_t nb_tx;
	uint16_t nb_prep;
	uint16_t i;
	uint64_t rx_ol_flags, tx_ol_flags;
	uint64_t tx_offloads;
	uint32_t retry;
	uint32_t rx_bad_ip_csum;
	uint32_t rx_bad_l4_csum;
	uint32_t rx_bad_outer_l4_csum;
	struct testpmd_offload_info info;
	uint16_t nb_segments = 0;
	int ret;

	uint64_t start_tsc = 0;
	get_start_cycles(&start_tsc);

	/* receive a burst of packet */
	nb_rx = rte_eth_rx_burst(fs->rx_port, fs->rx_queue, pkts_burst,
				 nb_pkt_per_burst);
	inc_rx_burst_stats(fs, nb_rx);
	if (unlikely(nb_rx == 0))
		return;

	fs->rx_packets += nb_rx;
	rx_bad_ip_csum = 0;
	rx_bad_l4_csum = 0;
	rx_bad_outer_l4_csum = 0;
	gro_enable = gro_ports[fs->rx_port].enable;

	txp = &ports[fs->tx_port];
	tx_offloads = txp->dev_conf.txmode.offloads;
	memset(&info, 0, sizeof(info));
	info.tso_segsz = txp->tso_segsz;
	info.tunnel_tso_segsz = txp->tunnel_tso_segsz;
	if (gso_ports[fs->tx_port].enable)
		info.gso_enable = 1;
	for (i = 0; i < nb_rx; i++) {
		if (likely(i < nb_rx - 1))
			rte_prefetch0(rte_pktmbuf_mtod(pkts_burst[i + 1],
						       void *));

		m = pkts_burst[i];
		info.is_tunnel = 0;
		info.pkt_len = rte_pktmbuf_pkt_len(m);
		tx_ol_flags = m->ol_flags &
			      (IND_ATTACHED_MBUF | EXT_ATTACHED_MBUF);
		rx_ol_flags = m->ol_flags;

		/* Update the L3/L4 checksum error packet statistics */
		if ((rx_ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD)
			rx_bad_ip_csum += 1;
		if ((rx_ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)
			rx_bad_l4_csum += 1;
		if (rx_ol_flags & PKT_RX_OUTER_L4_CKSUM_BAD)
			rx_bad_outer_l4_csum += 1;

		/* step 1: dissect packet, parsing optional vlan, ip4/ip6, vxlan
		 * and inner headers */

		eth_hdr = rte_pktmbuf_mtod(m, struct rte_ether_hdr *);
		rte_ether_addr_copy(&peer_eth_addrs[fs->peer_addr],
				    &eth_hdr->d_addr);
		rte_ether_addr_copy(&ports[fs->tx_port].eth_addr,
				    &eth_hdr->s_addr);
		parse_ethernet(eth_hdr, &info);
		l3_hdr = (char *)eth_hdr + info.l2_len;
		/* check if it's a supported tunnel */
		if (txp->parse_tunnel) {
			if (info.l4_proto == IPPROTO_UDP) {
				struct rte_udp_hdr *udp_hdr;

				udp_hdr = (struct rte_udp_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gtp(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |= PKT_TX_TUNNEL_GTP;
					goto tunnel_update;
				}
				parse_vxlan_gpe(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |=
						PKT_TX_TUNNEL_VXLAN_GPE;
					goto tunnel_update;
				}
				parse_vxlan(udp_hdr, &info,
					    m->packet_type);
				if (info.is_tunnel) {
					tx_ol_flags |=
						PKT_TX_TUNNEL_VXLAN;
					goto tunnel_update;
				}
				parse_geneve(udp_hdr, &info);
				if (info.is_tunnel) {
					tx_ol_flags |=
						PKT_TX_TUNNEL_GENEVE;
					goto tunnel_update;
				}
			} else if (info.l4_proto == IPPROTO_GRE) {
				struct simple_gre_hdr *gre_hdr;

				gre_hdr = (struct simple_gre_hdr *)
					((char *)l3_hdr + info.l3_len);
				parse_gre(gre_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_GRE;
			} else if (info.l4_proto == IPPROTO_IPIP) {
				void *encap_ip_hdr;

				encap_ip_hdr = (char *)l3_hdr + info.l3_len;
				parse_encap_ip(encap_ip_hdr, &info);
				if (info.is_tunnel)
					tx_ol_flags |= PKT_TX_TUNNEL_IPIP;
			}
		}

tunnel_update:
		/* update l3_hdr and outer_l3_hdr if a tunnel was parsed */
		if (info.is_tunnel) {
			outer_l3_hdr = l3_hdr;
			l3_hdr = (char *)l3_hdr + info.outer_l3_len +
				 info.l2_len;
		}
		/* step 2: depending on user command line configuration,
		 * recompute checksum either in software or flag the
		 * mbuf to offload the calculation to the NIC. If TSO
		 * is configured, prepare the mbuf for TCP segmentation. */

		/* process checksums of inner headers first */
		tx_ol_flags |= process_inner_cksums(l3_hdr, &info,
			tx_offloads);

		/* Then process outer headers if any. Note that the software
		 * checksum will be wrong if one of the inner checksums is
		 * processed in hardware. */
		if (info.is_tunnel == 1) {
			tx_ol_flags |= process_outer_cksums(outer_l3_hdr, &info,
					tx_offloads,
					!!(tx_ol_flags & PKT_TX_TCP_SEG));
		}
		/* step 3: fill the mbuf meta data (flags and header lengths) */

		m->tx_offload = 0;
		if (info.is_tunnel == 1) {
			if (info.tunnel_tso_segsz ||
			    (tx_offloads &
			     DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
			    (tx_offloads &
			     DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
			    (tx_ol_flags & PKT_TX_OUTER_IPV6)) {
				m->outer_l2_len = info.outer_l2_len;
				m->outer_l3_len = info.outer_l3_len;
				m->l2_len = info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
				m->tso_segsz = info.tunnel_tso_segsz;
			} else {
				/* if there is an outer UDP cksum
				   processed in sw and the inner one in hw,
				   the outer checksum will be wrong as
				   the payload will be modified by the
				   hardware */
				m->l2_len = info.outer_l2_len +
					info.outer_l3_len + info.l2_len;
				m->l3_len = info.l3_len;
				m->l4_len = info.l4_len;
			}
		} else {
			/* this is only useful if an offload flag is
			 * set, but it does not hurt to fill it in any
			 * case */
			m->l2_len = info.l2_len;
			m->l3_len = info.l3_len;
			m->l4_len = info.l4_len;
			m->tso_segsz = info.tso_segsz;
		}
		m->ol_flags = tx_ol_flags;
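		/*
		 * In the fallback branch above the tunnel headers are
		 * folded into l2_len, so from the NIC's point of view
		 * the packet is a plain IP packet with a long L2 header
		 * and only the inner checksums are offloaded.
		 */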
		/* Do split & copy for the packet. */
		if (tx_pkt_split != TX_PKT_SPLIT_OFF) {
			p = pkt_copy_split(m);
			if (p != NULL) {
				rte_pktmbuf_free(m);
				m = p;
				pkts_burst[i] = m;
			}
		}
		/* if verbose mode is enabled, dump debug info */
		if (verbose_level > 0) {
			char buf[256];

			printf("-----------------\n");
			printf("port=%u, mbuf=%p, pkt_len=%u, nb_segs=%u:\n",
				fs->rx_port, m, m->pkt_len, m->nb_segs);
			/* dump rx parsed packet info */
			rte_get_rx_ol_flag_list(rx_ol_flags, buf, sizeof(buf));
			printf("rx: l2_len=%d ethertype=%x l3_len=%d "
				"l4_proto=%d l4_len=%d flags=%s\n",
				info.l2_len, rte_be_to_cpu_16(info.ethertype),
				info.l3_len, info.l4_proto, info.l4_len, buf);
			if (rx_ol_flags & PKT_RX_LRO)
				printf("rx: m->lro_segsz=%u\n", m->tso_segsz);
			if (info.is_tunnel == 1)
				printf("rx: outer_l2_len=%d outer_ethertype=%x "
					"outer_l3_len=%d\n", info.outer_l2_len,
					rte_be_to_cpu_16(info.outer_ethertype),
					info.outer_l3_len);
			/* dump tx packet info */
			if ((tx_offloads & (DEV_TX_OFFLOAD_IPV4_CKSUM |
					    DEV_TX_OFFLOAD_UDP_CKSUM |
					    DEV_TX_OFFLOAD_TCP_CKSUM |
					    DEV_TX_OFFLOAD_SCTP_CKSUM)) ||
					info.tso_segsz != 0)
				printf("tx: m->l2_len=%d m->l3_len=%d "
					"m->l4_len=%d\n",
					m->l2_len, m->l3_len, m->l4_len);
			if (info.is_tunnel == 1) {
				if ((tx_offloads &
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
				    (tx_offloads &
				    DEV_TX_OFFLOAD_OUTER_UDP_CKSUM) ||
				    (tx_ol_flags & PKT_TX_OUTER_IPV6))
					printf("tx: m->outer_l2_len=%d "
						"m->outer_l3_len=%d\n",
						m->outer_l2_len,
						m->outer_l3_len);
				if (info.tunnel_tso_segsz != 0 &&
						(m->ol_flags & PKT_TX_TCP_SEG))
					printf("tx: m->tso_segsz=%d\n",
						m->tso_segsz);
			} else if (info.tso_segsz != 0 &&
					(m->ol_flags & PKT_TX_TCP_SEG))
				printf("tx: m->tso_segsz=%d\n", m->tso_segsz);
			rte_get_tx_ol_flag_list(m->ol_flags, buf, sizeof(buf));
			printf("tx: flags=%s", buf);
			printf("\n");
		}
	}
	if (unlikely(gro_enable)) {
		if (gro_flush_cycles == GRO_DEFAULT_FLUSH_CYCLES) {
			nb_rx = rte_gro_reassemble_burst(pkts_burst, nb_rx,
					&(gro_ports[fs->rx_port].param));
		} else {
			gro_ctx = current_fwd_lcore()->gro_ctx;
			nb_rx = rte_gro_reassemble(pkts_burst, nb_rx, gro_ctx);

			if (++fs->gro_times >= gro_flush_cycles) {
				gro_pkts_num = rte_gro_get_pkt_count(gro_ctx);
				if (gro_pkts_num > MAX_PKT_BURST - nb_rx)
					gro_pkts_num = MAX_PKT_BURST - nb_rx;

				nb_rx += rte_gro_timeout_flush(gro_ctx, 0,
						RTE_GRO_TCP_IPV4,
						&pkts_burst[nb_rx],
						gro_pkts_num);
				fs->gro_times = 0;
			}
		}
	}
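	/*
	 * GRO above may shrink the burst by coalescing TCP/IPv4 flows; the
	 * periodic timeout flush then drains whatever the reassembly
	 * context is still holding back into the same burst array.
	 */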
	if (gso_ports[fs->tx_port].enable == 0)
		tx_pkts_burst = pkts_burst;
	else {
		gso_ctx = &(current_fwd_lcore()->gso_ctx);
		gso_ctx->gso_size = gso_max_segment_size;
		for (i = 0; i < nb_rx; i++) {
			ret = rte_gso_segment(pkts_burst[i], gso_ctx,
					&gso_segments[nb_segments],
					GSO_MAX_PKT_BURST - nb_segments);
			if (ret >= 0)
				nb_segments += ret;
			else {
				TESTPMD_LOG(DEBUG, "Unable to segment packet");
				rte_pktmbuf_free(pkts_burst[i]);
			}
		}

		tx_pkts_burst = gso_segments;
		nb_rx = nb_segments;
	}
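	/*
	 * From here on, tx_pkts_burst points either at the original burst
	 * or at the GSO output array, so the prepare/transmit path below
	 * is identical for both cases.
	 */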
	nb_prep = rte_eth_tx_prepare(fs->tx_port, fs->tx_queue,
			tx_pkts_burst, nb_rx);
	if (nb_prep != nb_rx)
		printf("Preparing packet burst to transmit failed: %s\n",
				rte_strerror(rte_errno));

	nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, tx_pkts_burst,
			nb_prep);

	/*
	 * Retry if necessary
	 */
	if (unlikely(nb_tx < nb_rx) && fs->retry_enabled) {
		retry = 0;
		while (nb_tx < nb_rx && retry++ < burst_tx_retry_num) {
			rte_delay_us(burst_tx_delay_time);
			nb_tx += rte_eth_tx_burst(fs->tx_port, fs->tx_queue,
					&tx_pkts_burst[nb_tx], nb_rx - nb_tx);
		}
	}
	fs->tx_packets += nb_tx;
	fs->rx_bad_ip_csum += rx_bad_ip_csum;
	fs->rx_bad_l4_csum += rx_bad_l4_csum;
	fs->rx_bad_outer_l4_csum += rx_bad_outer_l4_csum;

	inc_tx_burst_stats(fs, nb_tx);
	if (unlikely(nb_tx < nb_rx)) {
		fs->fwd_dropped += (nb_rx - nb_tx);
		do {
			rte_pktmbuf_free(tx_pkts_burst[nb_tx]);
		} while (++nb_tx < nb_rx);
	}

	get_end_cycles(fs, start_tsc);
}
struct fwd_engine csum_fwd_engine = {
	.fwd_mode_name  = "csum",
	.port_fwd_begin = NULL,
	.port_fwd_end   = NULL,
	.packet_fwd     = pkt_burst_checksum_forward,
};