From: Mark Kavanagh
Date: Sat, 7 Oct 2017 14:56:42 +0000 (+0800)
Subject: gso: support GRE GSO
X-Git-Tag: spdx-start~1587
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=70e737e448c734b8d728fd17f3e50d28711d475d;p=dpdk.git

gso: support GRE GSO

This patch adds GSO support for GRE-tunneled packets. Supported GRE
packets must contain an outer IPv4 header, and inner TCP/IPv4 headers.
They may also contain a single VLAN tag.

GRE GSO doesn't check if all input packets have correct checksums and
doesn't update checksums for output packets. Additionally, it doesn't
process IP fragmented packets.

As with VxLAN GSO, GRE GSO uses a two-segment MBUF to organize each
output packet, which requires multi-segment mbuf support in the TX
functions of the NIC driver. Also, if a packet is GSOed, GRE GSO
reduces its MBUF refcnt by 1. As a result, when all of its GSOed
segments are freed, the packet is freed automatically.

Signed-off-by: Mark Kavanagh
Signed-off-by: Jiayu Hu
Acked-by: Konstantin Ananyev
---

diff --git a/doc/guides/rel_notes/release_17_11.rst b/doc/guides/rel_notes/release_17_11.rst
index 3134723d19..b29a9caced 100644
--- a/doc/guides/rel_notes/release_17_11.rst
+++ b/doc/guides/rel_notes/release_17_11.rst
@@ -95,6 +95,8 @@ New Features
   * TCP/IPv4 packets.
   * VxLAN packets, which must have an outer IPv4 header, and contain
     an inner TCP/IPv4 packet.
+  * GRE packets, which must contain an outer IPv4 header, and inner
+    TCP/IPv4 headers.
 
   The GSO library doesn't check if the input packets have correct
   checksums, and doesn't update checksums for output packets.
diff --git a/lib/librte_gso/gso_common.h b/lib/librte_gso/gso_common.h
index 95d54e7da0..145ea49533 100644
--- a/lib/librte_gso/gso_common.h
+++ b/lib/librte_gso/gso_common.h
@@ -55,6 +55,11 @@
         (PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
         PKT_TX_TUNNEL_VXLAN))
 
+#define IS_IPV4_GRE_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
+        PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_GRE)) == \
+        (PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
+        PKT_TX_TUNNEL_GRE))
+
 /**
  * Internal function which updates the UDP header of a packet, following
  * segmentation. This is required to update the header's datagram length field.
diff --git a/lib/librte_gso/gso_tunnel_tcp4.c b/lib/librte_gso/gso_tunnel_tcp4.c
index 5e8c8e58ea..8d0cfd7ad7 100644
--- a/lib/librte_gso/gso_tunnel_tcp4.c
+++ b/lib/librte_gso/gso_tunnel_tcp4.c
@@ -42,11 +42,13 @@ update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
     struct tcp_hdr *tcp_hdr;
     uint32_t sent_seq;
     uint16_t outer_id, inner_id, tail_idx, i;
-    uint16_t outer_ipv4_offset, inner_ipv4_offset, udp_offset, tcp_offset;
+    uint16_t outer_ipv4_offset, inner_ipv4_offset;
+    uint16_t udp_gre_offset, tcp_offset;
+    uint8_t update_udp_hdr;
 
     outer_ipv4_offset = pkt->outer_l2_len;
-    udp_offset = outer_ipv4_offset + pkt->outer_l3_len;
-    inner_ipv4_offset = udp_offset + pkt->l2_len;
+    udp_gre_offset = outer_ipv4_offset + pkt->outer_l3_len;
+    inner_ipv4_offset = udp_gre_offset + pkt->l2_len;
     tcp_offset = inner_ipv4_offset + pkt->l3_len;
 
     /* Outer IPv4 header. */
@@ -63,9 +65,13 @@ update_tunnel_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
     sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
     tail_idx = nb_segs - 1;
 
+    /* Only update UDP header for VxLAN packets. */
+    update_udp_hdr = (pkt->ol_flags & PKT_TX_TUNNEL_VXLAN) ? 1 : 0;
+
     for (i = 0; i < nb_segs; i++) {
         update_ipv4_header(segs[i], outer_ipv4_offset, outer_id);
-        update_udp_header(segs[i], udp_offset);
+        if (update_udp_hdr)
+            update_udp_header(segs[i], udp_gre_offset);
         update_ipv4_header(segs[i], inner_ipv4_offset, inner_id);
         update_tcp_header(segs[i], tcp_offset, sent_seq, i < tail_idx);
         outer_id++;
diff --git a/lib/librte_gso/rte_gso.c b/lib/librte_gso/rte_gso.c
index 0a3ef11830..f86e654149 100644
--- a/lib/librte_gso/rte_gso.c
+++ b/lib/librte_gso/rte_gso.c
@@ -58,7 +58,8 @@ rte_gso_segment(struct rte_mbuf *pkt,
             nb_pkts_out < 1 ||
             gso_ctx->gso_size < RTE_GSO_SEG_SIZE_MIN ||
             ((gso_ctx->gso_types & (DEV_TX_OFFLOAD_TCP_TSO |
-            DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) == 0))
+            DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+            DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0))
         return -EINVAL;
 
     if (gso_ctx->gso_size >= pkt->pkt_len) {
@@ -73,8 +74,10 @@ rte_gso_segment(struct rte_mbuf *pkt,
     ipid_delta = (gso_ctx->flag != RTE_GSO_FLAG_IPID_FIXED);
     ol_flags = pkt->ol_flags;
 
-    if (IS_IPV4_VXLAN_TCP4(pkt->ol_flags)
-            && (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) {
+    if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
+            (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+            ((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
+            (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
         pkt->ol_flags &= (~PKT_TX_TCP_SEG);
         ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
                 direct_pool, indirect_pool,
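
For reference, a minimal usage sketch (not part of the patch) of how an application
might drive GRE GSO through this API. It assumes two pktmbuf mempools created
elsewhere with rte_pktmbuf_pool_create(), and a GRE/IPv4 packet whose outer_l2_len,
outer_l3_len, l2_len, l3_len and l4_len fields are already set. The helper name
gre_gso_prepare and the 1400-byte segment size are illustrative; the flags and
context fields are taken from this patch and from rte_gso.h.

    #include <stdint.h>
    #include <rte_ethdev.h>
    #include <rte_gso.h>
    #include <rte_mbuf.h>

    /*
     * Illustrative helper: segment one GRE-tunneled TCP/IPv4 packet.
     * 'direct_pool' and 'indirect_pool' are assumed to be pktmbuf pools
     * created elsewhere (e.g. with rte_pktmbuf_pool_create()).
     */
    static int
    gre_gso_prepare(struct rte_mbuf *pkt,
            struct rte_mempool *direct_pool,
            struct rte_mempool *indirect_pool,
            struct rte_mbuf **segs, uint16_t nb_segs_max)
    {
        struct rte_gso_ctx ctx = {
            .direct_pool = direct_pool,
            .indirect_pool = indirect_pool,
            .gso_types = DEV_TX_OFFLOAD_GRE_TNL_TSO,
            .gso_size = 1400,    /* max length of each output segment */
            .flag = RTE_GSO_FLAG_IPID_FIXED,
        };

        /* These flags select the GRE tunnel path (see IS_IPV4_GRE_TCP4). */
        pkt->ol_flags |= PKT_TX_TCP_SEG | PKT_TX_IPV4 |
                PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_GRE;

        /*
         * Returns the number of segments written to 'segs', or a negative
         * value on error. If the packet is segmented, its refcnt is reduced
         * by one, so freeing all output segments also frees the original
         * packet (per the commit message above).
         */
        return rte_gso_segment(pkt, &ctx, segs, nb_segs_max);
    }

Because each output packet is a two-segment mbuf (a direct mbuf carrying the
headers plus an indirect mbuf referencing the payload), the port that transmits
these segments must support multi-segment mbufs in its TX path, as noted above.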