* TCP/IPv4 packets.
* VxLAN packets, which must have an outer IPv4 header, and contain
*   an inner TCP/IPv4 packet.
+ * GRE packets, which must contain an outer IPv4 header, and inner
+ *   TCP/IPv4 headers.
* The GSO library doesn't check if the input packets have correct
* checksums, and doesn't update checksums for output packets.
(PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
PKT_TX_TUNNEL_VXLAN))
+#define IS_IPV4_GRE_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
+ PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_GRE)) == \
+ (PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
+ PKT_TX_TUNNEL_GRE))
+
/**
* Internal function which updates the UDP header of a packet, following
* segmentation. This is required to update the header's datagram length field.
struct tcp_hdr *tcp_hdr;
uint32_t sent_seq;
uint16_t outer_id, inner_id, tail_idx, i;
- uint16_t outer_ipv4_offset, inner_ipv4_offset, udp_offset, tcp_offset;
+ uint16_t outer_ipv4_offset, inner_ipv4_offset;
+ uint16_t udp_gre_offset, tcp_offset;
+ uint8_t update_udp_hdr;
outer_ipv4_offset = pkt->outer_l2_len;
- udp_offset = outer_ipv4_offset + pkt->outer_l3_len;
- inner_ipv4_offset = udp_offset + pkt->l2_len;
+ udp_gre_offset = outer_ipv4_offset + pkt->outer_l3_len;
+ inner_ipv4_offset = udp_gre_offset + pkt->l2_len;
tcp_offset = inner_ipv4_offset + pkt->l3_len;
/* Outer IPv4 header. */
sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
tail_idx = nb_segs - 1;
+ /* Only update UDP header for VxLAN packets. */
+ update_udp_hdr = (pkt->ol_flags & PKT_TX_TUNNEL_VXLAN) ? 1 : 0;
+
for (i = 0; i < nb_segs; i++) {
update_ipv4_header(segs[i], outer_ipv4_offset, outer_id);
- update_udp_header(segs[i], udp_offset);
+ if (update_udp_hdr)
+ update_udp_header(segs[i], udp_gre_offset);
update_ipv4_header(segs[i], inner_ipv4_offset, inner_id);
update_tcp_header(segs[i], tcp_offset, sent_seq, i < tail_idx);
outer_id++;
nb_pkts_out < 1 ||
gso_ctx->gso_size < RTE_GSO_SEG_SIZE_MIN ||
((gso_ctx->gso_types & (DEV_TX_OFFLOAD_TCP_TSO |
- DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) == 0))
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
+ DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0))
return -EINVAL;
if (gso_ctx->gso_size >= pkt->pkt_len) {
ipid_delta = (gso_ctx->flag != RTE_GSO_FLAG_IPID_FIXED);
ol_flags = pkt->ol_flags;
- if (IS_IPV4_VXLAN_TCP4(pkt->ol_flags)
- && (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) {
+ if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+ ((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
pkt->ol_flags &= (~PKT_TX_TCP_SEG);
ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
direct_pool, indirect_pool,