/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <errno.h>

#include <rte_log.h>
#include <rte_ethdev.h>

#include "rte_gso.h"
#include "gso_common.h"
#include "gso_tcp4.h"
#include "gso_tunnel_tcp4.h"
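
/*
 * Software GSO entry point: split a packet that exceeds gso_ctx->gso_size
 * into gso_size-byte segments using the context's direct and indirect
 * mempools. On success the resulting segments (or the untouched input
 * packet, when it is small enough or of an unsupported type) are stored in
 * pkts_out and their count is returned; invalid arguments yield -EINVAL.
 */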
int
rte_gso_segment(struct rte_mbuf *pkt,
		const struct rte_gso_ctx *gso_ctx,
		struct rte_mbuf **pkts_out,
		uint16_t nb_pkts_out)
{
	struct rte_mempool *direct_pool, *indirect_pool;
	struct rte_mbuf *pkt_seg;
	uint64_t ol_flags;
	uint16_t gso_size;
	uint8_t ipid_delta;
	int ret;

	if (pkt == NULL || pkts_out == NULL || gso_ctx == NULL ||
			nb_pkts_out < 1 ||
			gso_ctx->gso_size < RTE_GSO_SEG_SIZE_MIN ||
			((gso_ctx->gso_types & (DEV_TX_OFFLOAD_TCP_TSO |
			DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
			DEV_TX_OFFLOAD_GRE_TNL_TSO)) == 0))
		return -EINVAL;

	/* The packet already fits within one segment: no segmentation needed. */
	if (gso_ctx->gso_size >= pkt->pkt_len) {
		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
		pkts_out[0] = pkt;
		return 1;
	}

	direct_pool = gso_ctx->direct_pool;
	indirect_pool = gso_ctx->indirect_pool;
	gso_size = gso_ctx->gso_size;
	ipid_delta = (gso_ctx->flag != RTE_GSO_FLAG_IPID_FIXED);
	ol_flags = pkt->ol_flags;
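
	/* Dispatch on packet type: tunneled (VXLAN/GRE) TCP/IPv4 and plain
	 * TCP/IPv4 are segmented in software; anything else is passed
	 * through unchanged.
	 */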
	if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
			(gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
			((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
			(gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
		ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
				direct_pool, indirect_pool,
				pkts_out, nb_pkts_out);
	} else if (IS_IPV4_TCP(pkt->ol_flags) &&
			(gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
		pkt->ol_flags &= (~PKT_TX_TCP_SEG);
		ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
				direct_pool, indirect_pool,
				pkts_out, nb_pkts_out);
	} else {
		/* unsupported packet, skip */
		pkts_out[0] = pkt;
		RTE_LOG(DEBUG, GSO, "Unsupported packet type\n");
		return 1;
	}
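
	/* On success the GSO segments reference the input packet's payload
	 * through indirect mbufs, so drop the caller's reference on each of
	 * the input mbufs; the payload is then released once the segments
	 * themselves are freed.
	 */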
	if (ret > 1) {
		pkt_seg = pkt;
		while (pkt_seg) {
			rte_mbuf_refcnt_update(pkt_seg, -1);
			pkt_seg = pkt_seg->next;
		}
	} else if (ret < 0) {
		/* Revert the ol_flags in the event of failure. */
		pkt->ol_flags = ol_flags;
	}

	return ret;
}
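
#ifdef RTE_GSO_USAGE_EXAMPLE
/*
 * Minimal usage sketch, compiled out by default, showing one way an
 * application might combine rte_gso_segment() with transmission. The
 * function name, the burst size of 64 and the port/queue parameters are
 * assumptions made for this example only and are not part of the library.
 */
static void
gso_example_tx(struct rte_gso_ctx *gso_ctx, struct rte_mbuf *pkt,
		uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *segs[64];
	uint16_t nb_segs, nb_sent;
	int ret;

	/* Split the packet in software; segs[] receives the GSO segments,
	 * or the original packet when no segmentation is performed.
	 */
	ret = rte_gso_segment(pkt, gso_ctx, segs, RTE_DIM(segs));
	if (ret < 0) {
		rte_pktmbuf_free(pkt);
		return;
	}
	nb_segs = (uint16_t)ret;

	/* Transmit the segments and free whatever the driver did not take. */
	nb_sent = rte_eth_tx_burst(port_id, queue_id, segs, nb_segs);
	while (nb_sent < nb_segs)
		rte_pktmbuf_free(segs[nb_sent++]);
}
#endif /* RTE_GSO_USAGE_EXAMPLE */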