+ /*
+  * Software GSO dispatch (fragment of rte_gso_segment): choose the
+  * segmentation routine matching the packet type and the offloads
+  * enabled in gso_ctx, then adjust mbuf references on success.
+  */
+ /* Packet already fits in one segment: clear the TSO/UFO request
+  * flags and hand the packet back unchanged as the single output. */
+ if (gso_ctx->gso_size >= pkt->pkt_len) {
+ pkt->ol_flags &= (~(PKT_TX_TCP_SEG | PKT_TX_UDP_SEG));
+ pkts_out[0] = pkt;
+ return 1;
+ }
+
+ direct_pool = gso_ctx->direct_pool;
+ indirect_pool = gso_ctx->indirect_pool;
+ gso_size = gso_ctx->gso_size;
+ /* Unless the application requested a fixed IPv4 ID, each output
+  * segment gets an incremented IP ID. */
+ ipid_delta = (gso_ctx->flag != RTE_GSO_FLAG_IPID_FIXED);
+ /* Save ol_flags so they can be restored if segmentation fails. */
+ ol_flags = pkt->ol_flags;
+
+ /* Tunneled TCP/IPv4 over VXLAN or GRE. */
+ if ((IS_IPV4_VXLAN_TCP4(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_VXLAN_TNL_TSO)) ||
+ ((IS_IPV4_GRE_TCP4(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_GRE_TNL_TSO)))) {
+ /* Segmentation is done in software; drop the HW TSO request flag. */
+ pkt->ol_flags &= (~PKT_TX_TCP_SEG);
+ ret = gso_tunnel_tcp4_segment(pkt, gso_size, ipid_delta,
+ direct_pool, indirect_pool,
+ pkts_out, nb_pkts_out);
+ } else if (IS_IPV4_TCP(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_TCP_TSO)) {
+ /* Plain TCP/IPv4. */
+ pkt->ol_flags &= (~PKT_TX_TCP_SEG);
+ ret = gso_tcp4_segment(pkt, gso_size, ipid_delta,
+ direct_pool, indirect_pool,
+ pkts_out, nb_pkts_out);
+ } else if (IS_IPV4_UDP(pkt->ol_flags) &&
+ (gso_ctx->gso_types & DEV_TX_OFFLOAD_UDP_TSO)) {
+ /* UDP/IPv4; note gso_udp4_segment() takes no ipid_delta argument. */
+ pkt->ol_flags &= (~PKT_TX_UDP_SEG);
+ ret = gso_udp4_segment(pkt, gso_size, direct_pool,
+ indirect_pool, pkts_out, nb_pkts_out);
+ } else {
+ /* unsupported packet, skip */
+ pkts_out[0] = pkt;
+ RTE_LOG(DEBUG, GSO, "Unsupported packet type\n");
+ return 1;
+ }
+
+ if (ret > 1) {
+ /* Segmentation produced multiple outputs: drop one reference on
+  * every mbuf of the input chain.  NOTE(review): the outputs are
+  * presumably built from indirect mbufs attached to this chain
+  * (indirect_pool above), so the originals are freed only once all
+  * output segments are freed — confirm against gso_*_segment(). */
+ pkt_seg = pkt;
+ while (pkt_seg) {
+ rte_mbuf_refcnt_update(pkt_seg, -1);
+ pkt_seg = pkt_seg->next;
+ }
+ } else if (ret < 0) {
+ /* Revert the ol_flags in the event of failure. */
+ pkt->ol_flags = ol_flags;
+ }