/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "gso_common.h"
#include "gso_tcp4.h"
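
/*
 * Refresh the IPv4 and TCP headers of every output segment produced by
 * gso_do_segment(): each segment reuses a copy of the original packet's
 * headers, so the IPv4 ID and TCP sequence number must be rewritten
 * per segment.
 */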
static void
update_ipv4_tcp_headers(struct rte_mbuf *pkt, uint8_t ipid_delta,
		struct rte_mbuf **segs, uint16_t nb_segs)
{
	struct ipv4_hdr *ipv4_hdr;
	struct tcp_hdr *tcp_hdr;
	uint32_t sent_seq;
	uint16_t id, tail_idx, i;
	uint16_t l3_offset = pkt->l2_len;
	uint16_t l4_offset = l3_offset + pkt->l3_len;

	ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			l3_offset);
	tcp_hdr = (struct tcp_hdr *)((char *)ipv4_hdr + pkt->l3_len);
	id = rte_be_to_cpu_16(ipv4_hdr->packet_id);
	sent_seq = rte_be_to_cpu_32(tcp_hdr->sent_seq);
	tail_idx = nb_segs - 1;
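
	/*
	 * Walk the segments: step the IPv4 ID by ipid_delta for each one and
	 * advance the TCP sequence number by the payload bytes carried in the
	 * segment (pkt_len minus the header bytes held in its first mbuf).
	 * The last argument tells update_tcp_header() whether this is a
	 * non-tail segment, i.e. whether another segment follows.
	 */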
	for (i = 0; i < nb_segs; i++) {
		update_ipv4_header(segs[i], l3_offset, id);
		update_tcp_header(segs[i], l4_offset, sent_seq, i < tail_idx);
		id += ipid_delta;
		sent_seq += (segs[i]->pkt_len - segs[i]->data_len);
	}
}
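
/*
 * Segment a TCP/IPv4 packet so that no output segment is larger than
 * gso_size bytes. Output segments are written to pkts_out; mbufs are
 * allocated from direct_pool and indirect_pool. Already-fragmented
 * packets and packets carrying no payload are passed through unchanged,
 * with the input mbuf returned as the single entry in pkts_out.
 */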
int
gso_tcp4_segment(struct rte_mbuf *pkt,
		uint16_t gso_size,
		uint8_t ipid_delta,
		struct rte_mempool *direct_pool,
		struct rte_mempool *indirect_pool,
		struct rte_mbuf **pkts_out,
		uint16_t nb_pkts_out)
{
	struct ipv4_hdr *ipv4_hdr;
	uint16_t pyld_unit_size, hdr_offset;
	uint16_t frag_off;
	int ret;

	/* Don't process the fragmented packet */
	ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
			pkt->l2_len);
	frag_off = rte_be_to_cpu_16(ipv4_hdr->fragment_offset);
	if (unlikely(IS_FRAGMENTED(frag_off))) {
		pkts_out[0] = pkt;
		return 1;
	}

	/* Don't process the packet without data */
	hdr_offset = pkt->l2_len + pkt->l3_len + pkt->l4_len;
	if (unlikely(hdr_offset >= pkt->pkt_len)) {
		pkts_out[0] = pkt;
		return 1;
	}

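	/*
	 * Maximum number of payload bytes per output segment: the requested
	 * gso_size minus the L2/L3/L4 header bytes replicated in every
	 * segment.
	 */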
	pyld_unit_size = gso_size - hdr_offset;

	/* Segment the payload */
	ret = gso_do_segment(pkt, hdr_offset, pyld_unit_size, direct_pool,
			indirect_pool, pkts_out, nb_pkts_out);
	if (ret > 1)
		update_ipv4_tcp_headers(pkt, ipid_delta, pkts_out, ret);

	return ret;
}