/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _GSO_COMMON_H_
#define _GSO_COMMON_H_
/* Returns true if the IPv4 fragment-offset field indicates the packet is a
 * fragment: either a non-zero fragment offset, or the More-Fragments flag set.
 */
#define IS_FRAGMENTED(frag_off) (((frag_off) & IPV4_HDR_OFFSET_MASK) != 0 \
		|| ((frag_off) & IPV4_HDR_MF_FLAG) == IPV4_HDR_MF_FLAG)
/* Bit masks for the PSH and FIN flags in the TCP header flags byte. */
#define TCP_HDR_PSH_MASK ((uint8_t)0x08)
#define TCP_HDR_FIN_MASK ((uint8_t)0x01)
/* Returns true if the mbuf Tx offload flags request TCP segmentation of an
 * IPv4 packet.
 */
#define IS_IPV4_TCP(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4)) == \
		(PKT_TX_TCP_SEG | PKT_TX_IPV4))
/* Returns true if the Tx offload flags describe a VxLAN-tunneled TCP/IPv4
 * packet with an outer IPv4 header (IPv4-in-VxLAN-in-IPv4 TSO request).
 * Note: the original extraction dropped the closing `PKT_TX_TUNNEL_VXLAN))`
 * line, leaving the macro unterminated; restored here.
 */
#define IS_IPV4_VXLAN_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
				PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_VXLAN)) == \
		(PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
		 PKT_TX_TUNNEL_VXLAN))
/* Returns true if the Tx offload flags describe a GRE-tunneled TCP/IPv4
 * packet with an outer IPv4 header.
 * Note: the original extraction dropped the closing `PKT_TX_TUNNEL_GRE))`
 * line, leaving the macro unterminated; restored here.
 */
#define IS_IPV4_GRE_TCP4(flag) (((flag) & (PKT_TX_TCP_SEG | PKT_TX_IPV4 | \
				PKT_TX_OUTER_IPV4 | PKT_TX_TUNNEL_GRE)) == \
		(PKT_TX_TCP_SEG | PKT_TX_IPV4 | PKT_TX_OUTER_IPV4 | \
		 PKT_TX_TUNNEL_GRE))
64 * Internal function which updates the UDP header of a packet, following
65 * segmentation. This is required to update the header's datagram length field.
68 * The packet containing the UDP header.
70 * The offset of the UDP header from the start of the packet.
73 update_udp_header(struct rte_mbuf *pkt, uint16_t udp_offset)
75 struct udp_hdr *udp_hdr;
77 udp_hdr = (struct udp_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
79 udp_hdr->dgram_len = rte_cpu_to_be_16(pkt->pkt_len - udp_offset);
83 * Internal function which updates the TCP header of a packet, following
84 * segmentation. This is required to update the header's 'sent' sequence
85 * number, and also to clear 'PSH' and 'FIN' flags for non-tail segments.
88 * The packet containing the TCP header.
90 * The offset of the TCP header from the start of the packet.
92 * The sent sequence number.
94 * Indicates whether or not this is a tail segment.
97 update_tcp_header(struct rte_mbuf *pkt, uint16_t l4_offset, uint32_t sent_seq,
100 struct tcp_hdr *tcp_hdr;
102 tcp_hdr = (struct tcp_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
104 tcp_hdr->sent_seq = rte_cpu_to_be_32(sent_seq);
105 if (likely(non_tail))
106 tcp_hdr->tcp_flags &= (~(TCP_HDR_PSH_MASK |
111 * Internal function which updates the IPv4 header of a packet, following
112 * segmentation. This is required to update the header's 'total_length' field,
113 * to reflect the reduced length of the now-segmented packet. Furthermore, the
114 * header's 'packet_id' field must be updated to reflect the new ID of the
115 * now-segmented packet.
118 * The packet containing the IPv4 header.
120 * The offset of the IPv4 header from the start of the packet.
122 * The new ID of the packet.
125 update_ipv4_header(struct rte_mbuf *pkt, uint16_t l3_offset, uint16_t id)
127 struct ipv4_hdr *ipv4_hdr;
129 ipv4_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(pkt, char *) +
131 ipv4_hdr->total_length = rte_cpu_to_be_16(pkt->pkt_len - l3_offset);
132 ipv4_hdr->packet_id = rte_cpu_to_be_16(id);
/**
 * Internal function which divides the input packet into small segments.
 * Each of the newly-created segments is organized as a two-segment MBUF,
 * where the first segment is a standard mbuf, which stores a copy of
 * packet header, and the second is an indirect mbuf which points to a
 * section of data in the input packet.
 *
 * @param pkt
 *  Packet to segment.
 * @param pkt_hdr_offset
 *  Packet header offset, measured in bytes.
 * @param pyld_unit_size
 *  The max payload length of a GSO segment.
 * @param direct_pool
 *  MBUF pool used for allocating direct buffers for output segments.
 * @param indirect_pool
 *  MBUF pool used for allocating indirect buffers for output segments.
 * @param pkts_out
 *  Pointer array used to keep the mbuf addresses of output segments. If
 *  the memory space in pkts_out is insufficient, gso_do_segment() fails
 *  and returns -EINVAL.
 * @param nb_pkts_out
 *  The max number of items that pkts_out can keep.
 *
 * @return
 *  - The number of segments created in the event of success.
 *  - Return -ENOMEM if run out of memory in MBUF pools.
 *  - Return -EINVAL for invalid parameters.
 */
int gso_do_segment(struct rte_mbuf *pkt,
		uint16_t pkt_hdr_offset,
		uint16_t pyld_unit_size,
		struct rte_mempool *direct_pool,
		struct rte_mempool *indirect_pool,
		struct rte_mbuf **pkts_out,
		uint16_t nb_pkts_out);