/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stddef.h>
#include <errno.h>

#include <rte_byteorder.h>
#include <rte_memcpy.h>
#include <rte_mempool.h>
#include <rte_debug.h>

#include "rte_ip_frag.h"
/* Max. number of fragments per packet allowed. */
#define	IPV4_MAX_FRAGS_PER_PACKET	0x80
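/*
 * Sizing note: with IPV4_MAX_FRAGS_PER_PACKET == 0x80 (128) and, for
 * example, a 1500-byte MTU (frag_size == 1480), up to 128 * 1480 == 189440
 * payload bytes are covered, comfortably above the 65535-byte IPv4
 * maximum datagram size.
 */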
#ifdef RTE_IPV4_FRAG_DEBUG

#define	RTE_IPV4_FRAG_ASSERT(exp)	do {				\
	if (!(exp)) {							\
		rte_panic("function %s, line %d\tassert \"" #exp	\
			"\" failed\n", __func__, __LINE__);		\
	}								\
} while (0)

#else /*RTE_IPV4_FRAG_DEBUG*/

#define	RTE_IPV4_FRAG_ASSERT(exp)	do { } while (0)

#endif /*RTE_IPV4_FRAG_DEBUG*/
#define	IPV4_HDR_DF_SHIFT	14
#define	IPV4_HDR_MF_SHIFT	13
#define	IPV4_HDR_FO_SHIFT	3

#define	IPV4_HDR_DF_MASK	(1 << IPV4_HDR_DF_SHIFT)
#define	IPV4_HDR_MF_MASK	(1 << IPV4_HDR_MF_SHIFT)

#define	IPV4_HDR_FO_MASK	((1 << IPV4_HDR_FO_SHIFT) - 1)
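/*
 * The 16-bit IPv4 fragment_offset field packs, in host byte order:
 * bit 14 = DF (Don't Fragment), bit 13 = MF (More Fragments),
 * bits 12:0 = fragment offset in 8-byte units (hence the FO shift of 3).
 * Worked example: MF set with a byte offset of 1480 encodes as
 * (1 << IPV4_HDR_MF_SHIFT) | (1480 >> IPV4_HDR_FO_SHIFT) == 0x20b9.
 */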
/*
 * Copy the original IPv4 header into a fragment and patch the
 * per-fragment fields: "fofs" carries the original flags+offset word
 * (host order), "dofs" is this fragment's data offset in bytes, and
 * "mf" sets the More Fragments bit.
 */
static inline void __fill_ipv4hdr_frag(struct ipv4_hdr *dst,
	const struct ipv4_hdr *src, uint16_t len, uint16_t fofs,
	uint16_t dofs, uint32_t mf)
{
	rte_memcpy(dst, src, sizeof(*dst));
	fofs = (uint16_t)(fofs + (dofs >> IPV4_HDR_FO_SHIFT));
	fofs = (uint16_t)(fofs | mf << IPV4_HDR_MF_SHIFT);
	dst->fragment_offset = rte_cpu_to_be_16(fofs);
	dst->total_length = rte_cpu_to_be_16(len);

	/* Zeroed here; PKT_TX_IP_CKSUM is requested on every fragment. */
	dst->hdr_checksum = 0;
}
/* Free the fragments already placed into mb[] (used on error paths). */
static inline void __free_fragments(struct rte_mbuf *mb[], uint32_t num)
{
	uint32_t i;

	for (i = 0; i != num; i++)
		rte_pktmbuf_free(mb[i]);
}
/**
 * IPv4 fragmentation.
 *
 * This function implements the fragmentation of IPv4 packets.
 *
 * @param pkt_in
 *   The input packet.
 * @param pkts_out
 *   Array storing the output fragments.
 * @param nb_pkts_out
 *   Number of entries available in the pkts_out array.
 * @param mtu_size
 *   Size in bytes of the Maximum Transfer Unit (MTU) for the outgoing IPv4
 *   datagrams. This value includes the size of the IPv4 header.
 * @param pool_direct
 *   MBUF pool used for allocating direct buffers for the output fragments.
 * @param pool_indirect
 *   MBUF pool used for allocating indirect buffers for the output fragments.
 * @return
 *   Upon successful completion - number of output fragments placed
 *   in the pkts_out array.
 *   Otherwise - (-1) * <errno>.
 *
 * An illustrative usage sketch appears at the end of this file.
 */
int32_t
rte_ipv4_fragmentation(struct rte_mbuf *pkt_in,
	struct rte_mbuf **pkts_out,
	uint16_t nb_pkts_out,
	uint16_t mtu_size,
	struct rte_mempool *pool_direct,
	struct rte_mempool *pool_indirect)
{
	struct rte_mbuf *in_seg = NULL;
	struct ipv4_hdr *in_hdr;
	uint32_t out_pkt_pos, in_seg_data_pos;
	uint32_t more_in_segs;
	uint16_t fragment_offset, flag_offset, frag_size;

	frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));

	/* Fragment size should be a multiple of 8. */
	RTE_IPV4_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
	/* The packet must fit into the maximum allowed number of fragments. */
	RTE_IPV4_FRAG_ASSERT(IPV4_MAX_FRAGS_PER_PACKET * frag_size >=
	    (uint16_t)(pkt_in->pkt.pkt_len - sizeof(struct ipv4_hdr)));
	in_hdr = (struct ipv4_hdr *)pkt_in->pkt.data;
	flag_offset = rte_be_to_cpu_16(in_hdr->fragment_offset);

	/* If Don't Fragment flag is set */
	if (unlikely((flag_offset & IPV4_HDR_DF_MASK) != 0))
		return -ENOTSUP;

	/* Check that pkts_out is big enough to hold all fragments */
	if (unlikely(frag_size * nb_pkts_out <
	    (uint16_t)(pkt_in->pkt.pkt_len - sizeof(struct ipv4_hdr))))
		return -EINVAL;

	in_seg = pkt_in;
	in_seg_data_pos = sizeof(struct ipv4_hdr);
	out_pkt_pos = 0;
	fragment_offset = 0;

	more_in_segs = 1;
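	/*
	 * Outer loop: build one output fragment per iteration. Inner loop:
	 * chain indirect mbufs onto the fragment until it reaches mtu_size
	 * or the input segment list is exhausted. The payload itself is
	 * never copied; the indirect segments reference the input mbufs.
	 */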
	while (likely(more_in_segs)) {
		struct rte_mbuf *out_pkt = NULL, *out_seg_prev = NULL;
		uint32_t more_out_segs;
		struct ipv4_hdr *out_hdr;

		/* Allocate direct buffer */
		out_pkt = rte_pktmbuf_alloc(pool_direct);
		if (unlikely(out_pkt == NULL)) {
			__free_fragments(pkts_out, out_pkt_pos);
			return -ENOMEM;
		}

		/* Reserve space for the IP header that will be built later */
		out_pkt->pkt.data_len = sizeof(struct ipv4_hdr);
		out_pkt->pkt.pkt_len = sizeof(struct ipv4_hdr);

		out_seg_prev = out_pkt;
		more_out_segs = 1;
		while (likely(more_out_segs && more_in_segs)) {
			struct rte_mbuf *out_seg = NULL;
			uint32_t len;
			/* Allocate indirect buffer */
			out_seg = rte_pktmbuf_alloc(pool_indirect);
			if (unlikely(out_seg == NULL)) {
				rte_pktmbuf_free(out_pkt);
				__free_fragments(pkts_out, out_pkt_pos);
				return -ENOMEM;
			}
			out_seg_prev->pkt.next = out_seg;
			out_seg_prev = out_seg;

			/* Prepare indirect buffer: attach to the input
			 * segment (bumping its reference count) and point
			 * at the not-yet-consumed part of its data. */
			rte_pktmbuf_attach(out_seg, in_seg);
			len = mtu_size - out_pkt->pkt.pkt_len;
			if (len > (in_seg->pkt.data_len - in_seg_data_pos)) {
				len = in_seg->pkt.data_len - in_seg_data_pos;
			}
			out_seg->pkt.data = (char *)in_seg->pkt.data +
				(uint16_t)in_seg_data_pos;
			out_seg->pkt.data_len = (uint16_t)len;
			out_pkt->pkt.pkt_len = (uint16_t)(len +
			    out_pkt->pkt.pkt_len);
			out_pkt->pkt.nb_segs += 1;
			in_seg_data_pos += len;
			/* Current output packet (i.e. fragment) done ? */
			if (unlikely(out_pkt->pkt.pkt_len >= mtu_size)) {
				more_out_segs = 0;
			}

			/* Current input segment done ? */
			if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
				in_seg = in_seg->pkt.next;
				in_seg_data_pos = 0;

				if (unlikely(in_seg == NULL)) {
					more_in_segs = 0;
				}
			}
		}
		/* Build the IP header */

		out_hdr = (struct ipv4_hdr *)out_pkt->pkt.data;

		__fill_ipv4hdr_frag(out_hdr, in_hdr,
		    (uint16_t)out_pkt->pkt.pkt_len,
		    flag_offset, fragment_offset, more_in_segs);

		fragment_offset = (uint16_t)(fragment_offset +
		    out_pkt->pkt.pkt_len - sizeof(struct ipv4_hdr));

		/* Request IPv4 checksum computation as a TX offload. */
		out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
		out_pkt->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);

		/* Write the fragment to the output list */
		pkts_out[out_pkt_pos] = out_pkt;
		out_pkt_pos++;
	}

	return out_pkt_pos;
}
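
/*
 * Illustrative usage sketch, not part of the library API: everything below
 * is an assumption about how a caller might drive rte_ipv4_fragmentation().
 * "send_cb" is a hypothetical stand-in for whatever TX path the application
 * uses, and the two mempools are assumed to have been created by the
 * application beforehand (e.g. via rte_mempool_create()).
 */
static inline int32_t
ipv4_frag_example(struct rte_mbuf *m, uint16_t mtu,
	struct rte_mempool *pool_direct, struct rte_mempool *pool_indirect,
	void (*send_cb)(struct rte_mbuf *))
{
	struct rte_mbuf *frags[IPV4_MAX_FRAGS_PER_PACKET];
	int32_t i, nb_frags;

	nb_frags = rte_ipv4_fragmentation(m, frags,
			IPV4_MAX_FRAGS_PER_PACKET, mtu,
			pool_direct, pool_indirect);

	/* On error nothing was placed into frags[]. */
	if (nb_frags < 0)
		return nb_frags; /* -ENOTSUP (DF set), -EINVAL or -ENOMEM */

	/* The fragments hold references into the input mbuf's data;
	 * drop the caller's own reference now that they exist. */
	rte_pktmbuf_free(m);

	for (i = 0; i != nb_frags; i++)
		send_cb(frags[i]);

	return nb_frags;
}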