4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #ifndef _IPV4_RSMBL_H_
36 #define _IPV4_RSMBL_H_
42 * Implementation of IPv4 reassemble.
/*
 * Use <src addr, dst addr, id> to uniquely identify a fragmented datagram.
 */
struct ipv4_frag_key {
	uint64_t src_dst;	/* src and dst IPv4 addresses read as one 64-bit word */
	uint32_t id;		/* IP header packet_id (kept in network byte order) */
};
/*
 * Key helpers: a key with src_dst == 0 marks a free/invalid table entry.
 */
#define	IPV4_FRAG_KEY_INVALIDATE(k)	((k)->src_dst = 0)
#define	IPV4_FRAG_KEY_EMPTY(k)		((k)->src_dst == 0)

/* Zero result means the keys are equal; non-zero means they differ. */
#define	IPV4_FRAG_KEY_CMP(k1, k2)	\
	(((k1)->src_dst ^ (k2)->src_dst) | ((k1)->id ^ (k2)->id))
75 * Fragmented packet to reassemble.
76 * First two entries in the frags[] array are for the last and first fragments.
78 struct ipv4_frag_pkt {
79 TAILQ_ENTRY(ipv4_frag_pkt) lru; /* LRU list */
80 struct ipv4_frag_key key;
81 uint64_t start; /* creation timestamp */
82 uint32_t total_size; /* expected reassembled size */
83 uint32_t frag_size; /* size of fragments received */
84 uint32_t last_idx; /* index of next entry to fill */
85 struct ipv4_frag frags[MAX_FRAG_NUM];
86 } __rte_cache_aligned;
/* Debug logging: compiles to a no-op unless IPV4_FRAG_DEBUG is defined. */
#ifdef IPV4_FRAG_DEBUG
#define	IPV4_FRAG_LOG(lvl, fmt, args...)	RTE_LOG(lvl, USER1, fmt, ##args)
#else
#define	IPV4_FRAG_LOG(lvl, fmt, args...)	do {} while (0)
#endif /* IPV4_FRAG_DEBUG */
98 ipv4_frag_reset(struct ipv4_frag_pkt *fp, uint64_t tms)
100 static const struct ipv4_frag zero_frag = {
107 fp->total_size = UINT32_MAX;
109 fp->last_idx = MIN_FRAG_NUM;
110 fp->frags[LAST_FRAG_IDX] = zero_frag;
111 fp->frags[FIRST_FRAG_IDX] = zero_frag;
115 ipv4_frag_free(struct ipv4_frag_pkt *fp)
119 for (i = 0; i != fp->last_idx; i++) {
120 if (fp->frags[i].mb != NULL) {
121 rte_pktmbuf_free(fp->frags[i].mb);
122 fp->frags[i].mb = NULL;
131 * Takes 2 mbufs that represents two framents of the same packet and
132 * chains them into one mbuf.
135 ipv4_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
139 /* adjust start of the last fragment data. */
140 rte_pktmbuf_adj(mp, (uint16_t)(mp->pkt.vlan_macip.f.l2_len +
141 mp->pkt.vlan_macip.f.l3_len));
143 /* chain two fragments. */
144 ms = rte_pktmbuf_lastseg(mn);
147 /* accumulate number of segments and total length. */
148 mn->pkt.nb_segs = (uint8_t)(mn->pkt.nb_segs + mp->pkt.nb_segs);
149 mn->pkt.pkt_len += mp->pkt.pkt_len;
151 /* reset pkt_len and nb_segs for chained fragment. */
152 mp->pkt.pkt_len = mp->pkt.data_len;
157 * Reassemble fragments into one packet.
159 static inline struct rte_mbuf *
160 ipv4_frag_reassemble(const struct ipv4_frag_pkt *fp)
162 struct ipv4_hdr *ip_hdr;
163 struct rte_mbuf *m, *prev;
164 uint32_t i, n, ofs, first_len;
166 first_len = fp->frags[FIRST_FRAG_IDX].len;
167 n = fp->last_idx - 1;
169 /*start from the last fragment. */
170 m = fp->frags[LAST_FRAG_IDX].mb;
171 ofs = fp->frags[LAST_FRAG_IDX].ofs;
173 while (ofs != first_len) {
177 for (i = n; i != FIRST_FRAG_IDX && ofs != first_len; i--) {
179 /* previous fragment found. */
180 if(fp->frags[i].ofs + fp->frags[i].len == ofs) {
182 ipv4_frag_chain(fp->frags[i].mb, m);
184 /* update our last fragment and offset. */
186 ofs = fp->frags[i].ofs;
190 /* error - hole in the packet. */
196 /* chain with the first fragment. */
197 ipv4_frag_chain(fp->frags[FIRST_FRAG_IDX].mb, m);
198 m = fp->frags[FIRST_FRAG_IDX].mb;
200 /* update mbuf fields for reassembled packet. */
201 m->ol_flags |= PKT_TX_IP_CKSUM;
203 /* update ipv4 header for the reassmebled packet */
204 ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
205 m->pkt.vlan_macip.f.l2_len);
207 ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
208 m->pkt.vlan_macip.f.l3_len));
209 ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
210 rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
211 ip_hdr->hdr_checksum = 0;
216 static inline struct rte_mbuf *
217 ipv4_frag_process(struct ipv4_frag_pkt *fp, struct rte_mbuf *mb,
218 uint16_t ofs, uint16_t len, uint16_t more_frags)
222 fp->frag_size += len;
224 /* this is the first fragment. */
226 idx = (fp->frags[FIRST_FRAG_IDX].mb == NULL) ?
227 FIRST_FRAG_IDX : UINT32_MAX;
229 /* this is the last fragment. */
230 } else if (more_frags == 0) {
231 fp->total_size = ofs + len;
232 idx = (fp->frags[LAST_FRAG_IDX].mb == NULL) ?
233 LAST_FRAG_IDX : UINT32_MAX;
235 /* this is the intermediate fragment. */
236 } else if ((idx = fp->last_idx) <
237 sizeof (fp->frags) / sizeof (fp->frags[0])) {
242 * errorneous packet: either exceeed max allowed number of fragments,
243 * or duplicate first/last fragment encountered.
245 if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {
247 /* report an error. */
248 IPV4_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
249 "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
250 "total_size: %u, frag_size: %u, last_idx: %u\n"
251 "first fragment: ofs: %u, len: %u\n"
252 "last fragment: ofs: %u, len: %u\n\n",
254 fp, fp->key.src_dst, fp->key.id,
255 fp->total_size, fp->frag_size, fp->last_idx,
256 fp->frags[FIRST_FRAG_IDX].ofs,
257 fp->frags[FIRST_FRAG_IDX].len,
258 fp->frags[LAST_FRAG_IDX].ofs,
259 fp->frags[LAST_FRAG_IDX].len);
261 /* free all fragments, invalidate the entry. */
263 IPV4_FRAG_KEY_INVALIDATE(&fp->key);
264 rte_pktmbuf_free(mb);
269 fp->frags[idx].ofs = ofs;
270 fp->frags[idx].len = len;
271 fp->frags[idx].mb = mb;
275 /* not all fragments are collected yet. */
276 if (likely (fp->frag_size < fp->total_size)) {
279 /* if we collected all fragments, then try to reassemble. */
280 } else if (fp->frag_size == fp->total_size &&
281 fp->frags[FIRST_FRAG_IDX].mb != NULL) {
282 mb = ipv4_frag_reassemble(fp);
285 /* errorenous set of fragments. */
288 /* report an error. */
289 IPV4_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
290 "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
291 "total_size: %u, frag_size: %u, last_idx: %u\n"
292 "first fragment: ofs: %u, len: %u\n"
293 "last fragment: ofs: %u, len: %u\n\n",
295 fp, fp->key.src_dst, fp->key.id,
296 fp->total_size, fp->frag_size, fp->last_idx,
297 fp->frags[FIRST_FRAG_IDX].ofs,
298 fp->frags[FIRST_FRAG_IDX].len,
299 fp->frags[LAST_FRAG_IDX].ofs,
300 fp->frags[LAST_FRAG_IDX].len);
302 /* free associated resources. */
306 /* we are done with that entry, invalidate it. */
307 IPV4_FRAG_KEY_INVALIDATE(&fp->key);
311 #include "ipv4_frag_tbl.h"
314 * Process new mbuf with fragment of IPV4 packet.
315 * Incoming mbuf should have it's l2_len/l3_len fields setuped correclty.
317 * Table where to lookup/add the fragmented packet.
319 * Incoming mbuf with IPV4 fragment.
321 * Fragment arrival timestamp.
323 * Pointer to the IPV4 header inside the fragment.
325 * Fragment's offset (as extracted from the header).
327 * Fragment's MF flag.
329 * Pointer to mbuf for reassebled packet, or NULL if:
330 * - an error occured.
331 * - not all fragments of the packet are collected yet.
333 static inline struct rte_mbuf *
334 ipv4_frag_mbuf(struct ipv4_frag_tbl *tbl, struct rte_mbuf *mb, uint64_t tms,
335 struct ipv4_hdr *ip_hdr, uint16_t ip_ofs, uint16_t ip_flag)
337 struct ipv4_frag_pkt *fp;
338 struct ipv4_frag_key key;
342 psd = (uint64_t *)&ip_hdr->src_addr;
343 key.src_dst = psd[0];
344 key.id = ip_hdr->packet_id;
346 ip_ofs *= IPV4_HDR_OFFSET_UNITS;
347 ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
348 mb->pkt.vlan_macip.f.l3_len);
350 IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
351 "mbuf: %p, tms: %" PRIu64
352 ", key: <%" PRIx64 ", %#x>, ofs: %u, len: %u, flags: %#x\n"
353 "tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, "
354 "max_entries: %u, use_entries: %u\n\n",
356 mb, tms, key.src_dst, key.id, ip_ofs, ip_len, ip_flag,
357 tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries,
360 /* try to find/add entry into the fragment's table. */
361 if ((fp = ipv4_frag_find(tbl, &key, tms)) == NULL) {
362 rte_pktmbuf_free(mb);
366 IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
367 "tbl: %p, max_entries: %u, use_entries: %u\n"
368 "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
369 ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
371 tbl, tbl->max_entries, tbl->use_entries,
372 fp, fp->key.src_dst, fp->key.id, fp->start,
373 fp->total_size, fp->frag_size, fp->last_idx);
376 /* process the fragmented packet. */
377 mb = ipv4_frag_process(fp, mb, ip_ofs, ip_len, ip_flag);
378 ipv4_frag_inuse(tbl, fp);
380 IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
382 "tbl: %p, max_entries: %u, use_entries: %u\n"
383 "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
384 ", total_size: %u, frag_size: %u, last_idx: %u\n\n",
385 __func__, __LINE__, mb,
386 tbl, tbl->max_entries, tbl->use_entries,
387 fp, fp->key.src_dst, fp->key.id, fp->start,
388 fp->total_size, fp->frag_size, fp->last_idx);
393 #endif /* _IPV4_RSMBL_H_ */