4 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include <rte_jhash.h>
37 #include <rte_hash_crc.h>
39 #include "ip_frag_common.h"
/* Multiplicative seed for the fragment-key hash functions below. */
#define	PRIME_VALUE	0xeaad8405

/* Map a hash signature to its bucket inside the table's entry array. */
#define	IP_FRAG_TBL_POS(tbl, sig)	\
	((tbl)->pkt + ((sig) & (tbl)->entry_mask))

/*
 * Statistics update helper: increments counter <f> of stat struct <s> by <v>
 * when table statistics are compiled in, expands to a no-op otherwise.
 * (The #else separating the two definitions was missing — restored.)
 */
#ifdef RTE_LIBRTE_IP_FRAG_TBL_STAT
#define	IP_FRAG_TBL_STAT_UPDATE(s, f, v)	((s)->f += (v))
#else
#define	IP_FRAG_TBL_STAT_UPDATE(s, f, v)	do {} while (0)
#endif /* IP_FRAG_TBL_STAT */
52 /* local frag table helper functions */
54 ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
55 struct ip_frag_pkt *fp)
58 ip_frag_key_invalidate(&fp->key);
59 TAILQ_REMOVE(&tbl->lru, fp, lru);
61 IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, del_num, 1);
65 ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
66 const struct ip_frag_key *key, uint64_t tms)
69 ip_frag_reset(fp, tms);
70 TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
72 IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, add_num, 1);
76 ip_frag_tbl_reuse(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
77 struct ip_frag_pkt *fp, uint64_t tms)
80 ip_frag_reset(fp, tms);
81 TAILQ_REMOVE(&tbl->lru, fp, lru);
82 TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
83 IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, reuse_num, 1);
88 ipv4_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
93 p = (const uint32_t *)&key->src_dst;
96 v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
97 v = rte_hash_crc_4byte(p[1], v);
98 v = rte_hash_crc_4byte(key->id, v);
101 v = rte_jhash_3words(p[0], p[1], key->id, PRIME_VALUE);
102 #endif /* RTE_ARCH_X86 */
105 *v2 = (v << 7) + (v >> 14);
109 ipv6_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
114 p = (const uint32_t *) &key->src_dst;
117 v = rte_hash_crc_4byte(p[0], PRIME_VALUE);
118 v = rte_hash_crc_4byte(p[1], v);
119 v = rte_hash_crc_4byte(p[2], v);
120 v = rte_hash_crc_4byte(p[3], v);
121 v = rte_hash_crc_4byte(p[4], v);
122 v = rte_hash_crc_4byte(p[5], v);
123 v = rte_hash_crc_4byte(p[6], v);
124 v = rte_hash_crc_4byte(p[7], v);
125 v = rte_hash_crc_4byte(key->id, v);
128 v = rte_jhash_3words(p[0], p[1], p[2], PRIME_VALUE);
129 v = rte_jhash_3words(p[3], p[4], p[5], v);
130 v = rte_jhash_3words(p[6], p[7], key->id, v);
131 #endif /* RTE_ARCH_X86 */
134 *v2 = (v << 7) + (v >> 14);
138 ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
139 struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
143 fp->frag_size += len;
145 /* this is the first fragment. */
147 idx = (fp->frags[IP_FIRST_FRAG_IDX].mb == NULL) ?
148 IP_FIRST_FRAG_IDX : UINT32_MAX;
150 /* this is the last fragment. */
151 } else if (more_frags == 0) {
152 fp->total_size = ofs + len;
153 idx = (fp->frags[IP_LAST_FRAG_IDX].mb == NULL) ?
154 IP_LAST_FRAG_IDX : UINT32_MAX;
156 /* this is the intermediate fragment. */
157 } else if ((idx = fp->last_idx) <
158 sizeof (fp->frags) / sizeof (fp->frags[0])) {
163 * erroneous packet: either exceed max allowed number of fragments,
164 * or duplicate first/last fragment encountered.
166 if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {
168 /* report an error. */
169 if (fp->key.key_len == IPV4_KEYLEN)
170 IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
171 "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
172 "total_size: %u, frag_size: %u, last_idx: %u\n"
173 "first fragment: ofs: %u, len: %u\n"
174 "last fragment: ofs: %u, len: %u\n\n",
176 fp, fp->key.src_dst[0], fp->key.id,
177 fp->total_size, fp->frag_size, fp->last_idx,
178 fp->frags[IP_FIRST_FRAG_IDX].ofs,
179 fp->frags[IP_FIRST_FRAG_IDX].len,
180 fp->frags[IP_LAST_FRAG_IDX].ofs,
181 fp->frags[IP_LAST_FRAG_IDX].len);
183 IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
184 "ipv4_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
185 "total_size: %u, frag_size: %u, last_idx: %u\n"
186 "first fragment: ofs: %u, len: %u\n"
187 "last fragment: ofs: %u, len: %u\n\n",
189 fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id,
190 fp->total_size, fp->frag_size, fp->last_idx,
191 fp->frags[IP_FIRST_FRAG_IDX].ofs,
192 fp->frags[IP_FIRST_FRAG_IDX].len,
193 fp->frags[IP_LAST_FRAG_IDX].ofs,
194 fp->frags[IP_LAST_FRAG_IDX].len);
196 /* free all fragments, invalidate the entry. */
197 ip_frag_free(fp, dr);
198 ip_frag_key_invalidate(&fp->key);
199 IP_FRAG_MBUF2DR(dr, mb);
204 fp->frags[idx].ofs = ofs;
205 fp->frags[idx].len = len;
206 fp->frags[idx].mb = mb;
210 /* not all fragments are collected yet. */
211 if (likely (fp->frag_size < fp->total_size)) {
214 /* if we collected all fragments, then try to reassemble. */
215 } else if (fp->frag_size == fp->total_size &&
216 fp->frags[IP_FIRST_FRAG_IDX].mb != NULL) {
217 if (fp->key.key_len == IPV4_KEYLEN)
218 mb = ipv4_frag_reassemble(fp);
220 mb = ipv6_frag_reassemble(fp);
223 /* errorenous set of fragments. */
226 /* report an error. */
227 if (fp->key.key_len == IPV4_KEYLEN)
228 IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
229 "ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
230 "total_size: %u, frag_size: %u, last_idx: %u\n"
231 "first fragment: ofs: %u, len: %u\n"
232 "last fragment: ofs: %u, len: %u\n\n",
234 fp, fp->key.src_dst[0], fp->key.id,
235 fp->total_size, fp->frag_size, fp->last_idx,
236 fp->frags[IP_FIRST_FRAG_IDX].ofs,
237 fp->frags[IP_FIRST_FRAG_IDX].len,
238 fp->frags[IP_LAST_FRAG_IDX].ofs,
239 fp->frags[IP_LAST_FRAG_IDX].len);
241 IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
242 "ipv4_frag_pkt: %p, key: <" IPv6_KEY_BYTES_FMT ", %#x>, "
243 "total_size: %u, frag_size: %u, last_idx: %u\n"
244 "first fragment: ofs: %u, len: %u\n"
245 "last fragment: ofs: %u, len: %u\n\n",
247 fp, IPv6_KEY_BYTES(fp->key.src_dst), fp->key.id,
248 fp->total_size, fp->frag_size, fp->last_idx,
249 fp->frags[IP_FIRST_FRAG_IDX].ofs,
250 fp->frags[IP_FIRST_FRAG_IDX].len,
251 fp->frags[IP_LAST_FRAG_IDX].ofs,
252 fp->frags[IP_LAST_FRAG_IDX].len);
254 /* free associated resources. */
255 ip_frag_free(fp, dr);
258 /* we are done with that entry, invalidate it. */
259 ip_frag_key_invalidate(&fp->key);
265 * Find an entry in the table for the corresponding fragment.
266 * If such entry is not present, then allocate a new one.
267 * If the entry is stale, then free and reuse it.
270 ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
271 const struct ip_frag_key *key, uint64_t tms)
273 struct ip_frag_pkt *pkt, *free, *stale, *lru;
277 * Actually the two line below are totally redundant.
278 * they are here, just to make gcc 4.6 happy.
282 max_cycles = tbl->max_cycles;
284 IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, find_num, 1);
286 if ((pkt = ip_frag_lookup(tbl, key, tms, &free, &stale)) == NULL) {
288 /*timed-out entry, free and invalidate it*/
290 ip_frag_tbl_del(tbl, dr, stale);
294 * we found a free entry, check if we can use it.
295 * If we run out of free entries in the table, then
296 * check if we have a timed out entry to delete.
298 } else if (free != NULL &&
299 tbl->max_entries <= tbl->use_entries) {
300 lru = TAILQ_FIRST(&tbl->lru);
301 if (max_cycles + lru->start < tms) {
302 ip_frag_tbl_del(tbl, dr, lru);
305 IP_FRAG_TBL_STAT_UPDATE(&tbl->stat,
310 /* found a free entry to reuse. */
312 ip_frag_tbl_add(tbl, free, key, tms);
317 * we found the flow, but it is already timed out,
318 * so free associated resources, reposition it in the LRU list,
321 } else if (max_cycles + pkt->start < tms) {
322 ip_frag_tbl_reuse(tbl, dr, pkt, tms);
325 IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, fail_total, (pkt == NULL));
332 ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
333 const struct ip_frag_key *key, uint64_t tms,
334 struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
336 struct ip_frag_pkt *p1, *p2;
337 struct ip_frag_pkt *empty, *old;
339 uint32_t i, assoc, sig1, sig2;
344 max_cycles = tbl->max_cycles;
345 assoc = tbl->bucket_entries;
347 if (tbl->last != NULL && ip_frag_key_cmp(key, &tbl->last->key) == 0)
350 /* different hashing methods for IPv4 and IPv6 */
351 if (key->key_len == IPV4_KEYLEN)
352 ipv4_frag_hash(key, &sig1, &sig2);
354 ipv6_frag_hash(key, &sig1, &sig2);
356 p1 = IP_FRAG_TBL_POS(tbl, sig1);
357 p2 = IP_FRAG_TBL_POS(tbl, sig2);
359 for (i = 0; i != assoc; i++) {
360 if (p1->key.key_len == IPV4_KEYLEN)
361 IP_FRAG_LOG(DEBUG, "%s:%d:\n"
362 "tbl: %p, max_entries: %u, use_entries: %u\n"
363 "ipv6_frag_pkt line0: %p, index: %u from %u\n"
364 "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
366 tbl, tbl->max_entries, tbl->use_entries,
368 p1[i].key.src_dst[0], p1[i].key.id, p1[i].start);
370 IP_FRAG_LOG(DEBUG, "%s:%d:\n"
371 "tbl: %p, max_entries: %u, use_entries: %u\n"
372 "ipv6_frag_pkt line0: %p, index: %u from %u\n"
373 "key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64 "\n",
375 tbl, tbl->max_entries, tbl->use_entries,
377 IPv6_KEY_BYTES(p1[i].key.src_dst), p1[i].key.id, p1[i].start);
379 if (ip_frag_key_cmp(key, &p1[i].key) == 0)
381 else if (ip_frag_key_is_empty(&p1[i].key))
382 empty = (empty == NULL) ? (p1 + i) : empty;
383 else if (max_cycles + p1[i].start < tms)
384 old = (old == NULL) ? (p1 + i) : old;
386 if (p2->key.key_len == IPV4_KEYLEN)
387 IP_FRAG_LOG(DEBUG, "%s:%d:\n"
388 "tbl: %p, max_entries: %u, use_entries: %u\n"
389 "ipv6_frag_pkt line1: %p, index: %u from %u\n"
390 "key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
392 tbl, tbl->max_entries, tbl->use_entries,
394 p2[i].key.src_dst[0], p2[i].key.id, p2[i].start);
396 IP_FRAG_LOG(DEBUG, "%s:%d:\n"
397 "tbl: %p, max_entries: %u, use_entries: %u\n"
398 "ipv6_frag_pkt line1: %p, index: %u from %u\n"
399 "key: <" IPv6_KEY_BYTES_FMT ", %#x>, start: %" PRIu64 "\n",
401 tbl, tbl->max_entries, tbl->use_entries,
403 IPv6_KEY_BYTES(p2[i].key.src_dst), p2[i].key.id, p2[i].start);
405 if (ip_frag_key_cmp(key, &p2[i].key) == 0)
407 else if (ip_frag_key_is_empty(&p2[i].key))
408 empty = (empty == NULL) ?( p2 + i) : empty;
409 else if (max_cycles + p2[i].start < tms)
410 old = (old == NULL) ? (p2 + i) : old;