git.droids-corp.org
/
dpdk.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
mlx4: avoid init errors when kernel modules are not loaded
[dpdk.git]
/
lib
/
librte_ip_frag
/
ip_frag_internal.c
diff --git a/lib/librte_ip_frag/ip_frag_internal.c b/lib/librte_ip_frag/ip_frag_internal.c
index 5d35037..a2c645b 100644 (file)
--- a/lib/librte_ip_frag/ip_frag_internal.c
+++ b/lib/librte_ip_frag/ip_frag_internal.c
@@ -32,15 +32,12 @@
*/
#include <stddef.h>
*/
#include <stddef.h>
-#include <stdint.h>
-#include <rte_byteorder.h>
#include <rte_jhash.h>
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
#include <rte_jhash.h>
#ifdef RTE_MACHINE_CPUFLAG_SSE4_2
#include <rte_hash_crc.h>
#endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
-#include "rte_ip_frag.h"
#include "ip_frag_common.h"
#define PRIME_VALUE 0xeaad8405
#include "ip_frag_common.h"
#define PRIME_VALUE 0xeaad8405
@@ -57,7 +54,7 @@
/* local frag table helper functions */
static inline void
ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
/* local frag table helper functions */
static inline void
ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
-	struct rte_ip_frag_pkt *fp)
+	struct ip_frag_pkt *fp)
{
ip_frag_free(fp, dr);
ip_frag_key_invalidate(&fp->key);
{
ip_frag_free(fp, dr);
ip_frag_key_invalidate(&fp->key);
@@ -67,7 +64,7 @@
ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
}
static inline void
}
static inline void
-ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_pkt *fp,
+ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
const struct ip_frag_key *key, uint64_t tms)
{
fp->key = key[0];
const struct ip_frag_key *key, uint64_t tms)
{
fp->key = key[0];
@@ -79,7 +76,7 @@
ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_pkt *fp,
static inline void
ip_frag_tbl_reuse(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
static inline void
ip_frag_tbl_reuse(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
-	struct rte_ip_frag_pkt *fp, uint64_t tms)
+	struct ip_frag_pkt *fp, uint64_t tms)
{
ip_frag_free(fp, dr);
ip_frag_reset(fp, tms);
{
ip_frag_free(fp, dr);
ip_frag_reset(fp, tms);
@@ -140,7 +137,7 @@
ipv6_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
}
struct rte_mbuf *
}
struct rte_mbuf *
-ip_frag_process(struct rte_ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
+ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
{
uint32_t idx;
struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
{
uint32_t idx;
@@ -271,11 +268,11 @@
ip_frag_process(struct rte_ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
* If such entry is not present, then allocate a new one.
* If the entry is stale, then free and reuse it.
*/
* If such entry is not present, then allocate a new one.
* If the entry is stale, then free and reuse it.
*/
-struct rte_ip_frag_pkt *
+struct ip_frag_pkt *
ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
const struct ip_frag_key *key, uint64_t tms)
{
ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
const struct ip_frag_key *key, uint64_t tms)
{
-	struct rte_ip_frag_pkt *pkt, *free, *stale, *lru;
+	struct ip_frag_pkt *pkt, *free, *stale, *lru;
uint64_t max_cycles;
/*
uint64_t max_cycles;
/*
@@ -333,13 +330,13 @@
ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
return (pkt);
}
return (pkt);
}
-struct rte_ip_frag_pkt *
+struct ip_frag_pkt *
ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
const struct ip_frag_key *key, uint64_t tms,
ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
const struct ip_frag_key *key, uint64_t tms,
-	struct rte_ip_frag_pkt **free, struct rte_ip_frag_pkt **stale)
+	struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
{
{
-	struct rte_ip_frag_pkt *p1, *p2;
-	struct rte_ip_frag_pkt *empty, *old;
+	struct ip_frag_pkt *p1, *p2;
+	struct ip_frag_pkt *empty, *old;
uint64_t max_cycles;
uint32_t i, assoc, sig1, sig2;
uint64_t max_cycles;
uint32_t i, assoc, sig1, sig2;
@@ -349,11 +346,11 @@
ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
max_cycles = tbl->max_cycles;
assoc = tbl->bucket_entries;
max_cycles = tbl->max_cycles;
assoc = tbl->bucket_entries;
-	if (tbl->last != NULL && ip_frag_key_cmp(&tbl->last->key, key) == 0)
+	if (tbl->last != NULL && ip_frag_key_cmp(key, &tbl->last->key) == 0)
return (tbl->last);
/* different hashing methods for IPv4 and IPv6 */
return (tbl->last);
/* different hashing methods for IPv4 and IPv6 */
-	if (key->key_len == 1)
+	if (key->key_len == IPV4_KEYLEN)
ipv4_frag_hash(key, &sig1, &sig2);
else
ipv6_frag_hash(key, &sig1, &sig2);
ipv4_frag_hash(key, &sig1, &sig2);
else
ipv6_frag_hash(key, &sig1, &sig2);
@@ -381,7 +378,7 @@
ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
p1, i, assoc,
IPv6_KEY_BYTES(p1[i].key.src_dst), p1[i].key.id, p1[i].start);
p1, i, assoc,
IPv6_KEY_BYTES(p1[i].key.src_dst), p1[i].key.id, p1[i].start);
-		if (ip_frag_key_cmp(&p1[i].key, key) == 0)
+		if (ip_frag_key_cmp(key, &p1[i].key) == 0)
return (p1 + i);
else if (ip_frag_key_is_empty(&p1[i].key))
empty = (empty == NULL) ? (p1 + i) : empty;
return (p1 + i);
else if (ip_frag_key_is_empty(&p1[i].key))
empty = (empty == NULL) ? (p1 + i) : empty;
@@ -407,7 +404,7 @@
ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
p2, i, assoc,
IPv6_KEY_BYTES(p2[i].key.src_dst), p2[i].key.id, p2[i].start);
p2, i, assoc,
IPv6_KEY_BYTES(p2[i].key.src_dst), p2[i].key.id, p2[i].start);
-		if (ip_frag_key_cmp(&p2[i].key, key) == 0)
+		if (ip_frag_key_cmp(key, &p2[i].key) == 0)
return (p2 + i);
else if (ip_frag_key_is_empty(&p2[i].key))
empty = (empty == NULL) ?( p2 + i) : empty;
return (p2 + i);
else if (ip_frag_key_is_empty(&p2[i].key))
empty = (empty == NULL) ?( p2 + i) : empty;