*/
/*
- * The ipv4_frag_tbl is a simple hash table:
+ * The ip_frag_tbl is a simple hash table:
* The basic idea is to use two hash functions and <bucket_entries>
* associativity. This provides 2 * <bucket_entries> possible locations in
* the hash table for each key. Sort of simplified Cuckoo hashing,
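To make the two-function addressing concrete, here is a minimal sketch of
how one key maps to its 2 * <bucket_entries> candidate slots. The helper
name ip_frag_buckets is hypothetical; ipv4_frag_hash and IP_FRAG_TBL_POS
are the routines defined below:

    /* Sketch (hypothetical helper): locate the two candidate bucket
     * lines for a key. Each line holds <bucket_entries> consecutive
     * ip_frag_pkt slots, giving 2 * bucket_entries homes per key. */
    static inline void
    ip_frag_buckets(struct ip_frag_tbl *tbl, const struct ip_frag_key *key,
            struct ip_frag_pkt **b1, struct ip_frag_pkt **b2)
    {
            uint32_t sig1, sig2;

            ipv4_frag_hash(key, &sig1, &sig2); /* two independent hashes */
            *b1 = IP_FRAG_TBL_POS(tbl, sig1);  /* first candidate line */
            *b2 = IP_FRAG_TBL_POS(tbl, sig2);  /* second candidate line */
    }

Unlike full Cuckoo hashing, a key whose 2 * bucket_entries slots are all
occupied is not resolved by relocating existing keys; the add simply fails
and is counted in fail_nospace (see ip_frag_find() below).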
#define PRIME_VALUE 0xeaad8405
-TAILQ_HEAD(ipv4_pkt_list, ipv4_frag_pkt);
+TAILQ_HEAD(ip_pkt_list, ip_frag_pkt);
-struct ipv4_frag_tbl_stat {
+struct ip_frag_tbl_stat {
uint64_t find_num; /* total # of find/insert attempts. */
uint64_t add_num; /* # of add ops. */
uint64_t del_num; /* # of del ops. */
uint64_t fail_nospace; /* # of 'no space' add failures. */
} __rte_cache_aligned;
-struct ipv4_frag_tbl {
+struct ip_frag_tbl {
uint64_t max_cycles; /* ttl for table entries. */
uint32_t entry_mask; /* hash value mask. */
uint32_t max_entries; /* max entries allowed. */
- uint32_t bucket_entries; /* hash assocaitivity. */
+ uint32_t bucket_entries; /* hash associativity. */
uint32_t nb_entries; /* total size of the table. */
uint32_t nb_buckets; /* num of associativity lines. */
- struct ipv4_frag_pkt *last; /* last used entry. */
- struct ipv4_pkt_list lru; /* LRU list for table entries. */
- struct ipv4_frag_tbl_stat stat; /* statistics counters. */
- struct ipv4_frag_pkt pkt[0]; /* hash table. */
+ struct ip_frag_pkt *last; /* last used entry. */
+ struct ip_pkt_list lru; /* LRU list for table entries. */
+ struct ip_frag_tbl_stat stat; /* statistics counters. */
+ struct ip_frag_pkt pkt[0]; /* hash table. */
};
-#define IPV4_FRAG_TBL_POS(tbl, sig) \
+#define IP_FRAG_TBL_POS(tbl, sig) \
((tbl)->pkt + ((sig) & (tbl)->entry_mask))
-#define IPV4_FRAG_HASH_FNUM 2
+#define IP_FRAG_HASH_FNUM 2
-#ifdef IPV4_FRAG_TBL_STAT
-#define IPV4_FRAG_TBL_STAT_UPDATE(s, f, v) ((s)->f += (v))
+#ifdef IP_FRAG_TBL_STAT
+#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) ((s)->f += (v))
#else
-#define IPV4_FRAG_TBL_STAT_UPDATE(s, f, v) do {} while (0)
+#define IP_FRAG_TBL_STAT_UPDATE(s, f, v) do {} while (0)
-#endif /* IPV4_FRAG_TBL_STAT */
+#endif /* IP_FRAG_TBL_STAT */
static inline void
-ipv4_frag_hash(const struct ipv4_frag_key *key, uint32_t *v1, uint32_t *v2)
+ipv4_frag_hash(const struct ip_frag_key *key, uint32_t *v1, uint32_t *v2)
{
uint32_t v;
const uint32_t *p;
- * Update the table, after we finish processing it's entry.
+ * Update the table after we finish processing its entry.
*/
static inline void
-ipv4_frag_inuse(struct ipv4_frag_tbl *tbl, const struct ipv4_frag_pkt *fp)
+ip_frag_inuse(struct ip_frag_tbl *tbl, const struct ip_frag_pkt *fp)
{
- if (IPV4_FRAG_KEY_EMPTY(&fp->key)) {
+ if (IP_FRAG_KEY_EMPTY(&fp->key)) {
TAILQ_REMOVE(&tbl->lru, fp, lru);
tbl->use_entries--;
}
* If such entry doesn't exist, will return free and/or timed-out entry,
* that can be used for that key.
*/
-static inline struct ipv4_frag_pkt *
-ipv4_frag_lookup(struct ipv4_frag_tbl *tbl,
- const struct ipv4_frag_key *key, uint64_t tms,
- struct ipv4_frag_pkt **free, struct ipv4_frag_pkt **stale)
+static inline struct ip_frag_pkt *
+ip_frag_lookup(struct ip_frag_tbl *tbl,
+ const struct ip_frag_key *key, uint64_t tms,
+ struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
{
- struct ipv4_frag_pkt *p1, *p2;
- struct ipv4_frag_pkt *empty, *old;
+ struct ip_frag_pkt *p1, *p2;
+ struct ip_frag_pkt *empty, *old;
uint64_t max_cycles;
uint32_t i, assoc, sig1, sig2;
max_cycles = tbl->max_cycles;
assoc = tbl->bucket_entries;
- if (tbl->last != NULL && IPV4_FRAG_KEY_CMP(&tbl->last->key, key) == 0)
+ if (tbl->last != NULL && IP_FRAG_KEY_CMP(&tbl->last->key, key) == 0)
return (tbl->last);
ipv4_frag_hash(key, &sig1, &sig2);
- p1 = IPV4_FRAG_TBL_POS(tbl, sig1);
- p2 = IPV4_FRAG_TBL_POS(tbl, sig2);
+ p1 = IP_FRAG_TBL_POS(tbl, sig1);
+ p2 = IP_FRAG_TBL_POS(tbl, sig2);
for (i = 0; i != assoc; i++) {
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv4_frag_pkt line0: %p, index: %u from %u\n"
+ "ip_frag_pkt line0: %p, index: %u from %u\n"
"key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
__func__, __LINE__,
tbl, tbl->max_entries, tbl->use_entries,
p1, i, assoc,
p1[i].key.src_dst, p1[i].key.id, p1[i].start);
- if (IPV4_FRAG_KEY_CMP(&p1[i].key, key) == 0)
+ if (IP_FRAG_KEY_CMP(&p1[i].key, key) == 0)
return (p1 + i);
- else if (IPV4_FRAG_KEY_EMPTY(&p1[i].key))
+ else if (IP_FRAG_KEY_EMPTY(&p1[i].key))
empty = (empty == NULL) ? (p1 + i) : empty;
else if (max_cycles + p1[i].start < tms)
old = (old == NULL) ? (p1 + i) : old;
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
- "ipv4_frag_pkt line1: %p, index: %u from %u\n"
+ "ip_frag_pkt line1: %p, index: %u from %u\n"
"key: <%" PRIx64 ", %#x>, start: %" PRIu64 "\n",
__func__, __LINE__,
tbl, tbl->max_entries, tbl->use_entries,
p2, i, assoc,
p2[i].key.src_dst, p2[i].key.id, p2[i].start);
- if (IPV4_FRAG_KEY_CMP(&p2[i].key, key) == 0)
+ if (IP_FRAG_KEY_CMP(&p2[i].key, key) == 0)
return (p2 + i);
- else if (IPV4_FRAG_KEY_EMPTY(&p2[i].key))
+ else if (IP_FRAG_KEY_EMPTY(&p2[i].key))
- empty = (empty == NULL) ?( p2 + i) : empty;
+ empty = (empty == NULL) ? (p2 + i) : empty;
else if (max_cycles + p2[i].start < tms)
old = (old == NULL) ? (p2 + i) : old;
}
static inline void
-ipv4_frag_tbl_del(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
- struct ipv4_frag_pkt *fp)
+ip_frag_tbl_del(struct ip_frag_tbl *tbl, struct ip_frag_death_row *dr,
+ struct ip_frag_pkt *fp)
{
- ipv4_frag_free(fp, dr);
- IPV4_FRAG_KEY_INVALIDATE(&fp->key);
+ ip_frag_free(fp, dr);
+ IP_FRAG_KEY_INVALIDATE(&fp->key);
TAILQ_REMOVE(&tbl->lru, fp, lru);
tbl->use_entries--;
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, del_num, 1);
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, del_num, 1);
}
static inline void
-ipv4_frag_tbl_add(struct ipv4_frag_tbl *tbl, struct ipv4_frag_pkt *fp,
- const struct ipv4_frag_key *key, uint64_t tms)
+ip_frag_tbl_add(struct ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
+ const struct ip_frag_key *key, uint64_t tms)
{
fp->key = key[0];
- ipv4_frag_reset(fp, tms);
+ ip_frag_reset(fp, tms);
TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
tbl->use_entries++;
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, add_num, 1);
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, add_num, 1);
}
static inline void
-ipv4_frag_tbl_reuse(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
- struct ipv4_frag_pkt *fp, uint64_t tms)
+ip_frag_tbl_reuse(struct ip_frag_tbl *tbl, struct ip_frag_death_row *dr,
+ struct ip_frag_pkt *fp, uint64_t tms)
{
- ipv4_frag_free(fp, dr);
- ipv4_frag_reset(fp, tms);
+ ip_frag_free(fp, dr);
+ ip_frag_reset(fp, tms);
TAILQ_REMOVE(&tbl->lru, fp, lru);
TAILQ_INSERT_TAIL(&tbl->lru, fp, lru);
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, reuse_num, 1);
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, reuse_num, 1);
}
/*
* If such entry is not present, then allocate a new one.
* If the entry is stale, then free and reuse it.
*/
-static inline struct ipv4_frag_pkt *
-ipv4_frag_find(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
- const struct ipv4_frag_key *key, uint64_t tms)
+static inline struct ip_frag_pkt *
+ip_frag_find(struct ip_frag_tbl *tbl, struct ip_frag_death_row *dr,
+ const struct ip_frag_key *key, uint64_t tms)
{
- struct ipv4_frag_pkt *pkt, *free, *stale, *lru;
+ struct ip_frag_pkt *pkt, *free, *stale, *lru;
uint64_t max_cycles;
/*
stale = NULL;
max_cycles = tbl->max_cycles;
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, find_num, 1);
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, find_num, 1);
- if ((pkt = ipv4_frag_lookup(tbl, key, tms, &free, &stale)) == NULL) {
+ if ((pkt = ip_frag_lookup(tbl, key, tms, &free, &stale)) == NULL) {
- /*timed-out entry, free and invalidate it*/
+ /* timed-out entry, free and invalidate it. */
if (stale != NULL) {
- ipv4_frag_tbl_del(tbl, dr, stale);
+ ip_frag_tbl_del(tbl, dr, stale);
free = stale;
/*
tbl->max_entries <= tbl->use_entries) {
lru = TAILQ_FIRST(&tbl->lru);
if (max_cycles + lru->start < tms) {
- ipv4_frag_tbl_del(tbl, dr, lru);
+ ip_frag_tbl_del(tbl, dr, lru);
} else {
free = NULL;
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat,
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat,
fail_nospace, 1);
}
}
/* found a free entry to reuse. */
if (free != NULL) {
- ipv4_frag_tbl_add(tbl, free, key, tms);
+ ip_frag_tbl_add(tbl, free, key, tms);
pkt = free;
}
* and reuse it.
*/
} else if (max_cycles + pkt->start < tms) {
- ipv4_frag_tbl_reuse(tbl, dr, pkt, tms);
+ ip_frag_tbl_reuse(tbl, dr, pkt, tms);
}
- IPV4_FRAG_TBL_STAT_UPDATE(&tbl->stat, fail_total, (pkt == NULL));
+ IP_FRAG_TBL_STAT_UPDATE(&tbl->stat, fail_total, (pkt == NULL));
tbl->last = pkt;
return (pkt);
* @return
- * The pointer to the new allocated mempool, on success. NULL on error.
+ * The pointer to the newly allocated fragmentation table, on success. NULL on error.
*/
-static struct ipv4_frag_tbl *
-ipv4_frag_tbl_create(uint32_t bucket_num, uint32_t bucket_entries,
+static struct ip_frag_tbl *
+rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries,
uint32_t max_entries, uint64_t max_cycles, int socket_id)
{
- struct ipv4_frag_tbl *tbl;
+ struct ip_frag_tbl *tbl;
size_t sz;
uint64_t nb_entries;
nb_entries = rte_align32pow2(bucket_num);
nb_entries *= bucket_entries;
- nb_entries *= IPV4_FRAG_HASH_FNUM;
+ nb_entries *= IP_FRAG_HASH_FNUM;
/* check input parameters. */
if (rte_is_power_of_2(bucket_entries) == 0 ||
}
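A worked sizing example may help; all parameter values below are
hypothetical. With bucket_num = 4096 and bucket_entries = 4, the table
gets rte_align32pow2(4096) * 4 * IP_FRAG_HASH_FNUM = 32768 ip_frag_pkt
slots appended to the ip_frag_tbl header:

    /* Sketch: 4096 buckets of 4 entries, at most 4096 live datagrams,
     * ~1 second TTL (rte_get_tsc_hz() cycles), any NUMA socket. */
    struct ip_frag_tbl *tbl;

    tbl = rte_ip_frag_table_create(4096, 4, 4096, rte_get_tsc_hz(),
            SOCKET_ID_ANY);
    if (tbl == NULL)
            rte_exit(EXIT_FAILURE, "ip_frag_table_create failed\n");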
static inline void
-ipv4_frag_tbl_destroy( struct ipv4_frag_tbl *tbl)
+rte_ip_frag_table_destroy(struct ip_frag_tbl *tbl)
{
rte_free(tbl);
}
static void
-ipv4_frag_tbl_dump_stat(FILE *f, const struct ipv4_frag_tbl *tbl)
+rte_ip_frag_table_statistics_dump(FILE *f, const struct ip_frag_tbl *tbl)
{
uint64_t fail_total, fail_nospace;
#ifndef _IPV4_RSMBL_H_
#define _IPV4_RSMBL_H_
+#include "ip_frag_common.h"
+
/**
* @file
- * IPv4 reassemble
+ * IPv4 reassembly
MAX_FRAG_NUM = 4,
};
-struct ipv4_frag {
+struct ip_frag {
uint16_t ofs;
uint16_t len;
struct rte_mbuf *mb;
/*
- * Use <src addr, dst_addr, id> to uniquely indetify fragmented datagram.
+ * Use <src_addr, dst_addr, id> to uniquely identify a fragmented datagram.
*/
-struct ipv4_frag_key {
+struct ip_frag_key {
uint64_t src_dst;
uint32_t id;
};
-#define IPV4_FRAG_KEY_INVALIDATE(k) ((k)->src_dst = 0)
-#define IPV4_FRAG_KEY_EMPTY(k) ((k)->src_dst == 0)
+#define IP_FRAG_KEY_INVALIDATE(k) ((k)->src_dst = 0)
+#define IP_FRAG_KEY_EMPTY(k) ((k)->src_dst == 0)
-#define IPV4_FRAG_KEY_CMP(k1, k2) \
+#define IP_FRAG_KEY_CMP(k1, k2) \
(((k1)->src_dst ^ (k2)->src_dst) | ((k1)->id ^ (k2)->id))
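IP_FRAG_KEY_CMP is branch-free: each XOR term is zero only when the
corresponding fields match, so the OR of both terms is zero exactly when
the whole keys are equal. A minimal illustration, with hypothetical key
values:

    struct ip_frag_key k1 = { .src_dst = UINT64_C(0x0a0000020a000001),
            .id = 0x1234 };
    struct ip_frag_key k2 = k1;
    int same;

    /* zero result <=> both src_dst and id match. */
    same = (IP_FRAG_KEY_CMP(&k1, &k2) == 0); /* 1 here: keys are equal */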
* Fragmented packet to reassemble.
* First two entries in the frags[] array are for the last and first fragments.
*/
-struct ipv4_frag_pkt {
- TAILQ_ENTRY(ipv4_frag_pkt) lru; /* LRU list */
- struct ipv4_frag_key key;
+struct ip_frag_pkt {
+ TAILQ_ENTRY(ip_frag_pkt) lru; /* LRU list */
+ struct ip_frag_key key;
uint64_t start; /* creation timestamp */
uint32_t total_size; /* expected reassembled size */
uint32_t frag_size; /* size of fragments received */
uint32_t last_idx; /* index of next entry to fill */
- struct ipv4_frag frags[MAX_FRAG_NUM];
+ struct ip_frag frags[MAX_FRAG_NUM];
} __rte_cache_aligned;
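Because frags[0] and frags[1] are pinned to the last and the first
fragment, only middle fragments consume slots through last_idx. A
hypothetical helper showing the slot choice (the real logic lives in
ip_frag_process()):

    /* Sketch: pick the frags[] slot for an incoming fragment. */
    static inline uint32_t
    ip_frag_slot(struct ip_frag_pkt *fp, uint16_t ofs, uint16_t more_frags)
    {
            if (more_frags == 0)
                    return LAST_FRAG_IDX;  /* MF clear: last fragment */
            else if (ofs == 0)
                    return FIRST_FRAG_IDX; /* offset 0: first fragment */
            return fp->last_idx++;         /* middle fragments fill the rest */
    }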
-struct ipv4_frag_death_row {
+struct ip_frag_death_row {
uint32_t cnt;
struct rte_mbuf *row[MAX_PKT_BURST * (MAX_FRAG_NUM + 1)];
};
-#define IPV4_FRAG_MBUF2DR(dr, mb) ((dr)->row[(dr)->cnt++] = (mb))
+#define IP_FRAG_MBUF2DR(dr, mb) ((dr)->row[(dr)->cnt++] = (mb))
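The death row lets table maintenance avoid calling rte_pktmbuf_free()
from deep inside the lookup/insert paths: doomed mbufs are queued with
IP_FRAG_MBUF2DR() and freed in one batch per burst. A minimal sketch,
assuming the row starts zeroed (dr->cnt == 0) and using an arbitrary
prefetch depth of 3:

    /* Sketch: discard one mbuf via the death row, then drain the row. */
    static void
    drop_and_drain(struct ip_frag_death_row *dr, struct rte_mbuf *mb)
    {
            IP_FRAG_MBUF2DR(dr, mb);            /* queue for deferred free */
            rte_ip_frag_free_death_row(dr, 3);  /* bulk-free, prefetch 3 ahead */
    }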
/* logging macros. */
-#ifdef IPV4_FRAG_DEBUG
-#define IPV4_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
+#ifdef IP_FRAG_DEBUG
+#define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
#else
-#define IPV4_FRAG_LOG(lvl, fmt, args...) do {} while(0)
-#endif /* IPV4_FRAG_DEBUG */
+#define IP_FRAG_LOG(lvl, fmt, args...) do {} while (0)
+#endif /* IP_FRAG_DEBUG */
static inline void
-ipv4_frag_reset(struct ipv4_frag_pkt *fp, uint64_t tms)
+ip_frag_reset(struct ip_frag_pkt *fp, uint64_t tms)
{
- static const struct ipv4_frag zero_frag = {
+ static const struct ip_frag zero_frag = {
.ofs = 0,
.len = 0,
.mb = NULL,
}
static inline void
-ipv4_frag_free(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr)
+ip_frag_free(struct ip_frag_pkt *fp, struct ip_frag_death_row *dr)
{
uint32_t i, k;
}
static inline void
-ipv4_frag_free_death_row(struct ipv4_frag_death_row *dr, uint32_t prefetch)
+rte_ip_frag_free_death_row(struct ip_frag_death_row *dr, uint32_t prefetch)
{
uint32_t i, k, n;
* chains them into one mbuf.
*/
static inline void
-ipv4_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
+ip_frag_chain(struct rte_mbuf *mn, struct rte_mbuf *mp)
{
struct rte_mbuf *ms;
* Reassemble fragments into one packet.
*/
static inline struct rte_mbuf *
-ipv4_frag_reassemble(const struct ipv4_frag_pkt *fp)
+ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
{
struct ipv4_hdr *ip_hdr;
struct rte_mbuf *m, *prev;
/* previous fragment found. */
- if(fp->frags[i].ofs + fp->frags[i].len == ofs) {
+ if (fp->frags[i].ofs + fp->frags[i].len == ofs) {
- ipv4_frag_chain(fp->frags[i].mb, m);
+ ip_frag_chain(fp->frags[i].mb, m);
/* update our last fragment and offset. */
m = fp->frags[i].mb;
}
/* chain with the first fragment. */
- ipv4_frag_chain(fp->frags[FIRST_FRAG_IDX].mb, m);
+ ip_frag_chain(fp->frags[FIRST_FRAG_IDX].mb, m);
m = fp->frags[FIRST_FRAG_IDX].mb;
/* update mbuf fields for reassembled packet. */
m->ol_flags |= PKT_TX_IP_CKSUM;
- /* update ipv4 header for the reassmebled packet */
+ /* update ipv4 header for the reassembled packet */
- ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
+ ip_hdr = (struct ipv4_hdr *)(rte_pktmbuf_mtod(m, uint8_t *) +
m->pkt.vlan_macip.f.l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
}
static inline struct rte_mbuf *
-ipv4_frag_process(struct ipv4_frag_pkt *fp, struct ipv4_frag_death_row *dr,
+ip_frag_process(struct ip_frag_pkt *fp, struct ip_frag_death_row *dr,
struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
{
uint32_t idx;
if (idx >= sizeof (fp->frags) / sizeof (fp->frags[0])) {
/* report an error. */
- IPV4_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
"total_size: %u, frag_size: %u, last_idx: %u\n"
"first fragment: ofs: %u, len: %u\n"
fp->frags[LAST_FRAG_IDX].len);
/* free all fragments, invalidate the entry. */
- ipv4_frag_free(fp, dr);
- IPV4_FRAG_KEY_INVALIDATE(&fp->key);
- IPV4_FRAG_MBUF2DR(dr, mb);
+ ip_frag_free(fp, dr);
+ IP_FRAG_KEY_INVALIDATE(&fp->key);
+ IP_FRAG_MBUF2DR(dr, mb);
return (NULL);
}
if (mb == NULL) {
/* report an error. */
- IPV4_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d invalid fragmented packet:\n"
"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, "
"total_size: %u, frag_size: %u, last_idx: %u\n"
"first fragment: ofs: %u, len: %u\n"
fp->frags[LAST_FRAG_IDX].len);
/* free associated resources. */
- ipv4_frag_free(fp, dr);
+ ip_frag_free(fp, dr);
}
/* we are done with that entry, invalidate it. */
- IPV4_FRAG_KEY_INVALIDATE(&fp->key);
+ IP_FRAG_KEY_INVALIDATE(&fp->key);
return (mb);
}
* - not all fragments of the packet are collected yet.
*/
static inline struct rte_mbuf *
-ipv4_frag_mbuf(struct ipv4_frag_tbl *tbl, struct ipv4_frag_death_row *dr,
- struct rte_mbuf *mb, uint64_t tms, struct ipv4_hdr *ip_hdr,
- uint16_t ip_ofs, uint16_t ip_flag)
+rte_ipv4_reassemble_packet(struct ip_frag_tbl *tbl,
+ struct ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
+ struct ipv4_hdr *ip_hdr, uint16_t ip_ofs, uint16_t ip_flag)
{
- struct ipv4_frag_pkt *fp;
- struct ipv4_frag_key key;
+ struct ip_frag_pkt *fp;
+ struct ip_frag_key key;
const uint64_t *psd;
uint16_t ip_len;
ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
mb->pkt.vlan_macip.f.l3_len);
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"mbuf: %p, tms: %" PRIu64
", key: <%" PRIx64 ", %#x>, ofs: %u, len: %u, flags: %#x\n"
"tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, "
tbl->use_entries);
/* try to find/add entry into the fragment's table. */
- if ((fp = ipv4_frag_find(tbl, dr, &key, tms)) == NULL) {
- IPV4_FRAG_MBUF2DR(dr, mb);
- return (NULL);
+ if ((fp = ip_frag_find(tbl, dr, &key, tms)) == NULL) {
+ IP_FRAG_MBUF2DR(dr, mb);
+ return NULL;
}
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64
", total_size: %u, frag_size: %u, last_idx: %u\n\n",
/* process the fragmented packet. */
- mb = ipv4_frag_process(fp, dr, mb, ip_ofs, ip_len, ip_flag);
- ipv4_frag_inuse(tbl, fp);
+ mb = ip_frag_process(fp, dr, mb, ip_ofs, ip_len, ip_flag);
+ ip_frag_inuse(tbl, fp);
- IPV4_FRAG_LOG(DEBUG, "%s:%d:\n"
+ IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"mbuf: %p\n"
"tbl: %p, max_entries: %u, use_entries: %u\n"
"ipv4_frag_pkt: %p, key: <%" PRIx64 ", %#x>, start: %" PRIu64