#define IP_FRAG_LOG(lvl, fmt, args...) RTE_LOG(lvl, USER1, fmt, ##args)
-#define RTE_IP_FRAG_ASSERT(exp) \
+#define IP_FRAG_ASSERT(exp) \
if (!(exp)) { \
rte_panic("function %s, line%d\tassert \"" #exp "\" failed\n", \
__func__, __LINE__); \
}
#else
#define IP_FRAG_LOG(lvl, fmt, args...) do {} while(0)
-#define RTE_IP_FRAG_ASSERT(exp) do { } while(0)
+#define IP_FRAG_ASSERT(exp) do {} while (0)
#endif /* IP_FRAG_DEBUG */
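As a usage illustration, a minimal sketch of how these debug macros are exercised when IP_FRAG_DEBUG is defined at build time (the helper and values here are hypothetical):

/* hypothetical helper showing the debug macros in use */
static inline void
example_check_frag(uint16_t ofs, uint16_t len)
{
	/* IP_FRAG_LOG forwards to RTE_LOG with the USER1 logtype */
	IP_FRAG_LOG(DEBUG, "%s: ofs=%u len=%u\n", __func__, ofs, len);
	/* panics if the fragment offset is not 8-byte aligned */
	IP_FRAG_ASSERT((ofs & 7) == 0);
}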
#define IPV4_KEYLEN 1
"%08" PRIx64 "%08" PRIx64 "%08" PRIx64 "%08" PRIx64
/* internal functions declarations */
-struct rte_mbuf * ip_frag_process(struct rte_ip_frag_pkt *fp,
+struct rte_mbuf * ip_frag_process(struct ip_frag_pkt *fp,
struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb,
uint16_t ofs, uint16_t len, uint16_t more_frags);
-struct rte_ip_frag_pkt * ip_frag_find(struct rte_ip_frag_tbl *tbl,
+struct ip_frag_pkt * ip_frag_find(struct rte_ip_frag_tbl *tbl,
struct rte_ip_frag_death_row *dr,
const struct ip_frag_key *key, uint64_t tms);
-struct rte_ip_frag_pkt * ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
+struct ip_frag_pkt * ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
const struct ip_frag_key *key, uint64_t tms,
- struct rte_ip_frag_pkt **free, struct rte_ip_frag_pkt **stale);
+ struct ip_frag_pkt **free, struct ip_frag_pkt **stale);
/* these functions need to be declared here as ip_frag_process relies on them */
-struct rte_mbuf * ipv4_frag_reassemble(const struct rte_ip_frag_pkt *fp);
-struct rte_mbuf * ipv6_frag_reassemble(const struct rte_ip_frag_pkt *fp);
+struct rte_mbuf * ipv4_frag_reassemble(const struct ip_frag_pkt *fp);
+struct rte_mbuf * ipv6_frag_reassemble(const struct ip_frag_pkt *fp);
/* put fragment on death row */
static inline void
-ip_frag_free(struct rte_ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
+ip_frag_free(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr)
{
uint32_t i, k;
/* if key is empty, mark key as in use */
static inline void
-ip_frag_inuse(struct rte_ip_frag_tbl *tbl, const struct rte_ip_frag_pkt *fp)
+ip_frag_inuse(struct rte_ip_frag_tbl *tbl, const struct ip_frag_pkt *fp)
{
if (ip_frag_key_is_empty(&fp->key)) {
TAILQ_REMOVE(&tbl->lru, fp, lru);
/* reset the fragment */
static inline void
-ip_frag_reset(struct rte_ip_frag_pkt *fp, uint64_t tms)
+ip_frag_reset(struct ip_frag_pkt *fp, uint64_t tms)
{
static const struct ip_frag zero_frag = {
.ofs = 0,
/* local frag table helper functions */
static inline void
ip_frag_tbl_del(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
- struct rte_ip_frag_pkt *fp)
+ struct ip_frag_pkt *fp)
{
ip_frag_free(fp, dr);
ip_frag_key_invalidate(&fp->key);
}
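Note that ip_frag_tbl_del() does not free mbufs inline: ip_frag_free() parks them on the death row, which the caller drains later. A caller-side sketch, assuming the public rte_ip_frag_free_death_row() from rte_ip_frag.h:

struct rte_ip_frag_death_row dr = { .cnt = 0 };
/* ... reassembly calls may add dead mbufs to the death row ... */
rte_ip_frag_free_death_row(&dr, 3);	/* free them, prefetching 3 ahead */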
static inline void
-ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_pkt *fp,
+ip_frag_tbl_add(struct rte_ip_frag_tbl *tbl, struct ip_frag_pkt *fp,
const struct ip_frag_key *key, uint64_t tms)
{
fp->key = key[0];
static inline void
ip_frag_tbl_reuse(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
- struct rte_ip_frag_pkt *fp, uint64_t tms)
+ struct ip_frag_pkt *fp, uint64_t tms)
{
ip_frag_free(fp, dr);
ip_frag_reset(fp, tms);
}
struct rte_mbuf *
-ip_frag_process(struct rte_ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
+ip_frag_process(struct ip_frag_pkt *fp, struct rte_ip_frag_death_row *dr,
struct rte_mbuf *mb, uint16_t ofs, uint16_t len, uint16_t more_frags)
{
uint32_t idx;
 * If such an entry is not present, then allocate a new one.
* If the entry is stale, then free and reuse it.
*/
-struct rte_ip_frag_pkt *
+struct ip_frag_pkt *
ip_frag_find(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
const struct ip_frag_key *key, uint64_t tms)
{
- struct rte_ip_frag_pkt *pkt, *free, *stale, *lru;
+ struct ip_frag_pkt *pkt, *free, *stale, *lru;
uint64_t max_cycles;
/*
return (pkt);
}
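In outline, ip_frag_find() implements the policy from the comment above; a simplified sketch of its body (not the exact library code, LRU eviction and error paths elided):

pkt = ip_frag_lookup(tbl, key, tms, &free, &stale);
if (pkt == NULL) {
	if (stale != NULL) {		/* stale entry: free and reuse it */
		ip_frag_tbl_del(tbl, dr, stale);
		free = stale;
	}
	if (free != NULL) {		/* free slot found: claim it */
		ip_frag_tbl_add(tbl, free, key, tms);
		pkt = free;
	}
}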
-struct rte_ip_frag_pkt *
+struct ip_frag_pkt *
ip_frag_lookup(struct rte_ip_frag_tbl *tbl,
const struct ip_frag_key *key, uint64_t tms,
- struct rte_ip_frag_pkt **free, struct rte_ip_frag_pkt **stale)
+ struct ip_frag_pkt **free, struct ip_frag_pkt **stale)
{
- struct rte_ip_frag_pkt *p1, *p2;
- struct rte_ip_frag_pkt *empty, *old;
+ struct ip_frag_pkt *p1, *p2;
+ struct ip_frag_pkt *empty, *old;
uint64_t max_cycles;
uint32_t i, assoc, sig1, sig2;
* @internal Fragmented packet to reassemble.
* First two entries in the frags[] array are for the last and first fragments.
*/
-struct rte_ip_frag_pkt {
- TAILQ_ENTRY(rte_ip_frag_pkt) lru; /**< LRU list */
+struct ip_frag_pkt {
+ TAILQ_ENTRY(ip_frag_pkt) lru; /**< LRU list */
struct ip_frag_key key; /**< fragmentation key */
uint64_t start; /**< creation timestamp */
uint32_t total_size; /**< expected reassembled size */
/**< mbufs to be freed */
};
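The fixed slots mentioned in the comment are addressed through the fragment index enum used by the library (reproduced here for orientation, as found in the ip_frag headers):

enum {
	IP_LAST_FRAG_IDX,	/* index of the last fragment (more_frags == 0) */
	IP_FIRST_FRAG_IDX,	/* index of the first fragment (ofs == 0) */
	IP_MIN_FRAG_NUM,	/* minimum number of fragments: 2 */
};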
-TAILQ_HEAD(rte_ip_pkt_list, rte_ip_frag_pkt); /**< @internal fragments tailq */
+TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */
/** fragmentation table statistics */
-struct rte_ip_frag_tbl_stat {
+struct ip_frag_tbl_stat {
uint64_t find_num; /**< total # of find/insert attempts. */
uint64_t add_num; /**< # of add ops. */
uint64_t del_num; /**< # of del ops. */
	uint32_t bucket_entries; /**< hash associativity. */
uint32_t nb_entries; /**< total size of the table. */
uint32_t nb_buckets; /**< num of associativity lines. */
- struct rte_ip_frag_pkt *last; /**< last used entry. */
- struct rte_ip_pkt_list lru; /**< LRU list for table entries. */
- struct rte_ip_frag_tbl_stat stat; /**< statistics counters. */
- struct rte_ip_frag_pkt pkt[0]; /**< hash table. */
+ struct ip_frag_pkt *last; /**< last used entry. */
+ struct ip_pkt_list lru; /**< LRU list for table entries. */
+ struct ip_frag_tbl_stat stat; /**< statistics counters. */
+ struct ip_frag_pkt pkt[0]; /**< hash table. */
};
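For context, a hedged sketch of instantiating this table through the public API (sizes and TTL are illustrative):

#include <rte_ip_frag.h>

uint64_t frag_cycles = rte_get_tsc_hz();	/* ~1 second TTL */
struct rte_ip_frag_tbl *tbl;

tbl = rte_ip_frag_table_create(
	4096,			/* bucket_num: number of buckets */
	16,			/* bucket_entries: associativity */
	4096,			/* max_entries: <= bucket_num * bucket_entries */
	frag_cycles,		/* max_cycles: entry TTL in TSC ticks */
	rte_socket_id());	/* NUMA socket for the allocation */
if (tbl == NULL)
	rte_exit(EXIT_FAILURE, "failed to create fragment table\n");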
/** IPv6 fragment extension header */
frag_size = (uint16_t)(mtu_size - sizeof(struct ipv4_hdr));
	/* Fragment size should be a multiple of 8. */
- RTE_IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
+ IP_FRAG_ASSERT((frag_size & IPV4_HDR_FO_MASK) == 0);
in_hdr = (struct ipv4_hdr *) pkt_in->pkt.data;
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
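Concretely, with a standard 1500-byte Ethernet MTU the check passes:

/* worked example, illustrative values:
 * frag_size = 1500 - sizeof(struct ipv4_hdr) = 1500 - 20 = 1480 = 185 * 8,
 * so the low three bits (IPV4_HDR_FO_MASK) are clear and the assert holds */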
* Reassemble fragments into one packet.
*/
struct rte_mbuf *
-ipv4_frag_reassemble(const struct rte_ip_frag_pkt *fp)
+ipv4_frag_reassemble(const struct ip_frag_pkt *fp)
{
struct ipv4_hdr *ip_hdr;
struct rte_mbuf *m, *prev;
struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
struct ipv4_hdr *ip_hdr)
{
- struct rte_ip_frag_pkt *fp;
+ struct ip_frag_pkt *fp;
struct ip_frag_key key;
const uint64_t *psd;
uint16_t ip_len;
frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
/* Fragment size should be a multiple of 8. */
- RTE_IP_FRAG_ASSERT((frag_size & IPV6_HDR_FO_MASK) == 0);
+ IP_FRAG_ASSERT((frag_size & IPV6_HDR_FO_MASK) == 0);
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
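The same alignment constraint applies here, but against the 40-byte fixed IPv6 header, so the caller must pick an MTU for which mtu_size - 40 is 8-aligned:

/* worked example, illustrative values:
 * mtu_size = 1280 (IPv6 minimum MTU): frag_size = 1280 - 40 = 1240 = 155 * 8, OK;
 * mtu_size = 1500: frag_size = 1460, not 8-aligned, would trip the assert */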
* Reassemble fragments into one packet.
*/
struct rte_mbuf *
-ipv6_frag_reassemble(const struct rte_ip_frag_pkt *fp)
+ipv6_frag_reassemble(const struct ip_frag_pkt *fp)
{
struct ipv6_hdr *ip_hdr;
struct ipv6_extension_fragment *frag_hdr;
struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
struct ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)
{
- struct rte_ip_frag_pkt *fp;
+ struct ip_frag_pkt *fp;
struct ip_frag_key key;
uint16_t ip_len, ip_ofs;
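For orientation, a caller-side sketch of how the datapath reaches this function through the public entry point (assuming the rte_ipv6_frag_reassemble_packet() prototype from rte_ip_frag.h; header parsing elided, consumer function hypothetical):

uint64_t tms = rte_rdtsc();
struct rte_mbuf *mo;

/* returns NULL while fragments are still outstanding,
 * or the reassembled packet once all pieces have arrived */
mo = rte_ipv6_frag_reassemble_packet(tbl, &dr, mb, tms, ip_hdr, frag_hdr);
if (mo != NULL)
	handle_reassembled_packet(mo);	/* hypothetical consumer */
rte_ip_frag_free_death_row(&dr, 3);	/* release any dead mbufs */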