X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_ip_frag%2Frte_ip_frag.h;h=9f8cede8ddae49fe2489d6934234d50185dbe4f1;hb=e32cb57973fc311b4b5f60ae5dac37d99e48c94d;hp=5c5e502cf5f484c8b31078698c3d39a4236922a9;hpb=0aa31d7a5929a95580f6a54765a618d2b2615f4b;p=dpdk.git diff --git a/lib/librte_ip_frag/rte_ip_frag.h b/lib/librte_ip_frag/rte_ip_frag.h index 5c5e502cf5..9f8cede8dd 100644 --- a/lib/librte_ip_frag/rte_ip_frag.h +++ b/lib/librte_ip_frag/rte_ip_frag.h @@ -36,17 +36,24 @@ /** * @file - * RTE IPv4 Fragmentation and Reassembly + * RTE IP Fragmentation and Reassembly * - * Implementation of IPv4 packet fragmentation and reassembly. + * Implementation of IP packet fragmentation and reassembly. */ +#ifdef __cplusplus +extern "C" { +#endif + #include #include #include -#include +#include #include +#include + +struct rte_mbuf; enum { IP_LAST_FRAG_IDX, /**< index of last fragment */ @@ -63,18 +70,19 @@ struct ip_frag { struct rte_mbuf *mb; /**< fragment mbuf */ }; -/** @internal to uniquely indetify fragmented datagram. */ +/** @internal to uniquely identify fragmented datagram. */ struct ip_frag_key { - uint64_t src_dst; /**< src address */ + uint64_t src_dst[4]; /**< src address, first 8 bytes used for IPv4 */ uint32_t id; /**< dst address */ + uint32_t key_len; /**< src/dst key length */ }; -/* +/** * @internal Fragmented packet to reassemble. * First two entries in the frags[] array are for the last and first fragments. */ -struct rte_ip_frag_pkt { - TAILQ_ENTRY(rte_ip_frag_pkt) lru; /**< LRU list */ +struct ip_frag_pkt { + TAILQ_ENTRY(ip_frag_pkt) lru; /**< LRU list */ struct ip_frag_key key; /**< fragmentation key */ uint64_t start; /**< creation timestamp */ uint32_t total_size; /**< expected reassembled size */ @@ -92,10 +100,10 @@ struct rte_ip_frag_death_row { /**< mbufs to be freed */ }; -TAILQ_HEAD(rte_ip_pkt_list, rte_ip_frag_pkt); /**< @internal fragments tailq */ +TAILQ_HEAD(ip_pkt_list, ip_frag_pkt); /**< @internal fragments tailq */ /** fragmentation table statistics */ -struct rte_ip_frag_tbl_stat { +struct ip_frag_tbl_stat { uint64_t find_num; /**< total # of find/insert attempts. */ uint64_t add_num; /**< # of add ops. */ uint64_t del_num; /**< # of del ops. */ @@ -110,35 +118,40 @@ struct rte_ip_frag_tbl { uint32_t entry_mask; /**< hash value mask. */ uint32_t max_entries; /**< max entries allowed. */ uint32_t use_entries; /**< entries in use. */ - uint32_t bucket_entries; /**< hash assocaitivity. */ + uint32_t bucket_entries; /**< hash associativity. */ uint32_t nb_entries; /**< total size of the table. */ uint32_t nb_buckets; /**< num of associativity lines. */ - struct rte_ip_frag_pkt *last; /**< last used entry. */ - struct rte_ip_pkt_list lru; /**< LRU list for table entries. */ - struct rte_ip_frag_tbl_stat stat; /**< statistics counters. */ - struct rte_ip_frag_pkt pkt[0]; /**< hash table. */ + struct ip_frag_pkt *last; /**< last used entry. */ + struct ip_pkt_list lru; /**< LRU list for table entries. */ + struct ip_frag_tbl_stat stat; /**< statistics counters. */ + __extension__ struct ip_frag_pkt pkt[0]; /**< hash table. 
*/ }; /** IPv6 fragment extension header */ +#define RTE_IPV6_EHDR_MF_SHIFT 0 +#define RTE_IPV6_EHDR_MF_MASK 1 +#define RTE_IPV6_EHDR_FO_SHIFT 3 +#define RTE_IPV6_EHDR_FO_MASK (~((1 << RTE_IPV6_EHDR_FO_SHIFT) - 1)) + +#define RTE_IPV6_FRAG_USED_MASK \ + (RTE_IPV6_EHDR_MF_MASK | RTE_IPV6_EHDR_FO_MASK) + +#define RTE_IPV6_GET_MF(x) ((x) & RTE_IPV6_EHDR_MF_MASK) +#define RTE_IPV6_GET_FO(x) ((x) >> RTE_IPV6_EHDR_FO_SHIFT) + +#define RTE_IPV6_SET_FRAG_DATA(fo, mf) \ + (((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK)) + struct ipv6_extension_fragment { uint8_t next_header; /**< Next header type */ - uint8_t reserved1; /**< Reserved */ - union { - struct { - uint16_t frag_offset:13; /**< Offset from the start of the packet */ - uint16_t reserved2:2; /**< Reserved */ - uint16_t more_frags:1; - /**< 1 if more fragments left, 0 if last fragment */ - }; - uint16_t frag_data; - /**< union of all fragmentation data */ - }; + uint8_t reserved; /**< Reserved */ + uint16_t frag_data; /**< All fragmentation data */ uint32_t id; /**< Packet ID */ } __attribute__((__packed__)); -/* +/** * Create a new IP fragmentation table. * * @param bucket_num @@ -161,17 +174,14 @@ struct rte_ip_frag_tbl * rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries, uint32_t max_entries, uint64_t max_cycles, int socket_id); -/* +/** * Free allocated IP fragmentation table. * - * @param btl + * @param tbl * Fragmentation table to free. */ -static inline void -rte_ip_frag_table_destroy( struct rte_ip_frag_tbl *tbl) -{ - rte_free(tbl); -} +void +rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl); /** * This function implements the fragmentation of IPv6 packets. @@ -202,6 +212,53 @@ rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in, struct rte_mempool *pool_direct, struct rte_mempool *pool_indirect); +/** + * This function implements reassembly of fragmented IPv6 packets. + * Incoming mbuf should have its l2_len/l3_len fields setup correctly. + * + * @param tbl + * Table where to lookup/add the fragmented packet. + * @param dr + * Death row to free buffers to + * @param mb + * Incoming mbuf with IPv6 fragment. + * @param tms + * Fragment arrival timestamp. + * @param ip_hdr + * Pointer to the IPv6 header. + * @param frag_hdr + * Pointer to the IPv6 fragment extension header. + * @return + * Pointer to mbuf for reassembled packet, or NULL if: + * - an error occurred. + * - not all fragments of the packet are collected yet. + */ +struct rte_mbuf *rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, + struct rte_ip_frag_death_row *dr, + struct rte_mbuf *mb, uint64_t tms, struct ipv6_hdr *ip_hdr, + struct ipv6_extension_fragment *frag_hdr); + +/** + * Return a pointer to the packet's fragment header, if found. + * It only looks at the extension header that's right after the fixed IPv6 + * header, and doesn't follow the whole chain of extension headers. + * + * @param hdr + * Pointer to the IPv6 header. + * @return + * Pointer to the IPv6 fragment extension header, or NULL if it's not + * present. + */ +static inline struct ipv6_extension_fragment * +rte_ipv6_frag_get_ipv6_fragment_header(struct ipv6_hdr *hdr) +{ + if (hdr->proto == IPPROTO_FRAGMENT) { + return (struct ipv6_extension_fragment *) ++hdr; + } + else + return NULL; +} + /** * IPv4 fragmentation. * @@ -231,7 +288,7 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in, struct rte_mempool *pool_direct, struct rte_mempool *pool_indirect); -/* +/** * This function implements reassembly of fragmented IPv4 packets. 
 * Incoming mbuf should have its l2_len/l3_len fields setup correctly.
 *
@@ -246,15 +303,15 @@ int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in,
 * @param ip_hdr
 *   Pointer to the IPV4 header inside the fragment.
 * @return
- *   Pointer to mbuf for reassebled packet, or NULL if:
- *   - an error occured.
+ *   Pointer to mbuf for reassembled packet, or NULL if:
+ *   - an error occurred.
 *   - not all fragments of the packet are collected yet.
 */
struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl,
		struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms,
		struct ipv4_hdr *ip_hdr);

-/*
+/**
 * Check if the IPv4 packet is fragmented
 *
 * @param hdr
@@ -273,7 +330,7 @@ rte_ipv4_frag_pkt_is_fragmented(const struct ipv4_hdr * hdr) {
	return ip_flag != 0 || ip_ofs != 0;
 }

-/*
+/**
 * Free mbufs on a given death row.
 *
 * @param dr
@@ -285,7 +342,7 @@ void rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
		uint32_t prefetch);

-/*
+/**
 * Dump fragmentation table statistics to file.
 *
 * @param f
@@ -296,4 +353,8 @@ void rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr,
 void
rte_ip_frag_table_statistics_dump(FILE * f, const struct rte_ip_frag_tbl *tbl);

+#ifdef __cplusplus
+}
+#endif
+
 #endif /* _RTE_IP_FRAG_H_ */
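
The reassembly API above is easier to follow next to a usage sketch. The C fragment below is not part of the patch: it is a minimal, illustrative IPv4 flow that assumes untagged Ethernet framing, an IPv4 header without options, a one-second fragment lifetime, arbitrary sizing constants (MAX_FLOWS, BUCKET_ENTRIES), and hypothetical helper names setup_reassembly()/handle_ipv4(). Only functions declared in this header plus basic mbuf/cycles/lcore calls are used.

/* Illustrative sketch only -- not part of rte_ip_frag.h or of this patch. */
#include <rte_ip_frag.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>
#include <rte_cycles.h>
#include <rte_lcore.h>

#define MAX_FLOWS      4096   /* assumed number of concurrent datagrams */
#define BUCKET_ENTRIES 16     /* assumed hash associativity */

/* Neither the table nor the death row is thread safe: use one pair per lcore. */
static struct rte_ip_frag_tbl *frag_tbl;
static struct rte_ip_frag_death_row death_row;  /* zero-initialized */

static int
setup_reassembly(void)
{
	/* drop incomplete datagrams that are older than roughly one second */
	uint64_t frag_cycles = rte_get_tsc_hz();

	frag_tbl = rte_ip_frag_table_create(MAX_FLOWS, BUCKET_ENTRIES,
			MAX_FLOWS, frag_cycles, rte_socket_id());
	return frag_tbl == NULL ? -1 : 0;
}

/* Returns the input mbuf if it is not a fragment, the reassembled mbuf once
 * the datagram is complete, or NULL while fragments are still missing
 * (or on error). */
static struct rte_mbuf *
handle_ipv4(struct rte_mbuf *m)
{
	struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1); /* untagged Ethernet */

	if (!rte_ipv4_frag_pkt_is_fragmented(ip))
		return m;

	/* reassembly requires l2_len/l3_len to be set on the incoming fragment */
	m->l2_len = sizeof(*eth);
	m->l3_len = sizeof(*ip);        /* assumes no IPv4 options */

	m = rte_ipv4_frag_reassemble_packet(frag_tbl, &death_row, m,
			rte_rdtsc(), ip);

	/* release mbufs of timed-out or duplicate fragments */
	rte_ip_frag_free_death_row(&death_row, 3);
	return m;
}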
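
For IPv6 the flow is the same, except that the caller first locates the fragment extension header. The sketch below shares the includes and assumptions of the previous one; handle_ipv6() is a hypothetical name, and the table/death row are the objects created above.

/* Illustrative sketch only -- reuses the includes and assumptions above. */
static struct rte_mbuf *
handle_ipv6(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr,
		struct rte_mbuf *m)
{
	struct ether_hdr *eth = rte_pktmbuf_mtod(m, struct ether_hdr *);
	struct ipv6_hdr *ip6 = (struct ipv6_hdr *)(eth + 1);
	struct ipv6_extension_fragment *frag_hdr;

	/* only the extension header directly after the fixed header is checked */
	frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(ip6);
	if (frag_hdr == NULL)
		return m;                       /* not a fragment */

	m->l2_len = sizeof(*eth);
	m->l3_len = sizeof(*ip6) + sizeof(*frag_hdr);

	m = rte_ipv6_frag_reassemble_packet(tbl, dr, m, rte_rdtsc(),
			ip6, frag_hdr);
	rte_ip_frag_free_death_row(dr, 3);
	return m;                               /* NULL until complete */
}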