};
/** IPv6 fragment extension header */
+/*
+ * Layout of the 16-bit frag_data word of the IPv6 fragment extension
+ * header (values below define it as: bit 0 = more-fragments flag,
+ * bits 1-2 reserved, bits 3-15 = fragment offset). Since the offset is
+ * a multiple of 8 bytes, the byte offset can be masked straight into
+ * the word -- see RTE_IPV6_SET_FRAG_DATA.
+ */
+#define RTE_IPV6_EHDR_MF_SHIFT 0
+#define RTE_IPV6_EHDR_MF_MASK 1
+#define RTE_IPV6_EHDR_FO_SHIFT 3
+/* Selects the offset bits: ~0x7, i.e. 0xfff8 in a 16-bit word. */
+#define RTE_IPV6_EHDR_FO_MASK (~((1 << RTE_IPV6_EHDR_FO_SHIFT) - 1))
+
+/* Any bit set here means the packet is (part of) a fragment. */
+#define RTE_IPV6_FRAG_USED_MASK \
+ (RTE_IPV6_EHDR_MF_MASK | RTE_IPV6_EHDR_FO_MASK)
+
+/* Accessors for a frag_data word; x should be in CPU byte order
+ * (NOTE(review): convert with rte_be_to_cpu_16 first -- confirm all
+ * call sites do). GET_FO yields the offset field in 8-byte units. */
+#define RTE_IPV6_GET_MF(x) ((x) & RTE_IPV6_EHDR_MF_MASK)
+#define RTE_IPV6_GET_FO(x) ((x) >> RTE_IPV6_EHDR_FO_SHIFT)
+
+/* Compose frag_data from a byte offset fo (low 3 bits discarded by the
+ * mask) and the more-fragments flag mf. */
+#define RTE_IPV6_SET_FRAG_DATA(fo, mf) \
+ (((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK))
+
struct ipv6_extension_fragment {
uint8_t next_header; /**< Next header type */
- uint8_t reserved1; /**< Reserved */
- union {
- struct {
- uint16_t frag_offset:13; /**< Offset from the start of the packet */
- uint16_t reserved2:2; /**< Reserved */
- uint16_t more_frags:1;
- /**< 1 if more fragments left, 0 if last fragment */
- };
- uint16_t frag_data;
- /**< union of all fragmentation data */
- };
+ uint8_t reserved; /**< Reserved */
+ /* Written in network byte order (rte_cpu_to_be_16); byte-swap before
+ * applying RTE_IPV6_GET_FO()/RTE_IPV6_GET_MF(). */
+ uint16_t frag_data; /**< All fragmentation data */
uint32_t id; /**< Packet ID */
} __attribute__((__packed__));
*
*/
-/* Fragment Extension Header */
-#define IPV6_HDR_MF_SHIFT 0
-#define IPV6_HDR_FO_SHIFT 3
-#define IPV6_HDR_MF_MASK (1 << IPV6_HDR_MF_SHIFT)
-#define IPV6_HDR_FO_MASK ((1 << IPV6_HDR_FO_SHIFT) - 1)
-
static inline void
__fill_ipv6hdr_frag(struct ipv6_hdr *dst,
const struct ipv6_hdr *src, uint16_t len, uint16_t fofs,
fh = (struct ipv6_extension_fragment *) ++dst;
fh->next_header = src->proto;
- fh->reserved1 = 0;
- fh->frag_offset = rte_cpu_to_be_16(fofs);
- fh->reserved2 = 0;
- fh->more_frags = rte_cpu_to_be_16(mf);
+ fh->reserved = 0;
+ fh->frag_data = rte_cpu_to_be_16(RTE_IPV6_SET_FRAG_DATA(fofs, mf));
fh->id = 0;
}
frag_size = (uint16_t)(mtu_size - sizeof(struct ipv6_hdr));
/* Fragment size should be a multiple of 8. */
- IP_FRAG_ASSERT((frag_size & IPV6_HDR_FO_MASK) == 0);
+ IP_FRAG_ASSERT((frag_size & ~RTE_IPV6_EHDR_FO_MASK) == 0);
/* Check that pkts_out is big enough to hold all fragments */
if (unlikely (frag_size * nb_pkts_out <
"tbl: %p, max_cycles: %" PRIu64 ", entry_mask: %#x, "
"max_entries: %u, use_entries: %u\n\n",
__func__, __LINE__,
- mb, tms, IPv6_KEY_BYTES(key.src_dst), key.id, ip_ofs, ip_len, frag_hdr->more_frags,
+ mb, tms, IPv6_KEY_BYTES(key.src_dst), key.id, ip_ofs, ip_len,
+ RTE_IPV6_GET_MF(frag_hdr->frag_data),
tbl, tbl->max_cycles, tbl->entry_mask, tbl->max_entries,
tbl->use_entries);
struct ipv6_hdr *pkt_hdr = rte_pktmbuf_mtod(pkt, struct ipv6_hdr *);
struct ipv6_extension_fragment *frag_hdr;
+ uint16_t frag_data = 0;
frag_hdr = rte_ipv6_frag_get_ipv6_fragment_header(pkt_hdr);
- uint16_t frag_offset = frag_hdr->frag_offset;
- uint16_t frag_flag = frag_hdr->more_frags;
+ if (frag_hdr != NULL)
+ frag_data = rte_be_to_cpu_16(frag_hdr->frag_data);
/* If it is a fragmented packet, then try to reassemble */
- if ((frag_flag == 0) && (frag_offset == 0))
+ if ((frag_data & RTE_IPV6_FRAG_USED_MASK) == 0)
p->tx_buf[p->tx_buf_count++] = pkt;
else {
struct rte_mbuf *mo;