-#define IPV4_HDR_DF_SHIFT 14
-#define IPV4_HDR_MF_SHIFT 13
-#define IPV4_HDR_FO_SHIFT 3
+#define RTE_IPV4_HDR_DF_SHIFT 14
+#define RTE_IPV4_HDR_MF_SHIFT 13
+#define RTE_IPV4_HDR_FO_SHIFT 3
-#define IPV4_HDR_DF_MASK (1 << IPV4_HDR_DF_SHIFT)
-#define IPV4_HDR_MF_MASK (1 << IPV4_HDR_MF_SHIFT)
+#define IPV4_HDR_DF_MASK (1 << RTE_IPV4_HDR_DF_SHIFT)
+#define IPV4_HDR_MF_MASK (1 << RTE_IPV4_HDR_MF_SHIFT)
static inline void __fill_ipv4hdr_frag(struct rte_ipv4_hdr *dst,
const struct rte_ipv4_hdr *src, uint16_t len, uint16_t fofs,
uint16_t dofs, uint32_t mf)
{
rte_memcpy(dst, src, sizeof(*dst));
- fofs = (uint16_t)(fofs + (dofs >> IPV4_HDR_FO_SHIFT));
- fofs = (uint16_t)(fofs | mf << IPV4_HDR_MF_SHIFT);
+ fofs = (uint16_t)(fofs + (dofs >> RTE_IPV4_HDR_FO_SHIFT));
+ fofs = (uint16_t)(fofs | mf << RTE_IPV4_HDR_MF_SHIFT);
uint16_t fragment_offset, flag_offset, frag_size;
uint16_t frag_bytes_remaining;
+ /*
+ * Formal parameter checking.
+ */
+ if (unlikely(pkt_in == NULL) || unlikely(pkts_out == NULL) ||
+ unlikely(nb_pkts_out == 0) ||
+ unlikely(pool_direct == NULL) || unlikely(pool_indirect == NULL) ||
+ unlikely(mtu_size < RTE_ETHER_MIN_MTU))
+ return -EINVAL;
+
/*
 * Ensure the IP payload length of all fragments is aligned to a
 * multiple of 8 bytes as per RFC791 section 2.3.