Issues were reported by checkpatch.pl.
Signed-off-by: Anatoly Burakov <anatoly.burakov@intel.com>
Acked-by: Thomas Monjalon <thomas.monjalon@6wind.com>
#else /*RTE_IPV4_FRAG_DEBUG*/
-#define RTE_IPV4_FRAG_ASSERT(exp) do { } while(0)
+#define RTE_IPV4_FRAG_ASSERT(exp) do { } while (0)
#endif /*RTE_IPV4_FRAG_DEBUG*/
/* Fragment size should be a multiply of 8. */
RTE_IPV4_FRAG_ASSERT(IPV4_MAX_FRAGS_PER_PACKET * frag_size >=
- (uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr)));
+ (uint16_t)(pkt_in->pkt.pkt_len - sizeof(struct ipv4_hdr)));
- in_hdr = (struct ipv4_hdr*) pkt_in->pkt.data;
+ in_hdr = (struct ipv4_hdr *) pkt_in->pkt.data;
flag_offset = rte_cpu_to_be_16(in_hdr->fragment_offset);
/* If Don't Fragment flag is set */
if (unlikely ((flag_offset & IPV4_HDR_DF_MASK) != 0))
/* Check that pkts_out is big enough to hold all fragments */
- if (unlikely (frag_size * nb_pkts_out <
+ if (unlikely(frag_size * nb_pkts_out <
(uint16_t)(pkt_in->pkt.pkt_len - sizeof (struct ipv4_hdr))))
in_seg = pkt_in;
in_seg_data_pos = sizeof(struct ipv4_hdr);
out_pkt = rte_pktmbuf_alloc(pool_direct);
if (unlikely(out_pkt == NULL)) {
__free_fragments(pkts_out, out_pkt_pos);
}
/* Reserve space for the IP header that will be built later */
if (unlikely(out_seg == NULL)) {
rte_pktmbuf_free(out_pkt);
__free_fragments(pkts_out, out_pkt_pos);
}
out_seg_prev->pkt.next = out_seg;
out_seg_prev = out_seg;
in_seg_data_pos += len;
/* Current output packet (i.e. fragment) done ? */
- if (unlikely(out_pkt->pkt.pkt_len >= mtu_size)) {
+ if (unlikely(out_pkt->pkt.pkt_len >= mtu_size))
/* Current input segment done ? */
if (unlikely(in_seg_data_pos == in_seg->pkt.data_len)) {
in_seg = in_seg->pkt.next;
in_seg_data_pos = 0;
- if (unlikely(in_seg == NULL)) {
+ if (unlikely(in_seg == NULL))