}
                if (c != ',') {
                        printf("character %c is not a decimal digit\n", c);
-                       return (0);
+                       return 0;
                }
                if (! value_ok) {
                        printf("No valid value before comma\n");
-                       return (0);
+                       return 0;
                }
                if (nb_item < max_items) {
                        parsed_items[nb_item] = value;
        if (nb_item >= max_items) {
                printf("Number of %s = %u > %u (maximum items)\n",
                       item_name, nb_item + 1, max_items);
-               return (0);
+               return 0;
        }
        parsed_items[nb_item++] = value;
        if (! check_unique_values)
-               return (nb_item);
+               return nb_item;
 
        /*
         * Then, check that all values in the list are different.
                        if (parsed_items[j] == parsed_items[i]) {
                                printf("duplicated %s %u at index %u and %u\n",
                                       item_name, parsed_items[i], i, j);
-                               return (0);
+                               return 0;
                        }
                }
        }
-       return (nb_item);
+       return nb_item;
 }
 
 struct cmd_set_list_result {
 
                printf("%s ring memory zoneof (port %d, queue %d) not"
                       "found (zone name = %s\n",
                       ring_name, port_id, q_id, mz_name);
-       return (mz);
+       return mz;
 }
 
 union igb_ring_dword {
 
 
        m = __rte_mbuf_raw_alloc(mp);
        __rte_mbuf_sanity_check_raw(m, 0);
-       return (m);
+       return m;
 }
 
 
 
        nb = pg_num * sizeof(*pa);
 
        if ((fd = open(PAGEMAP_FNAME, O_RDONLY)) < 0)
-               return (ENOENT);
+               return ENOENT;
 
        if ((rc = pread(fd, pa, nb, ofs)) < 0 || (rc -= nb) != 0) {
 
        for (i = 0; i != pg_num; i++)
                pa[i] = (pa[i] & PAGEMAP_PFN_MASK) * pg_sz;
 
-       return (rc);
+       return rc;
 }
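
The hunk above walks /proc/self/pagemap to translate virtual pages to
physical addresses. A minimal standalone sketch of the same technique,
assuming the 55-bit PFN field documented for pagemap (virt2phys and its
error handling are simplified here, and on recent kernels the PFN reads
as 0 without CAP_SYS_ADMIN):

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <fcntl.h>

#define PAGEMAP_FNAME    "/proc/self/pagemap"
#define PAGEMAP_PFN_MASK ((UINT64_C(1) << 55) - 1)

static uint64_t
virt2phys(const void *addr)
{
	uint64_t entry, pg_sz = getpagesize();
	off_t ofs = (uintptr_t)addr / pg_sz * sizeof(entry);
	int fd = open(PAGEMAP_FNAME, O_RDONLY);

	if (fd < 0)
		return 0;
	/* One 64-bit record per virtual page; low 55 bits hold the PFN. */
	if (pread(fd, &entry, sizeof(entry), ofs) != sizeof(entry))
		entry = 0;
	close(fd);
	return (entry & PAGEMAP_PFN_MASK) * pg_sz + (uintptr_t)addr % pg_sz;
}

int
main(void)
{
	int x = 42;

	printf("virt %p -> phys 0x%llx\n", (void *)&x,
	       (unsigned long long)virt2phys(&x));
	return 0;
}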
 
 struct rte_mempool *
        pg_sz = getpagesize();
        if (rte_is_power_of_2(pg_sz) == 0) {
                rte_errno = EINVAL;
-               return (mp);
+               return mp;
        }
 
        pg_shift = rte_bsf32(pg_sz);
                        "error code: %d\n",
                        __func__, name, sz, errno);
                rte_errno = rc;
-               return (mp);
+               return mp;
        }
 
        /* extract physical mappings of the allocated memory. */
        }
 
        free(pa);
-       return (mp);
+       return mp;
 }
 
 #else /* RTE_EXEC_ENV_LINUXAPP */
        __rte_unused int socket_id, __rte_unused unsigned flags)
 {
        rte_errno = ENOTSUP;
-       return (NULL);
+       return NULL;
 }
 
 #endif /* RTE_EXEC_ENV_LINUXAPP */
 
        char pool_name[RTE_MEMPOOL_NAMESIZE];
 
        mbuf_poolname_build(sock_id, pool_name, sizeof(pool_name));
-       return (rte_mempool_lookup((const char *)pool_name));
+       return rte_mempool_lookup((const char *)pool_name);
 }
 
 /**
 
 
        m = __rte_mbuf_raw_alloc(mp);
        __rte_mbuf_sanity_check_raw(m, 0);
-       return (m);
+       return m;
 }
 
 static void
 
        }
        printf("count: %u\n", count);
 
-       return (count == 2 ? 0 : -1);
+       return count == 2 ? 0 : -1;
 }
 
 static int
 
        printf("%s finished at lcore %u, "
               "number of freed mbufs: %u\n",
               __func__, lcore, free);
-       return (0);
+       return 0;
 }
 
 static void
        rte_wmb();
 
        printf("%s finished at lcore %u\n", __func__, lcore);
-       return (0);
+       return 0;
 }
 
 #endif
        if ((lnum = rte_lcore_count()) == 1) {
                printf("skipping %s, number of lcores: %u is not enough\n",
                    __func__, lnum);
-               return (0);
+               return 0;
        }
 
        printf("starting %s, at %u lcores\n", __func__, lnum);
                                SOCKET_ID_ANY)) == NULL) {
                printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
                    __func__);
-               return (-1);
+               return -1;
        }
 
        if (refcnt_mbuf_ring == NULL &&
                        RING_F_SP_ENQ)) == NULL) {
                printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
                    "\n", __func__);
-               return (-1);
+               return -1;
        }
 
        refcnt_stop_slaves = 0;
        rte_ring_dump(stdout, refcnt_mbuf_ring);
 
 #endif
-       return (0);
+       return 0;
 }
 
 #include <unistd.h>
 
 static inline size_t
 get_rand_offset(size_t uoffset)
 {
-       return (((rte_rand() % (LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE)) &
-                       ~(ALIGNMENT_UNIT - 1)) + uoffset);
+       return ((rte_rand() % (LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE)) &
+                       ~(ALIGNMENT_UNIT - 1)) + uoffset;
 }
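
get_rand_offset() above rounds a random offset down to ALIGNMENT_UNIT by
masking off the low bits, which only works for power-of-two alignments.
A standalone sketch with invented buffer sizes (and rand() standing in
for rte_rand()):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define LARGE_BUFFER_SIZE 1024	/* assumed value, for illustration */
#define SMALL_BUFFER_SIZE 64	/* assumed value, for illustration */
#define ALIGNMENT_UNIT    16	/* must be a power of two */

static size_t
get_rand_offset(size_t uoffset)
{
	/* Clearing the low bits aligns the random part down to 16. */
	return ((rand() % (LARGE_BUFFER_SIZE - SMALL_BUFFER_SIZE)) &
			~(ALIGNMENT_UNIT - 1)) + uoffset;
}

int
main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		size_t off = get_rand_offset(3);
		/* Every offset is exactly 3 past a 16-byte boundary. */
		printf("offset=%zu, (offset - 3) %% 16 = %zu\n",
		       off, (off - 3) % 16);
	}
	return 0;
}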
 
 /* Fill in source and destination addresses. */
 
                printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
                        "returns: %#zx, while expected: %#zx;\n",
                        __func__, elt_num, total_size, sz, (size_t)usz);
-               return (-1);
+               return -1;
        }
 
-       return (0);
+       return 0;
 }
 
 static int
 
                        align, bound)) == NULL) {
                printf("%s(%s): memzone creation failed\n",
                        __func__, name);
-               return (-1);
+               return -1;
        }
 
        if ((mz->phys_addr & ((phys_addr_t)align - 1)) != 0) {
                printf("%s(%s): invalid phys addr alignment\n",
                        __func__, mz->name);
-               return (-1);
+               return -1;
        }
 
        if (((uintptr_t) mz->addr & ((uintptr_t)align - 1)) != 0) {
                printf("%s(%s): invalid virtual addr alignment\n",
                        __func__, mz->name);
-               return (-1);
+               return -1;
        }
 
        if ((mz->len & RTE_CACHE_LINE_MASK) != 0 || mz->len < len ||
                        mz->len < RTE_CACHE_LINE_SIZE) {
                printf("%s(%s): invalid length\n",
                        __func__, mz->name);
-               return (-1);
+               return -1;
        }
 
        if ((mz->phys_addr & bmask) !=
                        ((mz->phys_addr + mz->len - 1) & bmask)) {
                printf("%s(%s): invalid memzone boundary %u crossed\n",
                        __func__, mz->name, bound);
-               return (-1);
+               return -1;
        }
 
-       return (0);
+       return 0;
 }
 
 static int
                        100, SOCKET_ID_ANY, 0, 32, UINT32_MAX)) != NULL) {
                printf("%s(%s)created a memzone with invalid boundary "
                        "conditions\n", __func__, memzone_err->name);
-               return (-1);
+               return -1;
        }
 
        /* should fail as len is greater than boundary */
                        100, SOCKET_ID_ANY, 0, 32, 32)) != NULL) {
                printf("%s(%s)created a memzone with invalid boundary "
                        "conditions\n", __func__, memzone_err->name);
-               return (-1);
+               return -1;
        }
 
        if ((rc = check_memzone_bounded("bounded_128", 100, 128, 128)) != 0)
-               return (rc);
+               return rc;
 
        if ((rc = check_memzone_bounded("bounded_256", 100, 256, 128)) != 0)
-               return (rc);
+               return rc;
 
        if ((rc = check_memzone_bounded("bounded_1K", 100, 64, 1024)) != 0)
-               return (rc);
+               return rc;
 
        if ((rc = check_memzone_bounded("bounded_1K_MAX", 0, 64, 1024)) != 0)
-               return (rc);
+               return rc;
 
        return 0;
 }
 
        } else {
                drop_prob = 1.0;
        }
-       return (drop_prob);
+       return drop_prob;
 }
 
 /**
                        ret = 0;
                }
         }
-       return (ret);
+       return ret;
 }
 
 /**
                        ret = 0;
                 }
        }
-       return (ret);
+       return ret;
 }
 
 /**
                   USEC_PER_MSEC); /**< diff is in microseconds */
 
        if (diff == 0)
-               return(0);
+               return 0;
 
        clk_freq_hz = ((end - start) * USEC_PER_SEC / diff);
-       return (clk_freq_hz);
+       return clk_freq_hz;
 }
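
The calculation above estimates a clock frequency from a cycle delta and
an elapsed time measured in microseconds. A minimal sketch of the same
arithmetic with invented numbers:

#include <stdio.h>
#include <stdint.h>

#define USEC_PER_SEC 1000000ULL

static uint64_t
estimate_clk_freq_hz(uint64_t start, uint64_t end, uint64_t diff_us)
{
	if (diff_us == 0)
		return 0;
	/* cycles / microseconds, scaled to cycles per second (Hz). */
	return (end - start) * USEC_PER_SEC / diff_us;
}

int
main(void)
{
	/* 2.4e9 cycles over 1,000,000 us -> 2.4 GHz. */
	printf("%llu Hz\n", (unsigned long long)
	       estimate_clk_freq_hz(0, 2400000000ULL, 1000000ULL));
	return 0;
}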
 
 /**
                                        (uint16_t)tcfg->tconfig->min_th,
                                        (uint16_t)tcfg->tconfig->max_th,
                                        (uint16_t)tcfg->tconfig->maxp_inv[i]) != 0) {
-                       return(FAIL);
+                       return FAIL;
                }
        }
 
        *tcfg->tqueue->q = 0;
        *tcfg->tvar->dropped = 0;
        *tcfg->tvar->enqueued = 0;
-       return(PASS);
+       return PASS;
 }
 
 /**
         * check if target actual queue size has been reached
         */
         if (*q != level)
-                return (-1);
+                return -1;
         /**
          * success
          */
-        return (0);
+        return 0;
 }
 
 /**
          */
         avg = rte_red_get_avg_int(red_cfg, red);
         if (avg != level)
-                return (-1);
+                return -1;
         /**
          * success
          */
-        return (0);
+        return 0;
 }
 
 /**
                       (double)tcfg->tqueue->drop_tolerance);
        }
 out:
-       return (result);
+       return result;
 }
 
 /**
                       (double)tcfg->tqueue->drop_tolerance);
        }
 out:
-       return (result);
+       return result;
 }
 
 /**
                       diff <= (double)tcfg->tqueue->avg_tolerance ? "pass" : "fail");
        }
 out:
-       return (result);
+       return result;
 }
 
 /**
               diff, (double)tcfg->tqueue->avg_tolerance,
               diff <= (double)tcfg->tqueue->avg_tolerance ? "pass" : "fail");
 out:
-       return (result);
+       return result;
 }
 
 /**
                       diff, (double)tcfg->tqueue->drop_tolerance);
        }
 out:
-       return (result);
+       return result;
 }
 
 /**
                       diff <= tcfg->tqueue->avg_tolerance ? "pass" : "fail");
        }
 out:
-       return (result);
+       return result;
 }
 
 /**
 
        rdtsc_prof_print(&prof);
 out:
-       return (result);
+       return result;
 }
 
 /**
 
        rdtsc_prof_print(&prof);
 out:
-       return (result);
+       return result;
 }
 
 /**
               *tcfg->tvar->enqueued, *tcfg->tvar->dropped,
               drop_prob * 100.0, drop_rate * 100.0);
 out:
-       return (result);
+       return result;
 }
 
 /**
                printf("[total: %u, pass: %u, fail: %u]\n", num_tests, num_pass, num_tests - num_pass);
                ret = -1;
        }
-       return (ret);
+       return ret;
 }
 
 static struct test_command red_cmd = {
 
                printf("error at %s:%d\tcondition " #exp " failed\n",   \
                    __func__, __LINE__);                                \
                rte_ring_dump(stdout, r);                               \
-               return (-1);                                            \
+               return -1;                                              \
        }
 
 #define        TEST_RING_FULL_EMTPY_ITER       8
                TEST_RING_VERIFY(0 == memcmp(src, dst, rsz));
                rte_ring_dump(stdout, r);
        }
-       return (0);
+       return 0;
 }
 
 static int
 
 
         for (n = 0; v != 0; v &= v - 1, n++)
            ;
-        return (n);
+        return n;
     }
 
 This is done to determine which forwarding algorithm to use.
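
The loop above is Kernighan's bit count: each v &= v - 1 clears the
lowest set bit, so the loop body runs once per set bit, here yielding
the count of destination ports set in the port mask. A standalone
sketch:

#include <stdio.h>
#include <stdint.h>

static int
bitcnt(uint32_t v)
{
	int n;

	/* v &= v - 1 clears the lowest set bit each iteration. */
	for (n = 0; v != 0; v &= v - 1, n++)
		;
	return n;
}

int
main(void)
{
	printf("%d\n", bitcnt(0xb3));	/* 0b10110011 -> 5 */
	return 0;
}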
         /* Create new mbuf for the header. */
 
         if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
-            return (NULL);
+            return NULL;
 
         /* If requested, then make a new clone packet. */
 
         if (use_clone != 0 && unlikely ((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
             rte_pktmbuf_free(hdr);
-            return (NULL);
+            return NULL;
         }
 
         /* prepend new header */
         hdr->ol_flags = pkt->ol_flags;
         rte_mbuf_sanity_check(hdr, RTE_MBUF_PKT, 1);
 
-        return (hdr);
+        return hdr;
     }
 
        qp = rte_zmalloc_socket("AES-NI PMD Queue Pair", sizeof(*qp),
                                        RTE_CACHE_LINE_SIZE, socket_id);
        if (qp == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        qp->id = qp_id;
        dev->data->queue_pairs[qp_id] = qp;
 
        tail = queue->tail;
 
        /* Find how many can actually fit on the ring */
-       overflow = (rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
-                               - queue->max_inflights);
+       overflow = rte_atomic16_add_return(&tmp_qp->inflights16, nb_pkts)
+                               - queue->max_inflights;
        if (overflow > 0) {
                rte_atomic16_sub(&tmp_qp->inflights16, overflow);
                nb_pkts_possible = nb_pkts - overflow;
 
                (qp_conf->nb_descriptors < ADF_MIN_SYM_DESC)) {
                PMD_DRV_LOG(ERR, "Can't create qp for %u descriptors",
                                qp_conf->nb_descriptors);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        if (dev->pci_dev->mem_resource[0].addr == NULL) {
                PMD_DRV_LOG(ERR, "Could not find VF config space "
                                "(UIO driver attached?).");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        if (queue_pair_id >=
                                        ADF_NUM_BUNDLES_PER_DEV)) {
                PMD_DRV_LOG(ERR, "qp_id %u invalid for this device",
                                queue_pair_id);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /* Allocate the queue pair data structure. */
                        sizeof(*qp), RTE_CACHE_LINE_SIZE);
        if (qp == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc mem for qp struct");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        qp->mmap_bar_addr = dev->pci_dev->mem_resource[0].addr;
        rte_atomic16_init(&qp->inflights16);
 
 create_err:
        rte_free(qp);
-       return (-EFAULT);
+       return -EFAULT;
 }
 
 int qat_crypto_sym_qp_release(struct rte_cryptodev *dev, uint16_t queue_pair_id)
        PMD_INIT_FUNC_TRACE();
        if (desc_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
                PMD_DRV_LOG(ERR, "Invalid descriptor size %d", desc_size);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /*
                        socket_id);
        if (qp_mz == NULL) {
                PMD_DRV_LOG(ERR, "Failed to allocate ring memzone");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        queue->base_addr = (char *)qp_mz->addr;
        if (adf_verify_queue_size(desc_size, nb_desc, &(queue->queue_size))
                        != 0) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        queue->max_inflights = ADF_MAX_INFLIGHTS(queue->queue_size,
 
        if (queue->max_inflights < 2) {
                PMD_DRV_LOG(ERR, "Invalid num inflights");
-               return (-EINVAL);
+               return -EINVAL;
        }
        queue->head = 0;
        queue->tail = 0;
 {
        PMD_INIT_FUNC_TRACE();
        if (((queue_size_bytes - 1) & phys_addr) != 0)
-               return (-EINVAL);
+               return -EINVAL;
        return 0;
 }
 
                        return 0;
                }
        PMD_DRV_LOG(ERR, "Invalid ring size %d", msg_size * msg_num);
-       return (-EINVAL);
+       return -EINVAL;
 }
 
 static void adf_queue_arb_enable(struct qat_queue *txq, void *base_addr)
 
 
 uint32_t bnx2x_dmae_opcode_add_comp(uint32_t opcode, uint8_t comp_type)
 {
-       return (opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
-                         DMAE_COMMAND_C_TYPE_ENABLE));
+       return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+                         DMAE_COMMAND_C_TYPE_ENABLE);
 }
 
 uint32_t bnx2x_dmae_opcode_clr_src_reset(uint32_t opcode)
 {
-       return (opcode & ~DMAE_COMMAND_SRC_RESET);
+       return opcode & ~DMAE_COMMAND_SRC_RESET;
 }
 
 uint32_t
 
        mb();                   /* status block fields can change */
        hw_cons = le16toh(*fp->tx_cons_sb);
-       return (hw_cons != txq->tx_pkt_head);
+       return hw_cons != txq->tx_pkt_head;
 }
 
 static uint8_t bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
        if (unlikely((rx_cq_cons_sb & MAX_RCQ_ENTRIES(rxq)) ==
                     MAX_RCQ_ENTRIES(rxq)))
                rx_cq_cons_sb++;
-       return (rxq->rx_cq_head != rx_cq_cons_sb);
+       return rxq->rx_cq_head != rx_cq_cons_sb;
 }
 
 static void
        /* Update producers */
        bnx2x_update_rx_prod(sc, fp, bd_prod_fw, sw_cq_prod);
 
-       return (sw_cq_cons != hw_cq_cons);
+       return sw_cq_cons != hw_cq_cons;
 }
 
 static uint16_t
 /* checks the GLOBAL_RESET bit, should be run under rtnl lock */
 static uint8_t bnx2x_reset_is_global(struct bnx2x_softc *sc)
 {
-       return (REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT);
+       return REG_RD(sc, BNX2X_RECOVERY_GLOB_REG) & BNX2X_GLOBAL_RESET_BIT;
 }
 
 /* clear RESET_IN_PROGRESS bit for the engine, should be run under rtnl lock */
 
        val = ((val & mask) >> shift);
 
-       return (val != 0);
+       return val != 0;
 }
 
 /* set pf load mark */
 static uint8_t bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
 {
        if (CHIP_IS_E1x(fp->sc)) {
-               return (fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H);
+               return fp->cl_id + SC_PORT(fp->sc) * ETH_MAX_RX_CLIENTS_E1H;
        } else {
-               return (fp->cl_id);
+               return fp->cl_id;
        }
 }
 
        uint32_t offset = BAR_USTRORM_INTMEM;
 
        if (IS_VF(sc)) {
-               return (PXP_VF_ADDR_USDM_QUEUES_START +
+               return PXP_VF_ADDR_USDM_QUEUES_START +
                        (sc->acquire_resp.resc.hw_qid[fp->index] *
-                        sizeof(struct ustorm_queue_zone_data)));
+                        sizeof(struct ustorm_queue_zone_data));
        } else if (!CHIP_IS_E1x(sc)) {
                offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
        } else {
 
 static uint8_t bnx2x_is_pcie_pending(struct bnx2x_softc *sc)
 {
-       return (bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
-               PCIM_EXP_STA_TRANSACTION_PND);
+       return bnx2x_pcie_capability_read(sc, PCIR_EXPRESS_DEVICE_STA) &
+               PCIM_EXP_STA_TRANSACTION_PND;
 }
 
 /*
 {
        uint32_t base = PXP2_REG_PGL_PRETEND_FUNC_F0;
        uint32_t stride = (PXP2_REG_PGL_PRETEND_FUNC_F1 - base);
-       return (base + (SC_ABS_FUNC(sc)) * stride);
+       return base + (SC_ABS_FUNC(sc)) * stride;
 }
 
 /*
 {
        /* adjust polling timeout */
        if (CHIP_REV_IS_EMUL(sc)) {
-               return (FLR_POLL_CNT * 2000);
+               return FLR_POLL_CNT * 2000;
        }
 
        if (CHIP_REV_IS_FPGA(sc)) {
-               return (FLR_POLL_CNT * 120);
+               return FLR_POLL_CNT * 120;
        }
 
        return FLR_POLL_CNT;
 
 static inline int
 func_by_vn(struct bnx2x_softc *sc, int vn)
 {
-    return (2 * vn + SC_PORT(sc));
+    return 2 * vn + SC_PORT(sc);
 }
 
 /*
        return fp->cl_id;
     }
 
-    return (fp->cl_id + SC_PORT(sc) * FP_SB_MAX_E1x);
+    return fp->cl_id + SC_PORT(sc) * FP_SB_MAX_E1x;
 }
 
 int bnx2x_init(struct bnx2x_softc *sc);
 
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (NULL == rxq) {
                PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        rxq->sc = sc;
        rxq->mb_pool = mp;
        if (NULL == dma) {
                PMD_RX_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed!");
                bnx2x_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        fp->rx_desc_mapping = rxq->rx_ring_phys_addr = (uint64_t)dma->phys_addr;
        rxq->rx_ring = (uint64_t*)dma->addr;
        if (NULL == rxq->sw_ring) {
                PMD_RX_LOG(ERR, "rte_zmalloc for sw_ring failed!");
                bnx2x_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Initialize software ring entries */
                        PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d",
                                   (unsigned)rxq->queue_id, idx);
                        bnx2x_rx_queue_release(rxq);
-                       return (-ENOMEM);
+                       return -ENOMEM;
                }
                rxq->sw_ring[idx] = mbuf;
                rxq->rx_ring[idx] = mbuf->buf_physaddr;
        dma = ring_dma_zone_reserve(dev, "bnx2x_rcq", queue_idx, dma_size, socket_id);
        if (NULL == dma) {
                PMD_RX_LOG(ERR, "RCQ  alloc failed");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        fp->rx_comp_mapping = rxq->cq_ring_phys_addr = (uint64_t)dma->phys_addr;
        rxq->cq_ring = (union eth_rx_cqe*)dma->addr;
        txq = rte_zmalloc("ethdev TX queue", sizeof(struct bnx2x_tx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
        txq->sc = sc;
 
        txq->nb_tx_pages = 1;
        tz = ring_dma_zone_reserve(dev, "tx_hw_ring", queue_idx, tsize, socket_id);
        if (tz == NULL) {
                bnx2x_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        fp->tx_desc_mapping = txq->tx_ring_phys_addr = (uint64_t)tz->phys_addr;
        txq->tx_ring = (union eth_tx_bd_types *) tz->addr;
                                   RTE_CACHE_LINE_SIZE);
        if (txq->sw_ring == NULL) {
                bnx2x_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
 
        val = (uint8_t)(*((volatile uint8_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)));
        PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%02x", (unsigned long)offset, val);
 
-       return (val);
+       return val;
 }
 
 uint16_t
        val = (uint16_t)(*((volatile uint16_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)));
        PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val);
 
-       return (val);
+       return val;
 }
 
 uint32_t
        val = (uint32_t)(*((volatile uint32_t*)((uintptr_t)sc->bar[BAR0].base_addr + offset)));
        PMD_DEBUG_PERIODIC_LOG(DEBUG, "offset=0x%08lx val=0x%08x", (unsigned long)offset, val);
 
-       return (val);
+       return val;
 }
 
        port4mode_ovwr_val = REG_RD(sc, MISC_REG_PORT4MODE_EN_OVWR);
        if (port4mode_ovwr_val & (1 << 0)) {
                /* Return 4-port mode override value */
-               return ((port4mode_ovwr_val & (1 << 1)) == (1 << 1));
+               return (port4mode_ovwr_val & (1 << 1)) == (1 << 1);
        }
        /* Return 4-port mode from input pin */
        return (uint8_t) REG_RD(sc, MISC_REG_PORT4MODE_EN);
 
 static inline uint32_t
 ipv4_hash(struct ipv4_hdr *ipv4_hdr)
 {
-       return (ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr);
+       return ipv4_hdr->src_addr ^ ipv4_hdr->dst_addr;
 }
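
ipv4_hash() above folds the source and destination addresses with XOR,
a cheap hash that is symmetric (both directions of a flow hash alike).
A sketch using plain uint32_t addresses instead of struct ipv4_hdr, with
invented values, reducing the hash modulo an assumed port count:

#include <stdio.h>
#include <stdint.h>

static uint32_t
ipv4_hash(uint32_t src_addr, uint32_t dst_addr)
{
	return src_addr ^ dst_addr;
}

int
main(void)
{
	uint32_t a = 0x0a000001;	/* 10.0.0.1 */
	uint32_t b = 0xc0a80102;	/* 192.168.1.2 */
	unsigned int nb_ports = 4;	/* assumed port count */

	/* Symmetric: both directions pick the same output port. */
	printf("a->b: port %u\n", ipv4_hash(a, b) % nb_ports);
	printf("b->a: port %u\n", ipv4_hash(b, a) % nb_ports);
	return 0;
}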
 
 static inline uint32_t
 
 
 static inline bool is_x_1g_port(const struct link_config *lc)
 {
-       return ((lc->supported & FW_PORT_CAP_SPEED_1G) != 0);
+       return (lc->supported & FW_PORT_CAP_SPEED_1G) != 0;
 }
 
 static inline bool is_x_10g_port(const struct link_config *lc)
 
                        "failed to init HW",
                        eth_dev->data->port_id, pci_dev->id.vendor_id,
                        pci_dev->id.device_id);
-               return -(ENODEV);
+               return -ENODEV;
        }
 
        /* Allocate memory for storing MAC addresses */
                PMD_INIT_LOG(ERR, "Failed to allocate %d bytes needed to "
                        "store MAC addresses",
                        ETHER_ADDR_LEN * hw->mac.rar_entry_count);
-               return -(ENOMEM);
+               return -ENOMEM;
        }
 
        /* Copy the permanent MAC address */
        rte_intr_callback_register(&(pci_dev->intr_handle),
                eth_em_interrupt_handler, (void *)eth_dev);
 
-       return (0);
+       return 0;
 }
 
 static int
                PMD_INIT_LOG(ERR, "PHY reset is blocked due to "
                        "SOL/IDER session");
        }
-       return (0);
+       return 0;
 
 error:
        em_hw_control_release(hw);
-       return (diag);
+       return diag;
 }
 
 static int
        intr->flags |= E1000_FLAG_NEED_LINK_UPDATE;
        PMD_INIT_FUNC_TRACE();
 
-       return (0);
+       return 0;
 }
 
 static void
        /* Initialize the hardware */
        if (em_hardware_init(hw)) {
                PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
-               return (-EIO);
+               return -EIO;
        }
 
        E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN);
 
        PMD_INIT_LOG(DEBUG, "<<");
 
-       return (0);
+       return 0;
 
 error_invalid_config:
        PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
                     dev->data->dev_conf.link_speed,
                     dev->data->dev_conf.link_duplex, dev->data->port_id);
        em_dev_clear_queues(dev);
-       return (-EINVAL);
+       return -EINVAL;
 }
 
 /*********************************************************************
 
        diag = e1000_init_hw(hw);
        if (diag < 0)
-               return (diag);
+               return diag;
        e1000_check_for_link(hw);
-       return (0);
+       return 0;
 }
 
 /* This function is based on em_update_stats_counters() in e1000/if_em.c */
        case e1000_82574:
        case e1000_80003es2lan: /* 9K Jumbo Frame size */
        case e1000_82583:
-               return (0x2412);
+               return 0x2412;
        case e1000_pchlan:
-               return (0x1000);
+               return 0x1000;
        /* Adapters that do not support jumbo frames */
        case e1000_ich8lan:
-               return (ETHER_MAX_LEN);
+               return ETHER_MAX_LEN;
        default:
-               return (MAX_JUMBO_FRAME_SIZE);
+               return MAX_JUMBO_FRAME_SIZE;
        }
 }
 
        E1000_READ_REG(hw, E1000_ICR);
        regval = E1000_READ_REG(hw, E1000_IMS);
        E1000_WRITE_REG(hw, E1000_IMS, regval | E1000_ICR_LSC);
-       return (0);
+       return 0;
 }
 
 /*
        struct e1000_hw *hw;
 
        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+       return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
        struct e1000_hw *hw;
 
        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+       return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
            (fc_conf->high_water < fc_conf->low_water)) {
                PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
                PMD_INIT_LOG(ERR, "high water must <= 0x%x", max_high_water);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
        }
 
        PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
-       return (-EIO);
+       return -EIO;
 }
 
 static void
 
 
        m = __rte_mbuf_raw_alloc(mp);
        __rte_mbuf_sanity_check_raw(m, 0);
-       return (m);
+       return m;
 }
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb)             \
        if (likely (txq->ctx_cache.flags == flags &&
                        ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
                        txq->ctx_cache.cmp_mask) == 0))
-               return (EM_CTX_0);
+               return EM_CTX_0;
 
        /* Mismatch */
-       return (EM_CTX_NUM);
+       return EM_CTX_NUM;
 }
 
 /* Reset transmit descriptors after they have been used */
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
 
        /* No Error */
-       return (0);
+       return 0;
 }
 
 static inline uint32_t
 
        tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
        tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
-       return (tmp);
+       return tmp;
 }
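
The lookup above turns checksum-offload flags into descriptor bits
without branching: a boolean comparison indexes a two-entry table. A
standalone sketch with invented flag and descriptor values:

#include <stdio.h>
#include <stdint.h>

#define PKT_TX_IP_CKSUM    (1ULL << 1)	/* assumed bit layout */
#define PKT_TX_L4_MASK     (3ULL << 2)	/* assumed bit layout */
#define PKT_TX_L4_NO_CKSUM 0ULL
#define HW_L3_CSUM 0x1			/* invented descriptor bits */
#define HW_L4_CSUM 0x2

static uint32_t
tx_olinfo(uint64_t ol_flags)
{
	static const uint32_t l4_olinfo[2] = { 0, HW_L4_CSUM };
	static const uint32_t l3_olinfo[2] = { 0, HW_L3_CSUM };
	uint32_t tmp;

	/* The comparison yields 0 or 1 and selects the table entry. */
	tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
	tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
	return tmp;
}

int
main(void)
{
	printf("0x%x\n", tx_olinfo(PKT_TX_IP_CKSUM));	/* -> 0x1 */
	return 0;
}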
 
 uint16_t
                        if (em_xmit_cleanup(txq) != 0) {
                                /* Could not clean any descriptors */
                                if (nb_tx == 0)
-                                       return (0);
+                                       return 0;
                                goto end_of_tx;
                        }
                }
        E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
        txq->tx_tail = tx_id;
 
-       return (nb_tx);
+       return nb_tx;
 }
 
 /*********************************************************************
                pkt_flags |= PKT_RX_IP_CKSUM_BAD;
        if (rx_error & E1000_RXD_ERR_TCPE)
                pkt_flags |= PKT_RX_L4_CKSUM_BAD;
-       return (pkt_flags);
+       return pkt_flags;
 }
 
 uint16_t
                nb_hold = 0;
        }
        rxq->nb_rx_hold = nb_hold;
-       return (nb_rx);
+       return nb_rx;
 }
 
 uint16_t
                nb_hold = 0;
        }
        rxq->nb_rx_hold = nb_hold;
-       return (nb_rx);
+       return nb_rx;
 }
 
 #define        EM_MAX_BUF_SIZE     16384
        tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
                                      RTE_CACHE_LINE_SIZE, socket_id);
        if (tz == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        /* Allocate the tx queue data structure. */
        if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
                        RTE_CACHE_LINE_SIZE)) == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        /* Allocate software ring */
        if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
                        sizeof(txq->sw_ring[0]) * nb_desc,
                        RTE_CACHE_LINE_SIZE)) == NULL) {
                em_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        txq->nb_tx_desc = nb_desc;
        em_reset_tx_queue(txq);
 
        dev->data->tx_queues[queue_idx] = txq;
-       return (0);
+       return 0;
 }
 
 static void
        if (nb_desc % EM_RXD_ALIGN != 0 ||
                        (nb_desc > E1000_MAX_RING_DESC) ||
                        (nb_desc < E1000_MIN_RING_DESC)) {
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /*
        if (rx_conf->rx_drop_en) {
                PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
                             "device");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /* Free memory prior to re-allocation if needed. */
        rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
                                      RTE_CACHE_LINE_SIZE, socket_id);
        if (rz == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        /* Allocate the RX queue data structure. */
        if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
                        RTE_CACHE_LINE_SIZE)) == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        /* Allocate software ring. */
        if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
                        sizeof (rxq->sw_ring[0]) * nb_desc,
                        RTE_CACHE_LINE_SIZE)) == NULL) {
                em_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        rxq->mb_pool = mp;
        dev->data->rx_queues[queue_idx] = rxq;
        em_reset_rx_queue(rxq);
 
-       return (0);
+       return 0;
 }
 
 uint32_t
                        i++) {
                if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
                        *bufsz = bufsz_to_rctl[i].bufsz;
-                       return (bufsz_to_rctl[i].rctl);
+                       return bufsz_to_rctl[i].rctl;
                }
        }
 
        /* Should never happen. */
-       return (-EINVAL);
+       return -EINVAL;
 }
 
 static int
                if (mbuf == NULL) {
                        PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
                                     "queue_id=%hu", rxq->queue_id);
-                       return (-ENOMEM);
+                       return -ENOMEM;
                }
 
                dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
 
 err_late:
        igb_hw_control_release(hw);
 
-       return (error);
+       return error;
 }
 
 static int
        PMD_INIT_FUNC_TRACE();
 
        rte_eth_driver_register(&rte_igbvf_pmd);
-       return (0);
+       return 0;
 }
 
 static int
        /* Initialize the hardware */
        if (igb_hardware_init(hw)) {
                PMD_INIT_LOG(ERR, "Unable to initialize the hardware");
-               return (-EIO);
+               return -EIO;
        }
        adapter->stopped = 0;
 
 
        PMD_INIT_LOG(DEBUG, "<<");
 
-       return (0);
+       return 0;
 
 error_invalid_config:
        PMD_INIT_LOG(ERR, "Invalid link_speed/link_duplex (%u/%u) for port %u",
                     dev->data->dev_conf.link_speed,
                     dev->data->dev_conf.link_duplex, dev->data->port_id);
        igb_dev_clear_queues(dev);
-       return (-EINVAL);
+       return -EINVAL;
 }
 
 /*********************************************************************
 
        diag = e1000_init_hw(hw);
        if (diag < 0)
-               return (diag);
+               return diag;
 
        E1000_WRITE_REG(hw, E1000_VET, ETHER_TYPE_VLAN << 16 | ETHER_TYPE_VLAN);
        e1000_get_phy_info(hw);
        e1000_check_for_link(hw);
 
-       return (0);
+       return 0;
 }
 
 /* This function is based on igb_update_stats_counters() in igb/if_igb.c */
        struct e1000_hw *hw;
 
        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+       return e1000_led_on(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
        struct e1000_hw *hw;
 
        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP);
+       return e1000_led_off(hw) == E1000_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
            (fc_conf->high_water < fc_conf->low_water)) {
                PMD_INIT_LOG(ERR, "e1000 incorrect high/low water value");
                PMD_INIT_LOG(ERR, "high water must <=  0x%x", max_high_water);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        hw->fc.requested_mode = rte_fcmode_2_e1000_fcmode[fc_conf->mode];
        }
 
        PMD_INIT_LOG(ERR, "e1000_setup_link_generic = 0x%x", err);
-       return (-EIO);
+       return -EIO;
 }
 
 #define E1000_RAH_POOLSEL_SHIFT      (18)
 
 
        m = __rte_mbuf_raw_alloc(mp);
        __rte_mbuf_sanity_check_raw(m, 0);
-       return (m);
+       return m;
 }
 
 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
        }
 
        /* Mismatch, use the previous context */
-       return (IGB_CTX_NUM);
+       return IGB_CTX_NUM;
 }
 
 static inline uint32_t
                 */
                if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
                        if (nb_tx == 0)
-                               return (0);
+                               return 0;
                        goto end_of_tx;
                }
 
                   (unsigned) tx_id, (unsigned) nb_tx);
        txq->tx_tail = tx_id;
 
-       return (nb_tx);
+       return nb_tx;
 }
 
 /*********************************************************************
                nb_hold = 0;
        }
        rxq->nb_rx_hold = nb_hold;
-       return (nb_rx);
+       return nb_rx;
 }
 
 uint16_t
                nb_hold = 0;
        }
        rxq->nb_rx_hold = nb_hold;
-       return (nb_rx);
+       return nb_rx;
 }
 
 /*
        txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
                                                        RTE_CACHE_LINE_SIZE);
        if (txq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        /*
         * Allocate TX ring hardware descriptors. A memzone large enough to
                                      E1000_ALIGN, socket_id);
        if (tz == NULL) {
                igb_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        txq->nb_tx_desc = nb_desc;
                                   RTE_CACHE_LINE_SIZE);
        if (txq->sw_ring == NULL) {
                igb_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
        dev->tx_pkt_burst = eth_igb_xmit_pkts;
        dev->data->tx_queues[queue_idx] = txq;
 
-       return (0);
+       return 0;
 }
 
 static void
        if (nb_desc % IGB_RXD_ALIGN != 0 ||
                        (nb_desc > E1000_MAX_RING_DESC) ||
                        (nb_desc < E1000_MIN_RING_DESC)) {
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /* Free memory prior to re-allocation if needed */
        rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->pthresh = rx_conf->rx_thresh.pthresh;
                                      E1000_ALIGN, socket_id);
        if (rz == NULL) {
                igb_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
        rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
                                   RTE_CACHE_LINE_SIZE);
        if (rxq->sw_ring == NULL) {
                igb_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
                if (mbuf == NULL) {
                        PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
                                     "queue_id=%hu", rxq->queue_id);
-                       return (-ENOMEM);
+                       return -ENOMEM;
                }
                dma_addr =
                        rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
 
        enic->fdir.hash = rte_hash_create(&hash_params);
        memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats));
        enic->fdir.stats.free = ENICPMD_FDIR_MAX;
-       return (NULL == enic->fdir.hash);
+       return NULL == enic->fdir.hash;
 }
 
 
        if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
                PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        if (vlan_id > ETH_VLAN_ID_MAX) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        vid_idx = FM10K_VFTA_IDX(vlan_id);
        if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
                PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
                        "in the VLAN filter table");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        fm10k_mbx_lock(hw);
        fm10k_mbx_unlock(hw);
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
-               return (-EIO);
+               return -EIO;
        }
 
        for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
        }
        if (result != FM10K_SUCCESS) {
                PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
-               return (-EIO);
+               return -EIO;
        }
 
        if (on) {
                        rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
                        FM10K_RX_FREE_THRESH_MIN(q),
                        FM10K_RX_FREE_THRESH_DIV(q));
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        q->alloc_thresh = rx_free_thresh;
        /* make sure the mempool element size can account for alignment. */
        if (!mempool_element_size_valid(mp)) {
                PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /* make sure a valid number of descriptors have been requested */
                        "and a multiple of %u",
                        nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
                        FM10K_MULT_RX_DESC);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /*
                                socket_id);
        if (q == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* setup queue */
        q->tail_ptr = (volatile uint32_t *)
                &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
        if (handle_rxconf(q, conf))
-               return (-EINVAL);
+               return -EINVAL;
 
        /* allocate memory for the software ring */
        q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
        if (q->sw_ring == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate software ring");
                rte_free(q);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /*
                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
                rte_free(q->sw_ring);
                rte_free(q);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        q->hw_ring = mz->addr;
        q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
                        tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
                        FM10K_TX_FREE_THRESH_MIN(q),
                        FM10K_TX_FREE_THRESH_DIV(q));
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        q->free_thresh = tx_free_thresh;
                        tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
                        FM10K_TX_RS_THRESH_MIN(q),
                        FM10K_TX_RS_THRESH_DIV(q));
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        q->rs_thresh = tx_rs_thresh;
                        "and a multiple of %u",
                        nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
                        FM10K_MULT_TX_DESC);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /*
                                socket_id);
        if (q == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* setup queue */
        q->tail_ptr = (volatile uint32_t *)
                &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
        if (handle_txconf(q, conf))
-               return (-EINVAL);
+               return -EINVAL;
 
        /* allocate memory for the software ring */
        q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
        if (q->sw_ring == NULL) {
                PMD_INIT_LOG(ERR, "Cannot allocate software ring");
                rte_free(q);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /*
                PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
                rte_free(q->sw_ring);
                rte_free(q);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        q->hw_ring = mz->addr;
        q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
                PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
                rte_free(q->sw_ring);
                rte_free(q);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        dev->data->tx_queues[queue_id] = q;
 
        pool->num_free -= valid_entry->len;
        pool->num_alloc += valid_entry->len;
 
-       return (valid_entry->base + pool->base);
+       return valid_entry->base + pool->base;
 }
 
 /**
 
                interval = I40E_QUEUE_ITR_INTERVAL_DEFAULT;
 
        /* Convert to hardware count, as writing each 1 represents 2 us */
-       return (interval / 2);
+       return interval / 2;
 }
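
Per the comment above, the hardware interrupt-throttling register counts
in 2 us units, so a microsecond interval is halved before being written.
A tiny sketch, with an assumed stand-in for the default interval:

#include <stdio.h>
#include <stdint.h>

#define QUEUE_ITR_INTERVAL_DEFAULT 32	/* us, assumed for illustration */

static uint16_t
itr_us_to_hw_count(uint16_t interval_us)
{
	if (interval_us == 0)
		interval_us = QUEUE_ITR_INTERVAL_DEFAULT;
	/* Each hardware count represents 2 us. */
	return interval_us / 2;
}

int
main(void)
{
	printf("%u\n", itr_us_to_hw_count(32));	/* 32 us -> 16 counts */
	return 0;
}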
 
 #define I40E_VALID_FLOW(flow_type) \
 
                PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u",
                            args->ops, info.ops);
 
-       return (err | info.result);
+       return err | info.result;
 }
 
 /*
 
        mask |= PKT_TX_IEEE1588_TMST;
 #endif
 
-       return ((flags & mask) ? 1 : 0);
+       return (flags & mask) ? 1 : 0;
 }
 
 /* set i40e TSO context descriptor */
        if (!rxq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
                            "rx queue data structure");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        rxq->mp = mp;
        rxq->nb_rx_desc = nb_desc;
        if (!rz) {
                i40e_dev_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for RX");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Zero all the descriptors in the ring. */
        if (!rxq->sw_ring) {
                i40e_dev_rx_queue_release(rxq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW ring");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        i40e_reset_rx_queue(rxq);
        if (!txq) {
                PMD_DRV_LOG(ERR, "Failed to allocate memory for "
                            "tx queue structure");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Allocate TX hardware ring descriptors. */
        if (!tz) {
                i40e_dev_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to reserve DMA memory for TX");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        txq->nb_tx_desc = nb_desc;
        if (!txq->sw_ring) {
                i40e_dev_tx_queue_release(txq);
                PMD_DRV_LOG(ERR, "Failed to allocate memory for SW TX ring");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        i40e_reset_tx_queue(txq);
 
        } else {
                media_type = ixgbe_get_media_type_82599(hw);
        }
-       return (media_type);
+       return media_type;
 }
 
 /*
                 hw->mac.ops.flap_tx_laser = NULL;
        }
 
-       return (rc);
+       return rc;
 }
 
         */
        *state = (by_ctl >> BYPASS_STATUS_OFF_SHIFT) &  BYPASS_STATUS_OFF_MASK;
 
-       return (ret_val);
+       return ret_val;
 }
 
 
 
         */
        if ((diag != IXGBE_SUCCESS) && (diag != IXGBE_ERR_INVALID_MAC_ADDR)) {
                PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-               return (diag);
+               return diag;
        }
 
        /* negotiate mailbox API version to use with the PF. */
 
                default:
                        PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
-                       return (-EIO);
+                       return -EIO;
        }
 
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
        PMD_INIT_FUNC_TRACE();
 
        rte_eth_driver_register(&rte_ixgbevf_pmd);
-       return (0);
+       return 0;
 }
 
 static int
 
        ixgbe_restore_statistics_mapping(dev);
 
-       return (0);
+       return 0;
 
 error:
        PMD_INIT_LOG(ERR, "failure in ixgbe_dev_start(): %d", err);
        struct ixgbe_hw *hw;
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+       return ixgbe_led_on(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
        struct ixgbe_hw *hw;
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       return (ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP);
+       return ixgbe_led_off(hw, 0) == IXGBE_SUCCESS ? 0 : -ENOTSUP;
 }
 
 static int
                (fc_conf->high_water < fc_conf->low_water)) {
                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
                PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[fc_conf->mode];
            (pfc_conf->fc.high_water <= pfc_conf->fc.low_water)) {
                PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
                PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        hw->fc.requested_mode = rte_fcmode_2_ixgbe_fcmode[pfc_conf->fc.mode];
        reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
        if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
                PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
-               return (-1);
+               return -1;
        }
 
        return 0;
 
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        vector = ixgbe_uta_vector(hw,mac_addr);
        uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
 
        /* The UTA table only exists on 82599 hardware and newer */
        if (hw->mac.type < ixgbe_mac_82599EB)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        if(on) {
                for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
        if (hw->mac.type == ixgbe_mac_82598EB) {
                PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
                             " on 82599 hardware and newer");
-               return (-ENOTSUP);
+               return -ENOTSUP;
        }
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
 
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
        reg = IXGBE_READ_REG(hw, addr);
                IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
        for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
                if (pool_mask & ((uint64_t)(1ULL << pool_idx)))
                        ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
                (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
 
        if (ixgbe_vmdq_mode_check(hw) < 0)
-               return (-ENOTSUP);
+               return -ENOTSUP;
 
        memset(&mr_info->mr_conf[rule_id], 0,
                sizeof(struct rte_eth_mirror_conf));
 
 
        m = __rte_mbuf_raw_alloc(mp);
        __rte_mbuf_sanity_check_raw(m, 0);
-       return (m);
+       return m;
 }
 
 
        }
 
        /* Mismatch, use the previous context */
-       return (IXGBE_CTX_NUM);
+       return IXGBE_CTX_NUM;
 }
 
 static inline uint32_t
        txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
 
        /* No Error */
-       return (0);
+       return 0;
 }
 
 uint16_t
                        if (ixgbe_xmit_cleanup(txq) != 0) {
                                /* Could not clean any descriptors */
                                if (nb_tx == 0)
-                                       return (0);
+                                       return 0;
                                goto end_of_tx;
                        }
 
                                                 * descriptors
                                                 */
                                                if (nb_tx == 0)
-                                                       return (0);
+                                                       return 0;
                                                goto end_of_tx;
                                        }
                                }
        IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
        txq->tx_tail = tx_id;
 
-       return (nb_tx);
+       return nb_tx;
 }
 
 /*********************************************************************
        diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
                                    rxq->rx_free_thresh);
        if (unlikely(diag != 0))
-               return (-ENOMEM);
+               return -ENOMEM;
 
        rxdp = &rxq->rx_ring[alloc_idx];
        for (i = 0; i < rxq->rx_free_thresh; ++i) {
                nb_hold = 0;
        }
        rxq->nb_rx_hold = nb_hold;
-       return (nb_rx);
+       return nb_rx;
 }
 
 /**
        txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct ixgbe_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        /*
         * Allocate TX ring hardware descriptors. A memzone large enough to
                        IXGBE_ALIGN, socket_id);
        if (tz == NULL) {
                ixgbe_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        txq->nb_tx_desc = nb_desc;
                                RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->sw_ring == NULL) {
                ixgbe_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
                     txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
        dev->data->tx_queues[queue_idx] = txq;
 
 
-       return (0);
+       return 0;
 }
 
 /**
        if (nb_desc % IXGBE_RXD_ALIGN != 0 ||
                        (nb_desc > IXGBE_MAX_RING_DESC) ||
                        (nb_desc < IXGBE_MIN_RING_DESC)) {
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /* Free memory prior to re-allocation if needed... */
        rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct ixgbe_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
        rxq->mb_pool = mp;
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
                                      RX_RING_SZ, IXGBE_ALIGN, socket_id);
        if (rz == NULL) {
                ixgbe_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /*
                                          RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq->sw_ring) {
                ixgbe_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /*
                                   RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq->sw_sc_ring) {
                ixgbe_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
                if (mbuf == NULL) {
                        PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
                                     (unsigned) rxq->queue_id);
-                       return (-ENOMEM);
+                       return -ENOMEM;
                }
 
                rte_mbuf_refcnt_set(mbuf, 1);
 
        /* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
         * has been requested. */
        if (priv->promisc_req)
-               return (type == HASH_RXQ_FLOW_TYPE_PROMISC);
+               return type == HASH_RXQ_FLOW_TYPE_PROMISC;
        switch (type) {
        case HASH_RXQ_FLOW_TYPE_PROMISC:
                return !!priv->promisc_req;
 
 
        for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
                r |= (v & 1);
-       return (l + r);
+       return l + r;
 }
 
 #endif /* RTE_PMD_MLX5_UTILS_H_ */
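
For reference, the loop above computes ceil(log2(v)) for v > 0: l accumulates floor(log2(v)) while r records whether any bit below the most significant one is set. A minimal standalone sketch of the same idiom (function name is illustrative, not part of the tree):

#include <assert.h>

static unsigned int
ceil_log2(unsigned int v)
{
        unsigned int l, r;

        /* l counts shifts to reach the top bit; r ORs in every lower bit */
        for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
                r |= (v & 1);
        return l + r;   /* floor(log2(v)), plus 1 unless v is a power of two */
}

int
main(void)
{
        assert(ceil_log2(1) == 0);
        assert(ceil_log2(4) == 2);
        assert(ceil_log2(5) == 3);
        return 0;
}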
 
 mpipe_link_compare(struct rte_eth_link *link1,
                   struct rte_eth_link *link2)
 {
-       return ((*(uint64_t *)link1 == *(uint64_t *)link2)
-               ? -1 : 0);
+       return (*(uint64_t *)link1 == *(uint64_t *)link2)
+               ? -1 : 0;
 }
 
 static int
 
            (nb_desc > NFP_NET_MAX_RX_DESC) ||
            (nb_desc < NFP_NET_MIN_RX_DESC)) {
                RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        /*
        rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq == NULL)
-               return (-ENOMEM);
+               return -ENOMEM;
 
        /* HW queue mapping based on firmware configuration */
        rxq->qidx = queue_idx;
        if (tz == NULL) {
                RTE_LOG(ERR, PMD, "Error allocatig rx dma\n");
                nfp_net_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Saving physical and virtual addresses for the RX ring */
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (rxq->rxbufs == NULL) {
                nfp_net_rx_queue_release(rxq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
                if (mbuf == NULL) {
                        RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
                                (unsigned)rxq->qidx);
-                       return (-ENOMEM);
+                       return -ENOMEM;
                }
 
                dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (txq == NULL) {
                RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        /*
        if (tz == NULL) {
                RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
                nfp_net_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        txq->tx_count = nb_desc;
                                         RTE_CACHE_LINE_SIZE, socket_id);
        if (txq->txbufs == NULL) {
                nfp_net_tx_queue_release(txq);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
                   txq->txbufs, txq->txds, (unsigned long int)txq->dma);
 
        }
        if (vq == NULL) {
                PMD_INIT_LOG(ERR, "%s: Can not allocate virtqueue", __func__);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
        if (queue_type == VTNET_RQ && vq->sw_ring == NULL) {
                PMD_INIT_LOG(ERR, "%s: Can not allocate RX soft ring",
 
        if (rxmode->hw_ip_checksum) {
                PMD_DRV_LOG(ERR, "HW IP checksum not supported");
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        hw->vlan_strip = rxmode->hw_vlan_strip;
 
        if (d)
                closedir(d);
 
-       return (d != NULL);
+       return d != NULL;
 }
 
 /* Extract I/O port numbers from sysfs */
 
 static inline bool
 vmxnet3_cmd_ring_desc_empty(struct vmxnet3_cmd_ring *ring)
 {
-       return (ring->next2comp == ring->next2fill);
+       return ring->next2comp == ring->next2fill;
 }
 
 typedef struct vmxnet3_comp_ring {
 
 static inline int __attribute__((always_inline))
 virtqueue_full(const struct virtqueue *vq)
 {
-       return (vq->vq_free_cnt == 0);
+       return vq->vq_free_cnt == 0;
 }
 
 #define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
 
        if (string == NULL)
                return -1;
 
-       return (atoi(++string) + 1);
+       return atoi(++string) + 1;
 }
 
 #define FILE_LINUX_CPU_CORE_ID \
 
        *pkt2_color = color3_2;
        *pkt3_color = color3_3;
 
-       return (drop0 | (drop1 << 1) | (drop2 << 2) | (drop3 << 3));
+       return drop0 | (drop1 << 1) | (drop2 << 2) | (drop3 << 3);
 }
 
 PIPELINE_TABLE_AH_HIT_DROP_TIME(fa_table_ah_hit, pkt_work, pkt4_work);
 
                        txmb->tail = 0;
        }
 
-       return (fill);
+       return fill;
 }
 
 /* Enqueue a single packet, and send burst if queue is filled */
        if(++txmb->head == len)
                txmb->head = 0;
 
-       return (0);
+       return 0;
 }
 
 static inline void
        errno = 0;
        v = strtoul(str, &end, 10);
        if (errno != 0 || *end != '\0')
-               return (-EINVAL);
+               return -EINVAL;
 
        if (v < min || v > max)
-               return (-EINVAL);
+               return -EINVAL;
 
        *val = (uint32_t)v;
-       return (0);
+       return 0;
 }
 
 static int
        errno = 0;
        v = strtoul(str, &end, 10);
        if (errno != 0)
-               return (-EINVAL);
+               return -EINVAL;
 
        if (*end != '\0') {
                if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
                        v *= MS_PER_S;
                else if (strncmp(frmt_msec, end, sizeof (frmt_msec)) != 0)
-                       return (-EINVAL);
+                       return -EINVAL;
        }
 
        if (v < min || v > max)
-               return (-EINVAL);
+               return -EINVAL;
 
        *val = (uint32_t)v;
-       return (0);
+       return 0;
 }
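
The second parser above accepts an optional unit suffix: a bare number or "ms" is taken as milliseconds, while an "s" suffix scales by MS_PER_S. A self-contained sketch of the same pattern, with the min/max range check omitted for brevity (the helper name is an assumption):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MS_PER_S 1000

static int
parse_timeval(const char *str, uint32_t *val)
{
        static const char frmt_sec[] = "s";
        static const char frmt_msec[] = "ms";
        char *end;
        unsigned long v;

        errno = 0;
        v = strtoul(str, &end, 10);
        if (errno != 0)
                return -EINVAL;
        if (*end != '\0') {
                /* sizeof() includes the NUL, so the suffix must match exactly */
                if (strncmp(frmt_sec, end, sizeof(frmt_sec)) == 0)
                        v *= MS_PER_S;          /* "10s" -> 10000 */
                else if (strncmp(frmt_msec, end, sizeof(frmt_msec)) != 0)
                        return -EINVAL;
        }
        *val = (uint32_t)v;
        return 0;
}

int
main(void)
{
        uint32_t ms;

        if (parse_timeval("10s", &ms) == 0)
                printf("10s -> %u ms\n", (unsigned)ms);
        return 0;
}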
 
 static int
                                                optarg,
                                                lgopts[option_index].name);
                                        print_usage(prgname);
-                                       return (ret);
+                                       return ret;
                                }
                        }
 
                                                optarg,
                                                lgopts[option_index].name);
                                        print_usage(prgname);
-                                       return (ret);
+                                       return ret;
                                }
                        }
 
 
        for (n = 0; v != 0; v &= v - 1, n++)
                ;
 
-       return (n);
+       return n;
 }
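
The loop above is Kernighan's population count: v &= v - 1 clears the lowest set bit, so the body executes once per set bit rather than once per bit position. A minimal standalone sketch:

#include <assert.h>
#include <stdint.h>

static unsigned int
popcount32(uint32_t v)
{
        unsigned int n;

        for (n = 0; v != 0; v &= v - 1, n++)
                ;
        return n;
}

int
main(void)
{
        assert(popcount32(0) == 0);
        assert(popcount32(0xF0) == 4);
        assert(popcount32(UINT32_C(0xFFFFFFFF)) == 32);
        return 0;
}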
 
 /**
 
        /* Create new mbuf for the header. */
        if (unlikely ((hdr = rte_pktmbuf_alloc(header_pool)) == NULL))
-               return (NULL);
+               return NULL;
 
        /* If requested, then make a new clone packet. */
        if (use_clone != 0 &&
            unlikely ((pkt = rte_pktmbuf_clone(pkt, clone_pool)) == NULL)) {
                rte_pktmbuf_free(hdr);
-               return (NULL);
+               return NULL;
        }
 
        /* prepend new header */
        hdr->ol_flags = pkt->ol_flags;
 
        __rte_mbuf_sanity_check(hdr, 1);
-       return (hdr);
+       return hdr;
 }
 
 /*
        if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
                return 0;
 
-       return ((uint32_t)pm);
+       return (uint32_t)pm;
 }
 
 static int
        n = strtoul(q_arg, &end, 0);
        if (errno != 0 || end == NULL || *end != '\0' ||
                        n == 0 || n >= MAX_RX_QUEUE_PER_LCORE)
-               return (-1);
+               return -1;
 
-       return (n);
+       return n;
 }
 
 /* Parse the argument given in the command line of the application */
 
        init_val = rte_jhash_1word(k->ip_dst, init_val);
        init_val = rte_jhash_1word(*p, init_val);
 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
-       return (init_val);
+       return init_val;
 }
 
 static inline uint32_t
        init_val = rte_jhash(k->ip_dst, sizeof(uint8_t) * IPV6_ADDR_LEN, init_val);
        init_val = rte_jhash_1word(*p, init_val);
 #endif /* RTE_MACHINE_CPUFLAG_SSE4_2 */
-       return (init_val);
+       return init_val;
 }
 
 #define IPV4_L3FWD_NUM_ROUTES \
 
        pktmbuf_pool = rte_pktmbuf_pool_create(PKTMBUF_POOL_NAME, num_mbufs,
                MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 
-       return (pktmbuf_pool == NULL); /* 0  on success */
+       return pktmbuf_pool == NULL; /* 0 on success */
 }
 
 /**
 
        ret = optind-1;
        optind = 0; /* reset getopt lib */
 
-       return (ret);
+       return ret;
 }
 
 /*
 
        err = rte_netmap_ioctl(port->fd, NIOCGINFO, &req);
        if (err) {
                printf("[E] NIOCGINFO ioctl failed (error %d)\n", err);
-               return (err);
+               return err;
        }
 
        snprintf(req.nr_name, sizeof(req.nr_name), "%s", port->str);
        err = rte_netmap_ioctl(port->fd, NIOCREGIF, &req);
        if (err) {
                printf("[E] NIOCREGIF ioctl failed (error %d)\n", err);
-               return (err);
+               return err;
        }
 
        /* mmap only once. */
 
        if (ports.mem == MAP_FAILED) {
                printf("[E] NETMAP mmap failed for fd: %d)\n", port->fd);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        port->nmif = NETMAP_IF(ports.mem, req.nr_offset);
        port->tx_ring = NETMAP_TXRING(port->nmif, 0);
        port->rx_ring = NETMAP_RXRING(port->nmif, 0);
 
-       return (0);
+       return 0;
 }
 
 
 
        portid = strtoul(ifname, &endptr, 10);
        if (endptr == ifname || *endptr != '\0' ||
                        portid >= RTE_DIM(ports) || errno != 0)
-               return (-EINVAL);
+               return -EINVAL;
 
        *port = (uint8_t)portid;
-       return (0);
+       return 0;
 }
 
 /**
                ;
 
        if (i == RTE_DIM(fd_port))
-               return (-ENOMEM);
+               return -ENOMEM;
 
        fd_port[i].port = FD_PORT_RSRV;
-       return (IDX_TO_FD(i));
+       return IDX_TO_FD(i);
 }
 
 static int32_t
        idx = FD_TO_IDX(fd);
 
        if (!FD_VALID(fd) || (port = fd_port[idx].port) == FD_PORT_FREE)
-               return (-EINVAL);
+               return -EINVAL;
 
        /* if we still have a valid port attached, release the port */
        if (port < RTE_DIM(ports) && ports[port].fd == idx) {
        }
 
        fd_port[idx].port = FD_PORT_FREE;
-       return (0);
+       return 0;
 }
 
 static int
        uint8_t portid;
 
        if (req == NULL)
-               return (-EINVAL);
+               return -EINVAL;
 
        if (req->nr_version != NETMAP_API) {
                req->nr_version = NETMAP_API;
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        if ((rc = ifname_to_portid(req->nr_name, &portid)) != 0) {
                RTE_LOG(ERR, USER1, "Invalid interface name:\"%s\" "
                        "in NIOCGINFO call\n", req->nr_name);
-               return (rc);
+               return rc;
        }
 
        if (ports[portid].pool == NULL) {
                RTE_LOG(ERR, USER1, "Misconfigured portid %hhu\n", portid);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        *port = portid;
-       return (0);
+       return 0;
 }
 
 /**
 
        req = (struct nmreq *)param;
        if ((rc = check_nmreq(req, &portid)) != 0)
-               return (rc);
+               return rc;
 
        req->nr_tx_rings = (uint16_t)(ports[portid].nr_tx_rings - 1);
        req->nr_rx_rings = (uint16_t)(ports[portid].nr_rx_rings - 1);
        req->nr_memsize = netmap.mem_sz;
        req->nr_offset = 0;
 
-       return (0);
+       return 0;
 }
 
 static void
        if (ports[port].fd < RTE_DIM(fd_port)) {
                RTE_LOG(ERR, USER1, "port %hhu already in use by fd: %u\n",
                        port, IDX_TO_FD(ports[port].fd));
-               return (-EBUSY);
+               return -EBUSY;
        }
        if (fd_port[idx].port != FD_PORT_RSRV) {
                RTE_LOG(ERR, USER1, "fd: %u is misconfigured\n",
                        IDX_TO_FD(idx));
-               return (-EBUSY);
+               return -EBUSY;
        }
 
        nmif = ports[port].nmif;
 
        /* only ALL rings supported right now. */
        if (req->nr_ringid != 0)
-               return (-EINVAL);
+               return -EINVAL;
 
        snprintf(nmif->ni_name, sizeof(nmif->ni_name), "%s", req->nr_name);
        nmif->ni_version  = req->nr_version;
                RTE_LOG(ERR, USER1,
                        "Couldn't start ethernet device %s (error %d)\n",
                        req->nr_name, rc);
-           return (rc);
+           return rc;
        }
 
        /* setup fdi <--> port relationship. */
        req->nr_memsize = netmap.mem_sz;
        req->nr_offset = (uintptr_t)nmif - (uintptr_t)netmap.mem;
 
-       return (0);
+       return 0;
 }
 
 /**
 
        req = (struct nmreq *)param;
        if ((rc = check_nmreq(req, &portid)) != 0)
-               return (rc);
+               return rc;
 
        idx = FD_TO_IDX(fd);
 
        rc = netmap_regif(req, idx, portid);
        rte_spinlock_unlock(&netmap_lock);
 
-       return (rc);
+       return rc;
 }
 
 static void
        }
 
        rte_spinlock_unlock(&netmap_lock);
-       return (rc);
+       return rc;
 }
 
 /**
                rc += r->avail;
        }
 
-       return (rc);
+       return rc;
 }
 
 /**
        idx = FD_TO_IDX(fd);
        if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
                        ports[port].fd == idx) {
-               return (rx_sync_if(fd_port[idx].port));
+               return rx_sync_if(fd_port[idx].port);
        } else {
-               return (-EINVAL);
+               return -EINVAL;
        }
 }
 
                rc += r->avail;
        }
 
-       return (rc);
+       return rc;
 }
 
 /**
        idx = FD_TO_IDX(fd);
        if ((port = fd_port[idx].port) < RTE_DIM(ports) &&
                        ports[port].fd == idx) {
-               return (tx_sync_if(fd_port[idx].port));
+               return tx_sync_if(fd_port[idx].port);
        } else {
-               return (-EINVAL);
+               return -EINVAL;
        }
 }
 
                        RTE_CACHE_LINE_SIZE, conf->socket_id)) == NULL) {
                RTE_LOG(ERR, USER1, "%s: failed to allocate %zu bytes\n",
                        __func__, sz);
-               return (-ENOMEM);
+               return -ENOMEM;
        }
 
        netmap.mem_sz = sz;
                fd_port[i].port = FD_PORT_FREE;
        }
 
-       return (0);
+       return 0;
 }
 
 
                        conf->nr_rx_rings > netmap.conf.max_rings) {
                RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
                        __func__, portid);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
                rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
                        rx_slots > netmap.conf.max_slots) {
                RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
                        __func__, portid);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        ret = rte_eth_dev_configure(portid, conf->nr_rx_rings,
 
        if (ret < 0) {
            RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid);
-           return (ret);
+           return ret;
        }
 
        for (i = 0; i < conf->nr_tx_rings; i++) {
                                "Couldn't configure TX queue %"PRIu16" of "
                                "port %"PRIu8"\n",
                                i, portid);
-                       return (ret);
+                       return ret;
                }
 
                ret = rte_eth_rx_queue_setup(portid, i, rx_slots,
                                "Couldn't configure RX queue %"PRIu16" of "
                                "port %"PRIu8"\n",
                                i, portid);
-                       return (ret);
+                       return ret;
                }
        }
 
        ports[portid].tx_burst = conf->tx_burst;
        ports[portid].rx_burst = conf->rx_burst;
 
-       return (0);
+       return 0;
 }
 
 int
                errno = -rc;
                rc = -1;
        }
-       return (rc);
+       return rc;
 }
 
 int rte_netmap_ioctl(int fd, uint32_t op, void *param)
 
        if (!FD_VALID(fd)) {
            errno = EBADF;
-           return (-1);
+           return -1;
        }
 
        switch (op) {
                ret = 0;
        }
 
-       return (ret);
+       return ret;
 }
 
 void *
                        ((flags & MAP_FIXED) != 0 && addr != NULL)) {
 
                errno = EINVAL;
-               return (MAP_FAILED);
+               return MAP_FAILED;
        }
 
        return (void *)((uintptr_t)netmap.mem + (uintptr_t)offset);
                errno = -fd;
                fd = -1;
        }
-       return (fd);
+       return fd;
 }
 
 /**
 
 static inline int __attribute__ ((always_inline))
 _lthread_queue_empty(struct lthread_queue *q)
 {
-       return (q->tail == q->head);
+       return q->tail == q->head;
 }
 
 
 
  */
 static inline int _lthread_sched_isdone(struct lthread_sched *sched)
 {
-       return ((sched->run_flag == 0) &&
+       return (sched->run_flag == 0) &&
                        (_lthread_queue_empty(sched->ready)) &&
                        (_lthread_queue_empty(sched->pready)) &&
-                       (sched->nb_blocked_threads == 0));
+                       (sched->nb_blocked_threads == 0);
 }
 
 /*
 
 
 static inline int str_is(const char *str, const char *is)
 {
-       return (strcmp(str, is) == 0);
+       return strcmp(str, is) == 0;
 }
 
 /* returns core mask used by DPDK */
 
 static inline int
 is_bit_set(int i, unsigned int mask)
 {
-    return ((1 << i) & mask);
+    return (1 << i) & mask;
 }
 
 #endif /* _MAIN_H_ */
 
 static inline int __attribute__((always_inline))
 ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
 {
-       return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);
+       return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
 }
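
The comparison above type-puns each 6-byte MAC into a 64-bit load, XORs the two values, and masks off the two bytes that lie beyond the address; MAC_ADDR_CMP is therefore assumed to be a 48-bit mask, which on a little-endian host covers address bytes 0-5. A standalone model using memcpy instead of pointer punning:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define MAC_ADDR_CMP 0x0000FFFFFFFFFFFFULL   /* low 6 bytes, little-endian */

int
main(void)
{
        uint64_t a = 0, b = 0;
        /* same MAC in bytes 0-5, different trailing padding */
        uint8_t mac1[8] = { 1, 2, 3, 4, 5, 6, 0xAA, 0xBB };
        uint8_t mac2[8] = { 1, 2, 3, 4, 5, 6, 0xCC, 0xDD };

        memcpy(&a, mac1, sizeof(a));
        memcpy(&b, mac2, sizeof(b));
        assert(((a ^ b) & MAC_ADDR_CMP) == 0);  /* compares equal */
        return 0;
}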
 
 /*
        }
        ll_new[i].next = NULL;
 
-       return (ll_new);
+       return ll_new;
 }
 
 /*
 
 static inline int __attribute__((always_inline))
 ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
 {
-       return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);
+       return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
 }
 
 /*
        }
        ll_new[i].next = NULL;
 
-       return (ll_new);
+       return ll_new;
 }
 
 /*
 
        xs = xs_daemon_open();
        if (xs == NULL) {
                RTE_LOG(ERR, XENHOST, "xs_daemon_open failed\n");
-               return (-1);
+               return -1;
        }
 
        ret = xs_watch(xs, "/local/domain", "mytoken");
        if (ret == 0) {
                RTE_LOG(ERR, XENHOST, "%s: xs_watch failed\n", __func__);
                xs_daemon_close(xs);
-               return (-1);
+               return -1;
        }
 
        /* We are notified of read availability on the watch via the file descriptor. */
                        return guest;
        }
 
-       return (NULL);
+       return NULL;
 }
 
 
 
                int field_index = r1->config->defs[n].field_index;
 
                if (r1->wildness[field_index] != r2->wildness[field_index])
-                       return (r1->wildness[field_index] -
-                               r2->wildness[field_index]);
+                       return r1->wildness[field_index] -
+                               r2->wildness[field_index];
        }
        return 0;
 }
 
 static inline __attribute__((always_inline)) uint32_t
 check_any_match_x4(uint64_t val[])
 {
-       return ((val[0] | val[1] | val[2] | val[3]) & RTE_ACL_NODE_MATCH);
+       return (val[0] | val[1] | val[2] | val[3]) & RTE_ACL_NODE_MATCH;
 }
 
 static inline __attribute__((always_inline)) void
 
 int
 rte_cfgfile_has_section(struct rte_cfgfile *cfg, const char *sectionname)
 {
-       return (_get_section(cfg, sectionname) != NULL);
+       return _get_section(cfg, sectionname) != NULL;
 }
 
 int
 rte_cfgfile_has_entry(struct rte_cfgfile *cfg, const char *sectionname,
                const char *entryname)
 {
-       return (rte_cfgfile_get_entry(cfg, sectionname, entryname) != NULL);
+       return rte_cfgfile_get_entry(cfg, sectionname, entryname) != NULL;
 }
 
        if (nb_qpairs > (dev_info.max_nb_queue_pairs)) {
                CDEV_LOG_ERR("Invalid num queue_pairs (%u) for dev %u",
                                nb_qpairs, dev->data->dev_id);
-           return (-EINVAL);
+           return -EINVAL;
        }
 
        if (dev->data->queue_pairs == NULL) { /* first time configuration */
 
        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        dev = &rte_crypto_devices[dev_id];
        if (dev->data->dev_started) {
                CDEV_LOG_ERR(
                    "device %d must be stopped to allow configuration", dev_id);
-               return (-EBUSY);
+               return -EBUSY;
        }
 
        /* Setup new number of queue pairs and reconfigure device. */
 
        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        dev = &rte_crypto_devices[dev_id];
 
        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        dev = &rte_crypto_devices[dev_id];
        if (queue_pair_id >= dev->data->nb_queue_pairs) {
                CDEV_LOG_ERR("Invalid queue_pair_id=%d", queue_pair_id);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        if (dev->data->dev_started) {
 
        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%d", dev_id);
-               return (-ENODEV);
+               return -ENODEV;
        }
 
        if (stats == NULL) {
        struct rte_cryptodev_callback *user_cb;
 
        if (!cb_fn)
-               return (-EINVAL);
+               return -EINVAL;
 
        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        dev = &rte_crypto_devices[dev_id];
        }
 
        rte_spinlock_unlock(&rte_cryptodev_cb_lock);
-       return ((user_cb == NULL) ? -ENOMEM : 0);
+       return (user_cb == NULL) ? -ENOMEM : 0;
 }
 
 int
        struct rte_cryptodev_callback *cb, *next;
 
        if (!cb_fn)
-               return (-EINVAL);
+               return -EINVAL;
 
        if (!rte_cryptodev_pmd_is_valid_dev(dev_id)) {
                CDEV_LOG_ERR("Invalid dev_id=%" PRIu8, dev_id);
-               return (-EINVAL);
+               return -EINVAL;
        }
 
        dev = &rte_crypto_devices[dev_id];
 
 eal_cpu_detected(unsigned lcore_id)
 {
        const unsigned ncpus = eal_get_ncpus();
-       return (lcore_id < ncpus);
+       return lcore_id < ncpus;
 }
 
                }
        }
 
-       return (len - MALLOC_ELEM_OVERHEAD - align);
+       return len - MALLOC_ELEM_OVERHEAD - align;
 }
 
 static const struct rte_memzone *
 
 
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
-       return (__atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
+       return __atomic_add_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
 }
 
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
-       return (__atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0);
+       return __atomic_sub_fetch(&v->cnt, 1, __ATOMIC_ACQUIRE) == 0;
 }
 
 /*------------------------- 32 bit atomic operations -------------------------*/
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");
 
-       return (ret == 0);
+       return ret == 0;
 }
 
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");
 
-       return (ret == 0);
+       return ret == 0;
 }
 /*------------------------- 64 bit atomic operations -------------------------*/
 
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");
 
-       return (ret == 0);
+       return ret == 0;
 }
 
 static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
                        : [cnt] "r" (&v->cnt)
                        : "cc", "xer", "memory");
 
-       return (ret == 0);
+       return ret == 0;
 }
 
 static inline int rte_atomic64_test_and_set(rte_atomic64_t *v)
 
  */
 static inline uint16_t rte_arch_bswap16(uint16_t _x)
 {
-       return ((_x >> 8) | ((_x << 8) & 0xff00));
+       return (_x >> 8) | ((_x << 8) & 0xff00);
 }
 
 /*
  */
 static inline uint32_t rte_arch_bswap32(uint32_t _x)
 {
-       return ((_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
-               ((_x << 24) & 0xff000000));
+       return (_x >> 24) | ((_x >> 8) & 0xff00) | ((_x << 8) & 0xff0000) |
+               ((_x << 24) & 0xff000000);
 }
 
 /*
 /* 64-bit mode */
 static inline uint64_t rte_arch_bswap64(uint64_t _x)
 {
-       return ((_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
+       return (_x >> 56) | ((_x >> 40) & 0xff00) | ((_x >> 24) & 0xff0000) |
                ((_x >> 8) & 0xff000000) | ((_x << 8) & (0xffULL << 32)) |
                ((_x << 24) & (0xffULL << 40)) |
-               ((_x << 40) & (0xffULL << 48)) | ((_x << 56)));
+               ((_x << 40) & (0xffULL << 48)) | ((_x << 56));
 }
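
These swaps are pure shift-and-mask compositions, so they are correct on any host endianness. A quick standalone check of the 32-bit variant (illustrative only):

#include <assert.h>
#include <stdint.h>

static uint32_t
bswap32(uint32_t x)
{
        return (x >> 24) | ((x >> 8) & 0xff00) | ((x << 8) & 0xff0000) |
                ((x << 24) & 0xff000000);
}

int
main(void)
{
        assert(bswap32(0x11223344) == 0x44332211);
        return 0;
}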
 
 #ifndef RTE_FORCE_INTRINSICS
 
 static inline int
 rte_spinlock_trylock(rte_spinlock_t *sl)
 {
-       return (__sync_lock_test_and_set(&sl->locked, 1) == 0);
+       return __sync_lock_test_and_set(&sl->locked, 1) == 0;
 }
 
 #endif
 
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
-       return (ret != 0);
+       return ret != 0;
 }
 
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
-       return (ret != 0);
+       return ret != 0;
 }
 
 /*------------------------- 32 bit atomic operations -------------------------*/
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
-       return (ret != 0);
+       return ret != 0;
 }
 
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
                        : [cnt] "+m" (v->cnt),  /* output */
                          [ret] "=qm" (ret)
                        );
-       return (ret != 0);
+       return ret != 0;
 }
 #endif
 
 
                        : "[lockval]" (lockval)
                        : "memory");
 
-       return (lockval == 0);
+       return lockval == 0;
 }
 #endif
 
 
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_inc_and_test(rte_atomic16_t *v)
 {
-       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+       return __sync_add_and_fetch(&v->cnt, 1) == 0;
 }
 #endif
 
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic16_dec_and_test(rte_atomic16_t *v)
 {
-       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+       return __sync_sub_and_fetch(&v->cnt, 1) == 0;
 }
 #endif
 
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_inc_and_test(rte_atomic32_t *v)
 {
-       return (__sync_add_and_fetch(&v->cnt, 1) == 0);
+       return __sync_add_and_fetch(&v->cnt, 1) == 0;
 }
 #endif
 
 #ifdef RTE_FORCE_INTRINSICS
 static inline int rte_atomic32_dec_and_test(rte_atomic32_t *v)
 {
-       return (__sync_sub_and_fetch(&v->cnt, 1) == 0);
+       return __sync_sub_and_fetch(&v->cnt, 1) == 0;
 }
 #endif
 
 
 static inline int
 rte_spinlock_trylock (rte_spinlock_t *sl)
 {
-       return (__sync_lock_test_and_set(&sl->locked,1) == 0);
+       return __sync_lock_test_and_set(&sl->locked, 1) == 0;
 }
 #endif
 
 
        struct rte_config *cfg = rte_eal_get_configuration();
        if (lcore_id >= RTE_MAX_LCORE)
                return 0;
-       return (cfg->lcore_role[lcore_id] != ROLE_OFF);
+       return cfg->lcore_role[lcore_id] != ROLE_OFF;
 }
 
 /**
 
        index = (log2 - MALLOC_MINSIZE_LOG2 + MALLOC_LOG2_INCREMENT - 1) /
                MALLOC_LOG2_INCREMENT;
 
-       return (index <= RTE_HEAP_NUM_FREELISTS-1?
-               index: RTE_HEAP_NUM_FREELISTS-1);
+       return index <= RTE_HEAP_NUM_FREELISTS - 1 ?
+               index : RTE_HEAP_NUM_FREELISTS - 1;
 }
 
 /*
 
 static inline int
 malloc_elem_cookies_ok(const struct malloc_elem *elem)
 {
-       return (elem != NULL &&
+       return elem != NULL &&
                        MALLOC_ELEM_HEADER(elem) == MALLOC_HEADER_COOKIE &&
-                       MALLOC_ELEM_TRAILER(elem) == MALLOC_TRAILER_COOKIE);
+                       MALLOC_ELEM_TRAILER(elem) == MALLOC_TRAILER_COOKIE;
 }
 
 #endif
 
                check_flag = RTE_MEMZONE_16GB;
        }
 
-       return (check_flag & flags);
+       return check_flag & flags;
 }
 
 /*
 
 static int
 is_ivshmem_device(struct rte_pci_device * dev)
 {
-       return (dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
-                       && dev->id.device_id == PCI_DEVICE_ID_IVSHMEM);
+       return dev->id.vendor_id == PCI_VENDOR_ID_IVSHMEM
+                       && dev->id.device_id == PCI_DEVICE_ID_IVSHMEM;
 }
 
 static void *
 
        mfn = mfn_offset + memseg[memseg_id].mfn[mfn_id];
 
        /** return machine address */
-       return (mfn * PAGE_SIZE + phy_addr % PAGE_SIZE);
+       return mfn * PAGE_SIZE + phy_addr % PAGE_SIZE;
 }
 
 int
 
        struct e1000_hw *hw = &adapter->hw;
        s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
 
-       return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+       return (i2cctl & E1000_I2C_DATA_IN) != 0;
 }
 
 /* igb_set_i2c_data - Sets the I2C data bit
        struct e1000_hw *hw = &adapter->hw;
        s32 i2cctl = E1000_READ_REG(hw, E1000_I2CPARAMS);
 
-       return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+       return (i2cctl & E1000_I2C_CLK_IN) != 0;
 }
 
 static const struct i2c_algo_bit_data igb_i2c_algo = {
        igb_lro_flush_all(q_vector);
 
 #endif /* IGB_NO_LRO */
-       return (total_packets < budget);
+       return total_packets < budget;
 }
 #else /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
 /**
        igb_lro_flush_all(q_vector);
 
 #endif /* IGB_NO_LRO */
-       return (total_packets < budget);
+       return total_packets < budget;
 }
 #endif /* CONFIG_IGB_DISABLE_PACKET_SPLIT */
 
 
        /* set the correct pool for the new PF MAC address in entry 0 */
        ret = ixgbe_add_mac_filter(adapter, hw->mac.addr,
                                    adapter->num_vfs);
-       return (ret > 0 ? 0 : ret);
+       return ret > 0 ? 0 : ret;
 }
 
 
 
 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
                  size_t size, int direction)
 {
-       return (((u64) (page - mem_map) << PAGE_SHIFT) + offset +
-               PCI_DRAM_OFFSET);
+       return ((u64) (page - mem_map) << PAGE_SHIFT) + offset +
+               PCI_DRAM_OFFSET;
 }
 
 #else /* CONFIG_HIGHMEM */
 
        KNI_DBG_RX("kni_rcvmsg expect_len %ld, flags 0x%08x, pkt_len %d\n",
                   (unsigned long)len, q->flags, pkt_len);
 
-       return (pkt_len + vnet_hdr_len);
+       return pkt_len + vnet_hdr_len;
 }
 
 /* dummy tap like ioctl */
 
  */
 static inline int is_unicast_ether_addr(const struct ether_addr *ea)
 {
-       return ((ea->addr_bytes[0] & ETHER_GROUP_ADDR) == 0);
+       return (ea->addr_bytes[0] & ETHER_GROUP_ADDR) == 0;
 }
 
 /**
  */
 static inline int is_multicast_ether_addr(const struct ether_addr *ea)
 {
-       return (ea->addr_bytes[0] & ETHER_GROUP_ADDR);
+       return ea->addr_bytes[0] & ETHER_GROUP_ADDR;
 }
 
 /**
  */
 static inline int is_universal_ether_addr(const struct ether_addr *ea)
 {
-       return ((ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) == 0);
+       return (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) == 0;
 }
 
 /**
  */
 static inline int is_local_admin_ether_addr(const struct ether_addr *ea)
 {
-       return ((ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) != 0);
+       return (ea->addr_bytes[0] & ETHER_LOCAL_ADMIN_ADDR) != 0;
 }
 
 /**
  */
 static inline int is_valid_assigned_ether_addr(const struct ether_addr *ea)
 {
-       return (is_unicast_ether_addr(ea) && (! is_zero_ether_addr(ea)));
+       return is_unicast_ether_addr(ea) && (! is_zero_ether_addr(ea));
 }
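
All of the predicates above key off bits of the first address byte: bit 0 distinguishes group (multicast/broadcast) from unicast addresses, bit 1 locally administered from universally administered ones. A hedged, self-contained usage sketch with a local stand-in for struct ether_addr:

#include <assert.h>
#include <stdint.h>

#define ETHER_GROUP_ADDR 0x01   /* assumed value of the group bit */

struct ether_addr {
        uint8_t addr_bytes[6];
};

static int
is_unicast_ether_addr(const struct ether_addr *ea)
{
        return (ea->addr_bytes[0] & ETHER_GROUP_ADDR) == 0;
}

int
main(void)
{
        struct ether_addr bcast = { { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } };
        struct ether_addr ucast = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };

        assert(!is_unicast_ether_addr(&bcast));
        assert(is_unicast_ether_addr(&ucast));
        return 0;
}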
 
 /**
 
 #else
        const __m128i x = _mm_cmpeq_epi32(k1, k2);
 
-       return (_mm_movemask_epi8(x) != 0xffff);
+       return _mm_movemask_epi8(x) != 0xffff;
 #endif
 }
 
 
 
        uint32_t tag = primary_hash >> all_bits_shift;
 
-       return (primary_hash ^ ((tag + 1) * alt_bits_xor));
+       return primary_hash ^ ((tag + 1) * alt_bits_xor);
 }
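
The helper above derives the secondary (cuckoo) signature from the primary hash alone: the tag, i.e. the hash's high bits, is mixed back in with a multiply-and-xor, so probing both buckets costs only one real hash computation. A standalone recomputation with hypothetical stand-in constants:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        /* illustrative values, not the library's actual parameters */
        const uint32_t all_bits_shift = 12;
        const uint32_t alt_bits_xor = 0x5bd1e995;
        uint32_t primary_hash = 0xdeadbeef;
        uint32_t tag = primary_hash >> all_bits_shift;
        uint32_t secondary = primary_hash ^ ((tag + 1) * alt_bits_xor);

        printf("primary=0x%08x secondary=0x%08x\n",
               (unsigned)primary_hash, (unsigned)secondary);
        return 0;
}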
 
 void
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
-                               return (prim_bkt->key_idx[i] - 1);
+                               return prim_bkt->key_idx[i] - 1;
                        }
                }
        }
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
-                               return (sec_bkt->key_idx[i] - 1);
+                               return sec_bkt->key_idx[i] - 1;
                        }
                }
        }
                prim_bkt->signatures[ret].current = sig;
                prim_bkt->signatures[ret].alt = alt_hash;
                prim_bkt->key_idx[ret] = new_idx;
-               return (new_idx - 1);
+               return new_idx - 1;
        }
 
        /* Error in addition, store new slot back in the ring and return error */
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
-                               return (bkt->key_idx[i] - 1);
+                               return bkt->key_idx[i] - 1;
                        }
                }
        }
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
-                               return (bkt->key_idx[i] - 1);
+                               return bkt->key_idx[i] - 1;
                        }
                }
        }
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
-                               return (bkt->key_idx[i] - 1);
+                               return bkt->key_idx[i] - 1;
                        }
                }
        }
                                 * Return index where key is stored,
                                 * subtracting the first dummy index
                                 */
-                               return (bkt->key_idx[i] - 1);
+                               return bkt->key_idx[i] - 1;
                        }
                }
        }
        /* Increment iterator */
        (*next)++;
 
-       return (position - 1);
+       return position - 1;
 }
 
                        IPv6_KEY_BYTES(p1[i].key.src_dst), p1[i].key.id, p1[i].start);
 
                if (ip_frag_key_cmp(key, &p1[i].key) == 0)
-                       return (p1 + i);
+                       return p1 + i;
                else if (ip_frag_key_is_empty(&p1[i].key))
                        empty = (empty == NULL) ? (p1 + i) : empty;
                else if (max_cycles + p1[i].start < tms)
                        IPv6_KEY_BYTES(p2[i].key.src_dst), p2[i].key.id, p2[i].start);
 
                if (ip_frag_key_cmp(key, &p2[i].key) == 0)
-                       return (p2 + i);
+                       return p2 + i;
                else if (ip_frag_key_is_empty(&p2[i].key))
                        empty = (empty == NULL) ? (p2 + i) : empty;
                else if (max_cycles + p2[i].start < tms)
 
                return 1 << (MAX_DEPTH_TBL24 - depth);
 
        /* Else if depth is greater than 24 */
-       return (1 << (RTE_LPM_MAX_DEPTH - depth));
+       return 1 << (RTE_LPM_MAX_DEPTH - depth);
 }
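
The two branches above size the span of table entries a rule occupies: a prefix of depth d covers 2^(24-d) tbl24 entries, or 2^(32-d) tbl8 entries once d exceeds 24. A self-contained sketch under those assumed constants:

#include <assert.h>

#define MAX_DEPTH_TBL24   24
#define RTE_LPM_MAX_DEPTH 32

static unsigned int
depth_to_range(unsigned int depth)
{
        if (depth <= MAX_DEPTH_TBL24)
                return 1 << (MAX_DEPTH_TBL24 - depth);
        return 1 << (RTE_LPM_MAX_DEPTH - depth);
}

int
main(void)
{
        assert(depth_to_range(22) == 4);    /* a /22 spans 4 tbl24 entries */
        assert(depth_to_range(24) == 1);
        assert(depth_to_range(30) == 4);    /* a /30 spans 4 tbl8 entries */
        return 0;
}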
 
 /*
 
                uintptr_t off;
 
                off = (const char *)elt - (const char *)mp->elt_va_start;
-               return (mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask));
+               return mp->elt_pa[off >> mp->pg_shift] + (off & mp->pg_mask);
        } else {
                /*
                 * If huge pages are disabled, we cannot assume the
 
 {
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
-       return (((cons_tail - prod_tail - 1) & r->prod.mask) == 0);
+       return ((cons_tail - prod_tail - 1) & r->prod.mask) == 0;
 }
 
 /**
 {
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
-       return ((prod_tail - cons_tail) & r->prod.mask);
+       return (prod_tail - cons_tail) & r->prod.mask;
 }
 
 /**
 {
        uint32_t prod_tail = r->prod.tail;
        uint32_t cons_tail = r->cons.tail;
-       return ((cons_tail - prod_tail - 1) & r->prod.mask);
+       return (cons_tail - prod_tail - 1) & r->prod.mask;
 }
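
The three helpers above depend on the ring size being a power of two: subtracting free-running 32-bit head/tail indices and masking with size - 1 gives the correct element count even across index wrap-around, and one slot is always left unused so that full and empty remain distinguishable. A small standalone model of that arithmetic (a hypothetical 16-slot ring, not the library type):

#include <assert.h>
#include <stdint.h>

int
main(void)
{
        const uint32_t mask = 16 - 1;           /* 16 slots, 15 usable */
        uint32_t cons_tail = UINT32_MAX - 3;    /* consumer 3 entries behind */
        uint32_t prod_tail = UINT32_MAX;        /* producer about to wrap */

        assert(((prod_tail - cons_tail) & mask) == 3);            /* count */
        assert(((cons_tail - prod_tail - 1) & mask) == mask - 3); /* free  */
        return 0;
}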
 
 /**
 
 static inline uint32_t
 less(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
 {
-       return (a*d < b*c);
+       return a*d < b*c;
 }
 
 static inline uint32_t
 less_or_equal(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
 {
-       return (a*d <= b*c);
+       return a*d <= b*c;
 }
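
less() and less_or_equal() compare the fractions a/b and c/d by cross-multiplication, avoiding a division; this is valid as long as b, d > 0 and the products fit in uint32_t. A minimal check:

#include <assert.h>
#include <stdint.h>

static uint32_t
less(uint32_t a, uint32_t b, uint32_t c, uint32_t d)
{
        return a * d < b * c;   /* a/b < c/d  <=>  a*d < b*c, for b, d > 0 */
}

int
main(void)
{
        assert(less(1, 3, 1, 2));       /* 1/3 < 1/2 */
        assert(!less(2, 4, 1, 2));      /* 2/4 == 1/2, not less */
        return 0;
}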
 
 /* check whether a/b is a valid approximation */
 
 static inline uint64_t
 __rte_bitmap_mask1_get(struct rte_bitmap *bmp)
 {
-       return ((~1lu) << bmp->offset1);
+       return (~1lu) << bmp->offset1;
 }
 
 static inline void
        index2 = pos >> RTE_BITMAP_SLAB_BIT_SIZE_LOG2;
        offset2 = pos & RTE_BITMAP_SLAB_BIT_MASK;
        slab2 = bmp->array2 + index2;
-       return ((*slab2) & (1lu << offset2));
+       return (*slab2) & (1lu << offset2);
 }
 
 /**
        v1 |= v2;
        v3 |= v4;
 
-       return (v1 | v3);
+       return v1 | v3;
 }
 
 /**
 
 rte_fast_rand(void)
 {
        rte_red_rand_seed = (214013 * rte_red_rand_seed) + 2531011;
-       return (rte_red_rand_seed >> 10);
+       return rte_red_rand_seed >> 10;
 }
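
rte_fast_rand() above is a linear congruential generator using the familiar 214013/2531011 constants; shifting out the low 10 bits drops the weakest (shortest-period) bits of the state. A standalone sketch with simplified seed handling:

#include <stdint.h>
#include <stdio.h>

static uint32_t fast_rand_seed = 1;     /* illustrative fixed seed */

static uint32_t
fast_rand(void)
{
        fast_rand_seed = (214013 * fast_rand_seed) + 2531011;
        return fast_rand_seed >> 10;    /* keep the higher-quality bits */
}

int
main(void)
{
        printf("%u %u %u\n", (unsigned)fast_rand(),
               (unsigned)fast_rand(), (unsigned)fast_rand());
        return 0;
}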
 
 /**
 
        size0 = sizeof(struct rte_sched_port);
        size1 = rte_sched_port_get_array_base(params, e_RTE_SCHED_PORT_ARRAY_TOTAL);
 
-       return (size0 + size1);
+       return size0 + size1;
 }
 
 static void
 {
        struct rte_sched_queue *queue = port->queue + qindex;
 
-       return (queue->qr == queue->qw);
+       return queue->qr == queue->qw;
 }
 
 #endif /* RTE_SCHED_DEBUG */