From: NĂ©lio Laranjeiro
Date: Mon, 5 Mar 2018 12:20:59 +0000 (+0100)
Subject: net/mlx5: mark parameters with unused attribute
X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=56f08e1671f99a15cf6b00027c2f7d81d69c4f5f;p=dpdk.git

net/mlx5: mark parameters with unused attribute

Replaces all (void)foo; by __rte_unused macro except when variables are
under #if statements.

Signed-off-by: Nelio Laranjeiro
Acked-by: Adrien Mazarguil
---

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 03a6a05dee..5aaf568e59 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -571,7 +571,8 @@ priv_uar_init_secondary(struct priv *priv)
  *   0 on success, negative errno value on failure.
  */
 static int
-mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+	       struct rte_pci_device *pci_dev)
 {
 	struct ibv_device **list;
 	struct ibv_device *ibv_dev;
@@ -588,7 +589,6 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	struct ibv_counter_set_description cs_desc;
 #endif
 
-	(void)pci_drv;
 	assert(pci_drv == &mlx5_driver);
 	/* Get mlx5_dev[] index. */
 	idx = mlx5_dev_idx(&pci_dev->addr);
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index f98fc4c3b0..0c383deba7 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -467,11 +467,9 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
  *
  * @param dev
  *   Pointer to Ethernet device structure.
- * @param wait_to_complete
- *   Wait for request completion (ignored).
  */
 static int
-mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct ethtool_cmd edata = {
@@ -483,7 +481,6 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
 
 	/* priv_lock() is not taken to allow concurrent calls. */
 
-	(void)wait_to_complete;
 	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
 		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
 		return -1;
@@ -533,11 +530,9 @@ mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
  *
  * @param dev
  *   Pointer to Ethernet device structure.
- * @param wait_to_complete
- *   Wait for request completion (ignored).
  */
 static int
-mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
 	struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
 	struct ifreq ifr;
@@ -545,7 +540,6 @@ mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
 	struct rte_eth_link dev_link;
 	uint64_t sc;
 
-	(void)wait_to_complete;
 	if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
 		WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
 		return -1;
@@ -675,7 +669,7 @@ priv_link_stop(struct priv *priv)
  *   Wait for request completion (ignored).
  */
 int
-priv_link_update(struct priv *priv, int wait_to_complete)
+priv_link_update(struct priv *priv, int wait_to_complete __rte_unused)
 {
 	struct rte_eth_dev *dev = priv->dev;
 	struct utsname utsname;
@@ -687,9 +681,9 @@ priv_link_update(struct priv *priv, int wait_to_complete)
 	    sscanf(utsname.release, "%d.%d.%d",
 		   &ver[0], &ver[1], &ver[2]) != 3 ||
 	    KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
-		ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+		ret = mlx5_link_update_unlocked_gset(dev);
 	else
-		ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+		ret = mlx5_link_update_unlocked_gs(dev);
 	/* If lsc interrupt is disabled, should always be ready for traffic. */
 	if (!dev->data->dev_conf.intr_conf.lsc) {
 		priv_link_start(priv);
@@ -741,7 +735,7 @@ priv_force_link_status_change(struct priv *priv, int status)
  *   Wait for request completion (ignored).
  */
 int
-mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
 {
 	struct priv *priv = dev->data->dev_private;
 	int ret;
diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c
index bf718f8947..f21d40a692 100644
--- a/drivers/net/mlx5/mlx5_flow.c
+++ b/drivers/net/mlx5/mlx5_flow.c
@@ -526,7 +526,7 @@ mlx5_flow_item_validate(const struct rte_flow_item *item,
  *   0 on success, errno value on failure.
  */
 static int
-priv_flow_convert_rss_conf(struct priv *priv,
+priv_flow_convert_rss_conf(struct priv *priv __rte_unused,
 			   struct mlx5_flow_parse *parser,
 			   const struct rte_eth_rss_conf *rss_conf)
 {
@@ -535,7 +535,6 @@ priv_flow_convert_rss_conf(struct priv *priv,
 	 * priv_flow_convert_actions() to initialize the parser with the
 	 * device default RSS configuration.
 	 */
-	(void)priv;
 	if (rss_conf) {
 		if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
 			return EINVAL;
@@ -568,13 +567,11 @@ priv_flow_convert_rss_conf(struct priv *priv,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-priv_flow_convert_attributes(struct priv *priv,
+priv_flow_convert_attributes(struct priv *priv __rte_unused,
 			     const struct rte_flow_attr *attr,
 			     struct rte_flow_error *error,
-			     struct mlx5_flow_parse *parser)
+			     struct mlx5_flow_parse *parser __rte_unused)
 {
-	(void)priv;
-	(void)parser;
 	if (attr->group) {
 		rte_flow_error_set(error, ENOTSUP,
 				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
@@ -779,7 +776,7 @@ exit_action_not_supported:
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-priv_flow_convert_items_validate(struct priv *priv,
+priv_flow_convert_items_validate(struct priv *priv __rte_unused,
 				 const struct rte_flow_item items[],
 				 struct rte_flow_error *error,
 				 struct mlx5_flow_parse *parser)
@@ -787,7 +784,6 @@ priv_flow_convert_items_validate(struct priv *priv,
 	const struct mlx5_flow_items *cur_item = mlx5_flow_items;
 	unsigned int i;
 
-	(void)priv;
 	/* Initialise the offsets to start after verbs attribute. */
 	for (i = 0; i != hash_rxq_init_n; ++i)
 		parser->queue[i].offset = sizeof(struct ibv_flow_attr);
@@ -871,14 +867,13 @@ exit_item_not_supported:
  *   A verbs flow attribute on success, NULL otherwise.
  */
 static struct ibv_flow_attr*
-priv_flow_convert_allocate(struct priv *priv,
+priv_flow_convert_allocate(struct priv *priv __rte_unused,
 			   unsigned int priority,
 			   unsigned int size,
 			   struct rte_flow_error *error)
 {
 	struct ibv_flow_attr *ibv_attr;
 
-	(void)priv;
 	ibv_attr = rte_calloc(__func__, 1, size, 0);
 	if (!ibv_attr) {
 		rte_flow_error_set(error, ENOMEM,
@@ -900,7 +895,8 @@ priv_flow_convert_allocate(struct priv *priv,
  *   Internal parser structure.
  */
 static void
-priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
+priv_flow_convert_finalise(struct priv *priv __rte_unused,
+			   struct mlx5_flow_parse *parser)
 {
 	const unsigned int ipv4 =
 		hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
@@ -911,7 +907,6 @@ priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
 	const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
 	unsigned int i;
 
-	(void)priv;
 	/* Remove any other flow not matching the pattern. */
 	if (parser->queues_n == 1) {
 		for (i = 0; i != hash_rxq_init_n; ++i) {
@@ -2432,11 +2427,10 @@ mlx5_ctrl_flow(struct rte_eth_dev *dev,
 int
 mlx5_flow_destroy(struct rte_eth_dev *dev,
 		  struct rte_flow *flow,
-		  struct rte_flow_error *error)
+		  struct rte_flow_error *error __rte_unused)
 {
 	struct priv *priv = dev->data->dev_private;
 
-	(void)error;
 	priv_lock(priv);
 	priv_flow_destroy(priv, &priv->flows, flow);
 	priv_unlock(priv);
@@ -2451,11 +2445,10 @@ mlx5_flow_destroy(struct rte_eth_dev *dev,
  */
 int
 mlx5_flow_flush(struct rte_eth_dev *dev,
-		struct rte_flow_error *error)
+		struct rte_flow_error *error __rte_unused)
 {
 	struct priv *priv = dev->data->dev_private;
 
-	(void)error;
 	priv_lock(priv);
 	priv_flow_flush(priv, &priv->flows);
 	priv_unlock(priv);
diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c
index e8a8d45940..a529dfeac7 100644
--- a/drivers/net/mlx5/mlx5_mac.c
+++ b/drivers/net/mlx5/mlx5_mac.c
@@ -88,12 +88,11 @@ mlx5_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
  */
 int
 mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
-		  uint32_t index, uint32_t vmdq)
+		  uint32_t index, uint32_t vmdq __rte_unused)
 {
 	unsigned int i;
 	int ret = 0;
 
-	(void)vmdq;
 	assert(index < MLX5_MAX_MAC_ADDRESSES);
 	/* First, make sure this address isn't already configured. */
 	for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c
index 857dfcd837..38a8e2f409 100644
--- a/drivers/net/mlx5/mlx5_mr.c
+++ b/drivers/net/mlx5/mlx5_mr.c
@@ -26,15 +26,12 @@ struct mlx5_check_mempool_data {
 /* Called by mlx5_check_mempool() when iterating the memory chunks. */
 static void
-mlx5_check_mempool_cb(struct rte_mempool *mp,
+mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused,
 		      void *opaque, struct rte_mempool_memhdr *memhdr,
-		      unsigned int mem_idx)
+		      unsigned int mem_idx __rte_unused)
 {
 	struct mlx5_check_mempool_data *data = opaque;
 
-	(void)mp;
-	(void)mem_idx;
-
 	/* It already failed, skip the next chunks. */
 	if (data->ret != 0)
 		return;
@@ -336,9 +333,8 @@ priv_mr_get(struct priv *priv, struct rte_mempool *mp)
  *   0 on success, errno on failure.
  */
 int
-priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr)
 {
-	(void)priv;
 	assert(mr);
 	DEBUG("Memory Region %p refcnt: %d",
 	      (void *)mr, rte_atomic32_read(&mr->refcnt));
diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 238fa7e563..8b9cc1dd04 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -910,9 +910,9 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv)
  *   Verbs Rx queue object.
  */
 int
-mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused,
+			     struct mlx5_rxq_ibv *rxq_ibv)
 {
-	(void)priv;
 	assert(rxq_ibv);
 	return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
 }
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 049f7e6c1f..93d794ede7 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -1899,11 +1899,10 @@ skip:
  *   Number of packets successfully transmitted (<= pkts_n).
  */
 uint16_t
-removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_tx_burst(void *dpdk_txq __rte_unused,
+		 struct rte_mbuf **pkts __rte_unused,
+		 uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_txq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
@@ -1924,11 +1923,10 @@ removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
  *   Number of packets successfully received (<= pkts_n).
  */
 uint16_t
-removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_rx_burst(void *dpdk_txq __rte_unused,
+		 struct rte_mbuf **pkts __rte_unused,
+		 uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_rxq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
@@ -1940,58 +1938,51 @@ removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
  */
 uint16_t __attribute__((weak))
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
+		      struct rte_mbuf **pkts __rte_unused,
+		      uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_txq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
 uint16_t __attribute__((weak))
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
+		  struct rte_mbuf **pkts __rte_unused,
+		  uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_txq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
 uint16_t __attribute__((weak))
-mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_rx_burst_vec(void *dpdk_txq __rte_unused,
+		  struct rte_mbuf **pkts __rte_unused,
+		  uint16_t pkts_n __rte_unused)
 {
-	(void)dpdk_rxq;
-	(void)pkts;
-	(void)pkts_n;
 	return 0;
 }
 
 int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+priv_check_raw_vec_tx_support(struct priv *priv __rte_unused,
+			      struct rte_eth_dev *dev __rte_unused)
 {
-	(void)priv;
-	(void)dev;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+priv_check_vec_tx_support(struct priv *priv __rte_unused,
+			  struct rte_eth_dev *dev __rte_unused)
 {
-	(void)priv;
-	(void)dev;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
 {
-	(void)rxq;
 	return -ENOTSUP;
 }
 
 int __attribute__((weak))
-priv_check_vec_rx_support(struct priv *priv)
+priv_check_vec_rx_support(struct priv *priv __rte_unused)
 {
-	(void)priv;
 	return -ENOTSUP;
 }
diff --git a/drivers/net/mlx5/mlx5_stats.c b/drivers/net/mlx5/mlx5_stats.c
index eb9c65dcc9..167e405480 100644
--- a/drivers/net/mlx5/mlx5_stats.c
+++ b/drivers/net/mlx5/mlx5_stats.c
@@ -488,7 +488,7 @@ unlock:
  *   Number of xstats names.
  */
 int
-mlx5_xstats_get_names(struct rte_eth_dev *dev,
+mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
 		      struct rte_eth_xstat_name *xstats_names,
 		      unsigned int n)
 {
 	struct priv *priv = dev->data->dev_private;
diff --git a/drivers/net/mlx5/mlx5_trigger.c b/drivers/net/mlx5/mlx5_trigger.c
index f5711a998b..72e8ff6440 100644
--- a/drivers/net/mlx5/mlx5_trigger.c
+++ b/drivers/net/mlx5/mlx5_trigger.c
@@ -340,9 +340,9 @@ error:
  *   0 on success.
  */
 int
-priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
+priv_dev_traffic_disable(struct priv *priv,
+			 struct rte_eth_dev *dev __rte_unused)
 {
-	(void)dev;
 	priv_flow_flush(priv, &priv->ctrl_flows);
 	return 0;
 }
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index ed1c713ead..071d88a1f2 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -578,9 +578,9 @@ mlx5_priv_txq_ibv_get(struct priv *priv, uint16_t idx)
  *   0 on success, errno on failure.
  */
 int
-mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused,
+			  struct mlx5_txq_ibv *txq_ibv)
 {
-	(void)priv;
 	assert(txq_ibv);
 	DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
 	      (void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
@@ -603,9 +603,9 @@ mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
  *   Verbs Tx queue object.
  */
 int
-mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused,
+			     struct mlx5_txq_ibv *txq_ibv)
 {
-	(void)priv;
 	assert(txq_ibv);
 	return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
 }
@@ -806,13 +806,10 @@ mlx5_priv_txq_get(struct priv *priv, uint16_t idx)
 		mlx5_priv_txq_ibv_get(priv, idx);
 		for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
-			struct mlx5_mr *mr = NULL;
-
-			(void)mr;
-			if (ctrl->txq.mp2mr[i]) {
-				mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
-				assert(mr);
-			}
+			if (ctrl->txq.mp2mr[i])
+				claim_nonzero
+					(priv_mr_get(priv,
+						     ctrl->txq.mp2mr[i]->mp));
 		}
 		rte_atomic32_inc(&ctrl->refcnt);
 		DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,
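
The change is mechanical throughout the driver. As a rough sketch of the
pattern being applied (example_cb_old()/example_cb_new() are made-up names
used only for illustration, not functions from this patch):

#include <rte_common.h>

/* Before: the otherwise unused parameter is referenced with a void cast to
 * silence -Wunused-parameter. */
static int
example_cb_old(void *ctx, int value)
{
	(void)ctx;
	return value;
}

/* After: the parameter carries the __rte_unused annotation, which
 * rte_common.h maps to the compiler's unused attribute
 * (__attribute__((unused))), so the dummy statement can go away. */
static int
example_cb_new(void *ctx __rte_unused, int value)
{
	return value;
}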