* 0 on success, negative errno value on failure.
*/
static int
-mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
{
struct ibv_device **list;
struct ibv_device *ibv_dev;
struct ibv_counter_set_description cs_desc;
#endif
- (void)pci_drv;
assert(pci_drv == &mlx5_driver);
/* Get mlx5_dev[] index. */
idx = mlx5_dev_idx(&pci_dev->addr);
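The change above is the pattern this patch applies throughout: the explicit (void)param; casts are dropped and the parameters themselves are tagged with __rte_unused. A minimal sketch of the equivalence, assuming __rte_unused is the rte_common.h wrapper around the compiler's unused attribute (the function names below are illustrative only):

#include <rte_common.h>

/* Before: an explicit cast silences -Wunused-parameter. */
static int
old_style(int arg)
{
	(void)arg;
	return 0;
}

/* After: the attribute on the parameter makes the cast unnecessary. */
static int
new_style(int arg __rte_unused)
{
	return 0;
}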
*
* @param dev
* Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
*/
static int
-mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gset(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct ethtool_cmd edata = {
/* priv_lock() is not taken to allow concurrent calls. */
- (void)wait_to_complete;
if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
return -1;
*
* @param dev
* Pointer to Ethernet device structure.
- * @param wait_to_complete
- * Wait for request completion (ignored).
*/
static int
-mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update_unlocked_gs(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
struct ethtool_link_settings gcmd = { .cmd = ETHTOOL_GLINKSETTINGS };
struct rte_eth_link dev_link;
uint64_t sc;
- (void)wait_to_complete;
if (priv_ifreq(priv, SIOCGIFFLAGS, &ifr)) {
WARN("ioctl(SIOCGIFFLAGS) failed: %s", strerror(errno));
return -1;
* Wait for request completion (ignored).
*/
int
-priv_link_update(struct priv *priv, int wait_to_complete)
+priv_link_update(struct priv *priv, int wait_to_complete __rte_unused)
{
struct rte_eth_dev *dev = priv->dev;
struct utsname utsname;
sscanf(utsname.release, "%d.%d.%d",
&ver[0], &ver[1], &ver[2]) != 3 ||
KERNEL_VERSION(ver[0], ver[1], ver[2]) < KERNEL_VERSION(4, 9, 0))
- ret = mlx5_link_update_unlocked_gset(dev, wait_to_complete);
+ ret = mlx5_link_update_unlocked_gset(dev);
else
- ret = mlx5_link_update_unlocked_gs(dev, wait_to_complete);
+ ret = mlx5_link_update_unlocked_gs(dev);
/* If lsc interrupt is disabled, should always be ready for traffic. */
if (!dev->data->dev_conf.intr_conf.lsc) {
priv_link_start(priv);
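priv_link_update() picks one of the two helpers above from the running kernel version: the legacy ETHTOOL_GSET path before 4.9, the ETHTOOL_GLINKSETTINGS path from 4.9 on. A standalone sketch of that check, assuming the usual KERNEL_VERSION() encoding from linux/version.h (use_glinksettings() is a hypothetical name):

#include <stdio.h>
#include <sys/utsname.h>
#include <linux/version.h>	/* KERNEL_VERSION() */

/* Return nonzero when ETHTOOL_GLINKSETTINGS (kernel >= 4.9) can be used. */
static int
use_glinksettings(void)
{
	struct utsname utsname;
	int ver[3];

	if (uname(&utsname) == -1 ||
	    sscanf(utsname.release, "%d.%d.%d",
		   &ver[0], &ver[1], &ver[2]) != 3)
		return 0; /* fall back to the legacy ETHTOOL_GSET path */
	return KERNEL_VERSION(ver[0], ver[1], ver[2]) >=
	       KERNEL_VERSION(4, 9, 0);
}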
* Wait for request completion (ignored).
*/
int
-mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
struct priv *priv = dev->data->dev_private;
int ret;
* 0 on success, errno value on failure.
*/
static int
-priv_flow_convert_rss_conf(struct priv *priv,
+priv_flow_convert_rss_conf(struct priv *priv __rte_unused,
struct mlx5_flow_parse *parser,
const struct rte_eth_rss_conf *rss_conf)
{
* priv_flow_convert_actions() to initialize the parser with the
* device default RSS configuration.
*/
- (void)priv;
if (rss_conf) {
if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
return EINVAL;
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert_attributes(struct priv *priv,
+priv_flow_convert_attributes(struct priv *priv __rte_unused,
const struct rte_flow_attr *attr,
struct rte_flow_error *error,
- struct mlx5_flow_parse *parser)
+ struct mlx5_flow_parse *parser __rte_unused)
{
- (void)priv;
- (void)parser;
if (attr->group) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
* 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
-priv_flow_convert_items_validate(struct priv *priv,
+priv_flow_convert_items_validate(struct priv *priv __rte_unused,
const struct rte_flow_item items[],
struct rte_flow_error *error,
struct mlx5_flow_parse *parser)
const struct mlx5_flow_items *cur_item = mlx5_flow_items;
unsigned int i;
- (void)priv;
/* Initialise the offsets to start after verbs attribute. */
for (i = 0; i != hash_rxq_init_n; ++i)
parser->queue[i].offset = sizeof(struct ibv_flow_attr);
* A verbs flow attribute on success, NULL otherwise.
*/
static struct ibv_flow_attr*
-priv_flow_convert_allocate(struct priv *priv,
+priv_flow_convert_allocate(struct priv *priv __rte_unused,
unsigned int priority,
unsigned int size,
struct rte_flow_error *error)
{
struct ibv_flow_attr *ibv_attr;
- (void)priv;
ibv_attr = rte_calloc(__func__, 1, size, 0);
if (!ibv_attr) {
rte_flow_error_set(error, ENOMEM,
* Internal parser structure.
*/
static void
-priv_flow_convert_finalise(struct priv *priv, struct mlx5_flow_parse *parser)
+priv_flow_convert_finalise(struct priv *priv __rte_unused,
+ struct mlx5_flow_parse *parser)
{
const unsigned int ipv4 =
hash_rxq_init[parser->layer].ip_version == MLX5_IPV4;
const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6;
unsigned int i;
- (void)priv;
/* Remove any other flow not matching the pattern. */
if (parser->queues_n == 1) {
for (i = 0; i != hash_rxq_init_n; ++i) {
int
mlx5_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
- struct rte_flow_error *error)
+ struct rte_flow_error *error __rte_unused)
{
struct priv *priv = dev->data->dev_private;
- (void)error;
priv_lock(priv);
priv_flow_destroy(priv, &priv->flows, flow);
priv_unlock(priv);
*/
int
mlx5_flow_flush(struct rte_eth_dev *dev,
- struct rte_flow_error *error)
+ struct rte_flow_error *error __rte_unused)
{
struct priv *priv = dev->data->dev_private;
- (void)error;
priv_lock(priv);
priv_flow_flush(priv, &priv->flows);
priv_unlock(priv);
*/
int
mlx5_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac,
- uint32_t index, uint32_t vmdq)
+ uint32_t index, uint32_t vmdq __rte_unused)
{
unsigned int i;
int ret = 0;
- (void)vmdq;
assert(index < MLX5_MAX_MAC_ADDRESSES);
/* First, make sure this address isn't already configured. */
for (i = 0; (i != MLX5_MAX_MAC_ADDRESSES); ++i) {
/* Called by mlx5_check_mempool() when iterating the memory chunks. */
static void
-mlx5_check_mempool_cb(struct rte_mempool *mp,
+mlx5_check_mempool_cb(struct rte_mempool *mp __rte_unused,
void *opaque, struct rte_mempool_memhdr *memhdr,
- unsigned int mem_idx)
+ unsigned int mem_idx __rte_unused)
{
struct mlx5_check_mempool_data *data = opaque;
- (void)mp;
- (void)mem_idx;
-
/* It already failed, skip the next chunks. */
if (data->ret != 0)
return;
* 0 on success, errno on failure.
*/
int
-priv_mr_release(struct priv *priv, struct mlx5_mr *mr)
+priv_mr_release(struct priv *priv __rte_unused, struct mlx5_mr *mr)
{
- (void)priv;
assert(mr);
DEBUG("Memory Region %p refcnt: %d",
(void *)mr, rte_atomic32_read(&mr->refcnt));
* Verbs Rx queue object.
*/
int
-mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv)
+mlx5_priv_rxq_ibv_releasable(struct priv *priv __rte_unused,
+ struct mlx5_rxq_ibv *rxq_ibv)
{
- (void)priv;
assert(rxq_ibv);
return (rte_atomic32_read(&rxq_ibv->refcnt) == 1);
}
* Number of packets successfully transmitted (<= pkts_n).
*/
uint16_t
-removed_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_tx_burst(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
* Number of packets successfully received (<= pkts_n).
*/
uint16_t
-removed_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+removed_rx_burst(void *dpdk_rxq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_rxq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
*/
uint16_t __attribute__((weak))
-mlx5_tx_burst_raw_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_raw_vec(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
uint16_t __attribute__((weak))
-mlx5_tx_burst_vec(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_tx_burst_vec(void *dpdk_txq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_txq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
uint16_t __attribute__((weak))
-mlx5_rx_burst_vec(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n)
+mlx5_rx_burst_vec(void *dpdk_rxq __rte_unused,
+ struct rte_mbuf **pkts __rte_unused,
+ uint16_t pkts_n __rte_unused)
{
- (void)dpdk_rxq;
- (void)pkts;
- (void)pkts_n;
return 0;
}
int __attribute__((weak))
-priv_check_raw_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+priv_check_raw_vec_tx_support(struct priv *priv __rte_unused,
+ struct rte_eth_dev *dev __rte_unused)
{
- (void)priv;
- (void)dev;
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_tx_support(struct priv *priv, struct rte_eth_dev *dev)
+priv_check_vec_tx_support(struct priv *priv __rte_unused,
+ struct rte_eth_dev *dev __rte_unused)
{
- (void)priv;
- (void)dev;
return -ENOTSUP;
}
int __attribute__((weak))
-rxq_check_vec_support(struct mlx5_rxq_data *rxq)
+rxq_check_vec_support(struct mlx5_rxq_data *rxq __rte_unused)
{
- (void)rxq;
return -ENOTSUP;
}
int __attribute__((weak))
-priv_check_vec_rx_support(struct priv *priv)
+priv_check_vec_rx_support(struct priv *priv __rte_unused)
{
- (void)priv;
return -ENOTSUP;
}
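The weak stubs above are link-time fallbacks: they are always built and report that the vectorized datapath is unavailable; when the vector Rx/Tx objects are compiled in, their strong definitions override these at link time. A minimal sketch of that mechanism, assuming a GCC/clang toolchain (check_vec_support() is a hypothetical name):

#include <errno.h>

/* fallback.c: always built; the weak symbol reports "not supported". */
int __attribute__((weak))
check_vec_support(void)
{
	return -ENOTSUP;
}

/* vec.c: built only when the vector path is enabled; this strong
 * definition replaces the weak fallback at link time. */
int
check_vec_support(void)
{
	return 0;
}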
* Number of xstats names.
*/
int
-mlx5_xstats_get_names(struct rte_eth_dev *dev,
+mlx5_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
struct rte_eth_xstat_name *xstats_names, unsigned int n)
{
struct priv *priv = dev->data->dev_private;
* 0 on success.
*/
int
-priv_dev_traffic_disable(struct priv *priv, struct rte_eth_dev *dev)
+priv_dev_traffic_disable(struct priv *priv,
+ struct rte_eth_dev *dev __rte_unused)
{
- (void)dev;
priv_flow_flush(priv, &priv->ctrl_flows);
return 0;
}
* 0 on success, errno on failure.
*/
int
-mlx5_priv_txq_ibv_release(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_priv_txq_ibv_release(struct priv *priv __rte_unused,
+ struct mlx5_txq_ibv *txq_ibv)
{
- (void)priv;
assert(txq_ibv);
DEBUG("%p: Verbs Tx queue %p: refcnt %d", (void *)priv,
(void *)txq_ibv, rte_atomic32_read(&txq_ibv->refcnt));
* Verbs Tx queue object.
*/
int
-mlx5_priv_txq_ibv_releasable(struct priv *priv, struct mlx5_txq_ibv *txq_ibv)
+mlx5_priv_txq_ibv_releasable(struct priv *priv __rte_unused,
+ struct mlx5_txq_ibv *txq_ibv)
{
- (void)priv;
assert(txq_ibv);
return (rte_atomic32_read(&txq_ibv->refcnt) == 1);
}
mlx5_priv_txq_ibv_get(priv, idx);
for (i = 0; i != MLX5_PMD_TX_MP_CACHE; ++i) {
- struct mlx5_mr *mr = NULL;
-
- (void)mr;
- if (ctrl->txq.mp2mr[i]) {
- mr = priv_mr_get(priv, ctrl->txq.mp2mr[i]->mp);
- assert(mr);
- }
+ if (ctrl->txq.mp2mr[i])
+ claim_nonzero(priv_mr_get(priv,
+ ctrl->txq.mp2mr[i]->mp));
}
rte_atomic32_inc(&ctrl->refcnt);
DEBUG("%p: Tx queue %p: refcnt %d", (void *)priv,