if (dev_port == dev_port_prev)
goto try_dev_id;
dev_port_prev = dev_port;
- if (dev_port == (priv->port - 1u))
+ if (dev_port == 0)
strlcpy(match, name, sizeof(match));
}
closedir(dir);
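
The hunk above changes how mlx5_get_ifname() picks its match while walking the device's net/ sysfs entries: instead of comparing each entry's dev_port against priv->port - 1, it keeps the entry whose dev_port is 0, i.e. the first port (the goto try_dev_id path above it still handles kernels that report duplicate dev_port values). Below is a standalone sketch of that scan logic under stated assumptions: find_first_port_ifname() and its netdir argument are illustrative, not the driver's actual names or sysfs path.

    /* Keep the interface name whose sysfs dev_port attribute reads 0. */
    #include <dirent.h>
    #include <stdio.h>

    static int
    find_first_port_ifname(const char *netdir, char *match, size_t len)
    {
        struct dirent *dent;
        DIR *dir = opendir(netdir);

        if (dir == NULL)
            return -1;
        match[0] = '\0';
        while ((dent = readdir(dir)) != NULL) {
            char path[512];
            unsigned int dev_port;
            FILE *file;

            if (dent->d_name[0] == '.')
                continue;
            snprintf(path, sizeof(path), "%s/%s/dev_port",
                     netdir, dent->d_name);
            file = fopen(path, "rb");
            if (file == NULL)
                continue;
            /* dev_port is zero-based; 0 is the first port. */
            if (fscanf(file, "%u", &dev_port) == 1 && dev_port == 0)
                snprintf(match, len, "%s", dent->d_name);
            fclose(file);
        }
        closedir(dir);
        return match[0] != '\0' ? 0 : -1;
    }
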
mlx5_ifindex(const struct rte_eth_dev *dev)
{
char ifname[IF_NAMESIZE];
- int ret;
+ unsigned int ret;
ret = mlx5_get_ifname(dev, &ifname);
if (ret)
return ret;
ret = if_nametoindex(ifname);
- if (ret == -1) {
+ if (ret == 0) {
rte_errno = errno;
return -rte_errno;
}
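
The mlx5_ifindex() hunk above fixes a check that could never trigger: if_nametoindex(3) returns an unsigned int that is 0 on failure (with errno set), never -1, so the old ret == -1 error path was dead code; retyping ret as unsigned int also matches the libc return type. A minimal, self-contained example of the corrected check, using "eth0" as a stand-in interface name:

    /* if_nametoindex(3): 0 means failure and errno holds the cause. */
    #include <net/if.h>
    #include <stdio.h>

    int
    main(void)
    {
        unsigned int ifindex = if_nametoindex("eth0");

        if (ifindex == 0) {
            /* errno is e.g. ENODEV when the interface does not exist. */
            perror("if_nametoindex");
            return 1;
        }
        printf("ifindex: %u\n", ifindex);
        return 0;
    }
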
if (++j == rxqs_n)
j = 0;
}
- /*
- * Once the device is added to the list of memory event callback, its
- * global MR cache table cannot be expanded on the fly because of
- * deadlock. If it overflows, lookup should be done by searching MR list
- * linearly, which is slow.
- */
- if (mlx5_mr_btree_init(&priv->mr.cache, MLX5_MR_BTREE_CACHE_N * 2,
- dev->device->numa_node)) {
- /* rte_errno is already set. */
- return -rte_errno;
- }
- rte_rwlock_write_lock(&mlx5_shared_data->mem_event_rwlock);
- LIST_INSERT_HEAD(&mlx5_shared_data->mem_event_cb_list,
- priv, mem_event_cb);
- rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
return 0;
}
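
The removal above drops the per-port MR bootstrap: the global MR cache B-tree sized to MLX5_MR_BTREE_CACHE_N * 2 and the port's registration on mlx5_shared_data's memory event callback list. This excerpt does not show where that logic is re-added, so assume it moves out of this path rather than disappearing. For reference, the removed registration follows the usual writer-locked list pattern; the sketch below reproduces it with placeholder names (struct dev_ctx, register_mem_event_cb), not the driver's own.

    /* Writer-locked list insertion, as in the removed registration. */
    #include <sys/queue.h>
    #include <rte_rwlock.h>

    struct dev_ctx {
        LIST_ENTRY(dev_ctx) mem_event_cb;
    };

    static LIST_HEAD(, dev_ctx) mem_event_cb_list =
        LIST_HEAD_INITIALIZER(mem_event_cb_list);
    static rte_rwlock_t mem_event_rwlock = RTE_RWLOCK_INITIALIZER;

    static void
    register_mem_event_cb(struct dev_ctx *ctx)
    {
        /* Take the write lock so the memory event callback can walk
         * the list safely under the read lock. */
        rte_rwlock_write_lock(&mem_event_rwlock);
        LIST_INSERT_HEAD(&mem_event_cb_list, ctx, mem_event_cb);
        rte_rwlock_write_unlock(&mem_event_rwlock);
    }
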
};
if (dev->rx_pkt_burst == mlx5_rx_burst ||
+ dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
dev->rx_pkt_burst == mlx5_rx_burst_vec)
return ptypes;
return NULL;
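
The two remaining hunks wire up the Multi-Packet RQ (MPRQ) Rx burst routine. mlx5_dev_supported_ptypes_get() only returns its packet-type table when the active rx_pkt_burst is one it recognizes, so mlx5_rx_burst_mprq has to be listed here or packet-type reporting would silently return NULL for MPRQ ports. The resulting shape, sketched with an abbreviated ptypes[] table (the real one is longer) and the driver's internal headers assumed:

    #include <rte_ethdev_driver.h> /* struct rte_eth_dev */
    #include "mlx5_rxtx.h"         /* mlx5_rx_burst*() declarations */

    const uint32_t *
    mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev)
    {
        static const uint32_t ptypes[] = {
            RTE_PTYPE_L2_ETHER,
            /* ... remaining entries elided ... */
            RTE_PTYPE_UNKNOWN,
        };

        /* Report ptypes only for burst functions this driver owns. */
        if (dev->rx_pkt_burst == mlx5_rx_burst ||
            dev->rx_pkt_burst == mlx5_rx_burst_mprq ||
            dev->rx_pkt_burst == mlx5_rx_burst_vec)
            return ptypes;
        return NULL;
    }
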
rx_pkt_burst = mlx5_rx_burst_vec;
DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
dev->data->port_id);
+ } else if (mlx5_mprq_enabled(dev)) {
+ rx_pkt_burst = mlx5_rx_burst_mprq;
}
return rx_pkt_burst;
}
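
The last hunk gives the Rx burst selector an MPRQ fallback: the vectorized function still wins whenever the port qualifies for it, otherwise mlx5_rx_burst_mprq is chosen when MPRQ is enabled, and the scalar mlx5_rx_burst remains the default. The overall selection order, sketched with the driver's internal headers assumed and mlx5_check_vec_rx_support() taken as the predicate guarding the vector branch (the excerpt cuts off just above it):

    #include <rte_ethdev_driver.h> /* eth_rx_burst_t */
    #include "mlx5.h"              /* DRV_LOG */
    #include "mlx5_rxtx.h"         /* burst functions, mlx5_mprq_enabled(),
                                    * mlx5_check_vec_rx_support() */

    eth_rx_burst_t
    mlx5_select_rx_function(struct rte_eth_dev *dev)
    {
        eth_rx_burst_t rx_pkt_burst = mlx5_rx_burst; /* scalar default */

        if (mlx5_check_vec_rx_support(dev) > 0) {
            rx_pkt_burst = mlx5_rx_burst_vec;
            DRV_LOG(DEBUG, "port %u selected Rx vectorized function",
                    dev->data->port_id);
        } else if (mlx5_mprq_enabled(dev)) {
            rx_pkt_burst = mlx5_rx_burst_mprq;
        }
        return rx_pkt_burst;
    }
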