To support more bus types, remove PCI dependency where possible.
Signed-off-by: Xueming Li <xuemingl@nvidia.com>
Acked-by: Viacheslav Ovsiienko <viacheslavo@nvidia.com>
priv = dev->data->dev_private;
domain_id = priv->domain_id;
MLX5_ASSERT(priv->representor);
- MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
if (opriv &&
* Look for sibling devices in order to reuse their switch domain
* if any, otherwise allocate one.
*/
- MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(port_id, NULL) {
const struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
dev_config.decap_en = 1;
dev_config.log_hp_size = MLX5_ARG_UNSET;
dev_config.allow_duplicate_pattern = 1;
+ list[i].numa_node = pci_dev->device.numa_node;
list[i].eth_dev = mlx5_dev_spawn(&pci_dev->device,
&list[i],
&dev_config,
int dbmap_env;
int err = 0;
- sh->numa_node = spawn->pci_dev->device.numa_node;
pthread_mutex_init(&sh->txpp.mutex, NULL);
/*
* Configure environment variable "MLX5_BF_SHUT_UP"
rte_errno = ENOMEM;
goto exit;
}
+ sh->numa_node = spawn->numa_node;
if (spawn->bond_info)
sh->bond = *spawn->bond_info;
err = mlx5_os_open_device(spawn, config, sh);
*/
err = mlx5_mr_btree_init(&sh->share_cache.cache,
MLX5_MR_BTREE_CACHE_N * 2,
- spawn->pci_dev->device.numa_node);
+ sh->numa_node);
if (err) {
err = rte_errno;
goto error;
unsigned int c = 0;
uint16_t port_id;
- MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
{
if (config->txq_inline_min != MLX5_ARG_UNSET) {
/* Application defines size of inlined data explicitly. */
- switch (spawn->pci_dev->id.device_id) {
- case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
- case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
- if (config->txq_inline_min <
- (int)MLX5_INLINE_HSIZE_L2) {
- DRV_LOG(DEBUG,
- "txq_inline_mix aligned to minimal"
- " ConnectX-4 required value %d",
- (int)MLX5_INLINE_HSIZE_L2);
- config->txq_inline_min = MLX5_INLINE_HSIZE_L2;
+ if (spawn->pci_dev != NULL) {
+ switch (spawn->pci_dev->id.device_id) {
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
+ case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF:
+ if (config->txq_inline_min <
+ (int)MLX5_INLINE_HSIZE_L2) {
+ DRV_LOG(DEBUG,
+ "txq_inline_mix aligned to minimal ConnectX-4 required value %d",
+ (int)MLX5_INLINE_HSIZE_L2);
+ config->txq_inline_min =
+ MLX5_INLINE_HSIZE_L2;
+ }
+ break;
}
- break;
}
goto exit;
}
}
}
}
+ if (spawn->pci_dev == NULL) {
+ config->txq_inline_min = MLX5_INLINE_HSIZE_NONE;
+ goto exit;
+ }
/*
* We get here if we are unable to deduce
* inline data size with DevX. Try PCI ID
if (sh->refcnt == 1)
return 0;
/* Find the device with shared context. */
- MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(port_id, NULL) {
struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
*
* @param[in] port_id
* port_id to start looking for device.
- * @param[in] pci_dev
- * Pointer to the hint PCI device. When device is being probed
+ * @param[in] odev
+ * Pointer to the hint device. When device is being probed
* the its siblings (master and preceding representors might
* not have assigned driver yet (because the mlx5_os_pci_probe()
- * is not completed yet, for this case match on hint PCI
+ * is not completed yet); in this case a match on the hint
* device may be used to detect sibling device.
*
* @return
* port_id of found device, RTE_MAX_ETHPORT if not found.
*/
uint16_t
-mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev)
+mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev)
{
while (port_id < RTE_MAX_ETHPORTS) {
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
if (dev->state != RTE_ETH_DEV_UNUSED &&
dev->device &&
- (dev->device == &pci_dev->device ||
+ (dev->device == odev ||
(dev->device->driver &&
dev->device->driver->name &&
!strcmp(dev->device->driver->name, MLX5_PCI_DRIVER_NAME))))
uint32_t max_port; /**< Device maximal port index. */
uint32_t phys_port; /**< Device physical port index. */
int pf_bond; /**< bonding device PF index. < 0 - no bonding */
+ int numa_node; /**< Device NUMA node. */
struct mlx5_switch_info info; /**< Switch information. */
void *phys_dev; /**< Associated physical device. */
struct rte_eth_dev *eth_dev; /**< Associated Ethernet device. */
void mlx5_proc_priv_uninit(struct rte_eth_dev *dev);
int mlx5_udp_tunnel_port_add(struct rte_eth_dev *dev,
struct rte_eth_udp_tunnel *udp_tunnel);
-uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev);
+uint16_t mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev);
int mlx5_dev_close(struct rte_eth_dev *dev);
bool mlx5_is_hpf(struct rte_eth_dev *dev);
void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh);
/* Macro to iterate over all valid ports for mlx5 driver. */
-#define MLX5_ETH_FOREACH_DEV(port_id, pci_dev) \
- for (port_id = mlx5_eth_find_next(0, pci_dev); \
+#define MLX5_ETH_FOREACH_DEV(port_id, dev) \
+ for (port_id = mlx5_eth_find_next(0, dev); \
port_id < RTE_MAX_ETHPORTS; \
- port_id = mlx5_eth_find_next(port_id + 1, pci_dev))
+ port_id = mlx5_eth_find_next(port_id + 1, dev))
int mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs);
struct mlx5_dev_ctx_shared *
mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
if (priv->representor) {
uint16_t port_id;
- MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(port_id, dev->device) {
struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
}
/**
- * Finds the first ethdev that match the pci device.
+ * Finds the first ethdev that matches the device.
* The existence of multiple ethdev per pci device is only with representors.
* On such case, it is enough to get only one of the ports as they all share
* the same ibv context.
*
- * @param pdev
- * Pointer to the PCI device.
+ * @param dev
+ * Pointer to the device.
*
* @return
* Pointer to the ethdev if found, NULL otherwise.
*/
static struct rte_eth_dev *
-pci_dev_to_eth_dev(struct rte_pci_device *pdev)
+dev_to_eth_dev(struct rte_device *dev)
{
uint16_t port_id;
- port_id = rte_eth_find_next_of(0, &pdev->device);
+ port_id = rte_eth_find_next_of(0, dev);
if (port_id == RTE_MAX_ETHPORTS)
return NULL;
return &rte_eth_devices[port_id];
struct mlx5_priv *priv;
struct mlx5_dev_ctx_shared *sh;
- dev = pci_dev_to_eth_dev(pdev);
+ dev = dev_to_eth_dev(&pdev->device);
if (!dev) {
DRV_LOG(WARNING, "unable to find matching ethdev "
"to PCI device %p", (void *)pdev);
struct mlx5_mr *mr;
struct mr_cache_entry entry;
- dev = pci_dev_to_eth_dev(pdev);
+ dev = dev_to_eth_dev(&pdev->device);
if (!dev) {
DRV_LOG(WARNING, "unable to find matching ethdev "
"to PCI device %p", (void *)pdev);
uint32_t explicit;
uint16_t rx_queue;
- if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
+ if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
rte_errno = ENODEV;
DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
return -rte_errno;
int ret;
uint16_t cur_port = priv->dev_data->port_id;
- if (mlx5_eth_find_next(rx_port, priv->pci_dev) != rx_port) {
+ if (mlx5_eth_find_next(rx_port, dev->device) != rx_port) {
rte_errno = ENODEV;
DRV_LOG(ERR, "Rx port %u does not belong to mlx5", rx_port);
return -rte_errno;
{
int ret = 0;
uint16_t p, pp;
- struct mlx5_priv *priv = dev->data->dev_private;
/*
* If the Rx port has no hairpin configuration with the current port,
* information updating.
*/
if (rx_port == RTE_MAX_ETHPORTS) {
- MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(p, dev->device) {
ret = mlx5_hairpin_bind_single_port(dev, p);
if (ret != 0)
goto unbind;
return mlx5_hairpin_bind_single_port(dev, rx_port);
}
unbind:
- MLX5_ETH_FOREACH_DEV(pp, priv->pci_dev)
+ MLX5_ETH_FOREACH_DEV(pp, dev->device)
if (pp < p)
mlx5_hairpin_unbind_single_port(dev, pp);
return ret;
{
int ret = 0;
uint16_t p;
- struct mlx5_priv *priv = dev->data->dev_private;
if (rx_port == RTE_MAX_ETHPORTS)
- MLX5_ETH_FOREACH_DEV(p, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(p, dev->device) {
ret = mlx5_hairpin_unbind_single_port(dev, p);
if (ret != 0)
return ret;
if (config->txqs_inline == MLX5_ARG_UNSET)
txqs_inline =
#if defined(RTE_ARCH_ARM64)
- (priv->pci_dev->id.device_id ==
+ (priv->pci_dev && priv->pci_dev->id.device_id ==
PCI_DEVICE_ID_MELLANOX_CONNECTX5BF) ?
MLX5_INLINE_MAX_TXQS_BLUEFIELD :
#endif
* Look for sibling devices in order to reuse their switch domain
* if any, otherwise allocate one.
*/
- MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+ MLX5_ETH_FOREACH_DEV(port_id, NULL) {
const struct mlx5_priv *opriv =
rte_eth_devices[port_id].data->dev_private;
dev_config.dv_flow_en = 1;
dev_config.decap_en = 0;
dev_config.log_hp_size = MLX5_ARG_UNSET;
+ list[ns].numa_node = pci_dev->device.numa_node;
list[ns].eth_dev = mlx5_dev_spawn(&pci_dev->device,
&list[ns],
&dev_config);