X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fmlx5%2Fmlx5_ethdev.c;h=2278b24c012253dca87ec930aa82ad4dc5d8287a;hb=40e01fd8c84c1ec71d41dc4fff23350428f4e48a;hp=27372f1f2f4798ac9bf28cc148d5ecf830764f2d;hpb=09a16bcab7b61acf186c596b5be6c13f3b4c9e65;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 27372f1f2f..2278b24c01 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -580,16 +580,34 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 	info->switch_info.domain_id = priv->domain_id;
 	info->switch_info.port_id = priv->representor_id;
 	if (priv->representor) {
-		unsigned int i = mlx5_dev_to_port_id(dev->device, NULL, 0);
-		uint16_t port_id[i];
+		uint16_t port_id;
 
-		i = RTE_MIN(mlx5_dev_to_port_id(dev->device, port_id, i), i);
-		while (i--) {
+		if (priv->pf_bond >= 0) {
+			/*
+			 * Switch port ID is opaque value with driver defined
+			 * format. Push the PF index in bonding configurations
+			 * in upper four bits of port ID. If we get too many
+			 * representors (more than 4K) or PFs (more than 15)
+			 * this approach must be reconsidered.
+			 */
+			if ((info->switch_info.port_id >>
+				MLX5_PORT_ID_BONDING_PF_SHIFT) ||
+			    priv->pf_bond > MLX5_PORT_ID_BONDING_PF_MASK) {
+				DRV_LOG(ERR, "can't update switch port ID"
+					     " for bonding device");
+				assert(false);
+				return -ENODEV;
+			}
+			info->switch_info.port_id |=
+				priv->pf_bond << MLX5_PORT_ID_BONDING_PF_SHIFT;
+		}
+		MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
 			struct mlx5_priv *opriv =
-				rte_eth_devices[port_id[i]].data->dev_private;
+				rte_eth_devices[port_id].data->dev_private;
 
 			if (!opriv ||
 			    opriv->representor ||
+			    opriv->sh != priv->sh ||
 			    opriv->domain_id != priv->domain_id)
 				continue;
 			/*
@@ -600,7 +618,6 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 			break;
 		}
 	}
-
 	return 0;
 }
 
@@ -717,11 +734,13 @@ mlx5_find_master_dev(struct rte_eth_dev *dev)
 	priv = dev->data->dev_private;
 	domain_id = priv->domain_id;
 	assert(priv->representor);
-	RTE_ETH_FOREACH_DEV_OF(port_id, dev->device) {
-		priv = rte_eth_devices[port_id].data->dev_private;
-		if (priv &&
-		    priv->master &&
-		    priv->domain_id == domain_id)
+	MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) {
+		struct mlx5_priv *opriv =
+			rte_eth_devices[port_id].data->dev_private;
+		if (opriv &&
+		    opriv->master &&
+		    opriv->domain_id == domain_id &&
+		    opriv->sh == priv->sh)
 			return &rte_eth_devices[port_id];
 	}
 	return NULL;
@@ -980,6 +999,7 @@ mlx5_link_update(struct rte_eth_dev *dev, int wait_to_complete)
 	int ret;
 	struct rte_eth_link dev_link;
 	time_t start_time = time(NULL);
+	int retry = MLX5_GET_LINK_STATUS_RETRY_COUNT;
 
 	do {
 		ret = mlx5_link_update_unlocked_gs(dev, &dev_link);
@@ -988,7 +1008,7 @@
 		if (ret == 0)
 			break;
 		/* Handle wait to complete situation. */
-		if (wait_to_complete && ret == -EAGAIN) {
+		if ((wait_to_complete || retry) && ret == -EAGAIN) {
 			if (abs((int)difftime(time(NULL), start_time)) <
 			    MLX5_LINK_STATUS_TIMEOUT) {
 				usleep(0);
@@ -1000,7 +1020,7 @@
 		} else if (ret < 0) {
 			return ret;
 		}
-	} while (wait_to_complete);
+	} while (wait_to_complete || retry-- > 0);
 	ret = !!memcmp(&dev->data->dev_link, &dev_link,
 		       sizeof(struct rte_eth_link));
 	dev->data->dev_link = dev_link;
@@ -1428,6 +1448,37 @@ mlx5_dev_shared_handler_uninstall(struct rte_eth_dev *dev)
 					mlx5_dev_interrupt_handler, sh);
 	sh->intr_handle.fd = 0;
 	sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
+exit:
+	pthread_mutex_unlock(&sh->intr_mutex);
+}
+
+/**
+ * Uninstall devx shared asynchronous device events handler.
+ * This function is implemented to support event sharing
+ * between multiple ports of a single IB device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_dev_shared_handler_devx_uninstall(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+	pthread_mutex_lock(&sh->intr_mutex);
+	assert(priv->ibv_port);
+	assert(priv->ibv_port <= sh->max_port);
+	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+	if (sh->port[priv->ibv_port - 1].devx_ih_port_id >= RTE_MAX_ETHPORTS)
+		goto exit;
+	assert(sh->port[priv->ibv_port - 1].devx_ih_port_id ==
+					(uint32_t)dev->data->port_id);
+	sh->port[priv->ibv_port - 1].devx_ih_port_id = RTE_MAX_ETHPORTS;
+	if (!sh->devx_intr_cnt || --sh->devx_intr_cnt)
+		goto exit;
 	if (sh->intr_handle_devx.fd) {
 		rte_intr_callback_unregister(&sh->intr_handle_devx,
 					     mlx5_dev_interrupt_handler_devx,
@@ -1470,8 +1521,9 @@ mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
 		assert(sh->intr_cnt);
 		goto exit;
 	}
-	sh->port[priv->ibv_port - 1].ih_port_id = (uint32_t)dev->data->port_id;
 	if (sh->intr_cnt) {
+		sh->port[priv->ibv_port - 1].ih_port_id =
+			(uint32_t)dev->data->port_id;
 		sh->intr_cnt++;
 		goto exit;
 	}
@@ -1480,52 +1532,81 @@ mlx5_dev_shared_handler_install(struct rte_eth_dev *dev)
 	flags = fcntl(sh->ctx->async_fd, F_GETFL);
 	ret = fcntl(sh->ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
 	if (ret) {
-		DRV_LOG(INFO, "failed to change file descriptor"
-			      " async event queue");
-		goto error;
+		DRV_LOG(INFO, "failed to change file descriptor async event"
+			" queue");
+		/* Indicate there will be no interrupts. */
+		dev->data->dev_conf.intr_conf.lsc = 0;
+		dev->data->dev_conf.intr_conf.rmv = 0;
+	} else {
+		sh->intr_handle.fd = sh->ctx->async_fd;
+		sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
+		rte_intr_callback_register(&sh->intr_handle,
+					   mlx5_dev_interrupt_handler, sh);
+		sh->intr_cnt++;
+		sh->port[priv->ibv_port - 1].ih_port_id =
+					(uint32_t)dev->data->port_id;
+	}
+exit:
+	pthread_mutex_unlock(&sh->intr_mutex);
+}
+
+/**
+ * Install devx shared asynchronous device events handler.
+ * This function is implemented to support event sharing
+ * between multiple ports of a single IB device.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+static void
+mlx5_dev_shared_handler_devx_install(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_ibv_shared *sh = priv->sh;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+	pthread_mutex_lock(&sh->intr_mutex);
+	assert(priv->ibv_port);
+	assert(priv->ibv_port <= sh->max_port);
+	assert(dev->data->port_id < RTE_MAX_ETHPORTS);
+	if (sh->port[priv->ibv_port - 1].devx_ih_port_id < RTE_MAX_ETHPORTS) {
+		/* The handler is already installed for this port. */
+		assert(sh->devx_intr_cnt);
+		goto exit;
+	}
+	if (sh->devx_intr_cnt) {
+		sh->devx_intr_cnt++;
+		sh->port[priv->ibv_port - 1].devx_ih_port_id =
+					(uint32_t)dev->data->port_id;
+		goto exit;
 	}
-	sh->intr_handle.fd = sh->ctx->async_fd;
-	sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
-	rte_intr_callback_register(&sh->intr_handle,
-				   mlx5_dev_interrupt_handler, sh);
 	if (priv->config.devx) {
 #ifndef HAVE_IBV_DEVX_ASYNC
-		goto error_unregister;
+		goto exit;
 #else
 		sh->devx_comp = mlx5_glue->devx_create_cmd_comp(sh->ctx);
 		if (sh->devx_comp) {
-			flags = fcntl(sh->devx_comp->fd, F_GETFL);
-			ret = fcntl(sh->devx_comp->fd, F_SETFL,
+			int flags = fcntl(sh->devx_comp->fd, F_GETFL);
+			int ret = fcntl(sh->devx_comp->fd, F_SETFL,
 				    flags | O_NONBLOCK);
+
 			if (ret) {
 				DRV_LOG(INFO, "failed to change file descriptor"
-					      " devx async event queue");
-				goto error_unregister;
+					" devx async event queue");
+			} else {
+				sh->intr_handle_devx.fd = sh->devx_comp->fd;
+				sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
+				rte_intr_callback_register
+					(&sh->intr_handle_devx,
+					 mlx5_dev_interrupt_handler_devx, sh);
+				sh->devx_intr_cnt++;
+				sh->port[priv->ibv_port - 1].devx_ih_port_id =
+					(uint32_t)dev->data->port_id;
 			}
-			sh->intr_handle_devx.fd = sh->devx_comp->fd;
-			sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
-			rte_intr_callback_register
-				(&sh->intr_handle_devx,
-				 mlx5_dev_interrupt_handler_devx, sh);
-		} else {
-			DRV_LOG(INFO, "failed to create devx async command "
-				"completion");
-			goto error_unregister;
 		}
 #endif /* HAVE_IBV_DEVX_ASYNC */
 	}
-	sh->intr_cnt++;
-	goto exit;
-error_unregister:
-	rte_intr_callback_unregister(&sh->intr_handle,
-				     mlx5_dev_interrupt_handler, sh);
-error:
-	/* Indicate there will be no interrupts. */
-	dev->data->dev_conf.intr_conf.lsc = 0;
-	dev->data->dev_conf.intr_conf.rmv = 0;
-	sh->intr_handle.fd = 0;
-	sh->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
-	sh->port[priv->ibv_port - 1].ih_port_id = RTE_MAX_ETHPORTS;
 exit:
 	pthread_mutex_unlock(&sh->intr_mutex);
 }
@@ -1554,6 +1635,30 @@ mlx5_dev_interrupt_handler_install(struct rte_eth_dev *dev)
 	mlx5_dev_shared_handler_install(dev);
 }
 
+/**
+ * Devx uninstall interrupt handler.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_dev_interrupt_handler_devx_uninstall(struct rte_eth_dev *dev)
+{
+	mlx5_dev_shared_handler_devx_uninstall(dev);
+}
+
+/**
+ * Devx install interrupt handler.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ */
+void
+mlx5_dev_interrupt_handler_devx_install(struct rte_eth_dev *dev)
+{
+	mlx5_dev_shared_handler_devx_install(dev);
+}
+
 /**
  * DPDK callback to bring the link DOWN.
  *
@@ -1629,36 +1734,6 @@ mlx5_is_removed(struct rte_eth_dev *dev)
 	return 0;
 }
 
-/**
- * Get port ID list of mlx5 instances sharing a common device.
- *
- * @param[in] dev
- *   Device to look for.
- * @param[out] port_list
- *   Result buffer for collected port IDs.
- * @param port_list_n
- *   Maximum number of entries in result buffer. If 0, @p port_list can be
- *   NULL.
- *
- * @return
- *   Number of matching instances regardless of the @p port_list_n
- *   parameter, 0 if none were found.
- */
-unsigned int
-mlx5_dev_to_port_id(const struct rte_device *dev, uint16_t *port_list,
-		    unsigned int port_list_n)
-{
-	uint16_t id;
-	unsigned int n = 0;
-
-	RTE_ETH_FOREACH_DEV_OF(id, dev) {
-		if (n < port_list_n)
-			port_list[n] = id;
-		n++;
-	}
-	return n;
-}
-
 /**
  * Get the E-Switch parameters by port id.
  *
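
A note on the switch port ID encoding in the mlx5_dev_infos_get() hunk above: in bonding configurations the PF index is pushed into the upper four bits of the 16-bit switch port ID, leaving the lower twelve bits for the representor ID, which is exactly why the patch bails out with DRV_LOG(ERR, ...) once there are more than 4K representors or more than 15 PFs. The standalone sketch below only illustrates that packing; the EXAMPLE_* constants and helper name are assumptions made for this note (the driver uses MLX5_PORT_ID_BONDING_PF_SHIFT and MLX5_PORT_ID_BONDING_PF_MASK defined elsewhere in the mlx5 PMD), not code from the patch.

/*
 * Illustrative sketch only -- not part of the patch. It mimics the
 * encoding used in mlx5_dev_infos_get(): the bonding PF index goes
 * into the upper four bits of the 16-bit switch port ID, the
 * representor ID stays in the lower twelve bits.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PF_SHIFT 12  /* upper 4 bits of a 16-bit port ID (assumed) */
#define EXAMPLE_PF_MASK  0xf /* at most 15 PFs (assumed)                   */

/* Return the combined switch port ID, or -1 if the values do not fit. */
static int
example_encode_switch_port_id(uint16_t representor_id, int pf_index)
{
	/* Representor ID must fit in the lower 12 bits (less than 4K). */
	if ((representor_id >> EXAMPLE_PF_SHIFT) != 0 ||
	    pf_index < 0 || pf_index > EXAMPLE_PF_MASK)
		return -1;
	return representor_id | (pf_index << EXAMPLE_PF_SHIFT);
}

int
main(void)
{
	/* Representor 5 behind bonding PF 1 -> 0x1005. */
	int port_id = example_encode_switch_port_id(5, 1);

	assert(port_id == 0x1005);
	printf("switch port id: 0x%04x (pf %d, representor %d)\n",
	       port_id, port_id >> EXAMPLE_PF_SHIFT,
	       port_id & ((1 << EXAMPLE_PF_SHIFT) - 1));
	return 0;
}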