X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Flinux%2Fmlx5_os.c;h=72bbb665cfdc20ea4545879272a4e857e81b4a4f;hb=d61138d4f0e2927cc1d0fef6d810fa7f5d1161a1;hp=b1212667eea4989e84db3b5a9517ecad8dd72fe8;hpb=3fd2961efa1586da496c1c22c8cb128d668a22e3;p=dpdk.git

diff --git a/drivers/net/mlx5/linux/mlx5_os.c b/drivers/net/mlx5/linux/mlx5_os.c
index b1212667ee..72bbb665cf 100644
--- a/drivers/net/mlx5/linux/mlx5_os.c
+++ b/drivers/net/mlx5/linux/mlx5_os.c
@@ -646,56 +646,6 @@ out:
 	return ret;
 }
 
-/**
- * Create the Tx queue DevX/Verbs object.
- *
- * @param dev
- *   Pointer to Ethernet device.
- * @param idx
- *   Queue index in DPDK Tx queue array.
- *
- * @return
- *   0 on success, a negative errno value otherwise and rte_errno is set.
- */
-static int
-mlx5_os_txq_obj_new(struct rte_eth_dev *dev, uint16_t idx)
-{
-	struct mlx5_priv *priv = dev->data->dev_private;
-	struct mlx5_txq_data *txq_data = (*priv->txqs)[idx];
-	struct mlx5_txq_ctrl *txq_ctrl =
-			container_of(txq_data, struct mlx5_txq_ctrl, txq);
-
-	if (txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN)
-		return mlx5_txq_devx_obj_new(dev, idx);
-#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
-	if (!priv->config.dv_esw_en)
-		return mlx5_txq_devx_obj_new(dev, idx);
-#endif
-	return mlx5_txq_ibv_obj_new(dev, idx);
-}
-
-/**
- * Release an Tx DevX/verbs queue object.
- *
- * @param txq_obj
- *   DevX/Verbs Tx queue object.
- */
-static void
-mlx5_os_txq_obj_release(struct mlx5_txq_obj *txq_obj)
-{
-	if (txq_obj->txq_ctrl->type == MLX5_TXQ_TYPE_HAIRPIN) {
-		mlx5_txq_devx_obj_release(txq_obj);
-		return;
-	}
-#ifdef HAVE_MLX5DV_DEVX_UAR_OFFSET
-	if (!txq_obj->txq_ctrl->priv->config.dv_esw_en) {
-		mlx5_txq_devx_obj_release(txq_obj);
-		return;
-	}
-#endif
-	mlx5_txq_ibv_obj_release(txq_obj);
-}
-
 /**
  * DV flow counter mode detect and config.
  *
@@ -877,7 +827,6 @@ mlx5_representor_match(struct mlx5_dev_spawn_data *spawn,
 	return false;
 }
 
-
 /**
  * Spawn an Ethernet device from Verbs information.
  *
@@ -1294,20 +1243,14 @@ err_secondary:
 			config->dv_flow_en = 0;
 		}
 #endif
-	if (spawn->max_port > UINT8_MAX) {
-		/* Verbs can't support ports larger than 255 by design. */
-		DRV_LOG(ERR, "can't support IB ports > UINT8_MAX");
-		err = EINVAL;
-		goto error;
-	}
 	config->ind_table_max_size =
 		sh->device_attr.max_rwq_indirection_table_size;
 	/*
 	 * Remove this check once DPDK supports larger/variable
 	 * indirection tables.
 	 */
-	if (config->ind_table_max_size > (unsigned int)ETH_RSS_RETA_SIZE_512)
-		config->ind_table_max_size = ETH_RSS_RETA_SIZE_512;
+	if (config->ind_table_max_size > (unsigned int)RTE_ETH_RSS_RETA_SIZE_512)
+		config->ind_table_max_size = RTE_ETH_RSS_RETA_SIZE_512;
 	DRV_LOG(DEBUG, "maximum Rx indirection table size is %u",
 		config->ind_table_max_size);
 	config->hw_vlan_strip = !!(sh->device_attr.raw_packet_caps &
@@ -1573,7 +1516,7 @@ err_secondary:
 	/*
 	 * If HW has bug working with tunnel packet decapsulation and
 	 * scatter FCS, and decapsulation is needed, clear the hw_fcs_strip
-	 * bit. Then DEV_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
+	 * bit. Then RTE_ETH_RX_OFFLOAD_KEEP_CRC bit will not be set anymore.
 	 */
 	if (config->hca_attr.scatter_fcs_w_decap_disable && config->decap_en)
 		config->hw_fcs_strip = 0;
@@ -1646,6 +1589,7 @@ err_secondary:
 	 */
 	MLX5_ASSERT(spawn->ifindex);
 	priv->if_index = spawn->ifindex;
+	priv->lag_affinity_idx = sh->refcnt - 1;
 	eth_dev->data->dev_private = priv;
 	priv->dev_data = eth_dev->data;
 	eth_dev->data->mac_addrs = priv->mac;
@@ -1745,27 +1689,22 @@ err_secondary:
 					ibv_obj_ops.drop_action_create;
 		priv->obj_ops.drop_action_destroy =
 					ibv_obj_ops.drop_action_destroy;
-#ifndef HAVE_MLX5DV_DEVX_UAR_OFFSET
-		priv->obj_ops.txq_obj_modify = ibv_obj_ops.txq_obj_modify;
-#else
-		if (config->dv_esw_en)
-			priv->obj_ops.txq_obj_modify =
-						ibv_obj_ops.txq_obj_modify;
-#endif
-		/* Use specific wrappers for Tx object. */
-		priv->obj_ops.txq_obj_new = mlx5_os_txq_obj_new;
-		priv->obj_ops.txq_obj_release = mlx5_os_txq_obj_release;
 		mlx5_queue_counter_id_prepare(eth_dev);
 		priv->obj_ops.lb_dummy_queue_create =
 					mlx5_rxq_ibv_obj_dummy_lb_create;
 		priv->obj_ops.lb_dummy_queue_release =
 					mlx5_rxq_ibv_obj_dummy_lb_release;
+	} else if (spawn->max_port > UINT8_MAX) {
+		/* Verbs can't support ports larger than 255 by design. */
+		DRV_LOG(ERR, "must enable DV and ESW when RDMA link ports > 255");
+		err = ENOTSUP;
+		goto error;
 	} else {
 		priv->obj_ops = ibv_obj_ops;
 	}
 	if (config->tx_pp &&
 	    (priv->config.dv_esw_en ||
-	     priv->obj_ops.txq_obj_new != mlx5_os_txq_obj_new)) {
+	     priv->obj_ops.txq_obj_new != mlx5_txq_devx_obj_new)) {
 		/*
 		 * HAVE_MLX5DV_DEVX_UAR_OFFSET is required to support
 		 * packet pacing and already checked above.
@@ -2519,11 +2458,9 @@ mlx5_os_pci_probe_pf(struct mlx5_common_device *cdev,
 		 * Representor interrupts handle is released in mlx5_dev_stop().
 		 */
 		if (list[i].info.representor) {
-			struct rte_intr_handle *intr_handle;
-			intr_handle = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
-						  sizeof(*intr_handle), 0,
-						  SOCKET_ID_ANY);
-			if (!intr_handle) {
+			struct rte_intr_handle *intr_handle =
+				rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+			if (intr_handle == NULL) {
 				DRV_LOG(ERR,
 					"port %u failed to allocate memory for interrupt handler "
 					"Rx interrupts will not be supported",
@@ -2687,7 +2624,7 @@ mlx5_os_auxiliary_probe(struct mlx5_common_device *cdev)
 	if (eth_dev == NULL)
 		return -rte_errno;
 	/* Post create. */
-	eth_dev->intr_handle = &adev->intr_handle;
+	eth_dev->intr_handle = adev->intr_handle;
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_RMV;
@@ -2727,6 +2664,15 @@ mlx5_os_net_probe(struct mlx5_common_device *cdev)
 		return mlx5_os_auxiliary_probe(cdev);
 }
 
+/**
+ * Cleanup resources when the last device is closed.
+ */
+void
+mlx5_os_net_cleanup(void)
+{
+	mlx5_pmd_socket_uninit();
+}
+
 /**
  * Install shared asynchronous device events handler.
  * This function is implemented to support event sharing
@@ -2742,24 +2688,38 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
 	int flags;
 	struct ibv_context *ctx = sh->cdev->ctx;
 
-	sh->intr_handle.fd = -1;
+	sh->intr_handle = rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+	if (sh->intr_handle == NULL) {
+		DRV_LOG(ERR, "Fail to allocate intr_handle");
+		rte_errno = ENOMEM;
+		return;
+	}
+	rte_intr_fd_set(sh->intr_handle, -1);
+
 	flags = fcntl(ctx->async_fd, F_GETFL);
 	ret = fcntl(ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
 	if (ret) {
 		DRV_LOG(INFO, "failed to change file descriptor async event"
 			" queue");
 	} else {
-		sh->intr_handle.fd = ctx->async_fd;
-		sh->intr_handle.type = RTE_INTR_HANDLE_EXT;
-		if (rte_intr_callback_register(&sh->intr_handle,
+		rte_intr_fd_set(sh->intr_handle, ctx->async_fd);
+		rte_intr_type_set(sh->intr_handle, RTE_INTR_HANDLE_EXT);
+		if (rte_intr_callback_register(sh->intr_handle,
 					mlx5_dev_interrupt_handler, sh)) {
 			DRV_LOG(INFO, "Fail to install the shared interrupt.");
-			sh->intr_handle.fd = -1;
+			rte_intr_fd_set(sh->intr_handle, -1);
 		}
 	}
 	if (sh->devx) {
 #ifdef HAVE_IBV_DEVX_ASYNC
-		sh->intr_handle_devx.fd = -1;
+		sh->intr_handle_devx =
+			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);
+		if (!sh->intr_handle_devx) {
+			DRV_LOG(ERR, "Fail to allocate intr_handle");
+			rte_errno = ENOMEM;
+			return;
+		}
+		rte_intr_fd_set(sh->intr_handle_devx, -1);
 		sh->devx_comp = (void *)mlx5_glue->devx_create_cmd_comp(ctx);
 		struct mlx5dv_devx_cmd_comp *devx_comp = sh->devx_comp;
 		if (!devx_comp) {
@@ -2773,13 +2733,14 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
 				      " devx comp");
 			return;
 		}
-		sh->intr_handle_devx.fd = devx_comp->fd;
-		sh->intr_handle_devx.type = RTE_INTR_HANDLE_EXT;
-		if (rte_intr_callback_register(&sh->intr_handle_devx,
+		rte_intr_fd_set(sh->intr_handle_devx, devx_comp->fd);
+		rte_intr_type_set(sh->intr_handle_devx,
+				  RTE_INTR_HANDLE_EXT);
+		if (rte_intr_callback_register(sh->intr_handle_devx,
 					mlx5_dev_interrupt_handler_devx, sh)) {
 			DRV_LOG(INFO, "Fail to install the devx shared"
 				" interrupt.");
-			sh->intr_handle_devx.fd = -1;
+			rte_intr_fd_set(sh->intr_handle_devx, -1);
 		}
 #endif /* HAVE_IBV_DEVX_ASYNC */
 	}
@@ -2796,13 +2757,15 @@ mlx5_os_dev_shared_handler_install(struct mlx5_dev_ctx_shared *sh)
 void
 mlx5_os_dev_shared_handler_uninstall(struct mlx5_dev_ctx_shared *sh)
 {
-	if (sh->intr_handle.fd >= 0)
-		mlx5_intr_callback_unregister(&sh->intr_handle,
+	if (rte_intr_fd_get(sh->intr_handle) >= 0)
+		mlx5_intr_callback_unregister(sh->intr_handle,
 					      mlx5_dev_interrupt_handler, sh);
+	rte_intr_instance_free(sh->intr_handle);
 #ifdef HAVE_IBV_DEVX_ASYNC
-	if (sh->intr_handle_devx.fd >= 0)
-		rte_intr_callback_unregister(&sh->intr_handle_devx,
+	if (rte_intr_fd_get(sh->intr_handle_devx) >= 0)
+		rte_intr_callback_unregister(sh->intr_handle_devx,
 					mlx5_dev_interrupt_handler_devx, sh);
+	rte_intr_instance_free(sh->intr_handle_devx);
 	if (sh->devx_comp)
 		mlx5_glue->devx_destroy_cmd_comp(sh->devx_comp);
 #endif
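
Note: the recurring change in the interrupt-handling hunks above is the DPDK 21.11 migration from a struct-embedded "struct rte_intr_handle" to an opaque, heap-allocated instance. rte_intr_instance_alloc()/rte_intr_instance_free() own the lifetime, and accessors such as rte_intr_fd_set(), rte_intr_type_set() and rte_intr_fd_get() replace direct field writes like "sh->intr_handle.fd = -1". The sketch below condenses that alloc/set/register/unregister/free lifecycle outside of mlx5; the example_* names and the event_fd/cb_arg parameters are invented for illustration, and only the rte_intr_* calls are the real EAL API.

	#include <errno.h>
	#include <stdio.h>

	#include <rte_errno.h>
	#include <rte_interrupts.h>

	/* Hypothetical callback; a real driver would drain its event queue here. */
	static void
	example_event_handler(void *cb_arg)
	{
		printf("async event on context %p\n", cb_arg);
	}

	/*
	 * Register a callback on an externally owned event fd (in the mlx5 hunks
	 * this is ibv_context.async_fd or the DevX completion fd). Returns the
	 * opaque handle, or NULL with rte_errno set on failure.
	 */
	static struct rte_intr_handle *
	example_handler_install(int event_fd, void *cb_arg)
	{
		/* The instance is opaque and heap-allocated; F_SHARED places it
		 * in DPDK shared memory instead of plain malloc() memory. */
		struct rte_intr_handle *ih =
			rte_intr_instance_alloc(RTE_INTR_INSTANCE_F_SHARED);

		if (ih == NULL) {
			rte_errno = ENOMEM;
			return NULL;
		}
		/* Setters replace the old direct field writes (.fd, .type). */
		rte_intr_fd_set(ih, event_fd);
		rte_intr_type_set(ih, RTE_INTR_HANDLE_EXT);
		if (rte_intr_callback_register(ih, example_event_handler, cb_arg)) {
			rte_intr_instance_free(ih);
			return NULL;
		}
		return ih;
	}

	/* Mirror of the uninstall path: unregister if armed, then free. */
	static void
	example_handler_uninstall(struct rte_intr_handle *ih, void *cb_arg)
	{
		if (ih == NULL)
			return;
		if (rte_intr_fd_get(ih) >= 0)
			rte_intr_callback_unregister(ih, example_event_handler,
						     cb_arg);
		rte_intr_instance_free(ih);
	}

RTE_INTR_INSTANCE_F_SHARED asks the EAL to allocate the instance from DPDK shared memory, so a handle created by the primary process stays usable in secondary processes; that is presumably why the patch selects it for handles hung off the shared device context.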