X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5.c;h=02ea2e781e54192228c518ceb69b2dd6d243d2c9;hb=953e74e6b73a876d6f149fd759bd0423e5438247;hp=abd7ff70df0275cba5c86f96f47194c193825685;hpb=e6988afdc75a628bba20bbc291831e63902b20a6;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index abd7ff70df..02ea2e781e 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -12,7 +12,6 @@ #include #include -#include #include #include #include @@ -28,19 +27,22 @@ #include #include #include -#include #include #include "mlx5_defs.h" #include "mlx5.h" #include "mlx5_utils.h" #include "mlx5_rxtx.h" +#include "mlx5_rx.h" +#include "mlx5_tx.h" #include "mlx5_autoconf.h" #include "mlx5_mr.h" #include "mlx5_flow.h" #include "mlx5_flow_os.h" #include "rte_pmd_mlx5.h" +#define MLX5_ETH_DRIVER_NAME mlx5_eth + /* Device parameter to enable RX completion queue compression. */ #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" @@ -173,6 +175,9 @@ /* Decap will be used or not. */ #define MLX5_DECAP_EN "decap_en" +/* Device parameter to configure allow or prevent duplicate rules pattern. */ +#define MLX5_ALLOW_DUPLICATE_PATTERN "allow_duplicate_pattern" + /* Shared memory between primary and secondary processes. */ struct mlx5_shared_data *mlx5_shared_data; @@ -212,7 +217,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .grow_trunk = 3, .grow_shift = 2, .need_lock = 1, - .release_mem_en = 1, + .release_mem_en = 0, + .per_core_cache = (1 << 16), .malloc = mlx5_malloc, .free = mlx5_free, .type = "mlx5_tag_ipool", @@ -275,10 +281,13 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { }, #endif [MLX5_IPOOL_MTR] = { - .size = sizeof(struct mlx5_flow_meter), + /** + * The ipool index should grow continually from small to big, + * for meter idx, so not set grow_trunk to avoid meter index + * not jump continually. + */ + .size = sizeof(struct mlx5_legacy_flow_meter), .trunk_size = 64, - .grow_trunk = 3, - .grow_shift = 2, .need_lock = 1, .release_mem_en = 1, .malloc = mlx5_malloc, @@ -317,7 +326,8 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .grow_trunk = 3, .grow_shift = 2, .need_lock = 1, - .release_mem_en = 1, + .release_mem_en = 0, + .per_core_cache = 1 << 19, .malloc = mlx5_malloc, .free = mlx5_free, .type = "mlx5_flow_handle_ipool", @@ -347,13 +357,65 @@ static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = { .free = mlx5_free, .type = "mlx5_shared_action_rss", }, + [MLX5_IPOOL_MTR_POLICY] = { + /** + * The ipool index should grow continually from small to big, + * for policy idx, so not set grow_trunk to avoid policy index + * not jump continually. + */ + .size = sizeof(struct mlx5_flow_meter_sub_policy), + .trunk_size = 64, + .need_lock = 1, + .release_mem_en = 1, + .malloc = mlx5_malloc, + .free = mlx5_free, + .type = "mlx5_meter_policy_ipool", + }, }; #define MLX5_FLOW_MIN_ID_POOL_SIZE 512 #define MLX5_ID_GENERATION_ARRAY_FACTOR 16 -#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 4096 +#define MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE 1024 + +/** + * Decide whether representor ID is a HPF(host PF) port on BF2. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * Non-zero if HPF, otherwise 0. 
+ */ +bool +mlx5_is_hpf(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + uint16_t repr = MLX5_REPRESENTOR_REPR(priv->representor_id); + int type = MLX5_REPRESENTOR_TYPE(priv->representor_id); + + return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_VF && + MLX5_REPRESENTOR_REPR(-1) == repr; +} + +/** + * Decide whether representor ID is a SF port representor. + * + * @param dev + * Pointer to Ethernet device structure. + * + * @return + * Non-zero if HPF, otherwise 0. + */ +bool +mlx5_is_sf_repr(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + int type = MLX5_REPRESENTOR_TYPE(priv->representor_id); + + return priv->representor != 0 && type == RTE_ETH_REPRESENTOR_SF; +} /** * Initialize the ASO aging management structure. @@ -378,7 +440,7 @@ mlx5_flow_aso_age_mng_init(struct mlx5_dev_ctx_shared *sh) rte_errno = ENOMEM; return -ENOMEM; } - err = mlx5_aso_queue_init(sh); + err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_FLOW_HIT); if (err) { mlx5_free(sh->aso_age_mng); return -1; @@ -400,8 +462,8 @@ mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh) { int i, j; - mlx5_aso_queue_stop(sh); - mlx5_aso_queue_uninit(sh); + mlx5_aso_flow_hit_queue_poll_stop(sh); + mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_FLOW_HIT); if (sh->aso_age_mng->pools) { struct mlx5_aso_age_pool *pool; @@ -539,6 +601,77 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh) memset(&sh->cmng, 0, sizeof(sh->cmng)); } +/** + * Initialize the aso flow meters management structure. + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object to free + */ +int +mlx5_aso_flow_mtrs_mng_init(struct mlx5_dev_ctx_shared *sh) +{ + if (!sh->mtrmng) { + sh->mtrmng = mlx5_malloc(MLX5_MEM_ZERO, + sizeof(*sh->mtrmng), + RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); + if (!sh->mtrmng) { + DRV_LOG(ERR, + "meter management allocation was failed."); + rte_errno = ENOMEM; + return -ENOMEM; + } + if (sh->meter_aso_en) { + rte_spinlock_init(&sh->mtrmng->pools_mng.mtrsl); + LIST_INIT(&sh->mtrmng->pools_mng.meters); + } + sh->mtrmng->def_policy_id = MLX5_INVALID_POLICY_ID; + } + return 0; +} + +/** + * Close and release all the resources of + * the ASO flow meter management structure. + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object to free. + */ +static void +mlx5_aso_flow_mtrs_mng_close(struct mlx5_dev_ctx_shared *sh) +{ + struct mlx5_aso_mtr_pool *mtr_pool; + struct mlx5_flow_mtr_mng *mtrmng = sh->mtrmng; + uint32_t idx; +#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO + struct mlx5_aso_mtr *aso_mtr; + int i; +#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */ + + if (sh->meter_aso_en) { + mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_POLICER); + idx = mtrmng->pools_mng.n_valid; + while (idx--) { + mtr_pool = mtrmng->pools_mng.pools[idx]; +#ifdef HAVE_MLX5_DR_CREATE_ACTION_ASO + for (i = 0; i < MLX5_ASO_MTRS_PER_POOL; i++) { + aso_mtr = &mtr_pool->mtrs[i]; + if (aso_mtr->fm.meter_action) + claim_zero + (mlx5_glue->destroy_flow_action + (aso_mtr->fm.meter_action)); + } +#endif /* HAVE_MLX5_DR_CREATE_ACTION_ASO */ + claim_zero(mlx5_devx_cmd_destroy + (mtr_pool->devx_obj)); + mtrmng->pools_mng.n_valid--; + mlx5_free(mtr_pool); + } + mlx5_free(sh->mtrmng->pools_mng.pools); + } + mlx5_free(sh->mtrmng); + sh->mtrmng = NULL; +} + /* Send FLOW_AGED event if needed. 
*/ void mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh) @@ -550,20 +683,112 @@ mlx5_age_event_prepare(struct mlx5_dev_ctx_shared *sh) age_info = &sh->port[i].age_info; if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW)) continue; - if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) + MLX5_AGE_UNSET(age_info, MLX5_AGE_EVENT_NEW); + if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER)) { + MLX5_AGE_UNSET(age_info, MLX5_AGE_TRIGGER); rte_eth_dev_callback_process (&rte_eth_devices[sh->port[i].devx_ih_port_id], RTE_ETH_EVENT_FLOW_AGED, NULL); - age_info->flags = 0; + } } } +/* + * Initialize the ASO connection tracking structure. + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +int +mlx5_flow_aso_ct_mng_init(struct mlx5_dev_ctx_shared *sh) +{ + int err; + + if (sh->ct_mng) + return 0; + sh->ct_mng = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*sh->ct_mng), + RTE_CACHE_LINE_SIZE, SOCKET_ID_ANY); + if (!sh->ct_mng) { + DRV_LOG(ERR, "ASO CT management allocation failed."); + rte_errno = ENOMEM; + return -rte_errno; + } + err = mlx5_aso_queue_init(sh, ASO_OPC_MOD_CONNECTION_TRACKING); + if (err) { + mlx5_free(sh->ct_mng); + /* rte_errno should be extracted from the failure. */ + rte_errno = EINVAL; + return -rte_errno; + } + rte_spinlock_init(&sh->ct_mng->ct_sl); + rte_rwlock_init(&sh->ct_mng->resize_rwl); + LIST_INIT(&sh->ct_mng->free_cts); + return 0; +} + +/* + * Close and release all the resources of the + * ASO connection tracking management structure. + * + * @param[in] sh + * Pointer to mlx5_dev_ctx_shared object to free. + */ +static void +mlx5_flow_aso_ct_mng_close(struct mlx5_dev_ctx_shared *sh) +{ + struct mlx5_aso_ct_pools_mng *mng = sh->ct_mng; + struct mlx5_aso_ct_pool *ct_pool; + struct mlx5_aso_ct_action *ct; + uint32_t idx; + uint32_t val; + uint32_t cnt; + int i; + + mlx5_aso_queue_uninit(sh, ASO_OPC_MOD_CONNECTION_TRACKING); + idx = mng->next; + while (idx--) { + cnt = 0; + ct_pool = mng->pools[idx]; + for (i = 0; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) { + ct = &ct_pool->actions[i]; + val = __atomic_fetch_sub(&ct->refcnt, 1, + __ATOMIC_RELAXED); + MLX5_ASSERT(val == 1); + if (val > 1) + cnt++; +#ifdef HAVE_MLX5_DR_ACTION_ASO_CT + if (ct->dr_action_orig) + claim_zero(mlx5_glue->destroy_flow_action + (ct->dr_action_orig)); + if (ct->dr_action_rply) + claim_zero(mlx5_glue->destroy_flow_action + (ct->dr_action_rply)); +#endif + } + claim_zero(mlx5_devx_cmd_destroy(ct_pool->devx_obj)); + if (cnt) { + DRV_LOG(DEBUG, "%u ASO CT objects are being used in the pool %u", + cnt, i); + } + mlx5_free(ct_pool); + /* in case of failure. */ + mng->next--; + } + mlx5_free(mng->pools); + mlx5_free(mng); + /* Management structure must be cleared to 0s during allocation. */ + sh->ct_mng = NULL; +} + /** * Initialize the flow resources' indexed mempool. * * @param[in] sh * Pointer to mlx5_dev_ctx_shared object. - * @param[in] sh + * @param[in] config * Pointer to user dev config. */ static void @@ -588,12 +813,17 @@ mlx5_flow_ipool_create(struct mlx5_dev_ctx_shared *sh, MLX5_FLOW_HANDLE_VERBS_SIZE; break; } - if (config->reclaim_mode) + if (config->reclaim_mode) { cfg.release_mem_en = 1; + cfg.per_core_cache = 0; + } else { + cfg.release_mem_en = 0; + } sh->ipool[i] = mlx5_ipool_create(&cfg); } } + /** * Release the flow resources' indexed mempool. 
* @@ -607,6 +837,9 @@ mlx5_flow_ipool_destroy(struct mlx5_dev_ctx_shared *sh) for (i = 0; i < MLX5_IPOOL_MAX; ++i) mlx5_ipool_destroy(sh->ipool[i]); + for (i = 0; i < MLX5_MAX_MODIFY_NUM; ++i) + if (sh->mdh_ipools[i]) + mlx5_ipool_destroy(sh->mdh_ipools[i]); } /* @@ -905,6 +1138,9 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, rte_errno = ENOMEM; goto exit; } + sh->numa_node = spawn->numa_node; + if (spawn->bond_info) + sh->bond = *spawn->bond_info; err = mlx5_os_open_device(spawn, config, sh); if (!sh->ctx) goto error; @@ -914,8 +1150,8 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, goto error; } sh->refcnt = 1; - sh->bond_dev = UINT16_MAX; sh->max_port = spawn->max_port; + sh->reclaim_mode = config->reclaim_mode; strncpy(sh->ibdev_name, mlx5_os_get_ctx_device_name(sh->ctx), sizeof(sh->ibdev_name) - 1); strncpy(sh->ibdev_path, mlx5_os_get_ctx_device_path(sh->ctx), @@ -980,7 +1216,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn, */ err = mlx5_mr_btree_init(&sh->share_cache.cache, MLX5_MR_BTREE_CACHE_N * 2, - spawn->pci_dev->device.numa_node); + sh->numa_node); if (err) { err = rte_errno; goto error; @@ -1018,6 +1254,8 @@ error: MLX5_ASSERT(sh); if (sh->cnt_id_tbl) mlx5_l3t_destroy(sh->cnt_id_tbl); + if (sh->share_cache.cache.table) + mlx5_mr_btree_free(&sh->share_cache.cache); if (sh->tis) claim_zero(mlx5_devx_cmd_destroy(sh->tis)); if (sh->td) @@ -1087,6 +1325,8 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh) mlx5_flow_aso_age_mng_close(sh); sh->aso_age_mng = NULL; } + if (sh->mtrmng) + mlx5_aso_flow_mtrs_mng_close(sh); mlx5_flow_ipool_destroy(sh); mlx5_os_dev_shared_handler_uninstall(sh); if (sh->cnt_id_tbl) { @@ -1148,20 +1388,22 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused) /* Tables are only used in DV and DR modes. */ #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) struct mlx5_dev_ctx_shared *sh = priv->sh; - char s[MLX5_HLIST_NAMESIZE]; + char s[MLX5_NAME_SIZE]; MLX5_ASSERT(sh); snprintf(s, sizeof(s), "%s_flow_table", priv->sh->ibdev_name); sh->flow_tbls = mlx5_hlist_create(s, MLX5_FLOW_TABLE_HLIST_ARRAY_SIZE, - 0, 0, flow_dv_tbl_create_cb, + false, true, sh, + flow_dv_tbl_create_cb, flow_dv_tbl_match_cb, - flow_dv_tbl_remove_cb); + flow_dv_tbl_remove_cb, + flow_dv_tbl_clone_cb, + flow_dv_tbl_clone_free_cb); if (!sh->flow_tbls) { DRV_LOG(ERR, "flow tables with hash creation failed."); err = ENOMEM; return err; } - sh->flow_tbls->ctx = sh; #ifndef HAVE_MLX5DV_DR struct rte_flow_error error; struct rte_eth_dev *dev = &rte_eth_devices[priv->dev_data->port_id]; @@ -1171,9 +1413,12 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused) * because DV expect to see them even if they cannot be created by * RDMA-CORE. */ - if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0, NULL, 0, 1, &error) || - !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0, NULL, 0, 1, &error) || - !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0, NULL, 0, 1, &error)) { + if (!flow_dv_tbl_resource_get(dev, 0, 0, 0, 0, + NULL, 0, 1, 0, &error) || + !flow_dv_tbl_resource_get(dev, 0, 1, 0, 0, + NULL, 0, 1, 0, &error) || + !flow_dv_tbl_resource_get(dev, 0, 0, 1, 0, + NULL, 0, 1, 0, &error)) { err = ENOMEM; goto error; } @@ -1245,6 +1490,7 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev) struct mlx5_proc_priv *ppriv; size_t ppriv_size; + mlx5_proc_priv_uninit(dev); /* * UAR register table follows the process private structure. BlueFlame * registers for Tx queues are stored in the table. 
@@ -1317,8 +1563,8 @@ mlx5_dev_close(struct rte_eth_dev *dev) * If all the flows are already flushed in the device stop stage, * then this will return directly without any action. */ - mlx5_flow_list_flush(dev, &priv->flows, true); - mlx5_shared_action_flush(dev); + mlx5_flow_list_flush(dev, MLX5_FLOW_TYPE_GEN, true); + mlx5_action_handle_flush(dev); mlx5_flow_meter_flush(dev, NULL); /* Prevent crashes when queues are still in use. */ dev->rx_pkt_burst = removed_rx_burst; @@ -1336,6 +1582,11 @@ mlx5_dev_close(struct rte_eth_dev *dev) priv->rxqs_n = 0; priv->rxqs = NULL; } + if (priv->representor) { + /* Each representor has a dedicated interrupts handler */ + mlx5_free(dev->intr_handle); + dev->intr_handle = NULL; + } if (priv->txqs != NULL) { /* XXX race condition if mlx5_tx_burst() is still running. */ rte_delay_us_sleep(1000); @@ -1354,6 +1605,8 @@ mlx5_dev_close(struct rte_eth_dev *dev) if (priv->mreg_cp_tbl) mlx5_hlist_destroy(priv->mreg_cp_tbl); mlx5_mprq_free_mp(dev); + if (priv->sh->ct_mng) + mlx5_flow_aso_ct_mng_close(priv->sh); mlx5_os_free_shared_dr(priv); if (priv->rss_conf.rss_key != NULL) mlx5_free(priv->rss_conf.rss_key); @@ -1395,7 +1648,8 @@ mlx5_dev_close(struct rte_eth_dev *dev) if (ret) DRV_LOG(WARNING, "port %u some flows still remain", dev->data->port_id); - mlx5_cache_list_destroy(&priv->hrxqs); + if (priv->hrxqs) + mlx5_list_destroy(priv->hrxqs); /* * Free the shared context in last turn, because the cleanup * routines above may use some shared fields, like @@ -1407,7 +1661,7 @@ mlx5_dev_close(struct rte_eth_dev *dev) unsigned int c = 0; uint16_t port_id; - MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { + MLX5_ETH_FOREACH_DEV(port_id, dev->device) { struct mlx5_priv *opriv = rte_eth_devices[port_id].data->dev_private; @@ -1451,6 +1705,7 @@ const struct eth_dev_ops mlx5_dev_ops = { .xstats_get_names = mlx5_xstats_get_names, .fw_version_get = mlx5_fw_version_get, .dev_infos_get = mlx5_dev_infos_get, + .representor_info_get = mlx5_representor_info_get, .read_clock = mlx5_txpp_read_clock, .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, .vlan_filter_set = mlx5_vlan_filter_set, @@ -1477,7 +1732,7 @@ const struct eth_dev_ops mlx5_dev_ops = { .reta_query = mlx5_dev_rss_reta_query, .rss_hash_update = mlx5_rss_hash_update, .rss_hash_conf_get = mlx5_rss_hash_conf_get, - .filter_ctrl = mlx5_dev_filter_ctrl, + .flow_ops_get = mlx5_flow_ops_get, .rxq_info_get = mlx5_rxq_info_get, .txq_info_get = mlx5_txq_info_get, .rx_burst_mode_get = mlx5_rx_burst_mode_get, @@ -1496,6 +1751,7 @@ const struct eth_dev_ops mlx5_dev_ops = { .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update, .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind, .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind, + .get_monitor_addr = mlx5_get_monitor_addr, }; /* Available operations from secondary process. 
*/ @@ -1507,6 +1763,7 @@ const struct eth_dev_ops mlx5_dev_sec_ops = { .xstats_get_names = mlx5_xstats_get_names, .fw_version_get = mlx5_fw_version_get, .dev_infos_get = mlx5_dev_infos_get, + .representor_info_get = mlx5_representor_info_get, .read_clock = mlx5_txpp_read_clock, .rx_queue_start = mlx5_rx_queue_start, .rx_queue_stop = mlx5_rx_queue_stop, @@ -1540,6 +1797,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = { .xstats_get_names = mlx5_xstats_get_names, .fw_version_get = mlx5_fw_version_get, .dev_infos_get = mlx5_dev_infos_get, + .representor_info_get = mlx5_representor_info_get, .read_clock = mlx5_txpp_read_clock, .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get, .vlan_filter_set = mlx5_vlan_filter_set, @@ -1562,7 +1820,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = { .mtu_set = mlx5_dev_set_mtu, .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, .vlan_offload_set = mlx5_vlan_offload_set, - .filter_ctrl = mlx5_dev_filter_ctrl, + .flow_ops_get = mlx5_flow_ops_get, .rxq_info_get = mlx5_rxq_info_get, .txq_info_get = mlx5_txq_info_get, .rx_burst_mode_get = mlx5_rx_burst_mode_get, @@ -1580,6 +1838,7 @@ const struct eth_dev_ops mlx5_dev_ops_isolate = { .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update, .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind, .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind, + .get_monitor_addr = mlx5_get_monitor_addr, }; /** @@ -1715,7 +1974,7 @@ mlx5_args_check(const char *key, const char *val, void *opaque) config->max_dump_files_num = tmp; } else if (strcmp(MLX5_LRO_TIMEOUT_USEC, key) == 0) { config->lro.timeout = tmp; - } else if (strcmp(MLX5_CLASS_ARG_NAME, key) == 0) { + } else if (strcmp(RTE_DEVARGS_KEY_CLASS, key) == 0) { DRV_LOG(DEBUG, "class argument is %s.", val); } else if (strcmp(MLX5_HP_BUF_SIZE, key) == 0) { config->log_hp_size = tmp; @@ -1732,6 +1991,8 @@ mlx5_args_check(const char *key, const char *val, void *opaque) config->sys_mem_en = !!tmp; } else if (strcmp(MLX5_DECAP_EN, key) == 0) { config->decap_en = !!tmp; + } else if (strcmp(MLX5_ALLOW_DUPLICATE_PATTERN, key) == 0) { + config->allow_duplicate_pattern = !!tmp; } else { DRV_LOG(WARNING, "%s: unknown parameter", key); rte_errno = EINVAL; @@ -1786,11 +2047,12 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs) MLX5_REPRESENTOR, MLX5_MAX_DUMP_FILES_NUM, MLX5_LRO_TIMEOUT_USEC, - MLX5_CLASS_ARG_NAME, + RTE_DEVARGS_KEY_CLASS, MLX5_HP_BUF_SIZE, MLX5_RECLAIM_MEM, MLX5_SYS_MEM_EN, MLX5_DECAP_EN, + MLX5_ALLOW_DUPLICATE_PATTERN, NULL, }; struct rte_kvargs *kvlist; @@ -1843,18 +2105,20 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, { if (config->txq_inline_min != MLX5_ARG_UNSET) { /* Application defines size of inlined data explicitly. 
*/ - switch (spawn->pci_dev->id.device_id) { - case PCI_DEVICE_ID_MELLANOX_CONNECTX4: - case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: - if (config->txq_inline_min < - (int)MLX5_INLINE_HSIZE_L2) { - DRV_LOG(DEBUG, - "txq_inline_mix aligned to minimal" - " ConnectX-4 required value %d", - (int)MLX5_INLINE_HSIZE_L2); - config->txq_inline_min = MLX5_INLINE_HSIZE_L2; + if (spawn->pci_dev != NULL) { + switch (spawn->pci_dev->id.device_id) { + case PCI_DEVICE_ID_MELLANOX_CONNECTX4: + case PCI_DEVICE_ID_MELLANOX_CONNECTX4VF: + if (config->txq_inline_min < + (int)MLX5_INLINE_HSIZE_L2) { + DRV_LOG(DEBUG, + "txq_inline_mix aligned to minimal ConnectX-4 required value %d", + (int)MLX5_INLINE_HSIZE_L2); + config->txq_inline_min = + MLX5_INLINE_HSIZE_L2; + } + break; } - break; } goto exit; } @@ -1908,6 +2172,10 @@ mlx5_set_min_inline(struct mlx5_dev_spawn_data *spawn, } } } + if (spawn->pci_dev == NULL) { + config->txq_inline_min = MLX5_INLINE_HSIZE_NONE; + goto exit; + } /* * We get here if we are unable to deduce * inline data size with DevX. Try PCI ID @@ -2031,7 +2299,8 @@ rte_pmd_mlx5_get_dyn_flag_names(char *names[], unsigned int n) */ int mlx5_dev_check_sibling_config(struct mlx5_priv *priv, - struct mlx5_dev_config *config) + struct mlx5_dev_config *config, + struct rte_device *dpdk_dev) { struct mlx5_dev_ctx_shared *sh = priv->sh; struct mlx5_dev_config *sh_conf = NULL; @@ -2042,7 +2311,7 @@ mlx5_dev_check_sibling_config(struct mlx5_priv *priv, if (sh->refcnt == 1) return 0; /* Find the device with shared context. */ - MLX5_ETH_FOREACH_DEV(port_id, priv->pci_dev) { + MLX5_ETH_FOREACH_DEV(port_id, dpdk_dev) { struct mlx5_priv *opriv = rte_eth_devices[port_id].data->dev_private; @@ -2073,28 +2342,31 @@ mlx5_dev_check_sibling_config(struct mlx5_priv *priv, * * @param[in] port_id * port_id to start looking for device. - * @param[in] pci_dev - * Pointer to the hint PCI device. When device is being probed + * @param[in] odev + * Pointer to the hint device. When device is being probed * the its siblings (master and preceding representors might * not have assigned driver yet (because the mlx5_os_pci_probe() - * is not completed yet, for this case match on hint PCI + * is not completed yet, for this case match on hint * device may be used to detect sibling device. * * @return * port_id of found device, RTE_MAX_ETHPORT if not found. */ uint16_t -mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev) +mlx5_eth_find_next(uint16_t port_id, struct rte_device *odev) { while (port_id < RTE_MAX_ETHPORTS) { struct rte_eth_dev *dev = &rte_eth_devices[port_id]; if (dev->state != RTE_ETH_DEV_UNUSED && dev->device && - (dev->device == &pci_dev->device || + (dev->device == odev || (dev->device->driver && dev->device->driver->name && - !strcmp(dev->device->driver->name, MLX5_DRIVER_NAME)))) + ((strcmp(dev->device->driver->name, + MLX5_PCI_DRIVER_NAME) == 0) || + (strcmp(dev->device->driver->name, + MLX5_AUXILIARY_DRIVER_NAME) == 0))))) break; port_id++; } @@ -2104,23 +2376,23 @@ mlx5_eth_find_next(uint16_t port_id, struct rte_pci_device *pci_dev) } /** - * DPDK callback to remove a PCI device. + * Callback to remove a device. * - * This function removes all Ethernet devices belong to a given PCI device. + * This function removes all Ethernet devices belong to a given device. * - * @param[in] pci_dev - * Pointer to the PCI device. + * @param[in] dev + * Pointer to the generic device. * * @return * 0 on success, the function cannot fail. 
*/ -static int -mlx5_pci_remove(struct rte_pci_device *pci_dev) +int +mlx5_net_remove(struct rte_device *dev) { uint16_t port_id; int ret = 0; - RTE_ETH_FOREACH_DEV_OF(port_id, &pci_dev->device) { + RTE_ETH_FOREACH_DEV_OF(port_id, dev) { /* * mlx5_dev_close() is not registered to secondary process, * call the close function explicitly for secondary process. @@ -2211,23 +2483,21 @@ static const struct rte_pci_id mlx5_pci_id_map[] = { } }; -static struct mlx5_pci_driver mlx5_driver = { - .driver_class = MLX5_CLASS_NET, - .pci_driver = { - .driver = { - .name = MLX5_DRIVER_NAME, - }, - .id_table = mlx5_pci_id_map, - .probe = mlx5_os_pci_probe, - .remove = mlx5_pci_remove, - .dma_map = mlx5_dma_map, - .dma_unmap = mlx5_dma_unmap, - .drv_flags = PCI_DRV_FLAGS, - }, +static struct mlx5_class_driver mlx5_net_driver = { + .drv_class = MLX5_CLASS_ETH, + .name = RTE_STR(MLX5_ETH_DRIVER_NAME), + .id_table = mlx5_pci_id_map, + .probe = mlx5_os_net_probe, + .remove = mlx5_net_remove, + .dma_map = mlx5_net_dma_map, + .dma_unmap = mlx5_net_dma_unmap, + .probe_again = 1, + .intr_lsc = 1, + .intr_rmv = 1, }; /* Initialize driver log type. */ -RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE) +RTE_LOG_REGISTER_DEFAULT(mlx5_logtype, NOTICE) /** * Driver initialization routine. @@ -2241,9 +2511,9 @@ RTE_INIT(rte_mlx5_pmd_init) mlx5_set_cksum_table(); mlx5_set_swp_types_table(); if (mlx5_glue) - mlx5_pci_driver_register(&mlx5_driver); + mlx5_class_driver_register(&mlx5_net_driver); } -RTE_PMD_EXPORT_NAME(net_mlx5, __COUNTER__); -RTE_PMD_REGISTER_PCI_TABLE(net_mlx5, mlx5_pci_id_map); -RTE_PMD_REGISTER_KMOD_DEP(net_mlx5, "* ib_uverbs & mlx5_core & mlx5_ib"); +RTE_PMD_EXPORT_NAME(MLX5_ETH_DRIVER_NAME, __COUNTER__); +RTE_PMD_REGISTER_PCI_TABLE(MLX5_ETH_DRIVER_NAME, mlx5_pci_id_map); +RTE_PMD_REGISTER_KMOD_DEP(MLX5_ETH_DRIVER_NAME, "* ib_uverbs & mlx5_core & mlx5_ib");
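
A minimal usage sketch for the new "allow_duplicate_pattern" device argument introduced by this patch; the application name and PCI address are placeholders, and the value is parsed as a boolean in mlx5_args_check() (config->allow_duplicate_pattern = !!tmp), so 0 prevents duplicate rule patterns and any non-zero value allows them:

    dpdk-testpmd -a 0000:03:00.0,allow_duplicate_pattern=0 -- -i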