X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fmlx5%2Fmlx5.c;h=aae2ef9af7370e336ada24dfaf2ce9b9ed61be02;hb=255b8f86eb6e483b7015c080a85aba424e0ce420;hp=84123f8e3d204d82ddc1f5d00a04ce6a2dc6f4d9;hpb=07a99de886f9c4815cb902ef95148c3f9dd49589;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 84123f8e3d..aae2ef9af7 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -11,8 +11,8 @@
 #include
 #include
-#include
-#include
+#include
+#include
 #include
 #include
 #include
@@ -38,14 +38,12 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_mr.h"
 #include "mlx5_flow.h"
+#include "mlx5_flow_os.h"
 #include "rte_pmd_mlx5.h"

 /* Device parameter to enable RX completion queue compression. */
 #define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en"

-/* Device parameter to enable RX completion entry padding to 128B. */
-#define MLX5_RXQ_CQE_PAD_EN "rxq_cqe_pad_en"
-
 /* Device parameter to enable padding Rx packet to cacheline size. */
 #define MLX5_RXQ_PKT_PAD_EN "rxq_pkt_pad_en"

@@ -183,10 +181,9 @@ int mlx5_logtype;

 static LIST_HEAD(, mlx5_dev_ctx_shared) mlx5_dev_ctx_list =
                                LIST_HEAD_INITIALIZER();
-static pthread_mutex_t mlx5_dev_ctx_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-
+static pthread_mutex_t mlx5_dev_ctx_list_mutex;
 static const struct mlx5_indexed_pool_config mlx5_ipool_cfg[] = {
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        [MLX5_IPOOL_DECAP_ENCAP] = {
                .size = sizeof(struct mlx5_flow_dv_encap_decap_resource),
                .trunk_size = 64,
@@ -415,8 +412,8 @@ mlx5_flow_aso_age_mng_close(struct mlx5_dev_ctx_shared *sh)
                        for (j = 0; j < MLX5_COUNTERS_PER_POOL; ++j)
                                if (pool->actions[j].dr_action)
                                        claim_zero
-                                       (mlx5_glue->destroy_flow_action
-                                       (pool->actions[j].dr_action));
+                                       (mlx5_flow_os_destroy_flow_action
+                                       (pool->actions[j].dr_action));
                        mlx5_free(pool);
                }
                mlx5_free(sh->aso_age_mng->pools);
@@ -523,7 +520,7 @@ mlx5_flow_counters_mng_close(struct mlx5_dev_ctx_shared *sh)
                                if (cnt->action)
                                        claim_zero
-                                       (mlx5_glue->destroy_flow_action
+                                       (mlx5_flow_os_destroy_flow_action
                                                (cnt->action));
                                if (fallback && MLX5_POOL_GET_CNT
                                    (pool, j)->dcs_when_free)
@@ -939,14 +936,6 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                goto error;
        }
        if (sh->devx) {
-               /* Query the EQN for this core. */
-               err = mlx5_glue->devx_query_eqn(sh->ctx, 0, &sh->eqn);
-               if (err) {
-                       rte_errno = errno;
-                       DRV_LOG(ERR, "Failed to query event queue number %d.",
-                               rte_errno);
-                       goto error;
-               }
                err = mlx5_os_get_pdn(sh->pd, &sh->pdn);
                if (err) {
                        DRV_LOG(ERR, "Fail to extract pdn from PD");
@@ -1004,6 +993,11 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                err = rte_errno;
                goto error;
        }
+       if (LIST_EMPTY(&mlx5_dev_ctx_list)) {
+               err = mlx5_flow_os_init_workspace_once();
+               if (err)
+                       goto error;
+       }
        mlx5_flow_aging_init(sh);
        mlx5_flow_counters_mng_init(sh);
        mlx5_flow_ipool_create(sh, config);
@@ -1014,6 +1008,7 @@ mlx5_alloc_shared_dev_ctx(const struct mlx5_dev_spawn_data *spawn,
                rte_rwlock_write_unlock(&mlx5_shared_data->mem_event_rwlock);
        /* Add context to the global device list. */
        LIST_INSERT_HEAD(&mlx5_dev_ctx_list, sh, next);
+       rte_spinlock_init(&sh->geneve_tlv_opt_sl);
 exit:
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        return sh;
@@ -1079,6 +1074,9 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_mr_release_cache(&sh->share_cache);
        /* Remove context from the global device list. */
        LIST_REMOVE(sh, next);
+       /* Release flow workspaces objects on the last device. */
+       if (LIST_EMPTY(&mlx5_dev_ctx_list))
+               mlx5_flow_os_release_workspace();
        pthread_mutex_unlock(&mlx5_dev_ctx_list_mutex);
        /*
         * Ensure there is no async event handler installed.
@@ -1109,6 +1107,7 @@ mlx5_free_shared_dev_ctx(struct mlx5_dev_ctx_shared *sh)
                mlx5_glue->devx_free_uar(sh->devx_rx_uar);
        if (sh->ctx)
                claim_zero(mlx5_glue->close_device(sh->ctx));
+       MLX5_ASSERT(sh->geneve_tlv_option_resource == NULL);
        pthread_mutex_destroy(&sh->txpp.mutex);
        mlx5_free(sh);
        return;
@@ -1147,7 +1146,7 @@ mlx5_alloc_table_hash_list(struct mlx5_priv *priv __rte_unused)
 {
        int err = 0;
        /* Tables are only used in DV and DR modes. */
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        char s[MLX5_HLIST_NAMESIZE];

@@ -1252,13 +1251,13 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev)
         */
        ppriv_size =
                sizeof(struct mlx5_proc_priv) + priv->txqs_n * sizeof(void *);
-       ppriv = mlx5_malloc(MLX5_MEM_RTE, ppriv_size, RTE_CACHE_LINE_SIZE,
-                           dev->device->numa_node);
+       ppriv = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, ppriv_size,
+                           RTE_CACHE_LINE_SIZE, dev->device->numa_node);
        if (!ppriv) {
                rte_errno = ENOMEM;
                return -rte_errno;
        }
-       ppriv->uar_table_sz = ppriv_size;
+       ppriv->uar_table_sz = priv->txqs_n;
        dev->process_private = ppriv;
        return 0;
 }
@@ -1269,7 +1268,7 @@ mlx5_proc_priv_init(struct rte_eth_dev *dev)
  * @param dev
  *   Pointer to Ethernet device structure.
  */
-static void
+void
 mlx5_proc_priv_uninit(struct rte_eth_dev *dev)
 {
        if (!dev->process_private)
@@ -1429,6 +1428,156 @@ mlx5_dev_close(struct rte_eth_dev *dev)
        return 0;
 }

+const struct eth_dev_ops mlx5_dev_ops = {
+       .dev_configure = mlx5_dev_configure,
+       .dev_start = mlx5_dev_start,
+       .dev_stop = mlx5_dev_stop,
+       .dev_set_link_down = mlx5_set_link_down,
+       .dev_set_link_up = mlx5_set_link_up,
+       .dev_close = mlx5_dev_close,
+       .promiscuous_enable = mlx5_promiscuous_enable,
+       .promiscuous_disable = mlx5_promiscuous_disable,
+       .allmulticast_enable = mlx5_allmulticast_enable,
+       .allmulticast_disable = mlx5_allmulticast_disable,
+       .link_update = mlx5_link_update,
+       .stats_get = mlx5_stats_get,
+       .stats_reset = mlx5_stats_reset,
+       .xstats_get = mlx5_xstats_get,
+       .xstats_reset = mlx5_xstats_reset,
+       .xstats_get_names = mlx5_xstats_get_names,
+       .fw_version_get = mlx5_fw_version_get,
+       .dev_infos_get = mlx5_dev_infos_get,
+       .read_clock = mlx5_txpp_read_clock,
+       .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
+       .vlan_filter_set = mlx5_vlan_filter_set,
+       .rx_queue_setup = mlx5_rx_queue_setup,
+       .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
+       .tx_queue_setup = mlx5_tx_queue_setup,
+       .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
+       .rx_queue_release = mlx5_rx_queue_release,
+       .tx_queue_release = mlx5_tx_queue_release,
+       .rx_queue_start = mlx5_rx_queue_start,
+       .rx_queue_stop = mlx5_rx_queue_stop,
+       .tx_queue_start = mlx5_tx_queue_start,
+       .tx_queue_stop = mlx5_tx_queue_stop,
+       .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
+       .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
+       .mac_addr_remove = mlx5_mac_addr_remove,
+       .mac_addr_add = mlx5_mac_addr_add,
+       .mac_addr_set = mlx5_mac_addr_set,
+       .set_mc_addr_list = mlx5_set_mc_addr_list,
+       .mtu_set = mlx5_dev_set_mtu,
+       .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
+       .vlan_offload_set = mlx5_vlan_offload_set,
+       .reta_update = mlx5_dev_rss_reta_update,
+       .reta_query = mlx5_dev_rss_reta_query,
+       .rss_hash_update = mlx5_rss_hash_update,
+       .rss_hash_conf_get = mlx5_rss_hash_conf_get,
+       .filter_ctrl = mlx5_dev_filter_ctrl,
+       .rxq_info_get = mlx5_rxq_info_get,
+       .txq_info_get = mlx5_txq_info_get,
+       .rx_burst_mode_get = mlx5_rx_burst_mode_get,
+       .tx_burst_mode_get = mlx5_tx_burst_mode_get,
+       .rx_queue_intr_enable = mlx5_rx_intr_enable,
+       .rx_queue_intr_disable = mlx5_rx_intr_disable,
+       .is_removed = mlx5_is_removed,
+       .udp_tunnel_port_add = mlx5_udp_tunnel_port_add,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
+       .hairpin_cap_get = mlx5_hairpin_cap_get,
+       .mtr_ops_get = mlx5_flow_meter_ops_get,
+       .hairpin_bind = mlx5_hairpin_bind,
+       .hairpin_unbind = mlx5_hairpin_unbind,
+       .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
+       .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
+       .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
+       .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
+};
+
+/* Available operations from secondary process. */
+const struct eth_dev_ops mlx5_dev_sec_ops = {
+       .stats_get = mlx5_stats_get,
+       .stats_reset = mlx5_stats_reset,
+       .xstats_get = mlx5_xstats_get,
+       .xstats_reset = mlx5_xstats_reset,
+       .xstats_get_names = mlx5_xstats_get_names,
+       .fw_version_get = mlx5_fw_version_get,
+       .dev_infos_get = mlx5_dev_infos_get,
+       .read_clock = mlx5_txpp_read_clock,
+       .rx_queue_start = mlx5_rx_queue_start,
+       .rx_queue_stop = mlx5_rx_queue_stop,
+       .tx_queue_start = mlx5_tx_queue_start,
+       .tx_queue_stop = mlx5_tx_queue_stop,
+       .rxq_info_get = mlx5_rxq_info_get,
+       .txq_info_get = mlx5_txq_info_get,
+       .rx_burst_mode_get = mlx5_rx_burst_mode_get,
+       .tx_burst_mode_get = mlx5_tx_burst_mode_get,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
+};
+
+/* Available operations in flow isolated mode. */
+const struct eth_dev_ops mlx5_dev_ops_isolate = {
+       .dev_configure = mlx5_dev_configure,
+       .dev_start = mlx5_dev_start,
+       .dev_stop = mlx5_dev_stop,
+       .dev_set_link_down = mlx5_set_link_down,
+       .dev_set_link_up = mlx5_set_link_up,
+       .dev_close = mlx5_dev_close,
+       .promiscuous_enable = mlx5_promiscuous_enable,
+       .promiscuous_disable = mlx5_promiscuous_disable,
+       .allmulticast_enable = mlx5_allmulticast_enable,
+       .allmulticast_disable = mlx5_allmulticast_disable,
+       .link_update = mlx5_link_update,
+       .stats_get = mlx5_stats_get,
+       .stats_reset = mlx5_stats_reset,
+       .xstats_get = mlx5_xstats_get,
+       .xstats_reset = mlx5_xstats_reset,
+       .xstats_get_names = mlx5_xstats_get_names,
+       .fw_version_get = mlx5_fw_version_get,
+       .dev_infos_get = mlx5_dev_infos_get,
+       .read_clock = mlx5_txpp_read_clock,
+       .dev_supported_ptypes_get = mlx5_dev_supported_ptypes_get,
+       .vlan_filter_set = mlx5_vlan_filter_set,
+       .rx_queue_setup = mlx5_rx_queue_setup,
+       .rx_hairpin_queue_setup = mlx5_rx_hairpin_queue_setup,
+       .tx_queue_setup = mlx5_tx_queue_setup,
+       .tx_hairpin_queue_setup = mlx5_tx_hairpin_queue_setup,
+       .rx_queue_release = mlx5_rx_queue_release,
+       .tx_queue_release = mlx5_tx_queue_release,
+       .rx_queue_start = mlx5_rx_queue_start,
+       .rx_queue_stop = mlx5_rx_queue_stop,
+       .tx_queue_start = mlx5_tx_queue_start,
+       .tx_queue_stop = mlx5_tx_queue_stop,
+       .flow_ctrl_get = mlx5_dev_get_flow_ctrl,
+       .flow_ctrl_set = mlx5_dev_set_flow_ctrl,
+       .mac_addr_remove = mlx5_mac_addr_remove,
+       .mac_addr_add = mlx5_mac_addr_add,
+       .mac_addr_set = mlx5_mac_addr_set,
+       .set_mc_addr_list = mlx5_set_mc_addr_list,
+       .mtu_set = mlx5_dev_set_mtu,
+       .vlan_strip_queue_set = mlx5_vlan_strip_queue_set,
+       .vlan_offload_set = mlx5_vlan_offload_set,
+       .filter_ctrl = mlx5_dev_filter_ctrl,
+       .rxq_info_get = mlx5_rxq_info_get,
+       .txq_info_get = mlx5_txq_info_get,
+       .rx_burst_mode_get = mlx5_rx_burst_mode_get,
+       .tx_burst_mode_get = mlx5_tx_burst_mode_get,
+       .rx_queue_intr_enable = mlx5_rx_intr_enable,
+       .rx_queue_intr_disable = mlx5_rx_intr_disable,
+       .is_removed = mlx5_is_removed,
+       .get_module_info = mlx5_get_module_info,
+       .get_module_eeprom = mlx5_get_module_eeprom,
+       .hairpin_cap_get = mlx5_hairpin_cap_get,
+       .mtr_ops_get = mlx5_flow_meter_ops_get,
+       .hairpin_bind = mlx5_hairpin_bind,
+       .hairpin_unbind = mlx5_hairpin_unbind,
+       .hairpin_get_peer_ports = mlx5_hairpin_get_peer_ports,
+       .hairpin_queue_peer_update = mlx5_hairpin_queue_peer_update,
+       .hairpin_queue_peer_bind = mlx5_hairpin_queue_peer_bind,
+       .hairpin_queue_peer_unbind = mlx5_hairpin_queue_peer_unbind,
+};
+
 /**
  * Verify and store value for device argument.
  *
@@ -1475,8 +1624,6 @@ mlx5_args_check(const char *key, const char *val, void *opaque)
                }
                config->cqe_comp = !!tmp;
                config->cqe_comp_fmt = tmp;
-       } else if (strcmp(MLX5_RXQ_CQE_PAD_EN, key) == 0) {
-               config->cqe_pad = !!tmp;
        } else if (strcmp(MLX5_RXQ_PKT_PAD_EN, key) == 0) {
                config->hw_padding = !!tmp;
        } else if (strcmp(MLX5_RX_MPRQ_EN, key) == 0) {
@@ -1605,7 +1752,6 @@ mlx5_args(struct mlx5_dev_config *config, struct rte_devargs *devargs)
 {
        const char **params = (const char *[]){
                MLX5_RXQ_CQE_COMP_EN,
-               MLX5_RXQ_CQE_PAD_EN,
                MLX5_RXQ_PKT_PAD_EN,
                MLX5_RX_MPRQ_EN,
                MLX5_RX_MPRQ_LOG_STRIDE_NUM,
@@ -2084,6 +2230,7 @@ RTE_LOG_REGISTER(mlx5_logtype, pmd.net.mlx5, NOTICE)
  */
 RTE_INIT(rte_mlx5_pmd_init)
 {
+       pthread_mutex_init(&mlx5_dev_ctx_list_mutex, NULL);
        mlx5_common_init();
        /* Build the static tables for Verbs conversion. */
        mlx5_set_ptype_table();