From: Nelio Laranjeiro Date: Fri, 30 Oct 2015 18:55:08 +0000 (+0100) Subject: mlx5: adapt indirection table size depending on Rx queues number X-Git-Tag: spdx-start~8251 X-Git-Url: http://git.droids-corp.org/?a=commitdiff_plain;h=95e16ef3254f7e75076c7ed2bdd4e8275c5e6894;p=dpdk.git mlx5: adapt indirection table size depending on Rx queues number Use the maximum size of the indirection table when the number of requested RX queues is not a power of two, this helps to improve RSS balancing. A message informs users that balancing is not optimal in such cases. Signed-off-by: Nelio Laranjeiro Signed-off-by: Adrien Mazarguil --- diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index e394d32149..4413248bd5 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -299,7 +299,9 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct ether_addr mac; #ifdef HAVE_EXP_QUERY_DEVICE - exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS; + exp_device_attr.comp_mask = + IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS | + IBV_EXP_DEVICE_ATTR_RX_HASH; #endif /* HAVE_EXP_QUERY_DEVICE */ DEBUG("using port %u (%08" PRIx32 ")", port, test); @@ -363,6 +365,12 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) DEBUG("L2 tunnel checksum offloads are %ssupported", (priv->hw_csum_l2tun ? "" : "not ")); + priv->ind_table_max_size = exp_device_attr.rx_hash_caps.max_rwq_indirection_table_size; + DEBUG("maximum RX indirection table size is %u", + priv->ind_table_max_size); + +#else /* HAVE_EXP_QUERY_DEVICE */ + priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE; #endif /* HAVE_EXP_QUERY_DEVICE */ priv->vf = vf; diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 29fc1dade7..5a41678dfc 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -109,6 +109,7 @@ struct priv { /* Indirection tables referencing all RX WQs. 
*/ struct ibv_exp_rwq_ind_table *(*ind_tables)[]; unsigned int ind_tables_n; /* Number of indirection tables. */ + unsigned int ind_table_max_size; /* Maximum indirection table size. */ /* Hash RX QPs feeding the indirection table. */ struct hash_rxq (*hash_rxqs)[]; unsigned int hash_rxqs_n; /* Hash RX QPs array size. */ diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h index 369f8b61ad..3952c71d33 100644 --- a/drivers/net/mlx5/mlx5_defs.h +++ b/drivers/net/mlx5/mlx5_defs.h @@ -46,6 +46,9 @@ /* Request send completion once in every 64 sends, might be less. */ #define MLX5_PMD_TX_PER_COMP_REQ 64 +/* RSS Indirection table size. */ +#define RSS_INDIRECTION_TABLE_SIZE 128 + /* Maximum number of Scatter/Gather Elements per Work Request. */ #ifndef MLX5_PMD_SGE_WR_N #define MLX5_PMD_SGE_WR_N 4 diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index 8ea1267db9..41f8811508 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -224,7 +224,13 @@ priv_make_ind_table_init(struct priv *priv, int priv_create_hash_rxqs(struct priv *priv) { - unsigned int wqs_n = (1 << log2above(priv->rxqs_n)); + /* If the requested number of WQs is not a power of two, use the + * maximum indirection table size for better balancing. + * The result is always rounded to the next power of two. */ + unsigned int wqs_n = + (1 << log2above((priv->rxqs_n & (priv->rxqs_n - 1)) ? 
+ priv->ind_table_max_size : + priv->rxqs_n)); struct ibv_exp_wq *wqs[wqs_n]; struct ind_table_init ind_table_init[IND_TABLE_INIT_N]; unsigned int ind_tables_n = @@ -251,16 +257,17 @@ priv_create_hash_rxqs(struct priv *priv) " indirection table cannot be created"); return EINVAL; } - if (wqs_n < priv->rxqs_n) { + if ((wqs_n < priv->rxqs_n) || (wqs_n > priv->ind_table_max_size)) { ERROR("cannot handle this many RX queues (%u)", priv->rxqs_n); err = ERANGE; goto error; } - if (wqs_n != priv->rxqs_n) - WARN("%u RX queues are configured, consider rounding this" - " number to the next power of two (%u) for optimal" - " performance", - priv->rxqs_n, wqs_n); + if (wqs_n != priv->rxqs_n) { + INFO("%u RX queues are configured, consider rounding this" + " number to the next power of two for better balancing", + priv->rxqs_n); + DEBUG("indirection table extended to assume %u WQs", wqs_n); + } /* When the number of RX queues is not a power of two, the remaining * table entries are padded with reused WQs and hashes are not spread * uniformly. */