struct mlx5_priv *priv = dev->data->dev_private;
unsigned int rxqs_n = dev->data->nb_rx_queues;
unsigned int txqs_n = dev->data->nb_tx_queues;
- unsigned int i;
- unsigned int j;
- unsigned int reta_idx_n;
const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
int ret = 0;
DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
dev->data->port_id, priv->rxqs_n, rxqs_n);
priv->rxqs_n = rxqs_n;
- /*
- * If the requested number of RX queues is not a power of two,
- * use the maximum indirection table size for better balancing.
- * The result is always rounded to the next power of two.
- */
- reta_idx_n = (1 << log2above((rxqs_n & (rxqs_n - 1)) ?
- priv->config.ind_table_max_size :
- rxqs_n));
- ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
- if (ret)
- return ret;
- /*
- * When the number of RX queues is not a power of two,
- * the remaining table entries are padded with reused WQs
- * and hashes are not spread uniformly.
- */
- for (i = 0, j = 0; (i != reta_idx_n); ++i) {
- (*priv->reta_idx)[i] = j;
- if (++j == rxqs_n)
- j = 0;
- }
}
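+ /* Clear the flag so the default RSS reta is rebuilt for this configuration. */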
+ priv->skip_default_rss_reta = 0;
ret = mlx5_proc_priv_init(dev);
if (ret)
return ret;
return 0;
}
+/**
+ * Configure default RSS reta.
+ *
+ * @param dev
+ * Pointer to Ethernet device structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_dev_configure_rss_reta(struct rte_eth_dev *dev)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ unsigned int rxqs_n = dev->data->nb_rx_queues;
+ unsigned int i;
+ unsigned int j;
+ unsigned int reta_idx_n;
+ int ret = 0;
+ unsigned int *rss_queue_arr = NULL;
+ unsigned int rss_queue_n = 0;
+
+ if (priv->skip_default_rss_reta)
+ return ret;
+ rss_queue_arr = rte_malloc("", rxqs_n * sizeof(unsigned int), 0);
+ if (!rss_queue_arr) {
+ DRV_LOG(ERR, "port %u cannot allocate RSS queue list (%u)",
+ dev->data->port_id, rxqs_n);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
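+ /* Only standard (non-hairpin) Rx queues join the default RSS reta. */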
+ for (i = 0, j = 0; i < rxqs_n; i++) {
+ struct mlx5_rxq_data *rxq_data;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
+
+ rxq_data = (*priv->rxqs)[i];
+ rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ rss_queue_arr[j++] = i;
+ }
+ rss_queue_n = j;
+ if (rss_queue_n > priv->config.ind_table_max_size) {
+ DRV_LOG(ERR, "port %u cannot handle this many Rx queues (%u)",
+ dev->data->port_id, rss_queue_n);
+ rte_errno = EINVAL;
+ rte_free(rss_queue_arr);
+ return -rte_errno;
+ }
+ DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
+ dev->data->port_id, priv->rxqs_n, rxqs_n);
+ priv->rxqs_n = rxqs_n;
+ /*
+ * If the requested number of RX queues is not a power of two,
+ * use the maximum indirection table size for better balancing.
+ * The result is always rounded to the next power of two.
+ */
+ reta_idx_n = (1 << log2above((rss_queue_n & (rss_queue_n - 1)) ?
+ priv->config.ind_table_max_size :
+ rss_queue_n));
+ ret = mlx5_rss_reta_index_resize(dev, reta_idx_n);
+ if (ret) {
+ rte_free(rss_queue_arr);
+ return ret;
+ }
+ /*
+ * When the number of RX queues is not a power of two,
+ * the remaining table entries are padded with reused WQs
+ * and hashes are not spread uniformly.
+ */
+ for (i = 0, j = 0; (i != reta_idx_n); ++i) {
+ (*priv->reta_idx)[i] = rss_queue_arr[j];
+ if (++j == rss_queue_n)
+ j = 0;
+ }
+ rte_free(rss_queue_arr);
+ return ret;
+}
+
/**
* Sets default tuning parameters.
*
}
} else { /* ind_tbl->type == MLX5_IND_TBL_TYPE_DEVX */
struct mlx5_devx_rqt_attr *rqt_attr = NULL;
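+ /*
+  * Size the RQT as a power of two: the queue count when it already
+  * is one, the maximum indirection table size otherwise.
+  */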
+ const unsigned int rqt_n =
+ 1 << (rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size));
rqt_attr = rte_calloc(__func__, 1, sizeof(*rqt_attr) +
- queues_n * sizeof(uint32_t), 0);
+ rqt_n * sizeof(uint32_t), 0);
if (!rqt_attr) {
DRV_LOG(ERR, "port %u cannot allocate RQT resources",
dev->data->port_id);
goto error;
}
rqt_attr->rqt_max_size = priv->config.ind_table_max_size;
- rqt_attr->rqt_actual_size = queues_n;
+ rqt_attr->rqt_actual_size = rqt_n;
for (i = 0; i != queues_n; ++i) {
struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev,
queues[i]);
rqt_attr->rq_list[i] = rxq->obj->rq->id;
ind_tbl->queues[i] = queues[i];
}
+ k = i; /* Retain value of i for use in error case. */
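+ /* Pad the remaining RQT entries by cycling over the listed RQs. */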
+ for (j = 0; k != rqt_n; ++k, ++j)
+ rqt_attr->rq_list[k] = rqt_attr->rq_list[j];
ind_tbl->rqt = mlx5_devx_cmd_create_rqt(priv->sh->ctx,
rqt_attr);
rte_free(rqt_attr);
struct mlx5_ind_table_obj *ind_tbl;
int err;
struct mlx5_devx_obj *tir = NULL;
+ struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
+ struct mlx5_rxq_ctrl *rxq_ctrl =
+ container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
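+ /* Used below to select the TIR transport domain. */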
queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl) {
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[queues[0]];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
enum mlx5_ind_tbl_type type;
type = rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_IBV ?
tir_attr.rx_hash_fn = MLX5_RX_HASH_FN_TOEPLITZ;
memcpy(&tir_attr.rx_hash_field_selector_outer, &hash_fields,
sizeof(uint64_t));
- tir_attr.transport_domain = priv->sh->tdn;
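+ /* Hairpin queues use the dedicated transport domain object. */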
+ if (rxq_ctrl->obj->type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
+ tir_attr.transport_domain = priv->sh->td->id;
+ else
+ tir_attr.transport_domain = priv->sh->tdn;
memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
tir_attr.indirect_table = ind_tbl->rqt->id;
if (dev->data->dev_conf.lpbk_mode)