int ret = 0;
(void)conf;
- if (mlx5_is_secondary())
- return -E_RTE_SECONDARY;
priv_lock(priv);
if (!rte_is_power_of_2(desc)) {
desc = 1 << log2above(desc);
struct mlx5_rxq_ctrl *rxq_ctrl;
struct priv *priv;
- if (mlx5_is_secondary())
- return;
-
if (rxq == NULL)
return;
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
unsigned int count = 0;
struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
- assert(!mlx5_is_secondary());
if (!priv->dev->data->dev_conf.intr_conf.rxq)
return 0;
priv_rx_intr_vec_disable(priv);
- intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
+ intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
if (intr_handle->intr_vec == NULL) {
ERROR("failed to allocate memory for interrupt vector,"
" Rx interrupts will not be supported");
if (!priv->dev->data->dev_conf.intr_conf.rxq)
return;
+ if (!intr_handle->intr_vec)
+ goto free;
for (i = 0; i != n; ++i) {
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_rxq_data *rxq_data;
rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
}
+free:
rte_intr_free_epoll_fd(intr_handle);
- free(intr_handle->intr_vec);
+ if (intr_handle->intr_vec)
+ free(intr_handle->intr_vec);
intr_handle->nb_efd = 0;
intr_handle->intr_vec = NULL;
}
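The allocation fix above is needed because sizeof(intr_handle->intr_vec[rxqs_n]) is only the size of one element, so the old call reserved room for a single entry instead of n. A minimal sketch of the corrected idiom; the names (alloc_vec, vec, n) are illustrative and not part of the driver:

#include <stdlib.h>

/* Illustrative only: reserve n elements, not one.  sizeof(vec[0]) is the
 * element size; multiplying by n sizes the whole array, whereas
 * sizeof(vec[n]) is still just one element. */
static int *
alloc_vec(unsigned int n)
{
	int *vec = malloc(n * sizeof(vec[0]));

	/* free(NULL) is a no-op, so an unconditional free() would also be
	 * safe; the guarded free in the hunk above is simply explicit. */
	return vec;
}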
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
int ret = 0;
int
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct priv *priv = mlx5_get_priv(dev);
+ struct priv *priv = dev->data->dev_private;
struct mlx5_rxq_data *rxq_data;
struct mlx5_rxq_ctrl *rxq_ctrl;
struct mlx5_rxq_ibv *rxq_ibv = NULL;
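For context, these two handlers back the generic ethdev Rx interrupt calls. A hypothetical wait-then-poll step, assuming the standard rte_eth_dev_rx_intr_* API; the epoll plumbing (rte_eth_dev_rx_intr_ctl_q() plus an event wait) and error handling are left out to keep the sketch short, and all names other than the DPDK calls are placeholders:

#include <rte_ethdev.h>

static uint16_t
wait_and_poll(uint16_t port_id, uint16_t queue_id,
	      struct rte_mbuf **pkts, uint16_t n)
{
	/* Arms the queue's CQ through mlx5_rx_intr_enable() above. */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	/* Block on the queue's event fd here, e.g. via rte_epoll_wait(). */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
	return rte_eth_rx_burst(port_id, queue_id, pkts, n);
}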
attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
.comp_mask = 0,
};
- if (priv->cqe_comp) {
+ if (priv->cqe_comp && !rxq_data->hw_timestamp) {
attr.cq.mlx5.comp_mask |=
MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
* make cq_ci and rq_ci aligned.
*/
if (rxq_check_vec_support(rxq_data) < 0)
- cqe_n *= 2;
+ attr.cq.ibv.cqe *= 2;
+ } else if (priv->cqe_comp && rxq_data->hw_timestamp) {
+ DEBUG("Rx CQE compression is disabled for HW timestamp");
}
tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
&attr.cq.mlx5));
};
rxq_data->cq_db = cq_info.dbrec;
rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
+ rxq_data->cq_uar = cq_info.cq_uar;
+ rxq_data->cqn = cq_info.cqn;
+ rxq_data->cq_arm_sn = 0;
/* Update doorbell counter. */
rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
rte_wmb();
0, socket);
if (!tmpl)
return NULL;
+ tmpl->socket = socket;
if (priv->dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
/* Enable scattered packets support for this queue if necessary. */
if (priv->hw_csum_l2tun)
tmpl->rxq.csum_l2tun =
!!dev->data->dev_conf.rxmode.hw_ip_checksum;
+ tmpl->rxq.hw_timestamp =
+ !!dev->data->dev_conf.rxmode.hw_timestamp;
/* Configure VLAN stripping. */
tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
!!dev->data->dev_conf.rxmode.hw_vlan_strip);
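Since the new hw_timestamp flag is taken straight from the device configuration, a minimal usage sketch may help; it assumes the bit-field rxmode API referenced above, and port_id and the queue counts are placeholders:

#include <rte_ethdev.h>

/* Requesting Rx timestamps is what sets rxq.hw_timestamp here and makes
 * the CQ creation hunk above skip CQE compression for the queue. */
static int
configure_with_timestamps(uint16_t port_id)
{
	struct rte_eth_conf conf = {
		.rxmode = {
			.hw_timestamp = 1,
		},
	};

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}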
struct mlx5_ind_table_ibv *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
- priv->ind_table_max_size;
+ log2above(priv->ind_table_max_size);
struct ibv_wq *wq[1 << wq_n];
unsigned int i;
unsigned int j;
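The wq_n change above fixes a shift-count bug: wq_n is an exponent, and wq[] holds 1 << wq_n work queues, so the non-power-of-two branch must take the base-2 logarithm of the maximum table size rather than the size itself. A short sketch with made-up numbers, reusing the file's rte_is_power_of_2() and log2above() helpers:

/* Illustrative values only. */
unsigned int queues_n = 6;	/* not a power of two */
unsigned int max_size = 512;	/* stand-in for priv->ind_table_max_size */
unsigned int wq_n = rte_is_power_of_2(queues_n) ?
		    log2above(queues_n) : log2above(max_size);
/* wq_n == 9, so wq[] gets 1 << 9 == 512 entries and the queue list can be
 * repeated to fill the table; shifting by 512, as the old code did, is
 * undefined behaviour. */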
* @param hash_fields
* Verbs protocol hash field to make the RSS on.
* @param queues
- * Queues entering in hash queue.
+ *   Queues entering in the hash Rx queue. When hash_fields is 0, only
+ *   the first queue index is used to build the indirection table.
* @param queues_n
* Number of queues.
*
struct mlx5_ind_table_ibv *ind_tbl;
struct ibv_qp *qp;
+ queues_n = hash_fields ? queues_n : 1;
ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
if (!ind_tbl)
ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
* @param rss_conf
* RSS configuration for the Rx hash queue.
* @param queues
- * Queues entering in hash queue.
+ *   Queues entering in the hash Rx queue. When hash_fields is 0, only
+ *   the first queue index is used to build the indirection table.
* @param queues_n
* Number of queues.
*
{
struct mlx5_hrxq *hrxq;
+ queues_n = hash_fields ? queues_n : 1;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
struct mlx5_ind_table_ibv *ind_tbl;