/* Resync CQE and WQE (WQ in reset state). */
rte_io_wmb();
*txq->cq_db = rte_cpu_to_be_32(txq->cq_ci);
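+ /* Keep the SW completion producer index in sync with the consumer index. */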
+ txq->cq_pi = txq->cq_ci;
rte_io_wmb();
}
MLX5_ASSERT(rte_eal_process_type() == RTE_PROC_PRIMARY);
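+ /* The WQ was left in reset state by the resync above, so the
+  * required state transition is RST2RDY, not RDY2RDY.
+  */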
ret = priv->obj_ops.txq_obj_modify(txq_ctrl->obj,
- MLX5_TXQ_MOD_RDY2RDY,
+ MLX5_TXQ_MOD_RST2RDY,
(uint8_t)priv->dev_port);
if (ret)
return ret;
DRV_LOG(DEBUG, "port %u adding Tx queue %u to list",
dev->data->port_id, idx);
(*priv->txqs)[idx] = &txq_ctrl->txq;
- dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STARTED;
return 0;
}
res = mlx5_tx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
- if (hairpin_conf->peer_count != 1 ||
- hairpin_conf->peers[0].port != dev->data->port_id ||
- hairpin_conf->peers[0].queue >= priv->rxqs_n) {
- DRV_LOG(ERR, "port %u unable to setup hairpin queue index %u "
- " invalid hairpind configuration", dev->data->port_id,
- idx);
+ if (hairpin_conf->peer_count != 1) {
rte_errno = EINVAL;
+ DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue index %u"
+ " peer count is %u", dev->data->port_id,
+ idx, hairpin_conf->peer_count);
return -rte_errno;
}
+ if (hairpin_conf->peers[0].port == dev->data->port_id) {
+ if (hairpin_conf->peers[0].queue >= priv->rxqs_n) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
+ " index %u, Rx %u is larger than %u",
+ dev->data->port_id, idx,
+ hairpin_conf->peers[0].queue, priv->txqs_n);
+ return -rte_errno;
+ }
+ } else {
+ if (hairpin_conf->manual_bind == 0 ||
+ hairpin_conf->tx_explicit == 0) {
+ rte_errno = EINVAL;
+ DRV_LOG(ERR, "port %u unable to setup Tx hairpin queue"
+ " index %u peer port %u with attributes %u %u",
+ dev->data->port_id, idx,
+ hairpin_conf->peers[0].port,
+ hairpin_conf->manual_bind,
+ hairpin_conf->tx_explicit);
+ return -rte_errno;
+ }
+ }
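+ /*
+  * For reference, a minimal application-side configuration that
+  * passes the cross-port checks above (sketch only; peer_port_id
+  * and peer_rxq_idx are illustrative placeholders):
+  *
+  *	struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
+  *	conf.peers[0].port = peer_port_id;
+  *	conf.peers[0].queue = peer_rxq_idx;
+  *	conf.manual_bind = 1;	/* application binds the queues */
+  *	conf.tx_explicit = 1;	/* application inserts Tx flow rules */
+  *	ret = rte_eth_tx_hairpin_queue_setup(port_id, idx, nb_desc, &conf);
+  */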
txq_ctrl = mlx5_txq_hairpin_new(dev, idx, desc, hairpin_conf);
if (!txq_ctrl) {
DRV_LOG(ERR, "port %u unable to allocate queue index %u",
rte_errno = ENOMEM;
goto error;
}
- rte_atomic32_inc(&tmpl->refcnt);
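+ /* Relaxed ordering is enough for taking a reference; the
+  * increment publishes no other data.
+  */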
+ __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
tmpl->type = MLX5_TXQ_TYPE_STANDARD;
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
tmpl->txq.idx = idx;
tmpl->hairpin_conf = *hairpin_conf;
tmpl->type = MLX5_TXQ_TYPE_HAIRPIN;
- rte_atomic32_inc(&tmpl->refcnt);
+ __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
LIST_INSERT_HEAD(&priv->txqsctrl, tmpl, next);
return tmpl;
}
if (txq_data) {
ctrl = container_of(txq_data, struct mlx5_txq_ctrl, txq);
- rte_atomic32_inc(&ctrl->refcnt);
+ __atomic_fetch_add(&ctrl->refcnt, 1, __ATOMIC_RELAXED);
}
return ctrl;
}
if (!(*priv->txqs)[idx])
return 0;
txq_ctrl = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- if (!rte_atomic32_dec_and_test(&txq_ctrl->refcnt))
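+ /* More than one reference remains: report the queue as still in use. */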
+ if (__atomic_sub_fetch(&txq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
return 1;
if (txq_ctrl->obj) {
priv->obj_ops.txq_obj_release(txq_ctrl->obj);
txq_ctrl->txq.fcqs = NULL;
}
txq_free_elts(txq_ctrl);
- mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
+ dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
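+ /* Free the control structure and clear the queue slot only when
+  * the last reference is gone.
+  */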
+ if (!__atomic_load_n(&txq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (txq_ctrl->type == MLX5_TXQ_TYPE_STANDARD)
+ mlx5_mr_btree_free(&txq_ctrl->txq.mr_ctrl.cache_bh);
+ LIST_REMOVE(txq_ctrl, next);
+ mlx5_free(txq_ctrl);
+ (*priv->txqs)[idx] = NULL;
}
- LIST_REMOVE(txq_ctrl, next);
- mlx5_free(txq_ctrl);
- (*priv->txqs)[idx] = NULL;
- dev->data->tx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
return 0;
}
if (!(*priv->txqs)[idx])
return -1;
txq = container_of((*priv->txqs)[idx], struct mlx5_txq_ctrl, txq);
- return (rte_atomic32_read(&txq->refcnt) == 1);
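+ /* Releasable when the PMD holds the only remaining reference. */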
+ return (__atomic_load_n(&txq->refcnt, __ATOMIC_RELAXED) == 1);
}
/**
(RTE_MBUF_DYNFLAG_TX_TIMESTAMP_NAME, NULL);
off = rte_mbuf_dynfield_lookup
(RTE_MBUF_DYNFIELD_TIMESTAMP_NAME, NULL);
- if (nbit > 0 && off >= 0 && sh->txpp.refcnt)
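+ /* The dynamic flag lookup may legitimately return bit 0; only a
+  * negative value means the flag is not registered.
+  */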
+ if (nbit >= 0 && off >= 0 && sh->txpp.refcnt)
mask = 1ULL << nbit;
for (i = 0; i != priv->txqs_n; ++i) {
data = (*priv->txqs)[i];