}
rxq->rxq_ctrl = rxq_ctrl;
rxq_ctrl->type = MLX5_RXQ_TYPE_STANDARD;
- rxq_ctrl->priv = priv;
+ rxq_ctrl->sh = priv->sh;
rxq_ctrl->obj = rxq;
rxq_data = &rxq_ctrl->rxq;
/* Create CQ using DevX API. */
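From this hunk on, the queue control block caches the device-level shared context (priv->sh) at creation instead of a per-port back pointer. A compile-only sketch of the ownership shift, using stand-in types rather than the real mlx5 structs:

struct shared_ctx_sketch { int refcnt; };              /* one per device */
struct priv_sketch { struct shared_ctx_sketch *sh; };  /* one per port */
struct qctrl_sketch { struct shared_ctx_sketch *sh; }; /* queue control */

static void
attach_sketch(struct qctrl_sketch *ctrl, struct priv_sketch *priv)
{
	/* Port-agnostic: every port owning a shared queue holds the
	 * same shared context, so any owner's priv->sh will do. */
	ctrl->sh = priv->sh;
}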
mlx5_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
struct mlx5_rxq_data *rxq = rx_queue;
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- struct rte_eth_dev *dev = ETH_DEV(rxq_ctrl->priv);
- if (dev->rx_pkt_burst == NULL ||
- dev->rx_pkt_burst == removed_rx_burst) {
- rte_errno = ENOTSUP;
- return -rte_errno;
- }
if (offset >= (1 << rxq->cqe_n)) {
rte_errno = EINVAL;
return -rte_errno;
sm.is_wq = 1;
sm.queue_id = rxq->idx;
sm.state = IBV_WQS_RESET;
- if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv), &sm))
+ if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
return -1;
if (rxq_ctrl->dump_file_n <
- rxq_ctrl->priv->config.max_dump_files_num) {
+ RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) {
MKSTR(err_str, "Unexpected CQE error syndrome "
"0x%02x CQN = %u RQN = %u wqe_counter = %u"
" rq_ci = %u cq_ci = %u", u.err_cqe->syndrome,
sm.is_wq = 1;
sm.queue_id = rxq->idx;
sm.state = IBV_WQS_RDY;
- if (mlx5_queue_state_modify(ETH_DEV(rxq_ctrl->priv),
- &sm))
+ if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm))
return -1;
if (vec) {
const uint32_t elts_n =
/* Support tunnel matching. */
#define MLX5_FLOW_TUNNEL 10
+#define RXQ_PORT(rxq_ctrl) LIST_FIRST(&(rxq_ctrl)->owners)->priv
+#define RXQ_DEV(rxq_ctrl) ETH_DEV(RXQ_PORT(rxq_ctrl))
+#define RXQ_PORT_ID(rxq_ctrl) PORT_ID(RXQ_PORT(rxq_ctrl))
+
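These accessors recover per-port information from a shareable Rx queue control block via the first entry of its owners list. A minimal, runnable sketch of the idiom, with simplified stand-in structs (not the driver's real types):

#include <sys/queue.h>
#include <stdio.h>

struct port_priv_sketch { unsigned int port_id; };

struct rxq_owner_sketch {
	LIST_ENTRY(rxq_owner_sketch) next;  /* link in the owners list */
	struct port_priv_sketch *priv;      /* per-port private data */
};

struct rxq_ctrl_sketch {
	LIST_HEAD(, rxq_owner_sketch) owners; /* ports sharing this queue */
};

/* Mirrors RXQ_PORT(): any owner is as good as another for port data. */
#define SKETCH_RXQ_PORT(ctrl) (LIST_FIRST(&(ctrl)->owners)->priv)

int
main(void)
{
	struct port_priv_sketch p = { .port_id = 3 };
	struct rxq_owner_sketch owner = { .priv = &p };
	struct rxq_ctrl_sketch ctrl;

	LIST_INIT(&ctrl.owners);
	LIST_INSERT_HEAD(&ctrl.owners, &owner, next);
	printf("resolved port %u\n", SKETCH_RXQ_PORT(&ctrl)->port_id);
	return 0;
}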
/* First entry must be NULL for comparison. */
#define mlx5_mr_btree_len(bt) ((bt)->len - 1)
LIST_HEAD(priv, mlx5_rxq_priv) owners; /* Owner rxq list. */
struct mlx5_rxq_obj *obj; /* Verbs/DevX elements. */
struct mlx5_dev_ctx_shared *sh; /* Shared context. */
- struct mlx5_priv *priv; /* Back pointer to private data. */
enum mlx5_rxq_type type; /* Rxq type. */
unsigned int socket; /* CPU socket ID for allocations. */
uint32_t share_group; /* Group ID of shared RXQ. */
*/
rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
mp = mlx5_rxq_mprq_enabled(rxq) ? rxq->mprq_mp : rxq->mp;
- return mlx5_mr_mempool2mr_bh(&rxq_ctrl->priv->sh->cdev->mr_scache,
+ return mlx5_mr_mempool2mr_bh(&rxq_ctrl->sh->cdev->mr_scache,
mr_ctrl, mp, addr);
}
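The lookup above first recovers the control block from the embedded datapath struct with container_of(), then reaches the MR cache through the shared context rather than a port back pointer. The recovery idiom, sketched with stand-in types:

#include <stddef.h>

struct rxq_data_sketch { unsigned int idx; };

struct ctrl_sketch {
	void *sh;                     /* stands in for rxq_ctrl->sh */
	struct rxq_data_sketch rxq;   /* embedded datapath struct */
};

/* Same step as container_of(rxq, struct mlx5_rxq_ctrl, rxq): subtract
 * the member offset to get back to the enclosing structure. */
static struct ctrl_sketch *
ctrl_from_rxq_sketch(struct rxq_data_sketch *rxq)
{
	return (struct ctrl_sketch *)
	       ((char *)rxq - offsetof(struct ctrl_sketch, rxq));
}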
buf = rte_pktmbuf_alloc(seg->mp);
if (buf == NULL) {
- DRV_LOG(ERR, "port %u empty mbuf pool",
- PORT_ID(rxq_ctrl->priv));
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(ERR, "port %u queue %u empty mbuf pool",
+ RXQ_PORT_ID(rxq_ctrl),
+ rxq_ctrl->rxq.idx);
+ else
+ DRV_LOG(ERR, "share group %u queue %u empty mbuf pool",
+ rxq_ctrl->share_group,
+ rxq_ctrl->share_qid);
rte_errno = ENOMEM;
goto error;
}
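All the log sites touched in this patch follow one rule: share_group == 0 means the queue belongs to a single port and is named by port/queue index; otherwise the queue is shared and only the group plus shared queue id identify it unambiguously. An illustrative helper (not present in the driver) condensing that rule:

#include <stdio.h>

struct rxq_id_sketch {
	unsigned int share_group;  /* 0: owned by exactly one port */
	unsigned int share_qid;    /* meaningful only when shared */
	unsigned int port_id;
	unsigned int idx;
};

static void
log_rxq_identity(const struct rxq_id_sketch *q, const char *event)
{
	if (q->share_group == 0)
		printf("port %u queue %u: %s\n", q->port_id, q->idx, event);
	else
		printf("share group %u queue %u: %s\n",
		       q->share_group, q->share_qid, event);
}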
for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j)
(*rxq->elts)[elts_n + j] = &rxq->fake_mbuf;
}
- DRV_LOG(DEBUG,
- "port %u SPRQ queue %u allocated and configured %u segments"
- " (max %u packets)",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx, elts_n,
- elts_n / (1 << rxq_ctrl->rxq.sges_n));
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(DEBUG,
+ "port %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
+ RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
+ else
+ DRV_LOG(DEBUG,
+ "share group %u SPRQ queue %u allocated and configured %u segments (max %u packets)",
+ rxq_ctrl->share_group, rxq_ctrl->share_qid, elts_n,
+ elts_n / (1 << rxq_ctrl->rxq.sges_n));
return 0;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]);
(*rxq_ctrl->rxq.elts)[i] = NULL;
}
- DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
- PORT_ID(rxq_ctrl->priv), rxq_ctrl->rxq.idx);
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(DEBUG, "port %u SPRQ queue %u failed, freed everything",
+ RXQ_PORT_ID(rxq_ctrl), rxq_ctrl->rxq.idx);
+ else
+ DRV_LOG(DEBUG, "share group %u SPRQ queue %u failed, freed everything",
+ rxq_ctrl->share_group, rxq_ctrl->share_qid);
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
}
uint16_t used = q_n - (elts_ci - rxq->rq_pi);
uint16_t i;
- DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
- PORT_ID(rxq_ctrl->priv), rxq->idx, q_n);
+ if (rxq_ctrl->share_group == 0)
+ DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
+ RXQ_PORT_ID(rxq_ctrl), rxq->idx, q_n);
+ else
+ DRV_LOG(DEBUG, "share group %u Rx queue %u freeing %d WRs",
+ rxq_ctrl->share_group, rxq_ctrl->share_qid, q_n);
if (rxq->elts == NULL)
return;
/**
(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
tmpl->sh = priv->sh;
- tmpl->priv = priv;
tmpl->rxq.mp = rx_seg[0].mp;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.rq_repl_thresh =
tmpl->rxq.rss_hash = 0;
tmpl->rxq.port_id = dev->data->port_id;
tmpl->sh = priv->sh;
- tmpl->priv = priv;
tmpl->rxq.mp = NULL;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts = NULL;
struct mlx5_rxq_ctrl *ctrl =
container_of(rxq, struct mlx5_rxq_ctrl, rxq);
- if (!ctrl->priv->config.rx_vec_en || rxq->sges_n != 0)
+ if (!RXQ_PORT(ctrl)->config.rx_vec_en || rxq->sges_n != 0)
return -ENOTSUP;
if (rxq->lro)
return -ENOTSUP;
* 0 on success, (-1) on failure and rte_errno is set.
*/
static int
-mlx5_rxq_mempool_register(struct mlx5_rxq_ctrl *rxq_ctrl)
+mlx5_rxq_mempool_register(struct rte_eth_dev *dev,
+ struct mlx5_rxq_ctrl *rxq_ctrl)
{
- struct mlx5_priv *priv = rxq_ctrl->priv;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = rxq_ctrl->sh;
struct rte_mempool *mp;
uint32_t s;
int ret = 0;
}
for (s = 0; s < rxq_ctrl->rxq.rxseg_n; s++) {
mp = rxq_ctrl->rxq.rxseg[s].mp;
- ret = mlx5_mr_mempool_register(&priv->sh->cdev->mr_scache,
- priv->sh->cdev->pd, mp,
- &priv->mp_id);
+ ret = mlx5_mr_mempool_register(&sh->cdev->mr_scache,
+ sh->cdev->pd, mp, &priv->mp_id);
if (ret < 0 && rte_errno != EEXIST)
return ret;
rte_mempool_mem_iter(mp, mlx5_rxq_mempool_register_cb,
* the implicit registration is enabled or not,
* Rx mempool destruction is tracked to free MRs.
*/
- if (mlx5_rxq_mempool_register(rxq_ctrl) < 0)
+ if (mlx5_rxq_mempool_register(dev, rxq_ctrl) < 0)
goto error;
ret = rxq_alloc_elts(rxq_ctrl);
if (ret)
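With the back pointer gone, helpers that need port context take the ethdev explicitly, as in the mlx5_rxq_mempool_register() change above; the start path already holds dev, so threading it down costs nothing. A compile-only sketch of the before/after calling convention, with hypothetical names:

/* Before: the helper dug the port out of a stored back pointer:
 *     static int register_pools(struct q_ctrl_sketch *ctrl)
 *     { struct priv *priv = ctrl->priv; ... }
 * After: the caller passes the device it already owns. */
struct eth_dev_sketch { void *dev_private; };
struct q_ctrl_sketch { void *sh; };

static int
register_pools_sketch(struct eth_dev_sketch *dev,
		      struct q_ctrl_sketch *ctrl)
{
	void *priv = dev->dev_private;  /* replaces ctrl->priv */
	(void)priv;
	(void)ctrl;
	return 0;
}

static int
start_rx_queue_sketch(struct eth_dev_sketch *dev,
		      struct q_ctrl_sketch *ctrl)
{
	return register_pools_sketch(dev, ctrl);  /* dev threaded through */
}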