uint32_t flow_tunnels_n[MLX5_FLOW_TUNNEL]; /* Tunnels counters. */
uint32_t wqn; /* WQ number. */
uint16_t dump_file_n; /* Number of dump files. */
- struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
- uint32_t hairpin_status; /* Hairpin binding status. */
};
/* RX queue private data. */
struct mlx5_rxq_priv {
struct mlx5_rxq_ctrl *ctrl; /* Shared Rx Queue. */
LIST_ENTRY(mlx5_rxq_priv) owner_entry; /* Entry in shared rxq_ctrl. */
struct mlx5_priv *priv; /* Back pointer to private data. */
+ struct rte_eth_hairpin_conf hairpin_conf; /* Hairpin configuration. */
+ uint32_t hairpin_status; /* Hairpin binding status. */
};
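For illustration (not part of the patch): with hairpin_conf and hairpin_status moved into mlx5_rxq_priv, per-queue hairpin state is reached through the queue private data rather than the shared mlx5_rxq_ctrl. A minimal sketch, assuming only the mlx5_rxq_get() accessor and the fields visible in this patch; the helper name is hypothetical.

/* Hypothetical helper, illustration only: returns nonzero when Rx queue
 * @idx is a hairpin queue whose binding has completed, using the
 * relocated hairpin_status field in struct mlx5_rxq_priv.
 */
static int
rxq_hairpin_is_bound(struct rte_eth_dev *dev, uint16_t idx)
{
	struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);

	if (rxq == NULL || rxq->ctrl == NULL)
		return 0;
	if (rxq->ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
		return 0;
	return rxq->hairpin_status != 0;
}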
/* mlx5_rxq.c */
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts = NULL;
tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
- tmpl->hairpin_conf = *hairpin_conf;
tmpl->rxq.idx = idx;
+ rxq->hairpin_conf = *hairpin_conf;
mlx5_rxq_ref(dev, idx);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
const struct rte_eth_hairpin_conf *
mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
- rxq_ctrl = container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl,
- rxq);
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
- return &rxq_ctrl->hairpin_conf;
+ if (idx < priv->rxqs_n && rxq != NULL) {
+ if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ return &rxq->hairpin_conf;
}
return NULL;
}
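For illustration (not part of the patch), a caller-side sketch of the reworked getter above; it assumes only what the hunk shows, namely a pointer into the queue's rte_eth_hairpin_conf or NULL for non-hairpin queues, and the helper name is hypothetical.

/* Hypothetical caller, illustration only: fetch the peer Tx queue bound
 * to hairpin Rx queue @idx; returns 0 on success, -1 when the queue is
 * not a hairpin queue or the index is out of range.
 */
static int
rxq_hairpin_peer_txq(struct rte_eth_dev *dev, uint16_t idx, uint16_t *peer_q)
{
	const struct rte_eth_hairpin_conf *conf;

	conf = mlx5_rxq_get_hairpin_conf(dev, idx);
	if (conf == NULL)
		return -1;
	*peer_q = conf->peers[0].queue;
	return 0;
}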
/* mlx5_trigger.c */
}
rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN ||
- rxq_ctrl->hairpin_conf.peers[0].queue != i) {
+ rxq->hairpin_conf.peers[0].queue != i) {
rte_errno = ENOMEM;
DRV_LOG(ERR, "port %u Tx queue %d can't be binded to "
"Rx queue %d", dev->data->port_id,
if (ret)
goto error;
/* Qs with auto-bind will be destroyed directly. */
- rxq_ctrl->hairpin_status = 1;
+ rxq->hairpin_status = 1;
txq_ctrl->hairpin_status = 1;
mlx5_txq_release(dev, i);
}
}
peer_info->qp_id = rxq_ctrl->obj->rq->id;
peer_info->vhca_id = priv->config.hca_attr.vhca_id;
- peer_info->peer_q = rxq_ctrl->hairpin_conf.peers[0].queue;
- peer_info->tx_explicit = rxq_ctrl->hairpin_conf.tx_explicit;
- peer_info->manual_bind = rxq_ctrl->hairpin_conf.manual_bind;
+ peer_info->peer_q = rxq->hairpin_conf.peers[0].queue;
+ peer_info->tx_explicit = rxq->hairpin_conf.tx_explicit;
+ peer_info->manual_bind = rxq->hairpin_conf.manual_bind;
}
return 0;
}
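For illustration (not part of the patch): the fields exported here are what the bind step below compares against the local configuration. A condensed sketch of that mode check follows; the struct tag rte_hairpin_peer_info is assumed from the peer_info uses in this hunk.

/* Illustration only: the peer's explicit Tx rule and manual bind modes
 * must match the local hairpin configuration before binding proceeds
 * (assumed struct tag, field names taken from this hunk).
 */
static int
hairpin_peer_modes_match(const struct rte_hairpin_peer_info *peer,
			 const struct rte_eth_hairpin_conf *conf)
{
	return peer->tx_explicit == conf->tx_explicit &&
	       peer->manual_bind == conf->manual_bind;
}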
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (rxq_ctrl->hairpin_status != 0) {
+ if (rxq->hairpin_status != 0) {
DRV_LOG(DEBUG, "port %u Rx queue %d is already bound",
dev->data->port_id, cur_queue);
return 0;
}
if (peer_info->tx_explicit !=
- rxq_ctrl->hairpin_conf.tx_explicit) {
+ rxq->hairpin_conf.tx_explicit) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u Rx queue %d and peer Tx rule mode"
" mismatch", dev->data->port_id, cur_queue);
return -rte_errno;
}
if (peer_info->manual_bind !=
- rxq_ctrl->hairpin_conf.manual_bind) {
+ rxq->hairpin_conf.manual_bind) {
rte_errno = EINVAL;
DRV_LOG(ERR, "port %u Rx queue %d and peer binding mode"
" mismatch", dev->data->port_id, cur_queue);
rq_attr.hairpin_peer_vhca = peer_info->vhca_id;
ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
if (ret == 0)
- rxq_ctrl->hairpin_status = 1;
+ rxq->hairpin_status = 1;
}
return ret;
}
dev->data->port_id, cur_queue);
return -rte_errno;
}
- if (rxq_ctrl->hairpin_status == 0) {
+ if (rxq->hairpin_status == 0) {
DRV_LOG(DEBUG, "port %u Rx queue %d is already unbound",
dev->data->port_id, cur_queue);
return 0;
rq_attr.rq_state = MLX5_SQC_STATE_RST;
ret = mlx5_devx_cmd_modify_rq(rxq_ctrl->obj->rq, &rq_attr);
if (ret == 0)
- rxq_ctrl->hairpin_status = 0;
+ rxq->hairpin_status = 0;
}
return ret;
}
rxq_ctrl = rxq->ctrl;
if (rxq_ctrl->type != MLX5_RXQ_TYPE_HAIRPIN)
continue;
- pp = rxq_ctrl->hairpin_conf.peers[0].port;
+ pp = rxq->hairpin_conf.peers[0].port;
if (pp >= RTE_MAX_ETHPORTS) {
rte_errno = ERANGE;
DRV_LOG(ERR, "port %hu queue %u peer port "