* Pointer to RX queue structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, negative errno value on failure.
*/
static int
rxq_alloc_elts_sprq(struct mlx5_rxq_ctrl *rxq_ctrl)
* Pointer to RX queue structure.
*
* @return
- * 0 on success, errno value on failure.
+ * 0 on success, negative errno value on failure.
*/
int
rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl)
*/
if (mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq))
ret = rxq_alloc_elts_mprq(rxq_ctrl);
- return (ret || rxq_alloc_elts_sprq(rxq_ctrl));
+ if (ret == 0)
+ ret = rxq_alloc_elts_sprq(rxq_ctrl);
+ return ret;
}
/**
static int
mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx)
{
- struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (!(*priv->rxqs)[idx]) {
+ if (rxq == NULL) {
rte_errno = EINVAL;
return -rte_errno;
}
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- return (__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED) == 1);
+ return (__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED) == 1);
}
/* Fetches and drops all SW-owned and error CQEs to synchronize CQ. */
struct rte_mempool *mp)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
struct rte_eth_rxseg_split *rx_seg =
(struct rte_eth_rxseg_split *)conf->rx_seg;
struct rte_eth_rxseg_split rx_single = {.mp = mp};
res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
if (res)
return res;
- rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg, n_seg);
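+ /* Allocate the per-queue private structure; mlx5_rxq_new() below links it
+  * to the shared queue control structure as an owner.
+  */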
+ rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ DRV_LOG(ERR, "port %u unable to allocate rx queue index %u private data",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg, n_seg);
if (!rxq_ctrl) {
- DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
dev->data->port_id, idx);
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
rte_errno = ENOMEM;
return -rte_errno;
}
const struct rte_eth_hairpin_conf *hairpin_conf)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl =
- container_of(rxq, struct mlx5_rxq_ctrl, rxq);
+ struct mlx5_rxq_priv *rxq;
+ struct mlx5_rxq_ctrl *rxq_ctrl;
int res;
res = mlx5_rx_queue_pre_setup(dev, idx, &desc);
return -rte_errno;
}
}
- rxq_ctrl = mlx5_rxq_hairpin_new(dev, idx, desc, hairpin_conf);
+ rxq = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*rxq), 0,
+ SOCKET_ID_ANY);
+ if (!rxq) {
+ DRV_LOG(ERR, "port %u unable to allocate hairpin rx queue index %u private data",
+ dev->data->port_id, idx);
+ rte_errno = ENOMEM;
+ return -rte_errno;
+ }
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ rxq_ctrl = mlx5_rxq_hairpin_new(dev, rxq, desc, hairpin_conf);
if (!rxq_ctrl) {
- DRV_LOG(ERR, "port %u unable to allocate queue index %u",
+ DRV_LOG(ERR, "port %u unable to allocate hairpin queue index %u",
dev->data->port_id, idx);
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
rte_errno = ENOMEM;
return -rte_errno;
}
- DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
+ DRV_LOG(DEBUG, "port %u adding hairpin Rx queue %u to list",
dev->data->port_id, idx);
(*priv->rxqs)[idx] = &rxq_ctrl->rxq;
return 0;
if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
mlx5_rx_intr_vec_disable(dev);
- intr_handle->intr_vec = mlx5_malloc(0,
- n * sizeof(intr_handle->intr_vec[0]),
- 0, SOCKET_ID_ANY);
- if (intr_handle->intr_vec == NULL) {
+ if (rte_intr_vec_list_alloc(intr_handle, NULL, n)) {
DRV_LOG(ERR,
"port %u failed to allocate memory for interrupt"
" vector, Rx interrupts will not be supported",
rte_errno = ENOMEM;
return -rte_errno;
}
- intr_handle->type = RTE_INTR_HANDLE_EXT;
+
+ if (rte_intr_type_set(intr_handle, RTE_INTR_HANDLE_EXT))
+ return -rte_errno;
+
for (i = 0; i != n; ++i) {
/* This rxq obj must not be released in this function. */
- struct mlx5_rxq_ctrl *rxq_ctrl = mlx5_rxq_get(dev, i);
- struct mlx5_rxq_obj *rxq_obj = rxq_ctrl ? rxq_ctrl->obj : NULL;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, i);
+ struct mlx5_rxq_obj *rxq_obj = rxq ? rxq->ctrl->obj : NULL;
int rc;
/* Skip queues that cannot request interrupts. */
if (!rxq_obj || (!rxq_obj->ibv_channel &&
!rxq_obj->devx_channel)) {
/* Use invalid intr_vec[] index to disable entry. */
- intr_handle->intr_vec[i] =
- RTE_INTR_VEC_RXTX_OFFSET +
- RTE_MAX_RXTX_INTR_VEC_ID;
- /* Decrease the rxq_ctrl's refcnt */
- if (rxq_ctrl)
- mlx5_rxq_release(dev, i);
+ if (rte_intr_vec_list_index_set(intr_handle, i,
+ RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID))
+ return -rte_errno;
continue;
}
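+ /* Hold a queue reference; mlx5_rx_intr_vec_disable() releases it. */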
+ mlx5_rxq_ref(dev, i);
if (count >= RTE_MAX_RXTX_INTR_VEC_ID) {
DRV_LOG(ERR,
"port %u too many Rx queues for interrupt"
mlx5_rx_intr_vec_disable(dev);
return -rte_errno;
}
- intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count;
- intr_handle->efds[count] = rxq_obj->fd;
+
+ if (rte_intr_vec_list_index_set(intr_handle, i,
+ RTE_INTR_VEC_RXTX_OFFSET + count))
+ return -rte_errno;
+ if (rte_intr_efds_index_set(intr_handle, count,
+ rxq_obj->fd))
+ return -rte_errno;
count++;
}
if (!count)
mlx5_rx_intr_vec_disable(dev);
- else
- intr_handle->nb_efd = count;
+ else if (rte_intr_nb_efd_set(intr_handle, count))
+ return -rte_errno;
return 0;
}
if (!dev->data->dev_conf.intr_conf.rxq)
return;
- if (!intr_handle->intr_vec)
+ if (rte_intr_vec_list_index_get(intr_handle, 0) < 0)
goto free;
for (i = 0; i != n; ++i) {
- if (intr_handle->intr_vec[i] == RTE_INTR_VEC_RXTX_OFFSET +
- RTE_MAX_RXTX_INTR_VEC_ID)
+ if (rte_intr_vec_list_index_get(intr_handle, i) ==
+ RTE_INTR_VEC_RXTX_OFFSET + RTE_MAX_RXTX_INTR_VEC_ID)
continue;
/**
* Need to access directly the queue to release the reference
* kept in mlx5_rx_intr_vec_enable().
*/
- mlx5_rxq_release(dev, i);
+ mlx5_rxq_deref(dev, i);
}
free:
rte_intr_free_epoll_fd(intr_handle);
- if (intr_handle->intr_vec)
- mlx5_free(intr_handle->intr_vec);
- intr_handle->nb_efd = 0;
- intr_handle->intr_vec = NULL;
+
+ rte_intr_vec_list_free(intr_handle);
+
+ rte_intr_nb_efd_set(intr_handle, 0);
}
/**
int
mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
- struct mlx5_rxq_ctrl *rxq_ctrl;
-
- rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
- if (!rxq_ctrl)
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
+
+ if (!rxq)
goto error;
- if (rxq_ctrl->irq) {
- if (!rxq_ctrl->obj) {
- mlx5_rxq_release(dev, rx_queue_id);
+ if (rxq->ctrl->irq) {
+ if (!rxq->ctrl->obj)
goto error;
- }
- mlx5_arm_cq(&rxq_ctrl->rxq, rxq_ctrl->rxq.cq_arm_sn);
+ mlx5_arm_cq(&rxq->ctrl->rxq, rxq->ctrl->rxq.cq_arm_sn);
}
- mlx5_rxq_release(dev, rx_queue_id);
return 0;
error:
rte_errno = EINVAL;
mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, rx_queue_id);
int ret = 0;
- rxq_ctrl = mlx5_rxq_get(dev, rx_queue_id);
- if (!rxq_ctrl) {
+ if (!rxq) {
rte_errno = EINVAL;
return -rte_errno;
}
- if (!rxq_ctrl->obj)
+ if (!rxq->ctrl->obj)
goto error;
- if (rxq_ctrl->irq) {
- ret = priv->obj_ops.rxq_event_get(rxq_ctrl->obj);
+ if (rxq->ctrl->irq) {
+ ret = priv->obj_ops.rxq_event_get(rxq->ctrl->obj);
if (ret < 0)
goto error;
- rxq_ctrl->rxq.cq_arm_sn++;
+ rxq->ctrl->rxq.cq_arm_sn++;
}
- mlx5_rxq_release(dev, rx_queue_id);
return 0;
error:
/**
rte_errno = errno;
else
rte_errno = EINVAL;
- ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_rxq_release(dev, rx_queue_id);
- if (ret != EAGAIN)
+ if (rte_errno != EAGAIN)
DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d",
dev->data->port_id, rx_queue_id);
- rte_errno = ret; /* Restore rte_errno. */
return -rte_errno;
}
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * RX queue index.
+ * @param rxq
+ * RX queue private data.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
{
+ uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
rte_errno = ENOMEM;
return NULL;
}
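+ /* Link the control structure to this queue as its first owner. */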
+ LIST_INIT(&tmpl->owners);
+ rxq->ctrl = tmpl;
+ LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
/*
* Build the array of actual buffer offsets and lengths.
tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
tmpl->rxq.port_id = dev->data->port_id;
+ tmpl->sh = priv->sh;
tmpl->priv = priv;
tmpl->rxq.mp = rx_seg[0].mp;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.uar_lock_cq = &priv->sh->uar_lock_cq;
#endif
tmpl->rxq.idx = idx;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ mlx5_rxq_ref(dev, idx);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
*
* @param dev
* Pointer to Ethernet device.
- * @param idx
- * RX queue index.
+ * @param rxq
+ * RX queue.
* @param desc
* Number of descriptors to configure in queue.
* @param hairpin_conf
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+mlx5_rxq_hairpin_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
+ uint16_t desc,
const struct rte_eth_hairpin_conf *hairpin_conf)
{
+ uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
rte_errno = ENOMEM;
return NULL;
}
+ LIST_INIT(&tmpl->owners);
+ rxq->ctrl = tmpl;
+ LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
tmpl->type = MLX5_RXQ_TYPE_HAIRPIN;
tmpl->socket = SOCKET_ID_ANY;
tmpl->rxq.rss_hash = 0;
tmpl->rxq.port_id = dev->data->port_id;
+ tmpl->sh = priv->sh;
tmpl->priv = priv;
tmpl->rxq.mp = NULL;
tmpl->rxq.elts_n = log2above(desc);
tmpl->rxq.elts = NULL;
tmpl->rxq.mr_ctrl.cache_bh = (struct mlx5_mr_btree) { 0 };
- tmpl->hairpin_conf = *hairpin_conf;
tmpl->rxq.idx = idx;
- __atomic_fetch_add(&tmpl->refcnt, 1, __ATOMIC_RELAXED);
+ rxq->hairpin_conf = *hairpin_conf;
+ mlx5_rxq_ref(dev, idx);
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
}
+/**
+ * Increase Rx queue reference count.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_priv *
+mlx5_rxq_ref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ if (rxq != NULL)
+ __atomic_fetch_add(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+ return rxq;
+}
+
+/**
+ * Dereference a Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * Updated reference count.
+ */
+uint32_t
+mlx5_rxq_deref(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ if (rxq == NULL)
+ return 0;
+ return __atomic_sub_fetch(&rxq->refcnt, 1, __ATOMIC_RELAXED);
+}
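+
+/*
+ * Illustrative use of the pair above: a caller that must keep a queue
+ * alive across an operation holds a reference for its duration:
+ *
+ *     struct mlx5_rxq_priv *rxq = mlx5_rxq_ref(dev, idx);
+ *
+ *     if (rxq != NULL) {
+ *             ... use rxq->ctrl ...
+ *             mlx5_rxq_deref(dev, idx);
+ *     }
+ */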
+
/**
* Get a Rx queue.
*
* @return
* A pointer to the queue if it exists, NULL otherwise.
*/
-struct mlx5_rxq_ctrl *
+struct mlx5_rxq_priv *
mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx];
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
- if (rxq_data) {
- rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
- __atomic_fetch_add(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED);
- }
- return rxq_ctrl;
+ if (priv->rxq_privs == NULL)
+ return NULL;
+ return (*priv->rxq_privs)[idx];
+}
+
+/**
+ * Get Rx queue shareable control.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue control if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_ctrl *
+mlx5_rxq_ctrl_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ return rxq == NULL ? NULL : rxq->ctrl;
+}
+
+/**
+ * Get Rx queue shareable data.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ *
+ * @return
+ * A pointer to the queue data if it exists, NULL otherwise.
+ */
+struct mlx5_rxq_data *
+mlx5_rxq_data_get(struct rte_eth_dev *dev, uint16_t idx)
+{
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+
+ return rxq == NULL ? NULL : &rxq->ctrl->rxq;
}
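+
+/*
+ * Note on the getters above: mlx5_rxq_get() returns the per-port private
+ * structure, mlx5_rxq_ctrl_get() the control structure that several owners
+ * may share, and mlx5_rxq_data_get() the datapath structure embedded in
+ * that control. None of them takes a reference; use mlx5_rxq_ref() and
+ * mlx5_rxq_deref() when the queue must outlive the call.
+ */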
/**
mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
+ struct mlx5_rxq_ctrl *rxq_ctrl = rxq->ctrl;
if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
return 0;
- rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
+ if (mlx5_rxq_deref(dev, idx) > 1)
return 1;
if (rxq_ctrl->obj) {
priv->obj_ops.rxq_obj_release(rxq_ctrl->obj);
rxq_free_elts(rxq_ctrl);
dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
}
- if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
+ if (!__atomic_load_n(&rxq->refcnt, __ATOMIC_RELAXED)) {
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
+ LIST_REMOVE(rxq, owner_entry);
LIST_REMOVE(rxq_ctrl, next);
mlx5_free(rxq_ctrl);
(*priv->rxqs)[idx] = NULL;
+ mlx5_free(rxq);
+ (*priv->rxq_privs)[idx] = NULL;
}
return 0;
}
mlx5_rxq_get_hairpin_conf(struct rte_eth_dev *dev, uint16_t idx)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_rxq_ctrl *rxq_ctrl = NULL;
+ struct mlx5_rxq_priv *rxq = mlx5_rxq_get(dev, idx);
- if (idx < priv->rxqs_n && (*priv->rxqs)[idx]) {
- rxq_ctrl = container_of((*priv->rxqs)[idx],
- struct mlx5_rxq_ctrl,
- rxq);
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
- return &rxq_ctrl->hairpin_conf;
+ if (idx < priv->rxqs_n && rxq != NULL) {
+ if (rxq->ctrl->type == MLX5_RXQ_TYPE_HAIRPIN)
+ return &rxq->hairpin_conf;
}
return NULL;
}
return 1;
priv->obj_ops.ind_table_destroy(ind_tbl);
for (i = 0; i != ind_tbl->queues_n; ++i)
- claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
mlx5_free(ind_tbl);
return 0;
}
log2above(priv->config.ind_table_max_size);
for (i = 0; i != queues_n; ++i) {
- if (!mlx5_rxq_get(dev, queues[i])) {
+ if (mlx5_rxq_ref(dev, queues[i]) == NULL) {
ret = -rte_errno;
goto error;
}
error:
err = rte_errno;
for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ mlx5_rxq_deref(dev, ind_tbl->queues[j]);
rte_errno = err;
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
return ind_tbl;
}
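+/**
+ * Check whether an indirection table is referenced by no more than one
+ * user, since modifying a table that other objects still reference is
+ * unsupported.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param ind_tbl
+ *   Indirection table to check.
+ *
+ * @return
+ *   0 if the table is standalone, a negative errno value otherwise and
+ *   rte_errno is set.
+ */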
+static int
+mlx5_ind_table_obj_check_standalone(struct rte_eth_dev *dev __rte_unused,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ uint32_t refcnt;
+
+ refcnt = __atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED);
+ if (refcnt <= 1)
+ return 0;
+ /*
+ * Modification of indirection tables having more than 1
+ * reference is unsupported.
+ */
+ DRV_LOG(DEBUG,
+ "Port %u cannot modify indirection table %p (refcnt %u > 1).",
+ dev->data->port_id, (void *)ind_tbl, refcnt);
+ rte_errno = EINVAL;
+ return -rte_errno;
+}
+
/**
* Modify an indirection table.
*
bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
- unsigned int i, j;
+ unsigned int i;
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
MLX5_ASSERT(standalone);
RTE_SET_USED(standalone);
- if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
- /*
- * Modification of indirection ntables having more than 1
- * reference unsupported. Intended for standalone indirection
- * tables only.
- */
- DRV_LOG(DEBUG,
- "Port %u cannot modify indirection table (refcnt> 1).",
- dev->data->port_id);
- rte_errno = EINVAL;
+ if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
return -rte_errno;
- }
for (i = 0; i != queues_n; ++i) {
if (!mlx5_rxq_get(dev, queues[i])) {
ret = -rte_errno;
ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
if (ret)
goto error;
- for (j = 0; j < ind_tbl->queues_n; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
ind_tbl->queues_n = queues_n;
ind_tbl->queues = queues;
return 0;
error:
err = rte_errno;
- for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, queues[j]);
rte_errno = err;
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
return ret;
}
+/**
+ * Attach an indirection table to its queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_table
+ * Indirection table to attach.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ unsigned int i;
+ int ret;
+
+ ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
+ ind_tbl->queues_n, true);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
+ dev->data->port_id, (void *)ind_tbl);
+ return ret;
+ }
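+ /* Take a reference on each queue for as long as the table stays attached. */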
+ for (i = 0; i < ind_tbl->queues_n; i++)
+ mlx5_rxq_get(dev, ind_tbl->queues[i]);
+ return 0;
+}
+
+/**
+ * Detach an indirection table from its queues.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_table
+ * Indirection table to detach.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_detach(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
+ log2above(ind_tbl->queues_n) :
+ log2above(priv->config.ind_table_max_size);
+ unsigned int i;
+ int ret;
+
+ ret = mlx5_ind_table_obj_check_standalone(dev, ind_tbl);
+ if (ret != 0)
+ return ret;
+ MLX5_ASSERT(priv->obj_ops.ind_table_modify);
+ ret = priv->obj_ops.ind_table_modify(dev, n, NULL, 0, ind_tbl);
+ if (ret != 0) {
+ DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
+ dev->data->port_id, (void *)ind_tbl);
+ return ret;
+ }
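+ /* Drop the per-queue references taken when the table was attached. */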
+ for (i = 0; i < ind_tbl->queues_n; i++)
+ mlx5_rxq_release(dev, ind_tbl->queues[i]);
+ return ret;
+}
+
int
mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
void *cb_ctx)