* Number of queues in the array.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
log2above(priv->config.ind_table_max_size);
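
The width computed above is always a power of two: a power-of-two queue count keeps its own log2, anything else stretches the table to the device's ind_table_max_size, and (later in this function, outside the hunk) the queue list is repeated to pad the remaining entries. Below is a minimal stand-alone sketch of that sizing rule, not part of the patch: log2above() mirrors the driver helper of the same name, and ind_table_max_size = 512 is only an assumed value for illustration.

#include <stdint.h>
#include <stdio.h>

/* Ceiling log2, same behaviour as the mlx5 log2above() helper. */
static unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}

int
main(void)
{
	const uint16_t queues[] = { 0, 1, 2, 3, 4, 5 }; /* 6 queues: not a power of two */
	const unsigned int queues_n = 6;
	const unsigned int ind_table_max_size = 512; /* assumed device limit */
	/* Same sizing rule as in mlx5_ind_table_obj_new(). */
	const unsigned int wq_n =
		(queues_n && !(queues_n & (queues_n - 1))) ?
		log2above(queues_n) : log2above(ind_table_max_size);
	unsigned int i;

	printf("indirection table entries: %u\n", 1u << wq_n); /* 512 here */
	/* The full function pads the table by wrapping around the queue
	 * list; show the first dozen slots of that mapping. */
	for (i = 0; i < 12; ++i)
		printf("entry %2u -> queue %u\n", i, queues[i % queues_n]);
	return 0;
}
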
* @return
* An indirection table if found.
*/
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, const uint16_t *queues,
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_get(struct rte_eth_dev *dev, const uint16_t *queues,
uint32_t queues_n)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
if ((ind_tbl->queues_n == queues_n) &&
* 1 while a reference on it exists, 0 when freed.
*/
static int
-mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
- struct mlx5_ind_table_ibv *ind_tbl)
+mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
{
unsigned int i;
 * The number of objects not released.
*/
int
-mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
int ret = 0;
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
DRV_LOG(DEBUG,
- "port %u Verbs indirection table %p still referenced",
+ "port %u indirection table obj %p still referenced",
dev->data->port_id, (void *)ind_tbl);
++ret;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
struct ibv_qp *qp;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
struct mlx5dv_qp_init_attr qp_init_attr;
int err;
queues_n = hash_fields ? queues_n : 1;
- ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
- ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
if (!ind_tbl) {
rte_errno = ENOMEM;
return NULL;
return hrxq;
error:
err = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_ind_table_ibv_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl);
if (qp)
claim_zero(mlx5_glue->destroy_qp(qp));
rte_errno = err; /* Restore rte_errno. */
queues_n = hash_fields ? queues_n : 1;
LIST_FOREACH(hrxq, &priv->hrxqs, next) {
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
if (hrxq->rss_key_len != rss_key_len)
continue;
continue;
if (hrxq->hash_fields != hash_fields)
continue;
- ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
continue;
if (ind_tbl != hrxq->ind_table) {
- mlx5_ind_table_ibv_release(dev, ind_tbl);
+ mlx5_ind_table_obj_release(dev, ind_tbl);
continue;
}
rte_atomic32_inc(&hrxq->refcnt);
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table);
LIST_REMOVE(hrxq, next);
rte_free(hrxq);
return 0;
}
- claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+ claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
return 1;
}
* Pointer to Ethernet device.
*
* @return
- * The Verbs object initialised, NULL otherwise and rte_errno is set.
+ * The Verbs/DevX object initialised, NULL otherwise and rte_errno is set.
*/
-static struct mlx5_ind_table_ibv *
-mlx5_ind_table_ibv_drop_new(struct rte_eth_dev *dev)
+static struct mlx5_ind_table_obj *
+mlx5_ind_table_obj_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
struct mlx5_rxq_obj *rxq;
- struct mlx5_ind_table_ibv tmpl;
+ struct mlx5_ind_table_obj tmpl;
rxq = mlx5_rxq_obj_drop_new(dev);
if (!rxq)
* Pointer to Ethernet device.
*/
static void
-mlx5_ind_table_ibv_drop_release(struct rte_eth_dev *dev)
+mlx5_ind_table_obj_drop_release(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl = priv->drop_queue.hrxq->ind_table;
+ struct mlx5_ind_table_obj *ind_tbl = priv->drop_queue.hrxq->ind_table;
claim_zero(mlx5_glue->destroy_rwq_ind_table(ind_tbl->ind_table));
mlx5_rxq_obj_drop_release(dev);
mlx5_hrxq_drop_new(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_ind_table_ibv *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl;
struct ibv_qp *qp;
struct mlx5_hrxq *hrxq;
rte_atomic32_inc(&priv->drop_queue.hrxq->refcnt);
return priv->drop_queue.hrxq;
}
- ind_tbl = mlx5_ind_table_ibv_drop_new(dev);
+ ind_tbl = mlx5_ind_table_obj_drop_new(dev);
if (!ind_tbl)
return NULL;
qp = mlx5_glue->create_qp_ex(priv->sh->ctx,
return hrxq;
error:
if (ind_tbl)
- mlx5_ind_table_ibv_drop_release(dev);
+ mlx5_ind_table_obj_drop_release(dev);
return NULL;
}
mlx5_glue->destroy_flow_action(hrxq->action);
#endif
claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
- mlx5_ind_table_ibv_drop_release(dev);
+ mlx5_ind_table_obj_drop_release(dev);
rte_free(hrxq);
priv->drop_queue.hrxq = NULL;
}
uint16_t dump_file_n; /* Number of dump files. */
};
+enum mlx5_ind_tbl_type {
+ MLX5_IND_TBL_TYPE_IBV,
+ MLX5_IND_TBL_TYPE_DEVX,
+};
+
/* Indirection table. */
-struct mlx5_ind_table_ibv {
- LIST_ENTRY(mlx5_ind_table_ibv) next; /* Pointer to the next element. */
+struct mlx5_ind_table_obj {
+ LIST_ENTRY(mlx5_ind_table_obj) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
- struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+ enum mlx5_ind_tbl_type type;
+ RTE_STD_C11
+ union {
+ struct ibv_rwq_ind_table *ind_table; /**< Indirection table. */
+ struct mlx5_devx_obj *rqt; /* DevX RQT object. */
+ };
uint32_t queues_n; /**< Number of queues in the list. */
uint16_t queues[]; /**< Queue list. */
};
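
The structural change is that the indirection table node no longer holds a bare Verbs pointer: the new type tag plus the anonymous union lets one list/refcount node back either a Verbs RWQ indirection table or a DevX RQT, and users are expected to dispatch on type before touching the handle. A minimal stand-alone sketch of that tagged-union pattern follows; the handle types and the "destroy" bodies are placeholders, not the real Verbs/DevX APIs.

#include <stdio.h>

enum tbl_type { TBL_TYPE_IBV, TBL_TYPE_DEVX };

/* Placeholder handles standing in for ibv_rwq_ind_table / mlx5_devx_obj. */
struct verbs_handle { int id; };
struct devx_handle { int id; };

struct ind_table_obj {
	enum tbl_type type; /* selects the active union member */
	union {
		struct verbs_handle *ind_table; /* Verbs RWQ indirection table */
		struct devx_handle *rqt; /* DevX RQT object */
	};
};

/* Release dispatches on ->type: only the active union member is valid. */
static void
ind_table_obj_release(struct ind_table_obj *tbl)
{
	if (tbl->type == TBL_TYPE_IBV)
		printf("destroy Verbs indirection table %d\n", tbl->ind_table->id);
	else
		printf("destroy DevX RQT %d\n", tbl->rqt->id);
}

int
main(void)
{
	struct verbs_handle vh = { .id = 1 };
	struct devx_handle dh = { .id = 2 };
	struct ind_table_obj a = { .type = TBL_TYPE_IBV, .ind_table = &vh };
	struct ind_table_obj b = { .type = TBL_TYPE_DEVX, .rqt = &dh };

	ind_table_obj_release(&a);
	ind_table_obj_release(&b);
	return 0;
}
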
struct mlx5_hrxq {
LIST_ENTRY(mlx5_hrxq) next; /* Pointer to the next element. */
rte_atomic32_t refcnt; /* Reference counter. */
- struct mlx5_ind_table_ibv *ind_table; /* Indirection table. */
+ struct mlx5_ind_table_obj *ind_table; /* Indirection table. */
struct ibv_qp *qp; /* Verbs queue pair. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
void *action; /* DV QP action pointer. */
int mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx);
int mlx5_rxq_verify(struct rte_eth_dev *dev);
int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl);
-int mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev);
+int mlx5_ind_table_obj_verify(struct rte_eth_dev *dev);
struct mlx5_hrxq *mlx5_hrxq_new(struct rte_eth_dev *dev,
const uint8_t *rss_key, uint32_t rss_key_len,
uint64_t hash_fields,