(unsigned int)sizeof(rss_hash_default_key),
"wrong RSS default key size.");
-/**
- * Check whether Multi-Packet RQ can be enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 1 if supported, negative errno value if not.
- */
-inline int
-mlx5_check_mprq_support(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (priv->config.mprq.enabled &&
- priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
- return 1;
- return -ENOTSUP;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the Rx queue.
- *
- * @param rxq
- * Pointer to receive queue structure.
- *
- * @return
- * 0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
-{
- return rxq->strd_num_n > 0;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_mprq_enabled(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t i;
- uint16_t n = 0;
- uint16_t n_ibv = 0;
-
- if (mlx5_check_mprq_support(dev) < 0)
- return 0;
- /* All the configured queues should be enabled. */
- for (i = 0; i < priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
-
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
- continue;
- n_ibv++;
- if (mlx5_rxq_mprq_enabled(rxq))
- ++n;
- }
- /* Multi-Packet RQ can't be partially configured. */
- MLX5_ASSERT(n == 0 || n == n_ibv);
- return n == n_ibv;
-}
-
/**
* Calculate the number of CQEs in CQ for the Rx queue.
*
(1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
(1 << rxq->elts_n);
const uint16_t q_mask = q_n - 1;
- uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi);
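+ /* In MPRQ mode the elts array is tracked by elts_ci, not rq_ci. */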
+ uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
+ rxq->elts_ci : rxq->rq_ci;
+ uint16_t used = q_n - (elts_ci - rxq->rq_pi);
uint16_t i;
DRV_LOG(DEBUG, "port %u Rx queue %u freeing %d WRs",
*/
if (mlx5_rxq_check_vec_support(rxq) > 0) {
for (i = 0; i < used; ++i)
- (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL;
- rxq->rq_pi = rxq->rq_ci;
+ (*rxq->elts)[(elts_ci + i) & q_mask] = NULL;
+ rxq->rq_pi = elts_ci;
}
for (i = 0; i != q_n; ++i) {
if ((*rxq->elts)[i] != NULL)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_BUFFER_SPLIT |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH);
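+ /* Buffer split is advertised only when Multi-Packet RQ is disabled. */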
+ if (!config->mprq.enabled)
+ offloads |= DEV_RX_OFFLOAD_BUFFER_SPLIT;
if (config->hw_fcs_strip)
offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
-
if (config->hw_csum)
offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
rte_io_wmb();
*rxq->cq_db = rte_cpu_to_be_32(rxq->cq_ci);
rte_io_wmb();
- /* Reset RQ consumer before moving queue ro READY state. */
+ /* Reset RQ consumer before moving queue to READY state. */
*rxq->rq_db = rte_cpu_to_be_32(0);
rte_io_wmb();
ret = priv->obj_ops.rxq_obj_modify(rxq_ctrl->obj, MLX5_RXQ_MOD_RST2RDY);
unsigned int count = 0;
struct rte_intr_handle *intr_handle = dev->intr_handle;
+ /* Representor shares dev->intr_handle with PF. */
+ if (priv->representor)
+ return 0;
if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
mlx5_rx_intr_vec_disable(dev);
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
+ /* Representor shares dev->intr_handle with PF. */
+ if (priv->representor)
+ return;
if (!dev->data->dev_conf.intr_conf.rxq)
return;
if (!intr_handle->intr_vec)
const struct rte_eth_rxseg_split *qs_seg = rx_seg;
unsigned int tail_len;
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, sizeof(*tmpl) +
- desc_n * sizeof(struct rte_mbuf *), 0, socket);
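+ /* Reserve extra room for MPRQ buffer pointers when MPRQ is enabled. */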
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
+ sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
+ (!!mprq_en) *
+ (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
+ 0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
dev->data->port_id, idx, mb_len, max_rx_pkt_len,
RTE_PKTMBUF_HEADROOM);
rte_errno = ENOSPC;
- return NULL;
+ goto error;
}
tmpl->type = MLX5_RXQ_TYPE_STANDARD;
if (mlx5_mr_btree_init(&tmpl->rxq.mr_ctrl.cache_bh,
mlx5_free(rxq_ctrl->obj);
rxq_ctrl->obj = NULL;
}
- if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
+ if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD) {
rxq_free_elts(rxq_ctrl);
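+ /* The queue elements were freed, mark the queue as stopped. */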
+ dev->data->rx_queue_state[idx] = RTE_ETH_QUEUE_STATE_STOPPED;
+ }
if (!__atomic_load_n(&rxq_ctrl->refcnt, __ATOMIC_RELAXED)) {
if (rxq_ctrl->type == MLX5_RXQ_TYPE_STANDARD)
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
* Pointer to Ethernet device.
* @param ind_table
* Indirection table to release.
+ * @param standalone
+ * Indirection table for a standalone queue.
*
* @return
* 1 while a reference on it exists, 0 when freed.
*/
int
mlx5_ind_table_obj_release(struct rte_eth_dev *dev,
- struct mlx5_ind_table_obj *ind_tbl)
+ struct mlx5_ind_table_obj *ind_tbl,
+ bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
unsigned int i;
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
- LIST_REMOVE(ind_tbl, next);
+ if (!standalone)
+ LIST_REMOVE(ind_tbl, next);
mlx5_free(ind_tbl);
return 0;
}
return ret;
}
+/**
+ * Set up the fields of an indirection table structure.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param ind_table
+ * Indirection table to modify.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+int
+mlx5_ind_table_obj_setup(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t queues_n = ind_tbl->queues_n;
+ uint16_t *queues = ind_tbl->queues;
+ unsigned int i, j;
+ int ret = 0, err;
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
+
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_get(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
+ }
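+ /* All queues are referenced, create the indirection table object. */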
+ ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
+ if (ret)
+ goto error;
+ __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ return 0;
+error:
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ rte_errno = err;
+ DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
+ return ret;
+}
+
/**
* Create an indirection table.
*
* Queues entering in the indirection table.
* @param queues_n
* Number of queues in the array.
+ * @param standalone
+ * Indirection table for a standalone queue.
*
* @return
* The Verbs/DevX object initialized, NULL otherwise and rte_errno is set.
*/
static struct mlx5_ind_table_obj *
mlx5_ind_table_obj_new(struct rte_eth_dev *dev, const uint16_t *queues,
- uint32_t queues_n)
+ uint32_t queues_n, bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_obj *ind_tbl;
- const unsigned int n = rte_is_power_of_2(queues_n) ?
- log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
- unsigned int i, j;
int ret;
ind_tbl = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*ind_tbl) +
return NULL;
}
ind_tbl->queues_n = queues_n;
- for (i = 0; i != queues_n; ++i) {
- struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]);
- if (!rxq)
- goto error;
- ind_tbl->queues[i] = queues[i];
+ ind_tbl->queues = (uint16_t *)(ind_tbl + 1);
+ memcpy(ind_tbl->queues, queues, queues_n * sizeof(*queues));
+ ret = mlx5_ind_table_obj_setup(dev, ind_tbl);
+ if (ret < 0) {
+ mlx5_free(ind_tbl);
+ return NULL;
}
- ret = priv->obj_ops.ind_table_new(dev, n, ind_tbl);
- if (ret < 0)
- goto error;
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
- LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+ if (!standalone)
+ LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
return ind_tbl;
-error:
- ret = rte_errno;
- for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
- rte_errno = ret;
- mlx5_free(ind_tbl);
- DEBUG("Port %u cannot create indirection table.", dev->data->port_id);
- return NULL;
}
/**
- * Get an Rx Hash queue.
+ * Modify an indirection table.
*
* @param dev
* Pointer to Ethernet device.
- * @param rss_conf
- * RSS configuration for the Rx hash queue.
+ * @param ind_table
+ * Indirection table to modify.
* @param queues
- * Queues entering in hash queue. In case of empty hash_fields only the
- * first queue index will be taken for the indirection table.
+ * Queues replacement for the indirection table.
* @param queues_n
- * Number of queues.
+ * Number of queues in the array.
+ * @param standalone
+ * Indirection table for a standalone queue.
*
* @return
- * An hash Rx queue index on success.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
-uint32_t
-mlx5_hrxq_get(struct rte_eth_dev *dev,
- const uint8_t *rss_key, uint32_t rss_key_len,
- uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n)
+int
+mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
+ struct mlx5_ind_table_obj *ind_tbl,
+ uint16_t *queues, const uint32_t queues_n,
+ bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
- uint32_t idx;
-
- queues_n = hash_fields ? queues_n : 1;
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
- hrxq, next) {
- struct mlx5_ind_table_obj *ind_tbl;
+ unsigned int i, j;
+ int ret = 0, err;
+ const unsigned int n = rte_is_power_of_2(queues_n) ?
+ log2above(queues_n) :
+ log2above(priv->config.ind_table_max_size);
- if (hrxq->shared)
- continue;
- if (hrxq->rss_key_len != rss_key_len)
- continue;
- if (memcmp(hrxq->rss_key, rss_key, rss_key_len))
- continue;
- if (hrxq->hash_fields != hash_fields)
- continue;
- ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
- if (!ind_tbl)
- continue;
- if (ind_tbl != hrxq->ind_table) {
- mlx5_ind_table_obj_release(dev, ind_tbl);
- continue;
+ MLX5_ASSERT(standalone);
+ RTE_SET_USED(standalone);
+ if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) > 1) {
+ /*
+ * Modification of indirection tables having more than one
+ * reference is not supported. Intended for standalone
+ * indirection tables only.
+ */
+ DEBUG("Port %u cannot modify indirection table (refcnt > 1).",
+ dev->data->port_id);
+ rte_errno = EINVAL;
+ return -rte_errno;
+ }
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_get(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
}
- __atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
- return idx;
}
+ MLX5_ASSERT(priv->obj_ops.ind_table_modify);
+ ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
+ if (ret)
+ goto error;
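+ /* Release the old queue references and adopt the new queue array. */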
+ for (j = 0; j < ind_tbl->queues_n; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ ind_tbl->queues_n = queues_n;
+ ind_tbl->queues = queues;
return 0;
+error:
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ rte_errno = err;
+ DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
+ return ret;
+}
+
+/**
+ * Match an Rx Hash queue.
+ *
+ * @param list
+ * Cache list pointer.
+ * @param entry
+ * Hash queue entry pointer.
+ * @param cb_ctx
+ * Context of the callback function.
+ *
+ * @return
+ * 0 if match, non-zero otherwise.
+ */
+int
+mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry,
+ void *cb_ctx)
+{
+ struct rte_eth_dev *dev = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+ struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+ struct mlx5_ind_table_obj *ind_tbl;
+
+ if (hrxq->rss_key_len != rss_desc->key_len ||
+ memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
+ hrxq->hash_fields != rss_desc->hash_fields)
+ return 1;
+ ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
+ rss_desc->queue_num);
+ if (ind_tbl)
+ mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+ return ind_tbl != hrxq->ind_table;
}
/**
queues, queues_n)) {
ind_tbl = hrxq->ind_table;
} else {
+ if (hrxq->standalone) {
+ /*
+ * Replacement of the indirection table is not supported for
+ * standalone hrxq objects (used by shared RSS).
+ */
+ rte_errno = ENOTSUP;
+ return -rte_errno;
+ }
ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
if (!ind_tbl)
- ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+ hrxq->standalone);
}
if (!ind_tbl) {
rte_errno = ENOMEM;
goto error;
}
if (ind_tbl != hrxq->ind_table) {
- mlx5_ind_table_obj_release(dev, hrxq->ind_table);
+ MLX5_ASSERT(!hrxq->standalone);
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+ hrxq->standalone);
hrxq->ind_table = ind_tbl;
}
hrxq->hash_fields = hash_fields;
return 0;
error:
err = rte_errno;
- if (ind_tbl != hrxq->ind_table)
- mlx5_ind_table_obj_release(dev, ind_tbl);
+ if (ind_tbl != hrxq->ind_table) {
+ MLX5_ASSERT(!hrxq->standalone);
+ mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
+ }
rte_errno = err;
return -rte_errno;
}
-/**
- * Release the hash Rx queue.
- *
- * @param dev
- * Pointer to Ethernet device.
- * @param hrxq
- * Index to Hash Rx queue to release.
- *
- * @return
- * 1 while a reference on it exists, 0 when freed.
- */
-int
-mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+static void
+__mlx5_hrxq_remove(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
- hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
- if (!hrxq)
- return 0;
- if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- mlx5_glue->destroy_flow_action(hrxq->action);
+ mlx5_glue->destroy_flow_action(hrxq->action);
#endif
- priv->obj_ops.hrxq_destroy(hrxq);
- mlx5_ind_table_obj_release(dev, hrxq->ind_table);
- ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs,
- hrxq_idx, hrxq, next);
- mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
- return 0;
+ priv->obj_ops.hrxq_destroy(hrxq);
+ if (!hrxq->standalone) {
+ mlx5_ind_table_obj_release(dev, hrxq->ind_table,
+ hrxq->standalone);
}
- claim_nonzero(mlx5_ind_table_obj_release(dev, hrxq->ind_table));
- return 1;
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
}
/**
- * Create an Rx Hash queue.
+ * Remove the hash Rx queue (cache list callback).
*
- * @param dev
- * Pointer to Ethernet device.
- * @param rss_key
- * RSS key for the Rx hash queue.
- * @param rss_key_len
- * RSS key length.
- * @param hash_fields
- * Verbs protocol hash field to make the RSS on.
- * @param queues
- * Queues entering in hash queue. In case of empty hash_fields only the
- * first queue index will be taken for the indirection table.
- * @param queues_n
- * Number of queues.
- * @param tunnel
- * Tunnel type.
- * @param shared
- * If true new object of Rx Hash queue will be used in shared action.
- *
- * @return
- * The DevX object initialized index, 0 otherwise and rte_errno is set.
+ * @param list
+ * Cache list pointer.
+ * @param entry
+ * Hash queue entry pointer.
*/
-uint32_t
-mlx5_hrxq_new(struct rte_eth_dev *dev,
- const uint8_t *rss_key, uint32_t rss_key_len,
- uint64_t hash_fields,
- const uint16_t *queues, uint32_t queues_n,
- int tunnel, bool shared)
+void
+mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry)
+{
+ struct rte_eth_dev *dev = list->ctx;
+ struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+
+ __mlx5_hrxq_remove(dev, hrxq);
+}
+
+static struct mlx5_hrxq *
+__mlx5_hrxq_create(struct rte_eth_dev *dev,
+ struct mlx5_flow_rss_desc *rss_desc)
{
struct mlx5_priv *priv = dev->data->dev_private;
+ const uint8_t *rss_key = rss_desc->key;
+ uint32_t rss_key_len = rss_desc->key_len;
+ bool standalone = !!rss_desc->shared_rss;
+ const uint16_t *queues =
+ standalone ? rss_desc->const_q : rss_desc->queue;
+ uint32_t queues_n = rss_desc->queue_num;
struct mlx5_hrxq *hrxq = NULL;
uint32_t hrxq_idx = 0;
- struct mlx5_ind_table_obj *ind_tbl;
+ struct mlx5_ind_table_obj *ind_tbl = rss_desc->ind_tbl;
int ret;
- queues_n = hash_fields ? queues_n : 1;
- ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+ queues_n = rss_desc->hash_fields ? queues_n : 1;
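+ /* Reuse the caller-provided indirection table or look up/create one. */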
if (!ind_tbl)
- ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n);
- if (!ind_tbl) {
- rte_errno = ENOMEM;
- return 0;
- }
+ ind_tbl = mlx5_ind_table_obj_get(dev, queues, queues_n);
+ if (!ind_tbl)
+ ind_tbl = mlx5_ind_table_obj_new(dev, queues, queues_n,
+ standalone);
+ if (!ind_tbl)
+ return NULL;
hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
if (!hrxq)
goto error;
- hrxq->shared = !!shared;
+ hrxq->standalone = standalone;
+ hrxq->idx = hrxq_idx;
hrxq->ind_table = ind_tbl;
hrxq->rss_key_len = rss_key_len;
- hrxq->hash_fields = hash_fields;
+ hrxq->hash_fields = rss_desc->hash_fields;
memcpy(hrxq->rss_key, rss_key, rss_key_len);
- ret = priv->obj_ops.hrxq_new(dev, hrxq, tunnel);
- if (ret < 0) {
- rte_errno = errno;
+ ret = priv->obj_ops.hrxq_new(dev, hrxq, rss_desc->tunnel);
+ if (ret < 0)
goto error;
- }
- __atomic_fetch_add(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
- ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_HRXQ], &priv->hrxqs, hrxq_idx,
- hrxq, next);
- return hrxq_idx;
+ return hrxq;
error:
- ret = rte_errno; /* Save rte_errno before cleanup. */
- mlx5_ind_table_obj_release(dev, ind_tbl);
+ if (!rss_desc->ind_tbl)
+ mlx5_ind_table_obj_release(dev, ind_tbl, standalone);
if (hrxq)
mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
- rte_errno = ret; /* Restore rte_errno. */
+ return NULL;
+}
+
+/**
+ * Create an Rx Hash queue.
+ *
+ * @param list
+ * Cache list pointer.
+ * @param entry
+ * Hash queue entry pointer.
+ * @param cb_ctx
+ * Context of the callback function.
+ *
+ * @return
+ * Queue entry on success, NULL otherwise.
+ */
+struct mlx5_cache_entry *
+mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
+ struct mlx5_cache_entry *entry __rte_unused,
+ void *cb_ctx)
+{
+ struct rte_eth_dev *dev = list->ctx;
+ struct mlx5_flow_cb_ctx *ctx = cb_ctx;
+ struct mlx5_flow_rss_desc *rss_desc = ctx->data;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = __mlx5_hrxq_create(dev, rss_desc);
+ return hrxq ? &hrxq->entry : NULL;
+}
+
+/**
+ * Get an Rx Hash queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param rss_desc
+ * RSS configuration for the Rx hash queue.
+ *
+ * @return
+ * A hash Rx queue index on success.
+ */
+uint32_t
+mlx5_hrxq_get(struct rte_eth_dev *dev,
+ struct mlx5_flow_rss_desc *rss_desc)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ struct mlx5_cache_entry *entry;
+ struct mlx5_flow_cb_ctx ctx = {
+ .data = rss_desc,
+ };
+
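+ /* Shared RSS uses a standalone hrxq, others go through the cache list. */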
+ if (rss_desc->shared_rss) {
+ hrxq = __mlx5_hrxq_create(dev, rss_desc);
+ } else {
+ entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+ if (!entry)
+ return 0;
+ hrxq = container_of(entry, typeof(*hrxq), entry);
+ }
+ return hrxq->idx;
+}
+
+/**
+ * Release the hash Rx queue.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param hrxq_idx
+ * Index to Hash Rx queue to release.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+int
+mlx5_hrxq_release(struct rte_eth_dev *dev, uint32_t hrxq_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+
+ hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq_idx);
+ if (!hrxq)
+ return 0;
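+ /* Cached hrxqs are released via the cache list, standalone ones directly. */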
+ if (!hrxq->standalone)
+ return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
+ __mlx5_hrxq_remove(dev, hrxq);
return 0;
}
struct mlx5_hrxq *hrxq = NULL;
int ret;
- if (priv->drop_queue.hrxq) {
- __atomic_fetch_add(&priv->drop_queue.hrxq->refcnt, 1,
- __ATOMIC_RELAXED);
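+ /* The drop queue hrxq is a singleton, reuse it if already created. */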
+ if (priv->drop_queue.hrxq)
return priv->drop_queue.hrxq;
- }
hrxq = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*hrxq), 0, SOCKET_ID_ANY);
if (!hrxq) {
DRV_LOG(WARNING,
ret = priv->obj_ops.drop_action_create(dev);
if (ret < 0)
goto error;
- __atomic_store_n(&hrxq->refcnt, 1, __ATOMIC_RELAXED);
return hrxq;
error:
if (hrxq) {
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq = priv->drop_queue.hrxq;
- if (__atomic_sub_fetch(&hrxq->refcnt, 1, __ATOMIC_RELAXED) == 0) {
- priv->obj_ops.drop_action_destroy(dev);
- mlx5_free(priv->drop_queue.rxq);
- mlx5_free(hrxq->ind_table);
- mlx5_free(hrxq);
- priv->drop_queue.rxq = NULL;
- priv->drop_queue.hrxq = NULL;
- }
+ if (!priv->drop_queue.hrxq)
+ return;
+ priv->obj_ops.drop_action_destroy(dev);
+ mlx5_free(priv->drop_queue.rxq);
+ mlx5_free(hrxq->ind_table);
+ mlx5_free(hrxq);
+ priv->drop_queue.rxq = NULL;
+ priv->drop_queue.hrxq = NULL;
}
/**
* @return
* The number of object not released.
*/
-int
+uint32_t
mlx5_hrxq_verify(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_hrxq *hrxq;
- uint32_t idx;
- int ret = 0;
- ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_HRXQ], priv->hrxqs, idx,
- hrxq, next) {
- DRV_LOG(DEBUG,
- "port %u hash Rx queue %p still referenced",
- dev->data->port_id, (void *)hrxq);
- ++ret;
- }
- return ret;
+ return mlx5_cache_list_get_entry_num(&priv->hrxqs);
}
/**