#include <rte_mbuf.h>
#include <rte_malloc.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include "mlx5_defs.h"
#include "mlx5.h"
-#include "mlx5_rxtx.h"
+#include "mlx5_tx.h"
+#include "mlx5_rx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
(unsigned int)sizeof(rss_hash_default_key),
"wrong RSS default key size.");
-/**
- * Check whether Multi-Packet RQ can be enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 1 if supported, negative errno value if not.
- */
-inline int
-mlx5_check_mprq_support(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
-
- if (priv->config.mprq.enabled &&
- priv->rxqs_n >= priv->config.mprq.min_rxqs_num)
- return 1;
- return -ENOTSUP;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the Rx queue.
- *
- * @param rxq
- * Pointer to receive queue structure.
- *
- * @return
- * 0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_rxq_mprq_enabled(struct mlx5_rxq_data *rxq)
-{
- return rxq->strd_num_n > 0;
-}
-
-/**
- * Check whether Multi-Packet RQ is enabled for the device.
- *
- * @param dev
- * Pointer to Ethernet device.
- *
- * @return
- * 0 if disabled, otherwise enabled.
- */
-inline int
-mlx5_mprq_enabled(struct rte_eth_dev *dev)
-{
- struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t i;
- uint16_t n = 0;
- uint16_t n_ibv = 0;
-
- if (mlx5_check_mprq_support(dev) < 0)
- return 0;
- /* All the configured queues should be enabled. */
- for (i = 0; i < priv->rxqs_n; ++i) {
- struct mlx5_rxq_data *rxq = (*priv->rxqs)[i];
- struct mlx5_rxq_ctrl *rxq_ctrl = container_of
- (rxq, struct mlx5_rxq_ctrl, rxq);
-
- if (rxq == NULL || rxq_ctrl->type != MLX5_RXQ_TYPE_STANDARD)
- continue;
- n_ibv++;
- if (mlx5_rxq_mprq_enabled(rxq))
- ++n;
- }
- /* Multi-Packet RQ can't be partially configured. */
- MLX5_ASSERT(n == 0 || n == n_ibv);
- return n == n_ibv;
-}
-
/**
* Calculate the number of CQEs in CQ for the Rx queue.
*
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
- RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH);
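+	/* Buffer split offload is only advertised when Multi-Packet RQ is disabled. */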
+ if (!config->mprq.enabled)
+ offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
if (config->hw_fcs_strip)
offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
-
if (config->hw_csum)
offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
unsigned int count = 0;
struct rte_intr_handle *intr_handle = dev->intr_handle;
- /* Representor shares dev->intr_handle with PF. */
- if (priv->representor)
- return 0;
if (!dev->data->dev_conf.intr_conf.rxq)
return 0;
mlx5_rx_intr_vec_disable(dev);
unsigned int rxqs_n = priv->rxqs_n;
unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID);
- /* Representor shares dev->intr_handle with PF. */
- if (priv->representor)
- return;
if (!dev->data->dev_conf.intr_conf.rxq)
return;
if (!intr_handle->intr_vec)
snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
0, NULL, NULL, mlx5_mprq_buf_init,
- (void *)(uintptr_t)(1 << strd_num_n),
+ (void *)((uintptr_t)1 << strd_num_n),
dev->device->numa_node, 0);
if (mp == NULL) {
DRV_LOG(ERR,
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
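+	/* Release the MR cache B-tree allocated for this Rx queue on the error path. */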
+ mlx5_mr_btree_free(&tmpl->rxq.mr_ctrl.cache_bh);
mlx5_free(tmpl);
return NULL;
}
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *rxq_ctrl;
- if (!(*priv->rxqs)[idx])
+ if (priv->rxqs == NULL || (*priv->rxqs)[idx] == NULL)
return 0;
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
if (__atomic_sub_fetch(&rxq_ctrl->refcnt, 1, __ATOMIC_RELAXED) > 1)
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_ind_table_obj *ind_tbl;
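+	/* Search the shared list of indirection tables under the reader lock. */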
+ rte_rwlock_read_lock(&priv->ind_tbls_lock);
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
if ((ind_tbl->queues_n == queues_n) &&
(memcmp(ind_tbl->queues, queues,
ind_tbl->queues_n * sizeof(ind_tbl->queues[0]))
- == 0))
+ == 0)) {
+ __atomic_fetch_add(&ind_tbl->refcnt, 1,
+ __ATOMIC_RELAXED);
break;
+ }
}
- if (ind_tbl) {
- unsigned int i;
-
- __atomic_fetch_add(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
- for (i = 0; i != ind_tbl->queues_n; ++i)
- mlx5_rxq_get(dev, ind_tbl->queues[i]);
- }
+ rte_rwlock_read_unlock(&priv->ind_tbls_lock);
return ind_tbl;
}
bool standalone)
{
struct mlx5_priv *priv = dev->data->dev_private;
- unsigned int i;
+ unsigned int i, ret;
- if (__atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED) == 0)
- priv->obj_ops.ind_table_destroy(ind_tbl);
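+	/* Drop one reference under the write lock; unlink a non-standalone
+	 * table from the shared list on the last release.
+	 */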
+ rte_rwlock_write_lock(&priv->ind_tbls_lock);
+ ret = __atomic_sub_fetch(&ind_tbl->refcnt, 1, __ATOMIC_RELAXED);
+ if (!ret && !standalone)
+ LIST_REMOVE(ind_tbl, next);
+ rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+ if (ret)
+ return 1;
+ priv->obj_ops.ind_table_destroy(ind_tbl);
for (i = 0; i != ind_tbl->queues_n; ++i)
claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
- if (__atomic_load_n(&ind_tbl->refcnt, __ATOMIC_RELAXED) == 0) {
- if (!standalone)
- LIST_REMOVE(ind_tbl, next);
- mlx5_free(ind_tbl);
- return 0;
- }
- return 1;
+ mlx5_free(ind_tbl);
+ return 0;
}
/**
struct mlx5_ind_table_obj *ind_tbl;
int ret = 0;
+ rte_rwlock_read_lock(&priv->ind_tbls_lock);
LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
DRV_LOG(DEBUG,
"port %u indirection table obj %p still referenced",
dev->data->port_id, (void *)ind_tbl);
++ret;
}
+ rte_rwlock_read_unlock(&priv->ind_tbls_lock);
return ret;
}
for (j = 0; j < i; j++)
mlx5_rxq_release(dev, ind_tbl->queues[j]);
rte_errno = err;
- DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
+	DRV_LOG(DEBUG, "Port %u cannot set up indirection table.",
+ dev->data->port_id);
return ret;
}
mlx5_free(ind_tbl);
return NULL;
}
- if (!standalone)
+ if (!standalone) {
+ rte_rwlock_write_lock(&priv->ind_tbls_lock);
LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next);
+ rte_rwlock_write_unlock(&priv->ind_tbls_lock);
+ }
return ind_tbl;
}
* reference unsupported. Intended for standalone indirection
* tables only.
*/
- DEBUG("Port %u cannot modify indirection table (refcnt> 1).",
- dev->data->port_id);
+ DRV_LOG(DEBUG,
+		"Port %u cannot modify indirection table (refcnt > 1).",
+ dev->data->port_id);
rte_errno = EINVAL;
return -rte_errno;
}
error:
err = rte_errno;
for (j = 0; j < i; j++)
- mlx5_rxq_release(dev, ind_tbl->queues[j]);
+ mlx5_rxq_release(dev, queues[j]);
rte_errno = err;
- DEBUG("Port %u cannot setup indirection table.", dev->data->port_id);
+	DRV_LOG(DEBUG, "Port %u cannot set up indirection table.",
+ dev->data->port_id);
return ret;
}
-/**
- * Match an Rx Hash queue.
- *
- * @param list
- * Cache list pointer.
- * @param entry
- * Hash queue entry pointer.
- * @param cb_ctx
- * Context of the callback function.
- *
- * @return
- * 0 if match, none zero if not match.
- */
int
-mlx5_hrxq_match_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry,
+mlx5_hrxq_match_cb(void *tool_ctx __rte_unused, struct mlx5_list_entry *entry,
void *cb_ctx)
{
- struct rte_eth_dev *dev = list->ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_rss_desc *rss_desc = ctx->data;
struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
- struct mlx5_ind_table_obj *ind_tbl;
- if (hrxq->rss_key_len != rss_desc->key_len ||
+ return (hrxq->rss_key_len != rss_desc->key_len ||
memcmp(hrxq->rss_key, rss_desc->key, rss_desc->key_len) ||
- hrxq->hash_fields != rss_desc->hash_fields)
- return 1;
- ind_tbl = mlx5_ind_table_obj_get(dev, rss_desc->queue,
- rss_desc->queue_num);
- if (ind_tbl)
- mlx5_ind_table_obj_release(dev, ind_tbl, hrxq->standalone);
- return ind_tbl != hrxq->ind_table;
+ hrxq->hash_fields != rss_desc->hash_fields ||
+ hrxq->ind_table->queues_n != rss_desc->queue_num ||
+ memcmp(hrxq->ind_table->queues, rss_desc->queue,
+ rss_desc->queue_num * sizeof(rss_desc->queue[0])));
}
/**
* Index to Hash Rx queue to release.
*
* @param list
- * Cache list pointer.
+ * mlx5 list pointer.
* @param entry
* Hash queue entry pointer.
*/
void
-mlx5_hrxq_remove_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry)
+mlx5_hrxq_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
__mlx5_hrxq_remove(dev, hrxq);
return NULL;
}
-/**
- * Create an Rx Hash queue.
- *
- * @param list
- * Cache list pointer.
- * @param entry
- * Hash queue entry pointer.
- * @param cb_ctx
- * Context of the callback function.
- *
- * @return
- * queue entry on success, NULL otherwise.
- */
-struct mlx5_cache_entry *
-mlx5_hrxq_create_cb(struct mlx5_cache_list *list,
- struct mlx5_cache_entry *entry __rte_unused,
- void *cb_ctx)
+struct mlx5_list_entry *
+mlx5_hrxq_create_cb(void *tool_ctx, void *cb_ctx)
{
- struct rte_eth_dev *dev = list->ctx;
+ struct rte_eth_dev *dev = tool_ctx;
struct mlx5_flow_cb_ctx *ctx = cb_ctx;
struct mlx5_flow_rss_desc *rss_desc = ctx->data;
struct mlx5_hrxq *hrxq;
return hrxq ? &hrxq->entry : NULL;
}
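+/**
+ * Clone a hash Rx queue entry.
+ *
+ * Allocates a new hash Rx queue from the indexed pool and copies the given
+ * entry into it, including the trailing RSS key.
+ *
+ * @param tool_ctx
+ *   Pointer to the Ethernet device.
+ * @param entry
+ *   Hash queue entry to clone.
+ * @param cb_ctx
+ *   Context of the callback function (unused).
+ *
+ * @return
+ *   Cloned entry on success, NULL on allocation failure.
+ */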
+struct mlx5_list_entry *
+mlx5_hrxq_clone_cb(void *tool_ctx, struct mlx5_list_entry *entry,
+ void *cb_ctx __rte_unused)
+{
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq;
+ uint32_t hrxq_idx = 0;
+
+ hrxq = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_HRXQ], &hrxq_idx);
+ if (!hrxq)
+ return NULL;
+ memcpy(hrxq, entry, sizeof(*hrxq) + MLX5_RSS_HASH_KEY_LEN);
+ hrxq->idx = hrxq_idx;
+ return &hrxq->entry;
+}
+
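+/**
+ * Free a cloned hash Rx queue entry.
+ *
+ * @param tool_ctx
+ *   Pointer to the Ethernet device.
+ * @param entry
+ *   Cloned hash queue entry to free.
+ */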
+void
+mlx5_hrxq_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry)
+{
+ struct rte_eth_dev *dev = tool_ctx;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_hrxq *hrxq = container_of(entry, typeof(*hrxq), entry);
+
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_HRXQ], hrxq->idx);
+}
+
/**
* Get an Rx Hash queue.
*
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_hrxq *hrxq;
- struct mlx5_cache_entry *entry;
+ struct mlx5_list_entry *entry;
struct mlx5_flow_cb_ctx ctx = {
.data = rss_desc,
};
if (rss_desc->shared_rss) {
hrxq = __mlx5_hrxq_create(dev, rss_desc);
} else {
- entry = mlx5_cache_register(&priv->hrxqs, &ctx);
+ entry = mlx5_list_register(priv->hrxqs, &ctx);
if (!entry)
return 0;
hrxq = container_of(entry, typeof(*hrxq), entry);
}
- return hrxq->idx;
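+	/* hrxq is NULL when the shared RSS hash Rx queue could not be created. */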
+ if (hrxq)
+ return hrxq->idx;
+ return 0;
}
/**
if (!hrxq)
return 0;
if (!hrxq->standalone)
- return mlx5_cache_unregister(&priv->hrxqs, &hrxq->entry);
+ return mlx5_list_unregister(priv->hrxqs, &hrxq->entry);
__mlx5_hrxq_remove(dev, hrxq);
return 0;
}
{
struct mlx5_priv *priv = dev->data->dev_private;
- return mlx5_cache_list_get_entry_num(&priv->hrxqs);
+ return mlx5_list_get_entry_num(priv->hrxqs);
}
/**