*/
#include <stddef.h>
-#include <assert.h>
#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <rte_debug.h>
#include <rte_io.h>
+#include <mlx5_glue.h>
+#include <mlx5_devx_cmds.h>
+
+#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"
#include "mlx5_autoconf.h"
-#include "mlx5_defs.h"
-#include "mlx5_glue.h"
#include "mlx5_flow.h"
-#include "mlx5_devx_cmds.h"
+
/* Default RSS hash key also used for ConnectX-3. */
uint8_t rss_hash_default_key[] = {
++n;
}
/* Multi-Packet RQ can't be partially configured. */
- assert(n == 0 || n == n_ibv);
+ MLX5_ASSERT(n == 0 || n == n_ibv);
return n == n_ibv;
}
goto error;
}
/* Headroom is reserved by rte_pktmbuf_alloc(). */
- assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
+ MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
/* Buffer is supposed to be empty. */
- assert(rte_pktmbuf_data_len(buf) == 0);
- assert(rte_pktmbuf_pkt_len(buf) == 0);
- assert(!buf->next);
+ MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
+ MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
+ MLX5_ASSERT(!buf->next);
/* Only the first segment keeps headroom. */
if (i % sges_n)
SET_DATA_OFF(buf, 0);
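
Aside: since only the first segment of an sges_n chain keeps RTE_PKTMBUF_HEADROOM, a chain's usable capacity is straightforward to derive. A standalone sketch (the helper name and the 128-byte headroom value are assumptions for illustration):

#include <stdio.h>

#define RTE_PKTMBUF_HEADROOM 128 /* typical default, set at build time */

/* Hypothetical helper: bytes of packet data one sges_n-segment chain
 * can hold when only the first segment reserves headroom, which is the
 * invariant SET_DATA_OFF(buf, 0) establishes above. */
static unsigned int
chain_capacity(unsigned int mb_len, unsigned int sges_n)
{
	return (mb_len - RTE_PKTMBUF_HEADROOM) + (sges_n - 1) * mb_len;
}

int
main(void)
{
	/* e.g. 2KB data room per mbuf, 4 segments per packet */
	printf("%u bytes\n", chain_capacity(2048, 4));
	return 0;
}
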
rxq->port_id, rxq->idx);
if (rxq->mprq_bufs == NULL)
return;
- assert(mlx5_rxq_check_vec_support(rxq) < 0);
+ MLX5_ASSERT(mlx5_rxq_check_vec_support(rxq) < 0);
for (i = 0; (i != (1u << rxq->elts_n)); ++i) {
if ((*rxq->mprq_bufs)[i] != NULL)
mlx5_mprq_buf_free((*rxq->mprq_bufs)[i]);
{
struct mlx5_devx_modify_rq_attr rq_attr = { 0 };
- assert(rxq_obj);
+ MLX5_ASSERT(rxq_obj);
rq_attr.state = MLX5_RQC_STATE_RST;
rq_attr.rq_state = MLX5_RQC_STATE_RDY;
mlx5_devx_cmd_modify_rq(rxq_obj->rq, &rq_attr);
static int
mlx5_rxq_obj_release(struct mlx5_rxq_obj *rxq_obj)
{
- assert(rxq_obj);
+ MLX5_ASSERT(rxq_obj);
if (rte_atomic32_dec_and_test(&rxq_obj->refcnt)) {
switch (rxq_obj->type) {
case MLX5_RXQ_OBJ_TYPE_IBV:
- assert(rxq_obj->wq);
- assert(rxq_obj->cq);
+ MLX5_ASSERT(rxq_obj->wq);
+ MLX5_ASSERT(rxq_obj->cq);
rxq_free_elts(rxq_obj->rxq_ctrl);
claim_zero(mlx5_glue->destroy_wq(rxq_obj->wq));
claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
break;
case MLX5_RXQ_OBJ_TYPE_DEVX_RQ:
- assert(rxq_obj->cq);
- assert(rxq_obj->rq);
+ MLX5_ASSERT(rxq_obj->cq);
+ MLX5_ASSERT(rxq_obj->rq);
rxq_free_elts(rxq_obj->rxq_ctrl);
claim_zero(mlx5_devx_cmd_destroy(rxq_obj->rq));
rxq_release_rq_resources(rxq_obj->rxq_ctrl);
claim_zero(mlx5_glue->destroy_cq(rxq_obj->cq));
break;
case MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN:
- assert(rxq_obj->rq);
+ MLX5_ASSERT(rxq_obj->rq);
rxq_obj_hairpin_release(rxq_obj);
break;
}
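
Aside: the switch above runs only when the last reference is dropped. A minimal sketch of the same dec-and-test release idiom, using C11 atomics in place of the rte_atomic32 wrappers (object layout is illustrative):

#include <stdatomic.h>
#include <stdlib.h>

/* Illustrative object with the same lifetime scheme as rxq_obj. */
struct obj {
	atomic_int refcnt;
	/* ... hardware resources ... */
};

/* Returns 0 when the object was fully released, 1 if still referenced,
 * mirroring the rte_atomic32_dec_and_test() pattern above. */
static int
obj_release(struct obj *o)
{
	/* atomic_fetch_sub returns the previous value: 1 means we held
	 * the last reference and must destroy the resources. */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1) {
		free(o);
		return 0;
	}
	return 1;
}
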
struct mlx5_devx_create_rq_attr attr = { 0 };
struct mlx5_rxq_obj *tmpl = NULL;
int ret = 0;
+ uint32_t max_wq_data;
- assert(rxq_data);
- assert(!rxq_ctrl->obj);
+ MLX5_ASSERT(rxq_data);
+ MLX5_ASSERT(!rxq_ctrl->obj);
tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0,
rxq_ctrl->socket);
if (!tmpl) {
tmpl->type = MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN;
tmpl->rxq_ctrl = rxq_ctrl;
attr.hairpin = 1;
- /* Workaround for hairpin startup */
- attr.wq_attr.log_hairpin_num_packets = log2above(32);
- /* Workaround for packets larger than 1KB */
- attr.wq_attr.log_hairpin_data_sz =
- priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+ max_wq_data = priv->config.hca_attr.log_max_hairpin_wq_data_sz;
+	/* Jumbo frames > 9KB should be supported, and more packets per queue. */
+ if (priv->config.log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
+ if (priv->config.log_hp_size > max_wq_data) {
+			DRV_LOG(ERR, "total data size 2^%u is too large"
+				" for hairpin",
+ priv->config.log_hp_size);
+ rte_errno = ERANGE;
+ return NULL;
+ }
+ attr.wq_attr.log_hairpin_data_sz = priv->config.log_hp_size;
+ } else {
+ attr.wq_attr.log_hairpin_data_sz =
+ (max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE) ?
+ max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
+ }
+	/* Set the number of packets to the maximum value for performance. */
+ attr.wq_attr.log_hairpin_num_packets =
+ attr.wq_attr.log_hairpin_data_sz -
+ MLX5_HAIRPIN_QUEUE_STRIDE;
tmpl->rq = mlx5_devx_cmd_create_rq(priv->sh->ctx, &attr,
rxq_ctrl->socket);
if (!tmpl->rq) {
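
Aside: the new sizing rule can be condensed into a standalone helper. A sketch assuming placeholder values for MLX5_HAIRPIN_JUMBO_LOG_SIZE and MLX5_HAIRPIN_QUEUE_STRIDE (the real definitions live in mlx5_defs.h):

#include <stdint.h>
#include <stdio.h>

#define MLX5_ARG_UNSET (-1)
/* Assumed values for illustration only; see mlx5_defs.h upstream. */
#define MLX5_HAIRPIN_JUMBO_LOG_SIZE 15 /* 32KB total data size */
#define MLX5_HAIRPIN_QUEUE_STRIDE 6    /* 64B strides */

/* Mirror of the logic above: take the user-requested log_hp_size if set
 * (validated against the HCA cap), otherwise the jumbo default capped by
 * the HCA; the packet count is then data size minus the stride (log2). */
static int
hairpin_wq_size(uint32_t max_wq_data, uint32_t log_hp_size,
		uint32_t *log_data_sz, uint32_t *log_num_packets)
{
	if (log_hp_size != (uint32_t)MLX5_ARG_UNSET) {
		if (log_hp_size > max_wq_data)
			return -1; /* rte_errno = ERANGE upstream */
		*log_data_sz = log_hp_size;
	} else {
		*log_data_sz = max_wq_data < MLX5_HAIRPIN_JUMBO_LOG_SIZE ?
			max_wq_data : MLX5_HAIRPIN_JUMBO_LOG_SIZE;
	}
	*log_num_packets = *log_data_sz - MLX5_HAIRPIN_QUEUE_STRIDE;
	return 0;
}

int
main(void)
{
	uint32_t dsz, npkt;

	if (hairpin_wq_size(16, (uint32_t)MLX5_ARG_UNSET, &dsz, &npkt) == 0)
		printf("data 2^%u B, packets 2^%u\n", dsz, npkt);
	return 0;
}
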
int ret = 0;
struct mlx5dv_obj obj;
- assert(rxq_data);
- assert(!rxq_ctrl->obj);
+ MLX5_ASSERT(rxq_data);
+ MLX5_ASSERT(!rxq_ctrl->obj);
if (type == MLX5_RXQ_OBJ_TYPE_DEVX_HAIRPIN)
return mlx5_rxq_obj_hairpin_new(dev, idx);
priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE;
if (strd_sz_n < rxq->strd_sz_n)
strd_sz_n = rxq->strd_sz_n;
}
- assert(strd_num_n && strd_sz_n);
+ MLX5_ASSERT(strd_num_n && strd_sz_n);
buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
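
Aside: the obj_size computation above packs the control struct, the stride memory, one shared-info per stride, and headroom into a single mempool element. A self-contained sketch (struct sizes passed in as placeholders):

#include <stddef.h>
#include <stdio.h>

#define RTE_PKTMBUF_HEADROOM 128 /* typical default, set at build time */

/* Sketch of the obj_size formula above: one MPRQ buffer holds the
 * control struct, strd_num * strd_sz bytes of stride memory, one
 * rte_mbuf_ext_shared_info per stride (for external-buffer attach),
 * plus headroom. Struct sizes are parameters to stay self-contained. */
static size_t
mprq_obj_size(unsigned int strd_num_n, unsigned int strd_sz_n,
	      size_t sizeof_mprq_buf, size_t sizeof_shinfo)
{
	size_t buf_len = (size_t)(1u << strd_num_n) * (1u << strd_sz_n);

	return sizeof_mprq_buf + buf_len +
	       (size_t)(1u << strd_num_n) * sizeof_shinfo +
	       RTE_PKTMBUF_HEADROOM;
}

int
main(void)
{
	/* e.g. 2^4 strides of 2^11 (2KB) each, placeholder struct sizes */
	printf("%zu bytes\n", mprq_obj_size(4, 11, 64, 32));
	return 0;
}
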
MLX5_MAX_TCP_HDR_OFFSET)
max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
- assert(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
+ MLX5_ASSERT(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
max_lro_size /= MLX5_LRO_SEG_CHUNK_SIZE;
if (priv->max_lro_msg_size)
priv->max_lro_msg_size =
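
Aside: the LRO size is handed to the device in chunk units. A hedged sketch of the encoding above, with the constant values and the guard condition assumed for illustration:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values for illustration only; see the driver headers. */
#define MLX5_MAX_TCP_HDR_OFFSET 188
#define MLX5_MAX_LRO_SIZE 0x10000
#define MLX5_LRO_SEG_CHUNK_SIZE 256

/* Mirror of the steps above: strip the worst-case TCP header offset,
 * clamp to the device maximum, then express the result in chunks. */
static uint32_t
lro_size_to_chunks(uint32_t max_lro_size)
{
	if (max_lro_size > MLX5_MAX_TCP_HDR_OFFSET)
		max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
	if (max_lro_size > MLX5_MAX_LRO_SIZE)
		max_lro_size = MLX5_MAX_LRO_SIZE;
	assert(max_lro_size >= MLX5_LRO_SEG_CHUNK_SIZE);
	return max_lro_size / MLX5_LRO_SEG_CHUNK_SIZE;
}

int
main(void)
{
	printf("%u chunks\n", lro_size_to_chunks(9216));
	return 0;
}
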
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
+ unsigned int mprq_stride_nums;
unsigned int mprq_stride_size;
+ unsigned int mprq_stride_cap;
struct mlx5_dev_config *config = &priv->config;
- unsigned int strd_headroom_en;
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
tmpl->socket = socket;
if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
- /*
- * LRO packet may consume all the stride memory, hence we cannot
- * guaranty head-room near the packet memory in the stride.
- * In this case scatter is, for sure, enabled and an empty mbuf may be
- * added in the start for the head-room.
- */
- if (lro_on_queue && RTE_PKTMBUF_HEADROOM > 0 &&
- non_scatter_min_mbuf_size > mb_len) {
- strd_headroom_en = 0;
- mprq_stride_size = RTE_MIN(max_rx_pkt_len,
- 1u << config->mprq.max_stride_size_n);
- } else {
- strd_headroom_en = 1;
- mprq_stride_size = non_scatter_min_mbuf_size;
- }
+ mprq_stride_nums = config->mprq.stride_num_n ?
+ config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
+ mprq_stride_size = non_scatter_min_mbuf_size <=
+ (1U << config->mprq.max_stride_size_n) ?
+ log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
+ mprq_stride_cap = (config->mprq.stride_num_n ?
+ (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
+ (config->mprq.stride_size_n ?
+ (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
/*
* This Rx queue can be configured as a Multi-Packet RQ if all of the
* following conditions are met:
* - MPRQ is enabled.
* - The number of descs is more than the number of strides.
- * - max_rx_pkt_len plus overhead is less than the max size of a
- * stride.
+ * - max_rx_pkt_len plus overhead is less than the max size
+	 *   of a stride, or mprq_stride_size is specified by a user.
+	 *   Need to make sure that there are enough strides to hold
+	 *   a maximum-size packet in case mprq_stride_size is set.
* Otherwise, enable Rx scatter if necessary.
*/
- if (mprq_en &&
- desc > (1U << config->mprq.stride_num_n) &&
- mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
+ if (mprq_en && desc > (1U << mprq_stride_nums) &&
+ (non_scatter_min_mbuf_size <=
+ (1U << config->mprq.max_stride_size_n) ||
+ (config->mprq.stride_size_n &&
+ non_scatter_min_mbuf_size <= mprq_stride_cap))) {
/* TODO: Rx scatter isn't supported yet. */
tmpl->rxq.sges_n = 0;
/* Trim the number of descs needed. */
- desc >>= config->mprq.stride_num_n;
- tmpl->rxq.strd_num_n = config->mprq.stride_num_n;
- tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
- config->mprq.min_stride_size_n);
+ desc >>= mprq_stride_nums;
+ tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
+ config->mprq.stride_num_n : mprq_stride_nums;
+ tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
+ config->mprq.stride_size_n : mprq_stride_size;
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
- tmpl->rxq.strd_headroom_en = strd_headroom_en;
+ tmpl->rxq.strd_scatter_en =
+ !!(offloads & DEV_RX_OFFLOAD_SCATTER);
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
config->mprq.max_memcpy_len);
max_lro_size = RTE_MIN(max_rx_pkt_len,
if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
DRV_LOG(WARNING,
"port %u MPRQ is requested but cannot be enabled"
- " (requested: desc = %u, stride_sz = %u,"
- " supported: min_stride_num = %u, max_stride_sz = %u).",
- dev->data->port_id, desc, mprq_stride_size,
- (1 << config->mprq.stride_num_n),
- (1 << config->mprq.max_stride_size_n));
+ " (requested: packet size = %u, desc = %u,"
+ " stride_sz = %u, stride_num = %u,"
+ " supported: min_stride_sz = %u, max_stride_sz = %u).",
+ dev->data->port_id, non_scatter_min_mbuf_size, desc,
+ config->mprq.stride_size_n ?
+ (1U << config->mprq.stride_size_n) :
+ (1U << mprq_stride_size),
+ config->mprq.stride_num_n ?
+ (1U << config->mprq.stride_num_n) :
+ (1U << mprq_stride_nums),
+ (1U << config->mprq.min_stride_size_n),
+ (1U << config->mprq.max_stride_size_n));
DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
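
Aside: the new enabling test combines the user overrides with the defaults. A compact standalone sketch (MLX5_MPRQ_STRIDE_NUM_N and MLX5_MPRQ_STRIDE_SIZE_N are given placeholder values; zero in a config field means "not set by the user"):

#include <stdbool.h>
#include <stdio.h>

/* Placeholder defaults for illustration; see mlx5_defs.h upstream. */
#define MLX5_MPRQ_STRIDE_NUM_N 6
#define MLX5_MPRQ_STRIDE_SIZE_N 11

static unsigned int
log2above_u(unsigned int v) /* smallest n with 2^n >= v */
{
	unsigned int n = 0;

	while ((1u << n) < v)
		++n;
	return n;
}

/* Mirror of the enabling test above: MPRQ is used when there are more
 * descriptors than strides and either the packet fits one max-size
 * stride or a user-set stride size yields enough capacity per entry. */
static bool
mprq_usable(unsigned int desc, unsigned int pkt_size,
	    unsigned int cfg_stride_num_n, unsigned int cfg_stride_size_n,
	    unsigned int max_stride_size_n)
{
	unsigned int num_n = cfg_stride_num_n ? cfg_stride_num_n :
			     MLX5_MPRQ_STRIDE_NUM_N;
	unsigned int size_n = pkt_size <= (1u << max_stride_size_n) ?
			      log2above_u(pkt_size) : MLX5_MPRQ_STRIDE_SIZE_N;
	unsigned int cap = (1u << num_n) *
			   (1u << (cfg_stride_size_n ? cfg_stride_size_n :
				   size_n));

	return desc > (1u << num_n) &&
	       (pkt_size <= (1u << max_stride_size_n) ||
		(cfg_stride_size_n && pkt_size <= cap));
}

int
main(void)
{
	/* 512 descs, 2KB packets, no user overrides, 8KB max stride */
	printf("%d\n", mprq_usable(512, 2048, 0, 0, 13));
	return 0;
}
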
if (!(*priv->rxqs)[idx])
return 0;
rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq);
- assert(rxq_ctrl->priv);
+ MLX5_ASSERT(rxq_ctrl->priv);
if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
rxq_ctrl->obj = NULL;
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
tir_attr.transport_domain = priv->sh->td->id;
else
tir_attr.transport_domain = priv->sh->tdn;
- memcpy(tir_attr.rx_hash_toeplitz_key, rss_key, rss_key_len);
+ memcpy(tir_attr.rx_hash_toeplitz_key, rss_key,
+ MLX5_RSS_HASH_KEY_LEN);
tir_attr.indirect_table = ind_tbl->rqt->id;
if (dev->data->dev_conf.lpbk_mode)
tir_attr.self_lb_block =
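
Aside: the fixed-length memcpy above relies on the key buffer always holding MLX5_RSS_HASH_KEY_LEN bytes. A sketch of normalizing a user key into such a buffer (the helper and its zero-padding policy are illustrative assumptions, not driver code):

#include <stdint.h>
#include <string.h>

#define MLX5_RSS_HASH_KEY_LEN 40 /* Toeplitz key length used by mlx5 */

/* Hypothetical helper: copy a user-provided RSS key into the fixed
 * 40-byte buffer the TIR expects, zero-padding a shorter key and
 * truncating a longer one. The driver itself guarantees the buffer it
 * passes is already MLX5_RSS_HASH_KEY_LEN bytes long. */
static void
normalize_rss_key(uint8_t dst[MLX5_RSS_HASH_KEY_LEN],
		  const uint8_t *src, size_t src_len)
{
	size_t n = src_len < MLX5_RSS_HASH_KEY_LEN ?
		   src_len : MLX5_RSS_HASH_KEY_LEN;

	memset(dst, 0, MLX5_RSS_HASH_KEY_LEN);
	if (src != NULL)
		memcpy(dst, src, n);
}
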