/**
* Check whether Multi-Packet RQ is enabled for the device.
- * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
*
* @param dev
* Pointer to Ethernet device.
rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
rxq_ctrl->rxq.wqes = NULL;
}
- if (rxq_ctrl->wq_umem)
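+ /* Clear the pointer so a repeated release cannot deregister the umem twice. */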
+ if (rxq_ctrl->wq_umem) {
mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
+ rxq_ctrl->wq_umem = NULL;
+ }
}
/**
struct mlx5_rxq_ctrl *rxq_ctrl =
container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
struct mlx5_devx_create_rq_attr rq_attr;
- uint32_t wqe_n = 1 << rxq_data->elts_n;
+ uint32_t wqe_n = 1 << (rxq_data->elts_n - rxq_data->sges_n);
uint32_t wq_size = 0;
uint32_t wqe_size = 0;
uint32_t log_wqe_size = 0;
MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
wqe_size = sizeof(struct mlx5_wqe_mprq);
} else {
- int max_sge = 0;
- int num_scatter = 0;
-
rq_attr.wq_attr.wq_type = MLX5_WQ_TYPE_CYCLIC;
- max_sge = 1 << rxq_data->sges_n;
- num_scatter = RTE_MAX(max_sge, 1);
- wqe_size = sizeof(struct mlx5_wqe_data_seg) * num_scatter;
+ wqe_size = sizeof(struct mlx5_wqe_data_seg);
}
- log_wqe_size = log2above(wqe_size);
+ log_wqe_size = log2above(wqe_size) + rxq_data->sges_n;
rq_attr.wq_attr.log_wq_stride = log_wqe_size;
- rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n;
+ rq_attr.wq_attr.log_wq_sz = rxq_data->elts_n - rxq_data->sges_n;
/* Calculate and allocate WQ memory space. */
wqe_size = 1 << log_wqe_size; /* Round up to the next power of two. */
wq_size = wqe_n * wqe_size;
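+ /*
+ * Worked example, assuming a 16B data segment: elts_n = 10 and sges_n = 2
+ * give wqe_n = 1 << 8 = 256 WQEs with log_wqe_size = 4 + 2 = 6, so each
+ * 64B WQE scatters into 4 segments and wq_size = 256 * 64 = 16KB.
+ */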
goto error;
rxq_ctrl->dbr_offset = dbr_offset;
rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
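+ /* Mark that a doorbell record was allocated so release can free it. */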
+ rxq_ctrl->dbr_umem_id_valid = 1;
rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
(uintptr_t)rxq_ctrl->dbr_offset);
}
return 0;
}
+#define MLX5_MAX_LRO_SIZE (UINT8_MAX * 256u)
+#define MLX5_MAX_TCP_HDR_OFFSET ((unsigned int)(sizeof(struct rte_ether_hdr) + \
+ sizeof(struct rte_vlan_hdr) * 2 + \
+ sizeof(struct rte_ipv6_hdr)))
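+ /*
+ * With typical header sizes (14B Ethernet, two 4B VLAN tags, 40B IPv6),
+ * MLX5_MAX_TCP_HDR_OFFSET evaluates to 62B; MLX5_MAX_LRO_SIZE is the
+ * 255 * 256 = 65280B ceiling the PRM sets for an LRO message.
+ */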
/**
* Adjust the maximum LRO message size.
- * LRO massage is contained in the MPRQ strides.
- * While the LRO massage size cannot be bigger than 65280 according to the
- * PRM, the strides which contain it may be bigger.
- * Adjust the maximum LRO massage size to avoid the above option.
*
* @param dev
* Pointer to Ethernet device.
- * @param strd_n
- * Number of strides per WQE..
- * @param strd_sz
- * The stride size.
+ * @param max_lro_size
+ * The maximum size for LRO packet.
*/
static void
-mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t strd_n,
- uint32_t strd_sz)
+mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t max_lro_size)
{
struct mlx5_priv *priv = dev->data->dev_private;
- uint32_t max_buf_len = strd_sz * strd_n;
- if (max_buf_len > (uint64_t)UINT16_MAX)
- max_buf_len = RTE_ALIGN_FLOOR((uint32_t)UINT16_MAX, strd_sz);
- max_buf_len /= 256;
- max_buf_len = RTE_MIN(max_buf_len, (uint32_t)UINT8_MAX);
- assert(max_buf_len);
+ if (priv->config.hca_attr.lro_max_msg_sz_mode ==
+ MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
+ MLX5_MAX_TCP_HDR_OFFSET)
+ max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
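+ /*
+ * The remaining size is clamped to the PRM ceiling and converted to the
+ * 256-byte units the device expects, e.g. 65280B becomes 65280 / 256 = 255.
+ */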
+ max_lro_size = RTE_MIN(max_lro_size, MLX5_MAX_LRO_SIZE);
+ assert(max_lro_size >= 256u);
+ max_lro_size /= 256u;
if (priv->max_lro_msg_size)
priv->max_lro_msg_size =
- RTE_MIN((uint32_t)priv->max_lro_msg_size, max_buf_len);
+ RTE_MIN((uint32_t)priv->max_lro_msg_size, max_lro_size);
else
- priv->max_lro_msg_size = max_buf_len;
+ priv->max_lro_msg_size = max_lro_size;
}
/**
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
unsigned int mprq_stride_size;
struct mlx5_dev_config *config = &priv->config;
- /*
- * LRO packet may consume all the stride memory, hence we cannot
- * guaranty head-room. A new striding RQ feature may be added in CX6 DX
- * to allow head-room and tail-room for the LRO packets.
- */
- unsigned int strd_headroom_en = mlx5_lro_on(dev) ? 0 : 1;
+ unsigned int strd_headroom_en;
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
const int mprq_en = mlx5_check_mprq_support(dev) > 0;
-
+ unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
+ RTE_PKTMBUF_HEADROOM;
+ unsigned int max_lro_size = 0;
+
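+ /*
+ * Without DEV_RX_OFFLOAD_SCATTER the whole packet plus head-room must fit
+ * in a single mbuf: e.g. a 2048B data room with the default 128B head-room
+ * leaves 1920B for the largest acceptable max_rx_pkt_len.
+ */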
+ if (non_scatter_min_mbuf_size > mb_len && !(offloads &
+ DEV_RX_OFFLOAD_SCATTER)) {
+ DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
+ " configured and there is not enough mbuf space (%u) to"
+ " contain the maximum Rx packet length (%u) with"
+ " head-room (%u)",
+ dev->data->port_id, idx, mb_len, max_rx_pkt_len,
+ RTE_PKTMBUF_HEADROOM);
+ rte_errno = ENOSPC;
+ return NULL;
+ }
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
desc_n * sizeof(struct rte_mbuf *),
tmpl->socket = socket;
if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
+ /*
+ * An LRO packet may consume all the stride memory, hence we cannot
+ * guarantee head-room near the packet memory in the stride.
+ * In this case scatter is necessarily enabled and an empty mbuf may be
+ * prepended to hold the head-room.
+ */
+ if (mlx5_lro_on(dev) && RTE_PKTMBUF_HEADROOM > 0 &&
+ non_scatter_min_mbuf_size > mb_len) {
+ strd_headroom_en = 0;
+ mprq_stride_size = RTE_MIN(max_rx_pkt_len,
+ 1u << config->mprq.max_stride_size_n);
+ } else {
+ strd_headroom_en = 1;
+ mprq_stride_size = non_scatter_min_mbuf_size;
+ }
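+ /*
+ * For example, with LRO on, 2048B mbufs, 128B head-room and
+ * max_rx_pkt_len = 9000: 9128 > 2048, so head-room is dropped and the
+ * stride size is capped at 1 << mprq.max_stride_size_n; otherwise each
+ * stride must hold a whole packet plus its head-room.
+ */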
/*
* This Rx queue can be configured as a Multi-Packet RQ if all of the
* following conditions are met:
* stride.
* Otherwise, enable Rx scatter if necessary.
*/
- assert(mb_len >= RTE_PKTMBUF_HEADROOM * strd_headroom_en);
- mprq_stride_size = dev->data->dev_conf.rxmode.max_rx_pkt_len +
- RTE_PKTMBUF_HEADROOM * strd_headroom_en;
if (mprq_en &&
desc > (1U << config->mprq.stride_num_n) &&
mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
tmpl->rxq.strd_headroom_en = strd_headroom_en;
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
- mlx5_max_lro_msg_size_adjust(dev, (1 << tmpl->rxq.strd_num_n),
- (1 << tmpl->rxq.strd_sz_n));
+ max_lro_size = RTE_MIN(max_rx_pkt_len,
+ (1u << tmpl->rxq.strd_num_n) *
+ (1u << tmpl->rxq.strd_sz_n));
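+ /*
+ * LRO is also bounded by the capacity of one MPRQ WQE: e.g.
+ * strd_num_n = 6 and strd_sz_n = 11 give 64 * 2048B = 128KB of
+ * stride memory, further limited by max_rx_pkt_len.
+ */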
DRV_LOG(DEBUG,
"port %u Rx queue %u: Multi-Packet RQ is enabled"
" strd_num_n = %u, strd_sz_n = %u",
dev->data->port_id, idx,
tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
- } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ } else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
+ max_lro_size = max_rx_pkt_len;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
- unsigned int size =
- RTE_PKTMBUF_HEADROOM +
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int size = non_scatter_min_mbuf_size;
unsigned int sges_n;
/*
* and round it to the next power of two.
*/
sges_n = log2above((size / mb_len) + !!(size % mb_len));
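+ /*
+ * For example, size = 9128 with 2048B mbufs needs
+ * ceil(9128 / 2048) = 5 buffers, rounded up to 1 << 3 = 8 SGEs.
+ */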
- tmpl->rxq.sges_n = sges_n;
- /* Make sure rxq.sges_n did not overflow. */
- size = mb_len * (1 << tmpl->rxq.sges_n);
- size -= RTE_PKTMBUF_HEADROOM;
- if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
DRV_LOG(ERR,
"port %u too many SGEs (%u) needed to handle"
- " requested maximum packet size %u",
- dev->data->port_id,
- 1 << sges_n,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
- rte_errno = EOVERFLOW;
+ " requested maximum packet size %u, the maximum"
+ " supported are %u", dev->data->port_id,
+ 1 << sges_n, max_rx_pkt_len,
+ 1u << MLX5_MAX_LOG_RQ_SEGS);
+ rte_errno = ENOTSUP;
goto error;
}
- } else {
- DRV_LOG(WARNING,
- "port %u the requested maximum Rx packet size (%u) is"
- " larger than a single mbuf (%u) and scattered mode has"
- " not been requested",
- dev->data->port_id,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- mb_len - RTE_PKTMBUF_HEADROOM);
+ tmpl->rxq.sges_n = sges_n;
+ max_lro_size = max_rx_pkt_len;
}
if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
DRV_LOG(WARNING,
rte_errno = EINVAL;
goto error;
}
+ mlx5_max_lro_msg_size_adjust(dev, max_lro_size);
/* Toggle RX checksum offload if hardware supports it. */
tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
rxq_ctrl->obj = NULL;
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
- claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
- rxq_ctrl->dbr_offset));
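+ /*
+ * Only DevX-created queues allocate a doorbell record and set
+ * dbr_umem_id_valid, so skip the release for the others.
+ */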
+ if (rxq_ctrl->dbr_umem_id_valid)
+ claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
+ rxq_ctrl->dbr_offset));
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
rte_free(rxq_ctrl);