rte_free((void *)(uintptr_t)rxq_ctrl->rxq.wqes);
rxq_ctrl->rxq.wqes = NULL;
}
- if (rxq_ctrl->wq_umem)
+ if (rxq_ctrl->wq_umem) {
mlx5_glue->devx_umem_dereg(rxq_ctrl->wq_umem);
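+		/* Clear the handle so a repeated release is a harmless no-op. */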
+ rxq_ctrl->wq_umem = NULL;
+ }
}
/**
goto error;
rxq_ctrl->dbr_offset = dbr_offset;
rxq_ctrl->dbr_umem_id = dbr_page->umem->umem_id;
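+	/* Mark the doorbell umem id valid so the release path can be guarded. */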
+ rxq_ctrl->dbr_umem_id_valid = 1;
rxq_data->rq_db = (uint32_t *)((uintptr_t)dbr_page->dbrs +
(uintptr_t)rxq_ctrl->dbr_offset);
}
* Callback function to initialize mbufs for Multi-Packet RQ.
*/
static inline void
-mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg __rte_unused,
+mlx5_mprq_buf_init(struct rte_mempool *mp, void *opaque_arg,
void *_m, unsigned int i __rte_unused)
{
struct mlx5_mprq_buf *buf = _m;
+ struct rte_mbuf_ext_shared_info *shinfo;
+ unsigned int strd_n = (unsigned int)(uintptr_t)opaque_arg;
+ unsigned int j;
memset(_m, 0, sizeof(*buf));
buf->mp = mp;
rte_atomic16_set(&buf->refcnt, 1);
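+	/*
+	 * Initialize a shared info per stride so that each stride can be
+	 * attached to an mbuf as an external buffer and put back through
+	 * mlx5_mprq_buf_free_cb() when the mbuf is freed.
+	 */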
+ for (j = 0; j != strd_n; ++j) {
+ shinfo = &buf->shinfos[j];
+ shinfo->free_cb = mlx5_mprq_buf_free_cb;
+ shinfo->fcb_opaque = buf;
+ }
}
/**
}
assert(strd_num_n && strd_sz_n);
buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
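+	/*
+	 * The object holds the control structure, the stride memory, one
+	 * shared info per stride and head-room space for external buffer
+	 * attachment.
+	 */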
- obj_size = buf_len + sizeof(struct mlx5_mprq_buf);
+ obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
+ sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
/*
* Received packets can be either memcpy'd or externally referenced. In
* case that the packet is attached to an mbuf as an external buffer, as
}
snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
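+	/* The number of strides is passed to mlx5_mprq_buf_init() as the
+	 * opaque argument of the per-object init callback.
+	 */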
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
- 0, NULL, NULL, mlx5_mprq_buf_init, NULL,
+ 0, NULL, NULL, mlx5_mprq_buf_init,
+ (void *)(uintptr_t)(1 << strd_num_n),
dev->device->numa_node, 0);
if (mp == NULL) {
DRV_LOG(ERR,
return 0;
}
+/**
+ * Adjust the maximum LRO message size.
+ * An LRO message is contained in MPRQ strides.
+ * While the LRO message size cannot be bigger than 65280 according to the
+ * PRM, the strides which contain it may be bigger.
+ * Adjust the maximum LRO message size so this limit is never exceeded.
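+ *
+ * For example (illustrative values), strd_n = 512 and strd_sz = 2048 give
+ * 1MB of stride memory; it is floored to 63488 bytes and stored as 248 in
+ * the 256-byte units the device is programmed with.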
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param strd_n
+ * Number of strides per WQE.
+ * @param strd_sz
+ * The stride size.
+ */
+static void
+mlx5_max_lro_msg_size_adjust(struct rte_eth_dev *dev, uint32_t strd_n,
+ uint32_t strd_sz)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t max_buf_len = strd_sz * strd_n;
+
+ if (max_buf_len > (uint64_t)UINT16_MAX)
+ max_buf_len = RTE_ALIGN_FLOOR((uint32_t)UINT16_MAX, strd_sz);
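+	/* lro_max_msg_sz is programmed in 256-byte units, hence the division
+	 * and the UINT8_MAX clamp below.
+	 */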
+ max_buf_len /= 256;
+ max_buf_len = RTE_MIN(max_buf_len, (uint32_t)UINT8_MAX);
+ assert(max_buf_len);
+ if (priv->max_lro_msg_size)
+ priv->max_lro_msg_size =
+ RTE_MIN((uint32_t)priv->max_lro_msg_size, max_buf_len);
+ else
+ priv->max_lro_msg_size = max_buf_len;
+}
+
/**
* Create a DPDK Rx queue.
*
unsigned int mb_len = rte_pktmbuf_data_room_size(mp);
unsigned int mprq_stride_size;
struct mlx5_dev_config *config = &priv->config;
+	/*
+	 * An LRO packet may consume all the stride memory, hence we cannot
+	 * guarantee head-room. A new striding RQ feature may be added in
+	 * CX6 DX to allow head-room and tail-room for the LRO packets.
+	 */
+	unsigned int strd_headroom_en = mlx5_lro_on(dev) ? 0 : 1;
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
const int mprq_en = mlx5_check_mprq_support(dev) > 0;
-
+ unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
+ RTE_PKTMBUF_HEADROOM;
+
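+	/*
+	 * Without Rx scatter, a single mbuf must be able to hold the largest
+	 * Rx packet together with its head-room.
+	 */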
+ if (non_scatter_min_mbuf_size > mb_len && !(offloads &
+ DEV_RX_OFFLOAD_SCATTER)) {
+		DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
+			" configured and there is not enough mbuf space (%u)"
+			" to contain the maximum Rx packet length (%u) with"
+			" head-room (%u)",
+			dev->data->port_id, idx, mb_len, max_rx_pkt_len,
+			RTE_PKTMBUF_HEADROOM);
+ rte_errno = ENOSPC;
+ return NULL;
+ }
tmpl = rte_calloc_socket("RXQ", 1,
sizeof(*tmpl) +
desc_n * sizeof(struct rte_mbuf *),
* stride.
* Otherwise, enable Rx scatter if necessary.
*/
- assert(mb_len >= RTE_PKTMBUF_HEADROOM);
- mprq_stride_size =
- dev->data->dev_conf.rxmode.max_rx_pkt_len +
- sizeof(struct rte_mbuf_ext_shared_info) +
- RTE_PKTMBUF_HEADROOM;
+ mprq_stride_size = max_rx_pkt_len + RTE_PKTMBUF_HEADROOM *
+ strd_headroom_en;
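+	/*
+	 * Enable MPRQ only when the requested number of descriptors exceeds
+	 * the strides per WQE and the needed stride size is supported.
+	 */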
if (mprq_en &&
desc > (1U << config->mprq.stride_num_n) &&
mprq_stride_size <= (1U << config->mprq.max_stride_size_n)) {
tmpl->rxq.strd_sz_n = RTE_MAX(log2above(mprq_stride_size),
config->mprq.min_stride_size_n);
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
- tmpl->rxq.mprq_max_memcpy_len =
- RTE_MIN(mb_len - RTE_PKTMBUF_HEADROOM,
- config->mprq.max_memcpy_len);
+ tmpl->rxq.strd_headroom_en = strd_headroom_en;
+ tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
+ RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
+ mlx5_max_lro_msg_size_adjust(dev, (1 << tmpl->rxq.strd_num_n),
+ (1 << tmpl->rxq.strd_sz_n));
DRV_LOG(DEBUG,
"port %u Rx queue %u: Multi-Packet RQ is enabled"
" strd_num_n = %u, strd_sz_n = %u",
dev->data->port_id, idx,
tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
- } else if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ } else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
- unsigned int size =
- RTE_PKTMBUF_HEADROOM +
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int size = non_scatter_min_mbuf_size;
unsigned int sges_n;
/*
/* Make sure rxq.sges_n did not overflow. */
size = mb_len * (1 << tmpl->rxq.sges_n);
size -= RTE_PKTMBUF_HEADROOM;
- if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ if (size < max_rx_pkt_len) {
DRV_LOG(ERR,
"port %u too many SGEs (%u) needed to handle"
" requested maximum packet size %u",
dev->data->port_id,
1 << sges_n,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ max_rx_pkt_len);
rte_errno = EOVERFLOW;
goto error;
}
- } else {
- DRV_LOG(WARNING,
- "port %u the requested maximum Rx packet size (%u) is"
- " larger than a single mbuf (%u) and scattered mode has"
- " not been requested",
- dev->data->port_id,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- mb_len - RTE_PKTMBUF_HEADROOM);
}
if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
DRV_LOG(WARNING,
if (rxq_ctrl->obj && !mlx5_rxq_obj_release(rxq_ctrl->obj))
rxq_ctrl->obj = NULL;
if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) {
- claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
- rxq_ctrl->dbr_offset));
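+		/* Release the doorbell record only if one was allocated. */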
+ if (rxq_ctrl->dbr_umem_id_valid)
+ claim_zero(mlx5_release_dbr(dev, rxq_ctrl->dbr_umem_id,
+ rxq_ctrl->dbr_offset));
mlx5_mr_btree_free(&rxq_ctrl->rxq.mr_ctrl.cache_bh);
LIST_REMOVE(rxq_ctrl, next);
rte_free(rxq_ctrl);
if (lro) {
tir_attr.lro_timeout_period_usecs =
priv->config.lro.timeout;
- tir_attr.lro_max_msg_sz = 0xff;
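+			/*
+			 * max_lro_msg_size was adjusted per queue in advance
+			 * so the PRM limit is never exceeded.
+			 */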
+ tir_attr.lro_max_msg_sz = priv->max_lro_msg_size;
tir_attr.lro_enable_mask = lro;
}
tir = mlx5_devx_cmd_create_tir(priv->sh->ctx, &tir_attr);