unsigned int wqe_n = 1 << rxq_data->elts_n;
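+	/*
+	 * With MPRQ each WQE carries multiple strides and every stride
+	 * can complete a packet, so size the CQ for all strides.
+	 */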
if (mlx5_rxq_mprq_enabled(rxq_data))
- cqe_n = wqe_n * (1 << rxq_data->strd_num_n) - 1;
+ cqe_n = wqe_n * RTE_BIT32(rxq_data->log_strd_num) - 1;
else
cqe_n = wqe_n - 1;
return cqe_n;
{
const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n;
unsigned int elts_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
- (1 << rxq_ctrl->rxq.elts_n) * (1 << rxq_ctrl->rxq.strd_num_n) :
- (1 << rxq_ctrl->rxq.elts_n);
+ RTE_BIT32(rxq_ctrl->rxq.elts_n) *
+ RTE_BIT32(rxq_ctrl->rxq.log_strd_num) :
+ RTE_BIT32(rxq_ctrl->rxq.elts_n);
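+	/* Cache the vector support check, it is used again further down. */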
+ bool has_vec_support = mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0;
unsigned int i;
int err;
rte_errno = ENOMEM;
goto error;
}
- /* Headroom is reserved by rte_pktmbuf_alloc(). */
- MLX5_ASSERT(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM);
+ /* Only vectored Rx routines rely on headroom size. */
+ MLX5_ASSERT(!has_vec_support ||
+ DATA_OFF(buf) >= RTE_PKTMBUF_HEADROOM);
/* Buffer is supposed to be empty. */
MLX5_ASSERT(rte_pktmbuf_data_len(buf) == 0);
MLX5_ASSERT(rte_pktmbuf_pkt_len(buf) == 0);
(*rxq_ctrl->rxq.elts)[i] = buf;
}
/* If Rx vector is activated. */
- if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) {
+ if (has_vec_support) {
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
struct rte_mbuf *mbuf_init = &rxq->fake_mbuf;
struct rte_pktmbuf_pool_private *priv =
{
struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq;
const uint16_t q_n = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
- (1 << rxq->elts_n) * (1 << rxq->strd_num_n) :
- (1 << rxq->elts_n);
+ RTE_BIT32(rxq->elts_n) * RTE_BIT32(rxq->log_strd_num) :
+ RTE_BIT32(rxq->elts_n);
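+	/* q_n is a power of two, so the ring wrap-around mask is q_n - 1. */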
const uint16_t q_mask = q_n - 1;
uint16_t elts_ci = mlx5_rxq_mprq_enabled(&rxq_ctrl->rxq) ?
rxq->elts_ci : rxq->rq_ci;
mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev)
{
struct mlx5_priv *priv = dev->data->dev_private;
- struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (RTE_ETH_RX_OFFLOAD_SCATTER |
RTE_ETH_RX_OFFLOAD_TIMESTAMP |
RTE_ETH_RX_OFFLOAD_RSS_HASH);
- if (!config->mprq.enabled)
+ if (!priv->config.mprq.enabled)
offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;
- if (config->hw_fcs_strip)
+ if (priv->sh->config.hw_fcs_strip)
offloads |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
- if (config->hw_csum)
+ if (priv->sh->dev_cap.hw_csum)
offloads |= (RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
- if (config->hw_vlan_strip)
+ if (priv->sh->dev_cap.hw_vlan_strip)
offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
- if (MLX5_LRO_SUPPORTED(dev))
+ if (priv->sh->dev_cap.lro_supported)
offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
return offloads;
}
dev->data->port_id, idx);
return false;
} else if (mp == NULL) {
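+		/*
+		 * A queue can join a shared group only if its segment layout
+		 * matches the group's exactly: same segment count and
+		 * identical per-segment configuration.
+		 */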
+ if (conf->rx_nseg != rxq_ctrl->rxseg_n) {
+ DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment number mismatch",
+ dev->data->port_id, idx);
+ return false;
+ }
for (i = 0; i < conf->rx_nseg; i++) {
- if (conf->rx_seg[i].split.mp !=
- rxq_ctrl->rxq.rxseg[i].mp ||
- conf->rx_seg[i].split.length !=
- rxq_ctrl->rxq.rxseg[i].length) {
+ if (memcmp(&conf->rx_seg[i].split, &rxq_ctrl->rxseg[i],
+ sizeof(struct rte_eth_rxseg_split))) {
DRV_LOG(ERR, "port %u queue index %u failed to join shared group: segment %u configuration mismatch",
dev->data->port_id, idx, i);
return false;
MLX5_ASSERT(n_seg < MLX5_MAX_RXQ_NSEG);
}
if (conf->share_group > 0) {
- if (!priv->config.hca_attr.mem_rq_rmp) {
+ if (!priv->sh->cdev->config.hca_attr.mem_rq_rmp) {
DRV_LOG(ERR, "port %u queue index %u shared Rx queue not supported by fw",
dev->data->port_id, idx);
rte_errno = EINVAL;
rte_errno = ENOMEM;
return -rte_errno;
}
- rxq->priv = priv;
- rxq->idx = idx;
- (*priv->rxq_privs)[idx] = rxq;
- if (rxq_ctrl != NULL) {
- /* Join owner list. */
- LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
- rxq->ctrl = rxq_ctrl;
- } else {
- rxq_ctrl = mlx5_rxq_new(dev, rxq, desc, socket, conf, rx_seg,
+ if (rxq_ctrl == NULL) {
+ rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, rx_seg,
n_seg);
if (rxq_ctrl == NULL) {
DRV_LOG(ERR, "port %u unable to allocate rx queue index %u",
dev->data->port_id, idx);
mlx5_free(rxq);
- (*priv->rxq_privs)[idx] = NULL;
rte_errno = ENOMEM;
return -rte_errno;
}
}
+ rxq->priv = priv;
+ rxq->idx = idx;
+ (*priv->rxq_privs)[idx] = rxq;
+ /* Join owner list. */
+ LIST_INSERT_HEAD(&rxq_ctrl->owners, rxq, owner_entry);
+ rxq->ctrl = rxq_ctrl;
mlx5_rxq_ref(dev, idx);
DRV_LOG(DEBUG, "port %u adding Rx queue %u to list",
dev->data->port_id, idx);
unsigned int buf_len;
unsigned int obj_num;
unsigned int obj_size;
- unsigned int strd_num_n = 0;
- unsigned int strd_sz_n = 0;
+ unsigned int log_strd_num = 0;
+ unsigned int log_strd_sz = 0;
unsigned int i;
unsigned int n_ibv = 0;
int ret;
n_ibv++;
desc += 1 << rxq->elts_n;
/* Get the max number of strides. */
- if (strd_num_n < rxq->strd_num_n)
- strd_num_n = rxq->strd_num_n;
+ if (log_strd_num < rxq->log_strd_num)
+ log_strd_num = rxq->log_strd_num;
/* Get the max size of a stride. */
- if (strd_sz_n < rxq->strd_sz_n)
- strd_sz_n = rxq->strd_sz_n;
- }
- MLX5_ASSERT(strd_num_n && strd_sz_n);
- buf_len = (1 << strd_num_n) * (1 << strd_sz_n);
- obj_size = sizeof(struct mlx5_mprq_buf) + buf_len + (1 << strd_num_n) *
- sizeof(struct rte_mbuf_ext_shared_info) + RTE_PKTMBUF_HEADROOM;
+ if (log_strd_sz < rxq->log_strd_sz)
+ log_strd_sz = rxq->log_strd_sz;
+ }
+ MLX5_ASSERT(log_strd_num && log_strd_sz);
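+	/*
+	 * Each object holds the buffer control structure, the stride
+	 * memory itself, one shinfo per stride and the mbuf headroom.
+	 */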
+ buf_len = RTE_BIT32(log_strd_num) * RTE_BIT32(log_strd_sz);
+ obj_size = sizeof(struct mlx5_mprq_buf) + buf_len +
+ RTE_BIT32(log_strd_num) *
+ sizeof(struct rte_mbuf_ext_shared_info) +
+ RTE_PKTMBUF_HEADROOM;
/*
* Received packets can be either memcpy'd or externally referenced. In
* case that the packet is attached to an mbuf as an external buffer, as
snprintf(name, sizeof(name), "port-%u-mprq", dev->data->port_id);
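+	/* The object init argument is the stride count per buffer. */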
mp = rte_mempool_create(name, obj_num, obj_size, MLX5_MPRQ_MP_CACHE_SZ,
0, NULL, NULL, mlx5_mprq_buf_init,
- (void *)((uintptr_t)1 << strd_num_n),
+ (void *)((uintptr_t)1 << log_strd_num),
dev->device->numa_node, 0);
if (mp == NULL) {
DRV_LOG(ERR,
{
struct mlx5_priv *priv = dev->data->dev_private;
- if (priv->config.hca_attr.lro_max_msg_sz_mode ==
+ if (priv->sh->cdev->config.hca_attr.lro_max_msg_sz_mode ==
MLX5_LRO_MAX_MSG_SIZE_START_FROM_L4 && max_lro_size >
MLX5_MAX_TCP_HDR_OFFSET)
max_lro_size -= MLX5_MAX_TCP_HDR_OFFSET;
priv->max_lro_msg_size * MLX5_LRO_SEG_CHUNK_SIZE);
}
+/**
+ * Prepare both the stride size and the number of strides for Multi-Packet RQ.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param idx
+ * RX queue index.
+ * @param desc
+ * Number of descriptors to configure in queue.
+ * @param rx_seg_en
+ *   Indicator if Rx segment is enabled; if so, Multi-Packet RQ is not enabled.
+ * @param min_mbuf_size
+ *   Non-scatter minimum mbuf size: max_rx_pktlen plus overhead.
+ * @param actual_log_stride_num
+ * Log number of strides to configure for this queue.
+ * @param actual_log_stride_size
+ * Log stride size to configure for this queue.
+ *
+ * @return
+ * 0 if Multi-Packet RQ is supported, otherwise -1.
+ */
+static int
+mlx5_mprq_prepare(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
+ bool rx_seg_en, uint32_t min_mbuf_size,
+ uint32_t *actual_log_stride_num,
+ uint32_t *actual_log_stride_size)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_port_config *config = &priv->config;
+ struct mlx5_dev_cap *dev_cap = &priv->sh->dev_cap;
+ uint32_t log_min_stride_num = dev_cap->mprq.log_min_stride_num;
+ uint32_t log_max_stride_num = dev_cap->mprq.log_max_stride_num;
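+	/* Clamp the built-in defaults into the device-supported range. */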
+ uint32_t log_def_stride_num =
+ RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_NUM,
+ log_min_stride_num),
+ log_max_stride_num);
+ uint32_t log_min_stride_size = dev_cap->mprq.log_min_stride_size;
+ uint32_t log_max_stride_size = dev_cap->mprq.log_max_stride_size;
+ uint32_t log_def_stride_size =
+ RTE_MIN(RTE_MAX(MLX5_MPRQ_DEFAULT_LOG_STRIDE_SIZE,
+ log_min_stride_size),
+ log_max_stride_size);
+ uint32_t log_stride_wqe_size;
+
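+	/* MPRQ must be generally supported and Rx buffer split must be off. */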
+ if (mlx5_check_mprq_support(dev) != 1 || rx_seg_en)
+ goto unsupport;
+	/* Check if the chosen number of strides is in the supported range. */
+ if (config->mprq.log_stride_num > log_max_stride_num ||
+ config->mprq.log_stride_num < log_min_stride_num) {
+ *actual_log_stride_num = log_def_stride_num;
+ DRV_LOG(WARNING,
+ "Port %u Rx queue %u number of strides for Multi-Packet RQ is out of range, setting default value (%u)",
+ dev->data->port_id, idx, RTE_BIT32(log_def_stride_num));
+ } else {
+ *actual_log_stride_num = config->mprq.log_stride_num;
+ }
+ if (config->mprq.log_stride_size) {
+		/* Check if the chosen stride size is in the supported range. */
+ if (config->mprq.log_stride_size > log_max_stride_size ||
+ config->mprq.log_stride_size < log_min_stride_size) {
+ *actual_log_stride_size = log_def_stride_size;
+ DRV_LOG(WARNING,
+ "Port %u Rx queue %u size of a stride for Multi-Packet RQ is out of range, setting default value (%u)",
+ dev->data->port_id, idx,
+ RTE_BIT32(log_def_stride_size));
+ } else {
+ *actual_log_stride_size = config->mprq.log_stride_size;
+ }
+ } else {
+ if (min_mbuf_size <= RTE_BIT32(log_max_stride_size))
+ *actual_log_stride_size = log2above(min_mbuf_size);
+ else
+ goto unsupport;
+ }
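+	/* WQE buffer size is stride size times strides per WQE (logs add). */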
+ log_stride_wqe_size = *actual_log_stride_num + *actual_log_stride_size;
+ /* Check if WQE buffer size is supported by hardware. */
+ if (log_stride_wqe_size < dev_cap->mprq.log_min_stride_wqe_size) {
+ *actual_log_stride_num = log_def_stride_num;
+ *actual_log_stride_size = log_def_stride_size;
+ DRV_LOG(WARNING,
+ "Port %u Rx queue %u size of WQE buffer for Multi-Packet RQ is too small, setting default values (stride_num_n=%u, stride_size_n=%u)",
+ dev->data->port_id, idx, RTE_BIT32(log_def_stride_num),
+ RTE_BIT32(log_def_stride_size));
+ log_stride_wqe_size = log_def_stride_num + log_def_stride_size;
+ }
+ MLX5_ASSERT(log_stride_wqe_size >=
+ dev_cap->mprq.log_min_stride_wqe_size);
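+	/* The queue must hold more descriptors than a single WQE's strides. */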
+ if (desc <= RTE_BIT32(*actual_log_stride_num))
+ goto unsupport;
+ if (min_mbuf_size > RTE_BIT32(log_stride_wqe_size)) {
+ DRV_LOG(WARNING, "Port %u Rx queue %u "
+ "Multi-Packet RQ is unsupported, WQE buffer size (%u) "
+ "is smaller than min mbuf size (%u)",
+ dev->data->port_id, idx, RTE_BIT32(log_stride_wqe_size),
+ min_mbuf_size);
+ goto unsupport;
+ }
+ DRV_LOG(DEBUG, "Port %u Rx queue %u "
+ "Multi-Packet RQ is enabled strd_num_n = %u, strd_sz_n = %u",
+ dev->data->port_id, idx, RTE_BIT32(*actual_log_stride_num),
+ RTE_BIT32(*actual_log_stride_size));
+ return 0;
+unsupport:
+ if (config->mprq.enabled)
+ DRV_LOG(WARNING,
+ "Port %u MPRQ is requested but cannot be enabled\n"
+ " (requested: pkt_sz = %u, desc_num = %u,"
+ " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
+ " supported: min_rxqs_num = %u, min_buf_wqe_sz = %u"
+ " min_stride_sz = %u, max_stride_sz = %u).\n"
+ "Rx segment is %senable.",
+ dev->data->port_id, min_mbuf_size, desc, priv->rxqs_n,
+ RTE_BIT32(config->mprq.log_stride_size),
+ RTE_BIT32(config->mprq.log_stride_num),
+ config->mprq.min_rxqs_num,
+ RTE_BIT32(dev_cap->mprq.log_min_stride_wqe_size),
+ RTE_BIT32(dev_cap->mprq.log_min_stride_size),
+ RTE_BIT32(dev_cap->mprq.log_max_stride_size),
+ rx_seg_en ? "" : "not ");
+ return -1;
+}
+
/**
* Create a DPDK Rx queue.
*
* @param dev
* Pointer to Ethernet device.
- * @param rxq
- * RX queue private data.
+ * @param idx
+ * RX queue index.
* @param desc
* Number of descriptors to configure in queue.
* @param socket
* A DPDK queue object on success, NULL otherwise and rte_errno is set.
*/
struct mlx5_rxq_ctrl *
-mlx5_rxq_new(struct rte_eth_dev *dev, struct mlx5_rxq_priv *rxq,
- uint16_t desc,
+mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
unsigned int socket, const struct rte_eth_rxconf *conf,
const struct rte_eth_rxseg_split *rx_seg, uint16_t n_seg)
{
- uint16_t idx = rxq->idx;
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_rxq_ctrl *tmpl;
unsigned int mb_len = rte_pktmbuf_data_room_size(rx_seg[0].mp);
- struct mlx5_dev_config *config = &priv->config;
+ struct mlx5_port_config *config = &priv->config;
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
unsigned int lro_on_queue = !!(offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO);
RTE_PKTMBUF_HEADROOM;
unsigned int max_lro_size = 0;
unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
- const int mprq_en = mlx5_check_mprq_support(dev) > 0 && n_seg == 1 &&
- !rx_seg[0].offset && !rx_seg[0].length;
- unsigned int mprq_stride_nums = config->mprq.stride_num_n ?
- config->mprq.stride_num_n : MLX5_MPRQ_STRIDE_NUM_N;
- unsigned int mprq_stride_size = non_scatter_min_mbuf_size <=
- (1U << config->mprq.max_stride_size_n) ?
- log2above(non_scatter_min_mbuf_size) : MLX5_MPRQ_STRIDE_SIZE_N;
- unsigned int mprq_stride_cap = (config->mprq.stride_num_n ?
- (1U << config->mprq.stride_num_n) : (1U << mprq_stride_nums)) *
- (config->mprq.stride_size_n ?
- (1U << config->mprq.stride_size_n) : (1U << mprq_stride_size));
+ uint32_t mprq_log_actual_stride_num = 0;
+ uint32_t mprq_log_actual_stride_size = 0;
+ bool rx_seg_en = n_seg != 1 || rx_seg[0].offset || rx_seg[0].length;
+ const int mprq_en = !mlx5_mprq_prepare(dev, idx, desc, rx_seg_en,
+ non_scatter_min_mbuf_size,
+ &mprq_log_actual_stride_num,
+ &mprq_log_actual_stride_size);
/*
* Always allocate extra slots, even if eventually
* the vector Rx will not be used.
*/
uint16_t desc_n = desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP;
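+	/* elts[] (and the MPRQ buffer array) live right behind the ctrl struct. */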
+ size_t alloc_size = sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *);
const struct rte_eth_rxseg_split *qs_seg = rx_seg;
unsigned int tail_len;
- tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO,
- sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *) +
- (!!mprq_en) *
- (desc >> mprq_stride_nums) * sizeof(struct mlx5_mprq_buf *),
- 0, socket);
+ if (mprq_en) {
+ /* Trim the number of descs needed. */
+ desc >>= mprq_log_actual_stride_num;
+ alloc_size += desc * sizeof(struct mlx5_mprq_buf *);
+ }
+ tmpl = mlx5_malloc(MLX5_MEM_RTE | MLX5_MEM_ZERO, alloc_size, 0, socket);
if (!tmpl) {
rte_errno = ENOMEM;
return NULL;
}
LIST_INIT(&tmpl->owners);
- if (conf->share_group > 0) {
- tmpl->rxq.shared = 1;
- tmpl->share_group = conf->share_group;
- tmpl->share_qid = conf->share_qid;
- LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
- }
- rxq->ctrl = tmpl;
- LIST_INSERT_HEAD(&tmpl->owners, rxq, owner_entry);
MLX5_ASSERT(n_seg && n_seg <= MLX5_MAX_RXQ_NSEG);
+	/*
+	 * Save the original segment configuration in the shared queue
+	 * descriptor so it can be checked when sibling queues join the group.
+	 */
+ tmpl->rxseg_n = n_seg;
+ rte_memcpy(tmpl->rxseg, qs_seg,
+ sizeof(struct rte_eth_rxseg_split) * n_seg);
/*
* Build the array of actual buffer offsets and lengths.
* Pad with the buffers from the last memory pool if
tmpl->socket = socket;
if (dev->data->dev_conf.intr_conf.rxq)
tmpl->irq = 1;
- /*
- * This Rx queue can be configured as a Multi-Packet RQ if all of the
- * following conditions are met:
- * - MPRQ is enabled.
- * - The number of descs is more than the number of strides.
- * - max_rx_pktlen plus overhead is less than the max size
- * of a stride or mprq_stride_size is specified by a user.
- * Need to make sure that there are enough strides to encap
- * the maximum packet size in case mprq_stride_size is set.
- * Otherwise, enable Rx scatter if necessary.
- */
- if (mprq_en && desc > (1U << mprq_stride_nums) &&
- (non_scatter_min_mbuf_size <=
- (1U << config->mprq.max_stride_size_n) ||
- (config->mprq.stride_size_n &&
- non_scatter_min_mbuf_size <= mprq_stride_cap))) {
+ if (mprq_en) {
/* TODO: Rx scatter isn't supported yet. */
tmpl->rxq.sges_n = 0;
- /* Trim the number of descs needed. */
- desc >>= mprq_stride_nums;
- tmpl->rxq.strd_num_n = config->mprq.stride_num_n ?
- config->mprq.stride_num_n : mprq_stride_nums;
- tmpl->rxq.strd_sz_n = config->mprq.stride_size_n ?
- config->mprq.stride_size_n : mprq_stride_size;
+ tmpl->rxq.log_strd_num = mprq_log_actual_stride_num;
+ tmpl->rxq.log_strd_sz = mprq_log_actual_stride_size;
tmpl->rxq.strd_shift_en = MLX5_MPRQ_TWO_BYTE_SHIFT;
tmpl->rxq.strd_scatter_en =
!!(offloads & RTE_ETH_RX_OFFLOAD_SCATTER);
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
config->mprq.max_memcpy_len);
max_lro_size = RTE_MIN(max_rx_pktlen,
- (1u << tmpl->rxq.strd_num_n) *
- (1u << tmpl->rxq.strd_sz_n));
- DRV_LOG(DEBUG,
- "port %u Rx queue %u: Multi-Packet RQ is enabled"
- " strd_num_n = %u, strd_sz_n = %u",
- dev->data->port_id, idx,
- tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
+ RTE_BIT32(tmpl->rxq.log_strd_num) *
+ RTE_BIT32(tmpl->rxq.log_strd_sz));
} else if (tmpl->rxq.rxseg_n == 1) {
MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
tmpl->rxq.sges_n = 0;
tmpl->rxq.sges_n = sges_n;
max_lro_size = max_rx_pktlen;
}
- if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
- DRV_LOG(WARNING,
- "port %u MPRQ is requested but cannot be enabled\n"
- " (requested: pkt_sz = %u, desc_num = %u,"
- " rxq_num = %u, stride_sz = %u, stride_num = %u\n"
- " supported: min_rxqs_num = %u,"
- " min_stride_sz = %u, max_stride_sz = %u).",
- dev->data->port_id, non_scatter_min_mbuf_size,
- desc, priv->rxqs_n,
- config->mprq.stride_size_n ?
- (1U << config->mprq.stride_size_n) :
- (1U << mprq_stride_size),
- config->mprq.stride_num_n ?
- (1U << config->mprq.stride_num_n) :
- (1U << mprq_stride_nums),
- config->mprq.min_rxqs_num,
- (1U << config->mprq.min_stride_size_n),
- (1U << config->mprq.max_stride_size_n));
DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u",
dev->data->port_id, 1 << tmpl->rxq.sges_n);
if (desc % (1 << tmpl->rxq.sges_n)) {
tmpl->rxq.crc_present = 0;
tmpl->rxq.lro = lro_on_queue;
if (offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
- if (config->hw_fcs_strip) {
+ if (priv->sh->config.hw_fcs_strip) {
/*
* RQs used for LRO-enabled TIRs should not be
* configured to scatter the FCS.
dev->data->port_id,
tmpl->rxq.crc_present ? "disabled" : "enabled",
tmpl->rxq.crc_present << 2);
- /* Save port ID. */
tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf &&
(!!(dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS));
+ /* Save port ID. */
tmpl->rxq.port_id = dev->data->port_id;
tmpl->sh = priv->sh;
tmpl->rxq.mp = rx_seg[0].mp;
tmpl->rxq.elts_n = log2above(desc);
- tmpl->rxq.rq_repl_thresh =
- MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
- tmpl->rxq.elts =
- (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
+ tmpl->rxq.rq_repl_thresh = MLX5_VPMD_RXQ_RPLNSH_THRESH(desc_n);
+ tmpl->rxq.elts = (struct rte_mbuf *(*)[desc_n])(tmpl + 1);
tmpl->rxq.mprq_bufs =
(struct mlx5_mprq_buf *(*)[desc])(*tmpl->rxq.elts + desc_n);
tmpl->rxq.idx = idx;
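+	/* Queues of a shared group are linked into the shared context list. */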
+ if (conf->share_group > 0) {
+ tmpl->rxq.shared = 1;
+ tmpl->share_group = conf->share_group;
+ tmpl->share_qid = conf->share_qid;
+ LIST_INSERT_HEAD(&priv->sh->shared_rxqs, tmpl, share_entry);
+ }
LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next);
return tmpl;
error:
* Number of queues in the array.
*
* @return
- * 1 if all queues in indirection table match 0 othrwise.
+ *   1 if all queues in indirection table match, 0 otherwise.
*/
static int
mlx5_ind_table_obj_match_queues(const struct mlx5_ind_table_obj *ind_tbl,
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
+ log2above(priv->sh->dev_cap.ind_table_max_size);
if (ref_qs)
for (i = 0; i != queues_n; ++i) {
* Number of queues in the array.
* @param standalone
* Indirection table for Standalone queue.
+ * @param ref_new_qs
+ * Whether to increment new RxQ set reference counters.
+ * @param deref_old_qs
+ * Whether to decrement old RxQ set reference counters.
*
* @return
* 0 on success, a negative errno value otherwise and rte_errno is set.
mlx5_ind_table_obj_modify(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl,
uint16_t *queues, const uint32_t queues_n,
- bool standalone)
+ bool standalone, bool ref_new_qs, bool deref_old_qs)
{
struct mlx5_priv *priv = dev->data->dev_private;
- unsigned int i;
+ unsigned int i = 0, j;
int ret = 0, err;
const unsigned int n = rte_is_power_of_2(queues_n) ?
log2above(queues_n) :
- log2above(priv->config.ind_table_max_size);
+ log2above(priv->sh->dev_cap.ind_table_max_size);
MLX5_ASSERT(standalone);
RTE_SET_USED(standalone);
if (mlx5_ind_table_obj_check_standalone(dev, ind_tbl) < 0)
return -rte_errno;
- for (i = 0; i != queues_n; ++i) {
- if (!mlx5_rxq_get(dev, queues[i])) {
- ret = -rte_errno;
- goto error;
+ if (ref_new_qs)
+ for (i = 0; i != queues_n; ++i) {
+ if (!mlx5_rxq_ref(dev, queues[i])) {
+ ret = -rte_errno;
+ goto error;
+ }
}
- }
MLX5_ASSERT(priv->obj_ops.ind_table_modify);
ret = priv->obj_ops.ind_table_modify(dev, n, queues, queues_n, ind_tbl);
if (ret)
goto error;
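+	/* Drop the references held on the queues being replaced. */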
+ if (deref_old_qs)
+ for (i = 0; i < ind_tbl->queues_n; i++)
+ claim_nonzero(mlx5_rxq_deref(dev, ind_tbl->queues[i]));
ind_tbl->queues_n = queues_n;
ind_tbl->queues = queues;
return 0;
error:
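+	/* Unwind only the references taken by this call. */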
- err = rte_errno;
- rte_errno = err;
+ if (ref_new_qs) {
+ err = rte_errno;
+ for (j = 0; j < i; j++)
+ mlx5_rxq_deref(dev, queues[j]);
+ rte_errno = err;
+ }
DRV_LOG(DEBUG, "Port %u cannot setup indirection table.",
dev->data->port_id);
return ret;
mlx5_ind_table_obj_attach(struct rte_eth_dev *dev,
struct mlx5_ind_table_obj *ind_tbl)
{
- unsigned int i;
int ret;
ret = mlx5_ind_table_obj_modify(dev, ind_tbl, ind_tbl->queues,
- ind_tbl->queues_n, true);
- if (ret != 0) {
+ ind_tbl->queues_n,
+ true /* standalone */,
+ true /* ref_new_qs */,
+ false /* deref_old_qs */);
+ if (ret != 0)
DRV_LOG(ERR, "Port %u could not modify indirect table obj %p",
dev->data->port_id, (void *)ind_tbl);
- return ret;
- }
- for (i = 0; i < ind_tbl->queues_n; i++)
- mlx5_rxq_ref(dev, ind_tbl->queues[i]);
- return 0;
+ return ret;
}
/**
struct mlx5_priv *priv = dev->data->dev_private;
const unsigned int n = rte_is_power_of_2(ind_tbl->queues_n) ?
log2above(ind_tbl->queues_n) :
- log2above(priv->config.ind_table_max_size);
+ log2above(priv->sh->dev_cap.ind_table_max_size);
unsigned int i;
int ret;
if (hrxq->standalone) {
/*
* Replacement of indirection table unsupported for
- * stanalone hrxq objects (used by shared RSS).
+ * standalone hrxq objects (used by shared RSS).
*/
rte_errno = ENOTSUP;
return -rte_errno;
if (data == NULL)
continue;
data->sh = sh;
- data->rt_timestamp = priv->config.rt_timestamp;
+ data->rt_timestamp = sh->dev_cap.rt_timestamp;
}
}