if (priv->counter_fallback)
DRV_LOG(INFO, "Use fall-back DV counter management\n");
/* Check for LRO support. */
- if (config.dest_tir && mprq && config.hca_attr.lro_cap) {
+ if (config.dest_tir && config.hca_attr.lro_cap) {
/* TBD check tunnel lro caps. */
config.lro.supported = config.hca_attr.lro_cap;
DRV_LOG(DEBUG, "Device supports LRO");
config.lro.timeout = config.hca_attr.lro_timer_supported_periods[0];
DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
config.lro.timeout);
- config.mprq.enabled = 1;
- DRV_LOG(DEBUG, "Enable MPRQ for LRO use");
}
}
if (config.mprq.enabled && mprq) {
DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
dev->data->port_id, priv->rxqs_n, rxqs_n);
priv->rxqs_n = rxqs_n;
- /*
- * WHen using LRO, MPRQ is implicitly enabled.
- * Adjust threshold value to ensure MPRQ can be enabled.
- */
- if (lro_on && priv->config.mprq.min_rxqs_num > priv->rxqs_n)
- priv->config.mprq.min_rxqs_num = priv->rxqs_n;
/*
* If the requested number of RX queues is not a power of two,
* use the maximum indirection table size for better balancing.
*/
/**
* Check whether Multi-Packet RQ is enabled for the device.
- * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
*
* @param dev
* Pointer to Ethernet device.
unsigned int max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
RTE_PKTMBUF_HEADROOM;
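+ /* Maximal LRO message size allowed on this Rx queue, computed below. */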
+ unsigned int max_lro_size = 0;
if (non_scatter_min_mbuf_size > mb_len && !(offloads &
DEV_RX_OFFLOAD_SCATTER)) {
tmpl->rxq.strd_headroom_en = strd_headroom_en;
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(mb_len -
RTE_PKTMBUF_HEADROOM, config->mprq.max_memcpy_len);
- mlx5_max_lro_msg_size_adjust(dev, RTE_MIN(max_rx_pkt_len,
- (1u << tmpl->rxq.strd_num_n) * (1u << tmpl->rxq.strd_sz_n)));
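+ /* An LRO message must fit into one MPRQ buffer: strd_num * strd_sz bytes. */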
+ max_lro_size = RTE_MIN(max_rx_pkt_len,
+ (1u << tmpl->rxq.strd_num_n) *
+ (1u << tmpl->rxq.strd_sz_n));
DRV_LOG(DEBUG,
"port %u Rx queue %u: Multi-Packet RQ is enabled"
" strd_num_n = %u, strd_sz_n = %u",
dev->data->port_id, idx,
tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
} else if (max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
tmpl->rxq.sges_n = 0;
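+ /* Single-segment Rx: LRO is bounded by the maximal Rx packet length. */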
+ max_lro_size = max_rx_pkt_len;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int size = non_scatter_min_mbuf_size;
unsigned int sges_n;
/*
* Determine the number of SGEs needed for a full packet
* and round it to the next power of two.
*/
sges_n = log2above((size / mb_len) + !!(size % mb_len));
- tmpl->rxq.sges_n = sges_n;
- /* Make sure rxq.sges_n did not overflow. */
- size = mb_len * (1 << tmpl->rxq.sges_n);
- size -= RTE_PKTMBUF_HEADROOM;
- if (size < max_rx_pkt_len) {
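+ /* Refuse SGE counts that exceed the maximum the PMD can encode. */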
+ if (sges_n > MLX5_MAX_LOG_RQ_SEGS) {
DRV_LOG(ERR,
"port %u too many SGEs (%u) needed to handle"
- " requested maximum packet size %u",
- dev->data->port_id,
- 1 << sges_n,
- max_rx_pkt_len);
- rte_errno = EOVERFLOW;
+ " requested maximum packet size %u, the maximum"
+ " supported are %u", dev->data->port_id,
+ 1 << sges_n, max_rx_pkt_len,
+ 1u << MLX5_MAX_LOG_RQ_SEGS);
+ rte_errno = ENOTSUP;
goto error;
}
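+ /* Valid SGE count: apply it and bound LRO by the max Rx packet length. */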
+ tmpl->rxq.sges_n = sges_n;
+ max_lro_size = max_rx_pkt_len;
}
if (mprq_en && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
DRV_LOG(WARNING,
rte_errno = EINVAL;
goto error;
}
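+ /* Adjust the device's maximal LRO message size by this queue's bound. */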
+ mlx5_max_lro_msg_size_adjust(dev, max_lro_size);
/* Toggle RX checksum offload if hardware supports it. */
tmpl->rxq.csum = !!(offloads & DEV_RX_OFFLOAD_CHECKSUM);
tmpl->rxq.hw_timestamp = !!(offloads & DEV_RX_OFFLOAD_TIMESTAMP);
unsigned int hw_timestamp:1; /* Enable HW timestamp. */
unsigned int vlan_strip:1; /* Enable VLAN stripping. */
unsigned int crc_present:1; /* CRC must be subtracted. */
- unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */
+ unsigned int sges_n:3; /* Log 2 of SGEs (max buffers per packet). */
unsigned int cqe_n:4; /* Log 2 of CQ elements. */
unsigned int elts_n:4; /* Log 2 of Mbufs. */
unsigned int rss_hash:1; /* RSS hash result is enabled. */
unsigned int strd_shift_en:1; /* Enable 2bytes shift on a stride. */
unsigned int err_state:2; /* enum mlx5_rxq_err_state. */
unsigned int strd_headroom_en:1; /* Enable mbuf headroom in MPRQ. */
- unsigned int :3; /* Remaining bits. */
+ unsigned int :2; /* Remaining bits. */
volatile uint32_t *rq_db;
volatile uint32_t *cq_db;
uint16_t port_id;