} else if (config.cqe_pad) {
DRV_LOG(INFO, "Rx CQE padding is enabled");
}
+ if (config.devx) {
+ priv->counter_fallback = 0;
+ err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
+ if (err) {
+ err = -err;
+ goto error;
+ }
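+ /*
+ * Use the fall-back counter management when the flow counter
+ * batch dump is not supported or DevX async commands are
+ * unavailable at build time.
+ */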
+ if (!config.hca_attr.flow_counters_dump)
+ priv->counter_fallback = 1;
+#ifndef HAVE_IBV_DEVX_ASYNC
+ priv->counter_fallback = 1;
+#endif
+ if (priv->counter_fallback)
+ DRV_LOG(INFO, "Use fall-back DV counter management\n");
+ /* Check for LRO support. */
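+ /*
+ * Besides the device capability, LRO needs DevX TIR
+ * destination support (dest_tir) and Multi-Packet RQ.
+ */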
+ if (config.dest_tir && mprq && config.hca_attr.lro_cap) {
+ /* TBD check tunnel lro caps. */
+ config.lro.supported = config.hca_attr.lro_cap;
+ DRV_LOG(DEBUG, "Device supports LRO");
+ /*
+ * If LRO timeout is not configured by application,
+ * use the minimal supported value.
+ */
+ if (!config.lro.timeout)
+ config.lro.timeout =
+ config.hca_attr.lro_timer_supported_periods[0];
+ DRV_LOG(DEBUG, "LRO session timeout set to %d usec",
+ config.lro.timeout);
+ config.mprq.enabled = 1;
+ DRV_LOG(DEBUG, "Enable MPRQ for LRO use");
+ }
+ }
if (config.mprq.enabled && mprq) {
if (config.mprq.stride_num_n > mprq_max_stride_num_n ||
config.mprq.stride_num_n < mprq_min_stride_num_n) {
* Verbs context returned by ibv_open_device().
*/
mlx5_link_update(eth_dev, 0);
-#ifdef HAVE_IBV_DEVX_OBJ
- if (config.devx) {
- priv->counter_fallback = 0;
- err = mlx5_devx_cmd_query_hca_attr(sh->ctx, &config.hca_attr);
- if (err) {
- err = -err;
- goto error;
- }
- if (!config.hca_attr.flow_counters_dump)
- priv->counter_fallback = 1;
-#ifndef HAVE_IBV_DEVX_ASYNC
- priv->counter_fallback = 1;
-#endif
- if (priv->counter_fallback)
- DRV_LOG(INFO, "Use fall-back DV counter management\n");
- }
-#endif
#ifdef HAVE_MLX5DV_DR_ESWITCH
if (!(config.hca_attr.eswitch_manager && config.dv_flow_en &&
(switch_info->representor || switch_info->master)))
const uint8_t use_app_rss_key =
!!dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
int ret = 0;
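+ /* LRO status drives the MPRQ threshold and CQE compression checks below. */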
+ unsigned int lro_on = mlx5_lro_on(dev);
if (use_app_rss_key &&
(dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key_len !=
DRV_LOG(INFO, "port %u Rx queues number update: %u -> %u",
dev->data->port_id, priv->rxqs_n, rxqs_n);
priv->rxqs_n = rxqs_n;
+ /*
+ * When using LRO, MPRQ is implicitly enabled.
+ * Adjust threshold value to ensure MPRQ can be enabled.
+ */
+ if (lro_on && priv->config.mprq.min_rxqs_num > priv->rxqs_n)
+ priv->config.mprq.min_rxqs_num = priv->rxqs_n;
/*
* If the requested number of RX queues is not a power of two,
* use the maximum indirection table size for better balancing.
j = 0;
}
}
+ if (lro_on && priv->config.cqe_comp) {
+ /* CQE compression is not supported with LRO. */
+ DRV_LOG(WARNING, "Rx CQE compression isn't supported with LRO");
+ priv->config.cqe_comp = 0;
+ }
ret = mlx5_proc_priv_init(dev);
if (ret)
return ret;
/**
* Check whether Multi-Packet RQ is enabled for the device.
+ * MPRQ can be enabled explicitly, or implicitly by enabling LRO.
*
* @param dev
* Pointer to Ethernet device.
tmpl->rxq.crc_present = 0;
if (offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
if (config->hw_fcs_strip) {
- tmpl->rxq.crc_present = 1;
+ /*
+ * RQs used for LRO-enabled TIRs should not be
+ * configured to scatter the FCS.
+ */
+ if (mlx5_lro_on(dev))
+ DRV_LOG(WARNING,
+ "port %u CRC stripping has been "
+ "disabled but will still be performed "
+ "by hardware, because LRO is enabled",
+ dev->data->port_id);
+ else
+ tmpl->rxq.crc_present = 1;
} else {
DRV_LOG(WARNING,
"port %u CRC stripping has been disabled but will"