return name;
}
+/*
+ * Validate a port's requested maximum LRO aggregated packet size.
+ *
+ * @param port_id
+ *   Port identifier, used only for the error log messages.
+ * @param config_size
+ *   The max_lro_pkt_size value requested in the device configuration.
+ * @param max_rx_pkt_len
+ *   The configured maximum Rx packet length for the port.
+ * @param dev_info_size
+ *   The maximum LRO packet size reported by the driver in dev_info;
+ *   0 means the driver reported no dedicated LRO size limit.
+ *
+ * @return
+ *   0 if config_size is acceptable, -EINVAL otherwise.
+ */
+static inline int
+check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
+ uint32_t max_rx_pkt_len, uint32_t dev_info_size)
+{
+ int ret = 0;
+
+ if (dev_info_size == 0) {
+ /*
+ * Driver reported no LRO-specific limit: the requested size
+ * must match the configured max Rx packet length exactly.
+ */
+ if (config_size != max_rx_pkt_len) {
+ RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
+ " %u != %u is not allowed\n",
+ port_id, config_size, max_rx_pkt_len);
+ ret = -EINVAL;
+ }
+ } else if (config_size > dev_info_size) {
+ /* Requested size exceeds the driver-reported maximum. */
+ RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
+ "> max allowed value %u\n", port_id, config_size,
+ dev_info_size);
+ ret = -EINVAL;
+ } else if (config_size < RTE_ETHER_MIN_LEN) {
+ /* Requested size is below the minimum Ethernet frame length. */
+ RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
+ "< min allowed value %u\n", port_id, config_size,
+ (unsigned int)RTE_ETHER_MIN_LEN);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
/*
* Validate offloads that are requested through rte_eth_dev_configure against
* the offloads successfuly set by the ethernet device.
/* Check if offload couldn't be disabled. */
if (offload & set_offloads) {
- RTE_ETHDEV_LOG(INFO,
- "Port %u failed to disable %s offload %s\n",
+ RTE_ETHDEV_LOG(DEBUG,
+ "Port %u %s offload %s is not requested but enabled\n",
port_id, offload_type, offload_name(offload));
}
* Copy the dev_conf parameter into the dev structure.
* rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
*/
- memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+ if (dev_conf != &dev->data->dev_conf)
+ memcpy(&dev->data->dev_conf, dev_conf,
+ sizeof(dev->data->dev_conf));
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
RTE_ETHER_MAX_LEN;
}
+ /*
+ * If LRO is enabled, check that the maximum aggregated packet
+ * size is supported by the configured device.
+ */
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (dev_conf->rxmode.max_lro_pkt_size == 0)
+ dev->data->dev_conf.rxmode.max_lro_pkt_size =
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ ret = check_lro_pkt_size(port_id,
+ dev->data->dev_conf.rxmode.max_lro_pkt_size,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ dev_info.max_lro_pkt_size);
+ if (ret != 0)
+ goto rollback;
+ }
+
/* Any requested offloading must be within its device capabilities */
if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
dev_conf->rxmode.offloads) {
if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
(dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
RTE_ETHDEV_LOG(ERR,
- "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested",
+ "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
port_id,
rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
ret = -EINVAL;
return -EINVAL;
}
+ /*
+ * If LRO is enabled, check that the maximum aggregated packet
+ * size is supported by the configured device.
+ */
+ if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
+ dev->data->dev_conf.rxmode.max_lro_pkt_size =
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ int ret = check_lro_pkt_size(port_id,
+ dev->data->dev_conf.rxmode.max_lro_pkt_size,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ dev_info.max_lro_pkt_size);
+ if (ret != 0)
+ return ret;
+ }
+
ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
socket_id, &local_conf, mp);
if (!ret) {
return eth_err(port_id, diag);
}
+ /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
+ dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
+ RTE_MAX_QUEUES_PER_PORT);
+ dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
+ RTE_MAX_QUEUES_PER_PORT);
+
dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;