rx_nombuf)},
};
-#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
+#define RTE_NB_STATS RTE_DIM(rte_stats_strings)
static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
{"errors", offsetof(struct rte_eth_stats, q_errors)},
};
-#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
- sizeof(rte_rxq_stats_strings[0]))
+#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)
static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
{"packets", offsetof(struct rte_eth_stats, q_opackets)},
{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
-#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
- sizeof(rte_txq_stats_strings[0]))
+#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)
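For reference, RTE_DIM() is the rte_common.h helper these three hunks switch to; it expands to the same element-count expression as the open-coded form, so the change is behavior-preserving:

	#define RTE_DIM(a)	(sizeof(a) / sizeof((a)[0]))
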
#define RTE_RX_OFFLOAD_BIT2STR(_name) \
{ DEV_RX_OFFLOAD_##_name, #_name }
return name;
}
+static inline int
+check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
+ uint32_t max_rx_pkt_len, uint32_t dev_info_size)
+{
+ int ret = 0;
+
+ if (dev_info_size == 0) {
+ if (config_size != max_rx_pkt_len) {
+ RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
+ " %u != %u is not allowed\n",
+ port_id, config_size, max_rx_pkt_len);
+ ret = -EINVAL;
+ }
+ } else if (config_size > dev_info_size) {
+ RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
+ "> max allowed value %u\n", port_id, config_size,
+ dev_info_size);
+ ret = -EINVAL;
+ } else if (config_size < RTE_ETHER_MIN_LEN) {
+ RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
+ "< min allowed value %u\n", port_id, config_size,
+ (unsigned int)RTE_ETHER_MIN_LEN);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
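Taken out of diff context, the helper's branches can be exercised like this (a minimal sketch; the port id and sizes are made-up values):

	/* dev_info_size == 0: the driver reports no LRO limit, so the
	 * configured size must equal max_rx_pkt_len. */
	check_lro_pkt_size(0, 1500, 1500, 0);     /* returns 0 */
	check_lro_pkt_size(0, 9000, 1500, 0);     /* returns -EINVAL */

	/* Otherwise the size must fit [RTE_ETHER_MIN_LEN, dev_info_size]. */
	check_lro_pkt_size(0, 9000, 1500, 65535); /* returns 0 */
	check_lro_pkt_size(0, 32, 1500, 65535);   /* returns -EINVAL */
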
/*
* Validate offloads that are requested through rte_eth_dev_configure against
- * the offloads successfuly set by the ethernet device.
+ * the offloads successfully set by the ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
* @param req_offloads
* The offloads that have been requested through `rte_eth_dev_configure`.
* @param set_offloads
- * The offloads successfuly set by the ethernet device.
+ * The offloads successfully set by the ethernet device.
* @param offload_type
* The offload type i.e. Rx/Tx string.
* @param offload_name
ret = -EINVAL;
}
- /* Chech if offload couldn't be disabled. */
+ /* Check if offload couldn't be disabled. */
if (offload & set_offloads) {
- RTE_ETHDEV_LOG(INFO,
- "Port %u failed to disable %s offload %s\n",
+ RTE_ETHDEV_LOG(DEBUG,
+ "Port %u %s offload %s is not requested but enabled\n",
port_id, offload_type, offload_name(offload));
}
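Design note: an offload staying enabled although the application did not request it is a PMD-side decision (some devices cannot disable certain offloads), not a failure the application can act on, hence the neutral wording and the demotion from INFO to DEBUG.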
* Copy the dev_conf parameter into the dev structure.
* rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
*/
- memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+ if (dev_conf != &dev->data->dev_conf)
+ memcpy(&dev->data->dev_conf, dev_conf,
+ sizeof(dev->data->dev_conf));
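The guard matters because callers may legitimately pass dev_conf pointing at dev->data->dev_conf itself (e.g. on a reconfigure path), and memcpy() with identical source and destination is undefined behavior.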
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
RTE_ETHER_MAX_LEN;
}
+ /*
+ * If LRO is enabled, check that the maximum aggregated packet
+ * size is supported by the configured device.
+ */
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (dev_conf->rxmode.max_lro_pkt_size == 0)
+ dev->data->dev_conf.rxmode.max_lro_pkt_size =
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ ret = check_lro_pkt_size(port_id,
+ dev->data->dev_conf.rxmode.max_lro_pkt_size,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ dev_info.max_lro_pkt_size);
+ if (ret != 0)
+ goto rollback;
+ }
+
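From the application side the new field is used roughly as follows (a minimal sketch; port_id, queue counts and sizes are hypothetical):

	struct rte_eth_conf conf = {0};
	int ret;

	conf.rxmode.offloads = DEV_RX_OFFLOAD_TCP_LRO;
	conf.rxmode.max_rx_pkt_len = 1518;
	conf.rxmode.max_lro_pkt_size = 0; /* 0: default to max_rx_pkt_len */
	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	/* a nonzero max_lro_pkt_size must pass check_lro_pkt_size(),
	 * otherwise rte_eth_dev_configure() fails with -EINVAL */
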
/* Any requested offloading must be within its device capabilities */
if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
dev_conf->rxmode.offloads) {
if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
(dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
RTE_ETHDEV_LOG(ERR,
- "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested",
+ "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
port_id,
rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
ret = -EINVAL;
return -EINVAL;
}
+ /*
+ * If LRO is enabled, check that the maximum aggregated packet
+ * size is supported by the configured device.
+ */
+ if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
+ dev->data->dev_conf.rxmode.max_lro_pkt_size =
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ ret = check_lro_pkt_size(port_id,
+ dev->data->dev_conf.rxmode.max_lro_pkt_size,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ dev_info.max_lro_pkt_size);
+ if (ret != 0)
+ return ret;
+ }
+
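The check is repeated at queue setup because local_conf.offloads merges the per-queue rx_conf->offloads with the port-level ones, so LRO may first appear here (hedged sketch; the descriptor count and mempool are made up):

	struct rte_eth_rxconf rxq_conf = dev_info.default_rxconf;

	rxq_conf.offloads |= DEV_RX_OFFLOAD_TCP_LRO; /* per-queue request */
	ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				     &rxq_conf, mbuf_pool);
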
ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
socket_id, &local_conf, mp);
if (!ret) {
* return status and does not know if get is successful or not.
*/
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+ dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
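Initialising switch_info.domain_id explicitly is needed because the memset() above zero-fills the structure, and once the allocation loop changed at the end of this patch starts handing out domain id 0, a zeroed field would be indistinguishable from a valid domain.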
return eth_err(port_id, diag);
}
+ /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
+ dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
+ RTE_MAX_QUEUES_PER_PORT);
+ dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
+ RTE_MAX_QUEUES_PER_PORT);
+
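The clamp protects callers that size per-queue arrays with RTE_MAX_QUEUES_PER_PORT and then iterate up to the reported maxima, e.g. (illustrative sketch):

	uint16_t map[RTE_MAX_QUEUES_PER_PORT];
	uint16_t q;

	for (q = 0; q < dev_info.max_rx_queues; q++)
		map[q] = q; /* in bounds only because of the RTE_MIN() clamp */
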
dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
int mask = 0;
int cur, org = 0;
uint64_t orig_offloads;
- uint64_t *dev_offloads;
+ uint64_t dev_offloads;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
/* save original values in case of failure */
orig_offloads = dev->data->dev_conf.rxmode.offloads;
- dev_offloads = &dev->data->dev_conf.rxmode.offloads;
+ dev_offloads = orig_offloads;
- /*check which option changed by application*/
+ /* check which option changed by application */
cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
- org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
if (cur != org) {
if (cur)
- *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
else
- *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
mask |= ETH_VLAN_STRIP_MASK;
}
cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
- org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+ org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
if (cur != org) {
if (cur)
- *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
else
- *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
mask |= ETH_VLAN_FILTER_MASK;
}
cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
- org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+ org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
if (cur != org) {
if (cur)
- *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
else
- *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+ dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
mask |= ETH_VLAN_EXTEND_MASK;
}
cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
- org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+ org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
if (cur != org) {
if (cur)
- *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
else
- *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+ dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
mask |= ETH_QINQ_STRIP_MASK;
}
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
+ dev->data->dev_conf.rxmode.offloads = dev_offloads;
ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
if (ret) {
/* hit an error restore original values */
- *dev_offloads = orig_offloads;
+ dev->data->dev_conf.rxmode.offloads = orig_offloads;
}
return eth_err(port_id, ret);
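The rework stages the offload flags in a local variable so dev_conf is written only once a driver callback is actually going to be invoked, and is rolled back if that call fails; callers keep the usual API (hedged sketch):

	int mask = rte_eth_dev_get_vlan_offload(port_id);

	mask |= ETH_VLAN_STRIP_OFFLOAD | ETH_VLAN_FILTER_OFFLOAD;
	ret = rte_eth_dev_set_vlan_offload(port_id, mask);
	/* on failure, dev_conf.rxmode.offloads keeps its previous value */
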
next = TAILQ_NEXT(cb, next);
if (cb->cb_fn != cb_fn || cb->event != event ||
- (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
+ (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
continue;
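The fix swaps which side of the comparison carries the (void *)-1 wildcard: it is the caller's cb_arg that may be the match-any sentinel when unregistering, while the stored cb->cb_arg is an ordinary value; the old test made the wildcard effectively unusable.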
/*
cb->param = user_param;
rte_spinlock_lock(&rte_eth_rx_cb_lock);
- /* Add the callbacks at fisrt position*/
+ /* Add the callbacks at first position */
cb->next = rte_eth_devices[port_id].post_rx_burst_cbs[queue_id];
rte_smp_wmb();
rte_eth_devices[port_id].post_rx_burst_cbs[queue_id] = cb;
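Ordering note: the new callback's fields and its next pointer are written first, then rte_smp_wmb() ensures those stores are visible before the head-pointer store publishes the callback to lockless readers in rte_eth_rx_burst(); the spinlock only serialises concurrent writers.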
*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
- for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
- i < RTE_MAX_ETHPORTS; i++) {
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (rte_eth_switch_domains[i].state ==
RTE_ETH_SWITCH_DOMAIN_UNUSED) {
rte_eth_switch_domains[i].state =