rx_nombuf)},
};
-#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
+#define RTE_NB_STATS RTE_DIM(rte_stats_strings)
static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
{"packets", offsetof(struct rte_eth_stats, q_ipackets)},
{"errors", offsetof(struct rte_eth_stats, q_errors)},
};
-#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
- sizeof(rte_rxq_stats_strings[0]))
+#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)
static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
{"packets", offsetof(struct rte_eth_stats, q_opackets)},
{"bytes", offsetof(struct rte_eth_stats, q_obytes)},
};
-#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
- sizeof(rte_txq_stats_strings[0]))
+#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)
#define RTE_RX_OFFLOAD_BIT2STR(_name) \
{ DEV_RX_OFFLOAD_##_name, #_name }
RTE_RX_OFFLOAD_BIT2STR(KEEP_CRC),
RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
+ RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
};
#undef RTE_RX_OFFLOAD_BIT2STR
RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
- RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
};
#undef RTE_TX_OFFLOAD_BIT2STR
-static const struct {
- uint64_t option;
- const char *name;
-} rte_burst_option_names[] = {
- { RTE_ETH_BURST_SCALAR, "Scalar" },
- { RTE_ETH_BURST_VECTOR, "Vector" },
-
- { RTE_ETH_BURST_ALTIVEC, "AltiVec" },
- { RTE_ETH_BURST_NEON, "Neon" },
- { RTE_ETH_BURST_SSE, "SSE" },
- { RTE_ETH_BURST_AVX2, "AVX2" },
- { RTE_ETH_BURST_AVX512, "AVX512" },
-
- { RTE_ETH_BURST_SCATTERED, "Scattered" },
- { RTE_ETH_BURST_BULK_ALLOC, "Bulk Alloc" },
- { RTE_ETH_BURST_SIMPLE, "Simple" },
- { RTE_ETH_BURST_PER_QUEUE, "Per Queue" },
-};
-
/**
* The user application callback description.
*
return rte_eth_devices[port_id].security_ctx;
}
-uint16_t
-rte_eth_dev_count(void)
-{
- return rte_eth_dev_count_avail();
-}
-
uint16_t
rte_eth_dev_count_avail(void)
{
return name;
}
+static inline int
+check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
+ uint32_t max_rx_pkt_len, uint32_t dev_info_size)
+{
+ int ret = 0;
+
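+	/* No PMD limit was reported: the LRO size must equal max_rx_pkt_len. */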
+ if (dev_info_size == 0) {
+ if (config_size != max_rx_pkt_len) {
+			RTE_ETHDEV_LOG(ERR,
+				"Ethdev port_id=%d max_lro_pkt_size %u != max_rx_pkt_len %u is not allowed\n",
+				port_id, config_size, max_rx_pkt_len);
+ ret = -EINVAL;
+ }
+ } else if (config_size > dev_info_size) {
+		RTE_ETHDEV_LOG(ERR,
+			"Ethdev port_id=%d max_lro_pkt_size %u > max allowed value %u\n",
+			port_id, config_size, dev_info_size);
+ ret = -EINVAL;
+ } else if (config_size < RTE_ETHER_MIN_LEN) {
+		RTE_ETHDEV_LOG(ERR,
+			"Ethdev port_id=%d max_lro_pkt_size %u < min allowed value %u\n",
+			port_id, config_size, (unsigned int)RTE_ETHER_MIN_LEN);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Validate offloads that are requested through rte_eth_dev_configure against
+ * the offloads successfully set by the Ethernet device.
+ *
+ * @param port_id
+ * The port identifier of the Ethernet device.
+ * @param req_offloads
+ * The offloads that have been requested through `rte_eth_dev_configure`.
+ * @param set_offloads
+ * The offloads successfully set by the Ethernet device.
+ * @param offload_type
+ * The offload type, i.e. the "Rx" or "Tx" string.
+ * @param offload_name
+ * The function that prints the offload name.
+ * @return
+ * - (0) if validation successful.
+ * - (-EINVAL) if a requested offload has been silently disabled.
+ */
+static int
+validate_offloads(uint16_t port_id, uint64_t req_offloads,
+ uint64_t set_offloads, const char *offload_type,
+ const char *(*offload_name)(uint64_t))
+{
+ uint64_t offloads_diff = req_offloads ^ set_offloads;
+ uint64_t offload;
+ int ret = 0;
+
+ while (offloads_diff != 0) {
+ /* Check if any offload is requested but not enabled. */
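+		/* Isolate the lowest bit that differs between the two sets. */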
+ offload = 1ULL << __builtin_ctzll(offloads_diff);
+ if (offload & req_offloads) {
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u failed to enable %s offload %s\n",
+ port_id, offload_type, offload_name(offload));
+ ret = -EINVAL;
+ }
+
+		/* Check if the offload was enabled but not requested. */
+ if (offload & set_offloads) {
+ RTE_ETHDEV_LOG(DEBUG,
+ "Port %u %s offload %s is not requested but enabled\n",
+ port_id, offload_type, offload_name(offload));
+ }
+
+ offloads_diff &= ~offload;
+ }
+
+ return ret;
+}
+
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
* Copy the dev_conf parameter into the dev structure.
* rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
*/
- memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+ if (dev_conf != &dev->data->dev_conf)
+ memcpy(&dev->data->dev_conf, dev_conf,
+ sizeof(dev->data->dev_conf));
ret = rte_eth_dev_info_get(port_id, &dev_info);
if (ret != 0)
RTE_ETHER_MAX_LEN;
}
+ /*
+ * If LRO is enabled, check that the maximum aggregated packet
+ * size is supported by the configured device.
+ */
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
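+		/* Default to max_rx_pkt_len when no LRO size was requested. */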
+ if (dev_conf->rxmode.max_lro_pkt_size == 0)
+ dev->data->dev_conf.rxmode.max_lro_pkt_size =
+ dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ ret = check_lro_pkt_size(port_id,
+ dev->data->dev_conf.rxmode.max_lro_pkt_size,
+ dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ dev_info.max_lro_pkt_size);
+ if (ret != 0)
+ goto rollback;
+ }
+
/* Any requested offloading must be within its device capabilities */
if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
dev_conf->rxmode.offloads) {
goto rollback;
}
+ /* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
+ if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
+ (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+		RTE_ETHDEV_LOG(ERR,
+			"Ethdev port_id=%u: %s offload is requested but Rx mq_mode does not enable RSS\n",
+			port_id,
+			rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
+ ret = -EINVAL;
+ goto rollback;
+ }
+
/*
* Setup new number of RX/TX queues and reconfigure device.
*/
if (diag != 0) {
RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
port_id, diag);
- rte_eth_dev_rx_queue_config(dev, 0);
- rte_eth_dev_tx_queue_config(dev, 0);
ret = eth_err(port_id, diag);
- goto rollback;
+ goto reset_queues;
}
/* Initialize Rx profiling if enabled at compilation time. */
if (diag != 0) {
RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
port_id, diag);
- rte_eth_dev_rx_queue_config(dev, 0);
- rte_eth_dev_tx_queue_config(dev, 0);
ret = eth_err(port_id, diag);
- goto rollback;
+ goto reset_queues;
}
- return 0;
+ /* Validate Rx offloads. */
+ diag = validate_offloads(port_id,
+ dev_conf->rxmode.offloads,
+ dev->data->dev_conf.rxmode.offloads, "Rx",
+ rte_eth_dev_rx_offload_name);
+ if (diag != 0) {
+ ret = diag;
+ goto reset_queues;
+ }
+
+ /* Validate Tx offloads. */
+ diag = validate_offloads(port_id,
+ dev_conf->txmode.offloads,
+ dev->data->dev_conf.txmode.offloads, "Tx",
+ rte_eth_dev_tx_offload_name);
+ if (diag != 0) {
+ ret = diag;
+ goto reset_queues;
+ }
+ return 0;
+reset_queues:
+ rte_eth_dev_rx_queue_config(dev, 0);
+ rte_eth_dev_tx_queue_config(dev, 0);
rollback:
memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
return -EINVAL;
}
+ /*
+ * If LRO is enabled, check that the maximum aggregated packet
+ * size is supported by the configured device.
+ */
+	if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+		int ret;
+
+		if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
+			dev->data->dev_conf.rxmode.max_lro_pkt_size =
+				dev->data->dev_conf.rxmode.max_rx_pkt_len;
+		ret = check_lro_pkt_size(port_id,
+				dev->data->dev_conf.rxmode.max_lro_pkt_size,
+				dev->data->dev_conf.rxmode.max_rx_pkt_len,
+				dev_info.max_lro_pkt_size);
+		if (ret != 0)
+			return ret;
+	}
+
ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
socket_id, &local_conf, mp);
if (!ret) {
* return status and does not know if get is successful or not.
*/
memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+ dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
return eth_err(port_id, diag);
}
+ /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
+ dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
+ RTE_MAX_QUEUES_PER_PORT);
+ dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
+ RTE_MAX_QUEUES_PER_PORT);
+
dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
return j;
}
+int
+rte_eth_dev_set_ptypes(uint16_t port_id, uint32_t ptype_mask,
+ uint32_t *set_ptypes, unsigned int num)
+{
+ const uint32_t valid_ptype_masks[] = {
+ RTE_PTYPE_L2_MASK,
+ RTE_PTYPE_L3_MASK,
+ RTE_PTYPE_L4_MASK,
+ RTE_PTYPE_TUNNEL_MASK,
+ RTE_PTYPE_INNER_L2_MASK,
+ RTE_PTYPE_INNER_L3_MASK,
+ RTE_PTYPE_INNER_L4_MASK,
+ };
+ const uint32_t *all_ptypes;
+ struct rte_eth_dev *dev;
+ uint32_t unused_mask;
+ unsigned int i, j;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (num > 0 && set_ptypes == NULL)
+ return -EINVAL;
+
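+	/* Without driver support, report an empty list and succeed. */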
+ if (*dev->dev_ops->dev_supported_ptypes_get == NULL ||
+ *dev->dev_ops->dev_ptypes_set == NULL) {
+ ret = 0;
+ goto ptype_unknown;
+ }
+
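+	/* A zero mask tells the PMD that no packet types are required. */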
+	if (ptype_mask == 0) {
+		ret = (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
+		goto ptype_unknown;
+	}
+
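+	/* Each ptype layer mask must be requested in full or not at all. */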
+ unused_mask = ptype_mask;
+ for (i = 0; i < RTE_DIM(valid_ptype_masks); i++) {
+ uint32_t mask = ptype_mask & valid_ptype_masks[i];
+ if (mask && mask != valid_ptype_masks[i]) {
+ ret = -EINVAL;
+ goto ptype_unknown;
+ }
+ unused_mask &= ~valid_ptype_masks[i];
+ }
+
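+	/* Reject bits that fall outside every known ptype layer mask. */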
+ if (unused_mask) {
+ ret = -EINVAL;
+ goto ptype_unknown;
+ }
+
+ all_ptypes = (*dev->dev_ops->dev_supported_ptypes_get)(dev);
+ if (all_ptypes == NULL) {
+ ret = 0;
+ goto ptype_unknown;
+ }
+
+ /*
+	 * Accommodate as many set_ptypes as possible. If the supplied
+	 * set_ptypes array is insufficient, fill it partially, reserving
+	 * the last element for the RTE_PTYPE_UNKNOWN terminator.
+ */
+ for (i = 0, j = 0; set_ptypes != NULL &&
+ (all_ptypes[i] != RTE_PTYPE_UNKNOWN); ++i) {
+ if (ptype_mask & all_ptypes[i]) {
+			if (j + 1 < num) {
+ set_ptypes[j] = all_ptypes[i];
+ j++;
+ continue;
+ }
+ break;
+ }
+ }
+
+ if (set_ptypes != NULL && j < num)
+ set_ptypes[j] = RTE_PTYPE_UNKNOWN;
+
+ return (*dev->dev_ops->dev_ptypes_set)(dev, ptype_mask);
+
+ptype_unknown:
+ if (num > 0)
+ set_ptypes[0] = RTE_PTYPE_UNKNOWN;
+
+ return ret;
+}
+
int
rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
{
return 0;
}
-
int
rte_eth_dev_get_mtu(uint16_t port_id, uint16_t *mtu)
{
int mask = 0;
int cur, org = 0;
uint64_t orig_offloads;
- uint64_t *dev_offloads;
+ uint64_t dev_offloads;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
/* save original values in case of failure */
orig_offloads = dev->data->dev_conf.rxmode.offloads;
- dev_offloads = &dev->data->dev_conf.rxmode.offloads;
+ dev_offloads = orig_offloads;
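+	/* Work on a local copy; it is committed just before the driver call. */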
/*check which option changed by application*/
cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
- org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
if (cur != org) {
if (cur)
- *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
else
- *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
mask |= ETH_VLAN_STRIP_MASK;
}
cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
- org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+ org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
if (cur != org) {
if (cur)
- *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
else
- *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
mask |= ETH_VLAN_FILTER_MASK;
}
cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
- org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+ org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
if (cur != org) {
if (cur)
- *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
else
- *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+ dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
mask |= ETH_VLAN_EXTEND_MASK;
}
cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
- org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+ org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
if (cur != org) {
if (cur)
- *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
else
- *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+ dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
mask |= ETH_QINQ_STRIP_MASK;
}
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
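+	/* Publish the new offload set so the driver callback sees it. */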
+ dev->data->dev_conf.rxmode.offloads = dev_offloads;
ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
if (ret) {
/* hit an error restore original values */
- *dev_offloads = orig_offloads;
+ dev->data->dev_conf.rxmode.offloads = orig_offloads;
}
return eth_err(port_id, ret);
next = TAILQ_NEXT(cb, next);
if (cb->cb_fn != cb_fn || cb->event != event ||
- (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
+ (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
continue;
/*
dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
}
-const char *
-rte_eth_burst_mode_option_name(uint64_t option)
-{
- const char *name = "";
- unsigned int i;
-
- for (i = 0; i < RTE_DIM(rte_burst_option_names); ++i) {
- if (option == rte_burst_option_names[i].option) {
- name = rte_burst_option_names[i].name;
- break;
- }
- }
-
- return name;
-}
-
int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
struct rte_ether_addr *mc_addr_set,
*domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
- for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
- i < RTE_MAX_ETHPORTS; i++) {
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (rte_eth_switch_domains[i].state ==
RTE_ETH_SWITCH_DOMAIN_UNUSED) {
rte_eth_switch_domains[i].state =