X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_ethdev%2Frte_ethdev.c;h=774c721b3484b0331f7b1154397b3befcd588b03;hb=32f9289dd44f7e7a0979304c799d2114862b45b2;hp=153d50e9a8f73c4c7581cd434a0c2b4bdbbcd168;hpb=5d308972954cbad07d469e9b708aa517787e9948;p=dpdk.git

diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 153d50e9a8..774c721b34 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -86,7 +86,7 @@ static const struct rte_eth_xstats_name_off rte_stats_strings[] = {
         rx_nombuf)},
 };
 
-#define RTE_NB_STATS (sizeof(rte_stats_strings) / sizeof(rte_stats_strings[0]))
+#define RTE_NB_STATS RTE_DIM(rte_stats_strings)
 
 static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
     {"packets", offsetof(struct rte_eth_stats, q_ipackets)},
@@ -94,15 +94,13 @@ static const struct rte_eth_xstats_name_off rte_rxq_stats_strings[] = {
     {"errors", offsetof(struct rte_eth_stats, q_errors)},
 };
 
-#define RTE_NB_RXQ_STATS (sizeof(rte_rxq_stats_strings) / \
-        sizeof(rte_rxq_stats_strings[0]))
+#define RTE_NB_RXQ_STATS RTE_DIM(rte_rxq_stats_strings)
 
 static const struct rte_eth_xstats_name_off rte_txq_stats_strings[] = {
     {"packets", offsetof(struct rte_eth_stats, q_opackets)},
     {"bytes", offsetof(struct rte_eth_stats, q_obytes)},
 };
-#define RTE_NB_TXQ_STATS (sizeof(rte_txq_stats_strings) / \
-        sizeof(rte_txq_stats_strings[0]))
+#define RTE_NB_TXQ_STATS RTE_DIM(rte_txq_stats_strings)
 
 #define RTE_RX_OFFLOAD_BIT2STR(_name) \
     { DEV_RX_OFFLOAD_##_name, #_name }
@@ -1137,6 +1135,84 @@ rte_eth_dev_tx_offload_name(uint64_t offload)
     return name;
 }
 
+static inline int
+check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
+        uint32_t max_rx_pkt_len, uint32_t dev_info_size)
+{
+    int ret = 0;
+
+    if (dev_info_size == 0) {
+        if (config_size != max_rx_pkt_len) {
+            RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size"
+                " %u != %u is not allowed\n",
+                port_id, config_size, max_rx_pkt_len);
+            ret = -EINVAL;
+        }
+    } else if (config_size > dev_info_size) {
+        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
+            "> max allowed value %u\n", port_id, config_size,
+            dev_info_size);
+        ret = -EINVAL;
+    } else if (config_size < RTE_ETHER_MIN_LEN) {
+        RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d max_lro_pkt_size %u "
+            "< min allowed value %u\n", port_id, config_size,
+            (unsigned int)RTE_ETHER_MIN_LEN);
+        ret = -EINVAL;
+    }
+    return ret;
+}
+
+/*
+ * Validate offloads that are requested through rte_eth_dev_configure against
+ * the offloads successfully set by the Ethernet device.
+ *
+ * @param port_id
+ *   The port identifier of the Ethernet device.
+ * @param req_offloads
+ *   The offloads that have been requested through `rte_eth_dev_configure`.
+ * @param set_offloads
+ *   The offloads successfully set by the Ethernet device.
+ * @param offload_type
+ *   The offload type, i.e. the Rx/Tx string.
+ * @param offload_name
+ *   The function that prints the offload name.
+ * @return
+ *   - (0) if validation successful.
+ *   - (-EINVAL) if requested offload has been silently disabled.
+ *
+ */
+static int
+validate_offloads(uint16_t port_id, uint64_t req_offloads,
+        uint64_t set_offloads, const char *offload_type,
+        const char *(*offload_name)(uint64_t))
+{
+    uint64_t offloads_diff = req_offloads ^ set_offloads;
+    uint64_t offload;
+    int ret = 0;
+
+    while (offloads_diff != 0) {
+        /* Check if any offload is requested but not enabled. */
+        offload = 1ULL << __builtin_ctzll(offloads_diff);
+        if (offload & req_offloads) {
+            RTE_ETHDEV_LOG(ERR,
+                "Port %u failed to enable %s offload %s\n",
+                port_id, offload_type, offload_name(offload));
+            ret = -EINVAL;
+        }
+
+        /* Check if offload couldn't be disabled. */
+        if (offload & set_offloads) {
+            RTE_ETHDEV_LOG(DEBUG,
+                "Port %u %s offload %s is not requested but enabled\n",
+                port_id, offload_type, offload_name(offload));
+        }
+
+        offloads_diff &= ~offload;
+    }
+
+    return ret;
+}
+
 int
 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
         const struct rte_eth_conf *dev_conf)
@@ -1167,7 +1243,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
      * Copy the dev_conf parameter into the dev structure.
      * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
      */
-    memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+    if (dev_conf != &dev->data->dev_conf)
+        memcpy(&dev->data->dev_conf, dev_conf,
+            sizeof(dev->data->dev_conf));
 
     ret = rte_eth_dev_info_get(port_id, &dev_info);
     if (ret != 0)
@@ -1267,6 +1345,22 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                             RTE_ETHER_MAX_LEN;
     }
 
+    /*
+     * If LRO is enabled, check that the maximum aggregated packet
+     * size is supported by the configured device.
+     */
+    if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+        if (dev_conf->rxmode.max_lro_pkt_size == 0)
+            dev->data->dev_conf.rxmode.max_lro_pkt_size =
+                dev->data->dev_conf.rxmode.max_rx_pkt_len;
+        ret = check_lro_pkt_size(port_id,
+                dev->data->dev_conf.rxmode.max_lro_pkt_size,
+                dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                dev_info.max_lro_pkt_size);
+        if (ret != 0)
+            goto rollback;
+    }
+
     /* Any requested offloading must be within its device capabilities */
     if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
          dev_conf->rxmode.offloads) {
@@ -1310,7 +1404,7 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
     if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
         (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
         RTE_ETHDEV_LOG(ERR,
-            "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested",
+            "Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
             port_id,
             rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
         ret = -EINVAL;
@@ -1343,10 +1437,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
     if (diag != 0) {
         RTE_ETHDEV_LOG(ERR, "Port%u dev_configure = %d\n",
             port_id, diag);
-        rte_eth_dev_rx_queue_config(dev, 0);
-        rte_eth_dev_tx_queue_config(dev, 0);
         ret = eth_err(port_id, diag);
-        goto rollback;
+        goto reset_queues;
     }
 
     /* Initialize Rx profiling if enabled at compilation time. */
@@ -1354,14 +1446,34 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
     if (diag != 0) {
         RTE_ETHDEV_LOG(ERR, "Port%u __rte_eth_dev_profile_init = %d\n",
             port_id, diag);
-        rte_eth_dev_rx_queue_config(dev, 0);
-        rte_eth_dev_tx_queue_config(dev, 0);
         ret = eth_err(port_id, diag);
-        goto rollback;
+        goto reset_queues;
     }
 
-    return 0;
+    /* Validate Rx offloads. */
+    diag = validate_offloads(port_id,
+            dev_conf->rxmode.offloads,
+            dev->data->dev_conf.rxmode.offloads, "Rx",
+            rte_eth_dev_rx_offload_name);
+    if (diag != 0) {
+        ret = diag;
+        goto reset_queues;
+    }
+    /* Validate Tx offloads. */
+    diag = validate_offloads(port_id,
+            dev_conf->txmode.offloads,
+            dev->data->dev_conf.txmode.offloads, "Tx",
+            rte_eth_dev_tx_offload_name);
+    if (diag != 0) {
+        ret = diag;
+        goto reset_queues;
+    }
+
+    return 0;
+reset_queues:
+    rte_eth_dev_rx_queue_config(dev, 0);
+    rte_eth_dev_tx_queue_config(dev, 0);
 rollback:
     memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
 
@@ -1782,6 +1894,22 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
         return -EINVAL;
     }
 
+    /*
+     * If LRO is enabled, check that the maximum aggregated packet
+     * size is supported by the configured device.
+     */
+    if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+        if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
+            dev->data->dev_conf.rxmode.max_lro_pkt_size =
+                dev->data->dev_conf.rxmode.max_rx_pkt_len;
+        int ret = check_lro_pkt_size(port_id,
+                dev->data->dev_conf.rxmode.max_lro_pkt_size,
+                dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                dev_info.max_lro_pkt_size);
+        if (ret != 0)
+            return ret;
+    }
+
     ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
                           socket_id, &local_conf, mp);
     if (!ret) {
@@ -2838,6 +2966,7 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
      * return status and does not know if get is successful or not.
      */
     memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+    dev_info->switch_info.domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
 
     RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
     dev = &rte_eth_devices[port_id];
@@ -2856,6 +2985,12 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
         return eth_err(port_id, diag);
     }
 
+    /* Maximum number of queues should be <= RTE_MAX_QUEUES_PER_PORT */
+    dev_info->max_rx_queues = RTE_MIN(dev_info->max_rx_queues,
+            RTE_MAX_QUEUES_PER_PORT);
+    dev_info->max_tx_queues = RTE_MIN(dev_info->max_tx_queues,
+            RTE_MAX_QUEUES_PER_PORT);
+
     dev_info->driver_name = dev->device->driver->name;
     dev_info->nb_rx_queues = dev->data->nb_rx_queues;
     dev_info->nb_tx_queues = dev->data->nb_tx_queues;
@@ -3117,53 +3252,53 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
     int mask = 0;
     int cur, org = 0;
     uint64_t orig_offloads;
-    uint64_t *dev_offloads;
+    uint64_t dev_offloads;
 
     RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
     dev = &rte_eth_devices[port_id];
 
     /* save original values in case of failure */
     orig_offloads = dev->data->dev_conf.rxmode.offloads;
-    dev_offloads = &dev->data->dev_conf.rxmode.offloads;
+    dev_offloads = orig_offloads;
 
     /*check which option changed by application*/
     cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-    org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+    org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
     if (cur != org) {
         if (cur)
-            *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+            dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
         else
-            *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+            dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
         mask |= ETH_VLAN_STRIP_MASK;
     }
 
     cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-    org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+    org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
    if (cur != org) {
         if (cur)
-            *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+            dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
         else
-            *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
+            dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
         mask |= ETH_VLAN_FILTER_MASK;
     }
 
     cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-    org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+    org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
     if (cur != org) {
         if (cur)
-            *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+            dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
         else
-            *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+            dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
         mask |= ETH_VLAN_EXTEND_MASK;
     }
 
     cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
-    org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+    org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
     if (cur != org) {
         if (cur)
-            *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+            dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
         else
-            *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+            dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
         mask |= ETH_QINQ_STRIP_MASK;
     }
 
@@ -3172,10 +3307,11 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
         return ret;
 
     RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
+    dev->data->dev_conf.rxmode.offloads = dev_offloads;
     ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
     if (ret) {
         /* hit an error restore original values */
-        *dev_offloads = orig_offloads;
+        dev->data->dev_conf.rxmode.offloads = orig_offloads;
     }
 
     return eth_err(port_id, ret);
@@ -3903,7 +4039,7 @@ rte_eth_dev_callback_unregister(uint16_t port_id,
         next = TAILQ_NEXT(cb, next);
 
         if (cb->cb_fn != cb_fn || cb->event != event ||
-                (cb->cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
+                (cb_arg != (void *)-1 && cb->cb_arg != cb_arg))
             continue;
 
         /*
@@ -4928,8 +5064,7 @@ rte_eth_switch_domain_alloc(uint16_t *domain_id)
 
     *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
 
-    for (i = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID + 1;
-        i < RTE_MAX_ETHPORTS; i++) {
+    for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
         if (rte_eth_switch_domains[i].state ==
             RTE_ETH_SWITCH_DOMAIN_UNUSED) {
             rte_eth_switch_domains[i].state =