X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=inline;f=lib%2Flibrte_ethdev%2Frte_ethdev.c;h=5cb651e3aef31e9ec9efc5f909c5c4166aefd7ec;hb=ca041cd44fcc8b22c0e84460254596096e8fe914;hp=72bb33881e21734dd45acc3773c8d80a6551a052;hpb=35b2d13fd6fdcbd191f2a30d74648faeb1186c65;p=dpdk.git

diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index 72bb33881e..5cb651e3ae 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -601,7 +601,7 @@ rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
 	return port_id;
 }

-int __rte_experimental
+int
 rte_eth_dev_owner_new(uint64_t *owner_id)
 {
 	rte_eth_dev_shared_data_prepare();
@@ -654,7 +654,7 @@ _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
 	return 0;
 }

-int __rte_experimental
+int
 rte_eth_dev_owner_set(const uint16_t port_id,
 		      const struct rte_eth_dev_owner *owner)
 {
@@ -670,7 +670,7 @@ rte_eth_dev_owner_set(const uint16_t port_id,
 	return ret;
 }

-int __rte_experimental
+int
 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
 {
 	const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
@@ -687,10 +687,11 @@ rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
 	return ret;
 }

-void __rte_experimental
+int
 rte_eth_dev_owner_delete(const uint64_t owner_id)
 {
 	uint16_t port_id;
+	int ret = 0;

 	rte_eth_dev_shared_data_prepare();

@@ -708,12 +709,15 @@ rte_eth_dev_owner_delete(const uint64_t owner_id)
 		RTE_ETHDEV_LOG(ERR,
 			       "Invalid owner id=%016"PRIx64"\n",
 			       owner_id);
+		ret = -EINVAL;
 	}

 	rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+
+	return ret;
 }

-int __rte_experimental
+int
 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
 {
 	int ret = 0;
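The owner API drops its __rte_experimental tag here, and rte_eth_dev_owner_delete() now reports -EINVAL for an unknown owner id instead of only logging it. A minimal caller sketch (assuming <rte_ethdev.h> is included; the helper name and the "my-app" owner name are placeholders):

static int
claim_port(uint16_t port_id)
{
	/* Hypothetical helper, not part of the patch. */
	struct rte_eth_dev_owner owner = { .name = "my-app" };
	int ret;

	ret = rte_eth_dev_owner_new(&owner.id);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_owner_set(port_id, &owner);
	if (ret != 0)
		return ret;

	/* ... use the port ... */

	/* Returns -EINVAL if the owner id is no longer known. */
	return rte_eth_dev_owner_delete(owner.id);
}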
@@ -1125,7 +1129,6 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,

 	dev = &rte_eth_devices[port_id];

-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);

 	if (dev->data->dev_started) {
@@ -1144,7 +1147,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 	 */
 	memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));

-	rte_eth_dev_info_get(port_id, &dev_info);
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		goto rollback;

 	/* If number of queues specified by application for both Rx and Tx is
 	 * zero, use driver preferred values. This cannot be done individually
@@ -1380,24 +1385,62 @@ rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
 	}
 }

-static void
+static int
 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info, uint16_t port_id)
 {
+	int ret;
+
 	if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
 		rte_eth_dev_mac_restore(dev, dev_info);

 	/* replay promiscuous configuration */
-	if (rte_eth_promiscuous_get(port_id) == 1)
-		rte_eth_promiscuous_enable(port_id);
-	else if (rte_eth_promiscuous_get(port_id) == 0)
-		rte_eth_promiscuous_disable(port_id);
+	/*
+	 * use callbacks directly since we don't need port_id check and
+	 * would like to bypass the same value set
+	 */
+	if (rte_eth_promiscuous_get(port_id) == 1 &&
+	    *dev->dev_ops->promiscuous_enable != NULL) {
+		ret = eth_err(port_id,
+			      (*dev->dev_ops->promiscuous_enable)(dev));
+		if (ret != 0 && ret != -ENOTSUP) {
+			RTE_ETHDEV_LOG(ERR,
+				"Failed to enable promiscuous mode for device (port %u): %s\n",
+				port_id, rte_strerror(-ret));
+			return ret;
+		}
+	} else if (rte_eth_promiscuous_get(port_id) == 0 &&
+		   *dev->dev_ops->promiscuous_disable != NULL) {
+		ret = eth_err(port_id,
+			      (*dev->dev_ops->promiscuous_disable)(dev));
+		if (ret != 0 && ret != -ENOTSUP) {
+			RTE_ETHDEV_LOG(ERR,
+				"Failed to disable promiscuous mode for device (port %u): %s\n",
+				port_id, rte_strerror(-ret));
+			return ret;
+		}
+	}

 	/* replay all multicast configuration */
-	if (rte_eth_allmulticast_get(port_id) == 1)
-		rte_eth_allmulticast_enable(port_id);
-	else if (rte_eth_allmulticast_get(port_id) == 0)
-		rte_eth_allmulticast_disable(port_id);
+	if (rte_eth_allmulticast_get(port_id) == 1) {
+		ret = rte_eth_allmulticast_enable(port_id);
+		if (ret != 0 && ret != -ENOTSUP) {
+			RTE_ETHDEV_LOG(ERR,
+				"Failed to enable allmulticast mode for device (port %u): %s\n",
+				port_id, rte_strerror(-ret));
+			return ret;
+		}
+	} else if (rte_eth_allmulticast_get(port_id) == 0) {
+		ret = rte_eth_allmulticast_disable(port_id);
+		if (ret != 0 && ret != -ENOTSUP) {
+			RTE_ETHDEV_LOG(ERR,
+				"Failed to disable allmulticast mode for device (port %u): %s\n",
+				port_id, rte_strerror(-ret));
+			return ret;
+		}
+	}
+
+	return 0;
 }

 int
@@ -1406,6 +1449,7 @@ rte_eth_dev_start(uint16_t port_id)
 	struct rte_eth_dev *dev;
 	struct rte_eth_dev_info dev_info;
 	int diag;
+	int ret;

 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

@@ -1420,7 +1464,9 @@ rte_eth_dev_start(uint16_t port_id)
 		return 0;
 	}

-	rte_eth_dev_info_get(port_id, &dev_info);
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return ret;

 	/* Lets restore MAC now if device does not support live change */
 	if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
@@ -1432,7 +1478,14 @@ rte_eth_dev_start(uint16_t port_id)
 	else
 		return eth_err(port_id, diag);

-	rte_eth_dev_config_restore(dev, &dev_info, port_id);
+	ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
+	if (ret != 0) {
+		RTE_ETHDEV_LOG(ERR,
+			"Error during restoring configuration for device (port %u): %s\n",
+			port_id, rte_strerror(-ret));
+		rte_eth_dev_stop(port_id);
+		return ret;
+	}

 	if (dev->data->dev_conf.intr_conf.lsc == 0) {
 		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
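Because rte_eth_dev_config_restore() can now fail, rte_eth_dev_start() stops the port again and propagates the error; only a zero return means the port is running. Caller sketch (assuming <stdio.h>, <rte_ethdev.h> and <rte_errno.h> are included and the port is already configured):

static int
start_port(uint16_t port_id)
{
	/* Hypothetical wrapper, not part of the patch. */
	int ret = rte_eth_dev_start(port_id);

	if (ret != 0)
		printf("cannot start port %u: %s\n",
		       port_id, rte_strerror(-ret));
	return ret;
}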
@@ -1535,7 +1588,7 @@ rte_eth_dev_reset(uint16_t port_id)
 	return eth_err(port_id, ret);
 }

-int __rte_experimental
+int
 rte_eth_dev_is_removed(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;
@@ -1579,7 +1632,11 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 		return -EINVAL;
 	}

-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+	if (mp == NULL) {
+		RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
+		return -EINVAL;
+	}
+
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);

 	/*
@@ -1587,7 +1644,10 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
 	 * This value must be provided in the private data of the memory pool.
 	 * First check that the memory pool has a valid private data.
 	 */
-	rte_eth_dev_info_get(port_id, &dev_info);
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return ret;
+
 	if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
 		RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
 			mp->name, (int)mp->private_data_size,
@@ -1698,6 +1758,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_txconf local_conf;
 	void **txq;
+	int ret;

 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);

@@ -1707,10 +1768,11 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
 		return -EINVAL;
 	}

-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);

-	rte_eth_dev_info_get(port_id, &dev_info);
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return ret;

 	/* Use default specified by driver, if nb_tx_desc is zero */
 	if (nb_tx_desc == 0) {
@@ -1851,30 +1913,44 @@ rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
 	return eth_err(port_id, ret);
 }

-void
+int
 rte_eth_promiscuous_enable(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;
+	int diag = 0;

-	RTE_ETH_VALID_PORTID_OR_RET(port_id);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

-	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
-	(*dev->dev_ops->promiscuous_enable)(dev);
-	dev->data->promiscuous = 1;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
+
+	if (dev->data->promiscuous == 0) {
+		diag = (*dev->dev_ops->promiscuous_enable)(dev);
+		dev->data->promiscuous = (diag == 0) ? 1 : 0;
+	}
+
+	return eth_err(port_id, diag);
 }

-void
+int
 rte_eth_promiscuous_disable(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;
+	int diag = 0;

-	RTE_ETH_VALID_PORTID_OR_RET(port_id);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

-	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
-	dev->data->promiscuous = 0;
-	(*dev->dev_ops->promiscuous_disable)(dev);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
+
+	if (dev->data->promiscuous == 1) {
+		dev->data->promiscuous = 0;
+		diag = (*dev->dev_ops->promiscuous_disable)(dev);
+		if (diag != 0)
+			dev->data->promiscuous = 1;
+	}
+
+	return eth_err(port_id, diag);
 }

 int
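rte_eth_promiscuous_enable() and rte_eth_promiscuous_disable() now return 0, -ENOTSUP when the driver lacks the callback, or the driver's negative errno; the software flag is only flipped when the driver call succeeds. Expected caller pattern, as a sketch (same include assumptions as above, plus <errno.h> for ENOTSUP):

static void
set_promisc(uint16_t port_id, int on)
{
	/* Hypothetical helper, not part of the patch. */
	int ret = on ? rte_eth_promiscuous_enable(port_id) :
		       rte_eth_promiscuous_disable(port_id);

	if (ret != 0 && ret != -ENOTSUP)
		printf("port %u: promiscuous %s failed: %s\n",
		       port_id, on ? "enable" : "disable",
		       rte_strerror(-ret));
}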
@@ -1888,30 +1964,42 @@ rte_eth_promiscuous_get(uint16_t port_id)
 	return dev->data->promiscuous;
 }

-void
+int
 rte_eth_allmulticast_enable(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;
+	uint8_t old_allmulticast;
+	int diag;

-	RTE_ETH_VALID_PORTID_OR_RET(port_id);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

-	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
-	(*dev->dev_ops->allmulticast_enable)(dev);
-	dev->data->all_multicast = 1;
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
+	old_allmulticast = dev->data->all_multicast;
+	diag = (*dev->dev_ops->allmulticast_enable)(dev);
+	dev->data->all_multicast = (diag == 0) ? 1 : old_allmulticast;
+
+	return eth_err(port_id, diag);
 }

-void
+int
 rte_eth_allmulticast_disable(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;
+	uint8_t old_allmulticast;
+	int diag;

-	RTE_ETH_VALID_PORTID_OR_RET(port_id);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

-	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
+	old_allmulticast = dev->data->all_multicast;
 	dev->data->all_multicast = 0;
-	(*dev->dev_ops->allmulticast_disable)(dev);
+	diag = (*dev->dev_ops->allmulticast_disable)(dev);
+	if (diag != 0)
+		dev->data->all_multicast = old_allmulticast;
+
+	return eth_err(port_id, diag);
 }

 int
@@ -1925,40 +2013,44 @@ rte_eth_allmulticast_get(uint16_t port_id)
 	return dev->data->all_multicast;
 }

-void
+int
 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
 {
 	struct rte_eth_dev *dev;

-	RTE_ETH_VALID_PORTID_OR_RET(port_id);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

 	if (dev->data->dev_conf.intr_conf.lsc &&
 	    dev->data->dev_started)
 		rte_eth_linkstatus_get(dev, eth_link);
 	else {
-		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
 		(*dev->dev_ops->link_update)(dev, 1);
 		*eth_link = dev->data->dev_link;
 	}
+
+	return 0;
 }

-void
+int
 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
 {
 	struct rte_eth_dev *dev;

-	RTE_ETH_VALID_PORTID_OR_RET(port_id);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

 	if (dev->data->dev_conf.intr_conf.lsc &&
 	    dev->data->dev_started)
 		rte_eth_linkstatus_get(dev, eth_link);
 	else {
-		RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+		RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
 		(*dev->dev_ops->link_update)(dev, 0);
 		*eth_link = dev->data->dev_link;
 	}
+
+	return 0;
 }

 int
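rte_eth_link_get() and rte_eth_link_get_nowait() likewise return -ENODEV or -ENOTSUP now instead of silently doing nothing. Sketch (same include assumptions as above):

static void
report_link(uint16_t port_id)
{
	/* Hypothetical helper, not part of the patch. */
	struct rte_eth_link link;
	int ret = rte_eth_link_get_nowait(port_id, &link);

	if (ret != 0)
		printf("port %u: cannot read link status: %s\n",
		       port_id, rte_strerror(-ret));
	else
		printf("port %u: link %s, %u Mbps\n", port_id,
		       link.link_status == ETH_LINK_UP ? "up" : "down",
		       link.link_speed);
}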
@@ -1980,12 +2072,16 @@ int
 rte_eth_stats_reset(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;
+	int ret;

 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

 	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
-	(*dev->dev_ops->stats_reset)(dev);
+	ret = (*dev->dev_ops->stats_reset)(dev);
+	if (ret != 0)
+		return eth_err(port_id, ret);
+
 	dev->data->rx_mbuf_alloc_failed = 0;

 	return 0;
@@ -2461,22 +2557,20 @@ rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
 }

 /* reset ethdev extended statistics */
-void
+int
 rte_eth_xstats_reset(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;

-	RTE_ETH_VALID_PORTID_OR_RET(port_id);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

 	/* implemented by the driver */
-	if (dev->dev_ops->xstats_reset != NULL) {
-		(*dev->dev_ops->xstats_reset)(dev);
-		return;
-	}
+	if (dev->dev_ops->xstats_reset != NULL)
+		return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));

 	/* fallback to default */
-	rte_eth_stats_reset(port_id);
+	return rte_eth_stats_reset(port_id);
 }

 static int
@@ -2535,7 +2629,7 @@ rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
 							fw_version, fw_size));
 }

-void
+int
 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
 {
 	struct rte_eth_dev *dev;
@@ -2543,25 +2637,41 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
 		.nb_max = UINT16_MAX,
 		.nb_min = 0,
 		.nb_align = 1,
+		.nb_seg_max = UINT16_MAX,
+		.nb_mtu_seg_max = UINT16_MAX,
 	};
+	int diag;

-	RTE_ETH_VALID_PORTID_OR_RET(port_id);
+	/*
+	 * Init dev_info before port_id check since caller does not have
+	 * return status and does not know if get is successful or not.
+	 */
+	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

-	memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
 	dev_info->rx_desc_lim = lim;
 	dev_info->tx_desc_lim = lim;
 	dev_info->device = dev->device;
 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
 	dev_info->max_mtu = UINT16_MAX;

-	RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
-	(*dev->dev_ops->dev_infos_get)(dev, dev_info);
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+	diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+	if (diag != 0) {
+		/* Cleanup already filled in device information */
+		memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+		return eth_err(port_id, diag);
+	}
+
 	dev_info->driver_name = dev->device->driver->name;
 	dev_info->nb_rx_queues = dev->data->nb_rx_queues;
 	dev_info->nb_tx_queues = dev->data->nb_tx_queues;
 	dev_info->dev_flags = &dev->data->dev_flags;
+
+	return 0;
 }

 int
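rte_eth_dev_info_get() returning int is the change most callers will notice: *dev_info is zeroed on failure, so its fields must not be used unless the call returned 0. Sketch (assuming <rte_ethdev.h> is included):

static uint16_t
max_rx_queues(uint16_t port_id)
{
	/* Hypothetical helper, not part of the patch. */
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return 0; /* treat "info unavailable" as no queues */

	return dev_info.max_rx_queues;
}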
@@ -2590,14 +2700,16 @@ rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
 	return j;
 }

-void
+int
 rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
 {
 	struct rte_eth_dev *dev;

-	RTE_ETH_VALID_PORTID_OR_RET(port_id);
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];
 	rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
+
+	return 0;
 }


@@ -2631,7 +2743,10 @@ rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
 	 * which relies on dev->dev_ops->dev_infos_get.
 	 */
 	if (*dev->dev_ops->dev_infos_get != NULL) {
-		rte_eth_dev_info_get(port_id, &dev_info);
+		ret = rte_eth_dev_info_get(port_id, &dev_info);
+		if (ret != 0)
+			return ret;
+
 		if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
 			return -EINVAL;
 	}
@@ -2726,53 +2841,56 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
 	int mask = 0;
 	int cur, org = 0;
 	uint64_t orig_offloads;
+	uint64_t *dev_offloads;

 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];

 	/* save original values in case of failure */
 	orig_offloads = dev->data->dev_conf.rxmode.offloads;
+	dev_offloads = &dev->data->dev_conf.rxmode.offloads;

 	/*check which option changed by application*/
 	cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-	org = !!(dev->data->dev_conf.rxmode.offloads &
-		 DEV_RX_OFFLOAD_VLAN_STRIP);
+	org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
 	if (cur != org) {
 		if (cur)
-			dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_STRIP;
+			*dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
 		else
-			dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_STRIP;
+			*dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
 		mask |= ETH_VLAN_STRIP_MASK;
 	}

 	cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-	org = !!(dev->data->dev_conf.rxmode.offloads &
-		 DEV_RX_OFFLOAD_VLAN_FILTER);
+	org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
 	if (cur != org) {
 		if (cur)
-			dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_FILTER;
+			*dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 		else
-			dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_FILTER;
+			*dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
 		mask |= ETH_VLAN_FILTER_MASK;
 	}

 	cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-	org = !!(dev->data->dev_conf.rxmode.offloads &
-		 DEV_RX_OFFLOAD_VLAN_EXTEND);
+	org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
 	if (cur != org) {
 		if (cur)
-			dev->data->dev_conf.rxmode.offloads |=
-				DEV_RX_OFFLOAD_VLAN_EXTEND;
+			*dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
 		else
-			dev->data->dev_conf.rxmode.offloads &=
-				~DEV_RX_OFFLOAD_VLAN_EXTEND;
+			*dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
 		mask |= ETH_VLAN_EXTEND_MASK;
 	}

+	cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
+	org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+	if (cur != org) {
+		if (cur)
+			*dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+		else
+			*dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+		mask |= ETH_QINQ_STRIP_MASK;
+	}
+
 	/*no change*/
 	if (mask == 0)
 		return ret;
@@ -2781,7 +2899,7 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
 	ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
 	if (ret) {
 		/* hit an error restore original values */
-		dev->data->dev_conf.rxmode.offloads = orig_offloads;
+		*dev_offloads = orig_offloads;
 	}

 	return eth_err(port_id, ret);
@@ -2791,23 +2909,25 @@ int
 rte_eth_dev_get_vlan_offload(uint16_t port_id)
 {
 	struct rte_eth_dev *dev;
+	uint64_t *dev_offloads;
 	int ret = 0;

 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 	dev = &rte_eth_devices[port_id];
+	dev_offloads = &dev->data->dev_conf.rxmode.offloads;

-	if (dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_STRIP)
+	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
 		ret |= ETH_VLAN_STRIP_OFFLOAD;

-	if (dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_FILTER)
+	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
 		ret |= ETH_VLAN_FILTER_OFFLOAD;

-	if (dev->data->dev_conf.rxmode.offloads &
-	    DEV_RX_OFFLOAD_VLAN_EXTEND)
+	if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
 		ret |= ETH_VLAN_EXTEND_OFFLOAD;

+	if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+		ret |= DEV_RX_OFFLOAD_QINQ_STRIP;
+
 	return ret;
 }

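With ETH_QINQ_STRIP_OFFLOAD handled by rte_eth_dev_set_vlan_offload(), QinQ stripping can be toggled at runtime together with the other VLAN offloads. Sketch (assuming <rte_ethdev.h> is included and the driver advertises DEV_RX_OFFLOAD_QINQ_STRIP):

static int
enable_qinq_strip(uint16_t port_id)
{
	/* Hypothetical helper, not part of the patch. */
	int mask = rte_eth_dev_get_vlan_offload(port_id);

	if (mask < 0)
		return mask;

	mask |= ETH_QINQ_STRIP_OFFLOAD;
	return rte_eth_dev_set_vlan_offload(port_id, mask);
}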
@@ -2974,10 +3094,15 @@ rte_eth_dev_rss_hash_update(uint16_t port_id,
 {
 	struct rte_eth_dev *dev;
 	struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
+	int ret;

 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return ret;
+
 	dev = &rte_eth_devices[port_id];
-	rte_eth_dev_info_get(port_id, &dev_info);
 	if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
 	    dev_info.flow_type_rss_offloads) {
 		RTE_ETHDEV_LOG(ERR,
@@ -3083,9 +3208,11 @@ get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	unsigned i;
+	int ret;

-	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-	rte_eth_dev_info_get(port_id, &dev_info);
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return -1;

 	for (i = 0; i < dev_info.max_mac_addrs; i++)
 		if (memcmp(addr, &dev->data->mac_addrs[i],
@@ -3216,8 +3343,12 @@ get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
 	unsigned i;
+	int ret;
+
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return -1;

-	rte_eth_dev_info_get(port_id, &dev_info);
 	if (!dev->data->hash_mac_addrs)
 		return -1;

@@ -3302,11 +3433,15 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
 	struct rte_eth_dev *dev;
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_link link;
+	int ret;

 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return ret;
+
 	dev = &rte_eth_devices[port_id];
-	rte_eth_dev_info_get(port_id, &dev_info);
 	link = dev->data->dev_link;

 	if (queue_idx > dev_info.max_tx_queues) {
@@ -3585,7 +3720,7 @@ rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
 	return 0;
 }

-int __rte_experimental
+int
 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
 {
 	struct rte_intr_handle *intr_handle;
@@ -3647,7 +3782,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
 			RTE_MEMZONE_IOVA_CONTIG, align);
 }

-int __rte_experimental
+int
 rte_eth_dev_create(struct rte_device *device, const char *name,
 	size_t priv_data_size,
 	ethdev_bus_specific_init ethdev_bus_specific_init,
@@ -3710,7 +3845,7 @@ probe_failed:
 	return retval;
 }

-int __rte_experimental
+int
 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
 	ethdev_uninit_t ethdev_uninit)
 {
@@ -4171,6 +4306,18 @@ rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
 								timestamp));
 }

+int
+rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
+{
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	dev = &rte_eth_devices[port_id];
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
+	return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
+}
+
 int
 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
 {
@@ -4219,7 +4366,7 @@ rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
 	return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
 }

-int __rte_experimental
+int
 rte_eth_dev_get_module_info(uint16_t port_id,
 			    struct rte_eth_dev_module_info *modinfo)
 {
@@ -4232,7 +4379,7 @@ rte_eth_dev_get_module_info(uint16_t port_id,
 	return (*dev->dev_ops->get_module_info)(dev, modinfo);
 }

-int __rte_experimental
+int
 rte_eth_dev_get_module_eeprom(uint16_t port_id,
 			      struct rte_dev_eeprom_info *info)
 {
@@ -4334,15 +4481,14 @@ rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
 				 uint16_t *nb_rx_desc,
 				 uint16_t *nb_tx_desc)
 {
-	struct rte_eth_dev *dev;
 	struct rte_eth_dev_info dev_info;
+	int ret;

 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);

-	dev = &rte_eth_devices[port_id];
-	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
-
-	rte_eth_dev_info_get(port_id, &dev_info);
+	ret = rte_eth_dev_info_get(port_id, &dev_info);
+	if (ret != 0)
+		return ret;

 	if (nb_rx_desc != NULL)
 		rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
@@ -4388,7 +4534,7 @@ static struct rte_eth_dev_switch {
 	enum rte_eth_switch_domain_state state;
 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];

-int __rte_experimental
+int
 rte_eth_switch_domain_alloc(uint16_t *domain_id)
 {
 	unsigned int i;
@@ -4409,7 +4555,7 @@ rte_eth_switch_domain_alloc(uint16_t *domain_id)
 	return -ENOSPC;
 }

-int __rte_experimental
+int
 rte_eth_switch_domain_free(uint16_t domain_id)
 {
 	if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
@@ -4489,7 +4635,7 @@ rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
 		}
 	}
 }

-int __rte_experimental
+int
 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
 {
 	struct rte_kvargs args;
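rte_eth_read_clock() is the one new API in this diff: it reads the raw device clock through the driver's read_clock callback and returns -ENOTSUP when the driver has none. Sketch (assuming <stdio.h>, <inttypes.h>, <rte_ethdev.h> and <rte_errno.h> are included):

static void
print_dev_clock(uint16_t port_id)
{
	/* Hypothetical helper, not part of the patch. */
	uint64_t clk;
	int ret = rte_eth_read_clock(port_id, &clk);

	if (ret == 0)
		printf("port %u: device clock %" PRIu64 "\n", port_id, clk);
	else
		printf("port %u: rte_eth_read_clock: %s\n",
		       port_id, rte_strerror(-ret));
}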