X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_ethdev%2Frte_ethdev.c;h=750bc9d023fdc018f7e9a19eb7f4a4f3f332d43d;hb=eb5902504a1349f40530117b9c33dc352835ea20;hp=5f858174b29a6118dd85eb9fb13dd6ba56a281be;hpb=68b931bff2872044258ffa16030ac83eb9fbd6b0;p=dpdk.git diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c index 5f858174b2..750bc9d023 100644 --- a/lib/librte_ethdev/rte_ethdev.c +++ b/lib/librte_ethdev/rte_ethdev.c @@ -48,7 +48,6 @@ int rte_eth_dev_logtype; static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data"; struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS]; -static uint16_t eth_dev_last_created_port; /* spinlock for eth device callbacks */ static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER; @@ -167,6 +166,25 @@ static const struct { #undef RTE_TX_OFFLOAD_BIT2STR +static const struct { + uint64_t option; + const char *name; +} rte_burst_option_names[] = { + { RTE_ETH_BURST_SCALAR, "Scalar" }, + { RTE_ETH_BURST_VECTOR, "Vector" }, + + { RTE_ETH_BURST_ALTIVEC, "AltiVec" }, + { RTE_ETH_BURST_NEON, "Neon" }, + { RTE_ETH_BURST_SSE, "SSE" }, + { RTE_ETH_BURST_AVX2, "AVX2" }, + { RTE_ETH_BURST_AVX512, "AVX512" }, + + { RTE_ETH_BURST_SCATTERED, "Scattered" }, + { RTE_ETH_BURST_BULK_ALLOC, "Bulk Alloc" }, + { RTE_ETH_BURST_SIMPLE, "Simple" }, + { RTE_ETH_BURST_PER_QUEUE, "Per Queue" }, +}; + /** * The user application callback description. * @@ -331,8 +349,7 @@ uint16_t rte_eth_find_next(uint16_t port_id) { while (port_id < RTE_MAX_ETHPORTS && - rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED && - rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) + rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED) port_id++; if (port_id >= RTE_MAX_ETHPORTS) @@ -341,6 +358,34 @@ rte_eth_find_next(uint16_t port_id) return port_id; } +/* + * Macro to iterate over all valid ports for internal usage. + * Note: RTE_ETH_FOREACH_DEV is different because filtering owned ports. 
+ */ +#define RTE_ETH_FOREACH_VALID_DEV(port_id) \ + for (port_id = rte_eth_find_next(0); \ + port_id < RTE_MAX_ETHPORTS; \ + port_id = rte_eth_find_next(port_id + 1)) + +uint16_t +rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent) +{ + port_id = rte_eth_find_next(port_id); + while (port_id < RTE_MAX_ETHPORTS && + rte_eth_devices[port_id].device != parent) + port_id = rte_eth_find_next(port_id + 1); + + return port_id; +} + +uint16_t +rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id) +{ + RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS); + return rte_eth_find_next_of(port_id, + rte_eth_devices[ref_port_id].device); +} + static void rte_eth_dev_shared_data_prepare(void) { @@ -431,8 +476,6 @@ eth_dev_get(uint16_t port_id) eth_dev->data = &rte_eth_dev_shared_data->data[port_id]; - eth_dev_last_created_port = port_id; - return eth_dev; } @@ -441,6 +484,18 @@ rte_eth_dev_allocate(const char *name) { uint16_t port_id; struct rte_eth_dev *eth_dev = NULL; + size_t name_len; + + name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN); + if (name_len == 0) { + RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n"); + return NULL; + } + + if (name_len >= RTE_ETH_NAME_MAX_LEN) { + RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n"); + return NULL; + } rte_eth_dev_shared_data_prepare(); @@ -462,9 +517,9 @@ rte_eth_dev_allocate(const char *name) } eth_dev = eth_dev_get(port_id); - snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name); + strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name)); eth_dev->data->port_id = port_id; - eth_dev->data->mtu = ETHER_MTU; + eth_dev->data->mtu = RTE_ETHER_MTU; unlock: rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); @@ -557,19 +612,15 @@ rte_eth_is_valid_owner_id(uint64_t owner_id) uint64_t rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id) { + port_id = rte_eth_find_next(port_id); while (port_id < RTE_MAX_ETHPORTS && - ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED && - rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) || - rte_eth_devices[port_id].data->owner.id != owner_id)) - port_id++; - - if (port_id >= RTE_MAX_ETHPORTS) - return RTE_MAX_ETHPORTS; + rte_eth_devices[port_id].data->owner.id != owner_id) + port_id = rte_eth_find_next(port_id + 1); return port_id; } -int __rte_experimental +int rte_eth_dev_owner_new(uint64_t *owner_id) { rte_eth_dev_shared_data_prepare(); @@ -588,7 +639,6 @@ _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, { struct rte_eth_dev *ethdev = &rte_eth_devices[port_id]; struct rte_eth_dev_owner *port_owner; - int sret; if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) { RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n", @@ -612,11 +662,8 @@ _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, return -EPERM; } - sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s", - new_owner->name); - if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN) - RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n", - port_id); + /* can not truncate (same structure) */ + strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN); port_owner->id = new_owner->id; @@ -626,7 +673,7 @@ _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id, return 0; } -int __rte_experimental +int rte_eth_dev_owner_set(const uint16_t port_id, const struct rte_eth_dev_owner *owner) { @@ -642,7 +689,7 @@ rte_eth_dev_owner_set(const uint16_t port_id, 
return ret; } -int __rte_experimental +int rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) { const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner) @@ -659,10 +706,11 @@ rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id) return ret; } -void __rte_experimental +int rte_eth_dev_owner_delete(const uint64_t owner_id) { uint16_t port_id; + int ret = 0; rte_eth_dev_shared_data_prepare(); @@ -680,12 +728,15 @@ rte_eth_dev_owner_delete(const uint64_t owner_id) RTE_ETHDEV_LOG(ERR, "Invalid owner id=%016"PRIx64"\n", owner_id); + ret = -EINVAL; } rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock); + + return ret; } -int __rte_experimental +int rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner) { int ret = 0; @@ -741,14 +792,13 @@ rte_eth_dev_count_avail(void) return count; } -uint16_t __rte_experimental +uint16_t rte_eth_dev_count_total(void) { uint16_t port, count = 0; - for (port = 0; port < RTE_MAX_ETHPORTS; port++) - if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED) - count++; + RTE_ETH_FOREACH_VALID_DEV(port) + count++; return count; } @@ -782,13 +832,11 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id) return -EINVAL; } - for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) { - if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED && - !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) { + RTE_ETH_FOREACH_VALID_DEV(pid) + if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) { *port_id = pid; return 0; } - } return -ENODEV; } @@ -1100,7 +1148,6 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP); if (dev->data->dev_started) { @@ -1119,7 +1166,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, */ memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf)); - rte_eth_dev_info_get(port_id, &dev_info); + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + goto rollback; /* If number of queues specified by application for both Rx and Tx is * zero, use driver preferred values. 
This cannot be done individually @@ -1199,20 +1248,20 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, dev_info.max_rx_pktlen); ret = -EINVAL; goto rollback; - } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) { + } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) { RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n", port_id, dev_conf->rxmode.max_rx_pkt_len, - (unsigned)ETHER_MIN_LEN); + (unsigned int)RTE_ETHER_MIN_LEN); ret = -EINVAL; goto rollback; } } else { - if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN || - dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN) + if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN || + dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN) /* Use default value */ dev->data->dev_conf.rxmode.max_rx_pkt_len = - ETHER_MAX_LEN; + RTE_ETHER_MAX_LEN; } /* Any requested offloading must be within its device capabilities */ @@ -1321,7 +1370,7 @@ static void rte_eth_dev_mac_restore(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { - struct ether_addr *addr; + struct rte_ether_addr *addr; uint16_t i; uint32_t pool = 0; uint64_t pool_mask; @@ -1338,7 +1387,7 @@ rte_eth_dev_mac_restore(struct rte_eth_dev *dev, addr = &dev->data->mac_addrs[i]; /* skip zero address */ - if (is_zero_ether_addr(addr)) + if (rte_is_zero_ether_addr(addr)) continue; pool = 0; @@ -1355,24 +1404,70 @@ rte_eth_dev_mac_restore(struct rte_eth_dev *dev, } } -static void +static int rte_eth_dev_config_restore(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info, uint16_t port_id) { + int ret; + if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)) rte_eth_dev_mac_restore(dev, dev_info); /* replay promiscuous configuration */ - if (rte_eth_promiscuous_get(port_id) == 1) - rte_eth_promiscuous_enable(port_id); - else if (rte_eth_promiscuous_get(port_id) == 0) - rte_eth_promiscuous_disable(port_id); + /* + * use callbacks directly since we don't need port_id check and + * would like to bypass the same value set + */ + if (rte_eth_promiscuous_get(port_id) == 1 && + *dev->dev_ops->promiscuous_enable != NULL) { + ret = eth_err(port_id, + (*dev->dev_ops->promiscuous_enable)(dev)); + if (ret != 0 && ret != -ENOTSUP) { + RTE_ETHDEV_LOG(ERR, + "Failed to enable promiscuous mode for device (port %u): %s\n", + port_id, rte_strerror(-ret)); + return ret; + } + } else if (rte_eth_promiscuous_get(port_id) == 0 && + *dev->dev_ops->promiscuous_disable != NULL) { + ret = eth_err(port_id, + (*dev->dev_ops->promiscuous_disable)(dev)); + if (ret != 0 && ret != -ENOTSUP) { + RTE_ETHDEV_LOG(ERR, + "Failed to disable promiscuous mode for device (port %u): %s\n", + port_id, rte_strerror(-ret)); + return ret; + } + } /* replay all multicast configuration */ - if (rte_eth_allmulticast_get(port_id) == 1) - rte_eth_allmulticast_enable(port_id); - else if (rte_eth_allmulticast_get(port_id) == 0) - rte_eth_allmulticast_disable(port_id); + /* + * use callbacks directly since we don't need port_id check and + * would like to bypass the same value set + */ + if (rte_eth_allmulticast_get(port_id) == 1 && + *dev->dev_ops->allmulticast_enable != NULL) { + ret = eth_err(port_id, + (*dev->dev_ops->allmulticast_enable)(dev)); + if (ret != 0 && ret != -ENOTSUP) { + RTE_ETHDEV_LOG(ERR, + "Failed to enable allmulticast mode for device (port %u): %s\n", + port_id, rte_strerror(-ret)); + return ret; + } + } else if (rte_eth_allmulticast_get(port_id) == 0 && + *dev->dev_ops->allmulticast_disable != NULL) { + ret = eth_err(port_id, + 
(*dev->dev_ops->allmulticast_disable)(dev)); + if (ret != 0 && ret != -ENOTSUP) { + RTE_ETHDEV_LOG(ERR, + "Failed to disable allmulticast mode for device (port %u): %s\n", + port_id, rte_strerror(-ret)); + return ret; + } + } + + return 0; } int @@ -1381,6 +1476,7 @@ rte_eth_dev_start(uint16_t port_id) struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; int diag; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -1395,7 +1491,9 @@ rte_eth_dev_start(uint16_t port_id) return 0; } - rte_eth_dev_info_get(port_id, &dev_info); + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; /* Lets restore MAC now if device does not support live change */ if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR) @@ -1407,7 +1505,14 @@ rte_eth_dev_start(uint16_t port_id) else return eth_err(port_id, diag); - rte_eth_dev_config_restore(dev, &dev_info, port_id); + ret = rte_eth_dev_config_restore(dev, &dev_info, port_id); + if (ret != 0) { + RTE_ETHDEV_LOG(ERR, + "Error during restoring configuration for device (port %u): %s\n", + port_id, rte_strerror(-ret)); + rte_eth_dev_stop(port_id); + return ret; + } if (dev->data->dev_conf.intr_conf.lsc == 0) { RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); @@ -1510,7 +1615,7 @@ rte_eth_dev_reset(uint16_t port_id) return eth_err(port_id, ret); } -int __rte_experimental +int rte_eth_dev_is_removed(uint16_t port_id) { struct rte_eth_dev *dev; @@ -1554,7 +1659,11 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, return -EINVAL; } - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); + if (mp == NULL) { + RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n"); + return -EINVAL; + } + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP); /* @@ -1562,7 +1671,10 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, * This value must be provided in the private data of the memory pool. * First check that the memory pool has a valid private data. 
*/ - rte_eth_dev_info_get(port_id, &dev_info); + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) { RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n", mp->name, (int)mp->private_data_size, @@ -1594,7 +1706,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) { RTE_ETHDEV_LOG(ERR, - "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, = %hu, and a product of %hu\n", + "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", nb_rx_desc, dev_info.rx_desc_lim.nb_max, dev_info.rx_desc_lim.nb_min, dev_info.rx_desc_lim.nb_align); @@ -1646,7 +1758,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, local_conf.offloads) { RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be " - "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n", + "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", port_id, rx_queue_id, local_conf.offloads, dev_info.rx_queue_offload_capa, __func__); @@ -1673,6 +1785,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, struct rte_eth_dev_info dev_info; struct rte_eth_txconf local_conf; void **txq; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); @@ -1682,10 +1795,11 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, return -EINVAL; } - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP); - rte_eth_dev_info_get(port_id, &dev_info); + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; /* Use default specified by driver, if nb_tx_desc is zero */ if (nb_tx_desc == 0) { @@ -1698,7 +1812,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, nb_tx_desc < dev_info.tx_desc_lim.nb_min || nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) { RTE_ETHDEV_LOG(ERR, - "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, = %hu, and a product of %hu\n", + "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n", nb_tx_desc, dev_info.tx_desc_lim.nb_max, dev_info.tx_desc_lim.nb_min, dev_info.tx_desc_lim.nb_align); @@ -1750,7 +1864,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id, local_conf.offloads) { RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be " - "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n", + "within per-queue offload capabilities 0x%"PRIx64" in %s()\n", port_id, tx_queue_id, local_conf.offloads, dev_info.tx_queue_offload_capa, __func__); @@ -1826,30 +1940,44 @@ rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt) return eth_err(port_id, ret); } -void +int rte_eth_promiscuous_enable(uint16_t port_id) { struct rte_eth_dev *dev; + int diag = 0; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable); - (*dev->dev_ops->promiscuous_enable)(dev); - dev->data->promiscuous = 1; + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP); + + if (dev->data->promiscuous == 0) { + diag = (*dev->dev_ops->promiscuous_enable)(dev); + dev->data->promiscuous = (diag == 0) ? 
1 : 0; + } + + return eth_err(port_id, diag); } -void +int rte_eth_promiscuous_disable(uint16_t port_id) { struct rte_eth_dev *dev; + int diag = 0; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable); - dev->data->promiscuous = 0; - (*dev->dev_ops->promiscuous_disable)(dev); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP); + + if (dev->data->promiscuous == 1) { + dev->data->promiscuous = 0; + diag = (*dev->dev_ops->promiscuous_disable)(dev); + if (diag != 0) + dev->data->promiscuous = 1; + } + + return eth_err(port_id, diag); } int @@ -1863,30 +1991,44 @@ rte_eth_promiscuous_get(uint16_t port_id) return dev->data->promiscuous; } -void +int rte_eth_allmulticast_enable(uint16_t port_id) { struct rte_eth_dev *dev; + int diag; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable); - (*dev->dev_ops->allmulticast_enable)(dev); - dev->data->all_multicast = 1; + if (dev->data->all_multicast == 1) + return 0; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP); + diag = (*dev->dev_ops->allmulticast_enable)(dev); + dev->data->all_multicast = (diag == 0) ? 1 : 0; + + return eth_err(port_id, diag); } -void +int rte_eth_allmulticast_disable(uint16_t port_id) { struct rte_eth_dev *dev; + int diag; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable); + if (dev->data->all_multicast == 0) + return 0; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP); dev->data->all_multicast = 0; - (*dev->dev_ops->allmulticast_disable)(dev); + diag = (*dev->dev_ops->allmulticast_disable)(dev); + if (diag != 0) + dev->data->all_multicast = 1; + + return eth_err(port_id, diag); } int @@ -1900,40 +2042,44 @@ rte_eth_allmulticast_get(uint16_t port_id) return dev->data->all_multicast; } -void +int rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link) { struct rte_eth_dev *dev; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) rte_eth_linkstatus_get(dev, eth_link); else { - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); (*dev->dev_ops->link_update)(dev, 1); *eth_link = dev->data->dev_link; } + + return 0; } -void +int rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link) { struct rte_eth_dev *dev; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; if (dev->data->dev_conf.intr_conf.lsc && dev->data->dev_started) rte_eth_linkstatus_get(dev, eth_link); else { - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update); + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP); (*dev->dev_ops->link_update)(dev, 0); *eth_link = dev->data->dev_link; } + + return 0; } int @@ -1955,12 +2101,16 @@ int rte_eth_stats_reset(uint16_t port_id) { struct rte_eth_dev *dev; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP); - (*dev->dev_ops->stats_reset)(dev); + ret = 
(*dev->dev_ops->stats_reset)(dev); + if (ret != 0) + return eth_err(port_id, ret); + dev->data->rx_mbuf_alloc_failed = 0; return 0; @@ -2063,9 +2213,9 @@ rte_eth_basic_stats_get_names(struct rte_eth_dev *dev, uint16_t num_q; for (idx = 0; idx < RTE_NB_STATS; idx++) { - snprintf(xstats_names[cnt_used_entries].name, - sizeof(xstats_names[0].name), - "%s", rte_stats_strings[idx].name); + strlcpy(xstats_names[cnt_used_entries].name, + rte_stats_strings[idx].name, + sizeof(xstats_names[0].name)); cnt_used_entries++; } num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); @@ -2436,22 +2586,20 @@ rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats, } /* reset ethdev extended statistics */ -void +int rte_eth_xstats_reset(uint16_t port_id) { struct rte_eth_dev *dev; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; /* implemented by the driver */ - if (dev->dev_ops->xstats_reset != NULL) { - (*dev->dev_ops->xstats_reset)(dev); - return; - } + if (dev->dev_ops->xstats_reset != NULL) + return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev)); /* fallback to default */ - rte_eth_stats_reset(port_id); + return rte_eth_stats_reset(port_id); } static int @@ -2510,7 +2658,7 @@ rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size) fw_version, fw_size)); } -void +int rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) { struct rte_eth_dev *dev; @@ -2518,23 +2666,41 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info) .nb_max = UINT16_MAX, .nb_min = 0, .nb_align = 1, + .nb_seg_max = UINT16_MAX, + .nb_mtu_seg_max = UINT16_MAX, }; + int diag; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + /* + * Init dev_info before port_id check since caller does not have + * return status and does not know if get is successful or not. 
+ */ + memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); dev_info->rx_desc_lim = lim; dev_info->tx_desc_lim = lim; dev_info->device = dev->device; + dev_info->min_mtu = RTE_ETHER_MIN_MTU; + dev_info->max_mtu = UINT16_MAX; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); + diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info); + if (diag != 0) { + /* Cleanup already filled in device information */ + memset(dev_info, 0, sizeof(struct rte_eth_dev_info)); + return eth_err(port_id, diag); + } - RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); - (*dev->dev_ops->dev_infos_get)(dev, dev_info); dev_info->driver_name = dev->device->driver->name; dev_info->nb_rx_queues = dev->data->nb_rx_queues; dev_info->nb_tx_queues = dev->data->nb_tx_queues; dev_info->dev_flags = &dev->data->dev_flags; + + return 0; } int @@ -2563,14 +2729,16 @@ rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask, return j; } -void -rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr) +int +rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr) { struct rte_eth_dev *dev; - RTE_ETH_VALID_PORTID_OR_RET(port_id); + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); + rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr); + + return 0; } @@ -2590,12 +2758,28 @@ int rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu) { int ret; + struct rte_eth_dev_info dev_info; struct rte_eth_dev *dev; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP); + /* + * Check if the device supports dev_infos_get, if it does not + * skip min_mtu/max_mtu validation here as this requires values + * that are populated within the call to rte_eth_dev_info_get() + * which relies on dev->dev_ops->dev_infos_get. 
+ */ + if (*dev->dev_ops->dev_infos_get != NULL) { + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + + if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu) + return -EINVAL; + } + ret = (*dev->dev_ops->mtu_set)(dev, mtu); if (!ret) dev->data->mtu = mtu; @@ -2686,53 +2870,56 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) int mask = 0; int cur, org = 0; uint64_t orig_offloads; + uint64_t *dev_offloads; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; /* save original values in case of failure */ orig_offloads = dev->data->dev_conf.rxmode.offloads; + dev_offloads = &dev->data->dev_conf.rxmode.offloads; /*check which option changed by application*/ cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD); - org = !!(dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_STRIP); + org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP); if (cur != org) { if (cur) - dev->data->dev_conf.rxmode.offloads |= - DEV_RX_OFFLOAD_VLAN_STRIP; + *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; else - dev->data->dev_conf.rxmode.offloads &= - ~DEV_RX_OFFLOAD_VLAN_STRIP; + *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP; mask |= ETH_VLAN_STRIP_MASK; } cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD); - org = !!(dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_FILTER); + org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER); if (cur != org) { if (cur) - dev->data->dev_conf.rxmode.offloads |= - DEV_RX_OFFLOAD_VLAN_FILTER; + *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; else - dev->data->dev_conf.rxmode.offloads &= - ~DEV_RX_OFFLOAD_VLAN_FILTER; + *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER; mask |= ETH_VLAN_FILTER_MASK; } cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD); - org = !!(dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_EXTEND); + org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND); if (cur != org) { if (cur) - dev->data->dev_conf.rxmode.offloads |= - DEV_RX_OFFLOAD_VLAN_EXTEND; + *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND; else - dev->data->dev_conf.rxmode.offloads &= - ~DEV_RX_OFFLOAD_VLAN_EXTEND; + *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND; mask |= ETH_VLAN_EXTEND_MASK; } + cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD); + org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP); + if (cur != org) { + if (cur) + *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP; + else + *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP; + mask |= ETH_QINQ_STRIP_MASK; + } + /*no change*/ if (mask == 0) return ret; @@ -2741,7 +2928,7 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask) ret = (*dev->dev_ops->vlan_offload_set)(dev, mask); if (ret) { /* hit an error restore original values */ - dev->data->dev_conf.rxmode.offloads = orig_offloads; + *dev_offloads = orig_offloads; } return eth_err(port_id, ret); @@ -2751,23 +2938,25 @@ int rte_eth_dev_get_vlan_offload(uint16_t port_id) { struct rte_eth_dev *dev; + uint64_t *dev_offloads; int ret = 0; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; + dev_offloads = &dev->data->dev_conf.rxmode.offloads; - if (dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_STRIP) + if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP) ret |= ETH_VLAN_STRIP_OFFLOAD; - if (dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_FILTER) + if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) ret |= ETH_VLAN_FILTER_OFFLOAD; - if (dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_EXTEND) + if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) ret |= 
ETH_VLAN_EXTEND_OFFLOAD; + if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP) + ret |= ETH_QINQ_STRIP_OFFLOAD; + return ret; } @@ -2934,10 +3123,15 @@ rte_eth_dev_rss_hash_update(uint16_t port_id, { struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, }; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) != dev_info.flow_type_rss_offloads) { RTE_ETHDEV_LOG(ERR, @@ -3038,26 +3232,29 @@ rte_eth_led_off(uint16_t port_id) * an empty spot. */ static int -get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) +get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) { struct rte_eth_dev_info dev_info; struct rte_eth_dev *dev = &rte_eth_devices[port_id]; unsigned i; + int ret; - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - rte_eth_dev_info_get(port_id, &dev_info); + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return -1; for (i = 0; i < dev_info.max_mac_addrs; i++) - if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0) + if (memcmp(addr, &dev->data->mac_addrs[i], + RTE_ETHER_ADDR_LEN) == 0) return i; return -1; } -static const struct ether_addr null_mac_addr; +static const struct rte_ether_addr null_mac_addr; int -rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr, +rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr, uint32_t pool) { struct rte_eth_dev *dev; @@ -3069,7 +3266,7 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr, dev = &rte_eth_devices[port_id]; RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP); - if (is_zero_ether_addr(addr)) { + if (rte_is_zero_ether_addr(addr)) { RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", port_id); return -EINVAL; @@ -3100,7 +3297,7 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr, if (ret == 0) { /* Update address in NIC data structure */ - ether_addr_copy(addr, &dev->data->mac_addrs[index]); + rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]); /* Update pool bitmap in NIC data structure */ dev->data->mac_pool_sel[index] |= (1ULL << pool); @@ -3110,7 +3307,7 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr, } int -rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr) +rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr) { struct rte_eth_dev *dev; int index; @@ -3132,7 +3329,7 @@ rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr) (*dev->dev_ops->mac_addr_remove)(dev, index); /* Update address in NIC data structure */ - ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); + rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]); /* reset pool bitmap */ dev->data->mac_pool_sel[index] = 0; @@ -3141,14 +3338,14 @@ rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr) } int -rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr) +rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr) { struct rte_eth_dev *dev; int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - if (!is_valid_assigned_ether_addr(addr)) + if (!rte_is_valid_assigned_ether_addr(addr)) return -EINVAL; dev = &rte_eth_devices[port_id]; @@ -3159,7 +3356,7 @@ rte_eth_dev_default_mac_addr_set(uint16_t 
port_id, struct ether_addr *addr) return ret; /* Update default address in NIC data structure */ - ether_addr_copy(addr, &dev->data->mac_addrs[0]); + rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]); return 0; } @@ -3170,26 +3367,30 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr) * an empty spot. */ static int -get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr) +get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr) { struct rte_eth_dev_info dev_info; struct rte_eth_dev *dev = &rte_eth_devices[port_id]; unsigned i; + int ret; + + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return -1; - rte_eth_dev_info_get(port_id, &dev_info); if (!dev->data->hash_mac_addrs) return -1; for (i = 0; i < dev_info.max_hash_mac_addrs; i++) if (memcmp(addr, &dev->data->hash_mac_addrs[i], - ETHER_ADDR_LEN) == 0) + RTE_ETHER_ADDR_LEN) == 0) return i; return -1; } int -rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, +rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr, uint8_t on) { int index; @@ -3199,7 +3400,7 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; - if (is_zero_ether_addr(addr)) { + if (rte_is_zero_ether_addr(addr)) { RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n", port_id); return -EINVAL; @@ -3231,10 +3432,10 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr, if (ret == 0) { /* Update address in NIC data structure */ if (on) - ether_addr_copy(addr, + rte_ether_addr_copy(addr, &dev->data->hash_mac_addrs[index]); else - ether_addr_copy(&null_mac_addr, + rte_ether_addr_copy(&null_mac_addr, &dev->data->hash_mac_addrs[index]); } @@ -3261,11 +3462,15 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx, struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; struct rte_eth_link link; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; + dev = &rte_eth_devices[port_id]; - rte_eth_dev_info_get(port_id, &dev_info); link = dev->data->dev_link; if (queue_idx > dev_info.max_tx_queues) { @@ -3544,7 +3749,7 @@ rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data) return 0; } -int __rte_experimental +int rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id) { struct rte_intr_handle *intr_handle; @@ -3588,9 +3793,15 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, { char z_name[RTE_MEMZONE_NAMESIZE]; const struct rte_memzone *mz; + int rc; - snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", - dev->data->port_id, queue_id, ring_name); + rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s", + dev->data->port_id, queue_id, ring_name); + if (rc >= RTE_MEMZONE_NAMESIZE) { + RTE_ETHDEV_LOG(ERR, "ring name too long\n"); + rte_errno = ENAMETOOLONG; + return NULL; + } mz = rte_memzone_lookup(z_name); if (mz) @@ -3600,7 +3811,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, RTE_MEMZONE_IOVA_CONTIG, align); } -int __rte_experimental +int rte_eth_dev_create(struct rte_device *device, const char *name, size_t priv_data_size, ethdev_bus_specific_init ethdev_bus_specific_init, @@ -3663,7 +3874,7 @@ probe_failed: return retval; } -int __rte_experimental +int rte_eth_dev_destroy(struct rte_eth_dev *ethdev, ethdev_uninit_t 
ethdev_uninit) { @@ -4018,9 +4229,73 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id, return 0; } +int +rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (mode == NULL) + return -EINVAL; + + dev = &rte_eth_devices[port_id]; + + if (queue_id >= dev->data->nb_rx_queues) { + RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id); + return -EINVAL; + } + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP); + memset(mode, 0, sizeof(*mode)); + return eth_err(port_id, + dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode)); +} + +int +rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (mode == NULL) + return -EINVAL; + + dev = &rte_eth_devices[port_id]; + + if (queue_id >= dev->data->nb_tx_queues) { + RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id); + return -EINVAL; + } + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP); + memset(mode, 0, sizeof(*mode)); + return eth_err(port_id, + dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode)); +} + +const char * +rte_eth_burst_mode_option_name(uint64_t option) +{ + const char *name = ""; + unsigned int i; + + for (i = 0; i < RTE_DIM(rte_burst_option_names); ++i) { + if (option == rte_burst_option_names[i].option) { + name = rte_burst_option_names[i].name; + break; + } + } + + return name; +} + int rte_eth_dev_set_mc_addr_list(uint16_t port_id, - struct ether_addr *mc_addr_set, + struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr) { struct rte_eth_dev *dev; @@ -4124,6 +4399,18 @@ rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp) timestamp)); } +int +rte_eth_read_clock(uint16_t port_id, uint64_t *clock) +{ + struct rte_eth_dev *dev; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + dev = &rte_eth_devices[port_id]; + + RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP); + return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock)); +} + int rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info) { @@ -4172,7 +4459,7 @@ rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info)); } -int __rte_experimental +int rte_eth_dev_get_module_info(uint16_t port_id, struct rte_eth_dev_module_info *modinfo) { @@ -4185,7 +4472,7 @@ rte_eth_dev_get_module_info(uint16_t port_id, return (*dev->dev_ops->get_module_info)(dev, modinfo); } -int __rte_experimental +int rte_eth_dev_get_module_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info) { @@ -4287,15 +4574,14 @@ rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id, uint16_t *nb_rx_desc, uint16_t *nb_tx_desc) { - struct rte_eth_dev *dev; struct rte_eth_dev_info dev_info; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP); - - rte_eth_dev_info_get(port_id, &dev_info); + ret = rte_eth_dev_info_get(port_id, &dev_info); + if (ret != 0) + return ret; if (nb_rx_desc != NULL) rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim); @@ -4341,7 +4627,7 @@ static struct rte_eth_dev_switch { enum rte_eth_switch_domain_state state; } rte_eth_switch_domains[RTE_MAX_ETHPORTS]; -int __rte_experimental +int 
rte_eth_switch_domain_alloc(uint16_t *domain_id) { unsigned int i; @@ -4362,7 +4648,7 @@ rte_eth_switch_domain_alloc(uint16_t *domain_id) return -ENOSPC; } -int __rte_experimental +int rte_eth_switch_domain_free(uint16_t domain_id) { if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID || @@ -4442,7 +4728,7 @@ rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in) } } -int __rte_experimental +int rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da) { struct rte_kvargs args;
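
The hunk above is truncated in this excerpt. As a minimal sketch (not part of the patch), the snippet below shows how an application might consume the return codes this change adds to previously void ethdev calls such as rte_eth_dev_info_get(), rte_eth_promiscuous_enable() and rte_eth_link_get(); the function name, port_id handling and error reporting here are assumptions for illustration only.

/*
 * Illustrative only -- not part of the diff. Exercises the int return
 * values introduced by this change for APIs that used to return void.
 */
#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

static int
example_port_setup(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_link link;
	int ret;

	/* rte_eth_dev_info_get() now reports failure instead of returning void */
	ret = rte_eth_dev_info_get(port_id, &dev_info);
	if (ret != 0) {
		fprintf(stderr, "port %u: dev_info_get failed: %s\n",
			port_id, rte_strerror(-ret));
		return ret;
	}

	/* promiscuous enable is skipped if already on and returns a status */
	ret = rte_eth_promiscuous_enable(port_id);
	if (ret != 0 && ret != -ENOTSUP) {
		fprintf(stderr, "port %u: promiscuous enable failed: %s\n",
			port_id, rte_strerror(-ret));
		return ret;
	}

	/* link_get now returns -ENODEV/-ENOTSUP instead of failing silently */
	ret = rte_eth_link_get(port_id, &link);
	if (ret != 0)
		return ret;

	return 0;
}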