ethdev: change device info get callback to return int
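
rte_eth_dev_info_get() now returns int instead of void, so callers can
detect an invalid port (-ENODEV), a driver without the dev_infos_get
callback (-ENOTSUP), or an error reported by the driver callback itself.
A minimal sketch of the new calling convention follows; the
example_get_dev_info() wrapper is illustrative only and is not part of
this patch:

	#include <rte_ethdev.h>

	/* Illustrative wrapper: query device info and propagate the new
	 * error codes instead of assuming the query always succeeds. */
	static int
	example_get_dev_info(uint16_t port_id, struct rte_eth_dev_info *dev_info)
	{
		int ret;

		ret = rte_eth_dev_info_get(port_id, dev_info);
		if (ret != 0) {
			/* -ENODEV: invalid port_id,
			 * -ENOTSUP: driver lacks dev_infos_get,
			 * other negative values come from the driver. */
			return ret;
		}

		return 0;
	}

On failure the dev_info structure is zeroed, so callers that ignore the
return value do not read stale data.
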
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index e54239c..30b0c78 100644
--- a/lib/librte_ethdev/rte_ethdev.c
+++ b/lib/librte_ethdev/rte_ethdev.c
@@ -601,7 +601,7 @@ rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
        return port_id;
 }
 
-int __rte_experimental
+int
 rte_eth_dev_owner_new(uint64_t *owner_id)
 {
        rte_eth_dev_shared_data_prepare();
@@ -654,7 +654,7 @@ _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
        return 0;
 }
 
-int __rte_experimental
+int
 rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
 {
@@ -670,7 +670,7 @@ rte_eth_dev_owner_set(const uint16_t port_id,
        return ret;
 }
 
-int __rte_experimental
+int
 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
 {
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
@@ -687,7 +687,7 @@ rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
        return ret;
 }
 
-void __rte_experimental
+void
 rte_eth_dev_owner_delete(const uint64_t owner_id)
 {
        uint16_t port_id;
@@ -713,7 +713,7 @@ rte_eth_dev_owner_delete(const uint64_t owner_id)
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
 }
 
-int __rte_experimental
+int
 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
 {
        int ret = 0;
@@ -1125,7 +1125,6 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 
        dev = &rte_eth_devices[port_id];
 
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
 
        if (dev->data->dev_started) {
@@ -1144,7 +1143,9 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
         */
        memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
 
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               goto rollback;
 
        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
@@ -1406,6 +1407,7 @@ rte_eth_dev_start(uint16_t port_id)
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1420,7 +1422,9 @@ rte_eth_dev_start(uint16_t port_id)
                return 0;
        }
 
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
 
        /* Lets restore MAC now if device does not support live change */
        if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
@@ -1535,7 +1539,7 @@ rte_eth_dev_reset(uint16_t port_id)
        return eth_err(port_id, ret);
 }
 
-int __rte_experimental
+int
 rte_eth_dev_is_removed(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
@@ -1584,7 +1588,6 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                return -EINVAL;
        }
 
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
 
        /*
@@ -1592,7 +1595,10 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has a valid private data.
         */
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
+
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
                        mp->name, (int)mp->private_data_size,
@@ -1703,6 +1709,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf local_conf;
        void **txq;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1712,10 +1719,11 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
                return -EINVAL;
        }
 
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
 
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
 
        /* Use default specified by driver, if nb_tx_desc is zero */
        if (nb_tx_desc == 0) {
@@ -2540,7 +2548,7 @@ rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
                                                        fw_version, fw_size));
 }
 
-void
+int
 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
 {
        struct rte_eth_dev *dev;
@@ -2548,25 +2556,41 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
+               .nb_seg_max = UINT16_MAX,
+               .nb_mtu_seg_max = UINT16_MAX,
        };
+       int diag;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       /*
+        * Init dev_info before the port_id check so that callers which
+        * ignore the return status still see zeroed data on failure.
+        */
+       memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
-       memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
        dev_info->rx_desc_lim = lim;
        dev_info->tx_desc_lim = lim;
        dev_info->device = dev->device;
        dev_info->min_mtu = RTE_ETHER_MIN_MTU;
        dev_info->max_mtu = UINT16_MAX;
 
-       RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
-       (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+       diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+       if (diag != 0) {
+               /* Cleanup already filled in device information */
+               memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+               return eth_err(port_id, diag);
+       }
+
        dev_info->driver_name = dev->device->driver->name;
        dev_info->nb_rx_queues = dev->data->nb_rx_queues;
        dev_info->nb_tx_queues = dev->data->nb_tx_queues;
 
        dev_info->dev_flags = &dev->data->dev_flags;
+
+       return 0;
 }
 
 int
@@ -2636,7 +2660,10 @@ rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
         * which relies on dev->dev_ops->dev_infos_get.
         */
        if (*dev->dev_ops->dev_infos_get != NULL) {
-               rte_eth_dev_info_get(port_id, &dev_info);
+               ret = rte_eth_dev_info_get(port_id, &dev_info);
+               if (ret != 0)
+                       return ret;
+
                if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
                        return -EINVAL;
        }
@@ -2731,53 +2758,56 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
        int mask = 0;
        int cur, org = 0;
        uint64_t orig_offloads;
+       uint64_t *dev_offloads;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
        /* save original values in case of failure */
        orig_offloads = dev->data->dev_conf.rxmode.offloads;
+       dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
        /*check which option changed by application*/
        cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-       org = !!(dev->data->dev_conf.rxmode.offloads &
-                DEV_RX_OFFLOAD_VLAN_STRIP);
+       org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
        if (cur != org) {
                if (cur)
-                       dev->data->dev_conf.rxmode.offloads |=
-                               DEV_RX_OFFLOAD_VLAN_STRIP;
+                       *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
                else
-                       dev->data->dev_conf.rxmode.offloads &=
-                               ~DEV_RX_OFFLOAD_VLAN_STRIP;
+                       *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
                mask |= ETH_VLAN_STRIP_MASK;
        }
 
        cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-       org = !!(dev->data->dev_conf.rxmode.offloads &
-                DEV_RX_OFFLOAD_VLAN_FILTER);
+       org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
        if (cur != org) {
                if (cur)
-                       dev->data->dev_conf.rxmode.offloads |=
-                               DEV_RX_OFFLOAD_VLAN_FILTER;
+                       *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
                else
-                       dev->data->dev_conf.rxmode.offloads &=
-                               ~DEV_RX_OFFLOAD_VLAN_FILTER;
+                       *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
                mask |= ETH_VLAN_FILTER_MASK;
        }
 
        cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-       org = !!(dev->data->dev_conf.rxmode.offloads &
-                DEV_RX_OFFLOAD_VLAN_EXTEND);
+       org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
        if (cur != org) {
                if (cur)
-                       dev->data->dev_conf.rxmode.offloads |=
-                               DEV_RX_OFFLOAD_VLAN_EXTEND;
+                       *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
                else
-                       dev->data->dev_conf.rxmode.offloads &=
-                               ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+                       *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
                mask |= ETH_VLAN_EXTEND_MASK;
        }
 
+       cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
+       org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+       if (cur != org) {
+               if (cur)
+                       *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+               else
+                       *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+               mask |= ETH_QINQ_STRIP_MASK;
+       }
+
        /*no change*/
        if (mask == 0)
                return ret;
@@ -2786,7 +2816,7 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
        ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
        if (ret) {
                /* hit an error restore  original values */
-               dev->data->dev_conf.rxmode.offloads = orig_offloads;
+               *dev_offloads = orig_offloads;
        }
 
        return eth_err(port_id, ret);
@@ -2796,23 +2826,25 @@ int
 rte_eth_dev_get_vlan_offload(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
+       uint64_t *dev_offloads;
        int ret = 0;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
+       dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
-       if (dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                ret |= ETH_VLAN_STRIP_OFFLOAD;
 
-       if (dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                ret |= ETH_VLAN_FILTER_OFFLOAD;
 
-       if (dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_VLAN_EXTEND)
+       if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
                ret |= ETH_VLAN_EXTEND_OFFLOAD;
 
+       if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+               ret |= ETH_QINQ_STRIP_OFFLOAD;
+
        return ret;
 }
 
@@ -2979,10 +3011,15 @@ rte_eth_dev_rss_hash_update(uint16_t port_id,
 {
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
+
        dev = &rte_eth_devices[port_id];
-       rte_eth_dev_info_get(port_id, &dev_info);
        if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
@@ -3088,9 +3125,11 @@ get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
        struct rte_eth_dev_info dev_info;
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        unsigned i;
+       int ret;
 
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return -1;
 
        for (i = 0; i < dev_info.max_mac_addrs; i++)
                if (memcmp(addr, &dev->data->mac_addrs[i],
@@ -3221,8 +3260,12 @@ get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
        struct rte_eth_dev_info dev_info;
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        unsigned i;
+       int ret;
+
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return -1;
 
-       rte_eth_dev_info_get(port_id, &dev_info);
        if (!dev->data->hash_mac_addrs)
                return -1;
 
@@ -3307,11 +3350,15 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_link link;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
+
        dev = &rte_eth_devices[port_id];
-       rte_eth_dev_info_get(port_id, &dev_info);
        link = dev->data->dev_link;
 
        if (queue_idx > dev_info.max_tx_queues) {
@@ -3590,7 +3637,7 @@ rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
        return 0;
 }
 
-int __rte_experimental
+int
 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
 {
        struct rte_intr_handle *intr_handle;
@@ -3652,7 +3699,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
                        RTE_MEMZONE_IOVA_CONTIG, align);
 }
 
-int __rte_experimental
+int
 rte_eth_dev_create(struct rte_device *device, const char *name,
        size_t priv_data_size,
        ethdev_bus_specific_init ethdev_bus_specific_init,
@@ -3715,7 +3762,7 @@ probe_failed:
        return retval;
 }
 
-int  __rte_experimental
+int
 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
        ethdev_uninit_t ethdev_uninit)
 {
@@ -4236,7 +4283,7 @@ rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
        return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
 }
 
-int __rte_experimental
+int
 rte_eth_dev_get_module_info(uint16_t port_id,
                            struct rte_eth_dev_module_info *modinfo)
 {
@@ -4249,7 +4296,7 @@ rte_eth_dev_get_module_info(uint16_t port_id,
        return (*dev->dev_ops->get_module_info)(dev, modinfo);
 }
 
-int __rte_experimental
+int
 rte_eth_dev_get_module_eeprom(uint16_t port_id,
                              struct rte_dev_eeprom_info *info)
 {
@@ -4351,15 +4398,14 @@ rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
                                 uint16_t *nb_rx_desc,
                                 uint16_t *nb_tx_desc)
 {
-       struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 
-       dev = &rte_eth_devices[port_id];
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
-
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
 
        if (nb_rx_desc != NULL)
                rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
@@ -4405,7 +4451,7 @@ static struct rte_eth_dev_switch {
        enum rte_eth_switch_domain_state state;
 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
 
-int __rte_experimental
+int
 rte_eth_switch_domain_alloc(uint16_t *domain_id)
 {
        unsigned int i;
@@ -4426,7 +4472,7 @@ rte_eth_switch_domain_alloc(uint16_t *domain_id)
        return -ENOSPC;
 }
 
-int __rte_experimental
+int
 rte_eth_switch_domain_free(uint16_t domain_id)
 {
        if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
@@ -4506,7 +4552,7 @@ rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
        }
 }
 
-int __rte_experimental
+int
 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
 {
        struct rte_kvargs args;