ethdev: move egress metadata to dynamic field
diff --git a/lib/librte_ethdev/rte_ethdev.c b/lib/librte_ethdev/rte_ethdev.c
index b2ac590..652c369 100644
@@ -37,8 +37,8 @@
 #include <rte_string_fns.h>
 #include <rte_kvargs.h>
 #include <rte_class.h>
+#include <rte_ether.h>
 
-#include "rte_ether.h"
 #include "rte_ethdev.h"
 #include "rte_ethdev_driver.h"
 #include "ethdev_profile.h"
@@ -48,7 +48,6 @@ int rte_eth_dev_logtype;
 
 static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
 struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
-static uint16_t eth_dev_last_created_port;
 
 /* spinlock for eth device callbacks */
 static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
@@ -162,7 +161,6 @@ static const struct {
        RTE_TX_OFFLOAD_BIT2STR(UDP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(IP_TNL_TSO),
        RTE_TX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
-       RTE_TX_OFFLOAD_BIT2STR(MATCH_METADATA),
 };
 
 #undef RTE_TX_OFFLOAD_BIT2STR
@@ -187,7 +185,7 @@ enum {
        STAT_QMAP_RX
 };
 
-int __rte_experimental
+int
 rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
 {
        int ret;
@@ -288,7 +286,7 @@ error:
        return ret;
 }
 
-uint16_t __rte_experimental
+uint16_t
 rte_eth_iterator_next(struct rte_dev_iterator *iter)
 {
        if (iter->cls == NULL) /* invalid ethdev iterator */
@@ -317,7 +315,7 @@ rte_eth_iterator_next(struct rte_dev_iterator *iter)
        return RTE_MAX_ETHPORTS;
 }
 
-void __rte_experimental
+void
 rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
 {
        if (iter->bus_str == NULL)
@@ -331,8 +329,7 @@ uint16_t
 rte_eth_find_next(uint16_t port_id)
 {
        while (port_id < RTE_MAX_ETHPORTS &&
-              rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
-              rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
+                       rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
                port_id++;
 
        if (port_id >= RTE_MAX_ETHPORTS)
@@ -341,6 +338,34 @@ rte_eth_find_next(uint16_t port_id)
        return port_id;
 }
 
+/*
+ * Macro to iterate over all valid ports for internal usage.
+ * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
+ */
+#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
+       for (port_id = rte_eth_find_next(0); \
+            port_id < RTE_MAX_ETHPORTS; \
+            port_id = rte_eth_find_next(port_id + 1))
+
+uint16_t
+rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
+{
+       port_id = rte_eth_find_next(port_id);
+       while (port_id < RTE_MAX_ETHPORTS &&
+                       rte_eth_devices[port_id].device != parent)
+               port_id = rte_eth_find_next(port_id + 1);
+
+       return port_id;
+}
+
+uint16_t
+rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
+{
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
+       return rte_eth_find_next_of(port_id,
+                       rte_eth_devices[ref_port_id].device);
+}
+
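Illustrative usage (not part of this patch): the new rte_eth_find_next_sibling()
helper makes it easy to walk every port that shares the same underlying
rte_device as a reference port. "ref_port" below is an assumed, caller-provided
valid port id.

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Print every port backed by the same device as ref_port. */
    static void
    list_sibling_ports(uint16_t ref_port)
    {
            uint16_t p;

            for (p = rte_eth_find_next_sibling(0, ref_port);
                 p < RTE_MAX_ETHPORTS;
                 p = rte_eth_find_next_sibling(p + 1, ref_port))
                    printf("port %u shares a device with port %u\n",
                           p, ref_port);
    }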
 static void
 rte_eth_dev_shared_data_prepare(void)
 {
@@ -431,8 +456,6 @@ eth_dev_get(uint16_t port_id)
 
        eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
 
-       eth_dev_last_created_port = port_id;
-
        return eth_dev;
 }
 
@@ -441,6 +464,18 @@ rte_eth_dev_allocate(const char *name)
 {
        uint16_t port_id;
        struct rte_eth_dev *eth_dev = NULL;
+       size_t name_len;
+
+       name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
+       if (name_len == 0) {
+               RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
+               return NULL;
+       }
+
+       if (name_len >= RTE_ETH_NAME_MAX_LEN) {
+               RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
+               return NULL;
+       }
 
        rte_eth_dev_shared_data_prepare();
 
@@ -462,9 +497,9 @@ rte_eth_dev_allocate(const char *name)
        }
 
        eth_dev = eth_dev_get(port_id);
-       snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
+       strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
        eth_dev->data->port_id = port_id;
-       eth_dev->data->mtu = ETHER_MTU;
+       eth_dev->data->mtu = RTE_ETHER_MTU;
 
 unlock:
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
@@ -557,19 +592,15 @@ rte_eth_is_valid_owner_id(uint64_t owner_id)
 uint64_t
 rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
 {
+       port_id = rte_eth_find_next(port_id);
        while (port_id < RTE_MAX_ETHPORTS &&
-              ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
-              rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
-              rte_eth_devices[port_id].data->owner.id != owner_id))
-               port_id++;
-
-       if (port_id >= RTE_MAX_ETHPORTS)
-               return RTE_MAX_ETHPORTS;
+                       rte_eth_devices[port_id].data->owner.id != owner_id)
+               port_id = rte_eth_find_next(port_id + 1);
 
        return port_id;
 }
 
-int __rte_experimental
+int
 rte_eth_dev_owner_new(uint64_t *owner_id)
 {
        rte_eth_dev_shared_data_prepare();
@@ -588,7 +619,6 @@ _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
 {
        struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
        struct rte_eth_dev_owner *port_owner;
-       int sret;
 
        if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
                RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
@@ -612,11 +642,8 @@ _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
                return -EPERM;
        }
 
-       sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
-                       new_owner->name);
-       if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
-               RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n",
-                       port_id);
+       /* can not truncate (same structure) */
+       strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
 
        port_owner->id = new_owner->id;
 
@@ -626,7 +653,7 @@ _rte_eth_dev_owner_set(const uint16_t port_id, const uint64_t old_owner_id,
        return 0;
 }
 
-int __rte_experimental
+int
 rte_eth_dev_owner_set(const uint16_t port_id,
                      const struct rte_eth_dev_owner *owner)
 {
@@ -642,7 +669,7 @@ rte_eth_dev_owner_set(const uint16_t port_id,
        return ret;
 }
 
-int __rte_experimental
+int
 rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
 {
        const struct rte_eth_dev_owner new_owner = (struct rte_eth_dev_owner)
@@ -659,10 +686,11 @@ rte_eth_dev_owner_unset(const uint16_t port_id, const uint64_t owner_id)
        return ret;
 }
 
-void __rte_experimental
+int
 rte_eth_dev_owner_delete(const uint64_t owner_id)
 {
        uint16_t port_id;
+       int ret = 0;
 
        rte_eth_dev_shared_data_prepare();
 
@@ -680,12 +708,15 @@ rte_eth_dev_owner_delete(const uint64_t owner_id)
                RTE_ETHDEV_LOG(ERR,
                               "Invalid owner id=%016"PRIx64"\n",
                               owner_id);
+               ret = -EINVAL;
        }
 
        rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
+
+       return ret;
 }
 
-int __rte_experimental
+int
 rte_eth_dev_owner_get(const uint16_t port_id, struct rte_eth_dev_owner *owner)
 {
        int ret = 0;
@@ -721,12 +752,6 @@ rte_eth_dev_get_sec_ctx(uint16_t port_id)
        return rte_eth_devices[port_id].security_ctx;
 }
 
-uint16_t
-rte_eth_dev_count(void)
-{
-       return rte_eth_dev_count_avail();
-}
-
 uint16_t
 rte_eth_dev_count_avail(void)
 {
@@ -741,14 +766,13 @@ rte_eth_dev_count_avail(void)
        return count;
 }
 
-uint16_t __rte_experimental
+uint16_t
 rte_eth_dev_count_total(void)
 {
        uint16_t port, count = 0;
 
-       for (port = 0; port < RTE_MAX_ETHPORTS; port++)
-               if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
-                       count++;
+       RTE_ETH_FOREACH_VALID_DEV(port)
+               count++;
 
        return count;
 }
@@ -782,13 +806,11 @@ rte_eth_dev_get_port_by_name(const char *name, uint16_t *port_id)
                return -EINVAL;
        }
 
-       for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
-               if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
-                   !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
+       RTE_ETH_FOREACH_VALID_DEV(pid)
+               if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
                        *port_id = pid;
                        return 0;
                }
-       }
 
        return -ENODEV;
 }
@@ -875,6 +897,13 @@ rte_eth_dev_rx_queue_start(uint16_t port_id, uint16_t rx_queue_id)
 
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_start, -ENOTSUP);
 
+       if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
+               RTE_ETHDEV_LOG(INFO,
+                       "Can't start Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
+                       rx_queue_id, port_id);
+               return -EINVAL;
+       }
+
        if (dev->data->rx_queue_state[rx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
@@ -902,6 +931,13 @@ rte_eth_dev_rx_queue_stop(uint16_t port_id, uint16_t rx_queue_id)
 
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_stop, -ENOTSUP);
 
+       if (rte_eth_dev_is_rx_hairpin_queue(dev, rx_queue_id)) {
+               RTE_ETHDEV_LOG(INFO,
+                       "Can't stop Rx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
+                       rx_queue_id, port_id);
+               return -EINVAL;
+       }
+
        if (dev->data->rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
@@ -935,6 +971,13 @@ rte_eth_dev_tx_queue_start(uint16_t port_id, uint16_t tx_queue_id)
 
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_start, -ENOTSUP);
 
+       if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
+               RTE_ETHDEV_LOG(INFO,
+                       "Can't start Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
+                       tx_queue_id, port_id);
+               return -EINVAL;
+       }
+
        if (dev->data->tx_queue_state[tx_queue_id] != RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already started\n",
@@ -960,6 +1003,13 @@ rte_eth_dev_tx_queue_stop(uint16_t port_id, uint16_t tx_queue_id)
 
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_stop, -ENOTSUP);
 
+       if (rte_eth_dev_is_tx_hairpin_queue(dev, tx_queue_id)) {
+               RTE_ETHDEV_LOG(INFO,
+                       "Can't stop Tx hairpin queue %"PRIu16" of device with port_id=%"PRIu16"\n",
+                       tx_queue_id, port_id);
+               return -EINVAL;
+       }
+
        if (dev->data->tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STOPPED) {
                RTE_ETHDEV_LOG(INFO,
                        "Queue %"PRIu16" of device with port_id=%"PRIu16" already stopped\n",
@@ -1092,17 +1142,35 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
 {
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
-       struct rte_eth_conf local_conf = *dev_conf;
+       struct rte_eth_conf orig_conf;
        int diag;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
        dev = &rte_eth_devices[port_id];
 
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
 
-       rte_eth_dev_info_get(port_id, &dev_info);
+       if (dev->data->dev_started) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Port %u must be stopped to allow configuration\n",
+                       port_id);
+               return -EBUSY;
+       }
+
+       /* Store original config, as rollback required on failure */
+       memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
+
+       /*
+        * Copy the dev_conf parameter into the dev structure.
+        * rte_eth_dev_info_get() requires dev_conf, copy it before dev_info get
+        */
+       memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               goto rollback;
 
        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
@@ -1123,26 +1191,18 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                RTE_ETHDEV_LOG(ERR,
                        "Number of RX queues requested (%u) is greater than max supported(%d)\n",
                        nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto rollback;
        }
 
        if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
                RTE_ETHDEV_LOG(ERR,
                        "Number of TX queues requested (%u) is greater than max supported(%d)\n",
                        nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
-               return -EINVAL;
-       }
-
-       if (dev->data->dev_started) {
-               RTE_ETHDEV_LOG(ERR,
-                       "Port %u must be stopped to allow configuration\n",
-                       port_id);
-               return -EBUSY;
+               ret = -EINVAL;
+               goto rollback;
        }
 
-       /* Copy the dev_conf parameter into the dev structure */
-       memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
-
        /*
         * Check that the numbers of RX and TX queues are not greater
         * than the maximum number of RX and TX queues supported by the
@@ -1151,13 +1211,15 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
        if (nb_rx_q > dev_info.max_rx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
                        port_id, nb_rx_q, dev_info.max_rx_queues);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto rollback;
        }
 
        if (nb_tx_q > dev_info.max_tx_queues) {
                RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
                        port_id, nb_tx_q, dev_info.max_tx_queues);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto rollback;
        }
 
        /* Check that the device supports requested interrupts */
@@ -1165,63 +1227,72 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
                        dev->device->driver->name);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto rollback;
        }
        if ((dev_conf->intr_conf.rmv == 1) &&
                        (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
                RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
                        dev->device->driver->name);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto rollback;
        }
 
        /*
         * If jumbo frames are enabled, check that the maximum RX packet
         * length is supported by the configured device.
         */
-       if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+       if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
                                dev_info.max_rx_pktlen);
-                       return -EINVAL;
-               } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
+                       ret = -EINVAL;
+                       goto rollback;
+               } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
                        RTE_ETHDEV_LOG(ERR,
                                "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
                                port_id, dev_conf->rxmode.max_rx_pkt_len,
-                               (unsigned)ETHER_MIN_LEN);
-                       return -EINVAL;
+                               (unsigned int)RTE_ETHER_MIN_LEN);
+                       ret = -EINVAL;
+                       goto rollback;
                }
        } else {
-               if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
-                       dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+               if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
+                       dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
                        /* Use default value */
                        dev->data->dev_conf.rxmode.max_rx_pkt_len =
-                                                       ETHER_MAX_LEN;
+                                                       RTE_ETHER_MAX_LEN;
        }
 
        /* Any requested offloading must be within its device capabilities */
-       if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
-            local_conf.rxmode.offloads) {
+       if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
+            dev_conf->rxmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
-                       port_id, local_conf.rxmode.offloads,
+                       port_id, dev_conf->rxmode.offloads,
                        dev_info.rx_offload_capa,
                        __func__);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto rollback;
        }
-       if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
-            local_conf.txmode.offloads) {
+       if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
+            dev_conf->txmode.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
                        "capabilities 0x%"PRIx64" in %s()\n",
-                       port_id, local_conf.txmode.offloads,
+                       port_id, dev_conf->txmode.offloads,
                        dev_info.tx_offload_capa,
                        __func__);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto rollback;
        }
 
+       dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf =
+               rte_eth_rss_hf_refine(dev_conf->rx_adv_conf.rss_conf.rss_hf);
+
        /* Check that device supports requested rss hash functions. */
        if ((dev_info.flow_type_rss_offloads |
             dev_conf->rx_adv_conf.rss_conf.rss_hf) !=
@@ -1230,7 +1301,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                        "Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
                        port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
                        dev_info.flow_type_rss_offloads);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto rollback;
        }
 
        /*
@@ -1241,7 +1313,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                RTE_ETHDEV_LOG(ERR,
                        "Port%u rte_eth_dev_rx_queue_config = %d\n",
                        port_id, diag);
-               return diag;
+               ret = diag;
+               goto rollback;
        }
 
        diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
@@ -1250,7 +1323,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                        "Port%u rte_eth_dev_tx_queue_config = %d\n",
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
-               return diag;
+               ret = diag;
+               goto rollback;
        }
 
        diag = (*dev->dev_ops->dev_configure)(dev);
@@ -1259,7 +1333,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
-               return eth_err(port_id, diag);
+               ret = eth_err(port_id, diag);
+               goto rollback;
        }
 
        /* Initialize Rx profiling if enabled at compilation time. */
@@ -1269,10 +1344,16 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                        port_id, diag);
                rte_eth_dev_rx_queue_config(dev, 0);
                rte_eth_dev_tx_queue_config(dev, 0);
-               return eth_err(port_id, diag);
+               ret = eth_err(port_id, diag);
+               goto rollback;
        }
 
        return 0;
+
+rollback:
+       memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
+
+       return ret;
 }
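Illustrative caller sketch (not part of this patch): on any failure
rte_eth_dev_configure() now leaves dev_conf untouched thanks to the rollback
path above, so the caller only has to check the return code. port_id, nb_rxq
and nb_txq are assumed to be supplied by the application.

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_ethdev.h>

    static int
    configure_port(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
    {
            struct rte_eth_conf conf = { 0 };       /* default configuration */
            int ret;

            ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
            if (ret < 0)
                    printf("port %u: configure failed: %s\n",
                           port_id, rte_strerror(-ret));
            return ret;
    }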
 
 void
@@ -1294,7 +1375,7 @@ static void
 rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
                        struct rte_eth_dev_info *dev_info)
 {
-       struct ether_addr *addr;
+       struct rte_ether_addr *addr;
        uint16_t i;
        uint32_t pool = 0;
        uint64_t pool_mask;
@@ -1311,7 +1392,7 @@ rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
                        addr = &dev->data->mac_addrs[i];
 
                        /* skip zero address */
-                       if (is_zero_ether_addr(addr))
+                       if (rte_is_zero_ether_addr(addr))
                                continue;
 
                        pool = 0;
@@ -1328,24 +1409,70 @@ rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
        }
 }
 
-static void
+static int
 rte_eth_dev_config_restore(struct rte_eth_dev *dev,
                           struct rte_eth_dev_info *dev_info, uint16_t port_id)
 {
+       int ret;
+
        if (!(*dev_info->dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR))
                rte_eth_dev_mac_restore(dev, dev_info);
 
        /* replay promiscuous configuration */
-       if (rte_eth_promiscuous_get(port_id) == 1)
-               rte_eth_promiscuous_enable(port_id);
-       else if (rte_eth_promiscuous_get(port_id) == 0)
-               rte_eth_promiscuous_disable(port_id);
+       /*
+        * use the dev_ops callbacks directly: the port_id check is not
+        * needed here, and the same-value short-circuit in the public API
+        * must be bypassed so the hardware state is actually restored
+        */
+       if (rte_eth_promiscuous_get(port_id) == 1 &&
+           *dev->dev_ops->promiscuous_enable != NULL) {
+               ret = eth_err(port_id,
+                             (*dev->dev_ops->promiscuous_enable)(dev));
+               if (ret != 0 && ret != -ENOTSUP) {
+                       RTE_ETHDEV_LOG(ERR,
+                               "Failed to enable promiscuous mode for device (port %u): %s\n",
+                               port_id, rte_strerror(-ret));
+                       return ret;
+               }
+       } else if (rte_eth_promiscuous_get(port_id) == 0 &&
+                  *dev->dev_ops->promiscuous_disable != NULL) {
+               ret = eth_err(port_id,
+                             (*dev->dev_ops->promiscuous_disable)(dev));
+               if (ret != 0 && ret != -ENOTSUP) {
+                       RTE_ETHDEV_LOG(ERR,
+                               "Failed to disable promiscuous mode for device (port %u): %s\n",
+                               port_id, rte_strerror(-ret));
+                       return ret;
+               }
+       }
 
        /* replay all multicast configuration */
-       if (rte_eth_allmulticast_get(port_id) == 1)
-               rte_eth_allmulticast_enable(port_id);
-       else if (rte_eth_allmulticast_get(port_id) == 0)
-               rte_eth_allmulticast_disable(port_id);
+       /*
+        * use the dev_ops callbacks directly: the port_id check is not
+        * needed here, and the same-value short-circuit in the public API
+        * must be bypassed so the hardware state is actually restored
+        */
+       if (rte_eth_allmulticast_get(port_id) == 1 &&
+           *dev->dev_ops->allmulticast_enable != NULL) {
+               ret = eth_err(port_id,
+                             (*dev->dev_ops->allmulticast_enable)(dev));
+               if (ret != 0 && ret != -ENOTSUP) {
+                       RTE_ETHDEV_LOG(ERR,
+                               "Failed to enable allmulticast mode for device (port %u): %s\n",
+                               port_id, rte_strerror(-ret));
+                       return ret;
+               }
+       } else if (rte_eth_allmulticast_get(port_id) == 0 &&
+                  *dev->dev_ops->allmulticast_disable != NULL) {
+               ret = eth_err(port_id,
+                             (*dev->dev_ops->allmulticast_disable)(dev));
+               if (ret != 0 && ret != -ENOTSUP) {
+                       RTE_ETHDEV_LOG(ERR,
+                               "Failed to disable allmulticast mode for device (port %u): %s\n",
+                               port_id, rte_strerror(-ret));
+                       return ret;
+               }
+       }
+
+       return 0;
 }
 
 int
@@ -1354,6 +1481,7 @@ rte_eth_dev_start(uint16_t port_id)
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        int diag;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1368,7 +1496,9 @@ rte_eth_dev_start(uint16_t port_id)
                return 0;
        }
 
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
 
        /* Lets restore MAC now if device does not support live change */
        if (*dev_info.dev_flags & RTE_ETH_DEV_NOLIVE_MAC_ADDR)
@@ -1380,7 +1510,14 @@ rte_eth_dev_start(uint16_t port_id)
        else
                return eth_err(port_id, diag);
 
-       rte_eth_dev_config_restore(dev, &dev_info, port_id);
+       ret = rte_eth_dev_config_restore(dev, &dev_info, port_id);
+       if (ret != 0) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Error during restoring configuration for device (port %u): %s\n",
+                       port_id, rte_strerror(-ret));
+               rte_eth_dev_stop(port_id);
+               return ret;
+       }
 
        if (dev->data->dev_conf.intr_conf.lsc == 0) {
                RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
@@ -1483,7 +1620,7 @@ rte_eth_dev_reset(uint16_t port_id)
        return eth_err(port_id, ret);
 }
 
-int __rte_experimental
+int
 rte_eth_dev_is_removed(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
@@ -1527,7 +1664,11 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                return -EINVAL;
        }
 
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+       if (mp == NULL) {
+               RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
+               return -EINVAL;
+       }
+
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
 
        /*
@@ -1535,7 +1676,10 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
         * This value must be provided in the private data of the memory pool.
         * First check that the memory pool has a valid private data.
         */
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
+
        if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
                RTE_ETHDEV_LOG(ERR, "%s private_data_size %d < %d\n",
                        mp->name, (int)mp->private_data_size,
@@ -1567,7 +1711,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
                        nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
 
                RTE_ETHDEV_LOG(ERR,
-                       "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, = %hu, and a product of %hu\n",
+                       "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_rx_desc, dev_info.rx_desc_lim.nb_max,
                        dev_info.rx_desc_lim.nb_min,
                        dev_info.rx_desc_lim.nb_align);
@@ -1619,7 +1763,7 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
             local_conf.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
-                       "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n",
+                       "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
                        port_id, rx_queue_id, local_conf.offloads,
                        dev_info.rx_queue_offload_capa,
                        __func__);
@@ -1637,6 +1781,78 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
        return eth_err(port_id, ret);
 }
 
+int
+rte_eth_rx_hairpin_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+                              uint16_t nb_rx_desc,
+                              const struct rte_eth_hairpin_conf *conf)
+{
+       int ret;
+       struct rte_eth_dev *dev;
+       struct rte_eth_hairpin_cap cap;
+       void **rxq;
+       int i;
+       int count;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+       dev = &rte_eth_devices[port_id];
+       if (rx_queue_id >= dev->data->nb_rx_queues) {
+               RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
+               return -EINVAL;
+       }
+       ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
+       if (ret != 0)
+               return ret;
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_hairpin_queue_setup,
+                               -ENOTSUP);
+       /* if nb_rx_desc is zero use max number of desc from the driver. */
+       if (nb_rx_desc == 0)
+               nb_rx_desc = cap.max_nb_desc;
+       if (nb_rx_desc > cap.max_nb_desc) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Invalid value for nb_rx_desc(=%hu), should be: <= %hu",
+                       nb_rx_desc, cap.max_nb_desc);
+               return -EINVAL;
+       }
+       if (conf->peer_count > cap.max_rx_2_tx) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Invalid value for number of peers for Rx queue(=%hu), should be: <= %hu",
+                       conf->peer_count, cap.max_rx_2_tx);
+               return -EINVAL;
+       }
+       if (conf->peer_count == 0) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Invalid value for number of peers for Rx queue(=%hu), should be: > 0",
+                       conf->peer_count);
+               return -EINVAL;
+       }
+       for (i = 0, count = 0; i < dev->data->nb_rx_queues &&
+            cap.max_nb_queues != UINT16_MAX; i++) {
+               if (i == rx_queue_id || rte_eth_dev_is_rx_hairpin_queue(dev, i))
+                       count++;
+       }
+       if (count > cap.max_nb_queues) {
+               RTE_ETHDEV_LOG(ERR, "Too many Rx hairpin queues, max is %d",
+                       cap.max_nb_queues);
+               return -EINVAL;
+       }
+       if (dev->data->dev_started)
+               return -EBUSY;
+       rxq = dev->data->rx_queues;
+       if (rxq[rx_queue_id] != NULL) {
+               RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release,
+                                       -ENOTSUP);
+               (*dev->dev_ops->rx_queue_release)(rxq[rx_queue_id]);
+               rxq[rx_queue_id] = NULL;
+       }
+       ret = (*dev->dev_ops->rx_hairpin_queue_setup)(dev, rx_queue_id,
+                                                     nb_rx_desc, conf);
+       if (ret == 0)
+               dev->data->rx_queue_state[rx_queue_id] =
+                       RTE_ETH_QUEUE_STATE_HAIRPIN;
+       return eth_err(port_id, ret);
+}
+
 int
 rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
                       uint16_t nb_tx_desc, unsigned int socket_id,
@@ -1646,6 +1862,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf local_conf;
        void **txq;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
 
@@ -1655,10 +1872,11 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
                return -EINVAL;
        }
 
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_setup, -ENOTSUP);
 
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
 
        /* Use default specified by driver, if nb_tx_desc is zero */
        if (nb_tx_desc == 0) {
@@ -1671,7 +1889,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
            nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
            nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
                RTE_ETHDEV_LOG(ERR,
-                       "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, = %hu, and a product of %hu\n",
+                       "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a multiple of %hu\n",
                        nb_tx_desc, dev_info.tx_desc_lim.nb_max,
                        dev_info.tx_desc_lim.nb_min,
                        dev_info.tx_desc_lim.nb_align);
@@ -1723,7 +1941,7 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
             local_conf.offloads) {
                RTE_ETHDEV_LOG(ERR,
                        "Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
-                       "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n",
+                       "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
                        port_id, tx_queue_id, local_conf.offloads,
                        dev_info.tx_queue_offload_capa,
                        __func__);
@@ -1734,6 +1952,77 @@ rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
                       tx_queue_id, nb_tx_desc, socket_id, &local_conf));
 }
 
+int
+rte_eth_tx_hairpin_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
+                              uint16_t nb_tx_desc,
+                              const struct rte_eth_hairpin_conf *conf)
+{
+       struct rte_eth_dev *dev;
+       struct rte_eth_hairpin_cap cap;
+       void **txq;
+       int i;
+       int count;
+       int ret;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+       dev = &rte_eth_devices[port_id];
+       if (tx_queue_id >= dev->data->nb_tx_queues) {
+               RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
+               return -EINVAL;
+       }
+       ret = rte_eth_dev_hairpin_capability_get(port_id, &cap);
+       if (ret != 0)
+               return ret;
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_hairpin_queue_setup,
+                               -ENOTSUP);
+       /* if nb_tx_desc is zero use max number of desc from the driver. */
+       if (nb_tx_desc == 0)
+               nb_tx_desc = cap.max_nb_desc;
+       if (nb_tx_desc > cap.max_nb_desc) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Invalid value for nb_tx_desc(=%hu), should be: <= %hu",
+                       nb_tx_desc, cap.max_nb_desc);
+               return -EINVAL;
+       }
+       if (conf->peer_count > cap.max_tx_2_rx) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Invalid value for number of peers for Tx queue(=%hu), should be: <= %hu",
+                       conf->peer_count, cap.max_tx_2_rx);
+               return -EINVAL;
+       }
+       if (conf->peer_count == 0) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Invalid value for number of peers for Tx queue(=%hu), should be: > 0",
+                       conf->peer_count);
+               return -EINVAL;
+       }
+       for (i = 0, count = 0; i < dev->data->nb_tx_queues &&
+            cap.max_nb_queues != UINT16_MAX; i++) {
+               if (i == tx_queue_id || rte_eth_dev_is_tx_hairpin_queue(dev, i))
+                       count++;
+       }
+       if (count > cap.max_nb_queues) {
+               RTE_ETHDEV_LOG(ERR, "Too many Tx hairpin queues, max is %d",
+                       cap.max_nb_queues);
+               return -EINVAL;
+       }
+       if (dev->data->dev_started)
+               return -EBUSY;
+       txq = dev->data->tx_queues;
+       if (txq[tx_queue_id] != NULL) {
+               RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release,
+                                       -ENOTSUP);
+               (*dev->dev_ops->tx_queue_release)(txq[tx_queue_id]);
+               txq[tx_queue_id] = NULL;
+       }
+       ret = (*dev->dev_ops->tx_hairpin_queue_setup)
+               (dev, tx_queue_id, nb_tx_desc, conf);
+       if (ret == 0)
+               dev->data->tx_queue_state[tx_queue_id] =
+                       RTE_ETH_QUEUE_STATE_HAIRPIN;
+       return eth_err(port_id, ret);
+}
+
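Illustrative sketch (not part of this patch): binding one Rx hairpin queue to
one Tx hairpin queue on the same port. Queue index "q" is assumed to refer to
a hairpin queue slot configured by the application, and the peers[]/port/queue
members are the ones declared for struct rte_eth_hairpin_conf in rte_ethdev.h.

    #include <rte_ethdev.h>

    /* Set up hairpin queue 'q' looping back to the same port;
     * nb_desc == 0 lets the driver pick its maximum. */
    static int
    setup_hairpin_pair(uint16_t port_id, uint16_t q)
    {
            struct rte_eth_hairpin_conf conf = { .peer_count = 1 };
            int ret;

            conf.peers[0].port = port_id;
            conf.peers[0].queue = q;
            ret = rte_eth_rx_hairpin_queue_setup(port_id, q, 0, &conf);
            if (ret != 0)
                    return ret;
            return rte_eth_tx_hairpin_queue_setup(port_id, q, 0, &conf);
    }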
 void
 rte_eth_tx_buffer_drop_callback(struct rte_mbuf **pkts, uint16_t unsent,
                void *userdata __rte_unused)
@@ -1799,30 +2088,46 @@ rte_eth_tx_done_cleanup(uint16_t port_id, uint16_t queue_id, uint32_t free_cnt)
        return eth_err(port_id, ret);
 }
 
-void
+int
 rte_eth_promiscuous_enable(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
+       int diag = 0;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
-       RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_enable);
-       (*dev->dev_ops->promiscuous_enable)(dev);
-       dev->data->promiscuous = 1;
+       if (dev->data->promiscuous == 1)
+               return 0;
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_enable, -ENOTSUP);
+
+       diag = (*dev->dev_ops->promiscuous_enable)(dev);
+       dev->data->promiscuous = (diag == 0) ? 1 : 0;
+
+       return eth_err(port_id, diag);
 }
 
-void
+int
 rte_eth_promiscuous_disable(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
+       int diag = 0;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
-       RTE_FUNC_PTR_OR_RET(*dev->dev_ops->promiscuous_disable);
+       if (dev->data->promiscuous == 0)
+               return 0;
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->promiscuous_disable, -ENOTSUP);
+
        dev->data->promiscuous = 0;
-       (*dev->dev_ops->promiscuous_disable)(dev);
+       diag = (*dev->dev_ops->promiscuous_disable)(dev);
+       if (diag != 0)
+               dev->data->promiscuous = 1;
+
+       return eth_err(port_id, diag);
 }
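Illustrative sketch (not part of this patch): the promiscuous/allmulticast
toggles now return an error code instead of void, so failures (including
-ENOTSUP from drivers lacking the callback) can be detected by the caller.

    #include <stdio.h>
    #include <rte_errno.h>
    #include <rte_ethdev.h>

    static void
    enable_promisc(uint16_t port_id)
    {
            int ret = rte_eth_promiscuous_enable(port_id);

            if (ret != 0)
                    printf("port %u: promiscuous enable failed: %s\n",
                           port_id, rte_strerror(-ret));
    }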
 
 int
@@ -1836,30 +2141,44 @@ rte_eth_promiscuous_get(uint16_t port_id)
        return dev->data->promiscuous;
 }
 
-void
+int
 rte_eth_allmulticast_enable(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
+       int diag;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
-       RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_enable);
-       (*dev->dev_ops->allmulticast_enable)(dev);
-       dev->data->all_multicast = 1;
+       if (dev->data->all_multicast == 1)
+               return 0;
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_enable, -ENOTSUP);
+       diag = (*dev->dev_ops->allmulticast_enable)(dev);
+       dev->data->all_multicast = (diag == 0) ? 1 : 0;
+
+       return eth_err(port_id, diag);
 }
 
-void
+int
 rte_eth_allmulticast_disable(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
+       int diag;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
-       RTE_FUNC_PTR_OR_RET(*dev->dev_ops->allmulticast_disable);
+       if (dev->data->all_multicast == 0)
+               return 0;
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->allmulticast_disable, -ENOTSUP);
        dev->data->all_multicast = 0;
-       (*dev->dev_ops->allmulticast_disable)(dev);
+       diag = (*dev->dev_ops->allmulticast_disable)(dev);
+       if (diag != 0)
+               dev->data->all_multicast = 1;
+
+       return eth_err(port_id, diag);
 }
 
 int
@@ -1873,40 +2192,44 @@ rte_eth_allmulticast_get(uint16_t port_id)
        return dev->data->all_multicast;
 }
 
-void
+int
 rte_eth_link_get(uint16_t port_id, struct rte_eth_link *eth_link)
 {
        struct rte_eth_dev *dev;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
        if (dev->data->dev_conf.intr_conf.lsc &&
            dev->data->dev_started)
                rte_eth_linkstatus_get(dev, eth_link);
        else {
-               RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+               RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 1);
                *eth_link = dev->data->dev_link;
        }
+
+       return 0;
 }
 
-void
+int
 rte_eth_link_get_nowait(uint16_t port_id, struct rte_eth_link *eth_link)
 {
        struct rte_eth_dev *dev;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
        if (dev->data->dev_conf.intr_conf.lsc &&
            dev->data->dev_started)
                rte_eth_linkstatus_get(dev, eth_link);
        else {
-               RTE_FUNC_PTR_OR_RET(*dev->dev_ops->link_update);
+               RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->link_update, -ENOTSUP);
                (*dev->dev_ops->link_update)(dev, 0);
                *eth_link = dev->data->dev_link;
        }
+
+       return 0;
 }
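Illustrative sketch (not part of this patch): link queries are now
int-returning too, so a missing link_update callback (-ENOTSUP) or an invalid
port (-ENODEV) can be told apart from a valid result.

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    show_link(uint16_t port_id)
    {
            struct rte_eth_link link;

            if (rte_eth_link_get_nowait(port_id, &link) != 0)
                    return;
            printf("port %u: link is %s\n", port_id,
                   link.link_status ? "up" : "down");
    }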
 
 int
@@ -1928,12 +2251,16 @@ int
 rte_eth_stats_reset(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->stats_reset, -ENOTSUP);
-       (*dev->dev_ops->stats_reset)(dev);
+       ret = (*dev->dev_ops->stats_reset)(dev);
+       if (ret != 0)
+               return eth_err(port_id, ret);
+
        dev->data->rx_mbuf_alloc_failed = 0;
 
        return 0;
@@ -2036,9 +2363,9 @@ rte_eth_basic_stats_get_names(struct rte_eth_dev *dev,
        uint16_t num_q;
 
        for (idx = 0; idx < RTE_NB_STATS; idx++) {
-               snprintf(xstats_names[cnt_used_entries].name,
-                       sizeof(xstats_names[0].name),
-                       "%s", rte_stats_strings[idx].name);
+               strlcpy(xstats_names[cnt_used_entries].name,
+                       rte_stats_strings[idx].name,
+                       sizeof(xstats_names[0].name));
                cnt_used_entries++;
        }
        num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
@@ -2409,22 +2736,20 @@ rte_eth_xstats_get(uint16_t port_id, struct rte_eth_xstat *xstats,
 }
 
 /* reset ethdev extended statistics */
-void
+int
 rte_eth_xstats_reset(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
        /* implemented by the driver */
-       if (dev->dev_ops->xstats_reset != NULL) {
-               (*dev->dev_ops->xstats_reset)(dev);
-               return;
-       }
+       if (dev->dev_ops->xstats_reset != NULL)
+               return eth_err(port_id, (*dev->dev_ops->xstats_reset)(dev));
 
        /* fallback to default */
-       rte_eth_stats_reset(port_id);
+       return rte_eth_stats_reset(port_id);
 }
 
 static int
@@ -2483,7 +2808,7 @@ rte_eth_dev_fw_version_get(uint16_t port_id, char *fw_version, size_t fw_size)
                                                        fw_version, fw_size));
 }
 
-void
+int
 rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
 {
        struct rte_eth_dev *dev;
@@ -2491,23 +2816,41 @@ rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
                .nb_max = UINT16_MAX,
                .nb_min = 0,
                .nb_align = 1,
+               .nb_seg_max = UINT16_MAX,
+               .nb_mtu_seg_max = UINT16_MAX,
        };
+       int diag;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       /*
+        * Init dev_info before the port_id check so that callers which
+        * ignore the return status never read uninitialized data.
+        */
+       memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
-       memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
        dev_info->rx_desc_lim = lim;
        dev_info->tx_desc_lim = lim;
        dev_info->device = dev->device;
+       dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+       dev_info->max_mtu = UINT16_MAX;
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+       diag = (*dev->dev_ops->dev_infos_get)(dev, dev_info);
+       if (diag != 0) {
+               /* Cleanup already filled in device information */
+               memset(dev_info, 0, sizeof(struct rte_eth_dev_info));
+               return eth_err(port_id, diag);
+       }
 
-       RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
-       (*dev->dev_ops->dev_infos_get)(dev, dev_info);
        dev_info->driver_name = dev->device->driver->name;
        dev_info->nb_rx_queues = dev->data->nb_rx_queues;
        dev_info->nb_tx_queues = dev->data->nb_tx_queues;
 
        dev_info->dev_flags = &dev->data->dev_flags;
+
+       return 0;
 }
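Illustrative sketch (not part of this patch): rte_eth_dev_info_get() now
zeroes the structure up front and reports failures, so callers should check
the return value before trusting any of the limits.

    #include <stdio.h>
    #include <rte_ethdev.h>

    static int
    print_dev_limits(uint16_t port_id)
    {
            struct rte_eth_dev_info info;
            int ret;

            ret = rte_eth_dev_info_get(port_id, &info);
            if (ret != 0)
                    return ret;
            printf("port %u: driver %s, max rxq %u, max txq %u\n",
                   port_id, info.driver_name,
                   info.max_rx_queues, info.max_tx_queues);
            return 0;
    }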
 
 int
@@ -2536,14 +2879,16 @@ rte_eth_dev_get_supported_ptypes(uint16_t port_id, uint32_t ptype_mask,
        return j;
 }
 
-void
-rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
+int
+rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
 {
        struct rte_eth_dev *dev;
 
-       RTE_ETH_VALID_PORTID_OR_RET(port_id);
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
-       ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
+       rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
+
+       return 0;
 }
 
 
@@ -2563,12 +2908,28 @@ int
 rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
 {
        int ret;
+       struct rte_eth_dev_info dev_info;
        struct rte_eth_dev *dev;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
 
+       /*
+        * Check if the device supports dev_infos_get, if it does not
+        * skip min_mtu/max_mtu validation here as this requires values
+        * that are populated within the call to rte_eth_dev_info_get()
+        * which relies on dev->dev_ops->dev_infos_get.
+        */
+       if (*dev->dev_ops->dev_infos_get != NULL) {
+               ret = rte_eth_dev_info_get(port_id, &dev_info);
+               if (ret != 0)
+                       return ret;
+
+               if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
+                       return -EINVAL;
+       }
+
        ret = (*dev->dev_ops->mtu_set)(dev, mtu);
        if (!ret)
                dev->data->mtu = mtu;
@@ -2659,53 +3020,56 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
        int mask = 0;
        int cur, org = 0;
        uint64_t orig_offloads;
+       uint64_t *dev_offloads;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
 
        /* save original values in case of failure */
        orig_offloads = dev->data->dev_conf.rxmode.offloads;
+       dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
        /*check which option changed by application*/
        cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
-       org = !!(dev->data->dev_conf.rxmode.offloads &
-                DEV_RX_OFFLOAD_VLAN_STRIP);
+       org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
        if (cur != org) {
                if (cur)
-                       dev->data->dev_conf.rxmode.offloads |=
-                               DEV_RX_OFFLOAD_VLAN_STRIP;
+                       *dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
                else
-                       dev->data->dev_conf.rxmode.offloads &=
-                               ~DEV_RX_OFFLOAD_VLAN_STRIP;
+                       *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
                mask |= ETH_VLAN_STRIP_MASK;
        }
 
        cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
-       org = !!(dev->data->dev_conf.rxmode.offloads &
-                DEV_RX_OFFLOAD_VLAN_FILTER);
+       org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
        if (cur != org) {
                if (cur)
-                       dev->data->dev_conf.rxmode.offloads |=
-                               DEV_RX_OFFLOAD_VLAN_FILTER;
+                       *dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
                else
-                       dev->data->dev_conf.rxmode.offloads &=
-                               ~DEV_RX_OFFLOAD_VLAN_FILTER;
+                       *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
                mask |= ETH_VLAN_FILTER_MASK;
        }
 
        cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
-       org = !!(dev->data->dev_conf.rxmode.offloads &
-                DEV_RX_OFFLOAD_VLAN_EXTEND);
+       org = !!(*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
        if (cur != org) {
                if (cur)
-                       dev->data->dev_conf.rxmode.offloads |=
-                               DEV_RX_OFFLOAD_VLAN_EXTEND;
+                       *dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
                else
-                       dev->data->dev_conf.rxmode.offloads &=
-                               ~DEV_RX_OFFLOAD_VLAN_EXTEND;
+                       *dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
                mask |= ETH_VLAN_EXTEND_MASK;
        }
 
+       cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
+       org = !!(*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+       if (cur != org) {
+               if (cur)
+                       *dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+               else
+                       *dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
+               mask |= ETH_QINQ_STRIP_MASK;
+       }
+
        /*no change*/
        if (mask == 0)
                return ret;
@@ -2714,7 +3078,7 @@ rte_eth_dev_set_vlan_offload(uint16_t port_id, int offload_mask)
        ret = (*dev->dev_ops->vlan_offload_set)(dev, mask);
        if (ret) {
                /* hit an error restore  original values */
-               dev->data->dev_conf.rxmode.offloads = orig_offloads;
+               *dev_offloads = orig_offloads;
        }
 
        return eth_err(port_id, ret);
@@ -2724,23 +3088,25 @@ int
 rte_eth_dev_get_vlan_offload(uint16_t port_id)
 {
        struct rte_eth_dev *dev;
+       uint64_t *dev_offloads;
        int ret = 0;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
        dev = &rte_eth_devices[port_id];
+       dev_offloads = &dev->data->dev_conf.rxmode.offloads;
 
-       if (dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_VLAN_STRIP)
+       if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                ret |= ETH_VLAN_STRIP_OFFLOAD;
 
-       if (dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_VLAN_FILTER)
+       if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                ret |= ETH_VLAN_FILTER_OFFLOAD;
 
-       if (dev->data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_VLAN_EXTEND)
+       if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
                ret |= ETH_VLAN_EXTEND_OFFLOAD;
 
+       if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
+               ret |= ETH_QINQ_STRIP_OFFLOAD;
+
        return ret;
 }
 
@@ -2907,10 +3273,17 @@ rte_eth_dev_rss_hash_update(uint16_t port_id,
 {
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info = { .flow_type_rss_offloads = 0, };
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
+
+       rss_conf->rss_hf = rte_eth_rss_hf_refine(rss_conf->rss_hf);
+
        dev = &rte_eth_devices[port_id];
-       rte_eth_dev_info_get(port_id, &dev_info);
        if ((dev_info.flow_type_rss_offloads | rss_conf->rss_hf) !=
            dev_info.flow_type_rss_offloads) {
                RTE_ETHDEV_LOG(ERR,
@@ -3011,26 +3384,29 @@ rte_eth_led_off(uint16_t port_id)
  * an empty spot.
  */
 static int
-get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
+get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
 {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        unsigned i;
+       int ret;
 
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return -1;
 
        for (i = 0; i < dev_info.max_mac_addrs; i++)
-               if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
+               if (memcmp(addr, &dev->data->mac_addrs[i],
+                               RTE_ETHER_ADDR_LEN) == 0)
                        return i;
 
        return -1;
 }
 
-static const struct ether_addr null_mac_addr;
+static const struct rte_ether_addr null_mac_addr;
 
 int
-rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
+rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
                        uint32_t pool)
 {
        struct rte_eth_dev *dev;
@@ -3042,7 +3418,7 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
        dev = &rte_eth_devices[port_id];
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
 
-       if (is_zero_ether_addr(addr)) {
+       if (rte_is_zero_ether_addr(addr)) {
                RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
                        port_id);
                return -EINVAL;
@@ -3073,7 +3449,7 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
 
        if (ret == 0) {
                /* Update address in NIC data structure */
-               ether_addr_copy(addr, &dev->data->mac_addrs[index]);
+               rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
 
                /* Update pool bitmap in NIC data structure */
                dev->data->mac_pool_sel[index] |= (1ULL << pool);
@@ -3083,7 +3459,7 @@ rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
 }
 
 int
-rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
+rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
 {
        struct rte_eth_dev *dev;
        int index;
@@ -3105,7 +3481,7 @@ rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
        (*dev->dev_ops->mac_addr_remove)(dev, index);
 
        /* Update address in NIC data structure */
-       ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
+       rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
 
        /* reset pool bitmap */
        dev->data->mac_pool_sel[index] = 0;
@@ -3114,14 +3490,14 @@ rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
 }
 
 int
-rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
+rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
 {
        struct rte_eth_dev *dev;
        int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 
-       if (!is_valid_assigned_ether_addr(addr))
+       if (!rte_is_valid_assigned_ether_addr(addr))
                return -EINVAL;
 
        dev = &rte_eth_devices[port_id];
@@ -3132,7 +3508,7 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
                return ret;
 
        /* Update default address in NIC data structure */
-       ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+       rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
 
        return 0;
 }
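/*
 * Editor's illustrative sketch, not part of this patch: how an application
 * typically drives the MAC address APIs renamed above (rte_ether_addr,
 * rte_eth_dev_default_mac_addr_set, rte_eth_dev_mac_addr_add). The port id,
 * address bytes and pool number are placeholder values for the example.
 */
#include <rte_ethdev.h>
#include <rte_ether.h>

static int
example_set_macs(uint16_t port_id)
{
        struct rte_ether_addr primary = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
        struct rte_ether_addr secondary = {
                .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 } };
        int ret;

        /* Replace the default (index 0) address; it must be a valid,
         * non-zero unicast address or the call fails with -EINVAL. */
        ret = rte_eth_dev_default_mac_addr_set(port_id, &primary);
        if (ret != 0)
                return ret;

        /* Add a secondary filter address associated with pool 0. */
        return rte_eth_dev_mac_addr_add(port_id, &secondary, 0);
}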
@@ -3143,26 +3519,30 @@ rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
  * an empty spot.
  */
 static int
-get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
+get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
 {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_dev *dev = &rte_eth_devices[port_id];
        unsigned i;
+       int ret;
+
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return -1;
 
-       rte_eth_dev_info_get(port_id, &dev_info);
        if (!dev->data->hash_mac_addrs)
                return -1;
 
        for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
                if (memcmp(addr, &dev->data->hash_mac_addrs[i],
-                       ETHER_ADDR_LEN) == 0)
+                       RTE_ETHER_ADDR_LEN) == 0)
                        return i;
 
        return -1;
 }
 
 int
-rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
+rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
                                uint8_t on)
 {
        int index;
@@ -3172,7 +3552,7 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 
        dev = &rte_eth_devices[port_id];
-       if (is_zero_ether_addr(addr)) {
+       if (rte_is_zero_ether_addr(addr)) {
                RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
                        port_id);
                return -EINVAL;
@@ -3204,10 +3584,10 @@ rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
        if (ret == 0) {
                /* Update address in NIC data structure */
                if (on)
-                       ether_addr_copy(addr,
+                       rte_ether_addr_copy(addr,
                                        &dev->data->hash_mac_addrs[index]);
                else
-                       ether_addr_copy(&null_mac_addr,
+                       rte_ether_addr_copy(&null_mac_addr,
                                        &dev->data->hash_mac_addrs[index]);
        }
 
@@ -3234,11 +3614,15 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_link link;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
+
        dev = &rte_eth_devices[port_id];
-       rte_eth_dev_info_get(port_id, &dev_info);
        link = dev->data->dev_link;
 
        if (queue_idx > dev_info.max_tx_queues) {
@@ -3517,7 +3901,7 @@ rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
        return 0;
 }
 
-int __rte_experimental
+int
 rte_eth_dev_rx_intr_ctl_q_get_fd(uint16_t port_id, uint16_t queue_id)
 {
        struct rte_intr_handle *intr_handle;
@@ -3561,9 +3945,15 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
 {
        char z_name[RTE_MEMZONE_NAMESIZE];
        const struct rte_memzone *mz;
+       int rc;
 
-       snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
-                dev->data->port_id, queue_id, ring_name);
+       rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+                     dev->data->port_id, queue_id, ring_name);
+       if (rc >= RTE_MEMZONE_NAMESIZE) {
+               RTE_ETHDEV_LOG(ERR, "ring name too long\n");
+               rte_errno = ENAMETOOLONG;
+               return NULL;
+       }
 
        mz = rte_memzone_lookup(z_name);
        if (mz)
@@ -3573,7 +3963,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
                        RTE_MEMZONE_IOVA_CONTIG, align);
 }
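/*
 * Editor's illustrative sketch, not part of this patch: a PMD-style call to
 * rte_eth_dma_zone_reserve(), whose ring-name length handling is tightened
 * above. This helper is driver-internal (declared in rte_ethdev_driver.h);
 * the ring size and alignment below are placeholder values.
 */
#include <rte_ethdev_driver.h>

static const struct rte_memzone *
example_rx_ring_zone(struct rte_eth_dev *dev, uint16_t queue_id, int socket_id)
{
        /* "eth_p<port>_q<queue>_rx_ring" must fit in RTE_MEMZONE_NAMESIZE;
         * otherwise the helper now fails with rte_errno = ENAMETOOLONG. */
        return rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
                                        4096 * sizeof(uint64_t), 128,
                                        socket_id);
}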
 
-int __rte_experimental
+int
 rte_eth_dev_create(struct rte_device *device, const char *name,
        size_t priv_data_size,
        ethdev_bus_specific_init ethdev_bus_specific_init,
@@ -3636,7 +4026,7 @@ probe_failed:
        return retval;
 }
 
-int  __rte_experimental
+int
 rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
        ethdev_uninit_t ethdev_uninit)
 {
@@ -3647,11 +4037,10 @@ rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
                return -ENODEV;
 
        RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
-       if (ethdev_uninit) {
-               ret = ethdev_uninit(ethdev);
-               if (ret)
-                       return ret;
-       }
+
+       ret = ethdev_uninit(ethdev);
+       if (ret)
+               return ret;
 
        return rte_eth_dev_release_port(ethdev);
 }
@@ -3763,12 +4152,19 @@ rte_eth_add_rx_callback(uint16_t port_id, uint16_t queue_id,
        rte_errno = ENOTSUP;
        return NULL;
 #endif
+       struct rte_eth_dev *dev;
+
        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_rx_queues) {
                rte_errno = EINVAL;
                return NULL;
        }
+       dev = &rte_eth_devices[port_id];
+       if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
+               rte_errno = EINVAL;
+               return NULL;
+       }
        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
 
        if (cb == NULL) {
@@ -3840,6 +4236,8 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
        rte_errno = ENOTSUP;
        return NULL;
 #endif
+       struct rte_eth_dev *dev;
+
        /* check input parameters */
        if (!rte_eth_dev_is_valid_port(port_id) || fn == NULL ||
                    queue_id >= rte_eth_devices[port_id].data->nb_tx_queues) {
@@ -3847,6 +4245,12 @@ rte_eth_add_tx_callback(uint16_t port_id, uint16_t queue_id,
                return NULL;
        }
 
+       dev = &rte_eth_devices[port_id];
+       if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
+               rte_errno = EINVAL;
+               return NULL;
+       }
+
        struct rte_eth_rxtx_callback *cb = rte_zmalloc(NULL, sizeof(*cb), 0);
 
        if (cb == NULL) {
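/*
 * Editor's illustrative sketch, not part of this patch: registering an Rx
 * callback on a regular queue with the public API whose internals the hunks
 * above modify. With the added checks, rte_eth_add_rx_callback() now rejects
 * hairpin queues with rte_errno = EINVAL. Queue 0 and the counter are
 * placeholders for the example.
 */
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_errno.h>

static uint16_t
count_cb(uint16_t port_id __rte_unused, uint16_t queue __rte_unused,
         struct rte_mbuf *pkts[] __rte_unused, uint16_t nb_pkts,
         uint16_t max_pkts __rte_unused, void *user_param)
{
        *(uint64_t *)user_param += nb_pkts;
        return nb_pkts; /* keep the whole burst */
}

static int
example_register_rx_counter(uint16_t port_id, uint64_t *counter)
{
        const struct rte_eth_rxtx_callback *cb;

        cb = rte_eth_add_rx_callback(port_id, 0, count_cb, counter);
        return cb == NULL ? -rte_errno : 0;
}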
@@ -3960,6 +4364,13 @@ rte_eth_rx_queue_info_get(uint16_t port_id, uint16_t queue_id,
                return -EINVAL;
        }
 
+       if (rte_eth_dev_is_rx_hairpin_queue(dev, queue_id)) {
+               RTE_ETHDEV_LOG(INFO,
+                       "Can't get hairpin Rx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
+                       queue_id, port_id);
+               return -EINVAL;
+       }
+
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rxq_info_get, -ENOTSUP);
 
        memset(qinfo, 0, sizeof(*qinfo));
@@ -3984,6 +4395,13 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
                return -EINVAL;
        }
 
+       if (rte_eth_dev_is_tx_hairpin_queue(dev, queue_id)) {
+               RTE_ETHDEV_LOG(INFO,
+                       "Can't get hairpin Tx queue %"PRIu16" info of device with port_id=%"PRIu16"\n",
+                       queue_id, port_id);
+               return -EINVAL;
+       }
+
        RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->txq_info_get, -ENOTSUP);
 
        memset(qinfo, 0, sizeof(*qinfo));
@@ -3992,9 +4410,57 @@ rte_eth_tx_queue_info_get(uint16_t port_id, uint16_t queue_id,
        return 0;
 }
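/*
 * Editor's illustrative sketch, not part of this patch: querying Rx queue
 * info, which the hunks above now refuse for hairpin queues. The nb_desc and
 * scattered_rx fields of struct rte_eth_rxq_info come from rte_ethdev.h, not
 * from this diff.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
example_print_rxq(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_rxq_info qinfo;

        if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) != 0)
                return;
        printf("port %u rxq %u: %u descriptors, scattered=%u\n",
               (unsigned int)port_id, (unsigned int)queue_id,
               (unsigned int)qinfo.nb_desc, (unsigned int)qinfo.scattered_rx);
}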
 
+int
+rte_eth_rx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
+                         struct rte_eth_burst_mode *mode)
+{
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+       if (mode == NULL)
+               return -EINVAL;
+
+       dev = &rte_eth_devices[port_id];
+
+       if (queue_id >= dev->data->nb_rx_queues) {
+               RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+               return -EINVAL;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_burst_mode_get, -ENOTSUP);
+       memset(mode, 0, sizeof(*mode));
+       return eth_err(port_id,
+                      dev->dev_ops->rx_burst_mode_get(dev, queue_id, mode));
+}
+
+int
+rte_eth_tx_burst_mode_get(uint16_t port_id, uint16_t queue_id,
+                         struct rte_eth_burst_mode *mode)
+{
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+       if (mode == NULL)
+               return -EINVAL;
+
+       dev = &rte_eth_devices[port_id];
+
+       if (queue_id >= dev->data->nb_tx_queues) {
+               RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+               return -EINVAL;
+       }
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_burst_mode_get, -ENOTSUP);
+       memset(mode, 0, sizeof(*mode));
+       return eth_err(port_id,
+                      dev->dev_ops->tx_burst_mode_get(dev, queue_id, mode));
+}
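/*
 * Editor's illustrative sketch, not part of this patch: calling the new
 * burst mode query from an application. The assumption that struct
 * rte_eth_burst_mode carries a human-readable info string follows current
 * rte_ethdev.h and may differ in older snapshots; treat it as an assumption.
 */
#include <stdio.h>
#include <errno.h>
#include <rte_ethdev.h>

static int
example_log_rx_burst_mode(uint16_t port_id, uint16_t queue_id)
{
        struct rte_eth_burst_mode mode;
        int ret;

        ret = rte_eth_rx_burst_mode_get(port_id, queue_id, &mode);
        if (ret == -ENOTSUP)
                return 0; /* driver does not describe its burst mode */
        if (ret != 0)
                return ret;

        /* mode.info is assumed to hold the driver's description. */
        printf("port %u rxq %u burst mode: %s\n",
               (unsigned int)port_id, (unsigned int)queue_id, mode.info);
        return 0;
}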
+
 int
 rte_eth_dev_set_mc_addr_list(uint16_t port_id,
-                            struct ether_addr *mc_addr_set,
+                            struct rte_ether_addr *mc_addr_set,
                             uint32_t nb_mc_addr)
 {
        struct rte_eth_dev *dev;
@@ -4098,6 +4564,18 @@ rte_eth_timesync_write_time(uint16_t port_id, const struct timespec *timestamp)
                                                                timestamp));
 }
 
+int
+rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
+{
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+       dev = &rte_eth_devices[port_id];
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
+       return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
+}
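/*
 * Editor's illustrative sketch, not part of this patch: using the new
 * rte_eth_read_clock() to estimate the device clock frequency, so raw NIC
 * timestamps can later be converted to nanoseconds. The 100 ms sampling
 * window is an arbitrary choice for the example.
 */
#include <rte_cycles.h>
#include <rte_ethdev.h>

static int
example_device_clock_hz(uint16_t port_id, uint64_t *hz)
{
        uint64_t start, end;
        int ret;

        ret = rte_eth_read_clock(port_id, &start);
        if (ret != 0)
                return ret;
        rte_delay_ms(100);
        ret = rte_eth_read_clock(port_id, &end);
        if (ret != 0)
                return ret;

        *hz = (end - start) * 10; /* ticks over 100 ms -> ticks per second */
        return 0;
}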
+
 int
 rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
 {
@@ -4146,7 +4624,7 @@ rte_eth_dev_set_eeprom(uint16_t port_id, struct rte_dev_eeprom_info *info)
        return eth_err(port_id, (*dev->dev_ops->set_eeprom)(dev, info));
 }
 
-int __rte_experimental
+int
 rte_eth_dev_get_module_info(uint16_t port_id,
                            struct rte_eth_dev_module_info *modinfo)
 {
@@ -4159,7 +4637,7 @@ rte_eth_dev_get_module_info(uint16_t port_id,
        return (*dev->dev_ops->get_module_info)(dev, modinfo);
 }
 
-int __rte_experimental
+int
 rte_eth_dev_get_module_eeprom(uint16_t port_id,
                              struct rte_dev_eeprom_info *info)
 {
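/*
 * Editor's illustrative sketch, not part of this patch: the two-step module
 * EEPROM read offered by the now-stable rte_eth_dev_get_module_info() and
 * rte_eth_dev_get_module_eeprom(). Using malloc() and dumping the full
 * length from offset 0 are example decisions, not requirements.
 */
#include <stdlib.h>
#include <rte_ethdev.h>

static uint8_t *
example_read_module_eeprom(uint16_t port_id, uint32_t *len)
{
        struct rte_eth_dev_module_info modinfo;
        struct rte_dev_eeprom_info eeprom = { 0 };
        uint8_t *buf;

        if (rte_eth_dev_get_module_info(port_id, &modinfo) != 0)
                return NULL;

        buf = malloc(modinfo.eeprom_len);
        if (buf == NULL)
                return NULL;

        eeprom.data = buf;
        eeprom.offset = 0;
        eeprom.length = modinfo.eeprom_len;
        if (rte_eth_dev_get_module_eeprom(port_id, &eeprom) != 0) {
                free(buf);
                return NULL;
        }

        *len = modinfo.eeprom_len;
        return buf;
}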
@@ -4261,15 +4739,14 @@ rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
                                 uint16_t *nb_rx_desc,
                                 uint16_t *nb_tx_desc)
 {
-       struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
+       int ret;
 
        RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
 
-       dev = &rte_eth_devices[port_id];
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
-
-       rte_eth_dev_info_get(port_id, &dev_info);
+       ret = rte_eth_dev_info_get(port_id, &dev_info);
+       if (ret != 0)
+               return ret;
 
        if (nb_rx_desc != NULL)
                rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
@@ -4280,6 +4757,38 @@ rte_eth_dev_adjust_nb_rx_tx_desc(uint16_t port_id,
        return 0;
 }
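/*
 * Editor's illustrative sketch, not part of this patch: clamping requested
 * ring sizes with rte_eth_dev_adjust_nb_rx_tx_desc() before queue setup,
 * now that it propagates the rte_eth_dev_info_get() error code. The initial
 * 1024/1024 request, queue 0 and the mbuf pool are placeholders.
 */
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
example_setup_rx_queue(uint16_t port_id, struct rte_mempool *mb_pool)
{
        uint16_t nb_rxd = 1024, nb_txd = 1024;
        int ret;

        /* Both values are adjusted in place to the device's descriptor
         * limits and alignment reported in rte_eth_dev_info. */
        ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
        if (ret != 0)
                return ret;

        return rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
                                      rte_eth_dev_socket_id(port_id),
                                      NULL, mb_pool);
}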
 
+int
+rte_eth_dev_hairpin_capability_get(uint16_t port_id,
+                                  struct rte_eth_hairpin_cap *cap)
+{
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+       dev = &rte_eth_devices[port_id];
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_cap_get, -ENOTSUP);
+       memset(cap, 0, sizeof(*cap));
+       return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
+}
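/*
 * Editor's illustrative sketch, not part of this patch: probing for hairpin
 * support before attempting to configure hairpin queues. The max_nb_queues
 * field name is taken from the hairpin capability structure in rte_ethdev.h,
 * not from this diff.
 */
#include <rte_ethdev.h>

static int
example_hairpin_supported(uint16_t port_id)
{
        struct rte_eth_hairpin_cap cap;

        /* Returns -ENOTSUP when the driver implements no hairpin support. */
        if (rte_eth_dev_hairpin_capability_get(port_id, &cap) != 0)
                return 0;

        return cap.max_nb_queues > 0;
}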
+
+int
+rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       if (dev->data->rx_queue_state[queue_id] ==
+           RTE_ETH_QUEUE_STATE_HAIRPIN)
+               return 1;
+       return 0;
+}
+
+int
+rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+       if (dev->data->tx_queue_state[queue_id] ==
+           RTE_ETH_QUEUE_STATE_HAIRPIN)
+               return 1;
+       return 0;
+}
+
 int
 rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
 {
@@ -4311,11 +4820,11 @@ enum rte_eth_switch_domain_state {
  * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
  * ethdev ports in a single process.
  */
-struct rte_eth_dev_switch {
+static struct rte_eth_dev_switch {
        enum rte_eth_switch_domain_state state;
 } rte_eth_switch_domains[RTE_MAX_ETHPORTS];
 
-int __rte_experimental
+int
 rte_eth_switch_domain_alloc(uint16_t *domain_id)
 {
        unsigned int i;
@@ -4336,7 +4845,7 @@ rte_eth_switch_domain_alloc(uint16_t *domain_id)
        return -ENOSPC;
 }
 
-int __rte_experimental
+int
 rte_eth_switch_domain_free(uint16_t domain_id)
 {
        if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
@@ -4416,7 +4925,7 @@ rte_eth_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
        }
 }
 
-int __rte_experimental
+int
 rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
 {
        struct rte_kvargs args;