diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index f2519f0..52eb6df 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -18,6 +18,7 @@
 #include <rte_dev.h>
 #include <rte_fslmc.h>
 #include <rte_flow_driver.h>
+#include "rte_dpaa2_mempool.h"
 
 #include "dpaa2_pmd_logs.h"
 #include <fslmc_vfio.h>
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-               DEV_RX_OFFLOAD_CHECKSUM |
-               DEV_RX_OFFLOAD_SCTP_CKSUM |
-               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_VLAN_FILTER |
-               DEV_RX_OFFLOAD_JUMBO_FRAME |
-               DEV_RX_OFFLOAD_TIMESTAMP;
+               RTE_ETH_RX_OFFLOAD_CHECKSUM |
+               RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+               RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+               RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-               DEV_RX_OFFLOAD_RSS_HASH |
-               DEV_RX_OFFLOAD_SCATTER;
+               RTE_ETH_RX_OFFLOAD_RSS_HASH |
+               RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_UDP_CKSUM |
-               DEV_TX_OFFLOAD_TCP_CKSUM |
-               DEV_TX_OFFLOAD_SCTP_CKSUM |
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_MT_LOCKFREE |
-               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+               RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+               RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-               DEV_TX_OFFLOAD_MULTI_SEGS;
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* enable timestamp in mbuf */
 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
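
The hunks above track the DPDK 21.11 ethdev namespace change, which prefixed the offload, speed, and link macros with RTE_ETH_ (DEV_RX_OFFLOAD_JUMBO_FRAME is dropped rather than renamed, since that flag was removed from ethdev entirely). A minimal compatibility sketch, under the assumption that out-of-tree code must still build against pre-21.11 headers and only needs these two flags:

    /* hedged sketch: map the new names onto the old ones when building
     * against a DPDK release older than 21.11 */
    #include <rte_version.h>
    #include <rte_ethdev.h>

    #if RTE_VERSION < RTE_VERSION_NUM(21, 11, 0, 0)
    #define RTE_ETH_RX_OFFLOAD_VLAN_FILTER DEV_RX_OFFLOAD_VLAN_FILTER
    #define RTE_ETH_TX_OFFLOAD_MULTI_SEGS  DEV_TX_OFFLOAD_MULTI_SEGS
    #endif
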
@@ -75,6 +75,9 @@ int dpaa2_timestamp_dynfield_offset = -1;
 /* Enable error queue */
 bool dpaa2_enable_err_queue;
 
+#define MAX_NB_RX_DESC         11264
+int total_nb_rx_desc;
+
 struct rte_dpaa2_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint8_t page_id; /* dpni statistics page id */
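
The new total_nb_rx_desc counter accumulates nb_rx_desc across dpaa2_dev_rx_queue_setup() calls and is decremented in dpaa2_dev_rx_queue_release() (both hunks appear later in this diff). A worked example against the cap: 16 queues of 1024 descriptors commit 16 * 1024 = 16384 buffers, over the 11264 budget, which triggers the DPNI_NORMAL_BUF warning added below.
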
@@ -143,15 +146,15 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
        PMD_INIT_FUNC_TRACE();
 
-       if (mask & ETH_VLAN_FILTER_MASK) {
-               /* VLAN Filter not avaialble */
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               /* VLAN Filter not available */
                if (!priv->max_vlan_filters) {
                        DPAA2_PMD_INFO("VLAN filter not available");
                        return -ENOTSUP;
                }
 
                if (dev->data->dev_conf.rxmode.offloads &
-                       DEV_RX_OFFLOAD_VLAN_FILTER)
+                       RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, true);
                else
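
For reference, a hedged application-side sketch of the path that lands in this callback (port_id and the VLAN ID are hypothetical; RTE_ETH_RX_OFFLOAD_VLAN_FILTER must be present in rxmode.offloads for the enable branch to run):

    #include <rte_ethdev.h>

    /* sketch: enable VLAN filtering, then add VID 100 to the filter table
     * (the strip/extend/qinq bits are deliberately left cleared) */
    static int enable_vlan_filter(uint16_t port_id)
    {
            int ret;

            ret = rte_eth_dev_set_vlan_offload(port_id,
                            RTE_ETH_VLAN_FILTER_OFFLOAD);
            if (ret == 0)
                    ret = rte_eth_dev_vlan_filter(port_id, 100, 1);
            return ret;
    }
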
@@ -252,13 +255,14 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                        dev_rx_offloads_nodis;
        dev_info->tx_offload_capa = dev_tx_offloads_sup |
                                        dev_tx_offloads_nodis;
-       dev_info->speed_capa = ETH_LINK_SPEED_1G |
-                       ETH_LINK_SPEED_2_5G |
-                       ETH_LINK_SPEED_10G;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+                       RTE_ETH_LINK_SPEED_2_5G |
+                       RTE_ETH_LINK_SPEED_10G;
+       dev_info->dev_capa &= ~RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP;
 
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs = 0;
-       dev_info->max_vmdq_pools = ETH_16_POOLS;
+       dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
        dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 
        dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@@ -271,10 +275,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
 
        if (dpaa2_svr_family == SVR_LX2160A) {
-               dev_info->speed_capa |= ETH_LINK_SPEED_25G |
-                               ETH_LINK_SPEED_40G |
-                               ETH_LINK_SPEED_50G |
-                               ETH_LINK_SPEED_100G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+                               RTE_ETH_LINK_SPEED_40G |
+                               RTE_ETH_LINK_SPEED_50G |
+                               RTE_ETH_LINK_SPEED_100G;
        }
 
        return 0;
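
Besides the macro renames, this hunk clears RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, advertising that flow rules do not survive a device restart. A small sketch of how an application reads the speed capabilities reported here (port_id assumed valid):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* sketch: test one bit of the advertised speed capabilities */
    static void print_speed_capa(uint16_t port_id)
    {
            struct rte_eth_dev_info info;

            if (rte_eth_dev_info_get(port_id, &info) == 0 &&
                (info.speed_capa & RTE_ETH_LINK_SPEED_100G))
                    printf("port %u advertises 100G\n", port_id);
    }
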
@@ -292,16 +296,15 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
-                       {DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
-                       {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-                       {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-                       {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
-                       {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
-                       {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
-                       {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
-                       {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-                       {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
-                       {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+                       {RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+                       {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+                       {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+                       {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+                       {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+                       {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+                       {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+                       {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
        };
 
        /* Update Rx offload info */
@@ -328,15 +331,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
-                       {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-                       {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-                       {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-                       {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-                       {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-                       {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-                       {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-                       {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-                       {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+                       {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+                       {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+                       {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+                       {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+                       {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+                       {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+                       {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+                       {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
        };
 
        /* Update Tx offload info */
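
These two string tables feed the generic burst-mode query; a brief usage sketch (queue 0, port_id assumed valid):

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* sketch: print the burst-mode string assembled from the table above */
    static void show_tx_burst_mode(uint16_t port_id)
    {
            struct rte_eth_burst_mode mode;

            if (rte_eth_tx_burst_mode_get(port_id, 0, &mode) == 0)
                    printf("Tx burst mode:%s\n", mode.info);
    }
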
@@ -396,6 +399,8 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev)
        if (dpaa2_enable_err_queue) {
                priv->rx_err_vq = rte_zmalloc("dpni_rx_err",
                        sizeof(struct dpaa2_queue), 0);
+               if (!priv->rx_err_vq)
+                       goto fail;
 
                dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq;
                dpaa2_q->q_storage = rte_malloc("err_dq_storage",
@@ -505,8 +510,7 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev)
                /* cleaning up queue storage */
                for (i = 0; i < priv->nb_rx_queues; i++) {
                        dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i];
-                       if (dpaa2_q->q_storage)
-                               rte_free(dpaa2_q->q_storage);
+                       rte_free(dpaa2_q->q_storage);
                }
                /* cleanup tx queue cscn */
                for (i = 0; i < priv->nb_tx_queues; i++) {
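
The guard dropped above is redundant: rte_free(), like free(), is a documented no-op when passed a NULL pointer.
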
@@ -540,6 +544,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        int tx_l3_csum_offload = false;
        int tx_l4_csum_offload = false;
        int ret, tc_index;
+       uint32_t max_rx_pktlen;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -559,28 +564,22 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                tx_offloads, dev_tx_offloads_nodis);
        }
 
-       if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
-                       ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
-                               priv->token, eth_conf->rxmode.max_rx_pkt_len
-                               - RTE_ETHER_CRC_LEN);
-                       if (ret) {
-                               DPAA2_PMD_ERR(
-                                       "Unable to set mtu. check config");
-                               return ret;
-                       }
-                       dev->data->mtu =
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                               RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
-                               VLAN_TAG_SIZE;
-                               DPAA2_PMD_INFO("MTU configured for the device: %d",
-                                               dev->data->mtu);
-               } else {
-                       return -1;
+       max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
+                               RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+       if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
+               ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
+                       priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
+               if (ret != 0) {
+                       DPAA2_PMD_ERR("Unable to set MTU. Check config");
+                       return ret;
                }
+               DPAA2_PMD_INFO("MTU configured for the device: %d",
+                               dev->data->mtu);
+       } else {
+               return -1;
        }
 
-       if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+       if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
                for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
                        ret = dpaa2_setup_flow_dist(dev,
                                        eth_conf->rx_adv_conf.rss_conf.rss_hf,
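
A worked example for the reshaped frame-length logic (RTE_ETHER_HDR_LEN = 14, RTE_ETHER_CRC_LEN = 4, VLAN_TAG_SIZE = 4): the default MTU of 1500 gives max_rx_pktlen = 1500 + 14 + 4 + 4 = 1522, and the DPNI max frame length is programmed as 1522 - 4 = 1518, the CRC being excluded. This replaces the old max_rx_pkt_len/JUMBO_FRAME plumbing removed in DPDK 21.11.
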
@@ -594,12 +593,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                }
        }
 
-       if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
                rx_l3_csum_offload = true;
 
-       if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
-               (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
-               (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+       if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+               (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+               (rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
                rx_l4_csum_offload = true;
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -617,7 +616,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        }
 
 #if !defined(RTE_LIBRTE_IEEE1588)
-       if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 #endif
        {
                ret = rte_mbuf_dyn_rx_timestamp_register(
@@ -630,12 +629,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                dpaa2_enable_ts[dev->data->port_id] = true;
        }
 
-       if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+       if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
                tx_l3_csum_offload = true;
 
-       if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
-               (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-               (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+       if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+               (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+               (tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
                tx_l4_csum_offload = true;
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -667,8 +666,32 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                }
        }
 
-       if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-               dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+               dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
+
+       if (eth_conf->lpbk_mode) {
+               ret = dpaa2_dev_recycle_config(dev);
+               if (ret) {
+                       DPAA2_PMD_ERR("Failed to configure %s as recycle port.",
+                               dev->data->name);
+
+                       return ret;
+               }
+       } else {
+       /** The user may disable loopback mode by calling
+        * "dev_configure" with lpbk_mode cleared.
+        * Whether or not the port was configured as a recycle port,
+        * recycle de-configure is called here.
+        * If the port is not a recycle port, de-configure returns directly.
+        */
+               ret = dpaa2_dev_recycle_deconfig(dev);
+               if (ret) {
+                       DPAA2_PMD_ERR("Failed to de-configure recycle port %s.",
+                               dev->data->name);
+
+                       return ret;
+               }
+       }
 
        dpaa2_tm_init(dev);
 
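A hedged sketch of how an application reaches the new recycle path (port_id and the queue counts are hypothetical); dpaa2 maps the generic lpbk_mode flag onto its recycle-port configuration:

    #include <rte_ethdev.h>

    /* sketch: request loopback (recycle) mode at configure time */
    static int enable_loopback(uint16_t port_id)
    {
            struct rte_eth_conf conf = { 0 };

            conf.lpbk_mode = 1;  /* cleared on a later configure -> de-config */
            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }
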
@@ -700,6 +723,13 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
        DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p",
                        dev, rx_queue_id, mb_pool, rx_conf);
 
+       total_nb_rx_desc += nb_rx_desc;
+       if (total_nb_rx_desc > MAX_NB_RX_DESC) {
+               DPAA2_PMD_WARN("\nTotal nb_rx_desc exceeds %d limit. Please use Normal buffers",
+                              MAX_NB_RX_DESC);
+               DPAA2_PMD_WARN("To use Normal buffers, run 'export DPNI_NORMAL_BUF=1' before running the dynamic_dpl.sh script");
+       }
+
        /* Rx deferred start is not supported */
        if (rx_conf->rx_deferred_start) {
                DPAA2_PMD_ERR("%p:Rx deferred start not supported",
@@ -708,9 +738,14 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev,
        }
 
        if (!priv->bp_list || priv->bp_list->mp != mb_pool) {
+               if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+                       ret = rte_dpaa2_bpid_info_init(mb_pool);
+                       if (ret)
+                               return ret;
+               }
                bpid = mempool_to_bpid(mb_pool);
-               ret = dpaa2_attach_bp_list(priv,
-                                          rte_dpaa2_bpid_info[bpid].bp_list);
+               ret = dpaa2_attach_bp_list(priv, dpni,
+                               rte_dpaa2_bpid_info[bpid].bp_list);
                if (ret)
                        return ret;
        }
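
The new secondary-process branch initializes the driver's bpid table, which is not shared with the primary, before mempool_to_bpid() resolves the pool. A typical setup that exercises this path (pool name and sizes are hypothetical):

    #include <rte_mbuf.h>
    #include <rte_lcore.h>
    #include <rte_ethdev.h>

    /* sketch: create an mbuf pool and attach it to Rx queue 0 */
    static int setup_rx(uint16_t port_id)
    {
            struct rte_mempool *mp;

            mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256, 0,
                            RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
            if (mp == NULL)
                    return -1;
            return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
                            NULL, mp);
    }
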
@@ -848,6 +883,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
        struct dpni_queue tx_conf_cfg;
        struct dpni_queue tx_flow_cfg;
        uint8_t options = 0, flow_id;
+       uint16_t channel_id;
        struct dpni_queue_id qid;
        uint32_t tc_id;
        int ret;
@@ -873,20 +909,6 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
        memset(&tx_conf_cfg, 0, sizeof(struct dpni_queue));
        memset(&tx_flow_cfg, 0, sizeof(struct dpni_queue));
 
-       tc_id = tx_queue_id;
-       flow_id = 0;
-
-       ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
-                       tc_id, flow_id, options, &tx_flow_cfg);
-       if (ret) {
-               DPAA2_PMD_ERR("Error in setting the tx flow: "
-                       "tc_id=%d, flow=%d err=%d",
-                       tc_id, flow_id, ret);
-                       return -1;
-       }
-
-       dpaa2_q->flow_id = flow_id;
-
        if (tx_queue_id == 0) {
                /*Set tx-conf and error configuration*/
                if (priv->flags & DPAA2_TX_CONF_ENABLE)
@@ -903,10 +925,26 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                        return -1;
                }
        }
+
+       tc_id = tx_queue_id % priv->num_tx_tc;
+       channel_id = (uint8_t)(tx_queue_id / priv->num_tx_tc) % priv->num_channels;
+       flow_id = 0;
+
+       ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX,
+                       ((channel_id << 8) | tc_id), flow_id, options, &tx_flow_cfg);
+       if (ret) {
+               DPAA2_PMD_ERR("Error in setting the tx flow: "
+                       "tc_id=%d, flow=%d err=%d",
+                       tc_id, flow_id, ret);
+               return -1;
+       }
+
+       dpaa2_q->flow_id = flow_id;
+
        dpaa2_q->tc_index = tc_id;
 
        ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
-                            DPNI_QUEUE_TX, dpaa2_q->tc_index,
+                            DPNI_QUEUE_TX, ((channel_id << 8) | dpaa2_q->tc_index),
                             dpaa2_q->flow_id, &tx_flow_cfg, &qid);
        if (ret) {
                DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -922,7 +960,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
                cong_notif_cfg.threshold_entry = nb_tx_desc;
                /* Notify that the queue is not congested when the data in
-                * the queue is below this thershold.(90% of value)
+                * the queue is below this threshold (90% of the value).
                 */
                cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10;
                cong_notif_cfg.message_ctx = 0;
@@ -938,7 +976,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW,
                                                       priv->token,
                                                       DPNI_QUEUE_TX,
-                                                      tc_id,
+                                                      ((channel_id << 8) | tc_id),
                                                       &cong_notif_cfg);
                if (ret) {
                        DPAA2_PMD_ERR(
@@ -955,7 +993,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                options = options | DPNI_QUEUE_OPT_USER_CTX;
                tx_conf_cfg.user_context = (size_t)(dpaa2_q);
                ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token,
-                            DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+                            DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
                             dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg);
                if (ret) {
                        DPAA2_PMD_ERR("Error in setting the tx conf flow: "
@@ -966,7 +1004,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
                }
 
                ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
-                            DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index,
+                            DPNI_QUEUE_TX_CONFIRM, ((channel_id << 8) | dpaa2_tx_conf_q->tc_index),
                             dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid);
                if (ret) {
                        DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret);
@@ -990,6 +1028,9 @@ dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 
        memset(&cfg, 0, sizeof(struct dpni_queue));
        PMD_INIT_FUNC_TRACE();
+
+       total_nb_rx_desc -= dpaa2_q->nb_desc;
+
        if (dpaa2_q->cgid != 0xff) {
                options = DPNI_QUEUE_OPT_CLEAR_CGID;
                cfg.cgid = dpaa2_q->cgid;
@@ -1064,7 +1105,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
  * Dpaa2 link Interrupt handler
  *
  * @param param
- *  The address of parameter (struct rte_eth_dev *) regsitered before.
+ *  The address of parameter (struct rte_eth_dev *) registered before.
  *
  * @return
  *  void
@@ -1145,17 +1186,15 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
        struct dpni_queue cfg;
        struct dpni_error_cfg   err_cfg;
-       uint16_t qdid;
        struct dpni_queue_id qid;
        struct dpaa2_queue *dpaa2_q;
        int ret, i;
        struct rte_intr_handle *intr_handle;
 
        dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
-       intr_handle = &dpaa2_dev->intr_handle;
+       intr_handle = dpaa2_dev->intr_handle;
 
        PMD_INIT_FUNC_TRACE();
-
        ret = dpni_enable(dpni, CMD_PRI_LOW, priv->token);
        if (ret) {
                DPAA2_PMD_ERR("Failure in enabling dpni %d device: err=%d",
@@ -1166,14 +1205,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
        /* Power up the phy. Needed to make the link go UP */
        dpaa2_dev_set_link_up(dev);
 
-       ret = dpni_get_qdid(dpni, CMD_PRI_LOW, priv->token,
-                           DPNI_QUEUE_TX, &qdid);
-       if (ret) {
-               DPAA2_PMD_ERR("Error in getting qdid: err=%d", ret);
-               return ret;
-       }
-       priv->qdid = qdid;
-
        for (i = 0; i < data->nb_rx_queues; i++) {
                dpaa2_q = (struct dpaa2_queue *)data->rx_queues[i];
                ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token,
@@ -1223,8 +1254,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
        }
 
        /* if the interrupts were configured on this devices*/
-       if (intr_handle && (intr_handle->fd) &&
-           (dev->data->dev_conf.intr_conf.lsc != 0)) {
+       if (intr_handle && rte_intr_fd_get(intr_handle) &&
+           dev->data->dev_conf.intr_conf.lsc != 0) {
                /* Registering LSC interrupt handler */
                rte_intr_callback_register(intr_handle,
                                           dpaa2_interrupt_handler,
@@ -1258,13 +1289,18 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
        struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
        int ret;
        struct rte_eth_link link;
-       struct rte_intr_handle *intr_handle = dev->intr_handle;
+       struct rte_device *rdev = dev->device;
+       struct rte_intr_handle *intr_handle;
+       struct rte_dpaa2_device *dpaa2_dev;
+
+       dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
+       intr_handle = dpaa2_dev->intr_handle;
 
        PMD_INIT_FUNC_TRACE();
 
        /* reset interrupt callback  */
-       if (intr_handle && (intr_handle->fd) &&
-           (dev->data->dev_conf.intr_conf.lsc != 0)) {
+       if (intr_handle && rte_intr_fd_get(intr_handle) &&
+           dev->data->dev_conf.intr_conf.lsc != 0) {
                /*disable dpni irqs */
                dpaa2_eth_setup_irqs(dev, 0);
 
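Both interrupt hunks follow the DPDK 21.11 change that made struct rte_intr_handle opaque: the handle is now fetched from the bus device, and its fd is read through an accessor instead of direct field access. A small sketch:

    #include <stdbool.h>
    #include <rte_interrupts.h>

    /* sketch: intr_handle->fd no longer compiles; use the accessor */
    static bool lsc_irq_ready(const struct rte_intr_handle *ih)
    {
            return ih != NULL && rte_intr_fd_get(ih) > 0;
    }
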
@@ -1466,19 +1502,6 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       /* check that mtu is within the allowed range */
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
-               return -EINVAL;
-
-       if (frame_size > DPAA2_ETH_MAX_LEN)
-               dev->data->dev_conf.rxmode.offloads |=
-                                               DEV_RX_OFFLOAD_JUMBO_FRAME;
-       else
-               dev->data->dev_conf.rxmode.offloads &=
-                                               ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        /* Set the Max Rx frame length as 'mtu' +
         * Maximum Ethernet header length
         */
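
With the jumbo offload flag gone (see the hunk above), the range check moved into generic ethdev code, so jumbo frames are now requested purely via the MTU. A fragment, with a hypothetical port_id and MTU:

    /* sketch: ethdev validates 9000 against dev_info min_mtu/max_mtu
     * before this driver callback runs */
    ret = rte_eth_dev_set_mtu(port_id, 9000);
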
@@ -1876,7 +1899,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
                        DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
                        return -1;
                }
-               if (state.up == ETH_LINK_DOWN &&
+               if (state.up == RTE_ETH_LINK_DOWN &&
                    wait_to_complete)
                        rte_delay_ms(CHECK_INTERVAL);
                else
@@ -1888,9 +1911,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
        link.link_speed = state.rate;
 
        if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
-               link.link_duplex = ETH_LINK_HALF_DUPLEX;
+               link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
        else
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
+               link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        ret = rte_eth_linkstatus_set(dev, &link);
        if (ret == -1)
@@ -1981,7 +2004,7 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev)
        }
 
        /*changing  tx burst function to avoid any more enqueues */
-       dev->tx_pkt_burst = dummy_dev_tx;
+       dev->tx_pkt_burst = rte_eth_pkt_burst_dummy;
 
        /* Loop while dpni_disable() attempts to drain the egress FQs
         * and confirm them back to us.
@@ -2051,9 +2074,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                 *      No TX side flow control (send Pause frame disabled)
                 */
                if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
-                       fc_conf->mode = RTE_FC_FULL;
+                       fc_conf->mode = RTE_ETH_FC_FULL;
                else
-                       fc_conf->mode = RTE_FC_RX_PAUSE;
+                       fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
        } else {
                /* DPNI_LINK_OPT_PAUSE not set
                 *  if ASYM_PAUSE set,
@@ -2063,9 +2086,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                 *      Flow control disabled
                 */
                if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
-                       fc_conf->mode = RTE_FC_TX_PAUSE;
+                       fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
                else
-                       fc_conf->mode = RTE_FC_NONE;
+                       fc_conf->mode = RTE_ETH_FC_NONE;
        }
 
        return ret;
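
For clarity, the full decode implemented by the two get-side hunks above:

    /*  DPNI_LINK_OPT_PAUSE | DPNI_LINK_OPT_ASYM_PAUSE -> fc_conf->mode
     *         set          |        not set           -> RTE_ETH_FC_FULL
     *         set          |          set             -> RTE_ETH_FC_RX_PAUSE
     *       not set        |          set             -> RTE_ETH_FC_TX_PAUSE
     *       not set        |        not set           -> RTE_ETH_FC_NONE
     */
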
@@ -2109,14 +2132,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
        /* update cfg with fc_conf */
        switch (fc_conf->mode) {
-       case RTE_FC_FULL:
+       case RTE_ETH_FC_FULL:
                /* Full flow control;
                 * OPT_PAUSE set, ASYM_PAUSE not set
                 */
                cfg.options |= DPNI_LINK_OPT_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
                break;
-       case RTE_FC_TX_PAUSE:
+       case RTE_ETH_FC_TX_PAUSE:
                /* Enable RX flow control
                 * OPT_PAUSE not set;
                 * ASYM_PAUSE set;
@@ -2124,7 +2147,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
                break;
-       case RTE_FC_RX_PAUSE:
+       case RTE_ETH_FC_RX_PAUSE:
                /* Enable TX Flow control
                 * OPT_PAUSE set
                 * ASYM_PAUSE set
@@ -2132,7 +2155,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                cfg.options |= DPNI_LINK_OPT_PAUSE;
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
                break;
-       case RTE_FC_NONE:
+       case RTE_ETH_FC_NONE:
                /* Disable Flow control
                 * OPT_PAUSE not set
                 * ASYM_PAUSE not set
@@ -2255,7 +2278,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
                ocfg.oa = 1;
                /* Late arrival window size disabled */
                ocfg.olws = 0;
-               /* ORL resource exhaustaion advance NESN disabled */
+               /* ORL resource exhaustion advance NESN disabled */
                ocfg.oeane = 0;
                /* Loose ordering enabled */
                ocfg.oloe = 1;
@@ -2603,6 +2626,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
                return -1;
        }
 
+       if (eth_dev->data->dev_conf.lpbk_mode)
+               dpaa2_dev_recycle_deconfig(eth_dev);
+
        /* Clean the device first */
        ret = dpni_reset(dpni_dev, CMD_PRI_LOW, priv->token);
        if (ret) {
@@ -2620,9 +2646,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        }
 
        priv->num_rx_tc = attr.num_rx_tcs;
+       priv->num_tx_tc = attr.num_tx_tcs;
        priv->qos_entries = attr.qos_entries;
        priv->fs_entries = attr.fs_entries;
        priv->dist_queues = attr.num_queues;
+       priv->num_channels = attr.num_channels;
+       priv->channel_inuse = 0;
+       rte_spinlock_init(&priv->lpbk_qp_lock);
 
        /* only if the custom CG is enabled */
        if (attr.options & DPNI_OPT_CUSTOM_CG)
@@ -2636,8 +2666,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        for (i = 0; i < attr.num_rx_tcs; i++)
                priv->nb_rx_queues += attr.num_queues;
 
-       /* Using number of TX queues as number of TX TCs */
-       priv->nb_tx_queues = attr.num_tx_tcs;
+       priv->nb_tx_queues = attr.num_tx_tcs * attr.num_channels;
 
        DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
                        priv->num_rx_tc, priv->nb_rx_queues,
@@ -2739,13 +2768,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        }
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
 
-       /*Init fields w.r.t. classficaition*/
+       /* Init fields w.r.t. classification */
        memset(&priv->extract.qos_key_extract, 0,
                sizeof(struct dpaa2_key_extract));
        priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
        if (!priv->extract.qos_extract_param) {
                DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
-                           " classificaiton ", ret);
+                           " classification ", ret);
                goto init_err;
        }
        priv->extract.qos_key_extract.key_info.ipv4_src_offset =
@@ -2763,7 +2792,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
                priv->extract.tc_extract_param[i] =
                        (size_t)rte_malloc(NULL, 256, 64);
                if (!priv->extract.tc_extract_param[i]) {
-                       DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
+                       DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classification",
                                     ret);
                        goto init_err;
                }
@@ -2808,7 +2837,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
                        return ret;
                }
        }
-       RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
+       RTE_LOG(INFO, PMD, "%s: netdev created, connected to %s\n",
+               eth_dev->data->name, dpaa2_dev->ep_name);
+
        return 0;
 init_err:
        dpaa2_dev_close(eth_dev);