drivers/net: update Rx RSS hash offload capabilities
diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c
index 5c0aafb..26805f1 100644
--- a/drivers/net/dpaa/dpaa_ethdev.c
+++ b/drivers/net/dpaa/dpaa_ethdev.c
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause
  *
  *   Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
- *   Copyright 2017 NXP
+ *   Copyright 2017-2019 NXP
  *
  */
 /* System headers */
@@ -15,6 +15,7 @@
 #include <sys/types.h>
 #include <sys/syscall.h>
 
+#include <rte_string_fns.h>
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_interrupts.h>
@@ -47,7 +48,8 @@
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-               DEV_RX_OFFLOAD_JUMBO_FRAME;
+               DEV_RX_OFFLOAD_JUMBO_FRAME |
+               DEV_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
@@ -55,11 +57,12 @@ static uint64_t dev_rx_offloads_nodis =
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_CRC_STRIP |
-               DEV_RX_OFFLOAD_SCATTER;
+               DEV_RX_OFFLOAD_RSS_HASH;
 
 /* Supported Tx offloads */
-static uint64_t dev_tx_offloads_sup;
+static uint64_t dev_tx_offloads_sup =
+               DEV_TX_OFFLOAD_MT_LOCKFREE |
+               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
@@ -68,9 +71,7 @@ static uint64_t dev_tx_offloads_nodis =
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
                DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_MULTI_SEGS |
-               DEV_TX_OFFLOAD_MT_LOCKFREE |
-               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+               DEV_TX_OFFLOAD_MULTI_SEGS;
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
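
The dev_*_offloads_sup/dev_*_offloads_nodis split above is how this PMD reports capabilities: both sets are advertised through rte_eth_dev_info_get(), but the nodis offloads remain enabled whether or not the application requests them. A minimal sketch of configuring against this split, assuming a valid DPAA port_id (queue counts illustrative):

    #include <string.h>
    #include <rte_ethdev.h>

    /* Sketch: request scatter Rx on top of the always-on offloads. */
    static int
    configure_dpaa_port(uint16_t port_id)
    {
            struct rte_eth_dev_info info;
            struct rte_eth_conf conf;

            memset(&conf, 0, sizeof(conf));
            rte_eth_dev_info_get(port_id, &info);

            if (info.rx_offload_capa & DEV_RX_OFFLOAD_SCATTER)
                    conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;

            /* Checksums and RSS hash sit in dev_rx_offloads_nodis, so
             * the PMD keeps them enabled even though none are set here.
             */
            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }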
@@ -124,7 +125,7 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = {
 
 static struct rte_dpaa_driver rte_dpaa_pmd;
 
-static void
+static int
 dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
 
 static inline void
@@ -146,14 +147,33 @@ static int
 dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
-       uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+       uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
                                + VLAN_TAG_SIZE;
+       uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
+       if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
                return -EINVAL;
-       if (frame_size > ETHER_MAX_LEN)
+       /*
+        * Refuse an MTU that would require scattered Rx support
+        * when that feature has not been enabled beforehand.
+        */
+       if (dev->data->min_rx_buf_size &&
+               !dev->data->scattered_rx && frame_size > buffsz) {
+               DPAA_PMD_ERR("SG not enabled, will not fit in one buffer");
+               return -EINVAL;
+       }
+
+       /* check <seg size> * <max_seg> >= max_frame */
+       if (dev->data->min_rx_buf_size && dev->data->scattered_rx &&
+               (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) {
+               DPAA_PMD_ERR("Frame size %u exceeds max SG list capacity %u",
+                               frame_size, buffsz * DPAA_SGT_MAX_ENTRIES);
+               return -EINVAL;
+       }
+
+       if (frame_size > RTE_ETHER_MAX_LEN)
-               dev->data->dev_conf.rxmode.offloads &=
+               dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
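
A worked example of the checks added in this hunk, taking buffsz = 2048 and DPAA_SGT_MAX_ENTRIES = 16 as illustrative assumptions:

    /*
     * mtu        = 9000 (requested via rte_eth_dev_set_mtu())
     * frame_size = 9000 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4)
     *              + VLAN_TAG_SIZE (4) = 9022
     *
     * Without DEV_RX_OFFLOAD_SCATTER: 9022 > 2048         -> -EINVAL
     * With scatter enabled:           9022 <= 16 * 2048   -> accepted,
     * and the jumbo offload flag is set, since 9022 > RTE_ETHER_MAX_LEN.
     */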
@@ -177,32 +197,49 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
        PMD_INIT_FUNC_TRACE();
 
-       /* Rx offloads validation */
+       /* Rx offloads which are enabled by default */
        if (dev_rx_offloads_nodis & ~rx_offloads) {
-               DPAA_PMD_WARN(
-               "Rx offloads non configurable - requested 0x%" PRIx64
-               " ignored 0x%" PRIx64,
-                       rx_offloads, dev_rx_offloads_nodis);
+               DPAA_PMD_INFO(
+               "Some Rx offloads are enabled by default - requested 0x%" PRIx64
+               " always-on 0x%" PRIx64,
+               rx_offloads, dev_rx_offloads_nodis);
        }
 
-       /* Tx offloads validation */
+       /* Tx offloads which are enabled by default */
        if (dev_tx_offloads_nodis & ~tx_offloads) {
-               DPAA_PMD_WARN(
-               "Tx offloads non configurable - requested 0x%" PRIx64
-               " ignored 0x%" PRIx64,
-                       tx_offloads, dev_tx_offloads_nodis);
+               DPAA_PMD_INFO(
+               "Some Tx offloads are enabled by default - requested 0x%" PRIx64
+               " always-on 0x%" PRIx64,
+               tx_offloads, dev_tx_offloads_nodis);
        }
 
        if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+               uint32_t max_len;
+
+               DPAA_PMD_DEBUG("enabling jumbo");
+
                if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
-                   DPAA_MAX_RX_PKT_LEN) {
-                       fman_if_set_maxfrm(dpaa_intf->fif,
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len);
-                       return 0;
-               } else {
-                       return -1;
+                   DPAA_MAX_RX_PKT_LEN)
+                       max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+               else {
+                       DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
+                               "supported is %d",
+                               dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                               DPAA_MAX_RX_PKT_LEN);
+                       max_len = DPAA_MAX_RX_PKT_LEN;
                }
+
+               fman_if_set_maxfrm(dpaa_intf->fif, max_len);
+               dev->data->mtu = max_len
+                       - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
+       }
+
+       if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+               DPAA_PMD_DEBUG("enabling scatter mode");
+               fman_if_set_sg(dpaa_intf->fif, 1);
+               dev->data->scattered_rx = 1;
        }
+
        return 0;
 }
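
From the application side, both new branches are driven purely by rxmode; a sketch, with the 9018-byte length illustrative (it must stay within DPAA_MAX_RX_PKT_LEN or the PMD caps it as shown above):

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    configure_jumbo_scatter(uint16_t port_id)
    {
            struct rte_eth_conf conf;

            memset(&conf, 0, sizeof(conf));
            conf.rxmode.max_rx_pkt_len = 9018;
            conf.rxmode.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME |
                                   DEV_RX_OFFLOAD_SCATTER;

            /* On success the PMD derives dev->data->mtu from
             * max_rx_pkt_len and sets dev->data->scattered_rx.
             */
            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }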
 
@@ -210,12 +247,15 @@ static const uint32_t *
 dpaa_supported_ptypes_get(struct rte_eth_dev *dev)
 {
        static const uint32_t ptypes[] = {
-               /*todo -= add more types */
                RTE_PTYPE_L2_ETHER,
-               RTE_PTYPE_L3_IPV4,
-               RTE_PTYPE_L3_IPV4_EXT,
-               RTE_PTYPE_L3_IPV6,
-               RTE_PTYPE_L3_IPV6_EXT,
+               RTE_PTYPE_L2_ETHER_VLAN,
+               RTE_PTYPE_L2_ETHER_ARP,
+               RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
+               RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
+               RTE_PTYPE_L4_ICMP,
+               RTE_PTYPE_L4_FRAG,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP,
                RTE_PTYPE_L4_SCTP
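
Applications read this table through the standard ethdev call; a short sketch:

    #include <stdio.h>
    #include <rte_ethdev.h>

    /* Sketch: print every packet type this port claims to classify. */
    static void
    dump_ptypes(uint16_t port_id)
    {
            uint32_t ptypes[16];    /* comfortably fits this PMD's table */
            int i, n;

            n = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_ALL_MASK,
                                                 ptypes, RTE_DIM(ptypes));
            for (i = 0; i < n; i++)
                    printf("ptype 0x%08x\n", ptypes[i]);
    }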
@@ -291,30 +331,40 @@ dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused,
                return 0;
 }
 
-static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
-                             struct rte_eth_dev_info *dev_info)
+static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
+                            struct rte_eth_dev_info *dev_info)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
-       PMD_INIT_FUNC_TRACE();
+       DPAA_PMD_DEBUG(": %s", dpaa_intf->name);
 
        dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
        dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
-       dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE;
        dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
        dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs = 0;
        dev_info->max_vmdq_pools = ETH_16_POOLS;
        dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
-       dev_info->speed_capa = (ETH_LINK_SPEED_1G |
-                               ETH_LINK_SPEED_10G);
+
+       if (dpaa_intf->fif->mac_type == fman_mac_1g) {
+               dev_info->speed_capa = ETH_LINK_SPEED_1G;
+       } else if (dpaa_intf->fif->mac_type == fman_mac_10g) {
+               dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
+       } else {
+               DPAA_PMD_ERR("invalid link_speed: %s, %d",
+                            dpaa_intf->name, dpaa_intf->fif->mac_type);
+               return -EINVAL;
+       }
+
        dev_info->rx_offload_capa = dev_rx_offloads_sup |
                                        dev_rx_offloads_nodis;
        dev_info->tx_offload_capa = dev_tx_offloads_sup |
                                        dev_tx_offloads_nodis;
        dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
        dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
+
+       return 0;
 }
 
 static int dpaa_eth_link_update(struct rte_eth_dev *dev,
@@ -350,13 +400,15 @@ static int dpaa_eth_stats_get(struct rte_eth_dev *dev,
        return 0;
 }
 
-static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
+static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
        PMD_INIT_FUNC_TRACE();
 
        fman_if_stats_reset(dpaa_intf->fif);
+
+       return 0;
 }
 
 static int
@@ -395,10 +447,9 @@ dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 
        if (xstats_names != NULL)
                for (i = 0; i < stat_cnt; i++)
-                       snprintf(xstats_names[i].name,
-                                sizeof(xstats_names[i].name),
-                                "%s",
-                                dpaa_xstats_strings[i].name);
+                       strlcpy(xstats_names[i].name,
+                               dpaa_xstats_strings[i].name,
+                               sizeof(xstats_names[i].name));
 
        return stat_cnt;
 }
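
strlcpy() comes from the rte_string_fns.h include added at the top of the file; it copies at most sizeof(dst) - 1 bytes and always NUL-terminates, the same guarantee the snprintf("%s", ...) idiom provided but without going through a format string. A trivial sketch of the call used above:

    #include <rte_string_fns.h>

    char name[RTE_ETH_XSTATS_NAME_SIZE];

    /* Truncates if needed and always NUL-terminates the destination. */
    strlcpy(name, dpaa_xstats_strings[i].name, sizeof(name));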
@@ -466,40 +517,48 @@ dpaa_xstats_get_names_by_id(
        return limit;
 }
 
-static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
        PMD_INIT_FUNC_TRACE();
 
        fman_if_promiscuous_enable(dpaa_intf->fif);
+
+       return 0;
 }
 
-static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
        PMD_INIT_FUNC_TRACE();
 
        fman_if_promiscuous_disable(dpaa_intf->fif);
+
+       return 0;
 }
 
-static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
+static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
        PMD_INIT_FUNC_TRACE();
 
        fman_if_set_mcast_filter_table(dpaa_intf->fif);
+
+       return 0;
 }
 
-static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
+static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
 {
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
 
        PMD_INIT_FUNC_TRACE();
 
        fman_if_reset_mcast_filter_table(dpaa_intf->fif);
+
+       return 0;
 }
 
 static
@@ -514,6 +573,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        struct qm_mcc_initfq opts = {0};
        u32 flags = 0;
        int ret;
+       u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -527,6 +587,28 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
                        queue_idx, rxq->fqid);
 
+       /* Check whether the maximum packet can fit in a single buffer */
+       if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
+               ; /* it fits; nothing to validate */
+       } else if (dev->data->dev_conf.rxmode.offloads &
+                       DEV_RX_OFFLOAD_SCATTER) {
+               if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
+                       buffsz * DPAA_SGT_MAX_ENTRIES) {
+                       DPAA_PMD_ERR("max RxPkt size %d too big to fit "
+                               "MaxSGlist %d",
+                               dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                               buffsz * DPAA_SGT_MAX_ENTRIES);
+                       rte_errno = EOVERFLOW;
+                       return -rte_errno;
+               }
+       } else {
+               DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
+                    " larger than a single mbuf (%u) and scattered"
+                    " mode has not been requested",
+                    dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                    buffsz);
+       }
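
Whether the warning path above ever fires is decided when the mempool is created, since buffsz is the pool's dataroom minus RTE_PKTMBUF_HEADROOM. A sketch of sizing a pool so a jumbo frame fits in one buffer (all counts illustrative):

    #include <rte_mbuf.h>
    #include <rte_lcore.h>

    /* dataroom = frame size + headroom, so buffsz >= 9018 here. */
    struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
                    4096,                           /* mbufs */
                    256,                            /* per-lcore cache */
                    0,                              /* priv size */
                    9018 + RTE_PKTMBUF_HEADROOM,    /* dataroom */
                    rte_socket_id());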
+
        if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
                struct fman_if_ic_params icp;
                uint32_t fd_offset;
@@ -553,12 +635,19 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                fman_if_set_bp(dpaa_intf->fif, mp->size,
                               dpaa_intf->bp_info->bpid, bp_size);
                dpaa_intf->valid = 1;
-               DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d",
-                           dpaa_intf->name, fd_offset,
-                       fman_if_get_fdoff(dpaa_intf->fif));
+               DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
+                               dpaa_intf->name, fd_offset,
+                               fman_if_get_fdoff(dpaa_intf->fif));
        }
+       DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
+               fman_if_get_sg_enable(dpaa_intf->fif),
+               dev->data->dev_conf.rxmode.max_rx_pkt_len);
        /* checking if push mode only, no error check for now */
-       if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+       if (!rxq->is_static &&
+           dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+               struct qman_portal *qp;
+               int q_fd;
+
                dpaa_push_queue_idx++;
                opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
                opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
@@ -594,10 +683,47 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                "ret:%d(%s)", rxq->fqid, ret, strerror(ret));
                        return ret;
                }
-               rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
-               rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+               if (dpaa_svr_family == SVR_LS1043A_FAMILY) {
+                       rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch;
+               } else {
+                       rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb;
+                       rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare;
+               }
+
                rxq->is_static = true;
+
+               /* Allocate qman specific portals */
+               qp = fsl_qman_fq_portal_create(&q_fd);
+               if (!qp) {
+                       DPAA_PMD_ERR("Unable to alloc fq portal");
+                       return -1;
+               }
+               rxq->qp = qp;
+
+               /* Set up the device interrupt handler */
+               if (!dev->intr_handle) {
+                       struct rte_dpaa_device *dpaa_dev;
+                       struct rte_device *rdev = dev->device;
+
+                       dpaa_dev = container_of(rdev, struct rte_dpaa_device,
+                                               device);
+                       dev->intr_handle = &dpaa_dev->intr_handle;
+                       dev->intr_handle->intr_vec = rte_zmalloc(NULL,
+                                       dpaa_push_mode_max_queue *
+                                       sizeof(int), 0);
+                       if (!dev->intr_handle->intr_vec) {
+                               DPAA_PMD_ERR("intr_vec alloc failed");
+                               return -ENOMEM;
+                       }
+                       dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
+                       dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
+               }
+
+               dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
+               dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
+               dev->intr_handle->efds[queue_idx] = q_fd;
+               rxq->q_fd = q_fd;
        }
+       rxq->bp_array = rte_dpaa_bpid_info;
        dev->data->rx_queues[queue_idx] = rxq;
 
        /* configure the CGR size as per the desc size */
@@ -630,7 +756,8 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
        struct qm_mcc_initfq opts = {0};
 
        if (dpaa_push_mode_max_queue)
-               DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n"
+               DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n"
+                             "PUSH mode already enabled for first %d queues.\n"
                              "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n",
                              dpaa_push_mode_max_queue);
 
@@ -857,7 +984,7 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
 
 static int
 dpaa_dev_add_mac_addr(struct rte_eth_dev *dev,
-                            struct ether_addr *addr,
+                            struct rte_ether_addr *addr,
                             uint32_t index,
                             __rte_unused uint32_t pool)
 {
@@ -887,7 +1014,7 @@ dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev,
 
 static int
 dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
-                      struct ether_addr *addr)
+                      struct rte_ether_addr *addr)
 {
        int ret;
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
@@ -901,6 +1028,40 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev,
        return ret;
 }
 
+static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
+                                     uint16_t queue_id)
+{
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+       struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
+
+       if (!rxq->is_static)
+               return -EINVAL;
+
+       return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
+}
+
+static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
+                                      uint16_t queue_id)
+{
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
+       struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
+       uint32_t temp;
+       ssize_t temp1;
+
+       if (!rxq->is_static)
+               return -EINVAL;
+
+       qman_fq_portal_irqsource_remove(rxq->qp, ~0);
+
+       temp1 = read(rxq->q_fd, &temp, sizeof(temp));
+       if (temp1 != sizeof(temp))
+               DPAA_EVENTDEV_ERR("irq read error");
+
+       qman_fq_portal_thread_irq(rxq->qp);
+
+       return 0;
+}
+
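
These callbacks hook the per-queue q_fd (registered in the queue-setup hunk above) into the generic Rx interrupt machinery, so an application can sleep on a queue instead of spinning. A sketch of the application side, assuming the queue is one of the static push-mode queues (error handling omitted):

    #include <rte_ethdev.h>
    #include <rte_interrupts.h>

    /* Sketch: block until the queue has traffic, then resume polling. */
    static void
    wait_for_rx(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_epoll_event ev;

            /* Register the queue's fd with this thread's epoll set. */
            rte_eth_dev_rx_intr_ctl_q(port_id, queue_id,
                                      RTE_EPOLL_PER_THREAD,
                                      RTE_INTR_EVENT_ADD, NULL);

            rte_eth_dev_rx_intr_enable(port_id, queue_id);
            rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1 /* block */);
            rte_eth_dev_rx_intr_disable(port_id, queue_id);

            /* Typically the queue is then drained with
             * rte_eth_rx_burst() before re-arming the interrupt.
             */
    }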
 static struct eth_dev_ops dpaa_devops = {
        .dev_configure            = dpaa_eth_dev_configure,
        .dev_start                = dpaa_eth_dev_start,
@@ -938,6 +1099,9 @@ static struct eth_dev_ops dpaa_devops = {
        .mac_addr_set             = dpaa_dev_set_mac_addr,
 
        .fw_version_get           = dpaa_fw_version_get,
+
+       .rx_queue_intr_enable     = dpaa_dev_queue_intr_enable,
+       .rx_queue_intr_disable    = dpaa_dev_queue_intr_disable,
 };
 
 static bool
@@ -1012,7 +1176,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
 {
        struct qm_mcc_initfq opts = {0};
        int ret;
-       u32 flags = 0;
+       u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE;
        struct qm_mcc_initcgr cgr_opts = {
                .we_mask = QM_CGR_WE_CS_THRES |
                                QM_CGR_WE_CSTD_EN |
@@ -1023,17 +1187,18 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
                }
        };
 
-       PMD_INIT_FUNC_TRACE();
-
-       ret = qman_reserve_fqid(fqid);
-       if (ret) {
-               DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
-                            fqid, ret);
-               return -EINVAL;
+       if (fqid) {
+               ret = qman_reserve_fqid(fqid);
+               if (ret) {
+                       DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
+                                    fqid, ret);
+                       return -EINVAL;
+               }
+       } else {
+               flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
        }
-
        DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
-       ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq);
+       ret = qman_create_fq(fqid, flags, fq);
        if (ret) {
                DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d",
                        fqid, ret);
@@ -1052,7 +1217,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
                if (ret) {
                        DPAA_PMD_WARN(
                                "rx taildrop init fail on rx fqid 0x%x(ret=%d)",
-                               fqid, ret);
+                               fq->fqid, ret);
                        goto without_cgr;
                }
                opts.we_mask |= QM_INITFQ_WE_CGID;
@@ -1060,7 +1225,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx,
                opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
        }
 without_cgr:
-       ret = qman_init_fq(fq, flags, &opts);
+       ret = qman_init_fq(fq, 0, &opts);
        if (ret)
                DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret);
        return ret;
@@ -1073,8 +1238,6 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
        struct qm_mcc_initfq opts = {0};
        int ret;
 
-       PMD_INIT_FUNC_TRACE();
-
        ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
                             QMAN_FQ_FLAG_TO_DCPORTAL, fq);
        if (ret) {
@@ -1134,7 +1297,7 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid)
 static int
 dpaa_dev_init(struct rte_eth_dev *eth_dev)
 {
-       int num_cores, num_rx_fqs, fqid;
+       int num_rx_fqs, fqid;
        int loop, ret = 0;
        int dev_id;
        struct rte_dpaa_device *dpaa_device;
@@ -1146,9 +1309,19 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 
        PMD_INIT_FUNC_TRACE();
 
+       dpaa_intf = eth_dev->data->dev_private;
        /* For secondary processes, the primary has done all the work */
-       if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               eth_dev->dev_ops = &dpaa_devops;
+               /* Plugging of UCODE burst API not supported in secondary
+                * processes; the default Rx/Tx functions are used instead.
+                */
+               eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
+               eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+               qman_set_fq_lookup_table(
+                               dpaa_intf->rx_queues->qman_fq_lookup_table);
+#endif
                return 0;
+       }
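
With the ops and default burst functions now plugged for secondary processes, a secondary can attach to an already-probed port and poll it directly; a sketch (port and queue numbers illustrative):

    #include <rte_eal.h>
    #include <rte_ethdev.h>
    #include <rte_mbuf.h>

    /* Sketch: run with --proc-type=secondary while the primary is up. */
    int
    main(int argc, char **argv)
    {
            struct rte_mbuf *pkts[32];
            uint16_t nb;

            if (rte_eal_init(argc, argv) < 0)
                    return -1;

            /* Works because dpaa_dev_init() now plugs the burst
             * functions for secondary processes as well.
             */
            nb = rte_eth_rx_burst(0 /* port */, 0 /* queue */, pkts, 32);
            (void)nb;
            return 0;
    }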
 
        dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
        dev_id = dpaa_device->id.dev_id;
@@ -1213,7 +1386,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
                if (default_q)
                        fqid = cfg->rx_def;
                else
-                       fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid *
+                       fqid = DPAA_PCD_FQID_START + dpaa_intf->fif->mac_idx *
                                DPAA_PCD_FQID_MULTIPLIER + loop;
 
                if (dpaa_intf->cgr_rx)
@@ -1229,23 +1402,22 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
        dpaa_intf->nb_rx_queues = num_rx_fqs;
 
 	/* Initialise Tx FQs. Have as many Tx FQs as number of cores */
-       num_cores = rte_lcore_count();
        dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
-               num_cores, MAX_CACHELINE);
+               MAX_DPAA_CORES, MAX_CACHELINE);
        if (!dpaa_intf->tx_queues) {
                DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
                ret = -ENOMEM;
                goto free_rx;
        }
 
-       for (loop = 0; loop < num_cores; loop++) {
+       for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
                ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
                                         fman_intf);
                if (ret)
                        goto free_tx;
                dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
        }
-       dpaa_intf->nb_tx_queues = num_cores;
+       dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
 
 #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
        dpaa_debug_queue_init(&dpaa_intf->debug_queues[
@@ -1264,7 +1436,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
        /* reset bpool list, initialize bpool dynamically */
        list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) {
                list_del(&bp->node);
-               free(bp);
+               rte_free(bp);
        }
 
        /* Populate ethdev structure */
@@ -1274,17 +1446,17 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 
        /* Allocate memory for storing MAC addresses */
        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr",
-               ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
+               RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0);
        if (eth_dev->data->mac_addrs == NULL) {
                DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
                                                "store MAC addresses",
-                               ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
+                               RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
                ret = -ENOMEM;
                goto free_tx;
        }
 
        /* copy the primary mac address */
-       ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
+       rte_ether_addr_copy(&fman_intf->mac_addr, &eth_dev->data->mac_addrs[0]);
 
        RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
                dpaa_device->name,
@@ -1304,6 +1476,9 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
        fman_if_reset_mcast_filter_table(fman_intf);
        /* Reset interface statistics */
        fman_if_stats_reset(fman_intf);
+       /* Disable SG by default */
+       fman_if_set_sg(fman_intf, 0);
+       fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
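
Spelled out, the probe-time frame ceiling set here corresponds to a plain 1500-byte MTU:

    RTE_ETHER_MAX_LEN (1518) + VLAN_TAG_SIZE (4) = 1522
        = 1500 (MTU) + 14 (Ethernet header) + 4 (CRC) + 4 (one VLAN tag)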
 
        return 0;
 
@@ -1360,10 +1535,6 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)
        rte_free(dpaa_intf->tx_queues);
        dpaa_intf->tx_queues = NULL;
 
-       /* free memory for storing MAC addresses */
-       rte_free(dev->data->mac_addrs);
-       dev->data->mac_addrs = NULL;
-
        dev->dev_ops = NULL;
        dev->rx_pkt_burst = NULL;
        dev->tx_pkt_burst = NULL;
@@ -1372,7 +1543,7 @@ dpaa_dev_uninit(struct rte_eth_dev *dev)
 }
 
 static int
-rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
               struct rte_dpaa_device *dpaa_dev)
 {
        int diag;
@@ -1381,6 +1552,16 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
 
        PMD_INIT_FUNC_TRACE();
 
+       if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
+               RTE_PKTMBUF_HEADROOM) {
+               DPAA_PMD_ERR(
+               "RTE_PKTMBUF_HEADROOM(%d) must be >= DPAA annotation size(%d)",
+               RTE_PKTMBUF_HEADROOM,
+               DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
+
+               return -1;
+       }
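
All three terms in this guard are compile-time constants, so the same invariant could also be enforced at build time; a sketch using the standard macro from rte_common.h (it expands to a statement, so it must sit inside a function):

    #include <rte_common.h>

    /* Fails compilation if the headroom cannot hold the annotation. */
    RTE_BUILD_BUG_ON(DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE >
                     RTE_PKTMBUF_HEADROOM);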
+
        /* In case of secondary process, the device is already configured
         * and no further action is required, except portal initialization
         * and verifying secondary attachment to port name.
@@ -1389,25 +1570,13 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
                eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
                if (!eth_dev)
                        return -ENOMEM;
+               eth_dev->device = &dpaa_dev->device;
+               eth_dev->dev_ops = &dpaa_devops;
                rte_eth_dev_probing_finish(eth_dev);
                return 0;
        }
 
-       if (!is_global_init) {
-               /* One time load of Qman/Bman drivers */
-               ret = qman_global_init();
-               if (ret) {
-                       DPAA_PMD_ERR("QMAN initialization failed: %d",
-                                    ret);
-                       return ret;
-               }
-               ret = bman_global_init();
-               if (ret) {
-                       DPAA_PMD_ERR("BMAN initialization failed: %d",
-                                    ret);
-                       return ret;
-               }
-
+       if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
                if (access("/tmp/fmc.bin", F_OK) == -1) {
                        RTE_LOG(INFO, PMD,
                                "* FMC not configured.Enabling default mode\n");
@@ -1439,22 +1608,30 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
                }
        }
 
-       eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
-       if (eth_dev == NULL)
-               return -ENOMEM;
+       /* In case of secondary process, the device is already configured
+        * and no further action is required, except portal initialization
+        * and verifying secondary attachment to port name.
+        */
+       if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+               eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name);
+               if (!eth_dev)
+                       return -ENOMEM;
+       } else {
+               eth_dev = rte_eth_dev_allocate(dpaa_dev->name);
+               if (eth_dev == NULL)
+                       return -ENOMEM;
 
-       eth_dev->data->dev_private = rte_zmalloc(
-                                       "ethdev private structure",
-                                       sizeof(struct dpaa_if),
-                                       RTE_CACHE_LINE_SIZE);
-       if (!eth_dev->data->dev_private) {
-               DPAA_PMD_ERR("Cannot allocate memzone for port data");
-               rte_eth_dev_release_port(eth_dev);
-               return -ENOMEM;
+               eth_dev->data->dev_private = rte_zmalloc(
+                                               "ethdev private structure",
+                                               sizeof(struct dpaa_if),
+                                               RTE_CACHE_LINE_SIZE);
+               if (!eth_dev->data->dev_private) {
+                       DPAA_PMD_ERR("Cannot allocate memzone for port data");
+                       rte_eth_dev_release_port(eth_dev);
+                       return -ENOMEM;
+               }
        }
-
        eth_dev->device = &dpaa_dev->device;
-       eth_dev->device->driver = &dpaa_drv->driver;
        dpaa_dev->eth_dev = eth_dev;
 
        /* Invoke PMD device initialization function */
@@ -1464,9 +1641,6 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
                return 0;
        }
 
-       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-               rte_free(eth_dev->data->dev_private);
-
        rte_eth_dev_release_port(eth_dev);
        return diag;
 }
@@ -1481,9 +1655,6 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
        eth_dev = dpaa_dev->eth_dev;
        dpaa_dev_uninit(eth_dev);
 
-       if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-               rte_free(eth_dev->data->dev_private);
-
        rte_eth_dev_release_port(eth_dev);
 
        return 0;