net/dpaa: fix event queue detach
[dpdk.git] / drivers / net / dpaa / dpaa_ethdev.c
index c244c6f..e5a072c 100644 (file)
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-               DEV_RX_OFFLOAD_JUMBO_FRAME |
-               DEV_RX_OFFLOAD_SCATTER;
+               RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-               DEV_RX_OFFLOAD_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM |
-               DEV_RX_OFFLOAD_TCP_CKSUM |
-               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_RSS_HASH;
+               RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+               RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_RX_OFFLOAD_RSS_HASH;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-               DEV_TX_OFFLOAD_MT_LOCKFREE |
-               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+               RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+               RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-               DEV_TX_OFFLOAD_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_UDP_CKSUM |
-               DEV_TX_OFFLOAD_TCP_CKSUM |
-               DEV_TX_OFFLOAD_SCTP_CKSUM |
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_MULTI_SEGS;
+               RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* Keep track of whether QMAN and BMAN have been globally initialized */
 static int is_global_init;
@@ -167,8 +166,6 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
        PMD_INIT_FUNC_TRACE();
 
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN)
-               return -EINVAL;
        /*
         * Refuse mtu that requires the support of scattered packets
         * when this feature has not been enabled before.
@@ -187,13 +184,6 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       if (mtu > RTE_ETHER_MTU)
-               dev->data->dev_conf.rxmode.offloads |=
-                                               DEV_RX_OFFLOAD_JUMBO_FRAME;
-       else
-               dev->data->dev_conf.rxmode.offloads &=
-                                               ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
        fman_if_set_maxfrm(dev->process_private, frame_size);
 
        return 0;
@@ -205,6 +195,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint64_t tx_offloads = eth_conf->txmode.offloads;
+       struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct rte_device *rdev = dev->device;
        struct rte_eth_link *link = &dev->data->dev_link;
        struct rte_dpaa_device *dpaa_dev;
@@ -213,14 +204,24 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
        struct rte_intr_handle *intr_handle;
        uint32_t max_rx_pktlen;
        int speed, duplex;
-       int ret;
+       int ret, rx_status;
 
        PMD_INIT_FUNC_TRACE();
 
        dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
-       intr_handle = &dpaa_dev->intr_handle;
+       intr_handle = dpaa_dev->intr_handle;
        __fif = container_of(fif, struct __fman_if, __if);
 
+       /* Check if interface is enabled in case of shared MAC */
+       if (fif->is_shared_mac) {
+               rx_status = fman_if_get_rx_status(fif);
+               if (!rx_status) {
+                       DPAA_PMD_ERR("%s Interface not enabled in kernel!",
+                                    dpaa_intf->name);
+                       return -EHOSTDOWN;
+               }
+       }
+
        /* Rx offloads which are enabled by default */
        if (dev_rx_offloads_nodis & ~rx_offloads) {
                DPAA_PMD_INFO(
@@ -248,7 +249,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
        fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
 
-       if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
                DPAA_PMD_DEBUG("enabling scatter mode");
                fman_if_set_sg(dev->process_private, 1);
                dev->data->scattered_rx = 1;
@@ -265,13 +266,14 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
        }
 
        /* if the interrupts were configured on this devices*/
-       if (intr_handle && intr_handle->fd) {
+       if (intr_handle && rte_intr_fd_get(intr_handle)) {
                if (dev->data->dev_conf.intr_conf.lsc != 0)
                        rte_intr_callback_register(intr_handle,
                                           dpaa_interrupt_handler,
                                           (void *)dev);
 
-               ret = dpaa_intr_enable(__fif->node_name, intr_handle->fd);
+               ret = dpaa_intr_enable(__fif->node_name,
+                                      rte_intr_fd_get(intr_handle));
                if (ret) {
                        if (dev->data->dev_conf.intr_conf.lsc != 0) {
                                rte_intr_callback_unregister(intr_handle,
@@ -293,43 +295,43 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
 
        /* Configure link only if link is UP*/
        if (link->link_status) {
-               if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+               if (eth_conf->link_speeds == RTE_ETH_LINK_SPEED_AUTONEG) {
                        /* Start autoneg only if link is not in autoneg mode */
                        if (!link->link_autoneg)
                                dpaa_restart_link_autoneg(__fif->node_name);
-               } else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
-                       switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
-                       case ETH_LINK_SPEED_10M_HD:
-                               speed = ETH_SPEED_NUM_10M;
-                               duplex = ETH_LINK_HALF_DUPLEX;
+               } else if (eth_conf->link_speeds & RTE_ETH_LINK_SPEED_FIXED) {
+                       switch (eth_conf->link_speeds & ~RTE_ETH_LINK_SPEED_FIXED) {
+                       case RTE_ETH_LINK_SPEED_10M_HD:
+                               speed = RTE_ETH_SPEED_NUM_10M;
+                               duplex = RTE_ETH_LINK_HALF_DUPLEX;
                                break;
-                       case ETH_LINK_SPEED_10M:
-                               speed = ETH_SPEED_NUM_10M;
-                               duplex = ETH_LINK_FULL_DUPLEX;
+                       case RTE_ETH_LINK_SPEED_10M:
+                               speed = RTE_ETH_SPEED_NUM_10M;
+                               duplex = RTE_ETH_LINK_FULL_DUPLEX;
                                break;
-                       case ETH_LINK_SPEED_100M_HD:
-                               speed = ETH_SPEED_NUM_100M;
-                               duplex = ETH_LINK_HALF_DUPLEX;
+                       case RTE_ETH_LINK_SPEED_100M_HD:
+                               speed = RTE_ETH_SPEED_NUM_100M;
+                               duplex = RTE_ETH_LINK_HALF_DUPLEX;
                                break;
-                       case ETH_LINK_SPEED_100M:
-                               speed = ETH_SPEED_NUM_100M;
-                               duplex = ETH_LINK_FULL_DUPLEX;
+                       case RTE_ETH_LINK_SPEED_100M:
+                               speed = RTE_ETH_SPEED_NUM_100M;
+                               duplex = RTE_ETH_LINK_FULL_DUPLEX;
                                break;
-                       case ETH_LINK_SPEED_1G:
-                               speed = ETH_SPEED_NUM_1G;
-                               duplex = ETH_LINK_FULL_DUPLEX;
+                       case RTE_ETH_LINK_SPEED_1G:
+                               speed = RTE_ETH_SPEED_NUM_1G;
+                               duplex = RTE_ETH_LINK_FULL_DUPLEX;
                                break;
-                       case ETH_LINK_SPEED_2_5G:
-                               speed = ETH_SPEED_NUM_2_5G;
-                               duplex = ETH_LINK_FULL_DUPLEX;
+                       case RTE_ETH_LINK_SPEED_2_5G:
+                               speed = RTE_ETH_SPEED_NUM_2_5G;
+                               duplex = RTE_ETH_LINK_FULL_DUPLEX;
                                break;
-                       case ETH_LINK_SPEED_10G:
-                               speed = ETH_SPEED_NUM_10G;
-                               duplex = ETH_LINK_FULL_DUPLEX;
+                       case RTE_ETH_LINK_SPEED_10G:
+                               speed = RTE_ETH_SPEED_NUM_10G;
+                               duplex = RTE_ETH_LINK_FULL_DUPLEX;
                                break;
                        default:
-                               speed = ETH_SPEED_NUM_NONE;
-                               duplex = ETH_LINK_FULL_DUPLEX;
+                               speed = RTE_ETH_SPEED_NUM_NONE;
+                               duplex = RTE_ETH_LINK_FULL_DUPLEX;
                                break;
                        }
                        /* Set link speed */
@@ -378,9 +380,13 @@ static void dpaa_interrupt_handler(void *param)
        int bytes_read;
 
        dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
-       intr_handle = &dpaa_dev->intr_handle;
+       intr_handle = dpaa_dev->intr_handle;
+
+       if (rte_intr_fd_get(intr_handle) < 0)
+               return;
 
-       bytes_read = read(intr_handle->fd, &buf, sizeof(uint64_t));
+       bytes_read = read(rte_intr_fd_get(intr_handle), &buf,
+                         sizeof(uint64_t));
        if (bytes_read < 0)
                DPAA_PMD_ERR("Error reading eventfd\n");
        dpaa_eth_link_update(dev, 0);
@@ -450,7 +456,7 @@ static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
        }
 
        dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
-       intr_handle = &dpaa_dev->intr_handle;
+       intr_handle = dpaa_dev->intr_handle;
        __fif = container_of(fif, struct __fman_if, __if);
 
        ret = dpaa_eth_dev_stop(dev);
@@ -459,7 +465,7 @@ static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
        if (link->link_status && !link->link_autoneg)
                dpaa_restart_link_autoneg(__fif->node_name);
 
-       if (intr_handle && intr_handle->fd &&
+       if (intr_handle && rte_intr_fd_get(intr_handle) &&
            dev->data->dev_conf.intr_conf.lsc != 0) {
                dpaa_intr_disable(__fif->node_name);
                rte_intr_callback_unregister(intr_handle,
@@ -468,8 +474,7 @@ static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
        }
 
        /* release configuration memory */
-       if (dpaa_intf->fc_conf)
-               rte_free(dpaa_intf->fc_conf);
+       rte_free(dpaa_intf->fc_conf);
 
        /* Release RX congestion Groups */
        if (dpaa_intf->cgr_rx) {
@@ -545,30 +550,30 @@ static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
        dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER;
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs = 0;
-       dev_info->max_vmdq_pools = ETH_16_POOLS;
+       dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
        dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
 
        if (fif->mac_type == fman_mac_1g) {
-               dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-                                       | ETH_LINK_SPEED_10M
-                                       | ETH_LINK_SPEED_100M_HD
-                                       | ETH_LINK_SPEED_100M
-                                       | ETH_LINK_SPEED_1G;
+               dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+                                       | RTE_ETH_LINK_SPEED_10M
+                                       | RTE_ETH_LINK_SPEED_100M_HD
+                                       | RTE_ETH_LINK_SPEED_100M
+                                       | RTE_ETH_LINK_SPEED_1G;
        } else if (fif->mac_type == fman_mac_2_5g) {
-               dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-                                       | ETH_LINK_SPEED_10M
-                                       | ETH_LINK_SPEED_100M_HD
-                                       | ETH_LINK_SPEED_100M
-                                       | ETH_LINK_SPEED_1G
-                                       | ETH_LINK_SPEED_2_5G;
+               dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+                                       | RTE_ETH_LINK_SPEED_10M
+                                       | RTE_ETH_LINK_SPEED_100M_HD
+                                       | RTE_ETH_LINK_SPEED_100M
+                                       | RTE_ETH_LINK_SPEED_1G
+                                       | RTE_ETH_LINK_SPEED_2_5G;
        } else if (fif->mac_type == fman_mac_10g) {
-               dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
-                                       | ETH_LINK_SPEED_10M
-                                       | ETH_LINK_SPEED_100M_HD
-                                       | ETH_LINK_SPEED_100M
-                                       | ETH_LINK_SPEED_1G
-                                       | ETH_LINK_SPEED_2_5G
-                                       | ETH_LINK_SPEED_10G;
+               dev_info->speed_capa = RTE_ETH_LINK_SPEED_10M_HD
+                                       | RTE_ETH_LINK_SPEED_10M
+                                       | RTE_ETH_LINK_SPEED_100M_HD
+                                       | RTE_ETH_LINK_SPEED_100M
+                                       | RTE_ETH_LINK_SPEED_1G
+                                       | RTE_ETH_LINK_SPEED_2_5G
+                                       | RTE_ETH_LINK_SPEED_10G;
        } else {
                DPAA_PMD_ERR("invalid link_speed: %s, %d",
                             dpaa_intf->name, fif->mac_type);
@@ -601,13 +606,12 @@ dpaa_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
-                       {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
-                       {DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
-                       {DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-                       {DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-                       {DEV_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-                       {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-                       {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}
+                       {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"},
+                       {RTE_ETH_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+                       {RTE_ETH_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+                       {RTE_ETH_RX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+                       {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}
        };
 
        /* Update Rx offload info */
@@ -634,14 +638,14 @@ dpaa_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
-                       {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-                       {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-                       {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-                       {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-                       {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-                       {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-                       {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-                       {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+                       {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+                       {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+                       {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+                       {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+                       {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+                       {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+                       {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
        };
 
        /* Update Tx offload info */
@@ -675,7 +679,7 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
                        ret = dpaa_get_link_status(__fif->node_name, link);
                        if (ret)
                                return ret;
-                       if (link->link_status == ETH_LINK_DOWN &&
+                       if (link->link_status == RTE_ETH_LINK_DOWN &&
                            wait_to_complete)
                                rte_delay_ms(CHECK_INTERVAL);
                        else
@@ -686,15 +690,15 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev,
        }
 
        if (ioctl_version < 2) {
-               link->link_duplex = ETH_LINK_FULL_DUPLEX;
-               link->link_autoneg = ETH_LINK_AUTONEG;
+               link->link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
+               link->link_autoneg = RTE_ETH_LINK_AUTONEG;
 
                if (fif->mac_type == fman_mac_1g)
-                       link->link_speed = ETH_SPEED_NUM_1G;
+                       link->link_speed = RTE_ETH_SPEED_NUM_1G;
                else if (fif->mac_type == fman_mac_2_5g)
-                       link->link_speed = ETH_SPEED_NUM_2_5G;
+                       link->link_speed = RTE_ETH_SPEED_NUM_2_5G;
                else if (fif->mac_type == fman_mac_10g)
-                       link->link_speed = ETH_SPEED_NUM_10G;
+                       link->link_speed = RTE_ETH_SPEED_NUM_10G;
                else
                        DPAA_PMD_ERR("invalid link_speed: %s, %d",
                                     dpaa_intf->name, fif->mac_type);
@@ -973,7 +977,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        if (max_rx_pktlen <= buffsz) {
                ;
        } else if (dev->data->dev_conf.rxmode.offloads &
-                       DEV_RX_OFFLOAD_SCATTER) {
+                       RTE_ETH_RX_OFFLOAD_SCATTER) {
                if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
                        DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
                                "MaxSGlist %d",
@@ -1036,7 +1040,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                                   QM_FQCTRL_CTXASTASHING |
                                   QM_FQCTRL_PREFERINCACHE;
                opts.fqd.context_a.stashing.exclusive = 0;
-               /* In muticore scenario stashing becomes a bottleneck on LS1046.
+               /* In multicore scenario stashing becomes a bottleneck on LS1046.
                 * So do not enable stashing in this case
                 */
                if (dpaa_svr_family != SVR_LS1046A_FAMILY)
@@ -1083,26 +1087,38 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                rxq->qp = qp;
 
                /* Set up the device interrupt handler */
-               if (!dev->intr_handle) {
+               if (dev->intr_handle == NULL) {
                        struct rte_dpaa_device *dpaa_dev;
                        struct rte_device *rdev = dev->device;
 
                        dpaa_dev = container_of(rdev, struct rte_dpaa_device,
                                                device);
-                       dev->intr_handle = &dpaa_dev->intr_handle;
-                       dev->intr_handle->intr_vec = rte_zmalloc(NULL,
-                                       dpaa_push_mode_max_queue, 0);
-                       if (!dev->intr_handle->intr_vec) {
+                       dev->intr_handle = dpaa_dev->intr_handle;
+                       if (rte_intr_vec_list_alloc(dev->intr_handle,
+                                       NULL, dpaa_push_mode_max_queue)) {
                                DPAA_PMD_ERR("intr_vec alloc failed");
                                return -ENOMEM;
                        }
-                       dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
-                       dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
+                       if (rte_intr_nb_efd_set(dev->intr_handle,
+                                       dpaa_push_mode_max_queue))
+                               return -rte_errno;
+
+                       if (rte_intr_max_intr_set(dev->intr_handle,
+                                       dpaa_push_mode_max_queue))
+                               return -rte_errno;
                }
 
-               dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
-               dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
-               dev->intr_handle->efds[queue_idx] = q_fd;
+               if (rte_intr_type_set(dev->intr_handle, RTE_INTR_HANDLE_EXT))
+                       return -rte_errno;
+
+               if (rte_intr_vec_list_index_set(dev->intr_handle,
+                                               queue_idx, queue_idx + 1))
+                       return -rte_errno;
+
+               if (rte_intr_efds_index_set(dev->intr_handle, queue_idx,
+                                                  q_fd))
+                       return -rte_errno;
+
                rxq->q_fd = q_fd;
        }
        rxq->bp_array = rte_dpaa_bpid_info;
@@ -1195,23 +1211,17 @@ int
 dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
                int eth_rx_queue_id)
 {
-       struct qm_mcc_initfq opts;
+       struct qm_mcc_initfq opts = {0};
        int ret;
        u32 flags = 0;
        struct dpaa_if *dpaa_intf = dev->data->dev_private;
        struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
 
-       dpaa_poll_queue_default_config(&opts);
-
-       if (dpaa_intf->cgr_rx) {
-               opts.we_mask |= QM_INITFQ_WE_CGID;
-               opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
-               opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
-       }
-
+       qman_retire_fq(rxq, NULL);
+       qman_oos_fq(rxq);
        ret = qman_init_fq(rxq, flags, &opts);
        if (ret) {
-               DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
+               DPAA_PMD_ERR("detach rx fqid %d failed with ret: %d",
                             rxq->fqid, ret);
        }
 
@@ -1279,7 +1289,7 @@ static int dpaa_link_down(struct rte_eth_dev *dev)
        __fif = container_of(fif, struct __fman_if, __if);
 
        if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-               dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
+               dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_DOWN);
        else
                return dpaa_eth_dev_stop(dev);
        return 0;
@@ -1295,7 +1305,7 @@ static int dpaa_link_up(struct rte_eth_dev *dev)
        __fif = container_of(fif, struct __fman_if, __if);
 
        if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
-               dpaa_update_link_status(__fif->node_name, ETH_LINK_UP);
+               dpaa_update_link_status(__fif->node_name, RTE_ETH_LINK_UP);
        else
                dpaa_eth_dev_start(dev);
        return 0;
@@ -1325,10 +1335,10 @@ dpaa_flow_ctrl_set(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
-       if (fc_conf->mode == RTE_FC_NONE) {
+       if (fc_conf->mode == RTE_ETH_FC_NONE) {
                return 0;
-       } else if (fc_conf->mode == RTE_FC_TX_PAUSE ||
-                fc_conf->mode == RTE_FC_FULL) {
+       } else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE ||
+                fc_conf->mode == RTE_ETH_FC_FULL) {
                fman_if_set_fc_threshold(dev->process_private,
                                         fc_conf->high_water,
                                         fc_conf->low_water,
@@ -1372,11 +1382,11 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev,
        }
        ret = fman_if_get_fc_threshold(dev->process_private);
        if (ret) {
-               fc_conf->mode = RTE_FC_TX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
                fc_conf->pause_time =
                        fman_if_get_fc_quanta(dev->process_private);
        } else {
-               fc_conf->mode = RTE_FC_NONE;
+               fc_conf->mode = RTE_ETH_FC_NONE;
        }
 
        return 0;
@@ -1637,10 +1647,10 @@ static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf,
        fc_conf = dpaa_intf->fc_conf;
        ret = fman_if_get_fc_threshold(fman_intf);
        if (ret) {
-               fc_conf->mode = RTE_FC_TX_PAUSE;
+               fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
                fc_conf->pause_time = fman_if_get_fc_quanta(fman_intf);
        } else {
-               fc_conf->mode = RTE_FC_NONE;
+               fc_conf->mode = RTE_ETH_FC_NONE;
        }
 
        return 0;
@@ -1738,6 +1748,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq,
        /* no tx-confirmation */
        opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
        opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
+       if (fman_ip_rev >= FMAN_V3) {
+               /* Set B0V bit in contextA to set ASPID to 0 */
+               opts.fqd.context_a.hi |= 0x04000000;
+       }
        DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
 
        if (cgr_tx) {
@@ -1860,7 +1874,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
 
        dpaa_intf->name = dpaa_device->name;
 
-       /* save fman_if & cfg in the interface struture */
+       /* save fman_if & cfg in the interface structure */
        eth_dev->process_private = fman_intf;
        dpaa_intf->ifid = dev_id;
        dpaa_intf->cfg = cfg;
@@ -2163,7 +2177,7 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
                if (dpaa_svr_family == SVR_LS1043A_FAMILY)
                        dpaa_push_mode_max_queue = 0;
 
-               /* if push mode queues to be enabled. Currenly we are allowing
+               /* if push mode queues to be enabled. Currently we are allowing
                 * only one queue per thread.
                 */
                if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {