drivers: remove direct access to interrupt handle
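
The contents of struct rte_intr_handle are no longer exposed to drivers, so
the handle's members cannot be dereferenced directly; the handle is reached
through a pointer on the bus device and queried via the EAL accessors such
as rte_intr_fd_get(). As a rough sketch of the pattern applied in the hunks
below (all identifiers are taken from this driver's LSC interrupt setup):

    /* Before: handle embedded in the bus device, fd read directly */
    intr_handle = &dpaa2_dev->intr_handle;
    if (intr_handle && (intr_handle->fd) &&
        (dev->data->dev_conf.intr_conf.lsc != 0))
            rte_intr_callback_register(intr_handle,
                                       dpaa2_interrupt_handler, dev);

    /* After: opaque handle pointer, fd obtained via the EAL accessor */
    intr_handle = dpaa2_dev->intr_handle;
    if (intr_handle && rte_intr_fd_get(intr_handle) &&
        dev->data->dev_conf.intr_conf.lsc != 0)
            rte_intr_callback_register(intr_handle,
                                       dpaa2_interrupt_handler, dev);
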
diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 9011dcf..73d17f7 100644
 
 /* Supported Rx offloads */
 static uint64_t dev_rx_offloads_sup =
-               DEV_RX_OFFLOAD_CHECKSUM |
-               DEV_RX_OFFLOAD_SCTP_CKSUM |
-               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_VLAN_FILTER |
-               DEV_RX_OFFLOAD_JUMBO_FRAME |
-               DEV_RX_OFFLOAD_TIMESTAMP;
+               RTE_ETH_RX_OFFLOAD_CHECKSUM |
+               RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
+               RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+               RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+               RTE_ETH_RX_OFFLOAD_TIMESTAMP;
 
 /* Rx offloads which cannot be disabled */
 static uint64_t dev_rx_offloads_nodis =
-               DEV_RX_OFFLOAD_RSS_HASH |
-               DEV_RX_OFFLOAD_SCATTER;
+               RTE_ETH_RX_OFFLOAD_RSS_HASH |
+               RTE_ETH_RX_OFFLOAD_SCATTER;
 
 /* Supported Tx offloads */
 static uint64_t dev_tx_offloads_sup =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_UDP_CKSUM |
-               DEV_TX_OFFLOAD_TCP_CKSUM |
-               DEV_TX_OFFLOAD_SCTP_CKSUM |
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_MT_LOCKFREE |
-               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+               RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
+               RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+               RTE_ETH_TX_OFFLOAD_MT_LOCKFREE |
+               RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
 /* Tx offloads which cannot be disabled */
 static uint64_t dev_tx_offloads_nodis =
-               DEV_TX_OFFLOAD_MULTI_SEGS;
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
 
 /* enable timestamp in mbuf */
 bool dpaa2_enable_ts[RTE_MAX_ETHPORTS];
@@ -143,7 +142,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
        PMD_INIT_FUNC_TRACE();
 
-       if (mask & ETH_VLAN_FILTER_MASK) {
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
                /* VLAN Filter not available */
                if (!priv->max_vlan_filters) {
                        DPAA2_PMD_INFO("VLAN filter not available");
@@ -151,7 +150,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                }
 
                if (dev->data->dev_conf.rxmode.offloads &
-                       DEV_RX_OFFLOAD_VLAN_FILTER)
+                       RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, true);
                else
@@ -226,9 +225,11 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev,
                       mc_ver_info.major,
                       mc_ver_info.minor,
                       mc_ver_info.revision);
+       if (ret < 0)
+               return -EINVAL;
 
        ret += 1; /* add the size of '\0' */
-       if (fw_size < (uint32_t)ret)
+       if (fw_size < (size_t)ret)
                return ret;
        else
                return 0;
@@ -250,13 +251,13 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                                        dev_rx_offloads_nodis;
        dev_info->tx_offload_capa = dev_tx_offloads_sup |
                                        dev_tx_offloads_nodis;
-       dev_info->speed_capa = ETH_LINK_SPEED_1G |
-                       ETH_LINK_SPEED_2_5G |
-                       ETH_LINK_SPEED_10G;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_1G |
+                       RTE_ETH_LINK_SPEED_2_5G |
+                       RTE_ETH_LINK_SPEED_10G;
 
        dev_info->max_hash_mac_addrs = 0;
        dev_info->max_vfs = 0;
-       dev_info->max_vmdq_pools = ETH_16_POOLS;
+       dev_info->max_vmdq_pools = RTE_ETH_16_POOLS;
        dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 
        dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size;
@@ -269,10 +270,10 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC;
 
        if (dpaa2_svr_family == SVR_LX2160A) {
-               dev_info->speed_capa |= ETH_LINK_SPEED_25G |
-                               ETH_LINK_SPEED_40G |
-                               ETH_LINK_SPEED_50G |
-                               ETH_LINK_SPEED_100G;
+               dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G |
+                               RTE_ETH_LINK_SPEED_40G |
+                               RTE_ETH_LINK_SPEED_50G |
+                               RTE_ETH_LINK_SPEED_100G;
        }
 
        return 0;
@@ -290,16 +291,15 @@ dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev,
                uint64_t flags;
                const char *output;
        } rx_offload_map[] = {
-                       {DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"},
-                       {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-                       {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-                       {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
-                       {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
-                       {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
-                       {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
-                       {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
-                       {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
-                       {DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
+                       {RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"},
+                       {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+                       {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
+                       {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
+                       {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
+                       {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
+                       {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"},
+                       {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"}
        };
 
        /* Update Rx offload info */
@@ -326,15 +326,15 @@ dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev,
                uint64_t flags;
                const char *output;
        } tx_offload_map[] = {
-                       {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
-                       {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
-                       {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
-                       {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
-                       {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
-                       {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
-                       {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
-                       {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
-                       {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
+                       {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"},
+                       {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
+                       {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
+                       {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"},
+                       {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"},
+                       {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"},
+                       {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"},
+                       {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"},
+                       {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"}
        };
 
        /* Update Tx offload info */
@@ -538,6 +538,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        int tx_l3_csum_offload = false;
        int tx_l4_csum_offload = false;
        int ret, tc_index;
+       uint32_t max_rx_pktlen;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -557,26 +558,22 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                tx_offloads, dev_tx_offloads_nodis);
        }
 
-       if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
-                       ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
-                               priv->token, eth_conf->rxmode.max_rx_pkt_len
-                               - RTE_ETHER_CRC_LEN);
-                       if (ret) {
-                               DPAA2_PMD_ERR(
-                                       "Unable to set mtu. check config");
-                               return ret;
-                       }
-                       dev->data->mtu =
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                               RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
-                               VLAN_TAG_SIZE;
-               } else {
-                       return -1;
+       max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
+                               RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+       if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
+               ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
+                       priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
+               if (ret != 0) {
+                       DPAA2_PMD_ERR("Unable to set mtu. check config");
+                       return ret;
                }
+               DPAA2_PMD_INFO("MTU configured for the device: %d",
+                               dev->data->mtu);
+       } else {
+               return -1;
        }
 
-       if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
+       if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) {
                for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) {
                        ret = dpaa2_setup_flow_dist(dev,
                                        eth_conf->rx_adv_conf.rss_conf.rss_hf,
@@ -590,12 +587,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                }
        }
 
-       if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
                rx_l3_csum_offload = true;
 
-       if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
-               (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) ||
-               (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM))
+       if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) ||
+               (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) ||
+               (rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM))
                rx_l4_csum_offload = true;
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -613,7 +610,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        }
 
 #if !defined(RTE_LIBRTE_IEEE1588)
-       if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP)
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
 #endif
        {
                ret = rte_mbuf_dyn_rx_timestamp_register(
@@ -626,12 +623,12 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                dpaa2_enable_ts[dev->data->port_id] = true;
        }
 
-       if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+       if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
                tx_l3_csum_offload = true;
 
-       if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
-               (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
-               (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+       if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) ||
+               (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) ||
+               (tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM))
                tx_l4_csum_offload = true;
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
@@ -663,8 +660,8 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                }
        }
 
-       if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
-               dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+       if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+               dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK);
 
        dpaa2_tm_init(dev);
 
@@ -974,9 +971,9 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev,
 }
 
 static void
-dpaa2_dev_rx_queue_release(void *q __rte_unused)
+dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-       struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q;
+       struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id];
        struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private;
        struct fsl_mc_io *dpni =
                (struct fsl_mc_io *)priv->eth_dev->process_private;
@@ -1002,17 +999,10 @@ dpaa2_dev_rx_queue_release(void *q __rte_unused)
        }
 }
 
-static void
-dpaa2_dev_tx_queue_release(void *q __rte_unused)
-{
-       PMD_INIT_FUNC_TRACE();
-}
-
 static uint32_t
-dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+dpaa2_dev_rx_queue_count(void *rx_queue)
 {
        int32_t ret;
-       struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct dpaa2_queue *dpaa2_q;
        struct qbman_swp *swp;
        struct qbman_fq_query_np_rslt state;
@@ -1029,12 +1019,12 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        }
        swp = DPAA2_PER_LCORE_PORTAL;
 
-       dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id];
+       dpaa2_q = rx_queue;
 
        if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) {
                frame_cnt = qbman_fq_state_frame_count(&state);
-               DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u",
-                               rx_queue_id, frame_cnt);
+               DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u",
+                               rx_queue, frame_cnt);
        }
        return frame_cnt;
 }
@@ -1155,7 +1145,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
        struct rte_intr_handle *intr_handle;
 
        dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device);
-       intr_handle = &dpaa2_dev->intr_handle;
+       intr_handle = dpaa2_dev->intr_handle;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1226,8 +1216,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
        }
 
        /* if the interrupts were configured on this device */
-       if (intr_handle && (intr_handle->fd) &&
-           (dev->data->dev_conf.intr_conf.lsc != 0)) {
+       if (intr_handle && rte_intr_fd_get(intr_handle) &&
+           dev->data->dev_conf.intr_conf.lsc != 0) {
                /* Registering LSC interrupt handler */
                rte_intr_callback_register(intr_handle,
                                           dpaa2_interrupt_handler,
@@ -1266,8 +1256,8 @@ dpaa2_dev_stop(struct rte_eth_dev *dev)
        PMD_INIT_FUNC_TRACE();
 
        /* reset interrupt callback  */
-       if (intr_handle && (intr_handle->fd) &&
-           (dev->data->dev_conf.intr_conf.lsc != 0)) {
+       if (intr_handle && rte_intr_fd_get(intr_handle) &&
+           dev->data->dev_conf.intr_conf.lsc != 0) {
                /*disable dpni irqs */
                dpaa2_eth_setup_irqs(dev, 0);
 
@@ -1469,19 +1459,6 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       /* check that mtu is within the allowed range */
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
-               return -EINVAL;
-
-       if (frame_size > DPAA2_ETH_MAX_LEN)
-               dev->data->dev_conf.rxmode.offloads |=
-                                               DEV_RX_OFFLOAD_JUMBO_FRAME;
-       else
-               dev->data->dev_conf.rxmode.offloads &=
-                                               ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        /* Set the Max Rx frame length as 'mtu' +
         * Maximum Ethernet header length
         */
@@ -1793,8 +1770,8 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 static int
 dpaa2_xstats_get_names_by_id(
        struct rte_eth_dev *dev,
-       struct rte_eth_xstat_name *xstats_names,
        const uint64_t *ids,
+       struct rte_eth_xstat_name *xstats_names,
        unsigned int limit)
 {
        unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings);
@@ -1879,7 +1856,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
                        DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret);
                        return -1;
                }
-               if (state.up == ETH_LINK_DOWN &&
+               if (state.up == RTE_ETH_LINK_DOWN &&
                    wait_to_complete)
                        rte_delay_ms(CHECK_INTERVAL);
                else
@@ -1891,9 +1868,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev,
        link.link_speed = state.rate;
 
        if (state.options & DPNI_LINK_OPT_HALF_DUPLEX)
-               link.link_duplex = ETH_LINK_HALF_DUPLEX;
+               link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX;
        else
-               link.link_duplex = ETH_LINK_FULL_DUPLEX;
+               link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
 
        ret = rte_eth_linkstatus_set(dev, &link);
        if (ret == -1)
@@ -2054,9 +2031,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                 *      No TX side flow control (send Pause frame disabled)
                 */
                if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE))
-                       fc_conf->mode = RTE_FC_FULL;
+                       fc_conf->mode = RTE_ETH_FC_FULL;
                else
-                       fc_conf->mode = RTE_FC_RX_PAUSE;
+                       fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
        } else {
                /* DPNI_LINK_OPT_PAUSE not set
                 *  if ASYM_PAUSE set,
@@ -2066,9 +2043,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                 *      Flow control disabled
                 */
                if (state.options & DPNI_LINK_OPT_ASYM_PAUSE)
-                       fc_conf->mode = RTE_FC_TX_PAUSE;
+                       fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
                else
-                       fc_conf->mode = RTE_FC_NONE;
+                       fc_conf->mode = RTE_ETH_FC_NONE;
        }
 
        return ret;
@@ -2112,14 +2089,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
 
        /* update cfg with fc_conf */
        switch (fc_conf->mode) {
-       case RTE_FC_FULL:
+       case RTE_ETH_FC_FULL:
                /* Full flow control;
                 * OPT_PAUSE set, ASYM_PAUSE not set
                 */
                cfg.options |= DPNI_LINK_OPT_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
                break;
-       case RTE_FC_TX_PAUSE:
+       case RTE_ETH_FC_TX_PAUSE:
                /* Enable RX flow control
                 * OPT_PAUSE not set;
                 * ASYM_PAUSE set;
@@ -2127,7 +2104,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
                cfg.options &= ~DPNI_LINK_OPT_PAUSE;
                break;
-       case RTE_FC_RX_PAUSE:
+       case RTE_ETH_FC_RX_PAUSE:
                /* Enable TX Flow control
                 * OPT_PAUSE set
                 * ASYM_PAUSE set
@@ -2135,7 +2112,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
                cfg.options |= DPNI_LINK_OPT_PAUSE;
                cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE;
                break;
-       case RTE_FC_NONE:
+       case RTE_ETH_FC_NONE:
                /* Disable Flow control
                 * OPT_PAUSE not set
                 * ASYM_PAUSE not set
@@ -2271,7 +2248,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
 
                ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token,
                                   dpaa2_ethq->tc_index, flow_id,
-                                  OPR_OPT_CREATE, &ocfg);
+                                  OPR_OPT_CREATE, &ocfg, 0);
                if (ret) {
                        DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret);
                        return ret;
@@ -2380,6 +2357,22 @@ dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
        return 0;
 }
 
+void
+rte_pmd_dpaa2_thread_init(void)
+{
+       int ret;
+
+       if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+               ret = dpaa2_affine_qbman_swp();
+               if (ret) {
+                       DPAA2_PMD_ERR(
+                               "Failed to allocate IO portal, tid: %d\n",
+                               rte_gettid());
+                       return;
+               }
+       }
+}
+
 static struct eth_dev_ops dpaa2_ethdev_ops = {
        .dev_configure    = dpaa2_eth_dev_configure,
        .dev_start            = dpaa2_dev_start,
@@ -2409,7 +2402,6 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
        .rx_queue_setup    = dpaa2_dev_rx_queue_setup,
        .rx_queue_release  = dpaa2_dev_rx_queue_release,
        .tx_queue_setup    = dpaa2_dev_tx_queue_setup,
-       .tx_queue_release  = dpaa2_dev_tx_queue_release,
        .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
        .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
        .flow_ctrl_get        = dpaa2_flow_ctrl_get,
@@ -2804,6 +2796,11 @@ init_err:
        return ret;
 }
 
+int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev)
+{
+       return dev->device->driver == &rte_dpaa2_pmd.driver;
+}
+
 static int
 rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
                struct rte_dpaa2_device *dpaa2_dev)
@@ -2887,10 +2884,10 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
        .remove = rte_dpaa2_remove,
 };
 
-RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
-RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
+RTE_PMD_REGISTER_DPAA2(NET_DPAA2_PMD_DRIVER_NAME, rte_dpaa2_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(NET_DPAA2_PMD_DRIVER_NAME,
                DRIVER_LOOPBACK_MODE "=<int> "
                DRIVER_NO_PREFETCH_MODE "=<int>"
                DRIVER_TX_CONF "=<int>"
                DRIVER_ERROR_QUEUE "=<int>");
-RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);