diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
index 458d64b..7d40c18 100644
--- a/drivers/net/axgbe/axgbe_ethdev.c
+++ b/drivers/net/axgbe/axgbe_ethdev.c
 #include "rte_time.h"
 
 static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
-static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
 static int  axgbe_dev_configure(struct rte_eth_dev *dev);
 static int  axgbe_dev_start(struct rte_eth_dev *dev);
-static void axgbe_dev_stop(struct rte_eth_dev *dev);
+static int  axgbe_dev_stop(struct rte_eth_dev *dev);
 static void axgbe_dev_interrupt_handler(void *param);
 static int axgbe_dev_close(struct rte_eth_dev *dev);
+static int axgbe_dev_reset(struct rte_eth_dev *dev);
 static int axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static int axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
 static int axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
@@ -57,8 +57,8 @@ axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev,
                           unsigned int n);
 static int
 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
-                                struct rte_eth_xstat_name *xstats_names,
                                 const uint64_t *ids,
+                                struct rte_eth_xstat_name *xstats_names,
                                 unsigned int size);
 static int axgbe_dev_xstats_reset(struct rte_eth_dev *dev);
 static int axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
@@ -110,6 +110,11 @@ axgbe_set_tstamp_time(struct axgbe_port *pdata, unsigned int sec,
 static void
 axgbe_update_tstamp_addend(struct axgbe_port *pdata,
                        unsigned int addend);
+static int
+       axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on);
+static int axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+                               enum rte_vlan_type vlan_type, uint16_t tpid);
+static int axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 
 struct axgbe_xstats {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
@@ -216,6 +221,7 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
        .dev_start            = axgbe_dev_start,
        .dev_stop             = axgbe_dev_stop,
        .dev_close            = axgbe_dev_close,
+       .dev_reset            = axgbe_dev_reset,
        .promiscuous_enable   = axgbe_dev_promiscuous_enable,
        .promiscuous_disable  = axgbe_dev_promiscuous_disable,
        .allmulticast_enable  = axgbe_dev_allmulticast_enable,
@@ -251,6 +257,9 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
        .txq_info_get                 = axgbe_txq_info_get,
        .dev_supported_ptypes_get     = axgbe_dev_supported_ptypes_get,
        .mtu_set                = axgb_mtu_set,
+       .vlan_filter_set      = axgbe_vlan_filter_set,
+       .vlan_tpid_set        = axgbe_vlan_tpid_set,
+       .vlan_offload_set     = axgbe_vlan_offload_set,
        .timesync_enable              = axgbe_timesync_enable,
        .timesync_disable             = axgbe_timesync_disable,
        .timesync_read_rx_timestamp   = axgbe_timesync_read_rx_timestamp,
@@ -258,6 +267,7 @@ static const struct eth_dev_ops axgbe_eth_dev_ops = {
        .timesync_adjust_time         = axgbe_timesync_adjust_time,
        .timesync_read_time           = axgbe_timesync_read_time,
        .timesync_write_time          = axgbe_timesync_write_time,
+       .fw_version_get                 = axgbe_dev_fw_version_get,
 };
 
 static int axgbe_phy_reset(struct axgbe_port *pdata)
@@ -303,7 +313,7 @@ axgbe_dev_interrupt_handler(void *param)
                }
        }
        /* Unmask interrupts since disabled after generation */
-       rte_intr_ack(&pdata->pci_dev->intr_handle);
+       rte_intr_ack(pdata->pci_dev->intr_handle);
 }
 
 /*
@@ -316,7 +326,7 @@ axgbe_dev_configure(struct rte_eth_dev *dev)
        struct axgbe_port *pdata =  dev->data->dev_private;
        /* Checksum offload to hardware */
        pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
-                               DEV_RX_OFFLOAD_CHECKSUM;
+                               RTE_ETH_RX_OFFLOAD_CHECKSUM;
        return 0;
 }
 
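Note: RTE_ETH_RX_OFFLOAD_CHECKSUM is a convenience macro from rte_ethdev.h that ORs the three L3/L4 checksum flags, so the bitwise AND above is non-zero whenever any of them was requested at configure time. An equivalent spelled-out form, as a minimal sketch:

    /* RTE_ETH_RX_OFFLOAD_CHECKSUM == IPV4_CKSUM | UDP_CKSUM | TCP_CKSUM,
     * so either form of the test enables rx_csum_enable. */
    const uint64_t csum = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
                          RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
                          RTE_ETH_RX_OFFLOAD_TCP_CKSUM;
    pdata->rx_csum_enable = !!(dev->data->dev_conf.rxmode.offloads & csum);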
@@ -325,9 +335,9 @@ axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
 {
        struct axgbe_port *pdata = dev->data->dev_private;
 
-       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+       if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_RSS)
                pdata->rss_enable = 1;
-       else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+       else if (dev->data->dev_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_NONE)
                pdata->rss_enable = 0;
        else
                return  -1;
@@ -340,7 +350,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
        struct axgbe_port *pdata = dev->data->dev_private;
        int ret;
        struct rte_eth_dev_data *dev_data = dev->data;
-       uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
+       uint16_t max_pkt_len;
 
        dev->dev_ops = &axgbe_eth_dev_ops;
 
@@ -364,7 +374,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
        }
 
        /* enable uio/vfio intr/eventfd mapping */
-       rte_intr_enable(&pdata->pci_dev->intr_handle);
+       rte_intr_enable(pdata->pci_dev->intr_handle);
 
        /* phy start */
        pdata->phy_if.phy_start(pdata);
@@ -373,7 +383,9 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 
        rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
        rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
-       if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
+
+       max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+       if ((dev_data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ||
                                max_pkt_len > pdata->rx_buf_size)
                dev_data->scattered_rx = 1;
 
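The rxmode.max_rx_pkt_len field was removed from the ethdev API, so the maximum frame length is now derived from the MTU at start time rather than read from the configuration. A minimal sketch of the arithmetic, using the standard constants from rte_ether.h:

    /* Frame = payload (MTU) + Ethernet header (14 B) + CRC (4 B); the
     * default MTU of 1500 yields the classic 1518-byte frame. */
    uint16_t max_pkt_len = dev_data->mtu        /* e.g. 1500 */
                         + RTE_ETHER_HDR_LEN    /* 14 */
                         + RTE_ETHER_CRC_LEN;   /* 4  -> 1518 */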
@@ -387,17 +399,17 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 }
 
 /* Stop device: disable rx and tx functions to allow for reconfiguring. */
-static void
+static int
 axgbe_dev_stop(struct rte_eth_dev *dev)
 {
        struct axgbe_port *pdata = dev->data->dev_private;
 
        PMD_INIT_FUNC_TRACE();
 
-       rte_intr_disable(&pdata->pci_dev->intr_handle);
+       rte_intr_disable(pdata->pci_dev->intr_handle);
 
        if (rte_bit_relaxed_get32(AXGBE_STOPPED, &pdata->dev_state))
-               return;
+               return 0;
 
        rte_bit_relaxed_set32(AXGBE_STOPPED, &pdata->dev_state);
        axgbe_dev_disable_tx(dev);
@@ -407,13 +419,7 @@ axgbe_dev_stop(struct rte_eth_dev *dev)
        pdata->hw_if.exit(pdata);
        memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
        rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
-}
 
-/* Clear all resources like TX/RX queues. */
-static int
-axgbe_dev_close(struct rte_eth_dev *dev)
-{
-       axgbe_dev_clear_queues(dev);
        return 0;
 }
 
@@ -515,8 +521,8 @@ axgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
        }
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
                        continue;
                pdata->rss_table[i] = reta_conf[idx].reta[shift];
@@ -546,8 +552,8 @@ axgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
        }
 
        for (i = 0; i < reta_size; i++) {
-               idx = i / RTE_RETA_GROUP_SIZE;
-               shift = i % RTE_RETA_GROUP_SIZE;
+               idx = i / RTE_ETH_RETA_GROUP_SIZE;
+               shift = i % RTE_ETH_RETA_GROUP_SIZE;
                if ((reta_conf[idx].mask & (1ULL << shift)) == 0)
                        continue;
                reta_conf[idx].reta[shift] = pdata->rss_table[i];
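Both RETA loops use the same split: each rte_eth_rss_reta_entry64 group covers RTE_ETH_RETA_GROUP_SIZE (64) table slots, so a flat index is divided into a group index and a bit position in that group's mask. A small worked example mirroring the loop body:

    /* reta_size = 128 -> two 64-entry groups; i = 70 lands in
     * group 1, bit 6, and is skipped unless that mask bit is set. */
    unsigned int i = 70;
    unsigned int idx   = i / RTE_ETH_RETA_GROUP_SIZE;   /* 70 / 64 = 1 */
    unsigned int shift = i % RTE_ETH_RETA_GROUP_SIZE;   /* 70 % 64 = 6 */
    if (reta_conf[idx].mask & (1ULL << shift))
        reta_conf[idx].reta[shift] = pdata->rss_table[i];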
@@ -584,13 +590,13 @@ axgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
 
        pdata->rss_hf = rss_conf->rss_hf & AXGBE_RSS_OFFLOAD;
 
-       if (pdata->rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+       if (pdata->rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6))
                AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
        if (pdata->rss_hf &
-           (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+           (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP))
                AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
        if (pdata->rss_hf &
-           (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+           (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP))
                AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
 
        /* Set the RSS options */
@@ -625,6 +631,20 @@ axgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int
+axgbe_dev_reset(struct rte_eth_dev *dev)
+{
+       int ret = 0;
+
+       ret = axgbe_dev_close(dev);
+       if (ret)
+               return ret;
+
+       ret = eth_axgbe_dev_init(dev);
+
+       return ret;
+}
+
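With the dev_reset hook wired up, an application can recover the port through the generic API: rte_eth_dev_reset() stops the port and invokes this callback, which here performs a full close followed by re-init; the application then reconfigures and restarts. A hedged usage sketch (port_id, nb_rxq, nb_txq and port_conf are placeholders):

    int ret = rte_eth_dev_reset(port_id);   /* stop + driver dev_reset */
    if (ret == 0) {
        ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
        /* ...re-setup Rx/Tx queues, then rte_eth_dev_start(port_id)... */
    }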
 static void
 axgbe_dev_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
 {
@@ -745,7 +765,7 @@ axgbe_dev_link_update(struct rte_eth_dev *dev,
        link.link_status = pdata->phy_link;
        link.link_speed = pdata->phy_speed;
        link.link_autoneg = !(dev->data->dev_conf.link_speeds &
-                             ETH_LINK_SPEED_FIXED);
+                             RTE_ETH_LINK_SPEED_FIXED);
        ret = rte_eth_linkstatus_set(dev, &link);
        if (ret == -1)
                PMD_DRV_LOG(ERR, "No change in link status\n");
@@ -1058,8 +1078,8 @@ axgbe_dev_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
 
 static int
 axgbe_dev_xstats_get_names_by_id(struct rte_eth_dev *dev,
-                                struct rte_eth_xstat_name *xstats_names,
                                 const uint64_t *ids,
+                                struct rte_eth_xstat_name *xstats_names,
                                 unsigned int size)
 {
        struct rte_eth_xstat_name xstats_names_copy[AXGBE_XSTATS_COUNT];
@@ -1111,22 +1131,33 @@ axgbe_dev_stats_get(struct rte_eth_dev *dev,
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
-               stats->q_ipackets[i] = rxq->pkts;
-               stats->ipackets += rxq->pkts;
-               stats->q_ibytes[i] = rxq->bytes;
-               stats->ibytes += rxq->bytes;
-               stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
-               stats->q_errors[i] = rxq->errors + rxq->rx_mbuf_alloc_failed;
-               stats->ierrors += rxq->errors;
+               if (rxq) {
+                       stats->q_ipackets[i] = rxq->pkts;
+                       stats->ipackets += rxq->pkts;
+                       stats->q_ibytes[i] = rxq->bytes;
+                       stats->ibytes += rxq->bytes;
+                       stats->rx_nombuf += rxq->rx_mbuf_alloc_failed;
+                       stats->q_errors[i] = rxq->errors
+                               + rxq->rx_mbuf_alloc_failed;
+                       stats->ierrors += rxq->errors;
+               } else {
+                       PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
+                                       dev->data->port_id);
+               }
        }
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
-               stats->q_opackets[i] = txq->pkts;
-               stats->opackets += txq->pkts;
-               stats->q_obytes[i] = txq->bytes;
-               stats->obytes += txq->bytes;
-               stats->oerrors += txq->errors;
+               if (txq) {
+                       stats->q_opackets[i] = txq->pkts;
+                       stats->opackets += txq->pkts;
+                       stats->q_obytes[i] = txq->bytes;
+                       stats->obytes += txq->bytes;
+                       stats->oerrors += txq->errors;
+               } else {
+                       PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
+                                       dev->data->port_id);
+               }
        }
 
        return 0;
@@ -1141,16 +1172,26 @@ axgbe_dev_stats_reset(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                rxq = dev->data->rx_queues[i];
-               rxq->pkts = 0;
-               rxq->bytes = 0;
-               rxq->errors = 0;
-               rxq->rx_mbuf_alloc_failed = 0;
+               if (rxq) {
+                       rxq->pkts = 0;
+                       rxq->bytes = 0;
+                       rxq->errors = 0;
+                       rxq->rx_mbuf_alloc_failed = 0;
+               } else {
+                       PMD_DRV_LOG(DEBUG, "Rx queue not setup for port %d\n",
+                                       dev->data->port_id);
+               }
        }
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
-               txq->pkts = 0;
-               txq->bytes = 0;
-               txq->errors = 0;
+               if (txq) {
+                       txq->pkts = 0;
+                       txq->bytes = 0;
+                       txq->errors = 0;
+               } else {
+                       PMD_DRV_LOG(DEBUG, "Tx queue not setup for port %d\n",
+                                       dev->data->port_id);
+               }
        }
 
        return 0;
@@ -1167,20 +1208,24 @@ axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
        dev_info->max_mac_addrs = pdata->hw_feat.addn_mac + 1;
        dev_info->max_hash_mac_addrs = pdata->hw_feat.hash_table_size;
-       dev_info->speed_capa =  ETH_LINK_SPEED_10G;
+       dev_info->speed_capa = RTE_ETH_LINK_SPEED_10G;
 
        dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM  |
-               DEV_RX_OFFLOAD_TCP_CKSUM  |
-               DEV_RX_OFFLOAD_JUMBO_FRAME      |
-               DEV_RX_OFFLOAD_SCATTER    |
-               DEV_RX_OFFLOAD_KEEP_CRC;
+               RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
+               RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
+               RTE_ETH_RX_OFFLOAD_VLAN_EXTEND |
+               RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
+               RTE_ETH_RX_OFFLOAD_UDP_CKSUM  |
+               RTE_ETH_RX_OFFLOAD_TCP_CKSUM  |
+               RTE_ETH_RX_OFFLOAD_SCATTER        |
+               RTE_ETH_RX_OFFLOAD_KEEP_CRC;
 
        dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_IPV4_CKSUM  |
-               DEV_TX_OFFLOAD_UDP_CKSUM   |
-               DEV_TX_OFFLOAD_TCP_CKSUM;
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
+               RTE_ETH_TX_OFFLOAD_QINQ_INSERT |
+               RTE_ETH_TX_OFFLOAD_IPV4_CKSUM  |
+               RTE_ETH_TX_OFFLOAD_UDP_CKSUM   |
+               RTE_ETH_TX_OFFLOAD_TCP_CKSUM;
 
        if (pdata->hw_feat.rss) {
                dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
@@ -1217,13 +1262,13 @@ axgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        fc.autoneg = pdata->pause_autoneg;
 
        if (pdata->rx_pause && pdata->tx_pause)
-               fc.mode = RTE_FC_FULL;
+               fc.mode = RTE_ETH_FC_FULL;
        else if (pdata->rx_pause)
-               fc.mode = RTE_FC_RX_PAUSE;
+               fc.mode = RTE_ETH_FC_RX_PAUSE;
        else if (pdata->tx_pause)
-               fc.mode = RTE_FC_TX_PAUSE;
+               fc.mode = RTE_ETH_FC_TX_PAUSE;
        else
-               fc.mode = RTE_FC_NONE;
+               fc.mode = RTE_ETH_FC_NONE;
 
        fc_conf->high_water =  (1024 + (fc.low_water[0] << 9)) / 1024;
        fc_conf->low_water =  (1024 + (fc.high_water[0] << 9)) / 1024;
@@ -1253,13 +1298,13 @@ axgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
        AXGMAC_IOWRITE(pdata, reg, reg_val);
        fc.mode = fc_conf->mode;
 
-       if (fc.mode == RTE_FC_FULL) {
+       if (fc.mode == RTE_ETH_FC_FULL) {
                pdata->tx_pause = 1;
                pdata->rx_pause = 1;
-       } else if (fc.mode == RTE_FC_RX_PAUSE) {
+       } else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
                pdata->tx_pause = 0;
                pdata->rx_pause = 1;
-       } else if (fc.mode == RTE_FC_TX_PAUSE) {
+       } else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
                pdata->tx_pause = 1;
                pdata->rx_pause = 0;
        } else {
@@ -1341,15 +1386,15 @@ axgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
 
        fc.mode = pfc_conf->fc.mode;
 
-       if (fc.mode == RTE_FC_FULL) {
+       if (fc.mode == RTE_ETH_FC_FULL) {
                pdata->tx_pause = 1;
                pdata->rx_pause = 1;
                AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-       } else if (fc.mode == RTE_FC_RX_PAUSE) {
+       } else if (fc.mode == RTE_ETH_FC_RX_PAUSE) {
                pdata->tx_pause = 0;
                pdata->rx_pause = 1;
                AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 1);
-       } else if (fc.mode == RTE_FC_TX_PAUSE) {
+       } else if (fc.mode == RTE_ETH_FC_TX_PAUSE) {
                pdata->tx_pause = 1;
                pdata->rx_pause = 0;
                AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
@@ -1432,31 +1477,18 @@ axgbe_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 
 static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
-       struct rte_eth_dev_info dev_info;
        struct axgbe_port *pdata = dev->data->dev_private;
-       uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
-       unsigned int val = 0;
-       axgbe_dev_info_get(dev, &dev_info);
-       /* check that mtu is within the allowed range */
-       if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
-               return -EINVAL;
+       unsigned int val;
+
        /* MTU setting is forbidden if the port is started */
        if (dev->data->dev_started) {
                PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
                                dev->data->port_id);
                return -EBUSY;
        }
-       if (frame_size > RTE_ETHER_MAX_LEN) {
-               dev->data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_JUMBO_FRAME;
-               val = 1;
-       } else {
-               dev->data->dev_conf.rxmode.offloads &=
-                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-               val = 0;
-       }
+       val = mtu > RTE_ETHER_MTU ? 1 : 0;
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
        return 0;
 }
 
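With JUMBO_FRAME gone from the offload set, the MAC's jumbo-enable (JE) bit is driven purely by whether the requested MTU exceeds the standard RTE_ETHER_MTU (1500). A usage sketch, assuming a stopped port (the driver returns -EBUSY otherwise):

    /* Port must be stopped first; the driver rejects the call otherwise. */
    int ret = rte_eth_dev_set_mtu(port_id, 9000);  /* 9000 > 1500 -> JE = 1 */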
@@ -1763,14 +1795,163 @@ axgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
        return 0;
 }
 
+static int
+axgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+       unsigned long vid_bit, vid_idx;
+
+       vid_bit = VLAN_TABLE_BIT(vid);
+       vid_idx = VLAN_TABLE_IDX(vid);
+
+       if (on) {
+               PMD_DRV_LOG(DEBUG, "Set VLAN vid=%d for device = %s\n",
+                           vid, pdata->eth_dev->device->name);
+               pdata->active_vlans[vid_idx] |= vid_bit;
+       } else {
+               PMD_DRV_LOG(DEBUG, "Reset VLAN vid=%d for device = %s\n",
+                           vid, pdata->eth_dev->device->name);
+               pdata->active_vlans[vid_idx] &= ~vid_bit;
+       }
+       pdata->hw_if.update_vlan_hash_table(pdata);
+       return 0;
+}
+
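VLAN_TABLE_BIT() and VLAN_TABLE_IDX() live in the driver header; the scheme is the usual 4096-bit active-VLAN bitmap packed into 64-bit words. A plausible reconstruction of the macros (an assumption for illustration, not copied from axgbe_ethdev.h):

    /* Hypothetical sketch: 4096 VLAN IDs in 64 x 64-bit words. */
    #define VLAN_TABLE_BIT(vid)  (1UL << ((vid) & 0x3F))  /* bit in word */
    #define VLAN_TABLE_IDX(vid)  ((vid) >> 6)             /* word index  */
    /* vid = 100 -> word 1, bit 36; setting it marks VLAN 100 active. */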
+static int
+axgbe_vlan_tpid_set(struct rte_eth_dev *dev,
+                   enum rte_vlan_type vlan_type,
+                   uint16_t tpid)
+{
+       struct axgbe_port *pdata = dev->data->dev_private;
+       uint32_t reg = 0;
+       uint32_t qinq = 0;
+
+       qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
+       PMD_DRV_LOG(DEBUG, "EDVLP: qinq = 0x%x\n", qinq);
+
+       switch (vlan_type) {
+       case RTE_ETH_VLAN_TYPE_INNER:
+               PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_INNER\n");
+               if (qinq) {
+                       if (tpid != 0x8100 && tpid != 0x88a8)
+                               PMD_DRV_LOG(ERR,
+                                           "tag supported 0x8100/0x88A8\n");
+                       PMD_DRV_LOG(DEBUG, "qinq with inner tag\n");
+
+                       /*Enable Inner VLAN Tag */
+                       AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 1);
+                       reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
+                       PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
+
+               } else {
+                       PMD_DRV_LOG(ERR,
+                                   "Inner type not supported in single tag\n");
+               }
+               break;
+       case RTE_ETH_VLAN_TYPE_OUTER:
+               PMD_DRV_LOG(DEBUG, "RTE_ETH_VLAN_TYPE_OUTER\n");
+               if (qinq) {
+                       PMD_DRV_LOG(DEBUG, "double tagging is enabled\n");
+                       /*Enable outer VLAN tag*/
+                       AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERIVLT, 0);
+                       reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, ERIVLT);
+                       PMD_DRV_LOG(DEBUG, "bit ERIVLT = 0x%x\n", reg);
+
+                       AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 1);
+                       reg = AXGMAC_IOREAD_BITS(pdata, MAC_VLANIR, CSVL);
+                       PMD_DRV_LOG(DEBUG, "bit CSVL = 0x%x\n", reg);
+               } else {
+                       if (tpid != 0x8100 && tpid != 0x88a8)
+                               PMD_DRV_LOG(ERR,
+                                           "tag supported 0x8100/0x88A8\n");
+               }
+               break;
+       case RTE_ETH_VLAN_TYPE_MAX:
+               PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_MAX\n");
+               break;
+       case RTE_ETH_VLAN_TYPE_UNKNOWN:
+               PMD_DRV_LOG(ERR, "RTE_ETH_VLAN_TYPE_UNKNOWN\n");
+               break;
+       }
+       return 0;
+}
+
+static void axgbe_vlan_extend_enable(struct axgbe_port *pdata)
+{
+       int qinq = 0;
+
+       AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 1);
+       qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
+       PMD_DRV_LOG(DEBUG, "vlan double tag enabled EDVLP:qinq=0x%x\n", qinq);
+}
+
+static void axgbe_vlan_extend_disable(struct axgbe_port *pdata)
+{
+       int qinq = 0;
+
+       AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EDVLP, 0);
+       qinq = AXGMAC_IOREAD_BITS(pdata, MAC_VLANTR, EDVLP);
+       PMD_DRV_LOG(DEBUG, "vlan double tag disable EDVLP:qinq=0x%x\n", qinq);
+}
+
+static int
+axgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+       struct axgbe_port *pdata = dev->data->dev_private;
+
+       /* Indicate that VLAN Tx CTAGs come from context descriptors */
+       AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
+       AXGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
+
+       if (mask & RTE_ETH_VLAN_STRIP_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
+                       PMD_DRV_LOG(DEBUG, "Strip ON for device = %s\n",
+                                   pdata->eth_dev->device->name);
+                       pdata->hw_if.enable_rx_vlan_stripping(pdata);
+               } else {
+                       PMD_DRV_LOG(DEBUG, "Strip OFF for device = %s\n",
+                                   pdata->eth_dev->device->name);
+                       pdata->hw_if.disable_rx_vlan_stripping(pdata);
+               }
+       }
+       if (mask & RTE_ETH_VLAN_FILTER_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) {
+                       PMD_DRV_LOG(DEBUG, "Filter ON for device = %s\n",
+                                   pdata->eth_dev->device->name);
+                       pdata->hw_if.enable_rx_vlan_filtering(pdata);
+               } else {
+                       PMD_DRV_LOG(DEBUG, "Filter OFF for device = %s\n",
+                                   pdata->eth_dev->device->name);
+                       pdata->hw_if.disable_rx_vlan_filtering(pdata);
+               }
+       }
+       if (mask & RTE_ETH_VLAN_EXTEND_MASK) {
+               if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND) {
+                       PMD_DRV_LOG(DEBUG, "enabling vlan extended mode\n");
+                       axgbe_vlan_extend_enable(pdata);
+                       /* Set global registers with default ethertype*/
+                       axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_OUTER,
+                                           RTE_ETHER_TYPE_VLAN);
+                       axgbe_vlan_tpid_set(dev, RTE_ETH_VLAN_TYPE_INNER,
+                                           RTE_ETHER_TYPE_VLAN);
+               } else {
+                       PMD_DRV_LOG(DEBUG, "disabling vlan extended mode\n");
+                       axgbe_vlan_extend_disable(pdata);
+               }
+       }
+       return 0;
+}
+
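The mask passed to this callback comes from rte_eth_dev_set_vlan_offload(); each *_MASK bit tells the PMD which offload to re-evaluate against rxmode->offloads. From the application side, enabling strip and filter looks roughly like this sketch:

    /* Enable VLAN stripping and filtering; extend (QinQ) is untouched.
     * The ethdev layer updates rxmode->offloads and calls the PMD with
     * the changed-bits mask. */
    int ret = rte_eth_dev_set_vlan_offload(port_id,
                RTE_ETH_VLAN_STRIP_OFFLOAD | RTE_ETH_VLAN_FILTER_OFFLOAD);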
 static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
 {
-       unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+       unsigned int mac_hfr0, mac_hfr1, mac_hfr2, mac_hfr3;
        struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
 
        mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
        mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
        mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
+       mac_hfr3 = AXGMAC_IOREAD(pdata, MAC_HWF3R);
 
        memset(hw_feat, 0, sizeof(*hw_feat));
 
@@ -1821,6 +2002,12 @@ static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
        hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
                                                AUXSNAPNUM);
 
+       /* Hardware feature register 3 */
+       hw_feat->tx_q_vlan_tag_ins  = AXGMAC_GET_BITS(mac_hfr3,
+                                                     MAC_HWF3R, CBTISEL);
+       hw_feat->no_of_vlan_extn    = AXGMAC_GET_BITS(mac_hfr3,
+                                                     MAC_HWF3R, NRVF);
+
        /* Translate the Hash Table size into actual number */
        switch (hw_feat->hash_table_size) {
        case 0:
@@ -1979,6 +2166,8 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
+       eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
        pdata = eth_dev->data->dev_private;
        /* initial state */
        rte_bit_relaxed_set32(AXGBE_DOWN, &pdata->dev_state);
@@ -2122,7 +2311,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
                return ret;
        }
 
-       rte_intr_callback_register(&pci_dev->intr_handle,
+       rte_intr_callback_register(pci_dev->intr_handle,
                                   axgbe_dev_interrupt_handler,
                                   (void *)eth_dev);
        PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
@@ -2133,7 +2322,7 @@ eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
 }
 
 static int
-eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+axgbe_dev_close(struct rte_eth_dev *eth_dev)
 {
        struct rte_pci_device *pci_dev;
 
@@ -2143,14 +2332,11 @@ eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
                return 0;
 
        pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
-       eth_dev->dev_ops = NULL;
-       eth_dev->rx_pkt_burst = NULL;
-       eth_dev->tx_pkt_burst = NULL;
        axgbe_dev_clear_queues(eth_dev);
 
        /* disable uio intr before callback unregister */
-       rte_intr_disable(&pci_dev->intr_handle);
-       rte_intr_callback_unregister(&pci_dev->intr_handle,
+       rte_intr_disable(pci_dev->intr_handle);
+       rte_intr_callback_unregister(pci_dev->intr_handle,
                                     axgbe_dev_interrupt_handler,
                                     (void *)eth_dev);
 
@@ -2166,7 +2352,7 @@ static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
 
 static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
 {
-       return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
+       return rte_eth_dev_pci_generic_remove(pci_dev, axgbe_dev_close);
 }
 
 static struct rte_pci_driver rte_axgbe_pmd = {
@@ -2179,5 +2365,5 @@ static struct rte_pci_driver rte_axgbe_pmd = {
 RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
-RTE_LOG_REGISTER(axgbe_logtype_init, pmd.net.axgbe.init, NOTICE);
-RTE_LOG_REGISTER(axgbe_logtype_driver, pmd.net.axgbe.driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(axgbe_logtype_driver, driver, NOTICE);
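RTE_LOG_REGISTER_SUFFIX replaces the fully spelled-out RTE_LOG_REGISTER: the suffix is appended to the PMD's own logtype namespace instead of hard-coding pmd.net.axgbe.*. The registered logtypes are then consumed by the driver's log macros, roughly as in this sketch (the real macros live in axgbe_logs.h):

    #define PMD_DRV_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, \
                "%s(): " fmt, __func__, ## args)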