net/txgbe: add Tx done cleanup
[dpdk.git] drivers/net/txgbe/txgbe_ethdev.c
index 8559300..2c79b47 100644
 #include "base/txgbe.h"
 #include "txgbe_ethdev.h"
 #include "txgbe_rxtx.h"
+#include "txgbe_regs_group.h"
+
+static const struct reg_info txgbe_regs_general[] = {
+       {TXGBE_RST, 1, 1, "TXGBE_RST"},
+       {TXGBE_STAT, 1, 1, "TXGBE_STAT"},
+       {TXGBE_PORTCTL, 1, 1, "TXGBE_PORTCTL"},
+       {TXGBE_SDP, 1, 1, "TXGBE_SDP"},
+       {TXGBE_SDPCTL, 1, 1, "TXGBE_SDPCTL"},
+       {TXGBE_LEDCTL, 1, 1, "TXGBE_LEDCTL"},
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_nvm[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_interrupt[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_fctl_others[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_rxdma[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_rx[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_tx[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_wakeup[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_dcb[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_mac[] = {
+       {0, 0, 0, ""}
+};
+
+static const struct reg_info txgbe_regs_diagnostic[] = {
+       {0, 0, 0, ""},
+};
+
+/* PF registers */
+static const struct reg_info *txgbe_regs_others[] = {
+                               txgbe_regs_general,
+                               txgbe_regs_nvm,
+                               txgbe_regs_interrupt,
+                               txgbe_regs_fctl_others,
+                               txgbe_regs_rxdma,
+                               txgbe_regs_rx,
+                               txgbe_regs_tx,
+                               txgbe_regs_wakeup,
+                               txgbe_regs_dcb,
+                               txgbe_regs_mac,
+                               txgbe_regs_diagnostic,
+                               NULL};
 
 static int  txgbe_dev_set_link_up(struct rte_eth_dev *dev);
 static int  txgbe_dev_set_link_down(struct rte_eth_dev *dev);
@@ -336,6 +402,43 @@ txgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
        return 0;
 }
 
+static void
+txgbe_dcb_init(struct txgbe_hw *hw, struct txgbe_dcb_config *dcb_config)
+{
+       int i;
+       u8 bwgp;
+       struct txgbe_dcb_tc_config *tc;
+
+       UNREFERENCED_PARAMETER(hw);
+
+       dcb_config->num_tcs.pg_tcs = TXGBE_DCB_TC_MAX;
+       dcb_config->num_tcs.pfc_tcs = TXGBE_DCB_TC_MAX;
+       bwgp = (u8)(100 / TXGBE_DCB_TC_MAX);
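+       /* Split 100% bandwidth across TCs; the (i & 1) below hands the
+        * integer-division remainder to the odd TCs (e.g. 12%/13% with 8 TCs).
+        */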
+       for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+               tc = &dcb_config->tc_config[i];
+               tc->path[TXGBE_DCB_TX_CONFIG].bwg_id = i;
+               tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = bwgp + (i & 1);
+               tc->path[TXGBE_DCB_RX_CONFIG].bwg_id = i;
+               tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = bwgp + (i & 1);
+               tc->pfc = txgbe_dcb_pfc_disabled;
+       }
+
+       /* Initialize default user priority to TC mapping, UPx->TC0 */
+       tc = &dcb_config->tc_config[0];
+       tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+       tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
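+       /* Every bandwidth group starts out with the full 100% allocation */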
+       for (i = 0; i < TXGBE_DCB_BWG_MAX; i++) {
+               dcb_config->bw_percentage[i][TXGBE_DCB_TX_CONFIG] = 100;
+               dcb_config->bw_percentage[i][TXGBE_DCB_RX_CONFIG] = 100;
+       }
+       dcb_config->rx_pba_cfg = txgbe_dcb_pba_equal;
+       dcb_config->pfc_mode_enable = false;
+       dcb_config->vt_mode = true;
+       dcb_config->round_robin_enable = false;
+       /* support all DCB capabilities */
+       dcb_config->support.capabilities = 0xFF;
+}
+
 /*
  * Ensure that all locks are released before first NVM or PHY access
  */
@@ -366,15 +469,20 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        struct txgbe_hw *hw = TXGBE_DEV_HW(eth_dev);
        struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(eth_dev);
        struct txgbe_hwstrip *hwstrip = TXGBE_DEV_HWSTRIP(eth_dev);
+       struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(eth_dev);
+       struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(eth_dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        const struct rte_memzone *mz;
        uint32_t ctrl_ext;
        uint16_t csum;
-       int err;
+       int err, i;
 
        PMD_INIT_FUNC_TRACE();
 
        eth_dev->dev_ops = &txgbe_eth_dev_ops;
+       eth_dev->rx_queue_count       = txgbe_dev_rx_queue_count;
+       eth_dev->rx_descriptor_status = txgbe_dev_rx_descriptor_status;
+       eth_dev->tx_descriptor_status = txgbe_dev_tx_descriptor_status;
        eth_dev->rx_pkt_burst = &txgbe_recv_pkts;
        eth_dev->tx_pkt_burst = &txgbe_xmit_pkts;
        eth_dev->tx_pkt_prepare = &txgbe_prep_pkts;
@@ -431,6 +539,20 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* Unlock any pending hardware semaphore */
        txgbe_swfw_lock_reset(hw);
 
+       /* Initialize DCB configuration */
+       memset(dcb_config, 0, sizeof(struct txgbe_dcb_config));
+       txgbe_dcb_init(hw, dcb_config);
+
+       /* Get Hardware Flow Control setting */
+       hw->fc.requested_mode = txgbe_fc_full;
+       hw->fc.current_mode = txgbe_fc_full;
+       hw->fc.pause_time = TXGBE_FC_PAUSE_TIME;
+       for (i = 0; i < TXGBE_DCB_TC_MAX; i++) {
+               hw->fc.low_water[i] = TXGBE_FC_XON_LOTH;
+               hw->fc.high_water[i] = TXGBE_FC_XOFF_HITH;
+       }
+       hw->fc.send_xon = 1;
+
        err = hw->rom.init_params(hw);
        if (err != 0) {
                PMD_INIT_LOG(ERR, "The EEPROM init failed: %d", err);
@@ -548,6 +670,9 @@ eth_txgbe_dev_init(struct rte_eth_dev *eth_dev, void *init_params __rte_unused)
        /* enable support intr */
        txgbe_enable_intr(eth_dev);
 
+       /* initialize bandwidth configuration info */
+       memset(bw_conf, 0, sizeof(struct txgbe_bw_conf));
+
        return 0;
 }
 
@@ -943,6 +1068,17 @@ txgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        return 0;
 }
 
+static void
+txgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       /* VLANCTL: enable vlan filtering and allow all vlan tags through */
+       uint32_t vlanctrl = rd32(hw, TXGBE_VLANCTL);
+
+       vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
+       wr32(hw, TXGBE_VLANCTL, vlanctrl);
+}
+
 static int
 txgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 {
@@ -1151,6 +1287,83 @@ txgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
        intr->mask_misc |= TXGBE_ICRMISC_GPIO;
 }
 
+int
+txgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+                       uint16_t tx_rate, uint64_t q_msk)
+{
+       struct txgbe_hw *hw;
+       struct txgbe_vf_info *vfinfo;
+       struct rte_eth_link link;
+       uint8_t  nb_q_per_pool;
+       uint32_t queue_stride;
+       uint32_t queue_idx, idx = 0, vf_idx;
+       uint32_t queue_end;
+       uint16_t total_rate = 0;
+       struct rte_pci_device *pci_dev;
+       int ret;
+
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+       ret = rte_eth_link_get_nowait(dev->data->port_id, &link);
+       if (ret < 0)
+               return ret;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       if (tx_rate > link.link_speed)
+               return -EINVAL;
+
+       if (q_msk == 0)
+               return 0;
+
+       hw = TXGBE_DEV_HW(dev);
+       vfinfo = *(TXGBE_DEV_VFDATA(dev));
+       nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+       queue_stride = TXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+       queue_idx = vf * queue_stride;
+       queue_end = queue_idx + nb_q_per_pool - 1;
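+       /* Each pool/VF owns a contiguous stride of Tx queues; make sure the
+        * last queue of this VF is still within hardware limits.
+        */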
+       if (queue_end >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (vfinfo) {
+               for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
+                       if (vf_idx == vf)
+                               continue;
+                       for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+                               idx++)
+                               total_rate += vfinfo[vf_idx].tx_rate[idx];
+               }
+       } else {
+               return -EINVAL;
+       }
+
+       /* Store tx_rate for this vf. */
+       for (idx = 0; idx < nb_q_per_pool; idx++) {
+               if (((uint64_t)0x1 << idx) & q_msk) {
+                       if (vfinfo[vf].tx_rate[idx] != tx_rate)
+                               vfinfo[vf].tx_rate[idx] = tx_rate;
+                       total_rate += tx_rate;
+               }
+       }
+
+       if (total_rate > dev->data->dev_link.link_speed) {
+               /* Reset the stored Tx rate of the VF if it would exceed
+                * the link speed.
+                */
+               memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+               return -EINVAL;
+       }
+
+       /* Set ARBTXRATE of each queue/pool for vf X */
+       for (; queue_idx <= queue_end; queue_idx++) {
+               if (0x1 & q_msk)
+                       txgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+               q_msk = q_msk >> 1;
+       }
+
+       return 0;
+}
+
 /*
  * Configure device link speed and setup link.
  * It returns 0 on success.
@@ -1160,6 +1373,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
 {
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
        struct txgbe_hw_stats *hw_stats = TXGBE_DEV_STATS(dev);
+       struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t intr_vector = 0;
@@ -1169,6 +1383,7 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        uint32_t allowed_speeds = 0;
        int mask = 0;
        int status;
+       uint16_t vf, idx;
        uint32_t *link_speeds;
 
        PMD_INIT_FUNC_TRACE();
@@ -1205,6 +1420,9 @@ txgbe_dev_start(struct rte_eth_dev *dev)
        hw->mac.start_hw(hw);
        hw->mac.get_link_status = true;
 
+       /* configure PF module if SRIOV enabled */
+       txgbe_pf_host_configure(dev);
+
        txgbe_dev_phy_intr_setup(dev);
 
        /* check and configure queue intr-vector mapping */
@@ -1248,6 +1466,26 @@ txgbe_dev_start(struct rte_eth_dev *dev)
                goto error;
        }
 
+       if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+               /* Enable vlan filtering for VMDq */
+               txgbe_vmdq_vlan_hw_filter_enable(dev);
+       }
+
+       /* Configure DCB hw */
+       txgbe_configure_pb(dev);
+       txgbe_configure_port(dev);
+       txgbe_configure_dcb(dev);
+
+       /* Restore vf rate limit */
+       if (vfinfo != NULL) {
+               for (vf = 0; vf < pci_dev->max_vfs; vf++)
+                       for (idx = 0; idx < TXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+                               if (vfinfo[vf].tx_rate[idx] != 0)
+                                       txgbe_set_vf_rate_limit(dev, vf,
+                                               vfinfo[vf].tx_rate[idx],
+                                               1 << idx);
+       }
+
        err = txgbe_dev_rxtx_start(dev);
        if (err < 0) {
                PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
@@ -1368,9 +1606,12 @@ static int
 txgbe_dev_stop(struct rte_eth_dev *dev)
 {
        struct rte_eth_link link;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
        struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_vf_info *vfinfo = *TXGBE_DEV_VFDATA(dev);
        struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+       int vf;
 
        if (hw->adapter_stopped)
                return 0;
@@ -1389,6 +1630,9 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
        /* stop adapter */
        txgbe_stop_hw(hw);
 
+       for (vf = 0; vfinfo != NULL && vf < pci_dev->max_vfs; vf++)
+               vfinfo[vf].clear_to_send = false;
+
        if (hw->phy.media_type == txgbe_media_type_copper) {
                /* Turn off the copper */
                hw->phy.set_phy_power(hw, false);
@@ -1420,6 +1664,7 @@ txgbe_dev_stop(struct rte_eth_dev *dev)
                intr_handle->intr_vec = NULL;
        }
 
+       adapter->rss_reta_updated = 0;
        wr32m(hw, TXGBE_LEDCTL, 0xFFFFFFFF, TXGBE_LEDCTL_SEL_MASK);
 
        hw->adapter_stopped = true;
@@ -2047,6 +2292,27 @@ txgbe_dev_xstats_reset(struct rte_eth_dev *dev)
        return 0;
 }
 
+static int
+txgbe_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       u16 eeprom_verh, eeprom_verl;
+       u32 etrack_id;
+       int ret;
+
+       hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_H, &eeprom_verh);
+       hw->rom.readw_sw(hw, TXGBE_EEPROM_VERSION_L, &eeprom_verl);
+
+       etrack_id = (eeprom_verh << 16) | eeprom_verl;
+       ret = snprintf(fw_version, fw_size, "0x%08x", etrack_id);
+
+       ret += 1; /* add the size of '\0' */
+       if (fw_size < (u32)ret)
+               return ret;
+       else
+               return 0;
+}
+
 static int
 txgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 {
@@ -2226,6 +2492,65 @@ txgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
        return txgbe_dev_link_update_share(dev, wait_to_complete);
 }
 
+static int
+txgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t fctrl;
+
+       fctrl = rd32(hw, TXGBE_PSRCTL);
+       fctrl |= (TXGBE_PSRCTL_UCP | TXGBE_PSRCTL_MCP);
+       wr32(hw, TXGBE_PSRCTL, fctrl);
+
+       return 0;
+}
+
+static int
+txgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t fctrl;
+
+       fctrl = rd32(hw, TXGBE_PSRCTL);
+       fctrl &= (~TXGBE_PSRCTL_UCP);
+       if (dev->data->all_multicast == 1)
+               fctrl |= TXGBE_PSRCTL_MCP;
+       else
+               fctrl &= (~TXGBE_PSRCTL_MCP);
+       wr32(hw, TXGBE_PSRCTL, fctrl);
+
+       return 0;
+}
+
+static int
+txgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t fctrl;
+
+       fctrl = rd32(hw, TXGBE_PSRCTL);
+       fctrl |= TXGBE_PSRCTL_MCP;
+       wr32(hw, TXGBE_PSRCTL, fctrl);
+
+       return 0;
+}
+
+static int
+txgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t fctrl;
+
+       if (dev->data->promiscuous == 1)
+               return 0; /* must remain in all_multicast mode */
+
+       fctrl = rd32(hw, TXGBE_PSRCTL);
+       fctrl &= (~TXGBE_PSRCTL_MCP);
+       wr32(hw, TXGBE_PSRCTL, fctrl);
+
+       return 0;
+}
+
 /**
  * It clears the interrupt causes and enables the interrupt.
  * It will be called once only during nic initialized.
@@ -2519,6 +2844,266 @@ txgbe_dev_interrupt_handler(void *param)
        txgbe_dev_interrupt_action(dev, dev->intr_handle);
 }
 
+static int
+txgbe_dev_led_on(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw;
+
+       hw = TXGBE_DEV_HW(dev);
+       return txgbe_led_on(hw, 4) == 0 ? 0 : -ENOTSUP;
+}
+
+static int
+txgbe_dev_led_off(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw;
+
+       hw = TXGBE_DEV_HW(dev);
+       return txgbe_led_off(hw, 4) == 0 ? 0 : -ENOTSUP;
+}
+
+static int
+txgbe_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct txgbe_hw *hw;
+       uint32_t mflcn_reg;
+       uint32_t fccfg_reg;
+       int rx_pause;
+       int tx_pause;
+
+       hw = TXGBE_DEV_HW(dev);
+
+       fc_conf->pause_time = hw->fc.pause_time;
+       fc_conf->high_water = hw->fc.high_water[0];
+       fc_conf->low_water = hw->fc.low_water[0];
+       fc_conf->send_xon = hw->fc.send_xon;
+       fc_conf->autoneg = !hw->fc.disable_fc_autoneg;
+
+       /*
+        * Return rx_pause status according to actual setting of
+        * RXFCCFG register.
+        */
+       mflcn_reg = rd32(hw, TXGBE_RXFCCFG);
+       if (mflcn_reg & (TXGBE_RXFCCFG_FC | TXGBE_RXFCCFG_PFC))
+               rx_pause = 1;
+       else
+               rx_pause = 0;
+
+       /*
+        * Return tx_pause status according to actual setting of
+        * TXFCCFG register.
+        */
+       fccfg_reg = rd32(hw, TXGBE_TXFCCFG);
+       if (fccfg_reg & (TXGBE_TXFCCFG_FC | TXGBE_TXFCCFG_PFC))
+               tx_pause = 1;
+       else
+               tx_pause = 0;
+
+       if (rx_pause && tx_pause)
+               fc_conf->mode = RTE_FC_FULL;
+       else if (rx_pause)
+               fc_conf->mode = RTE_FC_RX_PAUSE;
+       else if (tx_pause)
+               fc_conf->mode = RTE_FC_TX_PAUSE;
+       else
+               fc_conf->mode = RTE_FC_NONE;
+
+       return 0;
+}
+
+static int
+txgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
+{
+       struct txgbe_hw *hw;
+       int err;
+       uint32_t rx_buf_size;
+       uint32_t max_high_water;
+       enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
+               txgbe_fc_none,
+               txgbe_fc_rx_pause,
+               txgbe_fc_tx_pause,
+               txgbe_fc_full
+       };
+
+       PMD_INIT_FUNC_TRACE();
+
+       hw = TXGBE_DEV_HW(dev);
+       rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(0));
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+
+       /*
+        * Reserve at least one Ethernet frame for the watermark;
+        * high_water/low_water are in kilobytes for txgbe.
+        */
+       max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
+       if (fc_conf->high_water > max_high_water ||
+           fc_conf->high_water < fc_conf->low_water) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
+               return -EINVAL;
+       }
+
+       hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[fc_conf->mode];
+       hw->fc.pause_time     = fc_conf->pause_time;
+       hw->fc.high_water[0]  = fc_conf->high_water;
+       hw->fc.low_water[0]   = fc_conf->low_water;
+       hw->fc.send_xon       = fc_conf->send_xon;
+       hw->fc.disable_fc_autoneg = !fc_conf->autoneg;
+
+       err = txgbe_fc_enable(hw);
+
+       /* Not negotiated is not an error case */
+       if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED) {
+               wr32m(hw, TXGBE_MACRXFLT, TXGBE_MACRXFLT_CTL_MASK,
+                     (fc_conf->mac_ctrl_frame_fwd
+                      ? TXGBE_MACRXFLT_CTL_NOPS : TXGBE_MACRXFLT_CTL_DROP));
+               txgbe_flush(hw);
+
+               return 0;
+       }
+
+       PMD_INIT_LOG(ERR, "txgbe_fc_enable = 0x%x", err);
+       return -EIO;
+}
+
+static int
+txgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev,
+               struct rte_eth_pfc_conf *pfc_conf)
+{
+       int err;
+       uint32_t rx_buf_size;
+       uint32_t max_high_water;
+       uint8_t tc_num;
+       uint8_t  map[TXGBE_DCB_UP_MAX] = { 0 };
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
+
+       enum txgbe_fc_mode rte_fcmode_2_txgbe_fcmode[] = {
+               txgbe_fc_none,
+               txgbe_fc_rx_pause,
+               txgbe_fc_tx_pause,
+               txgbe_fc_full
+       };
+
+       PMD_INIT_FUNC_TRACE();
+
+       txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
+       tc_num = map[pfc_conf->priority];
+       rx_buf_size = rd32(hw, TXGBE_PBRXSIZE(tc_num));
+       PMD_INIT_LOG(DEBUG, "Rx packet buffer size = 0x%x", rx_buf_size);
+       /*
+        * Reserve at least one Ethernet frame for the watermark;
+        * high_water/low_water are in kilobytes for txgbe.
+        */
+       max_high_water = (rx_buf_size - RTE_ETHER_MAX_LEN) >> 10;
+       if (pfc_conf->fc.high_water > max_high_water ||
+           pfc_conf->fc.high_water <= pfc_conf->fc.low_water) {
+               PMD_INIT_LOG(ERR, "Invalid high/low water setup value in KB");
+               PMD_INIT_LOG(ERR, "High_water must <= 0x%x", max_high_water);
+               return -EINVAL;
+       }
+
+       hw->fc.requested_mode = rte_fcmode_2_txgbe_fcmode[pfc_conf->fc.mode];
+       hw->fc.pause_time = pfc_conf->fc.pause_time;
+       hw->fc.send_xon = pfc_conf->fc.send_xon;
+       hw->fc.low_water[tc_num] =  pfc_conf->fc.low_water;
+       hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
+
+       err = txgbe_dcb_pfc_enable(hw, tc_num);
+
+       /* Not negotiated is not an error case */
+       if (err == 0 || err == TXGBE_ERR_FC_NOT_NEGOTIATED)
+               return 0;
+
+       PMD_INIT_LOG(ERR, "txgbe_dcb_pfc_enable = 0x%x", err);
+       return -EIO;
+}
+
+int
+txgbe_dev_rss_reta_update(struct rte_eth_dev *dev,
+                         struct rte_eth_rss_reta_entry64 *reta_conf,
+                         uint16_t reta_size)
+{
+       uint8_t i, j, mask;
+       uint32_t reta;
+       uint16_t idx, shift;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (!txgbe_rss_update_sp(hw->mac.type)) {
+               PMD_DRV_LOG(ERR, "RSS reta update is not supported on this "
+                       "NIC.");
+               return -ENOTSUP;
+       }
+
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can supported "
+                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
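+       /* Each 32-bit RSSTBL register packs four 8-bit redirection entries,
+        * hence the stride of 4 and the i >> 2 register index below.
+        */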
+       for (i = 0; i < reta_size; i += 4) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+               if (!mask)
+                       continue;
+
+               reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+               for (j = 0; j < 4; j++) {
+                       if (RS8(mask, j, 0x1)) {
+                               reta  &= ~(MS32(8 * j, 0xFF));
+                               reta |= LS32(reta_conf[idx].reta[shift + j],
+                                               8 * j, 0xFF);
+                       }
+               }
+               wr32a(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
+       }
+       adapter->rss_reta_updated = 1;
+
+       return 0;
+}
+
+int
+txgbe_dev_rss_reta_query(struct rte_eth_dev *dev,
+                        struct rte_eth_rss_reta_entry64 *reta_conf,
+                        uint16_t reta_size)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint8_t i, j, mask;
+       uint32_t reta;
+       uint16_t idx, shift;
+
+       PMD_INIT_FUNC_TRACE();
+
+       if (reta_size != ETH_RSS_RETA_SIZE_128) {
+               PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
+                       "(%d) doesn't match the number hardware can supported "
+                       "(%d)", reta_size, ETH_RSS_RETA_SIZE_128);
+               return -EINVAL;
+       }
+
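+       /* Same layout as reta_update: four 8-bit entries per 32-bit register */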
+       for (i = 0; i < reta_size; i += 4) {
+               idx = i / RTE_RETA_GROUP_SIZE;
+               shift = i % RTE_RETA_GROUP_SIZE;
+               mask = (uint8_t)RS64(reta_conf[idx].mask, shift, 0xF);
+               if (!mask)
+                       continue;
+
+               reta = rd32a(hw, TXGBE_REG_RSSTBL, i >> 2);
+               for (j = 0; j < 4; j++) {
+                       if (RS8(mask, j, 0x1))
+                               reta_conf[idx].reta[shift + j] =
+                                       (uint16_t)RS32(reta, 8 * j, 0xFF);
+               }
+       }
+
+       return 0;
+}
+
 static int
 txgbe_add_rar(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
                                uint32_t index, uint32_t pool)
@@ -2549,6 +3134,46 @@ txgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *addr)
        return 0;
 }
 
+static int
+txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct rte_eth_dev_info dev_info;
+       uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+       struct rte_eth_dev_data *dev_data = dev->data;
+       int ret;
+
+       ret = txgbe_dev_info_get(dev, &dev_info);
+       if (ret != 0)
+               return ret;
+
+       /* check that mtu is within the allowed range */
+       if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
+               return -EINVAL;
+
+       /* If device is started, refuse mtu that requires the support of
+        * scattered packets when this feature has not been enabled before.
+        */
+       if (dev_data->dev_started && !dev_data->scattered_rx &&
+           (frame_size + 2 * TXGBE_VLAN_TAG_SIZE >
+            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+               PMD_INIT_LOG(ERR, "Stop port first.");
+               return -EINVAL;
+       }
+
+       /* update max frame size */
+       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+
+       if (hw->mode)
+               wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+                       TXGBE_FRAME_SIZE_MAX);
+       else
+               wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+                       TXGBE_FRMSZ_MAX(frame_size));
+
+       return 0;
+}
+
 static uint32_t
 txgbe_uta_vector(struct txgbe_hw *hw, struct rte_ether_addr *uc_addr)
 {
@@ -2667,6 +3292,25 @@ txgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
        return 0;
 }
 
+uint32_t
+txgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+       uint32_t new_val = orig_val;
+
+       if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+               new_val |= TXGBE_POOLETHCTL_UTA;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+               new_val |= TXGBE_POOLETHCTL_MCHA;
+       if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+               new_val |= TXGBE_POOLETHCTL_UCHA;
+       if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+               new_val |= TXGBE_POOLETHCTL_BCA;
+       if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+               new_val |= TXGBE_POOLETHCTL_MCP;
+
+       return new_val;
+}
+
 static int
 txgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
@@ -2799,6 +3443,37 @@ txgbe_configure_msix(struct rte_eth_dev *dev)
                        | TXGBE_ITR_WRDSA);
 }
 
+int
+txgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+                          uint16_t queue_idx, uint16_t tx_rate)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t bcnrc_val;
+
+       if (queue_idx >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
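+       /* ARBTXRATE carries a max/min pair; half the requested rate is used
+        * here as the lower bound.
+        */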
+       if (tx_rate != 0) {
+               bcnrc_val = TXGBE_ARBTXRATE_MAX(tx_rate);
+               bcnrc_val |= TXGBE_ARBTXRATE_MIN(tx_rate / 2);
+       } else {
+               bcnrc_val = 0;
+       }
+
+       /*
+        * Set global transmit compensation time to the MMW_SIZE in ARBTXMMW
+        * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
+        */
+       wr32(hw, TXGBE_ARBTXMMW, 0x14);
+
+       /* Set ARBTXRATE of queue X */
+       wr32(hw, TXGBE_ARBPOOLIDX, queue_idx);
+       wr32(hw, TXGBE_ARBTXRATE, bcnrc_val);
+       txgbe_flush(hw);
+
+       return 0;
+}
+
 static u8 *
 txgbe_dev_addr_list_itr(__rte_unused struct txgbe_hw *hw,
                        u8 **mc_addr_ptr, u32 *vmdq)
@@ -2825,6 +3500,501 @@ txgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
                                         txgbe_dev_addr_list_itr, TRUE);
 }
 
+static uint64_t
+txgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint64_t systime_cycles;
+
+       systime_cycles = (uint64_t)rd32(hw, TXGBE_TSTIMEL);
+       systime_cycles |= (uint64_t)rd32(hw, TXGBE_TSTIMEH) << 32;
+
+       return systime_cycles;
+}
+
+static uint64_t
+txgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint64_t rx_tstamp_cycles;
+
+       /* TSRXSTMPL stores ns and TSRXSTMPH stores seconds. */
+       rx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSRXSTMPL);
+       rx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSRXSTMPH) << 32;
+
+       return rx_tstamp_cycles;
+}
+
+static uint64_t
+txgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint64_t tx_tstamp_cycles;
+
+       /* TSTXSTMPL stores ns and TSTXSTMPH stores seconds. */
+       tx_tstamp_cycles = (uint64_t)rd32(hw, TXGBE_TSTXSTMPL);
+       tx_tstamp_cycles |= (uint64_t)rd32(hw, TXGBE_TSTXSTMPH) << 32;
+
+       return tx_tstamp_cycles;
+}
+
+static void
+txgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       struct rte_eth_link link;
+       uint32_t incval = 0;
+       uint32_t shift = 0;
+
+       /* Get current link speed. */
+       txgbe_dev_link_update(dev, 1);
+       rte_eth_linkstatus_get(dev, &link);
+
+       switch (link.link_speed) {
+       case ETH_SPEED_NUM_100M:
+               incval = TXGBE_INCVAL_100;
+               shift = TXGBE_INCVAL_SHIFT_100;
+               break;
+       case ETH_SPEED_NUM_1G:
+               incval = TXGBE_INCVAL_1GB;
+               shift = TXGBE_INCVAL_SHIFT_1GB;
+               break;
+       case ETH_SPEED_NUM_10G:
+       default:
+               incval = TXGBE_INCVAL_10GB;
+               shift = TXGBE_INCVAL_SHIFT_10GB;
+               break;
+       }
+
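+       /* Program the SYSTIME increment; TXGBE_TSTIMEINC_VP is assumed to
+        * pack the per-tick increment value together with a period of 2.
+        */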
+       wr32(hw, TXGBE_TSTIMEINC, TXGBE_TSTIMEINC_VP(incval, 2));
+
+       memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+       memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+       memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+       adapter->systime_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+       adapter->systime_tc.cc_shift = shift;
+       adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+       adapter->rx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+       adapter->rx_tstamp_tc.cc_shift = shift;
+       adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+       adapter->tx_tstamp_tc.cc_mask = TXGBE_CYCLECOUNTER_MASK;
+       adapter->tx_tstamp_tc.cc_shift = shift;
+       adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+static int
+txgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+       adapter->systime_tc.nsec += delta;
+       adapter->rx_tstamp_tc.nsec += delta;
+       adapter->tx_tstamp_tc.nsec += delta;
+
+       return 0;
+}
+
+static int
+txgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+       uint64_t ns;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+       ns = rte_timespec_to_ns(ts);
+       /* Set the timecounters to a new value. */
+       adapter->systime_tc.nsec = ns;
+       adapter->rx_tstamp_tc.nsec = ns;
+       adapter->tx_tstamp_tc.nsec = ns;
+
+       return 0;
+}
+
+static int
+txgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+       uint64_t ns, systime_cycles;
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+
+       systime_cycles = txgbe_read_systime_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+       *ts = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+txgbe_timesync_enable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t tsync_ctl;
+
+       /* Stop the timesync system time. */
+       wr32(hw, TXGBE_TSTIMEINC, 0x0);
+       /* Reset the timesync system time value. */
+       wr32(hw, TXGBE_TSTIMEL, 0x0);
+       wr32(hw, TXGBE_TSTIMEH, 0x0);
+
+       txgbe_start_timecounters(dev);
+
+       /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+       wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588),
+               RTE_ETHER_TYPE_1588 | TXGBE_ETFLT_ENA | TXGBE_ETFLT_1588);
+
+       /* Enable timestamping of received PTP packets. */
+       tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
+       tsync_ctl |= TXGBE_TSRXCTL_ENA;
+       wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
+
+       /* Enable timestamping of transmitted PTP packets. */
+       tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
+       tsync_ctl |= TXGBE_TSTXCTL_ENA;
+       wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
+
+       txgbe_flush(hw);
+
+       return 0;
+}
+
+static int
+txgbe_timesync_disable(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t tsync_ctl;
+
+       /* Disable timestamping of transmitted PTP packets. */
+       tsync_ctl = rd32(hw, TXGBE_TSTXCTL);
+       tsync_ctl &= ~TXGBE_TSTXCTL_ENA;
+       wr32(hw, TXGBE_TSTXCTL, tsync_ctl);
+
+       /* Disable timestamping of received PTP packets. */
+       tsync_ctl = rd32(hw, TXGBE_TSRXCTL);
+       tsync_ctl &= ~TXGBE_TSRXCTL_ENA;
+       wr32(hw, TXGBE_TSRXCTL, tsync_ctl);
+
+       /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+       wr32(hw, TXGBE_ETFLT(TXGBE_ETF_ID_1588), 0);
+
+       /* Stop incrementing the System Time registers. */
+       wr32(hw, TXGBE_TSTIMEINC, 0);
+
+       return 0;
+}
+
+static int
+txgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+                                struct timespec *timestamp,
+                                uint32_t flags __rte_unused)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       uint32_t tsync_rxctl;
+       uint64_t rx_tstamp_cycles;
+       uint64_t ns;
+
+       tsync_rxctl = rd32(hw, TXGBE_TSRXCTL);
+       if ((tsync_rxctl & TXGBE_TSRXCTL_VLD) == 0)
+               return -EINVAL;
+
+       rx_tstamp_cycles = txgbe_read_rx_tstamp_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return  0;
+}
+
+static int
+txgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+                                struct timespec *timestamp)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
+       uint32_t tsync_txctl;
+       uint64_t tx_tstamp_cycles;
+       uint64_t ns;
+
+       tsync_txctl = rd32(hw, TXGBE_TSTXCTL);
+       if ((tsync_txctl & TXGBE_TSTXCTL_VLD) == 0)
+               return -EINVAL;
+
+       tx_tstamp_cycles = txgbe_read_tx_tstamp_cyclecounter(dev);
+       ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+       *timestamp = rte_ns_to_timespec(ns);
+
+       return 0;
+}
+
+static int
+txgbe_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+       int count = 0;
+       int g_ind = 0;
+       const struct reg_info *reg_group;
+       const struct reg_info **reg_set = txgbe_regs_others;
+
+       while ((reg_group = reg_set[g_ind++]))
+               count += txgbe_regs_group_count(reg_group);
+
+       return count;
+}
+
+static int
+txgbe_get_regs(struct rte_eth_dev *dev,
+             struct rte_dev_reg_info *regs)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t *data = regs->data;
+       int g_ind = 0;
+       int count = 0;
+       const struct reg_info *reg_group;
+       const struct reg_info **reg_set = txgbe_regs_others;
+
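+       /* A first call with data == NULL only queries the dump size */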
+       if (data == NULL) {
+               regs->length = txgbe_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
+       /* Support only full register dump */
+       if (regs->length == 0 ||
+           regs->length == (uint32_t)txgbe_get_reg_length(dev)) {
+               regs->version = hw->mac.type << 24 |
+                               hw->revision_id << 16 |
+                               hw->device_id;
+               while ((reg_group = reg_set[g_ind++]))
+                       count += txgbe_read_regs_group(dev, &data[count],
+                                                     reg_group);
+               return 0;
+       }
+
+       return -ENOTSUP;
+}
+
+static int
+txgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+       /* Return unit is byte count */
+       return hw->rom.word_size * 2;
+}
+
+static int
+txgbe_get_eeprom(struct rte_eth_dev *dev,
+               struct rte_dev_eeprom_info *in_eeprom)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_rom_info *eeprom = &hw->rom;
+       uint16_t *data = in_eeprom->data;
+       int first, length;
+
+       first = in_eeprom->offset >> 1;
+       length = in_eeprom->length >> 1;
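+       /* The EEPROM is addressed in 16-bit words, so halve byte offsets */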
+       if (first > hw->rom.word_size ||
+           ((first + length) > hw->rom.word_size))
+               return -EINVAL;
+
+       in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+       return eeprom->readw_buffer(hw, first, length, data);
+}
+
+static int
+txgbe_set_eeprom(struct rte_eth_dev *dev,
+               struct rte_dev_eeprom_info *in_eeprom)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       struct txgbe_rom_info *eeprom = &hw->rom;
+       uint16_t *data = in_eeprom->data;
+       int first, length;
+
+       first = in_eeprom->offset >> 1;
+       length = in_eeprom->length >> 1;
+       if (first > hw->rom.word_size ||
+           ((first + length) > hw->rom.word_size))
+               return -EINVAL;
+
+       in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+       return eeprom->writew_buffer(hw,  first, length, data);
+}
+
+static int
+txgbe_get_module_info(struct rte_eth_dev *dev,
+                     struct rte_eth_dev_module_info *modinfo)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t status;
+       uint8_t sff8472_rev, addr_mode;
+       bool page_swap = false;
+
+       /* Check whether we support SFF-8472 or not */
+       status = hw->phy.read_i2c_eeprom(hw,
+                                            TXGBE_SFF_SFF_8472_COMP,
+                                            &sff8472_rev);
+       if (status != 0)
+               return -EIO;
+
+       /* addressing mode is not supported */
+       status = hw->phy.read_i2c_eeprom(hw,
+                                            TXGBE_SFF_SFF_8472_SWAP,
+                                            &addr_mode);
+       if (status != 0)
+               return -EIO;
+
+       if (addr_mode & TXGBE_SFF_ADDRESSING_MODE) {
+               PMD_DRV_LOG(ERR,
+                           "Address change required to access page 0xA2, "
+                           "but not supported. Please report the module "
+                           "type to the driver maintainers.");
+               page_swap = true;
+       }
+
+       if (sff8472_rev == TXGBE_SFF_SFF_8472_UNSUP || page_swap) {
+               /* We have an SFP, but it does not support SFF-8472 */
+               modinfo->type = RTE_ETH_MODULE_SFF_8079;
+               modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8079_LEN;
+       } else {
+               /* We have an SFP which supports a revision of SFF-8472. */
+               modinfo->type = RTE_ETH_MODULE_SFF_8472;
+               modinfo->eeprom_len = RTE_ETH_MODULE_SFF_8472_LEN;
+       }
+
+       return 0;
+}
+
+static int
+txgbe_get_module_eeprom(struct rte_eth_dev *dev,
+                       struct rte_dev_eeprom_info *info)
+{
+       struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+       uint32_t status = TXGBE_ERR_PHY_ADDR_INVALID;
+       uint8_t databyte = 0xFF;
+       uint8_t *data = info->data;
+       uint32_t i = 0;
+
+       if (info->length == 0)
+               return -EINVAL;
+
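+       /* Bytes below the SFF-8079 length come from the base EEPROM page;
+        * anything beyond is assumed to live in the SFF-8472 extended page.
+        */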
+       for (i = info->offset; i < info->offset + info->length; i++) {
+               if (i < RTE_ETH_MODULE_SFF_8079_LEN)
+                       status = hw->phy.read_i2c_eeprom(hw, i, &databyte);
+               else
+                       status = hw->phy.read_i2c_sff8472(hw, i, &databyte);
+
+               if (status != 0)
+                       return -EIO;
+
+               data[i - info->offset] = databyte;
+       }
+
+       return 0;
+}
+
+bool
+txgbe_rss_update_sp(enum txgbe_mac_type mac_type)
+{
+       switch (mac_type) {
+       case txgbe_mac_raptor:
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static int
+txgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+                       struct rte_eth_dcb_info *dcb_info)
+{
+       struct txgbe_dcb_config *dcb_config = TXGBE_DEV_DCB_CONFIG(dev);
+       struct txgbe_dcb_tc_config *tc;
+       struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+       uint8_t nb_tcs;
+       uint8_t i, j;
+
+       if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+               dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+       else
+               dcb_info->nb_tcs = 1;
+
+       tc_queue = &dcb_info->tc_queue;
+       nb_tcs = dcb_info->nb_tcs;
+
+       if (dcb_config->vt_mode) { /* vt is enabled */
+               struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+                               &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+                       dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+               if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+                       for (j = 0; j < nb_tcs; j++) {
+                               tc_queue->tc_rxq[0][j].base = j;
+                               tc_queue->tc_rxq[0][j].nb_queue = 1;
+                               tc_queue->tc_txq[0][j].base = j;
+                               tc_queue->tc_txq[0][j].nb_queue = 1;
+                       }
+               } else {
+                       for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+                               for (j = 0; j < nb_tcs; j++) {
+                                       tc_queue->tc_rxq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_rxq[i][j].nb_queue = 1;
+                                       tc_queue->tc_txq[i][j].base =
+                                               i * nb_tcs + j;
+                                       tc_queue->tc_txq[i][j].nb_queue = 1;
+                               }
+                       }
+               }
+       } else { /* vt is disabled */
+               struct rte_eth_dcb_rx_conf *rx_conf =
+                               &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+                       dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+               if (dcb_info->nb_tcs == ETH_4_TCS) {
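+                       /* 4 TCs: 32 Rx queues per TC; Tx queues are split
+                        * 64/32/16/16 as programmed below.
+                        */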
+                       for (i = 0; i < dcb_info->nb_tcs; i++) {
+                               dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+                               dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+                       }
+                       dcb_info->tc_queue.tc_txq[0][0].base = 0;
+                       dcb_info->tc_queue.tc_txq[0][1].base = 64;
+                       dcb_info->tc_queue.tc_txq[0][2].base = 96;
+                       dcb_info->tc_queue.tc_txq[0][3].base = 112;
+                       dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+                       dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+               } else if (dcb_info->nb_tcs == ETH_8_TCS) {
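+                       /* 8 TCs: 16 Rx queues per TC; Tx queues are split
+                        * 32/32/16/16/8/8/8/8 as programmed below.
+                        */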
+                       for (i = 0; i < dcb_info->nb_tcs; i++) {
+                               dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+                               dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+                       }
+                       dcb_info->tc_queue.tc_txq[0][0].base = 0;
+                       dcb_info->tc_queue.tc_txq[0][1].base = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].base = 64;
+                       dcb_info->tc_queue.tc_txq[0][3].base = 80;
+                       dcb_info->tc_queue.tc_txq[0][4].base = 96;
+                       dcb_info->tc_queue.tc_txq[0][5].base = 104;
+                       dcb_info->tc_queue.tc_txq[0][6].base = 112;
+                       dcb_info->tc_queue.tc_txq[0][7].base = 120;
+                       dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+                       dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+                       dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+                       dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+               }
+       }
+       for (i = 0; i < dcb_info->nb_tcs; i++) {
+               tc = &dcb_config->tc_config[i];
+               dcb_info->tc_bws[i] = tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent;
+       }
+       return 0;
+}
+
 static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .dev_configure              = txgbe_dev_configure,
        .dev_infos_get              = txgbe_dev_info_get,
@@ -2834,6 +4004,10 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .dev_set_link_down          = txgbe_dev_set_link_down,
        .dev_close                  = txgbe_dev_close,
        .dev_reset                  = txgbe_dev_reset,
+       .promiscuous_enable         = txgbe_dev_promiscuous_enable,
+       .promiscuous_disable        = txgbe_dev_promiscuous_disable,
+       .allmulticast_enable        = txgbe_dev_allmulticast_enable,
+       .allmulticast_disable       = txgbe_dev_allmulticast_disable,
        .link_update                = txgbe_dev_link_update,
        .stats_get                  = txgbe_dev_stats_get,
        .xstats_get                 = txgbe_dev_xstats_get,
@@ -2843,7 +4017,9 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .xstats_get_names           = txgbe_dev_xstats_get_names,
        .xstats_get_names_by_id     = txgbe_dev_xstats_get_names_by_id,
        .queue_stats_mapping_set    = txgbe_dev_queue_stats_mapping_set,
+       .fw_version_get             = txgbe_fw_version_get,
        .dev_supported_ptypes_get   = txgbe_dev_supported_ptypes_get,
+       .mtu_set                    = txgbe_dev_mtu_set,
        .vlan_filter_set            = txgbe_vlan_filter_set,
        .vlan_tpid_set              = txgbe_vlan_tpid_set,
        .vlan_offload_set           = txgbe_vlan_offload_set,
@@ -2858,14 +4034,39 @@ static const struct eth_dev_ops txgbe_eth_dev_ops = {
        .rx_queue_release           = txgbe_dev_rx_queue_release,
        .tx_queue_setup             = txgbe_dev_tx_queue_setup,
        .tx_queue_release           = txgbe_dev_tx_queue_release,
+       .dev_led_on                 = txgbe_dev_led_on,
+       .dev_led_off                = txgbe_dev_led_off,
+       .flow_ctrl_get              = txgbe_flow_ctrl_get,
+       .flow_ctrl_set              = txgbe_flow_ctrl_set,
+       .priority_flow_ctrl_set     = txgbe_priority_flow_ctrl_set,
        .mac_addr_add               = txgbe_add_rar,
        .mac_addr_remove            = txgbe_remove_rar,
        .mac_addr_set               = txgbe_set_default_mac_addr,
        .uc_hash_table_set          = txgbe_uc_hash_table_set,
        .uc_all_hash_table_set      = txgbe_uc_all_hash_table_set,
+       .set_queue_rate_limit       = txgbe_set_queue_rate_limit,
+       .reta_update                = txgbe_dev_rss_reta_update,
+       .reta_query                 = txgbe_dev_rss_reta_query,
+       .rss_hash_update            = txgbe_dev_rss_hash_update,
+       .rss_hash_conf_get          = txgbe_dev_rss_hash_conf_get,
        .set_mc_addr_list           = txgbe_dev_set_mc_addr_list,
        .rxq_info_get               = txgbe_rxq_info_get,
        .txq_info_get               = txgbe_txq_info_get,
+       .timesync_enable            = txgbe_timesync_enable,
+       .timesync_disable           = txgbe_timesync_disable,
+       .timesync_read_rx_timestamp = txgbe_timesync_read_rx_timestamp,
+       .timesync_read_tx_timestamp = txgbe_timesync_read_tx_timestamp,
+       .get_reg                    = txgbe_get_regs,
+       .get_eeprom_length          = txgbe_get_eeprom_length,
+       .get_eeprom                 = txgbe_get_eeprom,
+       .set_eeprom                 = txgbe_set_eeprom,
+       .get_module_info            = txgbe_get_module_info,
+       .get_module_eeprom          = txgbe_get_module_eeprom,
+       .get_dcb_info               = txgbe_dev_get_dcb_info,
+       .timesync_adjust_time       = txgbe_timesync_adjust_time,
+       .timesync_read_time         = txgbe_timesync_read_time,
+       .timesync_write_time        = txgbe_timesync_write_time,
+       .tx_done_cleanup            = txgbe_dev_tx_done_cleanup,
 };
 
 RTE_PMD_REGISTER_PCI(net_txgbe, rte_txgbe_pmd);