net/ixgbe: move private APIs to a specific file
author Wenzhuo Lu <wenzhuo.lu@intel.com>
Tue, 11 Apr 2017 08:31:24 +0000 (16:31 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
Wed, 19 Apr 2017 13:37:37 +0000 (15:37 +0200)
Create a new file rte_pmd_ixgbe.c for all the private
APIs. Move all the related code to the new file.
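
The public rte_pmd_ixgbe_* entry points keep their signatures; only the
implementations move, with device-specific work factored into internal
helpers that stay in ixgbe_ethdev.c. A minimal sketch of the resulting
wrapper pattern (illustrative only; the exact bodies live in the new
rte_pmd_ixgbe.c, and the rate-limit wrapper itself falls outside the
truncated diff below):

    int
    rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
                                    uint16_t tx_rate, uint64_t q_msk)
    {
            struct rte_eth_dev *dev;

            /* Validate the port and make sure it is driven by ixgbe,
             * then delegate to the internal helper.
             */
            RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
            dev = &rte_eth_devices[port];

            if (!is_ixgbe_supported(dev))
                    return -ENOTSUP;

            return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
    }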

Signed-off-by: Wenzhuo Lu <wenzhuo.lu@intel.com>
drivers/net/ixgbe/Makefile
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/ixgbe/ixgbe_ethdev.h
drivers/net/ixgbe/rte_pmd_ixgbe.c [new file with mode: 0644]

diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index f62f3d5..0a6b7f2 100644
@@ -120,6 +120,7 @@ ifeq ($(CONFIG_RTE_NIC_BYPASS),y)
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c
 SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_82599_bypass.c
 endif
+SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += rte_pmd_ixgbe.c
 
 # install this header file
 SYMLINK-$(CONFIG_RTE_LIBRTE_IXGBE_PMD)-include := rte_pmd_ixgbe.h
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index ef781ca..c226e0a 100644
@@ -74,8 +74,6 @@
 #include "base/ixgbe_phy.h"
 #include "ixgbe_regs.h"
 
-#include "rte_pmd_ixgbe.h"
-
 /*
  * High threshold controlling when to start sending XOFF frames. Must be at
  * least 8 bytes less than receive packet buffer size. This value is in units
@@ -2402,6 +2400,80 @@ ixgbe_dev_phy_intr_setup(struct rte_eth_dev *dev)
        }
 }
 
+int
+ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+                       uint16_t tx_rate, uint64_t q_msk)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_vf_info *vfinfo;
+       struct rte_eth_link link;
+       uint8_t  nb_q_per_pool;
+       uint32_t queue_stride;
+       uint32_t queue_idx, idx = 0, vf_idx;
+       uint32_t queue_end;
+       uint16_t total_rate = 0;
+       struct rte_pci_device *pci_dev;
+
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+       rte_eth_link_get_nowait(dev->data->port_id, &link);
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       if (tx_rate > link.link_speed)
+               return -EINVAL;
+
+       if (q_msk == 0)
+               return 0;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+       nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+       queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+       queue_idx = vf * queue_stride;
+       queue_end = queue_idx + nb_q_per_pool - 1;
+       if (queue_end >= hw->mac.max_tx_queues)
+               return -EINVAL;
+
+       if (vfinfo) {
+               for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
+                       if (vf_idx == vf)
+                               continue;
+                       for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+                               idx++)
+                               total_rate += vfinfo[vf_idx].tx_rate[idx];
+               }
+       } else {
+               return -EINVAL;
+       }
+
+       /* Store tx_rate for this vf. */
+       for (idx = 0; idx < nb_q_per_pool; idx++) {
+               if (((uint64_t)0x1 << idx) & q_msk) {
+                       if (vfinfo[vf].tx_rate[idx] != tx_rate)
+                               vfinfo[vf].tx_rate[idx] = tx_rate;
+                       total_rate += tx_rate;
+               }
+       }
+
+       if (total_rate > dev->data->dev_link.link_speed) {
+               /* Reset the stored TX rate of the VF if it would cause
+                * the link speed to be exceeded.
+                */
+               memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+               return -EINVAL;
+       }
+
+       /* Set RTTBCNRC of each queue/pool for VF X */
+       for (; queue_idx <= queue_end; queue_idx++) {
+               if (0x1 & q_msk)
+                       ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+               q_msk = q_msk >> 1;
+       }
+
+       return 0;
+}
+
 /*
  * Configure device link speed and setup link.
  * It returns 0 on success.
@@ -2517,8 +2589,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
                for (vf = 0; vf < pci_dev->max_vfs; vf++)
                        for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
                                if (vfinfo[vf].tx_rate[idx] != 0)
-                                       rte_pmd_ixgbe_set_vf_rate_limit(
-                                               dev->data->port_id, vf,
+                                       ixgbe_set_vf_rate_limit(
+                                               dev, vf,
                                                vfinfo[vf].tx_rate[idx],
                                                1 << idx);
        }
@@ -4406,39 +4478,10 @@ is_device_supported(struct rte_eth_dev *dev, struct rte_pci_driver *drv)
        return true;
 }
 
-int
-rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
-               struct ether_addr *mac_addr)
+bool
+is_ixgbe_supported(struct rte_eth_dev *dev)
 {
-       struct ixgbe_hw *hw;
-       struct ixgbe_vf_info *vfinfo;
-       int rar_entry;
-       uint8_t *new_mac = (uint8_t *)(mac_addr);
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (vf >= pci_dev->max_vfs)
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-       rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
-       if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
-               rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
-                               ETHER_ADDR_LEN);
-               return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
-                               IXGBE_RAH_AV);
-       }
-       return -EINVAL;
+       return is_device_supported(dev, &rte_ixgbe_pmd);
 }
 
 static int
@@ -4763,7 +4806,7 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        }
 }
 
-static int
+int
 ixgbe_vt_check(struct ixgbe_hw *hw)
 {
        uint32_t reg_val;
@@ -4909,703 +4952,180 @@ ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
        return new_val;
 }
 
-int
-rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
+#define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
+#define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
+#define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
+#define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
+#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
+       ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
+       ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
+
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+                     struct rte_eth_mirror_conf *mirror_conf,
+                     uint8_t rule_id, uint8_t on)
 {
-       struct ixgbe_hw *hw;
-       struct ixgbe_vf_info *vfinfo;
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
-       uint32_t ctrl;
+       uint32_t mr_ctl, vlvf;
+       uint32_t mp_lsb = 0;
+       uint32_t mv_msb = 0;
+       uint32_t mv_lsb = 0;
+       uint32_t mp_msb = 0;
+       uint8_t i = 0;
+       int reg_index = 0;
+       uint64_t vlan_mask = 0;
 
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+       const uint8_t pool_mask_offset = 32;
+       const uint8_t vlan_mask_offset = 32;
+       const uint8_t dst_pool_offset = 8;
+       const uint8_t rule_mr_offset  = 4;
+       const uint8_t mirror_rule_mask = 0x0F;
 
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct ixgbe_mirror_info *mr_info =
+                       (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       uint8_t mirror_type = 0;
 
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
+       if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;
 
-       if (vf >= pci_dev->max_vfs)
+       if (rule_id >= IXGBE_MAX_MIRROR_RULES)
                return -EINVAL;
 
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-
-       ctrl = IXGBE_PF_CONTROL_MSG;
-       if (vfinfo[vf].clear_to_send)
-               ctrl |= IXGBE_VT_MSGTYPE_CTS;
+       if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
+               PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
+                           mirror_conf->rule_type);
+               return -EINVAL;
+       }
 
-       ixgbe_write_mbx(hw, &ctrl, 1, vf);
+       if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
+               mirror_type |= IXGBE_MRCTL_VLME;
+               /* Check if the VLAN ID is valid and find the corresponding
+                * VLAN ID index in VLVF
+                */
+               for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
+                       if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+                               /* search for the pool VLAN filter index
+                                * related to this VLAN ID
+                                */
+                               reg_index = ixgbe_find_vlvf_slot(
+                                               hw,
+                                               mirror_conf->vlan.vlan_id[i],
+                                               false);
+                               if (reg_index < 0)
+                                       return -EINVAL;
+                               vlvf = IXGBE_READ_REG(hw,
+                                                     IXGBE_VLVF(reg_index));
+                               if ((vlvf & IXGBE_VLVF_VIEN) &&
+                                   ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
+                                     mirror_conf->vlan.vlan_id[i]))
+                                       vlan_mask |= (1ULL << reg_index);
+                               else
+                                       return -EINVAL;
+                       }
+               }
 
-       return 0;
-}
+               if (on) {
+                       mv_lsb = vlan_mask & 0xFFFFFFFF;
+                       mv_msb = vlan_mask >> vlan_mask_offset;
 
-int
-rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
-       struct ixgbe_hw *hw;
-       struct ixgbe_mac_info *mac;
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
+                       mr_info->mr_conf[rule_id].vlan.vlan_mask =
+                                               mirror_conf->vlan.vlan_mask;
+                       for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+                               if (mirror_conf->vlan.vlan_mask & (1ULL << i))
+                                       mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+                                               mirror_conf->vlan.vlan_id[i];
+                       }
+               } else {
+                       mv_lsb = 0;
+                       mv_msb = 0;
+                       mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+                       for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+                               mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+               }
+       }
 
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+       /*
+        * If pool mirroring is enabled, write the related pool mask
+        * register; if it is disabled, clear the PFMRVM register.
+        */
+       if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+               mirror_type |= IXGBE_MRCTL_VPME;
+               if (on) {
+                       mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+                       mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+                       mr_info->mr_conf[rule_id].pool_mask =
+                                       mirror_conf->pool_mask;
 
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
+               } else {
+                       mp_lsb = 0;
+                       mp_msb = 0;
+                       mr_info->mr_conf[rule_id].pool_mask = 0;
+               }
+       }
+       if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+               mirror_type |= IXGBE_MRCTL_UPME;
+       if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+               mirror_type |= IXGBE_MRCTL_DPME;
 
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
+       /* read the mirror control register and recalculate it */
+       mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
 
-       if (vf >= pci_dev->max_vfs)
-               return -EINVAL;
+       if (on) {
+               mr_ctl |= mirror_type;
+               mr_ctl &= mirror_rule_mask;
+               mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+       } else {
+               mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
+       }
 
-       if (on > 1)
-               return -EINVAL;
+       mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
+       mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
 
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       mac = &hw->mac;
+       /* write the mirror control register */
+       IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
-       mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+       /* write the pool mirror control register */
+       if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+               IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+               IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+                               mp_msb);
+       }
+       /* write the VLAN mirror control register */
+       if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+               IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+               IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+                               mv_msb);
+       }
 
        return 0;
 }
 
-int
-rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+static int
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
 {
-       struct ixgbe_hw *hw;
-       struct ixgbe_mac_info *mac;
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+       int mr_ctl = 0;
+       uint32_t lsb_val = 0;
+       uint32_t msb_val = 0;
+       const uint8_t rule_mr_offset = 4;
 
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
+       struct ixgbe_hw *hw =
+               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       struct ixgbe_mirror_info *mr_info =
+               (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
 
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
+       if (ixgbe_vt_check(hw) < 0)
                return -ENOTSUP;
 
-       if (vf >= pci_dev->max_vfs)
-               return -EINVAL;
+       memset(&mr_info->mr_conf[rule_id], 0,
+              sizeof(struct rte_eth_mirror_conf));
 
-       if (on > 1)
-               return -EINVAL;
+       /* clear PFVMCTL register */
+       IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       mac = &hw->mac;
-       mac->ops.set_mac_anti_spoofing(hw, on, vf);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
-{
-       struct ixgbe_hw *hw;
-       uint32_t ctrl;
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (vf >= pci_dev->max_vfs)
-               return -EINVAL;
-
-       if (vlan_id > ETHER_MAX_VLAN_ID)
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
-       if (vlan_id) {
-               ctrl = vlan_id;
-               ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
-       } else {
-               ctrl = 0;
-       }
-
-       IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
-{
-       struct ixgbe_hw *hw;
-       uint32_t ctrl;
-       struct rte_eth_dev *dev;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (on > 1)
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
-       /* enable or disable VMDQ loopback */
-       if (on)
-               ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
-       else
-               ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
-
-       IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
-{
-       struct ixgbe_hw *hw;
-       uint32_t reg_value;
-       int i;
-       int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
-       struct rte_eth_dev *dev;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (on > 1)
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       for (i = 0; i <= num_queues; i++) {
-               reg_value = IXGBE_QDE_WRITE |
-                               (i << IXGBE_QDE_IDX_SHIFT) |
-                               (on & IXGBE_QDE_ENABLE);
-               IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
-       }
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
-{
-       struct ixgbe_hw *hw;
-       uint32_t reg_value;
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       /* only support VF's 0 to 63 */
-       if ((vf >= pci_dev->max_vfs) || (vf > 63))
-               return -EINVAL;
-
-       if (on > 1)
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
-       if (on)
-               reg_value |= IXGBE_SRRCTL_DROP_EN;
-       else
-               reg_value &= ~IXGBE_SRRCTL_DROP_EN;
-
-       IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
-{
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
-       struct ixgbe_hw *hw;
-       uint16_t queues_per_pool;
-       uint32_t q;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (vf >= pci_dev->max_vfs)
-               return -EINVAL;
-
-       if (on > 1)
-               return -EINVAL;
-
-       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
-
-       /* The PF has 128 queue pairs and in SRIOV configuration
-        * those queues will be assigned to VF's, so RXDCTL
-        * registers will be dealing with queues which will be
-        * assigned to VF's.
-        * Let's say we have SRIOV configured with 31 VF's then the
-        * first 124 queues 0-123 will be allocated to VF's and only
-        * the last 4 queues 123-127 will be assigned to the PF.
-        */
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-                                 ETH_16_POOLS;
-       else
-               queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
-                                 ETH_64_POOLS;
-
-       for (q = 0; q < queues_per_pool; q++)
-               (*dev->dev_ops->vlan_strip_queue_set)(dev,
-                               q + vf * queues_per_pool, on);
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on)
-{
-       int val = 0;
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
-       struct ixgbe_hw *hw;
-       uint32_t vmolr;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (vf >= pci_dev->max_vfs)
-               return -EINVAL;
-
-       if (on > 1)
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
-
-       if (hw->mac.type == ixgbe_mac_82598EB) {
-               PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
-                            " on 82599 hardware and newer");
-               return -ENOTSUP;
-       }
-       if (ixgbe_vt_check(hw) < 0)
-               return -ENOTSUP;
-
-       val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
-
-       if (on)
-               vmolr |= val;
-       else
-               vmolr &= ~val;
-
-       IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
-{
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
-       uint32_t reg, addr;
-       uint32_t val;
-       const uint8_t bit1 = 0x1;
-       struct ixgbe_hw *hw;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (vf >= pci_dev->max_vfs)
-               return -EINVAL;
-
-       if (on > 1)
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       if (ixgbe_vt_check(hw) < 0)
-               return -ENOTSUP;
-
-       /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
-       if (vf >= 32) {
-               addr = IXGBE_VFRE(1);
-               val = bit1 << (vf - 32);
-       } else {
-               addr = IXGBE_VFRE(0);
-               val = bit1 << vf;
-       }
-
-       reg = IXGBE_READ_REG(hw, addr);
-
-       if (on)
-               reg |= val;
-       else
-               reg &= ~val;
-
-       IXGBE_WRITE_REG(hw, addr, reg);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
-{
-       struct rte_eth_dev *dev;
-       struct rte_pci_device *pci_dev;
-       uint32_t reg, addr;
-       uint32_t val;
-       const uint8_t bit1 = 0x1;
-
-       struct ixgbe_hw *hw;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (vf >= pci_dev->max_vfs)
-               return -EINVAL;
-
-       if (on > 1)
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       if (ixgbe_vt_check(hw) < 0)
-               return -ENOTSUP;
-
-       /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
-       if (vf >= 32) {
-               addr = IXGBE_VFTE(1);
-               val = bit1 << (vf - 32);
-       } else {
-               addr = IXGBE_VFTE(0);
-               val = bit1 << vf;
-       }
-
-       reg = IXGBE_READ_REG(hw, addr);
-
-       if (on)
-               reg |= val;
-       else
-               reg &= ~val;
-
-       IXGBE_WRITE_REG(hw, addr, reg);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
-                       uint64_t vf_mask, uint8_t vlan_on)
-{
-       struct rte_eth_dev *dev;
-       int ret = 0;
-       uint16_t vf_idx;
-       struct ixgbe_hw *hw;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       if (ixgbe_vt_check(hw) < 0)
-               return -ENOTSUP;
-
-       for (vf_idx = 0; vf_idx < 64; vf_idx++) {
-               if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
-                       ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
-                                                  vlan_on, false);
-                       if (ret < 0)
-                               return ret;
-               }
-       }
-
-       return ret;
-}
-
-int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
-       uint16_t tx_rate, uint64_t q_msk)
-{
-       struct rte_eth_dev *dev;
-       struct ixgbe_hw *hw;
-       struct ixgbe_vf_info *vfinfo;
-       struct rte_eth_link link;
-       uint8_t  nb_q_per_pool;
-       uint32_t queue_stride;
-       uint32_t queue_idx, idx = 0, vf_idx;
-       uint32_t queue_end;
-       uint16_t total_rate = 0;
-       struct rte_pci_device *pci_dev;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-       pci_dev = IXGBE_DEV_TO_PCI(dev);
-       rte_eth_link_get_nowait(port, &link);
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (vf >= pci_dev->max_vfs)
-               return -EINVAL;
-
-       if (tx_rate > link.link_speed)
-               return -EINVAL;
-
-       if (q_msk == 0)
-               return 0;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-       nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
-       queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
-       queue_idx = vf * queue_stride;
-       queue_end = queue_idx + nb_q_per_pool - 1;
-       if (queue_end >= hw->mac.max_tx_queues)
-               return -EINVAL;
-
-       if (vfinfo) {
-               for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
-                       if (vf_idx == vf)
-                               continue;
-                       for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
-                               idx++)
-                               total_rate += vfinfo[vf_idx].tx_rate[idx];
-               }
-       } else {
-               return -EINVAL;
-       }
-
-       /* Store tx_rate for this vf. */
-       for (idx = 0; idx < nb_q_per_pool; idx++) {
-               if (((uint64_t)0x1 << idx) & q_msk) {
-                       if (vfinfo[vf].tx_rate[idx] != tx_rate)
-                               vfinfo[vf].tx_rate[idx] = tx_rate;
-                       total_rate += tx_rate;
-               }
-       }
-
-       if (total_rate > dev->data->dev_link.link_speed) {
-               /* Reset stored TX rate of the VF if it causes exceed
-                * link speed.
-                */
-               memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
-               return -EINVAL;
-       }
-
-       /* Set RTTBCNRC of each queue/pool for vf X  */
-       for (; queue_idx <= queue_end; queue_idx++) {
-               if (0x1 & q_msk)
-                       ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
-               q_msk = q_msk >> 1;
-       }
-
-       return 0;
-}
-
-#define IXGBE_MRCTL_VPME  0x01 /* Virtual Pool Mirroring. */
-#define IXGBE_MRCTL_UPME  0x02 /* Uplink Port Mirroring. */
-#define IXGBE_MRCTL_DPME  0x04 /* Downlink Port Mirroring. */
-#define IXGBE_MRCTL_VLME  0x08 /* VLAN Mirroring. */
-#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
-       ((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
-       ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
-
-static int
-ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
-                       struct rte_eth_mirror_conf *mirror_conf,
-                       uint8_t rule_id, uint8_t on)
-{
-       uint32_t mr_ctl, vlvf;
-       uint32_t mp_lsb = 0;
-       uint32_t mv_msb = 0;
-       uint32_t mv_lsb = 0;
-       uint32_t mp_msb = 0;
-       uint8_t i = 0;
-       int reg_index = 0;
-       uint64_t vlan_mask = 0;
-
-       const uint8_t pool_mask_offset = 32;
-       const uint8_t vlan_mask_offset = 32;
-       const uint8_t dst_pool_offset = 8;
-       const uint8_t rule_mr_offset  = 4;
-       const uint8_t mirror_rule_mask = 0x0F;
-
-       struct ixgbe_mirror_info *mr_info =
-                       (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-       struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint8_t mirror_type = 0;
-
-       if (ixgbe_vt_check(hw) < 0)
-               return -ENOTSUP;
-
-       if (rule_id >= IXGBE_MAX_MIRROR_RULES)
-               return -EINVAL;
-
-       if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
-               PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
-                       mirror_conf->rule_type);
-               return -EINVAL;
-       }
-
-       if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
-               mirror_type |= IXGBE_MRCTL_VLME;
-               /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
-               for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
-                       if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
-                               /* search vlan id related pool vlan filter index */
-                               reg_index = ixgbe_find_vlvf_slot(hw,
-                                                mirror_conf->vlan.vlan_id[i],
-                                                false);
-                               if (reg_index < 0)
-                                       return -EINVAL;
-                               vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
-                               if ((vlvf & IXGBE_VLVF_VIEN) &&
-                                   ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
-                                     mirror_conf->vlan.vlan_id[i]))
-                                       vlan_mask |= (1ULL << reg_index);
-                               else
-                                       return -EINVAL;
-                       }
-               }
-
-               if (on) {
-                       mv_lsb = vlan_mask & 0xFFFFFFFF;
-                       mv_msb = vlan_mask >> vlan_mask_offset;
-
-                       mr_info->mr_conf[rule_id].vlan.vlan_mask =
-                                               mirror_conf->vlan.vlan_mask;
-                       for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
-                               if (mirror_conf->vlan.vlan_mask & (1ULL << i))
-                                       mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
-                                               mirror_conf->vlan.vlan_id[i];
-                       }
-               } else {
-                       mv_lsb = 0;
-                       mv_msb = 0;
-                       mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
-                       for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
-                               mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
-               }
-       }
-
-       /*
-        * if enable pool mirror, write related pool mask register,if disable
-        * pool mirror, clear PFMRVM register
-        */
-       if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
-               mirror_type |= IXGBE_MRCTL_VPME;
-               if (on) {
-                       mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
-                       mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
-                       mr_info->mr_conf[rule_id].pool_mask =
-                                       mirror_conf->pool_mask;
-
-               } else {
-                       mp_lsb = 0;
-                       mp_msb = 0;
-                       mr_info->mr_conf[rule_id].pool_mask = 0;
-               }
-       }
-       if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
-               mirror_type |= IXGBE_MRCTL_UPME;
-       if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
-               mirror_type |= IXGBE_MRCTL_DPME;
-
-       /* read  mirror control register and recalculate it */
-       mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
-
-       if (on) {
-               mr_ctl |= mirror_type;
-               mr_ctl &= mirror_rule_mask;
-               mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
-       } else
-               mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
-
-       mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
-       mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
-
-       /* write mirrror control  register */
-       IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-
-       /* write pool mirrror control  register */
-       if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
-               IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
-               IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
-                               mp_msb);
-       }
-       /* write VLAN mirrror control  register */
-       if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
-               IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
-               IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
-                               mv_msb);
-       }
-
-       return 0;
-}
-
-static int
-ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
-{
-       int mr_ctl = 0;
-       uint32_t lsb_val = 0;
-       uint32_t msb_val = 0;
-       const uint8_t rule_mr_offset = 4;
-
-       struct ixgbe_hw *hw =
-               IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       struct ixgbe_mirror_info *mr_info =
-               (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-
-       if (ixgbe_vt_check(hw) < 0)
-               return -ENOTSUP;
-
-       memset(&mr_info->mr_conf[rule_id], 0,
-               sizeof(struct rte_eth_mirror_conf));
-
-       /* clear PFVMCTL register */
-       IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-
-       /* clear pool mask register */
-       IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
-       IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+       /* clear pool mask register */
+       IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+       IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
 
        /* clear vlan mask register */
        IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
@@ -8252,303 +7772,6 @@ int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw)
        return IXGBE_SUCCESS;
 }
 
-int
-rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
-{
-       struct ixgbe_hw *hw;
-       struct rte_eth_dev *dev;
-       uint32_t ctrl;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       /* Stop the data paths */
-       if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
-               return -ENOTSUP;
-       /*
-        * Workaround:
-        * As no ixgbe_disable_sec_rx_path equivalent is
-        * implemented for tx in the base code, and we are
-        * not allowed to modify the base code in DPDK, so
-        * just call the hand-written one directly for now.
-        * The hardware support has been checked by
-        * ixgbe_disable_sec_rx_path().
-        */
-       ixgbe_disable_sec_tx_path_generic(hw);
-
-       /* Enable Ethernet CRC (required by MACsec offload) */
-       ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-       ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
-       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
-
-       /* Enable the TX and RX crypto engines */
-       ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
-       ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
-       IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
-
-       ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-       ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
-       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
-
-       ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
-       ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
-       ctrl |= 0x3;
-       IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
-
-       /* Enable SA lookup */
-       ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
-       ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
-       ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
-                    IXGBE_LSECTXCTRL_AUTH;
-       ctrl |= IXGBE_LSECTXCTRL_AISCI;
-       ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
-       ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
-       IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
-
-       ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
-       ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
-       ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
-       ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
-       if (rp)
-               ctrl |= IXGBE_LSECRXCTRL_RP;
-       else
-               ctrl &= ~IXGBE_LSECRXCTRL_RP;
-       IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
-
-       /* Start the data paths */
-       ixgbe_enable_sec_rx_path(hw);
-       /*
-        * Workaround:
-        * As no ixgbe_enable_sec_rx_path equivalent is
-        * implemented for tx in the base code, and we are
-        * not allowed to modify the base code in DPDK, so
-        * just call the hand-written one directly for now.
-        */
-       ixgbe_enable_sec_tx_path_generic(hw);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_disable(uint8_t port)
-{
-       struct ixgbe_hw *hw;
-       struct rte_eth_dev *dev;
-       uint32_t ctrl;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       /* Stop the data paths */
-       if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
-               return -ENOTSUP;
-       /*
-        * Workaround:
-        * As no ixgbe_disable_sec_rx_path equivalent is
-        * implemented for tx in the base code, and we are
-        * not allowed to modify the base code in DPDK, so
-        * just call the hand-written one directly for now.
-        * The hardware support has been checked by
-        * ixgbe_disable_sec_rx_path().
-        */
-       ixgbe_disable_sec_tx_path_generic(hw);
-
-       /* Disable the TX and RX crypto engines */
-       ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
-       ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
-       IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
-
-       ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
-       ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
-       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
-
-       /* Disable SA lookup */
-       ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
-       ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
-       ctrl |= IXGBE_LSECTXCTRL_DISABLE;
-       IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
-
-       ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
-       ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
-       ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
-       IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
-
-       /* Start the data paths */
-       ixgbe_enable_sec_rx_path(hw);
-       /*
-        * Workaround:
-        * As no ixgbe_enable_sec_rx_path equivalent is
-        * implemented for tx in the base code, and we are
-        * not allowed to modify the base code in DPDK, so
-        * just call the hand-written one directly for now.
-        */
-       ixgbe_enable_sec_tx_path_generic(hw);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
-{
-       struct ixgbe_hw *hw;
-       struct rte_eth_dev *dev;
-       uint32_t ctrl;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
-       IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
-
-       ctrl = mac[4] | (mac[5] << 8);
-       IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
-{
-       struct ixgbe_hw *hw;
-       struct rte_eth_dev *dev;
-       uint32_t ctrl;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
-       IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
-
-       pi = rte_cpu_to_be_16(pi);
-       ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
-       IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
-                                uint32_t pn, uint8_t *key)
-{
-       struct ixgbe_hw *hw;
-       struct rte_eth_dev *dev;
-       uint32_t ctrl, i;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       if (idx != 0 && idx != 1)
-               return -EINVAL;
-
-       if (an >= 4)
-               return -EINVAL;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       /* Set the PN and key */
-       pn = rte_cpu_to_be_32(pn);
-       if (idx == 0) {
-               IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
-
-               for (i = 0; i < 4; i++) {
-                       ctrl = (key[i * 4 + 0] <<  0) |
-                              (key[i * 4 + 1] <<  8) |
-                              (key[i * 4 + 2] << 16) |
-                              (key[i * 4 + 3] << 24);
-                       IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
-               }
-       } else {
-               IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
-
-               for (i = 0; i < 4; i++) {
-                       ctrl = (key[i * 4 + 0] <<  0) |
-                              (key[i * 4 + 1] <<  8) |
-                              (key[i * 4 + 2] << 16) |
-                              (key[i * 4 + 3] << 24);
-                       IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
-               }
-       }
-
-       /* Set AN and select the SA */
-       ctrl = (an << idx * 2) | (idx << 4);
-       IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
-
-       return 0;
-}
-
-int
-rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
-                                uint32_t pn, uint8_t *key)
-{
-       struct ixgbe_hw *hw;
-       struct rte_eth_dev *dev;
-       uint32_t ctrl, i;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
-       if (idx != 0 && idx != 1)
-               return -EINVAL;
-
-       if (an >= 4)
-               return -EINVAL;
-
-       /* Set the PN */
-       pn = rte_cpu_to_be_32(pn);
-       IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
-
-       /* Set the key */
-       for (i = 0; i < 4; i++) {
-               ctrl = (key[i * 4 + 0] <<  0) |
-                      (key[i * 4 + 1] <<  8) |
-                      (key[i * 4 + 2] << 16) |
-                      (key[i * 4 + 3] << 24);
-               IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
-       }
-
-       /* Set the AN and validate the SA */
-       ctrl = an | (1 << 2);
-       IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
-
-       return 0;
-}
-
 /* restore n-tuple filter */
 static inline void
 ixgbe_ntuple_filter_restore(struct rte_eth_dev *dev)
@@ -8715,79 +7938,6 @@ ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev)
        return 0;
 }
 
-int
-rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
-                             uint8_t tc_num,
-                             uint8_t *bw_weight)
-{
-       struct rte_eth_dev *dev;
-       struct ixgbe_dcb_config *dcb_config;
-       struct ixgbe_dcb_tc_config *tc;
-       struct rte_eth_conf *eth_conf;
-       struct ixgbe_bw_conf *bw_conf;
-       uint8_t i;
-       uint8_t nb_tcs;
-       uint16_t sum;
-
-       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
-       dev = &rte_eth_devices[port];
-
-       if (!is_device_supported(dev, &rte_ixgbe_pmd))
-               return -ENOTSUP;
-
-       if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
-               PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
-                           IXGBE_DCB_MAX_TRAFFIC_CLASS);
-               return -EINVAL;
-       }
-
-       dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
-       bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
-       eth_conf = &dev->data->dev_conf;
-
-       if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
-               nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
-       } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
-               if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
-                   ETH_32_POOLS)
-                       nb_tcs = ETH_4_TCS;
-               else
-                       nb_tcs = ETH_8_TCS;
-       } else {
-               nb_tcs = 1;
-       }
-
-       if (nb_tcs != tc_num) {
-               PMD_DRV_LOG(ERR,
-                           "Weight should be set for all %d enabled TCs.",
-                           nb_tcs);
-               return -EINVAL;
-       }
-
-       sum = 0;
-       for (i = 0; i < nb_tcs; i++)
-               sum += bw_weight[i];
-       if (sum != 100) {
-               PMD_DRV_LOG(ERR,
-                           "The summary of the TC weight should be 100.");
-               return -EINVAL;
-       }
-
-       for (i = 0; i < nb_tcs; i++) {
-               tc = &dcb_config->tc_config[i];
-               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
-       }
-       for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
-               tc = &dcb_config->tc_config[i];
-               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
-       }
-
-       bw_conf->tc_num = nb_tcs;
-
-       return 0;
-}
-
 RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd);
 RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
 RTE_PMD_REGISTER_KMOD_DEP(net_ixgbe, "* igb_uio | uio_pci_generic | vfio");
index a32ba4d..5176b02 100644 (file)
@@ -666,6 +666,11 @@ int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
 
 int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
 
+int ixgbe_vt_check(struct ixgbe_hw *hw);
+int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+                           uint16_t tx_rate, uint64_t q_msk);
+bool is_ixgbe_supported(struct rte_eth_dev *dev);
+
 static inline int
 ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
                              uint16_t ethertype)
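
Callers are unaffected by the move: the private APIs are still exported via
rte_pmd_ixgbe.h. A hypothetical application-side call, with illustrative
port/VF/rate/mask values:

    #include <rte_pmd_ixgbe.h>

    /* Cap queue 0 (mask bit 0) of VF 0 on port 0 at 1000 Mbps. */
    static int
    cap_vf0_queue0(void)
    {
            return rte_pmd_ixgbe_set_vf_rate_limit(0, 0, 1000, 0x1);
    }
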
diff --git a/drivers/net/ixgbe/rte_pmd_ixgbe.c b/drivers/net/ixgbe/rte_pmd_ixgbe.c
new file mode 100644
index 0000000..e8fc9a6
--- /dev/null
+++ b/drivers/net/ixgbe/rte_pmd_ixgbe.c
@@ -0,0 +1,910 @@
+/*-
+ *   BSD LICENSE
+ *
+ *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
+ *   All rights reserved.
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
+ *   are met:
+ *
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
+ *       distribution.
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_ethdev.h>
+
+#include "base/ixgbe_api.h"
+#include "ixgbe_ethdev.h"
+#include "rte_pmd_ixgbe.h"
+
+int
+rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+                             struct ether_addr *mac_addr)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_vf_info *vfinfo;
+       int rar_entry;
+       uint8_t *new_mac = (uint8_t *)(mac_addr);
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+       rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+       if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+               rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+                          ETHER_ADDR_LEN);
+               return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
+                                          IXGBE_RAH_AV);
+       }
+       return -EINVAL;
+}
+
+int
+rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_vf_info *vfinfo;
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+       uint32_t ctrl;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+
+       ctrl = IXGBE_PF_CONTROL_MSG;
+       if (vfinfo[vf].clear_to_send)
+               ctrl |= IXGBE_VT_MSGTYPE_CTS;
+
+       ixgbe_write_mbx(hw, &ctrl, 1, vf);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_mac_info *mac;
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mac = &hw->mac;
+
+       mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       struct ixgbe_mac_info *mac;
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       mac = &hw->mac;
+       mac->ops.set_mac_anti_spoofing(hw, on, vf);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+{
+       struct ixgbe_hw *hw;
+       uint32_t ctrl;
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       if (vlan_id > ETHER_MAX_VLAN_ID)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+       if (vlan_id) {
+               ctrl = vlan_id;
+               ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
+       } else {
+               ctrl = 0;
+       }
+
+       IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       uint32_t ctrl;
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+       /* enable or disable VMDQ loopback */
+       if (on)
+               ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
+       else
+               ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       uint32_t reg_value;
+       int i;
+       int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       for (i = 0; i <= num_queues; i++) {
+               reg_value = IXGBE_QDE_WRITE |
+                               (i << IXGBE_QDE_IDX_SHIFT) |
+                               (on & IXGBE_QDE_ENABLE);
+               IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
+       }
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct ixgbe_hw *hw;
+       uint32_t reg_value;
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       /* only support VFs 0 to 63 */
+       if ((vf >= pci_dev->max_vfs) || (vf > 63))
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
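+       /* DROP_EN in SRRCTL causes the hardware to drop incoming packets
+        * when no receive descriptors are available.
+        */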
+       reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
+       if (on)
+               reg_value |= IXGBE_SRRCTL_DROP_EN;
+       else
+               reg_value &= ~IXGBE_SRRCTL_DROP_EN;
+
+       IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+       struct ixgbe_hw *hw;
+       uint16_t queues_per_pool;
+       uint32_t q;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+       /* The PF has 128 queue pairs and in SRIOV configuration
+        * those queues are assigned to VFs, so the RXDCTL
+        * registers being programmed here belong to queues
+        * owned by VFs.
+        * For example, with SRIOV configured for 31 VFs, the
+        * first 124 queues 0-123 are allocated to the VFs and only
+        * the last 4 queues 124-127 are assigned to the PF.
+        */
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+                                 ETH_16_POOLS;
+       else
+               queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
+                                 ETH_64_POOLS;
+
+       for (q = 0; q < queues_per_pool; q++)
+               (*dev->dev_ops->vlan_strip_queue_set)(dev,
+                               q + vf * queues_per_pool, on);
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf,
+                           uint16_t rx_mask, uint8_t on)
+{
+       int val = 0;
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+       struct ixgbe_hw *hw;
+       uint32_t vmolr;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               PMD_INIT_LOG(ERR, "setting VF receive mode is only supported"
+                            " on 82599 hardware and newer");
+               return -ENOTSUP;
+       }
+       if (ixgbe_vt_check(hw) < 0)
+               return -ENOTSUP;
+
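+       /* ixgbe_convert_vm_rx_mask_to_val() translates the ETH_VMDQ_ACCEPT_*
+        * flags in rx_mask into the matching VMOLR bits, which are then set
+        * or cleared as a group below.
+        */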
+       val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+       if (on)
+               vmolr |= val;
+       else
+               vmolr &= ~val;
+
+       IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+       uint32_t reg, addr;
+       uint32_t val;
+       const uint8_t bit1 = 0x1;
+       struct ixgbe_hw *hw;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (ixgbe_vt_check(hw) < 0)
+               return -ENOTSUP;
+
+       /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+       if (vf >= 32) {
+               addr = IXGBE_VFRE(1);
+               val = bit1 << (vf - 32);
+       } else {
+               addr = IXGBE_VFRE(0);
+               val = bit1 << vf;
+       }
+
+       reg = IXGBE_READ_REG(hw, addr);
+
+       if (on)
+               reg |= val;
+       else
+               reg &= ~val;
+
+       IXGBE_WRITE_REG(hw, addr, reg);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
+{
+       struct rte_eth_dev *dev;
+       struct rte_pci_device *pci_dev;
+       uint32_t reg, addr;
+       uint32_t val;
+       const uint8_t bit1 = 0x1;
+
+       struct ixgbe_hw *hw;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+       pci_dev = IXGBE_DEV_TO_PCI(dev);
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (vf >= pci_dev->max_vfs)
+               return -EINVAL;
+
+       if (on > 1)
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (ixgbe_vt_check(hw) < 0)
+               return -ENOTSUP;
+
+       /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+       if (vf >= 32) {
+               addr = IXGBE_VFTE(1);
+               val = bit1 << (vf - 32);
+       } else {
+               addr = IXGBE_VFTE(0);
+               val = bit1 << vf;
+       }
+
+       reg = IXGBE_READ_REG(hw, addr);
+
+       if (on)
+               reg |= val;
+       else
+               reg &= ~val;
+
+       IXGBE_WRITE_REG(hw, addr, reg);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
+                                uint64_t vf_mask, uint8_t vlan_on)
+{
+       struct rte_eth_dev *dev;
+       int ret = 0;
+       uint16_t vf_idx;
+       struct ixgbe_hw *hw;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
+               return -EINVAL;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+       if (ixgbe_vt_check(hw) < 0)
+               return -ENOTSUP;
+
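+       /* Walk the 64-bit pool mask; for every selected VF pool, set_vfta()
+        * adds or removes the VLAN in the filter table on that pool's behalf.
+        */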
+       for (vf_idx = 0; vf_idx < 64; vf_idx++) {
+               if (vf_mask & (1ULL << vf_idx)) {
+                       ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
+                                                  vlan_on, false);
+                       if (ret < 0)
+                               return ret;
+               }
+       }
+
+       return ret;
+}
+
+int
+rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
+                               uint16_t tx_rate, uint64_t q_msk)
+{
+       struct rte_eth_dev *dev;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
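+       /* The per-queue rate programming itself lives in
+        * ixgbe_set_vf_rate_limit() in ixgbe_ethdev.c.
+        */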
+       return ixgbe_set_vf_rate_limit(dev, vf, tx_rate, q_msk);
+}
+
+int
+rte_pmd_ixgbe_macsec_enable(uint8_t port, uint8_t en, uint8_t rp)
+{
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev *dev;
+       uint32_t ctrl;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Stop the data paths */
+       if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+               return -ENOTSUP;
+       /**
+        * Workaround:
+        * The base code implements no TX equivalent of
+        * ixgbe_disable_sec_rx_path(), and the base code must not
+        * be modified in DPDK, so call the hand-written helper
+        * directly for now. Hardware support has already been
+        * checked by ixgbe_disable_sec_rx_path().
+        */
+       ixgbe_disable_sec_tx_path_generic(hw);
+
+       /* Enable Ethernet CRC (required by MACsec offload) */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+       ctrl |= IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_RXCRCSTRP;
+       IXGBE_WRITE_REG(hw, IXGBE_HLREG0, ctrl);
+
+       /* Enable the TX and RX crypto engines */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+       ctrl &= ~IXGBE_SECTXCTRL_SECTX_DIS;
+       IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+       ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+       ctrl &= ~IXGBE_SECRXCTRL_SECRX_DIS;
+       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
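+       /* Set the minimum SEC TX inter-frame gap to 3, presumably the
+        * value the MACsec engine requires; the rest of the register is
+        * left untouched.
+        */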
+       ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
+       ctrl &= ~IXGBE_SECTX_MINSECIFG_MASK;
+       ctrl |= 0x3;
+       IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, ctrl);
+
+       /* Enable SA lookup */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+       ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+       ctrl |= en ? IXGBE_LSECTXCTRL_AUTH_ENCRYPT :
+                    IXGBE_LSECTXCTRL_AUTH;
+       ctrl |= IXGBE_LSECTXCTRL_AISCI;
+       ctrl &= ~IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+       ctrl |= IXGBE_MACSEC_PNTHRSH & IXGBE_LSECTXCTRL_PNTHRSH_MASK;
+       IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
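+       /* RX side: strict mode discards frames that fail the MACsec check,
+        * and the rp flag toggles replay protection.
+        */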
+       ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+       ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+       ctrl |= IXGBE_LSECRXCTRL_STRICT << IXGBE_LSECRXCTRL_EN_SHIFT;
+       ctrl &= ~IXGBE_LSECRXCTRL_PLSH;
+       if (rp)
+               ctrl |= IXGBE_LSECRXCTRL_RP;
+       else
+               ctrl &= ~IXGBE_LSECRXCTRL_RP;
+       IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+       /* Start the data paths */
+       ixgbe_enable_sec_rx_path(hw);
+       /**
+        * Workaround:
+        * The base code implements no TX equivalent of
+        * ixgbe_enable_sec_rx_path(), and the base code must not
+        * be modified in DPDK, so call the hand-written helper
+        * directly for now.
+        */
+       ixgbe_enable_sec_tx_path_generic(hw);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_disable(uint8_t port)
+{
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev *dev;
+       uint32_t ctrl;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       /* Stop the data paths */
+       if (ixgbe_disable_sec_rx_path(hw) != IXGBE_SUCCESS)
+               return -ENOTSUP;
+       /**
+        * Workaround:
+        * The base code implements no TX equivalent of
+        * ixgbe_disable_sec_rx_path(), and the base code must not
+        * be modified in DPDK, so call the hand-written helper
+        * directly for now. Hardware support has already been
+        * checked by ixgbe_disable_sec_rx_path().
+        */
+       ixgbe_disable_sec_tx_path_generic(hw);
+
+       /* Disable the TX and RX crypto engines */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
+       ctrl |= IXGBE_SECTXCTRL_SECTX_DIS;
+       IXGBE_WRITE_REG(hw, IXGBE_SECTXCTRL, ctrl);
+
+       ctrl = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
+       ctrl |= IXGBE_SECRXCTRL_SECRX_DIS;
+       IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, ctrl);
+
+       /* Disable SA lookup */
+       ctrl = IXGBE_READ_REG(hw, IXGBE_LSECTXCTRL);
+       ctrl &= ~IXGBE_LSECTXCTRL_EN_MASK;
+       ctrl |= IXGBE_LSECTXCTRL_DISABLE;
+       IXGBE_WRITE_REG(hw, IXGBE_LSECTXCTRL, ctrl);
+
+       ctrl = IXGBE_READ_REG(hw, IXGBE_LSECRXCTRL);
+       ctrl &= ~IXGBE_LSECRXCTRL_EN_MASK;
+       ctrl |= IXGBE_LSECRXCTRL_DISABLE << IXGBE_LSECRXCTRL_EN_SHIFT;
+       IXGBE_WRITE_REG(hw, IXGBE_LSECRXCTRL, ctrl);
+
+       /* Start the data paths */
+       ixgbe_enable_sec_rx_path(hw);
+       /**
+        * Workaround:
+        * The base code implements no TX equivalent of
+        * ixgbe_enable_sec_rx_path(), and the base code must not
+        * be modified in DPDK, so call the hand-written helper
+        * directly for now.
+        */
+       ixgbe_enable_sec_tx_path_generic(hw);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_txsc(uint8_t port, uint8_t *mac)
+{
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev *dev;
+       uint32_t ctrl;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
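+       /* The 6-byte station address forms the TX SCI: the low four bytes
+        * go into LSECTXSCL, the remaining two into LSECTXSCH.
+        */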
+       ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+       IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCL, ctrl);
+
+       ctrl = mac[4] | (mac[5] << 8);
+       IXGBE_WRITE_REG(hw, IXGBE_LSECTXSCH, ctrl);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_config_rxsc(uint8_t port, uint8_t *mac, uint16_t pi)
+{
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev *dev;
+       uint32_t ctrl;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
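+       /* The RX SCI is the MAC address plus the 16-bit port identifier,
+        * stored byte-swapped in the upper half of LSECRXSCH.
+        */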
+       ctrl = mac[0] | (mac[1] << 8) | (mac[2] << 16) | (mac[3] << 24);
+       IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCL, ctrl);
+
+       pi = rte_cpu_to_be_16(pi);
+       ctrl = mac[4] | (mac[5] << 8) | (pi << 16);
+       IXGBE_WRITE_REG(hw, IXGBE_LSECRXSCH, ctrl);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_txsa(uint8_t port, uint8_t idx, uint8_t an,
+                                uint32_t pn, uint8_t *key)
+{
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev *dev;
+       uint32_t ctrl, i;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (idx != 0 && idx != 1)
+               return -EINVAL;
+
+       if (an >= 4)
+               return -EINVAL;
+
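+       /* Each secure channel has two TX SAs (idx 0 and 1); the 128-bit
+        * key is written 32 bits at a time into the per-SA key registers.
+        */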
+       /* Set the PN and key */
+       pn = rte_cpu_to_be_32(pn);
+       if (idx == 0) {
+               IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN0, pn);
+
+               for (i = 0; i < 4; i++) {
+                       ctrl = (key[i * 4 + 0] <<  0) |
+                              (key[i * 4 + 1] <<  8) |
+                              (key[i * 4 + 2] << 16) |
+                              (key[i * 4 + 3] << 24);
+                       IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY0(i), ctrl);
+               }
+       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_LSECTXPN1, pn);
+
+               for (i = 0; i < 4; i++) {
+                       ctrl = (key[i * 4 + 0] <<  0) |
+                              (key[i * 4 + 1] <<  8) |
+                              (key[i * 4 + 2] << 16) |
+                              (key[i * 4 + 3] << 24);
+                       IXGBE_WRITE_REG(hw, IXGBE_LSECTXKEY1(i), ctrl);
+               }
+       }
+
+       /* Set AN and select the SA */
+       ctrl = (an << (idx * 2)) | (idx << 4);
+       IXGBE_WRITE_REG(hw, IXGBE_LSECTXSA, ctrl);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_macsec_select_rxsa(uint8_t port, uint8_t idx, uint8_t an,
+                                uint32_t pn, uint8_t *key)
+{
+       struct ixgbe_hw *hw;
+       struct rte_eth_dev *dev;
+       uint32_t ctrl, i;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+       if (idx != 0 && idx != 1)
+               return -EINVAL;
+
+       if (an >= 4)
+               return -EINVAL;
+
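+       /* Same layout as the TX side: a per-SA packet number plus a
+        * 128-bit key written one dword at a time.
+        */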
+       /* Set the PN */
+       pn = rte_cpu_to_be_32(pn);
+       IXGBE_WRITE_REG(hw, IXGBE_LSECRXPN(idx), pn);
+
+       /* Set the key */
+       for (i = 0; i < 4; i++) {
+               ctrl = (key[i * 4 + 0] <<  0) |
+                      (key[i * 4 + 1] <<  8) |
+                      (key[i * 4 + 2] << 16) |
+                      (key[i * 4 + 3] << 24);
+               IXGBE_WRITE_REG(hw, IXGBE_LSECRXKEY(idx, i), ctrl);
+       }
+
+       /* Set the AN and validate the SA */
+       ctrl = an | (1 << 2);
+       IXGBE_WRITE_REG(hw, IXGBE_LSECRXSA(idx), ctrl);
+
+       return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tc_bw_alloc(uint8_t port,
+                             uint8_t tc_num,
+                             uint8_t *bw_weight)
+{
+       struct rte_eth_dev *dev;
+       struct ixgbe_dcb_config *dcb_config;
+       struct ixgbe_dcb_tc_config *tc;
+       struct rte_eth_conf *eth_conf;
+       struct ixgbe_bw_conf *bw_conf;
+       uint8_t i;
+       uint8_t nb_tcs;
+       uint16_t sum;
+
+       RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+       dev = &rte_eth_devices[port];
+
+       if (!is_ixgbe_supported(dev))
+               return -ENOTSUP;
+
+       if (tc_num > IXGBE_DCB_MAX_TRAFFIC_CLASS) {
+               PMD_DRV_LOG(ERR, "TCs should be no more than %d.",
+                           IXGBE_DCB_MAX_TRAFFIC_CLASS);
+               return -EINVAL;
+       }
+
+       dcb_config = IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+       bw_conf = IXGBE_DEV_PRIVATE_TO_BW_CONF(dev->data->dev_private);
+       eth_conf = &dev->data->dev_conf;
+
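+       /* Derive the number of enabled TCs from the TX multi-queue mode:
+        * plain DCB carries it directly, VMDq+DCB implies 4 or 8 TCs from
+        * the pool count, and any other mode means a single TC.
+        */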
+       if (eth_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
+               nb_tcs = eth_conf->tx_adv_conf.dcb_tx_conf.nb_tcs;
+       } else if (eth_conf->txmode.mq_mode == ETH_MQ_TX_VMDQ_DCB) {
+               if (eth_conf->tx_adv_conf.vmdq_dcb_tx_conf.nb_queue_pools ==
+                   ETH_32_POOLS)
+                       nb_tcs = ETH_4_TCS;
+               else
+                       nb_tcs = ETH_8_TCS;
+       } else {
+               nb_tcs = 1;
+       }
+
+       if (nb_tcs != tc_num) {
+               PMD_DRV_LOG(ERR,
+                           "Weight should be set for all %d enabled TCs.",
+                           nb_tcs);
+               return -EINVAL;
+       }
+
+       sum = 0;
+       for (i = 0; i < nb_tcs; i++)
+               sum += bw_weight[i];
+       if (sum != 100) {
+               PMD_DRV_LOG(ERR,
+                           "The summary of the TC weight should be 100.");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < nb_tcs; i++) {
+               tc = &dcb_config->tc_config[i];
+               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bw_weight[i];
+       }
+       for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+               tc = &dcb_config->tc_config[i];
+               tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = 0;
+       }
+
+       bw_conf->tc_num = nb_tcs;
+
+       return 0;
+}