- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
- IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
- else
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
-
- return 0;
-}
-
-static int
-ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
-{
- int i;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_uta_info *uta_info =
- IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
-
- /* The UTA table only exists on 82599 hardware and newer */
- if (hw->mac.type < ixgbe_mac_82599EB)
- return -ENOTSUP;
-
- if (on) {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
- uta_info->uta_shadow[i] = ~0;
- IXGBE_WRITE_REG(hw, IXGBE_UTA(i), ~0);
- }
- } else {
- for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
- uta_info->uta_shadow[i] = 0;
- IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0);
- }
- }
- return 0;
-
-}
-
-uint32_t
-ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
-{
- uint32_t new_val = orig_val;
-
- if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
- new_val |= IXGBE_VMOLR_AUPE;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
- new_val |= IXGBE_VMOLR_ROMPE;
- if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
- new_val |= IXGBE_VMOLR_ROPE;
- if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
- new_val |= IXGBE_VMOLR_BAM;
- if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
- new_val |= IXGBE_VMOLR_MPE;
-
- return new_val;
-}
-
-int
-rte_pmd_ixgbe_ping_vf(uint8_t port, uint16_t vf)
-{
- struct ixgbe_hw *hw;
- struct ixgbe_vf_info *vfinfo;
- struct rte_eth_dev *dev;
- struct rte_pci_device *pci_dev;
- uint32_t ctrl;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (vf >= pci_dev->max_vfs)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-
- ctrl = IXGBE_PF_CONTROL_MSG;
- if (vfinfo[vf].clear_to_send)
- ctrl |= IXGBE_VT_MSGTYPE_CTS;
-
- ixgbe_write_mbx(hw, &ctrl, 1, vf);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- struct ixgbe_mac_info *mac;
- struct rte_eth_dev *dev;
- struct rte_pci_device *pci_dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (vf >= pci_dev->max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mac = &hw->mac;
-
- mac->ops.set_vlan_anti_spoofing(hw, on, vf);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- struct ixgbe_mac_info *mac;
- struct rte_eth_dev *dev;
- struct rte_pci_device *pci_dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (vf >= pci_dev->max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- mac = &hw->mac;
- mac->ops.set_mac_anti_spoofing(hw, on, vf);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
-{
- struct ixgbe_hw *hw;
- uint32_t ctrl;
- struct rte_eth_dev *dev;
- struct rte_pci_device *pci_dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (vf >= pci_dev->max_vfs)
- return -EINVAL;
-
- if (vlan_id > ETHER_MAX_VLAN_ID)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
- if (vlan_id) {
- ctrl = vlan_id;
- ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
- } else {
- ctrl = 0;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t ctrl;
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
- /* enable or disable VMDQ loopback */
- if (on)
- ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
- else
- ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
-
- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t reg_value;
- int i;
- int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- for (i = 0; i <= num_queues; i++) {
- reg_value = IXGBE_QDE_WRITE |
- (i << IXGBE_QDE_IDX_SHIFT) |
- (on & IXGBE_QDE_ENABLE);
- IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
- }
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct ixgbe_hw *hw;
- uint32_t reg_value;
- struct rte_eth_dev *dev;
- struct rte_pci_device *pci_dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- /* only support VF's 0 to 63 */
- if ((vf >= pci_dev->max_vfs) || (vf > 63))
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
- if (on)
- reg_value |= IXGBE_SRRCTL_DROP_EN;
- else
- reg_value &= ~IXGBE_SRRCTL_DROP_EN;
-
- IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
-
- return 0;
-}
-
/*
 * Enable or disable VLAN stripping on every Rx queue that belongs to
 * one VF's pool.  Returns 0 on success, -ENODEV/-ENOTSUP/-EINVAL on
 * validation failure.
 */
int
rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
{
	struct rte_eth_dev *dev;
	struct rte_pci_device *pci_dev;
	struct ixgbe_hw *hw;
	uint16_t queues_per_pool;
	uint32_t q;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);

	dev = &rte_eth_devices[port];
	pci_dev = IXGBE_DEV_TO_PCI(dev);
	hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!is_device_supported(dev, &rte_ixgbe_pmd))
		return -ENOTSUP;

	if (vf >= pci_dev->max_vfs)
		return -EINVAL;

	if (on > 1)
		return -EINVAL;

	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);

	/* The PF has 128 queue pairs and in SRIOV configuration
	 * those queues will be assigned to VF's, so RXDCTL
	 * registers will be dealing with queues which will be
	 * assigned to VF's.
	 * Let's say we have SRIOV configured with 31 VF's then the
	 * first 124 queues 0-123 will be allocated to VF's and only
	 * the last 4 queues 124-127 will be assigned to the PF.
	 */
	/* 82598 divides the queues into 16 pools; 82599+ into 64 pools. */
	if (hw->mac.type == ixgbe_mac_82598EB)
		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
				  ETH_16_POOLS;
	else
		queues_per_pool = (uint16_t)hw->mac.max_rx_queues /
				  ETH_64_POOLS;

	/* Apply the strip setting to every queue in this VF's pool. */
	for (q = 0; q < queues_per_pool; q++)
		(*dev->dev_ops->vlan_strip_queue_set)(dev,
				q + vf * queues_per_pool, on);
	return 0;
}
-
-int
-rte_pmd_ixgbe_set_vf_rxmode(uint8_t port, uint16_t vf, uint16_t rx_mask, uint8_t on)
-{
- int val = 0;
- struct rte_eth_dev *dev;
- struct rte_pci_device *pci_dev;
- struct ixgbe_hw *hw;
- uint32_t vmolr;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (vf >= pci_dev->max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
-
- if (hw->mac.type == ixgbe_mac_82598EB) {
- PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
- " on 82599 hardware and newer");
- return -ENOTSUP;
- }
- if (ixgbe_vt_check(hw) < 0)
- return -ENOTSUP;
-
- val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
-
- if (on)
- vmolr |= val;
- else
- vmolr &= ~val;
-
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_rx(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct rte_eth_dev *dev;
- struct rte_pci_device *pci_dev;
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
- struct ixgbe_hw *hw;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (vf >= pci_dev->max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- if (ixgbe_vt_check(hw) < 0)
- return -ENOTSUP;
-
- /* for vf >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
- if (vf >= 32) {
- addr = IXGBE_VFRE(1);
- val = bit1 << (vf - 32);
- } else {
- addr = IXGBE_VFRE(0);
- val = bit1 << vf;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_tx(uint8_t port, uint16_t vf, uint8_t on)
-{
- struct rte_eth_dev *dev;
- struct rte_pci_device *pci_dev;
- uint32_t reg, addr;
- uint32_t val;
- const uint8_t bit1 = 0x1;
-
- struct ixgbe_hw *hw;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (vf >= pci_dev->max_vfs)
- return -EINVAL;
-
- if (on > 1)
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (ixgbe_vt_check(hw) < 0)
- return -ENOTSUP;
-
- /* for vf >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
- if (vf >= 32) {
- addr = IXGBE_VFTE(1);
- val = bit1 << (vf - 32);
- } else {
- addr = IXGBE_VFTE(0);
- val = bit1 << vf;
- }
-
- reg = IXGBE_READ_REG(hw, addr);
-
- if (on)
- reg |= val;
- else
- reg &= ~val;
-
- IXGBE_WRITE_REG(hw, addr, reg);
-
- return 0;
-}
-
-int
-rte_pmd_ixgbe_set_vf_vlan_filter(uint8_t port, uint16_t vlan,
- uint64_t vf_mask, uint8_t vlan_on)
-{
- struct rte_eth_dev *dev;
- int ret = 0;
- uint16_t vf_idx;
- struct ixgbe_hw *hw;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if ((vlan > ETHER_MAX_VLAN_ID) || (vf_mask == 0))
- return -EINVAL;
-
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (ixgbe_vt_check(hw) < 0)
- return -ENOTSUP;
-
- for (vf_idx = 0; vf_idx < 64; vf_idx++) {
- if (vf_mask & ((uint64_t)(1ULL << vf_idx))) {
- ret = hw->mac.ops.set_vfta(hw, vlan, vf_idx,
- vlan_on, false);
- if (ret < 0)
- return ret;
- }
- }
-
- return ret;
-}
-
-int rte_pmd_ixgbe_set_vf_rate_limit(uint8_t port, uint16_t vf,
- uint16_t tx_rate, uint64_t q_msk)
-{
- struct rte_eth_dev *dev;
- struct ixgbe_hw *hw;
- struct ixgbe_vf_info *vfinfo;
- struct rte_eth_link link;
- uint8_t nb_q_per_pool;
- uint32_t queue_stride;
- uint32_t queue_idx, idx = 0, vf_idx;
- uint32_t queue_end;
- uint16_t total_rate = 0;
- struct rte_pci_device *pci_dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
-
- dev = &rte_eth_devices[port];
- pci_dev = IXGBE_DEV_TO_PCI(dev);
- rte_eth_link_get_nowait(port, &link);
-
- if (!is_device_supported(dev, &rte_ixgbe_pmd))
- return -ENOTSUP;
-
- if (vf >= pci_dev->max_vfs)
- return -EINVAL;