- pci_dev = IXGBE_DEV_TO_PCI(dev);
- hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
- nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
- queue_stride = IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
- queue_idx = vf * queue_stride;
- queue_end = queue_idx + nb_q_per_pool - 1;
- if (queue_end >= hw->mac.max_tx_queues)
- return -EINVAL;
-
- if (vfinfo) {
- for (vf_idx = 0; vf_idx < pci_dev->max_vfs; vf_idx++) {
- if (vf_idx == vf)
- continue;
- for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
- idx++)
- total_rate += vfinfo[vf_idx].tx_rate[idx];
- }
- } else {
- return -EINVAL;
- }
-
- /* Store tx_rate for this vf. */
- for (idx = 0; idx < nb_q_per_pool; idx++) {
- if (((uint64_t)0x1 << idx) & q_msk) {
- if (vfinfo[vf].tx_rate[idx] != tx_rate)
- vfinfo[vf].tx_rate[idx] = tx_rate;
- total_rate += tx_rate;
- }
- }
-
- if (total_rate > dev->data->dev_link.link_speed) {
- /* Reset stored TX rate of the VF if it causes exceed
- * link speed.
- */
- memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
- return -EINVAL;
- }
-
- /* Set RTTBCNRC of each queue/pool for vf X */
- for (; queue_idx <= queue_end; queue_idx++) {
- if (0x1 & q_msk)
- ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
- q_msk = q_msk >> 1;
- }
-
- return 0;
-}
-
/* MRCTL register mirror-type bits — one bit per supported mirror rule type,
 * matching the ETH_MIRROR_* flag values used by the ethdev mirror API.
 */
#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
#define IXGBE_MRCTL_VLME 0x08 /* VLAN Mirroring. */
/* Non-zero when 'mirror_type' contains any bit outside the four supported
 * mirror types (rule types may be OR-ed together).
 */
#define IXGBE_INVALID_MIRROR_TYPE(mirror_type) \
	((mirror_type) & ~(uint8_t)(ETH_MIRROR_VIRTUAL_POOL_UP | \
	ETH_MIRROR_UPLINK_PORT | ETH_MIRROR_DOWNLINK_PORT | ETH_MIRROR_VLAN))
-
-static int
-ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
- struct rte_eth_mirror_conf *mirror_conf,
- uint8_t rule_id, uint8_t on)
-{
- uint32_t mr_ctl, vlvf;
- uint32_t mp_lsb = 0;
- uint32_t mv_msb = 0;
- uint32_t mv_lsb = 0;
- uint32_t mp_msb = 0;
- uint8_t i = 0;
- int reg_index = 0;
- uint64_t vlan_mask = 0;
-
- const uint8_t pool_mask_offset = 32;
- const uint8_t vlan_mask_offset = 32;
- const uint8_t dst_pool_offset = 8;
- const uint8_t rule_mr_offset = 4;
- const uint8_t mirror_rule_mask = 0x0F;
-
- struct ixgbe_mirror_info *mr_info =
- (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint8_t mirror_type = 0;
-
- if (ixgbe_vt_check(hw) < 0)
- return -ENOTSUP;
-
- if (rule_id >= IXGBE_MAX_MIRROR_RULES)
- return -EINVAL;
-
- if (IXGBE_INVALID_MIRROR_TYPE(mirror_conf->rule_type)) {
- PMD_DRV_LOG(ERR, "unsupported mirror type 0x%x.",
- mirror_conf->rule_type);
- return -EINVAL;
- }
-
- if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
- mirror_type |= IXGBE_MRCTL_VLME;
- /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */
- for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
- if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
- /* search vlan id related pool vlan filter index */
- reg_index = ixgbe_find_vlvf_slot(hw,
- mirror_conf->vlan.vlan_id[i],
- false);
- if (reg_index < 0)
- return -EINVAL;
- vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
- if ((vlvf & IXGBE_VLVF_VIEN) &&
- ((vlvf & IXGBE_VLVF_VLANID_MASK) ==
- mirror_conf->vlan.vlan_id[i]))
- vlan_mask |= (1ULL << reg_index);
- else
- return -EINVAL;
- }
- }
-
- if (on) {
- mv_lsb = vlan_mask & 0xFFFFFFFF;
- mv_msb = vlan_mask >> vlan_mask_offset;
-
- mr_info->mr_conf[rule_id].vlan.vlan_mask =
- mirror_conf->vlan.vlan_mask;
- for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
- if (mirror_conf->vlan.vlan_mask & (1ULL << i))
- mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
- mirror_conf->vlan.vlan_id[i];
- }
- } else {
- mv_lsb = 0;
- mv_msb = 0;
- mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
- for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
- mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
- }
- }
-
- /*
- * if enable pool mirror, write related pool mask register,if disable
- * pool mirror, clear PFMRVM register
- */
- if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
- mirror_type |= IXGBE_MRCTL_VPME;
- if (on) {
- mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
- mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
- mr_info->mr_conf[rule_id].pool_mask =
- mirror_conf->pool_mask;
-
- } else {
- mp_lsb = 0;
- mp_msb = 0;
- mr_info->mr_conf[rule_id].pool_mask = 0;
- }
- }
- if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
- mirror_type |= IXGBE_MRCTL_UPME;
- if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
- mirror_type |= IXGBE_MRCTL_DPME;
-
- /* read mirror control register and recalculate it */
- mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
-
- if (on) {
- mr_ctl |= mirror_type;
- mr_ctl &= mirror_rule_mask;
- mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
- } else
- mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
-
- mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
- mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
-
- /* write mirrror control register */
- IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-
- /* write pool mirrror control register */
- if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
- IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
- IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
- mp_msb);
- }
- /* write VLAN mirrror control register */
- if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
- IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
- IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
- mv_msb);
- }
-
- return 0;
-}
-
-static int
-ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
-{
- int mr_ctl = 0;
- uint32_t lsb_val = 0;
- uint32_t msb_val = 0;
- const uint8_t rule_mr_offset = 4;
-
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_mirror_info *mr_info =
- (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
-
- if (ixgbe_vt_check(hw) < 0)
- return -ENOTSUP;
-
- memset(&mr_info->mr_conf[rule_id], 0,
- sizeof(struct rte_eth_mirror_conf));
-
- /* clear PFVMCTL register */
- IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
-
- /* clear pool mask register */
- IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
- IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
-
- /* clear vlan mask register */
- IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
- IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
-
- return 0;
-}
-
-static int
-ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
-{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- uint32_t mask;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
- mask |= (1 << IXGBE_MISC_VEC_ID);
- RTE_SET_USED(queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-
- rte_intr_enable(intr_handle);
-
- return 0;
-}
-
-static int
-ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
-{
- uint32_t mask;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-
- mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
- mask &= ~(1 << IXGBE_MISC_VEC_ID);
- RTE_SET_USED(queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
-
- return 0;
-}
-
-static int
-ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
-{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- uint32_t mask;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_interrupt *intr =
- IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
- if (queue_id < 16) {
- ixgbe_disable_intr(hw);
- intr->mask |= (1 << queue_id);
- ixgbe_enable_intr(dev);
- } else if (queue_id < 32) {
- mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
- mask &= (1 << queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
- } else if (queue_id < 64) {
- mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
- mask &= (1 << (queue_id - 32));
- IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
- }
- rte_intr_enable(intr_handle);
-
- return 0;
-}
-
-static int
-ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
-{
- uint32_t mask;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_interrupt *intr =
- IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-
- if (queue_id < 16) {
- ixgbe_disable_intr(hw);
- intr->mask &= ~(1 << queue_id);
- ixgbe_enable_intr(dev);
- } else if (queue_id < 32) {
- mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
- mask &= ~(1 << queue_id);
- IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
- } else if (queue_id < 64) {
- mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
- mask &= ~(1 << (queue_id - 32));
- IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
- }
-
- return 0;
-}
-
-static void
-ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
- uint8_t queue, uint8_t msix_vector)
-{
- uint32_t tmp, idx;
-
- if (direction == -1) {
- /* other causes */
- msix_vector |= IXGBE_IVAR_ALLOC_VAL;
- tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
- tmp &= ~0xFF;
- tmp |= msix_vector;
- IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
- } else {
- /* rx or tx cause */
- msix_vector |= IXGBE_IVAR_ALLOC_VAL;
- idx = ((16 * (queue & 1)) + (8 * direction));
- tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
- tmp &= ~(0xFF << idx);
- tmp |= (msix_vector << idx);
- IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
- }
-}
-
-/**
- * set the IVAR registers, mapping interrupt causes to vectors
- * @param hw
- * pointer to ixgbe_hw struct
- * @direction
- * 0 for Rx, 1 for Tx, -1 for other causes
- * @queue
- * queue to map the corresponding interrupt to
- * @msix_vector
- * the vector to map to the corresponding queue
- */
-static void
-ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
- uint8_t queue, uint8_t msix_vector)
-{
- uint32_t tmp, idx;
-
- msix_vector |= IXGBE_IVAR_ALLOC_VAL;
- if (hw->mac.type == ixgbe_mac_82598EB) {
- if (direction == -1)
- direction = 0;
- idx = (((direction * 64) + queue) >> 2) & 0x1F;
- tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
- tmp &= ~(0xFF << (8 * (queue & 0x3)));
- tmp |= (msix_vector << (8 * (queue & 0x3)));
- IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
- } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540)) {
- if (direction == -1) {
- /* other causes */
- idx = ((queue & 1) * 8);
- tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
- tmp &= ~(0xFF << idx);
- tmp |= (msix_vector << idx);
- IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
- } else {
- /* rx or tx causes */
- idx = ((16 * (queue & 1)) + (8 * direction));
- tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
- tmp &= ~(0xFF << idx);
- tmp |= (msix_vector << idx);
- IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
- }
- }
-}
-
-static void
-ixgbevf_configure_msix(struct rte_eth_dev *dev)
-{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t q_idx;
- uint32_t vector_idx = IXGBE_MISC_VEC_ID;
-
- /* Configure VF other cause ivar */
- ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
-
- /* won't configure msix register if no mapping is done
- * between intr vector and event fd.
- */
- if (!rte_intr_dp_is_en(intr_handle))
- return;
-
- /* Configure all RX queues of VF */
- for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
- /* Force all queue use vector 0,
- * as IXGBE_VF_MAXMSIVECOTR = 1
- */
- ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
- intr_handle->intr_vec[q_idx] = vector_idx;
- }
-}
-
-/**
- * Sets up the hardware to properly generate MSI-X interrupts
- * @hw
- * board private structure
- */
-static void
-ixgbe_configure_msix(struct rte_eth_dev *dev)
-{
- struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
- struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
- struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
- uint32_t vec = IXGBE_MISC_VEC_ID;
- uint32_t mask;
- uint32_t gpie;
-
- /* won't configure msix register if no mapping is done
- * between intr vector and event fd
- */
- if (!rte_intr_dp_is_en(intr_handle))
- return;
-
- if (rte_intr_allow_others(intr_handle))
- vec = base = IXGBE_RX_VEC_START;
-
- /* setup GPIE for MSI-x mode */
- gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
- gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
- IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
- /* auto clearing and auto setting corresponding bits in EIMS
- * when MSI-X interrupt is triggered
- */
- if (hw->mac.type == ixgbe_mac_82598EB) {
- IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
- } else {
- IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
- IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
- }
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
-
- /* Populate the IVAR table and set the ITR values to the
- * corresponding register.
- */
- for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
- queue_id++) {
- /* by default, 1:1 mapping */
- ixgbe_set_ivar_map(hw, 0, queue_id, vec);
- intr_handle->intr_vec[queue_id] = vec;
- if (vec < base + intr_handle->nb_efd - 1)
- vec++;
- }
-
- switch (hw->mac.type) {
- case ixgbe_mac_82598EB:
- ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
- IXGBE_MISC_VEC_ID);
- break;
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
- break;
- default:
- break;
- }
- IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
- IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
-
- /* set up to autoclear timer, and the vectors */
- mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~(IXGBE_EIMS_OTHER |
- IXGBE_EIMS_MAILBOX |
- IXGBE_EIMS_LSC);
-
- IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
-}
-
-static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
- uint16_t queue_idx, uint16_t tx_rate)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t rf_dec, rf_int;
- uint32_t bcnrc_val;
- uint16_t link_speed = dev->data->dev_link.link_speed;
-
- if (queue_idx >= hw->mac.max_tx_queues)
- return -EINVAL;
-
- if (tx_rate != 0) {
- /* Calculate the rate factor values to set */
- rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
- rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
- rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
-
- bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
- bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
- IXGBE_RTTBCNRC_RF_INT_MASK_M);
- bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
- } else {
- bcnrc_val = 0;
- }
-
- /*
- * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
- * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
- * set as 0x4.
- */
- if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
- (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
- IXGBE_MAX_JUMBO_FRAME_SIZE))
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
- IXGBE_MMW_SIZE_JUMBO_FRAME);
- else
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
- IXGBE_MMW_SIZE_DEFAULT);
-
- /* Set RTTBCNRC of queue X */
- IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
- IXGBE_WRITE_FLUSH(hw);
-
- return 0;
-}
-
-static void
-ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
- __attribute__((unused)) uint32_t index,
- __attribute__((unused)) uint32_t pool)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int diag;
-
- /*
- * On a 82599 VF, adding again the same MAC addr is not an idempotent
- * operation. Trap this case to avoid exhausting the [very limited]
- * set of PF resources used to store VF MAC addresses.
- */
- if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
- return;
- diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
- if (diag == 0)
- return;
- PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
-}
-
-static void
-ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
- struct ether_addr *mac_addr;
- uint32_t i;
- int diag;
-
- /*
- * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
- * not support the deletion of a given MAC address.
- * Instead, it imposes to delete all MAC addresses, then to add again
- * all MAC addresses with the exception of the one to be deleted.
- */
- (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
-
- /*
- * Add again all MAC addresses, with the exception of the deleted one
- * and of the permanent MAC address.
- */
- for (i = 0, mac_addr = dev->data->mac_addrs;
- i < hw->mac.num_rar_entries; i++, mac_addr++) {
- /* Skip the deleted MAC address */
- if (i == index)
- continue;
- /* Skip NULL MAC addresses */
- if (is_zero_ether_addr(mac_addr))
- continue;
- /* Skip the permanent MAC address */
- if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
- continue;
- diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
- if (diag != 0)
- PMD_DRV_LOG(ERR,
- "Adding again MAC address "
- "%02x:%02x:%02x:%02x:%02x:%02x failed "
- "diag=%d",
- mac_addr->addr_bytes[0],
- mac_addr->addr_bytes[1],
- mac_addr->addr_bytes[2],
- mac_addr->addr_bytes[3],
- mac_addr->addr_bytes[4],
- mac_addr->addr_bytes[5],
- diag);
- }
-}
-
/* Set the VF's default MAC address by programming RAR entry 0. */
static void
ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
{
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* trailing zeros: presumably the vmdq pool and enable arguments of
	 * set_rar — TODO confirm against the ixgbe_mac_operations prototype
	 */
	hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
}
-
/* Bail out with -ENOTSUP unless the MAC type supports flow filters (82599,
 * X540, X550 family).  NOTE: contains a hidden 'return', so it may only be
 * used inside functions that return int.
 */
#define MAC_TYPE_FILTER_SUP(type) do {\
	if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
	    (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
	    (type) != ixgbe_mac_X550EM_a)\
		return -ENOTSUP;\
} while (0)
-
-int
-ixgbe_syn_filter_set(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter,
- bool add)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_filter_info *filter_info =
- IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
- uint32_t syn_info;
- uint32_t synqf;
-
- if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
- return -EINVAL;
-
- syn_info = filter_info->syn_info;
-
- if (add) {
- if (syn_info & IXGBE_SYN_FILTER_ENABLE)
- return -EINVAL;
- synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
- IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
-
- if (filter->hig_pri)
- synqf |= IXGBE_SYN_FILTER_SYNQFP;
- else
- synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
- } else {
- synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
- if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
- return -ENOENT;
- synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
- }
-
- filter_info->syn_info = synqf;
- IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
- IXGBE_WRITE_FLUSH(hw);
- return 0;
-}
-
-static int
-ixgbe_syn_filter_get(struct rte_eth_dev *dev,
- struct rte_eth_syn_filter *filter)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
-
- if (synqf & IXGBE_SYN_FILTER_ENABLE) {
- filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
- filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
- return 0;
- }
- return -ENOENT;
-}
-
-static int
-ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
- enum rte_filter_op filter_op,
- void *arg)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int ret;
-
- MAC_TYPE_FILTER_SUP(hw->mac.type);
-
- if (filter_op == RTE_ETH_FILTER_NOP)
- return 0;
-
- if (arg == NULL) {
- PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
- filter_op);
- return -EINVAL;
- }
-
- switch (filter_op) {
- case RTE_ETH_FILTER_ADD:
- ret = ixgbe_syn_filter_set(dev,
- (struct rte_eth_syn_filter *)arg,
- TRUE);
- break;
- case RTE_ETH_FILTER_DELETE:
- ret = ixgbe_syn_filter_set(dev,
- (struct rte_eth_syn_filter *)arg,
- FALSE);
- break;
- case RTE_ETH_FILTER_GET:
- ret = ixgbe_syn_filter_get(dev,
- (struct rte_eth_syn_filter *)arg);
- break;
- default:
- PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-
-static inline enum ixgbe_5tuple_protocol
-convert_protocol_type(uint8_t protocol_value)
-{
- if (protocol_value == IPPROTO_TCP)
- return IXGBE_FILTER_PROTOCOL_TCP;
- else if (protocol_value == IPPROTO_UDP)
- return IXGBE_FILTER_PROTOCOL_UDP;
- else if (protocol_value == IPPROTO_SCTP)
- return IXGBE_FILTER_PROTOCOL_SCTP;
- else
- return IXGBE_FILTER_PROTOCOL_NONE;
-}
-
-/* inject a 5-tuple filter to HW */
-static inline void
-ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
- struct ixgbe_5tuple_filter *filter)
-{
- struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- int i;
- uint32_t ftqf, sdpqf;
- uint32_t l34timir = 0;
- uint8_t mask = 0xff;
-
- i = filter->index;
-
- sdpqf = (uint32_t)(filter->filter_info.dst_port <<
- IXGBE_SDPQF_DSTPORT_SHIFT);
- sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);