+ if (on) {
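+		/* The 64-bit VLAN mask is split across two 32-bit VMRVLAN
+		 * registers: the low word goes to rule_id and the high word to
+		 * rule_id + rule_mr_offset (written further below);
+		 * vlan_mask_offset shifts out the low word.
+		 */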
+ mv_lsb = vlan_mask & 0xFFFFFFFF;
+ mv_msb = vlan_mask >> vlan_mask_offset;
+
+ mr_info->mr_conf[rule_id].vlan.vlan_mask =
+ mirror_conf->vlan.vlan_mask;
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i))
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+ mirror_conf->vlan.vlan_id[i];
+ }
+ } else {
+ mv_lsb = 0;
+ mv_msb = 0;
+ mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+ }
+ }
+
+	/*
+	 * If pool mirroring is enabled, write the related pool mask
+	 * registers; if it is disabled, clear the PFMRVM registers.
+	 */
+ if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
+ mirror_type |= IXGBE_MRCTL_VPME;
+ if (on) {
+ mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+ mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+ mr_info->mr_conf[rule_id].pool_mask =
+ mirror_conf->pool_mask;
+
+ } else {
+ mp_lsb = 0;
+ mp_msb = 0;
+ mr_info->mr_conf[rule_id].pool_mask = 0;
+ }
+ }
+ if (mirror_conf->rule_type & ETH_MIRROR_UPLINK_PORT)
+ mirror_type |= IXGBE_MRCTL_UPME;
+ if (mirror_conf->rule_type & ETH_MIRROR_DOWNLINK_PORT)
+ mirror_type |= IXGBE_MRCTL_DPME;
+
+ /* read mirror control register and recalculate it */
+ mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
+
+ if (on) {
+ mr_ctl |= mirror_type;
+ mr_ctl &= mirror_rule_mask;
+ mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+ } else
+ mr_ctl &= ~(mirror_conf->rule_type & mirror_rule_mask);
+
+ mr_info->mr_conf[rule_id].rule_type = mirror_conf->rule_type;
+ mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+	/* write mirror control register */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+	/* write pool mirror control register */
+ if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+ mp_msb);
+ }
+	/* write VLAN mirror control register */
+ if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+ mv_msb);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+ int mr_ctl = 0;
+ uint32_t lsb_val = 0;
+ uint32_t msb_val = 0;
+ const uint8_t rule_mr_offset = 4;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+
+ if (ixgbe_vt_check(hw) < 0)
+ return -ENOTSUP;
+
+ memset(&mr_info->mr_conf[rule_id], 0,
+ sizeof(struct rte_eth_mirror_conf));
+
+	/* clear mirror rule control register (MRCTL) */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+ /* clear pool mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+
+ /* clear vlan mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ mask |= (1 << IXGBE_MISC_VEC_ID);
+ RTE_SET_USED(queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ mask &= ~(1 << IXGBE_MISC_VEC_ID);
+ RTE_SET_USED(queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (queue_id < 16) {
+ ixgbe_disable_intr(hw);
+ intr->mask |= (1 << queue_id);
+ ixgbe_enable_intr(dev);
+ } else if (queue_id < 32) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+		mask |= (1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ } else if (queue_id < 64) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+		mask |= (1 << (queue_id - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+ixgbe_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ uint32_t mask;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (queue_id < 16) {
+ ixgbe_disable_intr(hw);
+ intr->mask &= ~(1 << queue_id);
+ ixgbe_enable_intr(dev);
+ } else if (queue_id < 32) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(0));
+ mask &= ~(1 << queue_id);
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
+ } else if (queue_id < 64) {
+ mask = IXGBE_READ_REG(hw, IXGBE_EIMS_EX(1));
+ mask &= ~(1 << (queue_id - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+ }
+
+ return 0;
+}
+
+static void
+ixgbevf_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ tmp &= ~0xFF;
+ tmp |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, tmp);
+ } else {
+ /* rx or tx cause */
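+		/* Each 32-bit VTIVAR register holds four 8-bit entries: Rx and
+		 * Tx causes for two queues. E.g. queue 3, Tx (direction 1):
+		 * idx = 16 * 1 + 8 * 1 = 24, so the vector lands in bits 31:24
+		 * of VTIVAR(1).
+		 */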
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), tmp);
+ }
+}
+
+/**
+ * set the IVAR registers, mapping interrupt causes to vectors
+ * @param hw
+ *  pointer to ixgbe_hw struct
+ * @param direction
+ *  0 for Rx, 1 for Tx, -1 for other causes
+ * @param queue
+ *  queue to map the corresponding interrupt to
+ * @param msix_vector
+ *  the vector to map to the corresponding queue
+ */
+static void
+ixgbe_set_ivar_map(struct ixgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ if (direction == -1)
+ direction = 0;
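+		/* 82598: direction * 64 + queue flattens the Rx (0-63) and
+		 * Tx (64-127) causes; >> 2 selects one of 32 IVAR registers
+		 * and queue & 0x3 the byte lane within it.
+		 */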
+ idx = (((direction * 64) + queue) >> 2) & 0x1F;
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(idx));
+ tmp &= ~(0xFF << (8 * (queue & 0x3)));
+ tmp |= (msix_vector << (8 * (queue & 0x3)));
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
+ } else if ((hw->mac.type == ixgbe_mac_82599EB) ||
+ (hw->mac.type == ixgbe_mac_X540)) {
+ if (direction == -1) {
+ /* other causes */
+ idx = ((queue & 1) * 8);
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR_MISC, tmp);
+ } else {
+ /* rx or tx causes */
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = IXGBE_READ_REG(hw, IXGBE_IVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ IXGBE_WRITE_REG(hw, IXGBE_IVAR(queue >> 1), tmp);
+ }
+ }
+}
+
+static void
+ixgbevf_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t q_idx;
+ uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+
+ /* Configure VF other cause ivar */
+ ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+
+	/* Don't configure the MSI-X register if no mapping has been done
+	 * between the interrupt vector and the event fd.
+	 */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ /* Configure all RX queues of VF */
+ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+		/* Force all queues to use vector 0,
+		 * as IXGBE_VF_MAXMSIVECTOR = 1
+		 */
+ ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+ intr_handle->intr_vec[q_idx] = vector_idx;
+ }
+}
+
+/**
+ * Sets up the hardware to properly generate MSI-X interrupts
+ * @param dev
+ *  pointer to rte_eth_dev struct
+ */
+static void
+ixgbe_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t queue_id, base = IXGBE_MISC_VEC_ID;
+ uint32_t vec = IXGBE_MISC_VEC_ID;
+ uint32_t mask;
+ uint32_t gpie;
+
+	/* Don't configure the MSI-X register if no mapping has been done
+	 * between the interrupt vector and the event fd.
+	 */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = base = IXGBE_RX_VEC_START;
+
+	/* set up GPIE for MSI-X mode */
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT |
+ IXGBE_GPIE_OCD | IXGBE_GPIE_EIAME;
+	/* auto-clear and auto-set the corresponding bits in EIMS
+	 * when an MSI-X interrupt is triggered
+	 */
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
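+	/* Queues beyond the number of event fds share the last vector;
+	 * vec stops incrementing at base + intr_handle->nb_efd - 1.
+	 */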
+ for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+ queue_id++) {
+ /* by default, 1:1 mapping */
+ ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+ intr_handle->intr_vec[queue_id] = vec;
+ if (vec < base + intr_handle->nb_efd - 1)
+ vec++;
+ }
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
+ IXGBE_MISC_VEC_ID);
+ break;
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X540:
+ ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+ break;
+ default:
+ break;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
+ IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
+
+	/* set up to auto-clear the timer and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+
+ IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
+}
+
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t tx_rate)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rf_dec, rf_int;
+ uint32_t bcnrc_val;
+ uint16_t link_speed = dev->data->dev_link.link_speed;
+
+ if (queue_idx >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
+ rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+ rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+ rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
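+		/* E.g. a 10000 Mbps link with tx_rate 3000 Mbps gives
+		 * rf_int = 3 and rf_dec = (1000 << 14) / 3000 ~= 5461,
+		 * assuming IXGBE_RTTBCNRC_RF_INT_SHIFT is 14.
+		 */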
+
+ bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+ IXGBE_RTTBCNRC_RF_INT_MASK_M);
+ bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+	/*
+	 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+	 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported; otherwise
+	 * set it to 0x4.
+	 */
+ if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+ IXGBE_MAX_JUMBO_FRAME_SIZE))
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+ IXGBE_MMW_SIZE_JUMBO_FRAME);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+ IXGBE_MMW_SIZE_DEFAULT);
+
+ /* Set RTTBCNRC of queue X */
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static void
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ __attribute__((unused)) uint32_t index,
+ __attribute__((unused)) uint32_t pool)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int diag;
+
+	/*
+	 * On a 82599 VF, adding the same MAC address twice is not an
+	 * idempotent operation. Trap this case to avoid exhausting the [very
+	 * limited] set of PF resources used to store VF MAC addresses.
+	 */
+ if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+ return;
+ diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (diag == 0)
+ return;
+ PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+}
+
+static void
+ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+ struct ether_addr *mac_addr;
+ uint32_t i;
+ int diag;
+
+	/*
+	 * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
+	 * not support the deletion of a given MAC address.
+	 * Instead, it forces the VF to delete all MAC addresses, then to
+	 * add back all MAC addresses except the one to be deleted.
+	 */
+ (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+	/*
+	 * Add back all MAC addresses, except the deleted one and the
+	 * permanent MAC address.
+	 */
+ for (i = 0, mac_addr = dev->data->mac_addrs;
+ i < hw->mac.num_rar_entries; i++, mac_addr++) {
+ /* Skip the deleted MAC address */
+ if (i == index)
+ continue;
+ /* Skip NULL MAC addresses */
+ if (is_zero_ether_addr(mac_addr))
+ continue;
+ /* Skip the permanent MAC address */
+ if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+ continue;
+ diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (diag != 0)
+ PMD_DRV_LOG(ERR,
+ "Adding again MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x failed "
+ "diag=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ diag);
+ }
+}
+
+static void
+ixgbevf_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *addr)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ hw->mac.ops.set_rar(hw, 0, (void *)addr, 0, 0);
+}
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+ (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
+ (type) != ixgbe_mac_X550EM_a)\
+ return -ENOTSUP;\
+} while (0)
+
+int
+ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t syn_info;
+ uint32_t synqf;
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ syn_info = filter_info->syn_info;
+
+ if (add) {
+ if (syn_info & IXGBE_SYN_FILTER_ENABLE)
+ return -EINVAL;
+ synqf = (uint32_t)(((filter->queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+ IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+ if (filter->hig_pri)
+ synqf |= IXGBE_SYN_FILTER_SYNQFP;
+ else
+ synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+ } else {
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ if (!(syn_info & IXGBE_SYN_FILTER_ENABLE))
+ return -ENOENT;
+ synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+ }
+
+ filter_info->syn_info = synqf;
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ IXGBE_WRITE_FLUSH(hw);
+ return 0;
+}
+
+static int
+ixgbe_syn_filter_get(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
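+		/* undo the queue-field shift applied in ixgbe_syn_filter_set()
+		 * (IXGBE_SYN_FILTER_QUEUE_SHIFT, i.e. 1)
+		 */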
+ filter->queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >> 1);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int
+ixgbe_syn_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_syn_filter_set(dev,
+ (struct rte_eth_syn_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = ixgbe_syn_filter_get(dev,
+ (struct rte_eth_syn_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+ if (protocol_value == IPPROTO_TCP)
+ return IXGBE_FILTER_PROTOCOL_TCP;
+ else if (protocol_value == IPPROTO_UDP)
+ return IXGBE_FILTER_PROTOCOL_UDP;
+ else if (protocol_value == IPPROTO_SCTP)
+ return IXGBE_FILTER_PROTOCOL_SCTP;
+ else
+ return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+/* inject a 5-tuple filter into HW */
+static inline void
+ixgbe_inject_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int i;
+ uint32_t ftqf, sdpqf;
+ uint32_t l34timir = 0;
+ uint8_t mask = 0xff;
+
+ i = filter->index;
+
+ sdpqf = (uint32_t)(filter->filter_info.dst_port <<
+ IXGBE_SDPQF_DSTPORT_SHIFT);
+ sdpqf = sdpqf | (filter->filter_info.src_port & IXGBE_SDPQF_SRCPORT);
+
+ ftqf = (uint32_t)(filter->filter_info.proto &
+ IXGBE_FTQF_PROTOCOL_MASK);
+ ftqf |= (uint32_t)((filter->filter_info.priority &
+ IXGBE_FTQF_PRIORITY_MASK) << IXGBE_FTQF_PRIORITY_SHIFT);
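+	/* A set bit in the 5-bit FTQF mask field disables comparison of
+	 * the corresponding 5-tuple field; each check below clears a bit
+	 * only when that field must match (a filter-info mask of 0, per
+	 * the "0 means compare" convention).
+	 */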
+ if (filter->filter_info.src_ip_mask == 0) /* 0 means compare. */
+ mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+ if (filter->filter_info.dst_ip_mask == 0)
+ mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+ if (filter->filter_info.src_port_mask == 0)
+ mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+ if (filter->filter_info.dst_port_mask == 0)
+ mask &= IXGBE_FTQF_DEST_PORT_MASK;
+ if (filter->filter_info.proto_mask == 0)
+ mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+ ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+ ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(i), filter->filter_info.dst_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(i), filter->filter_info.src_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(i), sdpqf);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(i), ftqf);
+
+ l34timir |= IXGBE_L34T_IMIR_RESERVE;
+ l34timir |= (uint32_t)(filter->queue <<
+ IXGBE_L34T_IMIR_QUEUE_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(i), l34timir);
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added; its index field is
+ * assigned by this function.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ int i, idx, shift;
+
+	/*
+	 * look for an unused 5tuple filter index,
+	 * and insert the filter into the list.
+	 */
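+	/* fivetuple_mask is a bitmap of used filter slots, 32 bits per
+	 * word (NBBY is bits per byte); e.g. filter index 37 maps to
+	 * word 1, bit 5.
+	 */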
+ for (i = 0; i < IXGBE_MAX_FTQF_FILTERS; i++) {
+ idx = i / (sizeof(uint32_t) * NBBY);
+ shift = i % (sizeof(uint32_t) * NBBY);
+ if (!(filter_info->fivetuple_mask[idx] & (1 << shift))) {
+ filter_info->fivetuple_mask[idx] |= 1 << shift;
+ filter->index = i;
+ TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
+ filter,
+ entries);
+ break;
+ }
+ }
+ if (i >= IXGBE_MAX_FTQF_FILTERS) {
+ PMD_DRV_LOG(ERR, "5tuple filters are full.");
+ return -ENOSYS;
+ }
+
+ ixgbe_inject_5tuple_filter(dev, filter);
+
+ return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be removed.
+ */
+static void
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ struct ixgbe_5tuple_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint16_t index = filter->index;
+
+ filter_info->fivetuple_mask[index / (sizeof(uint32_t) * NBBY)] &=
+ ~(1 << (index % (sizeof(uint32_t) * NBBY)));
+ TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
+ rte_free(filter);
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+}
+
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ixgbe_hw *hw;
+ uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
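+	/* e.g. a 1500-byte MTU yields a 1518-byte max frame:
+	 * 14-byte Ethernet header plus 4-byte CRC
+	 */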
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ return -EINVAL;
+
+	/* Refuse an MTU that requires scattered-packet support when the
+	 * feature has not already been enabled.
+	 */
+ if (!dev->data->scattered_rx &&
+ (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ return -EINVAL;
+
+	/*
+	 * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+	 * request of version 2.0 of the mailbox API.
+	 * For now, use the IXGBE_VF_SET_LPE request of version 1.0
+	 * of the mailbox API.
+	 * The IXGBE_VF_SET_LPE request won't work with ixgbe PF drivers
+	 * prior to 3.11.33, which contains the following change:
+	 * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+	 */
+ ixgbevf_rlpml_set_vf(hw, max_frame);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+ return 0;
+}
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
+ return -ENOTSUP;\
+} while (0)
+
+static inline struct ixgbe_5tuple_filter *
+ixgbe_5tuple_filter_lookup(struct ixgbe_5tuple_filter_list *filter_list,
+ struct ixgbe_5tuple_filter_info *key)
+{
+ struct ixgbe_5tuple_filter *it;
+
+ TAILQ_FOREACH(it, filter_list, entries) {
+ if (memcmp(key, &it->filter_info,
+ sizeof(struct ixgbe_5tuple_filter_info)) == 0) {
+ return it;
+ }
+ }
+ return NULL;
+}
+
+/* translate elements in struct rte_eth_ntuple_filter to struct ixgbe_5tuple_filter_info */
+static inline int
+ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
+ struct ixgbe_5tuple_filter_info *filter_info)
+{
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ return -EINVAL;
+
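+	/* Note the inversion: an all-ones application mask means
+	 * "compare this field" and maps to a hardware-side mask of 0,
+	 * while an all-zero application mask means "ignore" and maps
+	 * to 1 (see the "0 means compare" convention in
+	 * ixgbe_inject_5tuple_filter()).
+	 */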
+ switch (filter->dst_ip_mask) {
+ case UINT32_MAX:
+ filter_info->dst_ip_mask = 0;
+ filter_info->dst_ip = filter->dst_ip;
+ break;
+ case 0:
+ filter_info->dst_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_ip_mask) {
+ case UINT32_MAX:
+ filter_info->src_ip_mask = 0;
+ filter_info->src_ip = filter->src_ip;
+ break;
+ case 0:
+ filter_info->src_ip_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_ip mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->dst_port_mask) {
+ case UINT16_MAX:
+ filter_info->dst_port_mask = 0;
+ filter_info->dst_port = filter->dst_port;
+ break;
+ case 0:
+ filter_info->dst_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid dst_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->src_port_mask) {
+ case UINT16_MAX:
+ filter_info->src_port_mask = 0;
+ filter_info->src_port = filter->src_port;
+ break;
+ case 0:
+ filter_info->src_port_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid src_port mask.");
+ return -EINVAL;
+ }
+
+ switch (filter->proto_mask) {
+ case UINT8_MAX:
+ filter_info->proto_mask = 0;
+ filter_info->proto =
+ convert_protocol_type(filter->proto);
+ break;
+ case 0:
+ filter_info->proto_mask = 1;
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid protocol mask.");
+ return -EINVAL;
+ }
+
+ filter_info->priority = (uint8_t)filter->priority;
+ return 0;
+}
+
+/*
+ * add or delete an ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ * add: if true, add the filter; if false, remove it
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+int
+ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter,
+ bool add)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter_info filter_5tuple;
+ struct ixgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter != NULL && add) {
+ PMD_DRV_LOG(ERR, "filter exists.");
+ return -EEXIST;
+ }
+ if (filter == NULL && !add) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+
+ if (add) {
+ filter = rte_zmalloc("ixgbe_5tuple_filter",
+ sizeof(struct ixgbe_5tuple_filter), 0);
+ if (filter == NULL)
+ return -ENOMEM;
+ (void)rte_memcpy(&filter->filter_info,
+ &filter_5tuple,
+ sizeof(struct ixgbe_5tuple_filter_info));
+ filter->queue = ntuple_filter->queue;
+ ret = ixgbe_add_5tuple_filter(dev, filter);
+ if (ret < 0) {
+ rte_free(filter);
+ return ret;
+ }
+ } else
+ ixgbe_remove_5tuple_filter(dev, filter);
+
+ return 0;
+}
+
+/*
+ * get an ntuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * ntuple_filter: Pointer to struct rte_eth_ntuple_filter
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ struct ixgbe_5tuple_filter_info filter_5tuple;
+ struct ixgbe_5tuple_filter *filter;
+ int ret;
+
+ if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
+ PMD_DRV_LOG(ERR, "only 5tuple is supported.");
+ return -EINVAL;
+ }
+
+ memset(&filter_5tuple, 0, sizeof(struct ixgbe_5tuple_filter_info));
+ ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
+ if (ret < 0)
+ return ret;
+
+ filter = ixgbe_5tuple_filter_lookup(&filter_info->fivetuple_list,
+ &filter_5tuple);
+ if (filter == NULL) {
+ PMD_DRV_LOG(ERR, "filter doesn't exist.");
+ return -ENOENT;
+ }
+ ntuple_filter->queue = filter->queue;
+ return 0;
+}
+
+/*
+ * ixgbe_ntuple_filter_handle - Handle operations for ntuple filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_ntuple_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_add_del_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = ixgbe_get_ntuple_filter(dev,
+ (struct rte_eth_ntuple_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+int
+ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf = 0;
+ uint32_t etqs = 0;
+ int ret;
+ struct ixgbe_ethertype_filter ethertype_filter;
+
+ if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ if (filter->ether_type == ETHER_TYPE_IPv4 ||
+ filter->ether_type == ETHER_TYPE_IPv6) {
+ PMD_DRV_LOG(ERR, "unsupported ether_type(0x%04x) in"
+ " ethertype filter.", filter->ether_type);
+ return -EINVAL;
+ }
+
+ if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
+ PMD_DRV_LOG(ERR, "mac compare is unsupported.");
+ return -EINVAL;
+ }
+ if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
+ PMD_DRV_LOG(ERR, "drop option is unsupported.");
+ return -EINVAL;
+ }
+
+ ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret >= 0 && add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter exists.",
+ filter->ether_type);
+ return -EEXIST;
+ }
+ if (ret < 0 && !add) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ if (add) {
+ etqf = IXGBE_ETQF_FILTER_EN;
+ etqf |= (uint32_t)filter->ether_type;
+ etqs |= (uint32_t)((filter->queue <<
+ IXGBE_ETQS_RX_QUEUE_SHIFT) &
+ IXGBE_ETQS_RX_QUEUE);
+ etqs |= IXGBE_ETQS_QUEUE_EN;
+
+ ethertype_filter.ethertype = filter->ether_type;
+ ethertype_filter.etqf = etqf;
+ ethertype_filter.etqs = etqs;
+ ethertype_filter.conf = FALSE;
+ ret = ixgbe_ethertype_filter_insert(filter_info,
+			&ethertype_filter);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype filters are full.");
+ return -ENOSPC;
+ }
+ } else {
+ ret = ixgbe_ethertype_filter_remove(filter_info, (uint8_t)ret);
+ if (ret < 0)
+ return -ENOSYS;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(ret), etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(ret), etqs);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+ uint32_t etqf, etqs;
+ int ret;
+
+ ret = ixgbe_ethertype_filter_lookup(filter_info, filter->ether_type);
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR, "ethertype (0x%04x) filter doesn't exist.",
+ filter->ether_type);
+ return -ENOENT;
+ }
+
+ etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(ret));
+ if (etqf & IXGBE_ETQF_FILTER_EN) {
+ etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(ret));
+ filter->ether_type = etqf & IXGBE_ETQF_ETHERTYPE;
+ filter->flags = 0;
+ filter->queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
+ IXGBE_ETQS_RX_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/*
+ * ixgbe_ethertype_filter_handle - Handle operations for ethertype filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+ixgbe_ethertype_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ MAC_TYPE_FILTER_SUP(hw->mac.type);
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ TRUE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_add_del_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_GET:
+ ret = ixgbe_get_ethertype_filter(dev,
+ (struct rte_eth_ethertype_filter *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+ixgbe_dev_filter_ctrl(struct rte_eth_dev *dev,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret = 0;
+
+ switch (filter_type) {
+ case RTE_ETH_FILTER_NTUPLE:
+ ret = ixgbe_ntuple_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_ETHERTYPE:
+ ret = ixgbe_ethertype_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_SYN:
+ ret = ixgbe_syn_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_FDIR:
+ ret = ixgbe_fdir_ctrl_func(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_L2_TUNNEL:
+ ret = ixgbe_dev_l2_tunnel_filter_handle(dev, filter_op, arg);
+ break;
+ case RTE_ETH_FILTER_GENERIC:
+ if (filter_op != RTE_ETH_FILTER_GET)
+ return -EINVAL;
+ *(const void **)arg = &ixgbe_flow_ops;
+ break;
+ default:
+ PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
+ filter_type);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static u8 *
+ixgbe_dev_addr_list_itr(__attribute__((unused)) struct ixgbe_hw *hw,
+ u8 **mc_addr_ptr, u32 *vmdq)
+{
+ u8 *mc_addr;
+
+ *vmdq = 0;
+ mc_addr = *mc_addr_ptr;
+ *mc_addr_ptr = (mc_addr + sizeof(struct ether_addr));
+ return mc_addr;
+}
+
+static int
+ixgbe_dev_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set,
+ uint32_t nb_mc_addr)
+{
+ struct ixgbe_hw *hw;
+ u8 *mc_addr_list;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mc_addr_list = (u8 *)mc_addr_set;
+ return ixgbe_update_mc_addr_list(hw, mc_addr_list, nb_mc_addr,
+ ixgbe_dev_addr_list_itr, TRUE);
+}
+
+static uint64_t
+ixgbe_read_systime_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t systime_cycles;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+		/* SYSTIML stores ns and SYSTIMH stores seconds. */
+ systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ systime_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+ * NSEC_PER_SEC;
+ break;
+ default:
+ systime_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
+ systime_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_SYSTIMH)
+ << 32;
+ }
+
+ return systime_cycles;
+}
+
+static uint64_t
+ixgbe_read_rx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t rx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ /* RXSTMPL stores ns and RXSTMPH stores seconds. */
+ rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+ rx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ default:
+		/* RXSTMPL and RXSTMPH together form a 64-bit cycle counter. */
+ rx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
+ rx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_RXSTMPH)
+ << 32;
+ }
+
+ return rx_tstamp_cycles;
+}
+
+static uint64_t
+ixgbe_read_tx_tstamp_cyclecounter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t tx_tstamp_cycles;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ /* TXSTMPL stores ns and TXSTMPH stores seconds. */
+ tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+ tx_tstamp_cycles += (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+ * NSEC_PER_SEC;
+ break;
+ default:
+		/* TXSTMPL and TXSTMPH together form a 64-bit cycle counter. */
+ tx_tstamp_cycles = (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
+ tx_tstamp_cycles |= (uint64_t)IXGBE_READ_REG(hw, IXGBE_TXSTMPH)
+ << 32;
+ }
+
+ return tx_tstamp_cycles;
+}
+
+static void
+ixgbe_start_timecounters(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ struct rte_eth_link link;
+ uint32_t incval = 0;
+ uint32_t shift = 0;
+
+ /* Get current link speed. */
+ memset(&link, 0, sizeof(link));
+ ixgbe_dev_link_update(dev, 1);
+ rte_ixgbe_dev_atomic_read_link_status(dev, &link);
+
+ switch (link.link_speed) {
+ case ETH_SPEED_NUM_100M:
+ incval = IXGBE_INCVAL_100;
+ shift = IXGBE_INCVAL_SHIFT_100;
+ break;
+ case ETH_SPEED_NUM_1G:
+ incval = IXGBE_INCVAL_1GB;
+ shift = IXGBE_INCVAL_SHIFT_1GB;
+ break;
+ case ETH_SPEED_NUM_10G:
+ default:
+ incval = IXGBE_INCVAL_10GB;
+ shift = IXGBE_INCVAL_SHIFT_10GB;
+ break;
+ }
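+	/* SYSTIM advances by incval per MAC clock tick; together with
+	 * shift, the timecounters set up below convert raw cycles to
+	 * nanoseconds. On X550 the counter already ticks in ns, hence
+	 * incval = 1 and shift = 0 in the switch below.
+	 */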
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ /* Independent of link speed. */
+ incval = 1;
+ /* Cycles read will be interpreted as ns. */
+ shift = 0;
+ /* Fall-through */
+ case ixgbe_mac_X540:
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval);
+ break;
+ case ixgbe_mac_82599EB:
+ incval >>= IXGBE_INCVAL_SHIFT_82599;
+ shift -= IXGBE_INCVAL_SHIFT_82599;
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
+ (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+ break;
+ default:
+ /* Not supported. */
+ return;
+ }
+
+ memset(&adapter->systime_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->rx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+ memset(&adapter->tx_tstamp_tc, 0, sizeof(struct rte_timecounter));
+
+ adapter->systime_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+ adapter->systime_tc.cc_shift = shift;
+ adapter->systime_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->rx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+ adapter->rx_tstamp_tc.cc_shift = shift;
+ adapter->rx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+
+ adapter->tx_tstamp_tc.cc_mask = IXGBE_CYCLECOUNTER_MASK;
+ adapter->tx_tstamp_tc.cc_shift = shift;
+ adapter->tx_tstamp_tc.nsec_mask = (1ULL << shift) - 1;
+}
+
+static int
+ixgbe_timesync_adjust_time(struct rte_eth_dev *dev, int64_t delta)
+{
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ adapter->systime_tc.nsec += delta;
+ adapter->rx_tstamp_tc.nsec += delta;
+ adapter->tx_tstamp_tc.nsec += delta;
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *ts)
+{
+ uint64_t ns;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ ns = rte_timespec_to_ns(ts);
+ /* Set the timecounters to a new value. */
+ adapter->systime_tc.nsec = ns;
+ adapter->rx_tstamp_tc.nsec = ns;
+ adapter->tx_tstamp_tc.nsec = ns;
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *ts)
+{
+ uint64_t ns, systime_cycles;
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+
+ systime_cycles = ixgbe_read_systime_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->systime_tc, systime_cycles);
+ *ts = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_enable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+ uint32_t tsauxc;
+
+ /* Stop the timesync system time. */
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0x0);
+ /* Reset the timesync system time value. */
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x0);
+ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x0);
+
+ /* Enable system time for platforms where it isn't on by default. */
+ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC);
+ tsauxc &= ~IXGBE_TSAUXC_DISABLE_SYSTIME;
+ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
+
+ ixgbe_start_timecounters(dev);
+
+ /* Enable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
+ (ETHER_TYPE_1588 |
+ IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_1588));
+
+ /* Enable timestamping of received PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ tsync_ctl |= IXGBE_TSYNCRXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
+
+ /* Enable timestamping of transmitted PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ tsync_ctl |= IXGBE_TSYNCTXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t tsync_ctl;
+
+ /* Disable timestamping of transmitted PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ tsync_ctl &= ~IXGBE_TSYNCTXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCTXCTL, tsync_ctl);
+
+ /* Disable timestamping of received PTP packets. */
+ tsync_ctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ tsync_ctl &= ~IXGBE_TSYNCRXCTL_ENABLED;
+ IXGBE_WRITE_REG(hw, IXGBE_TSYNCRXCTL, tsync_ctl);
+
+ /* Disable L2 filtering of IEEE1588/802.1AS Ethernet frame types. */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
+
+	/* Stop incrementing the System Time registers. */
+ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, 0);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_read_rx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp,
+ uint32_t flags __rte_unused)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ uint32_t tsync_rxctl;
+ uint64_t rx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_rxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
+ if ((tsync_rxctl & IXGBE_TSYNCRXCTL_VALID) == 0)
+ return -EINVAL;
+
+ rx_tstamp_cycles = ixgbe_read_rx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->rx_tstamp_tc, rx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ixgbe_timesync_read_tx_timestamp(struct rte_eth_dev *dev,
+ struct timespec *timestamp)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_adapter *adapter =
+ (struct ixgbe_adapter *)dev->data->dev_private;
+ uint32_t tsync_txctl;
+ uint64_t tx_tstamp_cycles;
+ uint64_t ns;
+
+ tsync_txctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
+ if ((tsync_txctl & IXGBE_TSYNCTXCTL_VALID) == 0)
+ return -EINVAL;
+
+ tx_tstamp_cycles = ixgbe_read_tx_tstamp_cyclecounter(dev);
+ ns = rte_timecounter_update(&adapter->tx_tstamp_tc, tx_tstamp_cycles);
+ *timestamp = rte_ns_to_timespec(ns);
+
+ return 0;
+}
+
+static int
+ixgbe_get_reg_length(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+ const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others;
+
+ while ((reg_group = reg_set[g_ind++]))
+ count += ixgbe_regs_group_count(reg_group);
+
+ return count;
+}
+
+static int
+ixgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = ixgbevf_regs[g_ind++]))
+ count += ixgbe_regs_group_count(reg_group);
+
+ return count;
+}
+
+static int
+ixgbe_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+ const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
+ ixgbe_regs_mac_82598EB : ixgbe_regs_others;
+
+ if (data == NULL) {
+ regs->length = ixgbe_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = reg_set[g_ind++]))
+ count += ixgbe_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+ixgbevf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ if (data == NULL) {
+ regs->length = ixgbevf_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if ((regs->length == 0) ||
+ (regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = ixgbevf_regs[g_ind++]))
+ count += ixgbe_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+ixgbe_get_eeprom_length(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+	/* Return the EEPROM size in bytes; word_size counts 16-bit words */
+ return hw->eeprom.word_size * 2;
+}
+
+static int
+ixgbe_get_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
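+	/* the EEPROM is addressed in 16-bit words, so convert the byte
+	 * offset and length to word units
+	 */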
+ if ((first > hw->eeprom.word_size) ||
+ ((first + length) > hw->eeprom.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ return eeprom->ops.read_buffer(hw, first, length, data);
+}
+
+static int
+ixgbe_set_eeprom(struct rte_eth_dev *dev,
+ struct rte_dev_eeprom_info *in_eeprom)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ uint16_t *data = in_eeprom->data;
+ int first, length;
+
+ first = in_eeprom->offset >> 1;
+ length = in_eeprom->length >> 1;
+ if ((first > hw->eeprom.word_size) ||
+ ((first + length) > hw->eeprom.word_size))
+ return -EINVAL;
+
+ in_eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+ return eeprom->ops.write_buffer(hw, first, length, data);
+}
+
+uint16_t
+ixgbe_reta_size_get(enum ixgbe_mac_type mac_type) {
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ return ETH_RSS_RETA_SIZE_512;
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return ETH_RSS_RETA_SIZE_64;
+ default:
+ return ETH_RSS_RETA_SIZE_128;
+ }
+}
+
+uint32_t
+ixgbe_reta_reg_get(enum ixgbe_mac_type mac_type, uint16_t reta_idx) {
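+	/* Each 32-bit RETA register holds four 8-bit entries, so table
+	 * index i lives in register i >> 2; e.g. on X550, reta_idx 200
+	 * maps to ERETA((200 - 128) >> 2) = ERETA(18).
+	 */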
+ switch (mac_type) {
+ case ixgbe_mac_X550:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_X550EM_a:
+ if (reta_idx < ETH_RSS_RETA_SIZE_128)
+ return IXGBE_RETA(reta_idx >> 2);
+ else
+ return IXGBE_ERETA((reta_idx - ETH_RSS_RETA_SIZE_128) >> 2);
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return IXGBE_VFRETA(reta_idx >> 2);
+ default:
+ return IXGBE_RETA(reta_idx >> 2);
+ }
+}
+
+uint32_t
+ixgbe_mrqc_reg_get(enum ixgbe_mac_type mac_type) {
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return IXGBE_VFMRQC;
+ default:
+ return IXGBE_MRQC;
+ }
+}
+
+uint32_t
+ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i) {
+ switch (mac_type) {
+ case ixgbe_mac_X550_vf:
+ case ixgbe_mac_X550EM_x_vf:
+ case ixgbe_mac_X550EM_a_vf:
+ return IXGBE_VFRSSRK(i);
+ default:
+ return IXGBE_RSSRK(i);
+ }
+}
+
+bool
+ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type) {
+ switch (mac_type) {
+ case ixgbe_mac_82599_vf:
+ case ixgbe_mac_X540_vf:
+ return 0;
+ default:
+ return 1;
+ }
+}
+
+static int
+ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
+ struct rte_eth_dcb_info *dcb_info)
+{
+ struct ixgbe_dcb_config *dcb_config =
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ struct ixgbe_dcb_tc_config *tc;
+ uint8_t i, j;
+
+ if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
+ dcb_info->nb_tcs = dcb_config->num_tcs.pg_tcs;
+ else
+ dcb_info->nb_tcs = 1;
+
+	if (dcb_config->vt_mode) { /* vt is enabled */
+ struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < dcb_info->nb_tcs; j++) {
+ dcb_info->tc_queue.tc_rxq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
+ dcb_info->tc_queue.tc_txq[i][j].base =
+ i * dcb_info->nb_tcs + j;
+ dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+ }
+ }
+	} else { /* vt is disabled */
+ struct rte_eth_dcb_rx_conf *rx_conf =
+ &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
+ for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
+ dcb_info->prio_tc[i] = rx_conf->dcb_tc[i];
+ if (dcb_info->nb_tcs == ETH_4_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 32;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
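+			/* 82599 DCB with 4 TCs: Tx queues are allocated
+			 * unevenly, 64/32/16/16 per TC (bases 0, 64, 96, 112),
+			 * unlike Rx which gets an even 32 queues per TC.
+			 */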
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 64;
+ dcb_info->tc_queue.tc_txq[0][2].base = 96;
+ dcb_info->tc_queue.tc_txq[0][3].base = 112;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 64;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ } else if (dcb_info->nb_tcs == ETH_8_TCS) {
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ dcb_info->tc_queue.tc_rxq[0][i].base = i * 16;
+ dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 16;
+ }
+ dcb_info->tc_queue.tc_txq[0][0].base = 0;
+ dcb_info->tc_queue.tc_txq[0][1].base = 32;
+ dcb_info->tc_queue.tc_txq[0][2].base = 64;
+ dcb_info->tc_queue.tc_txq[0][3].base = 80;
+ dcb_info->tc_queue.tc_txq[0][4].base = 96;
+ dcb_info->tc_queue.tc_txq[0][5].base = 104;
+ dcb_info->tc_queue.tc_txq[0][6].base = 112;
+ dcb_info->tc_queue.tc_txq[0][7].base = 120;
+ dcb_info->tc_queue.tc_txq[0][0].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][1].nb_queue = 32;
+ dcb_info->tc_queue.tc_txq[0][2].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][3].nb_queue = 16;
+ dcb_info->tc_queue.tc_txq[0][4].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][5].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][6].nb_queue = 8;
+ dcb_info->tc_queue.tc_txq[0][7].nb_queue = 8;
+ }
+ }
+ for (i = 0; i < dcb_info->nb_tcs; i++) {
+ tc = &dcb_config->tc_config[i];
+ dcb_info->tc_bws[i] = tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent;
+ }
+ return 0;
+}
+
+/* Update e-tag ether type */
+static int
+ixgbe_update_e_tag_eth_type(struct ixgbe_hw *hw,
+ uint16_t ether_type)
+{
+ uint32_t etag_etype;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+ etag_etype &= ~IXGBE_ETAG_ETYPE_MASK;
+ etag_etype |= ether_type;
+ IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/* Config l2 tunnel ether type */
+static int
+ixgbe_dev_l2_tunnel_eth_type_conf(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+
+ if (l2_tunnel == NULL)
+ return -EINVAL;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_ether_type = l2_tunnel->ether_type;
+ ret = ixgbe_update_e_tag_eth_type(hw, l2_tunnel->ether_type);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Enable e-tag tunnel */
+static int
+ixgbe_e_tag_enable(struct ixgbe_hw *hw)
+{
+ uint32_t etag_etype;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+ etag_etype |= IXGBE_ETAG_ETYPE_VALID;
+ IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/* Enable l2 tunnel */
+static int
+ixgbe_dev_l2_tunnel_enable(struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = TRUE;
+ ret = ixgbe_e_tag_enable(hw);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable e-tag tunnel */
+static int
+ixgbe_e_tag_disable(struct ixgbe_hw *hw)
+{
+ uint32_t etag_etype;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ etag_etype = IXGBE_READ_REG(hw, IXGBE_ETAG_ETYPE);
+ etag_etype &= ~IXGBE_ETAG_ETYPE_VALID;
+ IXGBE_WRITE_REG(hw, IXGBE_ETAG_ETYPE, etag_etype);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
+/* Disable l2 tunnel */
+static int
+ixgbe_dev_l2_tunnel_disable(struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_en = FALSE;
+ ret = ixgbe_e_tag_disable(hw);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_e_tag_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t i, rar_entries;
+ uint32_t rar_low, rar_high;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+ for (i = 1; i < rar_entries; i++) {
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(i));
+ if ((rar_high & IXGBE_RAH_AV) &&
+ (rar_high & IXGBE_RAH_ADTYPE) &&
+ ((rar_low & IXGBE_RAL_ETAG_FILTER_MASK) ==
+ l2_tunnel->tunnel_id)) {
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0);
+
+ ixgbe_clear_vmdq(hw, i, IXGBE_CLEAR_VMDQ_ALL);
+
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_e_tag_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t i, rar_entries;
+ uint32_t rar_low, rar_high;
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+	/* One entry per tunnel. Try to remove a potential existing entry. */
+ ixgbe_e_tag_filter_del(dev, l2_tunnel);
+
+ rar_entries = ixgbe_get_num_rx_addrs(hw);
+
+ for (i = 1; i < rar_entries; i++) {
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(i));
+ if (rar_high & IXGBE_RAH_AV) {
+ continue;
+ } else {
+ ixgbe_set_vmdq(hw, i, l2_tunnel->pool);
+ rar_high = IXGBE_RAH_AV | IXGBE_RAH_ADTYPE;
+ rar_low = l2_tunnel->tunnel_id;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(i), rar_low);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(i), rar_high);
+
+ return ret;
+ }
+ }
+
+ PMD_INIT_LOG(NOTICE, "The table of E-tag forwarding rule is full."
+ " Please remove a rule before adding a new one.");
+ return -EINVAL;
+}
+
+static inline struct ixgbe_l2_tn_filter *
+ixgbe_l2_tn_filter_lookup(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+
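+	/* rte_hash_lookup() returns the key's slot index on success,
+	 * which is also the index into hash_map
+	 */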
+ ret = rte_hash_lookup(l2_tn_info->hash_handle, (const void *)key);
+ if (ret < 0)
+ return NULL;
+
+ return l2_tn_info->hash_map[ret];
+}
+
+static inline int
+ixgbe_insert_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_filter *l2_tn_filter)
+{
+ int ret;
+
+ ret = rte_hash_add_key(l2_tn_info->hash_handle,
+ &l2_tn_filter->key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "Failed to insert L2 tunnel filter"
+ " to hash table %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_info->hash_map[ret] = l2_tn_filter;
+
+ TAILQ_INSERT_TAIL(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+
+ return 0;
+}
+
+static inline int
+ixgbe_remove_l2_tn_filter(struct ixgbe_l2_tn_info *l2_tn_info,
+ struct ixgbe_l2_tn_key *key)
+{
+ int ret;
+ struct ixgbe_l2_tn_filter *l2_tn_filter;
+
+ ret = rte_hash_del_key(l2_tn_info->hash_handle, key);
+
+ if (ret < 0) {
+ PMD_DRV_LOG(ERR,
+ "No such L2 tunnel filter to delete %d!",
+ ret);
+ return ret;
+ }
+
+ l2_tn_filter = l2_tn_info->hash_map[ret];
+ l2_tn_info->hash_map[ret] = NULL;
+
+ TAILQ_REMOVE(&l2_tn_info->l2_tn_list, l2_tn_filter, entries);
+ rte_free(l2_tn_filter);
+
+ return 0;
+}
+
+/* Add l2 tunnel filter */
+int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore)
+{
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+ struct ixgbe_l2_tn_filter *node;
+
+ if (!restore) {
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+
+ node = ixgbe_l2_tn_filter_lookup(l2_tn_info, &key);
+
+ if (node) {
+ PMD_DRV_LOG(ERR,
+ "The L2 tunnel filter already exists!");
+ return -EINVAL;
+ }
+
+ node = rte_zmalloc("ixgbe_l2_tn",
+ sizeof(struct ixgbe_l2_tn_filter),
+ 0);
+ if (!node)
+ return -ENOMEM;
+
+ (void)rte_memcpy(&node->key,
+ &key,
+ sizeof(struct ixgbe_l2_tn_key));
+ node->pool = l2_tunnel->pool;
+ ret = ixgbe_insert_l2_tn_filter(l2_tn_info, node);
+ if (ret < 0) {
+ rte_free(node);
+ return ret;
+ }
+ }
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_filter_add(dev, l2_tunnel);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ if ((!restore) && (ret < 0))
+ (void)ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+
+ return ret;
+}
+
+/* Delete l2 tunnel filter */
+int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret;
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ struct ixgbe_l2_tn_key key;
+
+ key.l2_tn_type = l2_tunnel->l2_tunnel_type;
+ key.tn_id = l2_tunnel->tunnel_id;
+ ret = ixgbe_remove_l2_tn_filter(l2_tn_info, &key);
+ if (ret < 0)
+ return ret;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_filter_del(dev, l2_tunnel);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * ixgbe_dev_l2_tunnel_filter_handle - Handle operations for l2 tunnel filter.
+ * @dev: pointer to rte_eth_dev structure
+ * @filter_op: operation to be taken
+ * @arg: a pointer to specific structure corresponding to the filter_op
+ */
+static int
+ixgbe_dev_l2_tunnel_filter_handle(struct rte_eth_dev *dev,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ int ret;
+
+ if (filter_op == RTE_ETH_FILTER_NOP)
+ return 0;
+
+ if (arg == NULL) {
+ PMD_DRV_LOG(ERR, "arg shouldn't be NULL for operation %u.",
+ filter_op);
+ return -EINVAL;
+ }
+
+ switch (filter_op) {
+ case RTE_ETH_FILTER_ADD:
+ ret = ixgbe_dev_l2_tunnel_filter_add
+ (dev,
+ (struct rte_eth_l2_tunnel_conf *)arg,
+ FALSE);
+ break;
+ case RTE_ETH_FILTER_DELETE:
+ ret = ixgbe_dev_l2_tunnel_filter_del
+ (dev,
+ (struct rte_eth_l2_tunnel_conf *)arg);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported operation %u.", filter_op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+static int
+ixgbe_e_tag_forwarding_en_dis(struct rte_eth_dev *dev, bool en)
+{
+ int ret = 0;
+ uint32_t ctrl;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ ctrl &= ~IXGBE_VT_CTL_POOLING_MODE_MASK;
+ if (en)
+ ctrl |= IXGBE_VT_CTL_POOLING_MODE_ETAG;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, ctrl);
+
+ return ret;
+}
+
+/* Enable l2 tunnel forwarding */
+static int
+ixgbe_dev_l2_tunnel_forwarding_enable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = TRUE;
+ ret = ixgbe_e_tag_forwarding_en_dis(dev, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable l2 tunnel forwarding */
+static int
+ixgbe_dev_l2_tunnel_forwarding_disable
+ (struct rte_eth_dev *dev,
+ enum rte_eth_tunnel_type l2_tunnel_type)
+{
+ struct ixgbe_l2_tn_info *l2_tn_info =
+ IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
+ int ret = 0;
+
+ switch (l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ l2_tn_info->e_tag_fwd_en = FALSE;
+ ret = ixgbe_e_tag_forwarding_en_dis(dev, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_e_tag_insertion_en_dis(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool en)
+{
+ struct rte_pci_device *pci_dev = IXGBE_DEV_TO_PCI(dev);
+ int ret = 0;
+ uint32_t vmtir, vmvir;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (l2_tunnel->vf_id >= pci_dev->max_vfs) {
+ PMD_DRV_LOG(ERR,
+ "VF id %u should be less than %u",
+ l2_tunnel->vf_id,
+ pci_dev->max_vfs);
+ return -EINVAL;
+ }
+
+ if (hw->mac.type != ixgbe_mac_X550 &&
+ hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_X550EM_a) {
+ return -ENOTSUP;
+ }
+
+ if (en)
+ vmtir = l2_tunnel->tunnel_id;
+ else
+ vmtir = 0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMTIR(l2_tunnel->vf_id), vmtir);
+
+ vmvir = IXGBE_READ_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id));
+ vmvir &= ~IXGBE_VMVIR_TAGA_MASK;
+ if (en)
+ vmvir |= IXGBE_VMVIR_TAGA_ETAG_INSERT;
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(l2_tunnel->vf_id), vmvir);
+
+ return ret;
+}
+
+/* Enable l2 tunnel tag insertion */
+static int
+ixgbe_dev_l2_tunnel_insertion_enable(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 1);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/* Disable l2 tunnel tag insertion */
+static int
+ixgbe_dev_l2_tunnel_insertion_disable
+ (struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel)
+{
+ int ret = 0;
+
+ switch (l2_tunnel->l2_tunnel_type) {
+ case RTE_L2_TUNNEL_TYPE_E_TAG:
+ ret = ixgbe_e_tag_insertion_en_dis(dev, l2_tunnel, 0);
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "Invalid tunnel type");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+ixgbe_e_tag_stripping_en_dis(struct rte_eth_dev *dev,
+ bool en)
+{
+ int ret = 0;
+ uint32_t qde;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);