+
+static int
+ixgbe_vmdq_mode_check(struct ixgbe_hw *hw)
+{
+ uint32_t reg_val;
+
+ /* we only need to do this if VMDq is enabled */
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (!(reg_val & IXGBE_VT_CTL_VT_ENABLE)) {
+ PMD_INIT_LOG(ERR, "VMDq must be enabled for this setting");
+ return (-1);
+ }
+
+ return 0;
+}
+
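+/*
+ * Compute the 12-bit hash vector used to index the Unicast Table Array
+ * (UTA). Worked example (illustration only, not part of the patch): with
+ * mc_filter_type 0 and a MAC address ending in ...:AB:CD,
+ * vector = (0xAB >> 4) | (0xCD << 4) = 0xCDA.
+ */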
+static uint32_t
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
+{
+ uint32_t vector = 0;
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 4) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 3) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((uc_addr->addr_bytes[4] >> 2) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((uc_addr->addr_bytes[4]) |
+ (((uint16_t)uc_addr->addr_bytes[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12 bits wide or the table boundary would be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
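+/*
+ * Set or clear the UTA bit for one unicast address. The 12-bit hash
+ * vector is split into a 7-bit register index and a 5-bit bit position;
+ * continuing the example above (illustration only), vector 0xCDA selects
+ * bit 26 of UTA register 0x66.
+ */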
+static int
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint8_t on)
+{
+ uint32_t vector;
+ uint32_t uta_idx;
+ uint32_t reg_val;
+ uint32_t uta_shift;
+ uint32_t rc;
+ const uint32_t ixgbe_uta_idx_mask = 0x7F;
+ const uint32_t ixgbe_uta_bit_shift = 5;
+ const uint32_t ixgbe_uta_bit_mask = (0x1 << ixgbe_uta_bit_shift) - 1;
+ const uint32_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return (-ENOTSUP);
+
+ vector = ixgbe_uta_vector(hw, mac_addr);
+ uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
+ uta_shift = vector & ixgbe_uta_bit_mask;
+
+ rc = (((uta_info->uta_shadow[uta_idx] >> uta_shift) & bit1) != 0);
+ if (rc == on)
+ return 0;
+
+ reg_val = IXGBE_READ_REG(hw, IXGBE_UTA(uta_idx));
+ if (on) {
+ uta_info->uta_in_use++;
+ reg_val |= (bit1 << uta_shift);
+ uta_info->uta_shadow[uta_idx] |= (bit1 << uta_shift);
+ } else {
+ uta_info->uta_in_use--;
+ reg_val &= ~(bit1 << uta_shift);
+ uta_info->uta_shadow[uta_idx] &= ~(bit1 << uta_shift);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(uta_idx), reg_val);
+
+ if (uta_info->uta_in_use > 0)
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
+
+ return 0;
+}
+
+static int
+ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on)
+{
+ int i;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_uta_info *uta_info =
+ IXGBE_DEV_PRIVATE_TO_UTA(dev->data->dev_private);
+
+ /* The UTA table only exists on 82599 hardware and newer */
+ if (hw->mac.type < ixgbe_mac_82599EB)
+ return (-ENOTSUP);
+
+ for (i = 0; i < ETH_VMDQ_NUM_UC_HASH_ARRAY; i++) {
+ uta_info->uta_shadow[i] = on ? ~0 : 0;
+ IXGBE_WRITE_REG(hw, IXGBE_UTA(i), on ? ~0 : 0);
+ }
+
+ return 0;
+}
+
+uint32_t
+ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val)
+{
+ uint32_t new_val = orig_val;
+
+ if (rx_mask & ETH_VMDQ_ACCEPT_UNTAG)
+ new_val |= IXGBE_VMOLR_AUPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_MC)
+ new_val |= IXGBE_VMOLR_ROMPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_HASH_UC)
+ new_val |= IXGBE_VMOLR_ROPE;
+ if (rx_mask & ETH_VMDQ_ACCEPT_BROADCAST)
+ new_val |= IXGBE_VMOLR_BAM;
+ if (rx_mask & ETH_VMDQ_ACCEPT_MULTICAST)
+ new_val |= IXGBE_VMOLR_MPE;
+
+ return new_val;
+}
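+/*
+ * Usage sketch for the helper above (illustration only): rx_mask =
+ * ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST returns orig_val
+ * with IXGBE_VMOLR_AUPE and IXGBE_VMOLR_BAM also set, leaving all other
+ * VMOLR bits untouched.
+ */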
+
+static int
+ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
+ uint16_t rx_mask, uint8_t on)
+{
+ int val = 0;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
+
+ if (hw->mac.type == ixgbe_mac_82598EB) {
+ PMD_INIT_LOG(ERR, "setting VF receive mode set should be done"
+ " on 82599 hardware and newer");
+ return (-ENOTSUP);
+ }
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ val = ixgbe_convert_vm_rx_mask_to_val(rx_mask, val);
+
+ if (on)
+ vmolr |= val;
+ else
+ vmolr &= ~val;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
+
+ return 0;
+}
+
+static int
+ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+ reg = IXGBE_READ_REG(hw, addr);
+ /* bit position within the selected 32-bit register */
+ val = bit1 << (pool & 0x1F);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+static int
+ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
+{
+ uint32_t reg, addr;
+ uint32_t val;
+ const uint8_t bit1 = 0x1;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+ reg = IXGBE_READ_REG(hw, addr);
+ /* bit position within the selected 32-bit register */
+ val = bit1 << (pool & 0x1F);
+
+ if (on)
+ reg |= val;
+ else
+ reg &= ~val;
+
+ IXGBE_WRITE_REG(hw, addr, reg);
+
+ return 0;
+}
+
+static int
+ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
+ uint64_t pool_mask, uint8_t vlan_on)
+{
+ int ret = 0;
+ uint16_t pool_idx;
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+ for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
+ if (pool_mask & (1ULL << pool_idx)) {
+ ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, vlan_on);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
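+/*
+ * Program one mirror rule. The constants below spell out the MRCTL
+ * layout assumed here: bits 3:0 hold the rule type, bits 11:8 the
+ * destination pool, and the upper 32 bits of each 64-bit pool/VLAN mask
+ * go into the companion register at rule_id + 4.
+ */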
+static int
+ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
+ struct rte_eth_vmdq_mirror_conf *mirror_conf,
+ uint8_t rule_id, uint8_t on)
+{
+ uint32_t mr_ctl, vlvf;
+ uint32_t mp_lsb = 0;
+ uint32_t mv_msb = 0;
+ uint32_t mv_lsb = 0;
+ uint32_t mp_msb = 0;
+ uint8_t i = 0;
+ int reg_index = 0;
+ uint64_t vlan_mask = 0;
+
+ const uint8_t pool_mask_offset = 32;
+ const uint8_t vlan_mask_offset = 32;
+ const uint8_t dst_pool_offset = 8;
+ const uint8_t rule_mr_offset = 4;
+ const uint8_t mirror_rule_mask = 0x0F;
+
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ /* Check if vlan mask is valid */
+ if ((mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) && (on)) {
+ if (mirror_conf->vlan.vlan_mask == 0)
+ return (-EINVAL);
+ }
+
+ /* Check if vlan id is valid and find corresponding VLAN ID index in VLVF */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+ for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
+ /* search vlan id related pool vlan filter index */
+ reg_index = ixgbe_find_vlvf_slot(hw,
+ mirror_conf->vlan.vlan_id[i]);
+ if (reg_index < 0)
+ return (-EINVAL);
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
+ if ((vlvf & IXGBE_VLVF_VIEN) &&
+ ((vlvf & IXGBE_VLVF_VLANID_MASK)
+ == mirror_conf->vlan.vlan_id[i]))
+ vlan_mask |= (1ULL << reg_index);
+ else
+ return (-EINVAL);
+ }
+ }
+
+ if (on) {
+ mv_lsb = vlan_mask & 0xFFFFFFFF;
+ mv_msb = vlan_mask >> vlan_mask_offset;
+
+ mr_info->mr_conf[rule_id].vlan.vlan_mask =
+ mirror_conf->vlan.vlan_mask;
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++) {
+ if (mirror_conf->vlan.vlan_mask & (1ULL << i))
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] =
+ mirror_conf->vlan.vlan_id[i];
+ }
+ } else {
+ mv_lsb = 0;
+ mv_msb = 0;
+ mr_info->mr_conf[rule_id].vlan.vlan_mask = 0;
+ for (i = 0; i < ETH_VMDQ_MAX_VLAN_FILTERS; i++)
+ mr_info->mr_conf[rule_id].vlan.vlan_id[i] = 0;
+ }
+ }
+
+ /*
+ * If pool mirroring is enabled, write the related pool mask register;
+ * if it is disabled, clear the PFMRVM register.
+ */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+ if (on) {
+ mp_lsb = mirror_conf->pool_mask & 0xFFFFFFFF;
+ mp_msb = mirror_conf->pool_mask >> pool_mask_offset;
+ mr_info->mr_conf[rule_id].pool_mask =
+ mirror_conf->pool_mask;
+
+ } else {
+ mp_lsb = 0;
+ mp_msb = 0;
+ mr_info->mr_conf[rule_id].pool_mask = 0;
+ }
+ }
+
+ /* read mirror control register and recalculate it */
+ mr_ctl = IXGBE_READ_REG(hw, IXGBE_MRCTL(rule_id));
+
+ if (on) {
+ mr_ctl |= mirror_conf->rule_type_mask;
+ mr_ctl &= mirror_rule_mask;
+ mr_ctl |= mirror_conf->dst_pool << dst_pool_offset;
+ } else
+ mr_ctl &= ~(mirror_conf->rule_type_mask & mirror_rule_mask);
+
+ mr_info->mr_conf[rule_id].rule_type_mask =
+ (uint8_t)(mr_ctl & mirror_rule_mask);
+ mr_info->mr_conf[rule_id].dst_pool = mirror_conf->dst_pool;
+
+ /* write mirror control register */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+ /* write pool mirror control register */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_POOL_MIRROR) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
+ mp_msb);
+ }
+ /* write VLAN mirror control register */
+ if (mirror_conf->rule_type_mask & ETH_VMDQ_VLAN_MIRROR) {
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
+ mv_msb);
+ }
+
+ return 0;
+}
+
+static int
+ixgbe_mirror_rule_reset(struct rte_eth_dev *dev, uint8_t rule_id)
+{
+ int mr_ctl = 0;
+ uint32_t lsb_val = 0;
+ uint32_t msb_val = 0;
+ const uint8_t rule_mr_offset = 4;
+
+ struct ixgbe_hw *hw =
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_mirror_info *mr_info =
+ (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
+
+ if (ixgbe_vmdq_mode_check(hw) < 0)
+ return (-ENOTSUP);
+
+ memset(&mr_info->mr_conf[rule_id], 0,
+ sizeof(struct rte_eth_vmdq_mirror_conf));
+
+ /* clear PFVMCTL register */
+ IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
+
+ /* clear pool mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset), msb_val);
+
+ /* clear vlan mask register */
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), lsb_val);
+ IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset), msb_val);
+
+ return 0;
+}
+
+static int ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
+ uint16_t queue_idx, uint16_t tx_rate)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t rf_dec, rf_int;
+ uint32_t bcnrc_val;
+ uint16_t link_speed = dev->data->dev_link.link_speed;
+
+ if (queue_idx >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (tx_rate != 0) {
+ /* Calculate the rate factor values to set */
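+ /*
+ * The factor encodes link_speed / tx_rate with a 14-bit
+ * fractional part. Worked example (illustration only):
+ * link_speed = 10000 Mbps and tx_rate = 3000 Mbps give
+ * rf_int = 3 and rf_dec = (1000 << 14) / 3000 = 5461,
+ * i.e. a factor of ~3.333.
+ */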
+ rf_int = (uint32_t)link_speed / (uint32_t)tx_rate;
+ rf_dec = (uint32_t)link_speed % (uint32_t)tx_rate;
+ rf_dec = (rf_dec << IXGBE_RTTBCNRC_RF_INT_SHIFT) / tx_rate;
+
+ bcnrc_val = IXGBE_RTTBCNRC_RS_ENA;
+ bcnrc_val |= ((rf_int << IXGBE_RTTBCNRC_RF_INT_SHIFT) &
+ IXGBE_RTTBCNRC_RF_INT_MASK_M);
+ bcnrc_val |= (rf_dec & IXGBE_RTTBCNRC_RF_DEC_MASK);
+ } else {
+ bcnrc_val = 0;
+ }
+
+ /*
+ * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
+ * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported,
+ * otherwise set it to 0x4.
+ */
+ if ((dev->data->dev_conf.rxmode.jumbo_frame == 1) &&
+ (dev->data->dev_conf.rxmode.max_rx_pkt_len >=
+ IXGBE_MAX_JUMBO_FRAME_SIZE))
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+ IXGBE_MMW_SIZE_JUMBO_FRAME);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
+ IXGBE_MMW_SIZE_DEFAULT);
+
+ /* Set RTTBCNRC of queue X */
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+ IXGBE_WRITE_FLUSH(hw);
+
+ return 0;
+}
+
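+/*
+ * Apply a TX rate limit to each queue of a VF's pool selected by q_msk.
+ * For illustration (not normative): with 64 active pools on a device
+ * exposing 128 queues, queue_stride is 2, so VF n owns queues 2n and
+ * 2n + 1, and bit i of q_msk gates queue i of that pool.
+ */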
+static int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_vf_info *vfinfo =
+ *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+ uint8_t nb_q_per_pool = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+ uint32_t queue_stride =
+ IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ uint32_t queue_idx = vf * queue_stride, idx = 0, vf_idx;
+ uint32_t queue_end = queue_idx + nb_q_per_pool - 1;
+ uint16_t total_rate = 0;
+
+ if (queue_end >= hw->mac.max_tx_queues)
+ return -EINVAL;
+
+ if (vfinfo != NULL) {
+ for (vf_idx = 0; vf_idx < dev->pci_dev->max_vfs; vf_idx++) {
+ if (vf_idx == vf)
+ continue;
+ for (idx = 0; idx < RTE_DIM(vfinfo[vf_idx].tx_rate);
+ idx++)
+ total_rate += vfinfo[vf_idx].tx_rate[idx];
+ }
+ } else
+ return -EINVAL;
+
+ /* Store tx_rate for this vf. */
+ for (idx = 0; idx < nb_q_per_pool; idx++) {
+ if (((uint64_t)0x1 << idx) & q_msk) {
+ if (vfinfo[vf].tx_rate[idx] != tx_rate)
+ vfinfo[vf].tx_rate[idx] = tx_rate;
+ total_rate += tx_rate;
+ }
+ }
+
+ if (total_rate > dev->data->dev_link.link_speed) {
+ /*
+ * Reset the stored TX rate of the VF if it would cause the
+ * link speed to be exceeded.
+ */
+ memset(vfinfo[vf].tx_rate, 0, sizeof(vfinfo[vf].tx_rate));
+ return -EINVAL;
+ }
+
+ /* Set RTTBCNRC of each queue/pool for vf X */
+ for (; queue_idx <= queue_end; queue_idx++) {
+ if (0x1 & q_msk)
+ ixgbe_set_queue_rate_limit(dev, queue_idx, tx_rate);
+ q_msk = q_msk >> 1;
+ }
+
+ return 0;
+}
+
+static void
+ixgbevf_add_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ __attribute__((unused)) uint32_t index,
+ __attribute__((unused)) uint32_t pool)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int diag;
+
+ /*
+ * On a 82599 VF, re-adding the same MAC address is not an idempotent
+ * operation. Trap this case to avoid exhausting the [very limited]
+ * set of PF resources used to store VF MAC addresses.
+ */
+ if (memcmp(hw->mac.perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+ return;
+ diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (diag == 0)
+ return;
+ PMD_DRV_LOG(ERR, "Unable to add MAC address - diag=%d", diag);
+}
+
+static void
+ixgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ether_addr *perm_addr = (struct ether_addr *) hw->mac.perm_addr;
+ struct ether_addr *mac_addr;
+ uint32_t i;
+ int diag;
+
+ /*
+ * The IXGBE_VF_SET_MACVLAN command of the ixgbe-pf driver does
+ * not support the deletion of a given MAC address.
+ * Instead, it requires deleting all MAC addresses, then adding back
+ * all of them with the exception of the one to be deleted.
+ */
+ (void) ixgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+ /*
+ * Add again all MAC addresses, with the exception of the deleted one
+ * and of the permanent MAC address.
+ */
+ for (i = 0, mac_addr = dev->data->mac_addrs;
+ i < hw->mac.num_rar_entries; i++, mac_addr++) {
+ /* Skip the deleted MAC address */
+ if (i == index)
+ continue;
+ /* Skip NULL MAC addresses */
+ if (is_zero_ether_addr(mac_addr))
+ continue;
+ /* Skip the permanent MAC address */
+ if (memcmp(perm_addr, mac_addr, sizeof(struct ether_addr)) == 0)
+ continue;
+ diag = ixgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (diag != 0)
+ PMD_DRV_LOG(ERR,
+ "Adding again MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x failed "
+ "diag=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ diag);
+ }
+}
+
+/*
+ * add syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+ if (synqf & IXGBE_SYN_FILTER_ENABLE)
+ return -EINVAL;
+
+ synqf = (uint32_t)(((rx_queue << IXGBE_SYN_FILTER_QUEUE_SHIFT) &
+ IXGBE_SYN_FILTER_QUEUE) | IXGBE_SYN_FILTER_ENABLE);
+
+ if (filter->hig_pri)
+ synqf |= IXGBE_SYN_FILTER_SYNQFP;
+ else
+ synqf &= ~IXGBE_SYN_FILTER_SYNQFP;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ return 0;
+}
+
+/*
+ * remove syn filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_remove_syn_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+
+ synqf &= ~(IXGBE_SYN_FILTER_QUEUE | IXGBE_SYN_FILTER_ENABLE);
+
+ IXGBE_WRITE_REG(hw, IXGBE_SYNQF, synqf);
+ return 0;
+}
+
+/*
+ * get the syn filter's info
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * filter: pointer to the filter structure to be filled in.
+ * rx_queue: pointer to store the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_syn_filter(struct rte_eth_dev *dev,
+ struct rte_syn_filter *filter, uint16_t *rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t synqf;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ synqf = IXGBE_READ_REG(hw, IXGBE_SYNQF);
+ if (synqf & IXGBE_SYN_FILTER_ENABLE) {
+ filter->hig_pri = (synqf & IXGBE_SYN_FILTER_SYNQFP) ? 1 : 0;
+ *rx_queue = (uint16_t)((synqf & IXGBE_SYN_FILTER_QUEUE) >>
+ IXGBE_SYN_FILTER_QUEUE_SHIFT);
+ return 0;
+ }
+ return -ENOENT;
+}
+
+/*
+ * add an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is added.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_ethertype_filter *filter,
+ uint16_t rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t etqf, etqs = 0;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_ETQF_FILTERS ||
+ rx_queue >= IXGBE_MAX_RX_QUEUE_NUM)
+ return -EINVAL;
+
+ etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+ if (etqf & IXGBE_ETQF_FILTER_EN)
+ return -EINVAL; /* filter index is in use. */
+
+ etqf = 0;
+ etqf |= IXGBE_ETQF_FILTER_EN;
+ etqf |= (uint32_t)filter->ethertype;
+
+ if (filter->priority_en) {
+ if (filter->priority > IXGBE_ETQF_MAX_PRI)
+ return -EINVAL;
+ etqf |= (uint32_t)((filter->priority << IXGBE_ETQF_SHIFT) &
+ IXGBE_ETQF_UP);
+ etqf |= IXGBE_ETQF_UP_EN;
+ }
+ etqs |= (uint32_t)((rx_queue << IXGBE_ETQS_RX_QUEUE_SHIFT) &
+ IXGBE_ETQS_RX_QUEUE);
+ etqs |= IXGBE_ETQS_QUEUE_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), etqf);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), etqs);
+ return 0;
+}
+
+/*
+ * remove an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter to remove.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_remove_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_ETQF_FILTERS)
+ return -EINVAL;
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(index), 0);
+
+ return 0;
+}
+
+/*
+ * get an ethertype filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter to read.
+ * filter: pointer to the filter structure to be filled in.
+ * rx_queue: pointer to store the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_ethertype_filter(struct rte_eth_dev *dev,
+ uint16_t index, struct rte_ethertype_filter *filter,
+ uint16_t *rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t etqf, etqs;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_ETQF_FILTERS)
+ return -EINVAL;
+
+ etqf = IXGBE_READ_REG(hw, IXGBE_ETQF(index));
+ etqs = IXGBE_READ_REG(hw, IXGBE_ETQS(index));
+ if (etqf & IXGBE_ETQF_FILTER_EN) {
+ filter->ethertype = etqf & IXGBE_ETQF_ETHERTYPE;
+ filter->priority_en = (etqf & IXGBE_ETQF_UP_EN) ? 1 : 0;
+ if (filter->priority_en)
+ filter->priority = (etqf & IXGBE_ETQF_UP) >> IXGBE_ETQF_SHIFT;
+ *rx_queue = (etqs & IXGBE_ETQS_RX_QUEUE) >>
+ IXGBE_ETQS_RX_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static inline enum ixgbe_5tuple_protocol
+convert_protocol_type(uint8_t protocol_value)
+{
+ if (protocol_value == IPPROTO_TCP)
+ return IXGBE_FILTER_PROTOCOL_TCP;
+ else if (protocol_value == IPPROTO_UDP)
+ return IXGBE_FILTER_PROTOCOL_UDP;
+ else if (protocol_value == IPPROTO_SCTP)
+ return IXGBE_FILTER_PROTOCOL_SCTP;
+ else
+ return IXGBE_FILTER_PROTOCOL_NONE;
+}
+
+static inline uint8_t
+revert_protocol_type(enum ixgbe_5tuple_protocol protocol)
+{
+ if (protocol == IXGBE_FILTER_PROTOCOL_TCP)
+ return IPPROTO_TCP;
+ else if (protocol == IXGBE_FILTER_PROTOCOL_UDP)
+ return IPPROTO_UDP;
+ else if (protocol == IXGBE_FILTER_PROTOCOL_SCTP)
+ return IPPROTO_SCTP;
+ else
+ return 0;
+}
+
+/*
+ * add a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index at which the filter is added.
+ * filter: pointer to the filter that will be added.
+ * rx_queue: the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_add_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t ftqf, sdpqf = 0;
+ uint32_t l34timir = 0;
+ uint8_t mask = 0xff;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_FTQF_FILTERS ||
+ rx_queue >= IXGBE_MAX_RX_QUEUE_NUM ||
+ filter->priority > IXGBE_5TUPLE_MAX_PRI ||
+ filter->priority < IXGBE_5TUPLE_MIN_PRI)
+ return -EINVAL; /* filter index is out of range. */
+
+ if (filter->tcp_flags) {
+ PMD_INIT_LOG(INFO, "82599EB not tcp flags in 5tuple");
+ return -EINVAL;
+ }
+
+ ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+ if (ftqf & IXGBE_FTQF_QUEUE_ENABLE)
+ return -EINVAL; /* filter index is in use. */
+
+ ftqf = 0;
+ sdpqf = (uint32_t)(filter->dst_port << IXGBE_SDPQF_DSTPORT_SHIFT);
+ sdpqf = sdpqf | (filter->src_port & IXGBE_SDPQF_SRCPORT);
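+ /* SDPQF packs both ports into one register: the destination port
+ * shifted up by IXGBE_SDPQF_DSTPORT_SHIFT, the source port in the
+ * low-order bits.
+ */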
+
+ ftqf |= (uint32_t)(convert_protocol_type(filter->protocol) &
+ IXGBE_FTQF_PROTOCOL_MASK);
+ ftqf |= (uint32_t)((filter->priority & IXGBE_FTQF_PRIORITY_MASK) <<
+ IXGBE_FTQF_PRIORITY_SHIFT);
+ if (filter->src_ip_mask == 0) /* 0 means compare. */
+ mask &= IXGBE_FTQF_SOURCE_ADDR_MASK;
+ if (filter->dst_ip_mask == 0)
+ mask &= IXGBE_FTQF_DEST_ADDR_MASK;
+ if (filter->src_port_mask == 0)
+ mask &= IXGBE_FTQF_SOURCE_PORT_MASK;
+ if (filter->dst_port_mask == 0)
+ mask &= IXGBE_FTQF_DEST_PORT_MASK;
+ if (filter->protocol_mask == 0)
+ mask &= IXGBE_FTQF_PROTOCOL_COMP_MASK;
+ ftqf |= mask << IXGBE_FTQF_5TUPLE_MASK_SHIFT;
+ ftqf |= IXGBE_FTQF_POOL_MASK_EN;
+ ftqf |= IXGBE_FTQF_QUEUE_ENABLE;
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), filter->dst_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), filter->src_ip);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), sdpqf);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), ftqf);
+
+ l34timir |= IXGBE_L34T_IMIR_RESERVE;
+ l34timir |= (uint32_t)(rx_queue << IXGBE_L34T_IMIR_QUEUE_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), l34timir);
+ return 0;
+}
+
+/*
+ * remove a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter to remove.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_remove_5tuple_filter(struct rte_eth_dev *dev,
+ uint16_t index)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_FTQF_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ IXGBE_WRITE_REG(hw, IXGBE_DAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SAQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_SDPQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_FTQF(index), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_L34T_IMIR(index), 0);
+ return 0;
+}
+
+/*
+ * get a 5tuple filter
+ *
+ * @param
+ * dev: Pointer to struct rte_eth_dev.
+ * index: the index of the filter to read.
+ * filter: pointer to the filter structure to be filled in.
+ * rx_queue: pointer to store the queue id the filter is assigned to.
+ *
+ * @return
+ * - On success, zero.
+ * - On failure, a negative value.
+ */
+static int
+ixgbe_get_5tuple_filter(struct rte_eth_dev *dev, uint16_t index,
+ struct rte_5tuple_filter *filter, uint16_t *rx_queue)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint32_t sdpqf, ftqf, l34timir;
+ uint8_t mask;
+ enum ixgbe_5tuple_protocol proto;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return -ENOSYS;
+
+ if (index >= IXGBE_MAX_FTQF_FILTERS)
+ return -EINVAL; /* filter index is out of range. */
+
+ ftqf = IXGBE_READ_REG(hw, IXGBE_FTQF(index));
+ if (ftqf & IXGBE_FTQF_QUEUE_ENABLE) {
+ proto = (enum ixgbe_5tuple_protocol)(ftqf & IXGBE_FTQF_PROTOCOL_MASK);
+ filter->protocol = revert_protocol_type(proto);
+ filter->priority = (ftqf >> IXGBE_FTQF_PRIORITY_SHIFT) &
+ IXGBE_FTQF_PRIORITY_MASK;
+ mask = (uint8_t)((ftqf >> IXGBE_FTQF_5TUPLE_MASK_SHIFT) &
+ IXGBE_FTQF_5TUPLE_MASK_MASK);
+ filter->src_ip_mask =
+ (mask & IXGBE_FTQF_SOURCE_ADDR_MASK) ? 1 : 0;
+ filter->dst_ip_mask =
+ (mask & IXGBE_FTQF_DEST_ADDR_MASK) ? 1 : 0;
+ filter->src_port_mask =
+ (mask & IXGBE_FTQF_SOURCE_PORT_MASK) ? 1 : 0;
+ filter->dst_port_mask =
+ (mask & IXGBE_FTQF_DEST_PORT_MASK) ? 1 : 0;
+ filter->protocol_mask =
+ (mask & IXGBE_FTQF_PROTOCOL_COMP_MASK) ? 1 : 0;
+
+ sdpqf = IXGBE_READ_REG(hw, IXGBE_SDPQF(index));
+ filter->dst_port = (sdpqf & IXGBE_SDPQF_DSTPORT) >>
+ IXGBE_SDPQF_DSTPORT_SHIFT;
+ filter->src_port = sdpqf & IXGBE_SDPQF_SRCPORT;
+ filter->dst_ip = IXGBE_READ_REG(hw, IXGBE_DAQF(index));
+ filter->src_ip = IXGBE_READ_REG(hw, IXGBE_SAQF(index));
+
+ l34timir = IXGBE_READ_REG(hw, IXGBE_L34T_IMIR(index));
+ *rx_queue = (l34timir & IXGBE_L34T_IMIR_QUEUE) >>
+ IXGBE_L34T_IMIR_QUEUE_SHIFT;
+ return 0;
+ }
+ return -ENOENT;
+}
+
+static int
+ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct ixgbe_hw *hw;
+ uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
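+ /* e.g. the default MTU of 1500 gives max_frame = 1500 + 14 + 4 = 1518
+ * (Ethernet header plus CRC). */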
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
+ if ((mtu < ETHER_MIN_MTU) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+ return -EINVAL;
+
+ /* Refuse an MTU that requires the support of scattered packets
+ * when this feature has not been enabled before. */
+ if (!dev->data->scattered_rx &&
+ (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
+ return -EINVAL;
+
+ /*
+ * When supported by the underlying PF driver, use the IXGBE_VF_SET_MTU
+ * request of the version 2.0 of the mailbox API.
+ * For now, use the IXGBE_VF_SET_LPE request of the version 1.0
+ * of the mailbox API.
+ * This call to the IXGBE_VF_SET_LPE action won't work with ixgbe pf
+ * drivers prior to 3.11.33, which contains the following change:
+ * "ixgbe: Enable jumbo frames support w/ SR-IOV"
+ */
+ ixgbevf_rlpml_set_vf(hw, max_frame);
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+ return 0;
+}
+
+static struct rte_driver rte_ixgbe_driver = {
+ .type = PMD_PDEV,
+ .init = rte_ixgbe_pmd_init,
+};
+
+static struct rte_driver rte_ixgbevf_driver = {
+ .type = PMD_PDEV,
+ .init = rte_ixgbevf_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(rte_ixgbe_driver);
+PMD_REGISTER_DRIVER(rte_ixgbevf_driver);