+ rte_free(dev->data->mac_addrs);
+ dev->data->mac_addrs = NULL;
+
+ rte_intr_disable(intr_handle);
+ rte_intr_callback_unregister(intr_handle,
+ txgbevf_dev_interrupt_handler, dev);
+
+ return ret;
+}
+
+/*
+ * Reset VF device
+ */
+static int
+txgbevf_dev_reset(struct rte_eth_dev *dev)
+{
+ int ret;
+
+ ret = eth_txgbevf_dev_uninit(dev);
+ if (ret)
+ return ret;
+
+ ret = eth_txgbevf_dev_init(dev);
+
+ return ret;
+}
+
+static void txgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
+ int i = 0, j = 0, vfta = 0, mask = 1;
+
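+ /* Replay the shadow VFTA: each 32-bit word covers 32 consecutive
+ * VLAN IDs, so bit j of word i corresponds to VLAN ID (i << 5) + j.
+ */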
+ for (i = 0; i < TXGBE_VFTA_SIZE; i++) {
+ vfta = shadow_vfta->vfta[i];
+ if (vfta) {
+ mask = 1;
+ for (j = 0; j < 32; j++) {
+ if (vfta & mask)
+ txgbe_set_vfta(hw, (i << 5) + j, 0,
+ on, false);
+ mask <<= 1;
+ }
+ }
+ }
+}
+
+static int
+txgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_vfta *shadow_vfta = TXGBE_DEV_VFTA(dev);
+ uint32_t vid_idx = 0;
+ uint32_t vid_bit = 0;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ /* vind is not used in the VF driver; set it to 0 (see txgbe_set_vfta_vf) */
+ ret = hw->mac.set_vfta(hw, vlan_id, 0, !!on, false);
+ if (ret) {
+ PMD_INIT_LOG(ERR, "Unable to set VF vlan");
+ return ret;
+ }
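+ /* The shadow VFTA is a bitmap indexed by VLAN ID: bits 11:5 select
+ * the 32-bit word, bits 4:0 select the bit within it.
+ */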
+ vid_idx = (uint32_t)((vlan_id >> 5) & 0x7F);
+ vid_bit = (uint32_t)(1 << (vlan_id & 0x1F));
+
+ /* Save what we set and restore it after device reset */
+ if (on)
+ shadow_vfta->vfta[vid_idx] |= vid_bit;
+ else
+ shadow_vfta->vfta[vid_idx] &= ~vid_bit;
+
+ return 0;
+}
+
+static void
+txgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t ctrl;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (queue >= hw->mac.max_rx_queues)
+ return;
+
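+ /* Disable the queue while the VLAN strip bit is updated: save the
+ * queue state, clear RXCFG, then restore it with the new VLAN and
+ * enable bits.
+ */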
+ ctrl = rd32(hw, TXGBE_RXCFG(queue));
+ txgbe_dev_save_rx_queue(hw, queue);
+ if (on)
+ ctrl |= TXGBE_RXCFG_VLAN;
+ else
+ ctrl &= ~TXGBE_RXCFG_VLAN;
+ wr32(hw, TXGBE_RXCFG(queue), 0);
+ msec_delay(100);
+ txgbe_dev_store_rx_queue(hw, queue);
+ wr32m(hw, TXGBE_RXCFG(queue),
+ TXGBE_RXCFG_VLAN | TXGBE_RXCFG_ENA, ctrl);
+
+ txgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
+}
+
+static int
+txgbevf_vlan_offload_config(struct rte_eth_dev *dev, int mask)
+{
+ struct txgbe_rx_queue *rxq;
+ uint16_t i;
+ int on = 0;
+
+ /* The VF only supports the HW VLAN strip offload; other offloads are not supported */
+ if (mask & ETH_VLAN_STRIP_MASK) {
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ on = !!(rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ txgbevf_vlan_strip_queue_set(dev, i, on);
+ }
+ }
+
+ return 0;
+}
+
+static int
+txgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
+{
+ txgbe_config_vlan_strip_on_all_queues(dev, mask);
+
+ txgbevf_vlan_offload_config(dev, mask);
+
+ return 0;
+}
+
+static int
+txgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t vec = TXGBE_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = TXGBE_RX_VEC_START;
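+ /* Unmask the vector in the software shadow, then push it to hardware
+ * through the VFIMC register.
+ */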
+ intr->mask_misc &= ~(1 << vec);
+ RTE_SET_USED(queue_id);
+ wr32(hw, TXGBE_VFIMC, ~intr->mask_misc);
+
+ rte_intr_enable(intr_handle);
+
+ return 0;
+}
+
+static int
+txgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
+{
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint32_t vec = TXGBE_MISC_VEC_ID;
+
+ if (rte_intr_allow_others(intr_handle))
+ vec = TXGBE_RX_VEC_START;
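+ /* Mask the vector in the software shadow, then push it to hardware
+ * through the VFIMS register.
+ */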
+ intr->mask_misc |= (1 << vec);
+ RTE_SET_USED(queue_id);
+ wr32(hw, TXGBE_VFIMS, intr->mask_misc);
+
+ return 0;
+}
+
+static void
+txgbevf_set_ivar_map(struct txgbe_hw *hw, int8_t direction,
+ uint8_t queue, uint8_t msix_vector)
+{
+ uint32_t tmp, idx;
+
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= TXGBE_VFIVAR_VLD;
+ tmp = rd32(hw, TXGBE_VFIVARMISC);
+ tmp &= ~0xFF;
+ tmp |= msix_vector;
+ wr32(hw, TXGBE_VFIVARMISC, tmp);
+ } else {
+ /* rx or tx cause */
+ /* Workaround for lost ICR */
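+ /* Each VFIVAR register covers two queues: the queue's parity selects
+ * the low or high 16 bits, and direction selects the 8-bit Rx/Tx
+ * entry within that half.
+ */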
+ idx = ((16 * (queue & 1)) + (8 * direction));
+ tmp = rd32(hw, TXGBE_VFIVAR(queue >> 1));
+ tmp &= ~(0xFF << idx);
+ tmp |= (msix_vector << idx);
+ wr32(hw, TXGBE_VFIVAR(queue >> 1), tmp);
+ }
+}
+
+static void
+txgbevf_configure_msix(struct rte_eth_dev *dev)
+{
+ struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+ struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t q_idx;
+ uint32_t vector_idx = TXGBE_MISC_VEC_ID;
+ uint32_t base = TXGBE_MISC_VEC_ID;
+
+ /* Configure VF other cause ivar */
+ txgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+
+ /* Don't configure the MSI-X registers if no mapping has been set up
+ * between interrupt vectors and event fds.
+ */
+ if (!rte_intr_dp_is_en(intr_handle))
+ return;
+
+ if (rte_intr_allow_others(intr_handle)) {
+ base = TXGBE_RX_VEC_START;
+ vector_idx = TXGBE_RX_VEC_START;
+ }
+
+ /* Configure all RX queues of VF */
+ for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
+ /* Force all queues to use vector 0,
+ * as TXGBE_VF_MAXMSIVECTOR = 1
+ */
+ txgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
+ intr_handle->intr_vec[q_idx] = vector_idx;
+ if (vector_idx < base + intr_handle->nb_efd - 1)
+ vector_idx++;
+ }
+
+ /* As the RX queue configuration above shows, all queues use vector 0.
+ * Set only the ITR value of TXGBE_MISC_VEC_ID.
+ */
+ wr32(hw, TXGBE_ITR(TXGBE_MISC_VEC_ID),
+ TXGBE_ITR_IVAL(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT)
+ | TXGBE_ITR_WRDSA);
+}
+
+static int
+txgbevf_add_mac_addr(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
+ __rte_unused uint32_t index,
+ __rte_unused uint32_t pool)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int err;
+
+ /*
+ * On a VF, adding the same MAC address again is not an idempotent
+ * operation. Trap this case to avoid exhausting the [very limited]
+ * set of PF resources used to store VF MAC addresses.
+ */
+ if (memcmp(hw->mac.perm_addr, mac_addr,
+ sizeof(struct rte_ether_addr)) == 0)
+ return -1;
+ err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (err != 0)
+ PMD_DRV_LOG(ERR, "Unable to add MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x - err=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ err);
+ return err;
+}
+
+static void
+txgbevf_remove_mac_addr(struct rte_eth_dev *dev, uint32_t index)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct rte_ether_addr *perm_addr =
+ (struct rte_ether_addr *)hw->mac.perm_addr;
+ struct rte_ether_addr *mac_addr;
+ uint32_t i;
+ int err;
+
+ /*
+ * The TXGBE_VF_SET_MACVLAN command of the txgbe-pf driver does
+ * not support the deletion of a given MAC address.
+ * Instead, it requires deleting all MAC addresses, then re-adding
+ * them all except the one to be deleted.
+ */
+ (void)txgbevf_set_uc_addr_vf(hw, 0, NULL);
+
+ /*
+ * Re-add all MAC addresses, except the deleted one and the
+ * permanent MAC address.
+ */
+ for (i = 0, mac_addr = dev->data->mac_addrs;
+ i < hw->mac.num_rar_entries; i++, mac_addr++) {
+ /* Skip the deleted MAC address */
+ if (i == index)
+ continue;
+ /* Skip NULL MAC addresses */
+ if (rte_is_zero_ether_addr(mac_addr))
+ continue;
+ /* Skip the permanent MAC address */
+ if (memcmp(perm_addr, mac_addr,
+ sizeof(struct rte_ether_addr)) == 0)
+ continue;
+ err = txgbevf_set_uc_addr_vf(hw, 2, mac_addr->addr_bytes);
+ if (err != 0)
+ PMD_DRV_LOG(ERR,
+ "Adding again MAC address "
+ "%02x:%02x:%02x:%02x:%02x:%02x failed "
+ "err=%d",
+ mac_addr->addr_bytes[0],
+ mac_addr->addr_bytes[1],
+ mac_addr->addr_bytes[2],
+ mac_addr->addr_bytes[3],
+ mac_addr->addr_bytes[4],
+ mac_addr->addr_bytes[5],
+ err);
+ }
+}
+
+static int
+txgbevf_set_default_mac_addr(struct rte_eth_dev *dev,
+ struct rte_ether_addr *addr)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+
+ hw->mac.set_rar(hw, 0, (void *)addr, 0, 0);
+
+ return 0;
+}
+
+static int
+txgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
+{
+ struct txgbe_hw *hw;
+ uint32_t max_frame = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ struct rte_eth_dev_data *dev_data = dev->data;
+
+ hw = TXGBE_DEV_HW(dev);
+
+ if (mtu < RTE_ETHER_MIN_MTU ||
+ max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
+ return -EINVAL;
+
+ /* If the device is started, refuse an MTU that requires scattered Rx
+ * support when that feature has not already been enabled.
+ */
+ if (dev_data->dev_started && !dev_data->scattered_rx &&
+ (max_frame + 2 * TXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ PMD_INIT_LOG(ERR, "Stop port first.");
+ return -EINVAL;
+ }
+
+ /*
+ * When supported by the underlying PF driver, the TXGBE_VF_SET_MTU
+ * request of mailbox API version 2.0 should be used.
+ * For now, use the TXGBE_VF_SET_LPE request of mailbox API
+ * version 1.0.
+ */
+ if (txgbevf_rlpml_set_vf(hw, max_frame))
+ return -EINVAL;
+
+ /* update max frame size */
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
+ return 0;
+}
+
+static int
+txgbevf_get_reg_length(struct rte_eth_dev *dev __rte_unused)
+{
+ int count = 0;
+ int g_ind = 0;
+ const struct reg_info *reg_group;
+
+ while ((reg_group = txgbevf_regs[g_ind++]))
+ count += txgbe_regs_group_count(reg_group);
+
+ return count;
+}
+
+static int
+txgbevf_get_regs(struct rte_eth_dev *dev,
+ struct rte_dev_reg_info *regs)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ uint32_t *data = regs->data;
+ int g_ind = 0;
+ int count = 0;
+ const struct reg_info *reg_group;
+
+ if (data == NULL) {
+ regs->length = txgbevf_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
+ /* Support only full register dump */
+ if (regs->length == 0 ||
+ regs->length == (uint32_t)txgbevf_get_reg_length(dev)) {
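+ /* Encode MAC type, revision ID and device ID into the reported
+ * register set version.
+ */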
+ regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
+ hw->device_id;
+ while ((reg_group = txgbevf_regs[g_ind++]))
+ count += txgbe_read_regs_group(dev, &data[count],
+ reg_group);
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
+txgbevf_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret;
+
+ switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_PROMISC)) {
+ case 0:
+ ret = 0;
+ break;
+ case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
+ ret = -ENOTSUP;
+ break;
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+txgbevf_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret;
+
+ switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_NONE)) {
+ case 0:
+ ret = 0;
+ break;
+ case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
+ ret = -ENOTSUP;
+ break;
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+txgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret;
+
+ switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_ALLMULTI)) {
+ case 0:
+ ret = 0;
+ break;
+ case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
+ ret = -ENOTSUP;
+ break;
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+txgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ int ret;
+
+ switch (hw->mac.update_xcast_mode(hw, TXGBEVF_XCAST_MODE_MULTI)) {
+ case 0:
+ ret = 0;
+ break;
+ case TXGBE_ERR_FEATURE_NOT_SUPPORTED:
+ ret = -ENOTSUP;
+ break;
+ default:
+ ret = -EAGAIN;
+ break;
+ }
+
+ return ret;
+}
+
+static void txgbevf_mbx_process(struct rte_eth_dev *dev)
+{
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ u32 in_msg = 0;
+
+ /* peek the message first */
+ in_msg = rd32(hw, TXGBE_VFMBX);
+
+ /* PF reset VF event */
+ if (in_msg == TXGBE_PF_CONTROL_MSG) {
+ /* dummy mbx read to ack pf */
+ if (txgbe_read_mbx(hw, &in_msg, 1, 0))
+ return;
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
+}
+
+static int
+txgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
+{
+ uint32_t eicr;
+ struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
+ struct txgbe_interrupt *intr = TXGBE_DEV_INTR(dev);
+ txgbevf_intr_disable(dev);
+
+ /* read-on-clear nic registers here */
+ eicr = rd32(hw, TXGBE_VFICR);
+ intr->flags = 0;
+
+ /* only one misc vector supported - mailbox */
+ eicr &= TXGBE_VFICR_MASK;
+ /* Workaround for lost ICR */
+ intr->flags |= TXGBE_FLAG_MAILBOX;
+