X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_pmd_ixgbe%2Fixgbe_pf.c;h=dbda9b5ac7dd130eb5a7c9ea85ae24b32fce516c;hb=4ae39dfa69ad9a1ae6174f52c60c187d2843402b;hp=a11a9699b274a34028c82fbb18a71d7ea6978c03;hpb=00e30184daa0b2a99985581c860e665ee59eb76b;p=dpdk.git

diff --git a/lib/librte_pmd_ixgbe/ixgbe_pf.c b/lib/librte_pmd_ixgbe/ixgbe_pf.c
index a11a9699b2..dbda9b5ac7 100644
--- a/lib/librte_pmd_ixgbe/ixgbe_pf.c
+++ b/lib/librte_pmd_ixgbe/ixgbe_pf.c
@@ -1,35 +1,34 @@
 /*-
  *   BSD LICENSE
- * 
- *   Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ *
+ *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
  *   All rights reserved.
- * 
- *   Redistribution and use in source and binary forms, with or without 
- *   modification, are permitted provided that the following conditions 
+ *
+ *   Redistribution and use in source and binary forms, with or without
+ *   modification, are permitted provided that the following conditions
  *   are met:
- * 
- *     * Redistributions of source code must retain the above copyright 
+ *
+ *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright 
- *       notice, this list of conditions and the following disclaimer in 
- *       the documentation and/or other materials provided with the 
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in
+ *       the documentation and/or other materials provided with the
  *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its 
- *       contributors may be used to endorse or promote products derived 
+ *     * Neither the name of Intel Corporation nor the names of its
+ *       contributors may be used to endorse or promote products derived
  *       from this software without specific prior written permission.
- * 
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
+ *
+ *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- * 
  */
 
 #include <stdio.h>
@@ -54,17 +53,8 @@
 #include "ixgbe_ethdev.h"
 
 #define IXGBE_MAX_VFTA     (128)
-
-static inline 
-void eth_random_addr(uint8_t *addr)
-{
-	uint64_t rand = rte_rand();
-	uint8_t *p = (uint8_t*)&rand;
-
-	rte_memcpy(addr, p, ETHER_ADDR_LEN);
-	addr[0] &= 0xfe;	/* clear multicast bit */
-	addr[0] |= 0x02;	/* set local assignment bit (IEEE802) */
-}
+#define IXGBE_VF_MSG_SIZE_DEFAULT 1
+#define IXGBE_VF_GET_QUEUE_MSG_SIZE 5
 
 static inline uint16_t
 dev_num_vf(struct rte_eth_dev *eth_dev)
@@ -72,18 +62,18 @@ dev_num_vf(struct rte_eth_dev *eth_dev)
 	return eth_dev->pci_dev->max_vfs;
 }
 
-static inline 
+static inline
 int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
 {
 	unsigned char vf_mac_addr[ETHER_ADDR_LEN];
-	struct ixgbe_vf_info *vfinfo = 
+	struct ixgbe_vf_info *vfinfo =
 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
 	uint16_t vfn;
 
 	for (vfn = 0; vfn < vf_num; vfn++) {
 		eth_random_addr(vf_mac_addr);
 		/* keep the random address as default */
-		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 
+		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
 			ETHER_ADDR_LEN);
 	}
 
@@ -103,9 +93,13 @@ ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
 
 void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 {
-	struct ixgbe_vf_info **vfinfo = 
+	struct ixgbe_vf_info **vfinfo =
 		IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
-	struct ixgbe_hw *hw = 
+	struct ixgbe_mirror_info *mirror_info =
+		IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+	struct ixgbe_uta_info *uta_info =
+		IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 	uint16_t vf_num;
 	uint8_t nb_queue;
@@ -120,6 +114,10 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");
 
+	memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
+	memset(uta_info,0,sizeof(struct ixgbe_uta_info));
+	hw->mac.mc_filter_type = 0;
+
 	if (vf_num >= ETH_32_POOLS) {
 		nb_queue = 2;
 		RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
@@ -165,14 +163,14 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
 	vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
 	vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
-	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx 
+	vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
 		<< IXGBE_VT_CTL_POOL_SHIFT;
 	vtctl |= IXGBE_VT_CTL_REPLEN;
 	IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);
 
-	vfre_offset = vf_num & VFRE_MASK; 
+	vfre_offset = vf_num & VFRE_MASK;
 	vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
-	
+
 	/* Enable pools reserved to PF only */
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0) << vfre_offset);
 	IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
@@ -197,11 +195,11 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	 */
 	gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
 	gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;
-	
+
 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
 	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
 	gpie |= IXGBE_GPIE_MSIX_MODE;
-	
+
 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
 	case ETH_64_POOLS:
 		gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
@@ -220,8 +218,8 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
 	IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
 
-	/* 
-	 * enable vlan filtering and allow all vlan tags through 
+	/*
+	 * enable vlan filtering and allow all vlan tags through
 	 */
 	vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
 	vlanctrl |= IXGBE_VLNCTRL_VFE ; /* enable vlan filters */
@@ -231,11 +229,11 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	for (i = 0; i < IXGBE_MAX_VFTA; i++) {
 		IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
 	}
-	
+
 	/* Enable MAC Anti-Spoofing */
 	hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
 
-	/* set flow control threshold to max to avoid tx switch hang */ 
+	/* set flow control threshold to max to avoid tx switch hang */
 	for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
 		fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
@@ -245,10 +243,10 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
 	return 0;
 }
 
-static void 
+static void
 set_rx_mode(struct rte_eth_dev *dev)
 {
-	struct rte_eth_dev_data *dev_data = 
+	struct rte_eth_dev_data *dev_data =
 		(struct rte_eth_dev_data*)dev->data->dev_private;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
@@ -267,8 +265,6 @@ set_rx_mode(struct rte_eth_dev *dev)
 	if (dev_data->promiscuous) {
 		fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
 		vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
-		/* don't hardware filter vlans in promisc mode */
-		ixgbe_vlan_hw_filter_disable(dev);
 	} else {
 		if (dev_data->all_multicast) {
 			fctrl |= IXGBE_FCTRL_MPE;
@@ -276,7 +272,6 @@ set_rx_mode(struct rte_eth_dev *dev)
 		} else {
 			vmolr |= IXGBE_VMOLR_ROMPE;
 		}
-		ixgbe_vlan_hw_filter_enable(dev);
 	}
 
 	if (hw->mac.type != ixgbe_mac_82598EB) {
@@ -294,32 +289,32 @@ set_rx_mode(struct rte_eth_dev *dev)
 		ixgbe_vlan_hw_strip_disable_all(dev);
 }
 
-static inline void 
+static inline void
 ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
 {
-	struct ixgbe_hw *hw = 
+	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo = 
+	struct ixgbe_vf_info *vfinfo =
 		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
 
-	vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE | 
+	vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
 		IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
 
 	IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);
-	
+
 	/* reset multicast table array for vf */
 	vfinfo[vf].num_vf_mc_hashes = 0;
 
 	/* reset rx mode */
 	set_rx_mode(dev);
-	
+
 	hw->mac.ops.clear_rar(hw, rar_entry);
 }
 
-static inline void 
+static inline void
 ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -352,7 +347,7 @@ static int
 ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo = 
+	struct ixgbe_vf_info *vfinfo =
 		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
 	unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
@@ -379,7 +374,7 @@ static int
 ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo = 
+	struct ixgbe_vf_info *vfinfo =
 		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
@@ -395,9 +390,9 @@ static int
 ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo = 
+	struct ixgbe_vf_info *vfinfo =
 		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> 
+	int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
 		IXGBE_VT_MSGINFO_SHIFT;
 	uint16_t *hash_list = (uint16_t *)&msgbuf[1];
 	uint32_t mta_idx;
@@ -407,7 +402,7 @@ ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32
 	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
 	uint32_t reg_val;
 	int i;
-	
+
 	/* only so many hash values supported */
 	nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);
 
@@ -418,7 +413,7 @@ ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32
 	}
 
 	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
-		mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT) 
+		mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
 			& IXGBE_MTA_INDEX_MASK;
 		mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
 		reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
@@ -434,7 +429,7 @@ ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
 	int add, vid;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	struct ixgbe_vf_info *vfinfo = 
+	struct ixgbe_vf_info *vfinfo =
 		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
 
 	add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
@@ -448,7 +443,7 @@ ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
 }
 
-static int 
+static int
 ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -456,11 +451,13 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms
 	uint32_t max_frs;
 	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
 
-	/* Only X540 supports jumbo frames in IOV mode */
-	if (hw->mac.type != ixgbe_mac_X540)
+	/* X540 and X550 support jumbo frames in IOV mode */
+	if (hw->mac.type != ixgbe_mac_X540 &&
+		hw->mac.type != ixgbe_mac_X550 &&
+		hw->mac.type != ixgbe_mac_X550EM_x)
 		return -1;
 
-	if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN)) 
+	if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
 		return -1;
 
 	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
@@ -473,17 +470,73 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *ms
 	return 0;
 }
 
-static int 
+static int
+ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	uint32_t api_version = msgbuf[1];
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+	switch (api_version) {
+	case ixgbe_mbox_api_10:
+	case ixgbe_mbox_api_11:
+		vfinfo[vf].api_version = (uint8_t)api_version;
+		return 0;
+	default:
+		break;
+	}
+
+	RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n",
+		api_version, vf);
+
+	return -1;
+}
+
+static int
+ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+	uint32_t default_q = vf * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+	/* Verify if the PF supports the mbox APIs version or not */
+	switch (vfinfo[vf].api_version) {
+	case ixgbe_mbox_api_20:
+	case ixgbe_mbox_api_11:
+		break;
+	default:
+		return -1;
+	}
+
+	/* Notify VF of Rx and Tx queue number */
+	msgbuf[IXGBE_VF_RX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+	msgbuf[IXGBE_VF_TX_QUEUES] = RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
+
+	/* Notify VF of default queue */
+	msgbuf[IXGBE_VF_DEF_QUEUE] = default_q;
+
+	/*
+	 * FIX ME if it needs fill msgbuf[IXGBE_VF_TRANS_VLAN]
+	 * for VLAN strip or VMDQ_DCB or VMDQ_DCB_RSS
+	 */
+
+	return 0;
+}
+
+static int
 ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 {
 	uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
+	uint16_t msg_size = IXGBE_VF_MSG_SIZE_DEFAULT;
 	uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
 	int32_t retval;
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
 
 	retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
 	if (retval) {
-		RTE_LOG(ERR, PMD, "Error mbx recv msg from VF %d\n", vf);
+		PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
 		return retval;
 	}
@@ -496,7 +549,9 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 
 	/* perform VF reset */
 	if (msgbuf[0] == IXGBE_VF_RESET) {
-		return ixgbe_vf_reset(dev, vf, msgbuf);
+		int ret = ixgbe_vf_reset(dev, vf, msgbuf);
+		vfinfo[vf].clear_to_send = true;
+		return ret;
 	}
 
 	/* check & process VF to PF mailbox message */
@@ -513,8 +568,15 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 	case IXGBE_VF_SET_VLAN:
 		retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
 		break;
+	case IXGBE_VF_API_NEGOTIATE:
+		retval = ixgbe_negotiate_vf_api(dev, vf, msgbuf);
+		break;
+	case IXGBE_VF_GET_QUEUES:
+		retval = ixgbe_get_vf_queues(dev, vf, msgbuf);
+		msg_size = IXGBE_VF_GET_QUEUE_MSG_SIZE;
+		break;
 	default:
-		RTE_LOG(DEBUG, PMD, "Unhandled Msg %8.8x\n", (unsigned) msgbuf[0]);
+		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
 		retval = IXGBE_ERR_MBX;
 		break;
 	}
@@ -527,25 +589,28 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 
 	msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
 
-	ixgbe_write_mbx(hw, msgbuf, 1, vf);
+	ixgbe_write_mbx(hw, msgbuf, msg_size, vf);
 
 	return retval;
 }
 
-static inline void 
+static inline void
 ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 {
 	uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
-	struct ixgbe_hw *hw = 
+	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vfinfo =
+		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
 
-	ixgbe_write_mbx(hw, &msg, 1, vf);
+	if (!vfinfo[vf].clear_to_send)
+		ixgbe_write_mbx(hw, &msg, 1, vf);
 }
 
 void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
 {
 	uint16_t vf;
-	struct ixgbe_hw *hw = 
+	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
 
 	for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {