/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_malloc.h>
#include <rte_random.h>
#define IXGBE_MAX_VFTA (128)
static inline uint16_t
dev_num_vf(struct rte_eth_dev *eth_dev)
{
    return eth_dev->pci_dev->max_vfs;
}
int ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
{
    unsigned char vf_mac_addr[ETHER_ADDR_LEN];
    struct ixgbe_vf_info *vfinfo =
        *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
    uint16_t vfn;

    for (vfn = 0; vfn < vf_num; vfn++) {
        eth_random_addr(vf_mac_addr);
        /* keep the random address as default */
        memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
               ETHER_ADDR_LEN);
    }

    return 0;
}
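/*
 * Note: the addresses generated above are the VFs' "permanent" MAC
 * addresses from the PF's point of view; each VF learns its address from
 * the VF_RESET mailbox reply assembled in ixgbe_vf_reset() below.
 */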
static inline int
ixgbe_mb_intr_setup(struct rte_eth_dev *dev)
{
    struct ixgbe_interrupt *intr =
        IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);

    /* unmask the PF/VF mailbox interrupt */
    intr->mask |= IXGBE_EICR_MAILBOX;

    return 0;
}
void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
{
    struct ixgbe_vf_info **vfinfo =
        IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
    struct ixgbe_mirror_info *mirror_info =
        IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
    struct ixgbe_uta_info *uta_info =
        IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
    struct ixgbe_hw *hw =
        IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
    uint16_t vf_num;
    uint8_t nb_queue;

    PMD_INIT_FUNC_TRACE();

    RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
    if (0 == (vf_num = dev_num_vf(eth_dev)))
        return;

    *vfinfo = rte_zmalloc("vf_info",
                          sizeof(struct ixgbe_vf_info) * vf_num, 0);
    if (*vfinfo == NULL)
        rte_panic("Cannot allocate memory for private VF data\n");

    memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
    memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
    hw->mac.mc_filter_type = 0;

    /* pick a pool layout wide enough for vf_num VFs plus the PF */
    if (vf_num >= ETH_32_POOLS) {
        nb_queue = 2;
        RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_64_POOLS;
    } else if (vf_num >= ETH_16_POOLS) {
        nb_queue = 4;
        RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_32_POOLS;
    } else {
        nb_queue = 8;
        RTE_ETH_DEV_SRIOV(eth_dev).active = ETH_16_POOLS;
    }

    RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = nb_queue;
    RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = vf_num;
    RTE_ETH_DEV_SRIOV(eth_dev).def_pool_q_idx = (uint16_t)(vf_num * nb_queue);

    ixgbe_vf_perm_addr_gen(eth_dev, vf_num);

    /* init_mailbox_params */
    hw->mbx.ops.init_params(hw);

    /* set mb interrupt mask */
    ixgbe_mb_intr_setup(eth_dev);
}
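/*
 * Worked example of the pool layout chosen above: with vf_num = 20,
 * ETH_32_POOLS is active and nb_queue = 4, so pools 0-19 belong to the
 * VFs, the PF uses pool 20 (def_vmdq_idx), and the PF's own queues start
 * at index 20 * 4 = 80 (def_pool_q_idx).
 */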
int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)
{
    uint32_t vtctl, fcrth;
    uint32_t vfre_slot, vfre_offset;
    uint32_t vlanctrl;
    int i;
    uint16_t vf_num;
    const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
    const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
    uint32_t gpie, gcr_ext;

    if (0 == (vf_num = dev_num_vf(eth_dev)))
        return -1;

    /* enable VMDq and set the default pool for PF */
    vtctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
    vtctl |= IXGBE_VMD_CTL_VMDQ_EN;
    vtctl &= ~IXGBE_VT_CTL_POOL_MASK;
    vtctl |= RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx
             << IXGBE_VT_CTL_POOL_SHIFT;
    vtctl |= IXGBE_VT_CTL_REPLEN;
    IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vtctl);

    vfre_offset = vf_num & VFRE_MASK;
    vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;

    /* enable only the pools reserved to the PF */
    IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0) << vfre_offset);
    IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
    IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0) << vfre_offset);
    IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);
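    /*
     * Worked example of the VFRE/VFTE arithmetic: each slot covers 32
     * pools.  With vf_num = 20, vfre_slot = 0 and vfre_offset = 20, so
     * VFRE(0) = ~0 << 20 enables pools 20-31 for the PF and
     * VFRE(1) = 0 - 1 = 0xFFFFFFFF enables pools 32-63.  With
     * vf_num = 40, vfre_slot = 1 and vfre_offset = 8, so VFRE(1) enables
     * pools 40-63 and VFRE(0) = 1 - 1 = 0 leaves pools 0-31 to the VFs.
     */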
    /* PFDMA Tx General Switch Control enables VMDQ loopback */
    IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);

    /* clear VMDq map to permanent rar 0 */
    hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL);

    /* clear VMDq map to scan rar 127 */
    IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(hw->mac.num_rar_entries), 0);
    IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(hw->mac.num_rar_entries), 0);

    /* set VMDq map to default PF pool */
    hw->mac.ops.set_vmdq(hw, 0, RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx);

    /*
     * SW must set GCR_EXT.VT_Mode the same as GPIE.VT_Mode
     */
    gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
    gcr_ext &= ~IXGBE_GCR_EXT_VT_MODE_MASK;

    gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
    gpie &= ~IXGBE_GPIE_VTMODE_MASK;
    gpie |= IXGBE_GPIE_MSIX_MODE;

    switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
    case ETH_64_POOLS:
        gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64;
        gpie |= IXGBE_GPIE_VTMODE_64;
        break;
    case ETH_32_POOLS:
        gcr_ext |= IXGBE_GCR_EXT_VT_MODE_32;
        gpie |= IXGBE_GPIE_VTMODE_32;
        break;
    case ETH_16_POOLS:
        gcr_ext |= IXGBE_GCR_EXT_VT_MODE_16;
        gpie |= IXGBE_GPIE_VTMODE_16;
        break;
    }

    IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
    IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);

    /*
     * enable vlan filtering and allow all vlan tags through
     */
    vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
    vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
    IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);

    /* VFTA - enable all vlan filters */
    for (i = 0; i < IXGBE_MAX_VFTA; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
    }

    /* MAC anti-spoofing is left disabled (FALSE) for all vf_num VFs */
    hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);

    /* set flow control threshold to max to avoid tx switch hang */
    for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
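        /*
         * fcrth is derived from the per-TC Rx packet-buffer size with a
         * small margin (32) subtracted, i.e. an effectively-maximal
         * high-water mark, matching the "avoid tx switch hang" note above.
         */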
        fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
        IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth);
    }

    return 0;
}
static void
set_rx_mode(struct rte_eth_dev *dev)
{
    struct rte_eth_dev_data *dev_data = dev->data;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
    uint16_t vfn = dev_num_vf(dev);

    /* Check for Promiscuous and All Multicast modes */
    fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);

    /* set all bits that we expect to always be set */
    fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
    fctrl |= IXGBE_FCTRL_BAM;

    /* clear the bits we are changing the status of */
    fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);

    if (dev_data->promiscuous) {
        fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
        vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
    } else {
        if (dev_data->all_multicast) {
            fctrl |= IXGBE_FCTRL_MPE;
            vmolr |= IXGBE_VMOLR_MPE;
        } else {
            vmolr |= IXGBE_VMOLR_ROMPE;
        }
    }

    /* update the PF pool's VMOLR, preserving its unrelated bits */
    if (hw->mac.type != ixgbe_mac_82598EB) {
        vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(vfn)) &
                 ~(IXGBE_VMOLR_MPE | IXGBE_VMOLR_ROMPE |
                   IXGBE_VMOLR_ROPE);
        IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vfn), vmolr);
    }

    IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

    if (dev->data->dev_conf.rxmode.hw_vlan_strip)
        ixgbe_vlan_hw_strip_enable_all(dev);
    else
        ixgbe_vlan_hw_strip_disable_all(dev);
}
static inline void
ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
{
    struct ixgbe_hw *hw =
        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ixgbe_vf_info *vfinfo =
        *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
    int rar_entry = hw->mac.num_rar_entries - (vf + 1);
    uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

    vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
              IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
    IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);

    IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0);

    /* reset multicast table array for vf */
    vfinfo[vf].num_vf_mc_hashes = 0;

    /* reset rx mode */
    set_rx_mode(dev);

    hw->mac.ops.clear_rar(hw, rar_entry);
}
static inline void
ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
{
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t reg;
    uint32_t reg_offset, vf_shift;
    const uint8_t VFRE_SHIFT = 5; /* VFRE 32 bits per slot */
    const uint8_t VFRE_MASK = (uint8_t)((1U << VFRE_SHIFT) - 1);

    vf_shift = vf & VFRE_MASK;
    reg_offset = (vf >> VFRE_SHIFT) > 0 ? 1 : 0;

    /* enable transmit and receive for vf */
    reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
    reg |= (1 << vf_shift);
    IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);

    reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
    reg |= (1 << vf_shift);
    IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);

    /* Enable counting of spoofed packets in the SSVPC register */
    reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
    reg |= (1 << vf_shift);
    IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);

    ixgbe_vf_reset_event(dev, vf);
}
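/*
 * The VF_RESET reply assembled below is IXGBE_VF_PERMADDR_MSG_LEN words:
 *   msgbuf[0]    IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK
 *   msgbuf[1..2] the VF's 6-byte permanent MAC address
 *   msgbuf[3]    the PF's multicast filter type (mc_filter_type)
 */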
static int
ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)
{
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ixgbe_vf_info *vfinfo =
        *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
    unsigned char *vf_mac = vfinfo[vf].vf_mac_addresses;
    int rar_entry = hw->mac.num_rar_entries - (vf + 1);
    uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

    ixgbe_vf_reset_msg(dev, vf);

    hw->mac.ops.set_rar(hw, rar_entry, vf_mac, vf, IXGBE_RAH_AV);

    /* reply to reset with ack and vf mac address */
    msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
    rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
    /*
     * Piggyback the multicast filter type so the VF can compute the
     * correct vectors
     */
    msgbuf[3] = hw->mac.mc_filter_type;
    ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);

    return 0;
}
static int
ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ixgbe_vf_info *vfinfo =
        *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
    int rar_entry = hw->mac.num_rar_entries - (vf + 1);
    uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

    if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
        rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, ETHER_ADDR_LEN);
        return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
                                   IXGBE_RAH_AV);
    }
    return -1;
}
static int
ixgbe_vf_set_multicast(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
{
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ixgbe_vf_info *vfinfo =
        *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
    int nb_entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
                     IXGBE_VT_MSGINFO_SHIFT;
    uint16_t *hash_list = (uint16_t *)&msgbuf[1];
    uint32_t mta_idx;
    uint32_t mta_shift;
    const uint32_t IXGBE_MTA_INDEX_MASK = 0x7F;
    const uint32_t IXGBE_MTA_BIT_SHIFT = 5;
    const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
    uint32_t reg_val;
    int i;
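    /*
     * Each 12-bit hash value selects one bit in the 128-entry MTA: the
     * upper 7 bits (masked with 0x7F) pick the 32-bit MTA register, the
     * lower 5 bits pick the bit within it.  For example, hash 0x0B35
     * sets bit 21 of MTA[89] (0x0B35 >> 5 = 89, 0x0B35 & 0x1F = 21).
     */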
    /* only so many hash values supported */
    nb_entries = RTE_MIN(nb_entries, IXGBE_MAX_VF_MC_ENTRIES);

    /* store the mc entries */
    vfinfo->num_vf_mc_hashes = (uint16_t)nb_entries;
    for (i = 0; i < nb_entries; i++) {
        vfinfo->vf_mc_hashes[i] = hash_list[i];
    }

    /* set the corresponding bit in the multicast table array */
    for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
        mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
                  & IXGBE_MTA_INDEX_MASK;
        mta_shift = vfinfo->vf_mc_hashes[i] & IXGBE_MTA_BIT_MASK;
        reg_val = IXGBE_READ_REG(hw, IXGBE_MTA(mta_idx));
        reg_val |= (1 << mta_shift);
        IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
    }

    return 0;
}
static int
ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
{
    int add, vid;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ixgbe_vf_info *vfinfo =
        *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));

    add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
          >> IXGBE_VT_MSGINFO_SHIFT;
    vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);

    if (add)
        vfinfo[vf].vlan_count++;
    else if (vfinfo[vf].vlan_count)
        vfinfo[vf].vlan_count--;
    return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
}
static int
ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
{
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    uint32_t new_mtu = msgbuf[1];
    uint32_t max_frs;
    int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

    /* X540 and X550 support jumbo frames in IOV mode */
    if (hw->mac.type != ixgbe_mac_X540 &&
        hw->mac.type != ixgbe_mac_X550 &&
        hw->mac.type != ixgbe_mac_X550EM_x)
        return -1;

    if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
        return -1;

    max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
               IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
    if (max_frs < new_mtu) {
        max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
        IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
    }

    return 0;
}
static int
ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
    uint16_t mbx_size = IXGBE_VFMAILBOX_SIZE;
    uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
    int32_t retval;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ixgbe_vf_info *vfinfo =
        *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

    retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
    if (retval) {
        PMD_DRV_LOG(ERR, "Error mbx recv msg from VF %d", vf);
        return retval;
    }

    /* do nothing if the message has already been processed */
    if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
        return retval;

    /* flush the ack before we write any messages back */
    IXGBE_WRITE_FLUSH(hw);

    /* perform VF reset */
    if (msgbuf[0] == IXGBE_VF_RESET) {
        int ret = ixgbe_vf_reset(dev, vf, msgbuf);
        vfinfo[vf].clear_to_send = true;
        return ret;
    }
    /* check & process VF to PF mailbox message */
    switch ((msgbuf[0] & 0xFFFF)) {
    case IXGBE_VF_SET_MAC_ADDR:
        retval = ixgbe_vf_set_mac_addr(dev, vf, msgbuf);
        break;
    case IXGBE_VF_SET_MULTICAST:
        retval = ixgbe_vf_set_multicast(dev, vf, msgbuf);
        break;
    case IXGBE_VF_SET_LPE:
        retval = ixgbe_set_vf_lpe(dev, vf, msgbuf);
        break;
    case IXGBE_VF_SET_VLAN:
        retval = ixgbe_vf_set_vlan(dev, vf, msgbuf);
        break;
    default:
        PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
        retval = IXGBE_ERR_MBX;
        break;
    }

    /* respond to the VF according to the message processing result */
    if (retval)
        msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
    else
        msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;

    msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;

    ixgbe_write_mbx(hw, msgbuf, 1, vf);

    return retval;
}
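/*
 * In the single-word reply above, ACK/NACK reports the result of this
 * particular message, while IXGBE_VT_MSGTYPE_CTS ("clear to send")
 * signals that the PF is up and will keep servicing the VF's mailbox.
 */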
static int
ixgbe_rcv_ack_from_vf(struct rte_eth_dev *dev, uint16_t vf)
{
    uint32_t msg = IXGBE_VT_MSGTYPE_NACK;
    struct ixgbe_hw *hw =
        IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct ixgbe_vf_info *vfinfo =
        *IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);

    if (!vfinfo[vf].clear_to_send)
        ixgbe_write_mbx(hw, &msg, 1, vf);

    return 0;
}
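/*
 * clear_to_send is only set once a VF_RESET has been processed (see
 * ixgbe_rcv_msg_from_vf above), so a VF that acks before completing the
 * reset handshake is answered with a NACK here.
 */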
void ixgbe_pf_mbx_process(struct rte_eth_dev *eth_dev)
{
    uint16_t vf;
    struct ixgbe_hw *hw =
        IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

    for (vf = 0; vf < dev_num_vf(eth_dev); vf++) {
        /* check & process vf function level reset */
        if (!ixgbe_check_for_rst(hw, vf))
            ixgbe_vf_reset_event(eth_dev, vf);

        /* check & process vf mailbox messages */
        if (!ixgbe_check_for_msg(hw, vf))
            ixgbe_rcv_msg_from_vf(eth_dev, vf);

        /* check & process acks from vf */
        if (!ixgbe_check_for_ack(hw, vf))
            ixgbe_rcv_ack_from_vf(eth_dev, vf);
    }
}
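/*
 * ixgbe_pf_mbx_process() is the PF-side entry point for the handlers
 * above; it is expected to run from the PF's interrupt path once the
 * mailbox interrupt unmasked in ixgbe_mb_intr_setup()
 * (IXGBE_EICR_MAILBOX) fires.
 */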