struct ixgbe_vf_info **vfinfo =
IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
struct ixgbe_mirror_info *mirror_info =
- IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private);
struct ixgbe_uta_info *uta_info =
- IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private);
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
uint16_t vf_num;
PMD_INIT_FUNC_TRACE();
RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
- if (0 == (vf_num = dev_num_vf(eth_dev)))
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
return;
*vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0);
if (*vfinfo == NULL)
rte_panic("Cannot allocate memory for private VF data\n");
- memset(mirror_info,0,sizeof(struct ixgbe_mirror_info));
- memset(uta_info,0,sizeof(struct ixgbe_uta_info));
+ memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
+ memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
hw->mac.mc_filter_type = 0;
if (vf_num >= ETH_32_POOLS) {
/* set mb interrupt mask */
ixgbe_mb_intr_setup(eth_dev);
-
- return;
}
void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
uint32_t vlanctrl;
int i;
- if (0 == (vf_num = dev_num_vf(eth_dev)))
+ vf_num = dev_num_vf(eth_dev);
+ if (vf_num == 0)
return -1;
/* enable VMDq and set the default pool for PF */
vfre_slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;
/* Enable pools reserved to PF only */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0) << vfre_offset);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot), (~0U) << vfre_offset);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(vfre_slot ^ 1), vfre_slot - 1);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0) << vfre_offset);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot), (~0U) << vfre_offset);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(vfre_slot ^ 1), vfre_slot - 1);
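For context on the two pairs of writes above: the VFRE/VFTE pool-enable bits are split across two 32-bit register slots, so the first PF-owned pool index has to be decomposed into a slot number and a bit offset. A minimal sketch of that decomposition, assuming the usual VFRE_SHIFT/VFRE_MASK definitions (32 pools per slot); the helper name pf_pool_mask is illustrative only:

#include <stdint.h>

#define VFRE_SHIFT 5                                    /* 32 pools per slot */
#define VFRE_MASK  ((uint32_t)((1U << VFRE_SHIFT) - 1))

/* Illustrative helper: pools 0..vf_num-1 belong to the VFs, so the bits at
 * or above vf_num in the chosen slot are the pools reserved to the PF. */
static void pf_pool_mask(uint16_t vf_num, uint32_t *slot, uint32_t *mask)
{
	*slot = (vf_num >> VFRE_SHIFT) > 0 ? 1 : 0;    /* which 32-bit slot */
	*mask = (~0U) << (vf_num & VFRE_MASK);         /* PF-only pool bits */
}

The slot that holds no PF pools then simply receives vfre_slot - 1, i.e. all ones when every VF fits in slot 0 and zero when the VFs spill into slot 1, which is what the two `vfre_slot ^ 1` writes express.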
/* PFDMA Tx General Switch Control Enables VMDQ loopback */
}
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
- IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
- /*
+ /*
* enable vlan filtering and allow all vlan tags through
*/
- vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
+ vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
- /* VFTA - enable all vlan filters */
- for (i = 0; i < IXGBE_MAX_VFTA; i++) {
- IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
- }
+ /* VFTA - enable all vlan filters */
+ for (i = 0; i < IXGBE_MAX_VFTA; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF);
/* Enable MAC Anti-Spoofing */
hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num);
int rar_entry = hw->mac.num_rar_entries - (vf + 1);
uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
- if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
}
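The rar_entry arithmetic above hands out receive-address-table slots from the top of the table downward, one per VF, so entries allocated from index 0 for the PF are never overwritten. A small illustration, assuming a hypothetical table size of 128 entries (the value hw->mac.num_rar_entries typically reports on 82599-class parts):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t num_rar_entries = 128;  /* stand-in for hw->mac.num_rar_entries */
	uint16_t vf;

	/* VF 0 -> RAR[127], VF 1 -> RAR[126], and so on down the table */
	for (vf = 0; vf < 4; vf++)
		printf("VF %u -> RAR[%d]\n", (unsigned int)vf,
		       (int)(num_rar_entries - (vf + 1)));
	return 0;
}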
if (add)
	vfinfo[vf].vlan_count++;
else if (vfinfo[vf].vlan_count)
vfinfo[vf].vlan_count--;
- return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add);
+ return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false);
}
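The extra false argument matches the updated shared-code entry point behind hw->mac.ops.set_vfta, which gained a fifth vlvf_bypass parameter. The prototype below is an assumption taken from the base-driver headers, not part of this patch; passing false is intended to preserve the behaviour of the old four-argument call:

/* Assumed prototype from the updated ixgbe shared code; vlvf_bypass selects
 * whether the VLVF/VLVFB pool-table update may be skipped when the VFTA bit
 * is changed. The call site above passes false, keeping the previous path. */
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			   bool vlan_on, bool vlvf_bypass);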
static int
/* perform VF reset */
if (msgbuf[0] == IXGBE_VF_RESET) {
int ret = ixgbe_vf_reset(dev, vf, msgbuf);
+
vfinfo[vf].clear_to_send = true;
return ret;
}