X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Figb_pf.c;h=5845bc2242b1ae2d1ac1c15a338a6b55cde954e0;hb=1fe1aaf2076f0b7b08adeca3b548af310f033452;hp=6a4d210fad3bf9d178361e89a9fb8af85c19d136;hpb=d15fcf76c8b76e12c4050609cd31927ee0864c5a;p=dpdk.git

diff --git a/drivers/net/e1000/igb_pf.c b/drivers/net/e1000/igb_pf.c
index 6a4d210fad..5845bc2242 100644
--- a/drivers/net/e1000/igb_pf.c
+++ b/drivers/net/e1000/igb_pf.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -127,6 +127,28 @@ void igb_pf_host_init(struct rte_eth_dev *eth_dev)
 	return;
 }
 
+void igb_pf_host_uninit(struct rte_eth_dev *dev)
+{
+	struct e1000_vf_info **vfinfo;
+	uint16_t vf_num;
+
+	PMD_INIT_FUNC_TRACE();
+
+	vfinfo = E1000_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
+
+	RTE_ETH_DEV_SRIOV(dev).active = 0;
+	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 0;
+	RTE_ETH_DEV_SRIOV(dev).def_vmdq_idx = 0;
+	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = 0;
+
+	vf_num = dev_num_vf(dev);
+	if (vf_num == 0)
+		return;
+
+	rte_free(*vfinfo);
+	*vfinfo = NULL;
+}
+
 #define E1000_RAH_POOLSEL_SHIFT (18)
 int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
 {
@@ -150,8 +172,8 @@ int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
 	E1000_WRITE_REG(hw, E1000_VT_CTL, vtctl);
 
 	/* Enable pools reserved to PF only */
-	E1000_WRITE_REG(hw, E1000_VFRE, (~0) << vf_num);
-	E1000_WRITE_REG(hw, E1000_VFTE, (~0) << vf_num);
+	E1000_WRITE_REG(hw, E1000_VFRE, (~0U) << vf_num);
+	E1000_WRITE_REG(hw, E1000_VFTE, (~0U) << vf_num);
 
 	/* PFDMA Tx General Switch Control Enables VMDQ loopback */
 	if (hw->mac.type == e1000_i350)
@@ -196,8 +218,7 @@ int igb_pf_host_configure(struct rte_eth_dev *eth_dev)
 static void
 set_rx_mode(struct rte_eth_dev *dev)
 {
-	struct rte_eth_dev_data *dev_data =
-		(struct rte_eth_dev_data*)dev->data->dev_private;
+	struct rte_eth_dev_data *dev_data = dev->data;
 	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t fctrl, vmolr = E1000_VMOLR_BAM | E1000_VMOLR_AUPE;
 	uint16_t vfn = dev_num_vf(dev);
@@ -207,7 +228,7 @@ set_rx_mode(struct rte_eth_dev *dev)
 
 	/* set all bits that we expect to always be set */
 	fctrl &= ~E1000_RCTL_SBP; /* disable store-bad-packets */
-	fctrl |= E1000_RCTL_BAM;;
+	fctrl |= E1000_RCTL_BAM;
 
 	/* clear the bits we are changing the status of */
 	fctrl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
@@ -310,8 +331,10 @@ igb_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
 	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
 
-	if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) {
-		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
+	if (is_unicast_ether_addr((struct ether_addr *)new_mac)) {
+		if (!is_zero_ether_addr((struct ether_addr *)new_mac))
+			rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+				sizeof(vfinfo[vf].vf_mac_addresses));
 		hw->mac.ops.rar_set(hw, new_mac, rar_entry);
 		return 0;
 	}