X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_pf.c;h=67b5bef44fefadc3b66674c95bec61d907fdb815;hb=1e8d75d8059701fd15876416be06064735ec5e87;hp=44ab450097420342b915a1e52cdcfbc3fa5f6fa7;hpb=5566a3e35866ce9e5eacf886c27b460ebfcd6ee9;p=dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c
index 44ab450097..67b5bef44f 100644
--- a/drivers/net/ixgbe/ixgbe_pf.c
+++ b/drivers/net/ixgbe/ixgbe_pf.c
@@ -15,7 +15,7 @@
 #include
 #include
 #include
-#include
+#include
 #include
 #include
 #include
@@ -40,16 +40,16 @@ dev_num_vf(struct rte_eth_dev *eth_dev)
 static inline int
 ixgbe_vf_perm_addr_gen(struct rte_eth_dev *dev, uint16_t vf_num)
 {
-	unsigned char vf_mac_addr[ETHER_ADDR_LEN];
+	unsigned char vf_mac_addr[RTE_ETHER_ADDR_LEN];
 	struct ixgbe_vf_info *vfinfo =
 		*IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private);
 	uint16_t vfn;

 	for (vfn = 0; vfn < vf_num; vfn++) {
-		eth_random_addr(vf_mac_addr);
+		rte_eth_random_addr(vf_mac_addr);
 		/* keep the random address as default */
 		memcpy(vfinfo[vfn].vf_mac_addresses, vf_mac_addr,
-			   ETHER_ADDR_LEN);
+			   RTE_ETHER_ADDR_LEN);
 	}

 	return 0;
@@ -90,6 +90,8 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev)
 	if (*vfinfo == NULL)
 		rte_panic("Cannot allocate memory for private VF data\n");

+	rte_eth_switch_domain_alloc(&(*vfinfo)->switch_domain_id);
+
 	memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info));
 	memset(uta_info, 0, sizeof(struct ixgbe_uta_info));
 	hw->mac.mc_filter_type = 0;
@@ -122,11 +124,10 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
 {
 	struct ixgbe_vf_info **vfinfo;
 	uint16_t vf_num;
+	int ret;

 	PMD_INIT_FUNC_TRACE();

-	vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
-
 	RTE_ETH_DEV_SRIOV(eth_dev).active = 0;
 	RTE_ETH_DEV_SRIOV(eth_dev).nb_q_per_pool = 0;
 	RTE_ETH_DEV_SRIOV(eth_dev).def_vmdq_idx = 0;
@@ -136,6 +137,14 @@ void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev)
 	if (vf_num == 0)
 		return;

+	vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private);
+	if (*vfinfo == NULL)
+		return;
+
+	ret = rte_eth_switch_domain_free((*vfinfo)->switch_domain_id);
+	if (ret)
+		PMD_INIT_LOG(WARNING, "failed to free switch domain: %d", ret);
+
 	rte_free(*vfinfo);
 	*vfinfo = NULL;
 }
@@ -152,16 +161,14 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
 	struct ixgbe_ethertype_filter ethertype_filter;

 	if (!hw->mac.ops.set_ethertype_anti_spoofing) {
-		RTE_LOG(INFO, PMD, "ether type anti-spoofing is not"
-			" supported.\n");
+		PMD_DRV_LOG(INFO, "ether type anti-spoofing is not supported.\n");
 		return;
 	}

 	i = ixgbe_ethertype_filter_lookup(filter_info,
 					  IXGBE_ETHERTYPE_FLOW_CTRL);
 	if (i >= 0) {
-		RTE_LOG(ERR, PMD, "A ether type filter"
-			" entity for flow control already exists!\n");
+		PMD_DRV_LOG(ERR, "A ether type filter entity for flow control already exists!\n");
 		return;
 	}
@@ -174,8 +181,7 @@ ixgbe_add_tx_flow_control_drop_filter(struct rte_eth_dev *eth_dev)
 	i = ixgbe_ethertype_filter_insert(filter_info,
 					  &ethertype_filter);
 	if (i < 0) {
-		RTE_LOG(ERR, PMD, "Cannot find an unused ether type filter"
-			" entity for flow control.\n");
+		PMD_DRV_LOG(ERR, "Cannot find an unused ether type filter entity for flow control.\n");
 		return;
 	}
@@ -244,7 +250,7 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev)

 	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
 	gpie &= ~IXGBE_GPIE_VTMODE_MASK;
-	gpie |= IXGBE_GPIE_MSIX_MODE;
+	gpie |= IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT;

 	switch (RTE_ETH_DEV_SRIOV(eth_dev).active) {
 	case ETH_64_POOLS:
@@ -329,10 +335,7 @@ set_rx_mode(struct rte_eth_dev *dev)

 	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);

-	if (dev->data->dev_conf.rxmode.hw_vlan_strip)
-		ixgbe_vlan_hw_strip_enable_all(dev);
-	else
-		ixgbe_vlan_hw_strip_disable_all(dev);
+	ixgbe_vlan_hw_strip_config(dev);
 }

 static inline void
@@ -345,7 +348,7 @@ ixgbe_vf_reset_event(struct rte_eth_dev *dev, uint16_t vf)
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 	uint32_t vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

-	vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_ROMPE |
+	vmolr |= (IXGBE_VMOLR_ROPE |
 			IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
 	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
@@ -401,23 +404,6 @@ ixgbe_vf_reset_msg(struct rte_eth_dev *dev, uint16_t vf)
 	ixgbe_vf_reset_event(dev, vf);
 }

-static int
-ixgbe_enable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
-{
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	uint32_t vmolr;
-
-	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
-
-	RTE_LOG(INFO, PMD, "VF %u: enabling multicast promiscuous\n", vf);
-
-	vmolr |= IXGBE_VMOLR_MPE;
-
-	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
-
-	return 0;
-}
-
 static int
 ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)
 {
@@ -426,7 +412,7 @@ ixgbe_disable_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf)

 	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

-	RTE_LOG(INFO, PMD, "VF %u: disabling multicast promiscuous\n", vf);
+	PMD_DRV_LOG(INFO, "VF %u: disabling multicast promiscuous\n", vf);

 	vmolr &= ~IXGBE_VMOLR_MPE;
@@ -454,7 +440,7 @@ ixgbe_vf_reset(struct rte_eth_dev *dev, uint16_t vf, uint32_t *msgbuf)

 	/* reply to reset with ack and vf mac address */
 	msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
-	rte_memcpy(new_mac, vf_mac, ETHER_ADDR_LEN);
+	rte_memcpy(new_mac, vf_mac, RTE_ETHER_ADDR_LEN);
 	/*
 	 * Piggyback the multicast filter type so VF can compute the
 	 * correct vectors
@@ -474,7 +460,8 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	int rar_entry = hw->mac.num_rar_entries - (vf + 1);
 	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);

-	if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+	if (rte_is_valid_assigned_ether_addr(
+			(struct rte_ether_addr *)new_mac)) {
 		rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6);
 		return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV);
 	}
@@ -497,6 +484,7 @@ ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	const uint32_t IXGBE_MTA_BIT_MASK = (0x1 << IXGBE_MTA_BIT_SHIFT) - 1;
 	uint32_t reg_val;
 	int i;
+	u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));

 	/* Disable multicast promiscuous first */
 	ixgbe_disable_vf_mc_promisc(dev, vf);
@@ -510,6 +498,12 @@ ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 		vfinfo->vf_mc_hashes[i] = hash_list[i];
 	}

+	if (nb_entries == 0) {
+		vmolr &= ~IXGBE_VMOLR_ROMPE;
+		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+		return 0;
+	}
+
 	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
 		mta_idx = (vfinfo->vf_mc_hashes[i] >> IXGBE_MTA_BIT_SHIFT)
 				& IXGBE_MTA_INDEX_MASK;
@@ -519,6 +513,9 @@ ixgbe_vf_set_multicast(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 		IXGBE_WRITE_REG(hw, IXGBE_MTA(mta_idx), reg_val);
 	}

+	vmolr |= IXGBE_VMOLR_ROMPE;
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+
 	return 0;
 }
@@ -547,7 +544,8 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t new_mtu = msgbuf[1];
 	uint32_t max_frs;
-	int max_frame = new_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	uint32_t hlreg0;
+	int max_frame = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;

 	/* X540 and X550 support jumbo frames in IOV mode */
 	if (hw->mac.type != ixgbe_mac_X540 &&
@@ -556,12 +554,25 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, __rte_unused uint32_t vf, uint32_t *msgbuf)
 	    hw->mac.type != ixgbe_mac_X550EM_x &&
 	    hw->mac.type != ixgbe_mac_X550EM_a)
 		return -1;

-	if ((max_frame < ETHER_MIN_LEN) || (max_frame > ETHER_MAX_JUMBO_FRAME_LEN))
+	if (max_frame < RTE_ETHER_MIN_LEN ||
+	    max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
 		return -1;

 	max_frs = (IXGBE_READ_REG(hw, IXGBE_MAXFRS) &
 		   IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
 	if (max_frs < new_mtu) {
+		hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+		if (new_mtu > RTE_ETHER_MAX_LEN) {
+			dev->data->dev_conf.rxmode.offloads |=
+				DEV_RX_OFFLOAD_JUMBO_FRAME;
+			hlreg0 |= IXGBE_HLREG0_JUMBOEN;
+		} else {
+			dev->data->dev_conf.rxmode.offloads &=
+				~DEV_RX_OFFLOAD_JUMBO_FRAME;
+			hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
+		}
+		IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+
 		max_frs = new_mtu << IXGBE_MHADD_MFS_SHIFT;
 		IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, max_frs);
 	}
@@ -580,13 +591,14 @@ ixgbe_negotiate_vf_api(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	case ixgbe_mbox_api_10:
 	case ixgbe_mbox_api_11:
 	case ixgbe_mbox_api_12:
+	case ixgbe_mbox_api_13:
 		vfinfo[vf].api_version = (uint8_t)api_version;
 		return 0;
 	default:
 		break;
 	}

-	RTE_LOG(ERR, PMD, "Negotiate invalid api version %u from VF %d\n",
+	PMD_DRV_LOG(ERR, "Negotiate invalid api version %u from VF %d\n",
 		api_version, vf);

 	return -1;
@@ -616,6 +628,7 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	case ixgbe_mbox_api_20:
 	case ixgbe_mbox_api_11:
 	case ixgbe_mbox_api_12:
+	case ixgbe_mbox_api_13:
 		break;
 	default:
 		return -1;
@@ -633,7 +646,7 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 	switch (eth_conf->txmode.mq_mode) {
 	case ETH_MQ_TX_NONE:
 	case ETH_MQ_TX_DCB:
-		RTE_LOG(ERR, PMD, "PF must work with virtualization for VF %u"
+		PMD_DRV_LOG(ERR, "PF must work with virtualization for VF %u"
 			", but its tx mode = %d\n",
 			vf, eth_conf->txmode.mq_mode);
 		return -1;
@@ -668,7 +681,7 @@ ixgbe_get_vf_queues(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 		break;
 	default:
-		RTE_LOG(ERR, PMD, "PF work with invalid mode = %d\n",
+		PMD_DRV_LOG(ERR, "PF work with invalid mode = %d\n",
 			eth_conf->txmode.mq_mode);
 		return -1;
 	}
@@ -682,19 +695,100 @@ ixgbe_set_vf_mc_promisc(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
 {
 	struct ixgbe_vf_info *vfinfo =
 		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
-	bool enable = !!msgbuf[1];	/* msgbuf contains the flag to enable */
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int xcast_mode = msgbuf[1];	/* msgbuf contains the flag to enable */
+	u32 vmolr, fctrl, disable, enable;

 	switch (vfinfo[vf].api_version) {
 	case ixgbe_mbox_api_12:
+		/* promisc introduced in 1.3 version */
+		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
+			return -EOPNOTSUPP;
+		break;
+		/* Fall threw */
+	case ixgbe_mbox_api_13:
 		break;
 	default:
 		return -1;
 	}

-	if (enable)
-		return ixgbe_enable_vf_mc_promisc(dev, vf);
-	else
-		return ixgbe_disable_vf_mc_promisc(dev, vf);
+	if (vfinfo[vf].xcast_mode == xcast_mode)
+		goto out;
+
+	switch (xcast_mode) {
+	case IXGBEVF_XCAST_MODE_NONE:
+		disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+			  IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+		enable = 0;
+		break;
+	case IXGBEVF_XCAST_MODE_MULTI:
+		disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE;
+		break;
+	case IXGBEVF_XCAST_MODE_ALLMULTI:
+		disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE;
+		break;
+	case IXGBEVF_XCAST_MODE_PROMISC:
+		if (hw->mac.type <= ixgbe_mac_82599EB)
+			return -1;
+
+		fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+		if (!(fctrl & IXGBE_FCTRL_UPE)) {
+			/* VF promisc requires PF in promisc */
+			PMD_DRV_LOG(ERR,
+				    "Enabling VF promisc requires PF in promisc\n");
+			return -1;
+		}
+
+		disable = 0;
+		enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE |
+			 IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE;
+		break;
+	default:
+		return -1;
+	}
+
+	vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+	vmolr &= ~disable;
+	vmolr |= enable;
+	IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+	vfinfo[vf].xcast_mode = xcast_mode;
+
+out:
+	msgbuf[1] = xcast_mode;
+
+	return 0;
+}
+
+static int
+ixgbe_set_vf_macvlan_msg(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_vf_info *vf_info =
+		*(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
+	uint8_t *new_mac = (uint8_t *)(&msgbuf[1]);
+	int index = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >>
+		    IXGBE_VT_MSGINFO_SHIFT;
+
+	if (index) {
+		if (!rte_is_valid_assigned_ether_addr(
+			(struct rte_ether_addr *)new_mac)) {
+			PMD_DRV_LOG(ERR, "set invalid mac vf:%d\n", vf);
+			return -1;
+		}
+
+		vf_info[vf].mac_count++;
+
+		hw->mac.ops.set_rar(hw, vf_info[vf].mac_count,
+				new_mac, vf, IXGBE_RAH_AV);
+	} else {
+		if (vf_info[vf].mac_count) {
+			hw->mac.ops.clear_rar(hw, vf_info[vf].mac_count);
+			vf_info[vf].mac_count = 0;
+		}
+	}
+	return 0;
 }

 static int
@@ -739,7 +833,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)

 		/* notify application about VF reset */
 		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
-					      NULL, &ret_param);
+					      &ret_param);
 		return ret;
 	}
@@ -751,7 +845,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 	 * if ret_param.retval > 1, do nothing and send NAK to VF
 	 */
 	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX,
-				      NULL, &ret_param);
+				      &ret_param);

 	retval = ret_param.retval;
@@ -784,6 +878,10 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf)
 		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
 			retval = ixgbe_set_vf_mc_promisc(dev, vf, msgbuf);
 		break;
+	case IXGBE_VF_SET_MACVLAN:
+		if (retval == RTE_PMD_IXGBE_MB_EVENT_PROCEED)
+			retval = ixgbe_set_vf_macvlan_msg(dev, vf, msgbuf);
+		break;
 	default:
 		PMD_DRV_LOG(DEBUG, "Unhandled Msg %8.8x", (unsigned)msgbuf[0]);
 		retval = IXGBE_ERR_MBX;
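
For context on the mailbox path touched above: the _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_VF_MBOX, &ret_param) calls hand each VF request (including the new IXGBE_VF_SET_MACVLAN case) to an application callback before the PMD acts on it. The following sketch is illustrative only and not part of this patch; it assumes a hypothetical, already-configured port_id and a policy of always letting the PMD proceed.

#include <rte_common.h>
#include <rte_ethdev.h>
#include <rte_pmd_ixgbe.h>

/* Callback the ixgbe PF PMD invokes for every VF mailbox message. */
static int
vf_mbox_cb(uint16_t port_id, enum rte_eth_event_type event,
	   void *cb_arg, void *ret_param)
{
	struct rte_pmd_ixgbe_mb_event_param *p = ret_param;

	RTE_SET_USED(port_id);
	RTE_SET_USED(event);
	RTE_SET_USED(cb_arg);

	/* Let the PMD handle the request (MAC/VLAN set, xcast mode, ...). */
	p->retval = RTE_PMD_IXGBE_MB_EVENT_PROCEED;
	return 0;
}

/* Registration, e.g. after rte_eth_dev_configure() on the PF port:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_VF_MBOX,
 *                               vf_mbox_cb, NULL);
 */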