X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.c;h=cf89882a99527432433659eb5a79ad84bf8484d1;hb=1e58b1426a38917eb16012d5cdfdcc14b19daa3b;hp=92c750fc09491755ecec4d6d1196f97c79c4745c;hpb=e61e47c2c377d89ba3d4bc94228949fb63bab69c;p=dpdk.git diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 92c750fc09..cf89882a99 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -243,6 +243,15 @@ /* Bit mask of Extended Tag enable/disable */ #define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT) +/* The max bandwidth of i40e is 40Gbps. */ +#define I40E_QOS_BW_MAX 40000 +/* The bandwidth should be the multiple of 50Mbps. */ +#define I40E_QOS_BW_GRANULARITY 50 +/* The min bandwidth weight is 1. */ +#define I40E_QOS_BW_WEIGHT_MIN 1 +/* The max bandwidth weight is 127. */ +#define I40E_QOS_BW_WEIGHT_MAX 127 + static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev); static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); static int i40e_dev_configure(struct rte_eth_dev *dev); @@ -479,6 +488,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .rx_queue_release = i40e_dev_rx_queue_release, .rx_queue_count = i40e_dev_rx_queue_count, .rx_descriptor_done = i40e_dev_rx_descriptor_done, + .rx_descriptor_status = i40e_dev_rx_descriptor_status, + .tx_descriptor_status = i40e_dev_tx_descriptor_status, .tx_queue_setup = i40e_dev_tx_queue_setup, .tx_queue_release = i40e_dev_tx_queue_release, .dev_led_on = i40e_dev_led_on, @@ -899,6 +910,8 @@ i40e_init_ethtype_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_ETHERTYPE_FILTER_NUM, .key_len = sizeof(struct i40e_ethertype_filter_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize ethertype filter rule list and hash */ @@ -942,6 +955,8 @@ i40e_init_tunnel_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_TUNNEL_FILTER_NUM, .key_len = sizeof(struct i40e_tunnel_filter_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize tunnel filter rule list and hash */ @@ -985,6 +1000,8 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev) .entries = I40E_MAX_FDIR_FILTER_NUM, .key_len = sizeof(struct rte_eth_fdir_input), .hash_func = rte_hash_crc, + .hash_func_init_val = 0, + .socket_id = rte_socket_id(), }; /* Initialize flow director filter rule list and hash */ @@ -1045,7 +1062,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) intr_handle = &pci_dev->intr_handle; rte_eth_copy_pci_info(dev, pci_dev); - dev->data->dev_flags = RTE_ETH_DEV_DETACHABLE; + dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE; pf->adapter = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); pf->adapter->eth_dev = dev; @@ -1235,6 +1252,15 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) ether_addr_copy((struct ether_addr *)hw->mac.perm_addr, &dev->data->mac_addrs[0]); + /* Init dcb to sw mode by default */ + ret = i40e_dcb_init_configure(dev, TRUE); + if (ret != I40E_SUCCESS) { + PMD_INIT_LOG(INFO, "Failed to init dcb."); + pf->flags &= ~I40E_FLAG_DCB; + } + /* Update HW struct after DCB configuration */ + i40e_get_cap(hw); + /* initialize pf host driver to setup SRIOV resource if applicable 
*/ i40e_pf_host_init(dev); @@ -1263,13 +1289,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) /* initialize mirror rule list */ TAILQ_INIT(&pf->mirror_list); - /* Init dcb to sw mode by default */ - ret = i40e_dcb_init_configure(dev, TRUE); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(INFO, "Failed to init dcb."); - pf->flags &= ~I40E_FLAG_DCB; - } - ret = i40e_init_ethtype_filter_list(dev); if (ret < 0) goto err_init_ethtype_filter_list; @@ -1867,6 +1886,7 @@ i40e_dev_start(struct rte_eth_dev *dev) struct rte_pci_device *pci_dev = I40E_DEV_TO_PCI(dev); struct rte_intr_handle *intr_handle = &pci_dev->intr_handle; uint32_t intr_vector = 0; + struct i40e_vsi *vsi; hw->adapter_stopped = 0; @@ -1894,7 +1914,7 @@ i40e_dev_start(struct rte_eth_dev *dev) 0); if (!intr_handle->intr_vec) { PMD_INIT_LOG(ERR, - "Failed to allocate %d rx_queues intr_vec\n", + "Failed to allocate %d rx_queues intr_vec", dev->data->nb_rx_queues); return -ENOMEM; } @@ -1945,6 +1965,15 @@ i40e_dev_start(struct rte_eth_dev *dev) PMD_DRV_LOG(INFO, "fail to set vsi broadcast"); } + /* Enable the VLAN promiscuous mode. */ + if (pf->vfs) { + for (i = 0; i < pf->vf_num; i++) { + vsi = pf->vfs[i].vsi; + i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid, + true, NULL); + } + } + /* Apply link configure */ if (dev->data->dev_conf.link_speeds & ~(ETH_LINK_SPEED_100M | ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G | @@ -1969,7 +1998,7 @@ i40e_dev_start(struct rte_eth_dev *dev) if (dev->data->dev_conf.intr_conf.lsc != 0) PMD_INIT_LOG(INFO, - "lsc won't enable because of no intr multiplex\n"); + "lsc won't enable because of no intr multiplex"); } else if (dev->data->dev_conf.intr_conf.lsc != 0) { ret = i40e_aq_set_phy_int_mask(hw, ~(I40E_AQ_EVENT_LINK_UPDOWN | @@ -2072,18 +2101,17 @@ i40e_dev_close(struct rte_eth_dev *dev) /* shutdown and destroy the HMC */ i40e_shutdown_lan_hmc(hw); - /* release all the existing VSIs and VEBs */ - i40e_fdir_teardown(pf); - i40e_vsi_release(pf->main_vsi); - for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) { i40e_vsi_release(pf->vmdq[i].vsi); pf->vmdq[i].vsi = NULL; } - rte_free(pf->vmdq); pf->vmdq = NULL; + /* release all the existing VSIs and VEBs */ + i40e_fdir_teardown(pf); + i40e_vsi_release(pf->main_vsi); + /* shutdown the adminq */ i40e_aq_queue_shutdown(hw, true); i40e_shutdown_adminq(hw); @@ -2221,11 +2249,11 @@ i40e_dev_link_update(struct rte_eth_dev *dev, } link.link_status = link_status.link_info & I40E_AQ_LINK_UP; - if (!wait_to_complete) + if (!wait_to_complete || link.link_status) break; rte_delay_ms(CHECK_INTERVAL); - } while (!link.link_status && rep_cnt--); + } while (--rep_cnt); if (!link.link_status) goto out; @@ -2937,7 +2965,7 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, else { ret = -EINVAL; PMD_DRV_LOG(ERR, - "Unsupported vlan type in single vlan.\n"); + "Unsupported vlan type in single vlan."); return ret; } break; @@ -3499,7 +3527,7 @@ i40e_dev_rss_reta_update(struct rte_eth_dev *dev, if (reta_size != lut_size || reta_size > ETH_RSS_RETA_SIZE_512) { PMD_DRV_LOG(ERR, - "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n", + "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)", reta_size, lut_size); return -EINVAL; } @@ -3540,7 +3568,7 @@ i40e_dev_rss_reta_query(struct rte_eth_dev *dev, if (reta_size != lut_size || reta_size > ETH_RSS_RETA_SIZE_512) { PMD_DRV_LOG(ERR, - "The size of hash lookup table configured (%d) doesn't match the number hardware can supported (%d)\n", + "The size of hash lookup 
table configured (%d) doesn't match the number hardware can supported (%d)", reta_size, lut_size); return -EINVAL; } @@ -4340,6 +4368,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) hw->aq.asq_last_status); goto fail; } + veb->enabled_tc = I40E_DEFAULT_TCMAP; /* get statistics index */ ret = i40e_aq_get_veb_parameters(hw, veb->seid, NULL, NULL, @@ -4374,6 +4403,9 @@ i40e_vsi_release(struct i40e_vsi *vsi) if (!vsi) return I40E_SUCCESS; + if (!vsi->adapter) + return -EFAULT; + user_param = vsi->user_param; pf = I40E_VSI_TO_PF(vsi); @@ -4584,7 +4616,7 @@ i40e_enable_pf_lb(struct i40e_pf *pf) ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL); if (ret) - PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n", + PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d", hw->aq.asq_last_status); } @@ -4657,6 +4689,7 @@ i40e_vsi_setup(struct i40e_pf *pf, vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi; vsi->user_param = user_param; vsi->vlan_anti_spoof_on = 0; + vsi->vlan_filter_on = 0; /* Allocate queues */ switch (vsi->type) { case I40E_VSI_MAIN : @@ -4833,13 +4866,14 @@ i40e_vsi_setup(struct i40e_pf *pf, rte_cpu_to_le_16(I40E_AQ_VSI_PROP_VLAN_VALID); ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL; ret = i40e_vsi_config_tc_queue_mapping(vsi, &ctxt.info, - I40E_DEFAULT_TCMAP); + hw->func_caps.enabled_tcmap); if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Failed to configure TC queue mapping"); goto fail_msix_alloc; } - ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP; + + ctxt.info.up_enable_bits = hw->func_caps.enabled_tcmap; ctxt.info.valid_sections |= rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SCHED_VALID); /** @@ -5181,11 +5215,11 @@ i40e_pf_setup(struct i40e_pf *pf) else if (hw->func_caps.rss_table_size == ETH_RSS_RETA_SIZE_512) settings.hash_lut_size = I40E_HASH_LUT_SIZE_512; else { - PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported\n", - hw->func_caps.rss_table_size); + PMD_DRV_LOG(ERR, "Hash lookup table size (%u) not supported", + hw->func_caps.rss_table_size); return I40E_ERR_PARAM; } - PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u\n", + PMD_DRV_LOG(INFO, "Hardware capability of hash lookup table size: %u", hw->func_caps.rss_table_size); pf->hash_lut_size = hw->func_caps.rss_table_size; @@ -5868,7 +5902,7 @@ i40e_add_macvlan_filters(struct i40e_vsi *vsi, flags = I40E_AQC_MACVLAN_ADD_HASH_MATCH; break; default: - PMD_DRV_LOG(ERR, "Invalid MAC match type\n"); + PMD_DRV_LOG(ERR, "Invalid MAC match type"); ret = I40E_ERR_PARAM; goto DONE; } @@ -5943,7 +5977,7 @@ i40e_remove_macvlan_filters(struct i40e_vsi *vsi, flags = I40E_AQC_MACVLAN_DEL_HASH_MATCH; break; default: - PMD_DRV_LOG(ERR, "Invalid MAC filter type\n"); + PMD_DRV_LOG(ERR, "Invalid MAC filter type"); ret = I40E_ERR_PARAM; goto DONE; } @@ -6025,7 +6059,7 @@ i40e_set_vlan_filter(struct i40e_vsi *vsi, i40e_store_vlan_filter(vsi, vlan_id, on); - if (!vsi->vlan_anti_spoof_on || !vlan_id) + if ((!vsi->vlan_anti_spoof_on && !vsi->vlan_filter_on) || !vlan_id) return; vlan_data.vlan_tag = rte_cpu_to_le_16(vlan_id); @@ -6115,7 +6149,7 @@ i40e_find_all_mac_for_vlan(struct i40e_vsi *vsi, static int i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) { - int i, num; + int i, j, num; struct i40e_mac_filter *f; struct i40e_macvlan_filter *mv_f; int ret = I40E_SUCCESS; @@ -6140,6 +6174,7 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) TAILQ_FOREACH(f, &vsi->mac_list, next) { (void)rte_memcpy(&mv_f[i].macaddr, &f->mac_info.mac_addr, ETH_ADDR_LEN); + mv_f[i].filter_type = 
f->mac_info.filter_type; mv_f[i].vlan_id = 0; i++; } @@ -6149,6 +6184,8 @@ i40e_vsi_remove_all_macvlan_filter(struct i40e_vsi *vsi) vsi->vlan_num, &f->mac_info.mac_addr); if (ret != I40E_SUCCESS) goto DONE; + for (j = i; j < i + vsi->vlan_num; j++) + mv_f[j].filter_type = f->mac_info.filter_type; i += vsi->vlan_num; } } @@ -6360,7 +6397,7 @@ i40e_vsi_delete_mac(struct i40e_vsi *vsi, struct ether_addr *addr) if (filter_type == RTE_MACVLAN_PERFECT_MATCH || filter_type == RTE_MACVLAN_HASH_MATCH) { if (vlan_num == 0) { - PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0\n"); + PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); return I40E_ERR_PARAM; } } else if (filter_type == RTE_MAC_PERFECT_MATCH || @@ -6700,6 +6737,12 @@ i40e_tunnel_filter_convert(struct i40e_aqc_add_remove_cloud_filters_element_data ether_addr_copy((struct ether_addr *)&cld_filter->inner_mac, (struct ether_addr *)&tunnel_filter->input.inner_mac); tunnel_filter->input.inner_vlan = cld_filter->inner_vlan; + if ((rte_le_to_cpu_16(cld_filter->flags) & + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) == + I40E_AQC_ADD_CLOUD_FLAGS_IPV6) + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV6; + else + tunnel_filter->input.ip_type = I40E_TUNNEL_IPTYPE_IPV4; tunnel_filter->input.flags = cld_filter->flags; tunnel_filter->input.tenant_id = cld_filter->tenant_id; tunnel_filter->queue = cld_filter->queue_number; @@ -7157,7 +7200,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) int ret = -EINVAL; val = I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2)); - PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x\n", val); + PMD_DRV_LOG(DEBUG, "Read original GL_PRS_FVBM with 0x%08x", val); if (len == 3) { reg = val | I40E_GL_PRS_FVBM_MSK_ENA; @@ -7176,7 +7219,7 @@ i40e_dev_set_gre_key_len(struct i40e_hw *hw, uint8_t len) } else { ret = 0; } - PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x\n", + PMD_DRV_LOG(DEBUG, "Read modified GL_PRS_FVBM with 0x%08x", I40E_READ_REG(hw, I40E_GL_PRS_FVBM(2))); return ret; @@ -8020,10 +8063,10 @@ i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { uint32_t reg = i40e_read_rx_ctl(hw, addr); - PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg); + PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x", addr, reg); if (reg != val) i40e_write_rx_ctl(hw, addr, val); - PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr, + PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x", addr, (uint32_t)i40e_read_rx_ctl(hw, addr)); } @@ -8471,7 +8514,7 @@ i40e_ethertype_filter_set(struct i40e_pf *pf, filter->queue, add, &stats, NULL); PMD_DRV_LOG(INFO, - "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u\n", + "add/rem control packet filter, return %d, mac_etype_used = %u, etype_used = %u, mac_etype_free = %u, etype_free = %u", ret, stats.mac_etype_used, stats.etype_used, stats.mac_etype_free, stats.etype_free); if (ret < 0) @@ -8523,7 +8566,7 @@ i40e_ethertype_filter_handle(struct rte_eth_dev *dev, FALSE); break; default: - PMD_DRV_LOG(ERR, "unsupported operation %u\n", filter_op); + PMD_DRV_LOG(ERR, "unsupported operation %u", filter_op); ret = -ENOSYS; break; } @@ -8727,6 +8770,10 @@ i40e_pctype_to_flowtype(enum i40e_filter_pctype pctype) #define I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x011f0200 #define I40E_GL_SWR_PRI_JOIN_MAP_2 0x26CE08 +/* For X722 */ +#define I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE 0x20000200 +#define I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE 0x013F0200 + /* For X710 */ #define I40E_GL_SWR_PM_UP_THR_EF_VALUE 0x03030303 /* For XL710 */ @@ 
-8749,7 +8796,6 @@ i40e_dev_sync_phy_type(struct i40e_hw *hw) return 0; } - static void i40e_configure_registers(struct i40e_hw *hw) { @@ -8757,8 +8803,8 @@ i40e_configure_registers(struct i40e_hw *hw) uint32_t addr; uint64_t val; } reg_table[] = { - {I40E_GL_SWR_PRI_JOIN_MAP_0, I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE}, - {I40E_GL_SWR_PRI_JOIN_MAP_2, I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE}, + {I40E_GL_SWR_PRI_JOIN_MAP_0, 0}, + {I40E_GL_SWR_PRI_JOIN_MAP_2, 0}, {I40E_GL_SWR_PM_UP_THR, 0}, /* Compute value dynamically */ }; uint64_t reg; @@ -8766,6 +8812,24 @@ i40e_configure_registers(struct i40e_hw *hw) int ret; for (i = 0; i < RTE_DIM(reg_table); i++) { + if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_0) { + if (hw->mac.type == I40E_MAC_X722) /* For X722 */ + reg_table[i].val = + I40E_X722_GL_SWR_PRI_JOIN_MAP_0_VALUE; + else /* For X710/XL710/XXV710 */ + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_0_VALUE; + } + + if (reg_table[i].addr == I40E_GL_SWR_PRI_JOIN_MAP_2) { + if (hw->mac.type == I40E_MAC_X722) /* For X722 */ + reg_table[i].val = + I40E_X722_GL_SWR_PRI_JOIN_MAP_2_VALUE; + else /* For X710/XL710/XXV710 */ + reg_table[i].val = + I40E_GL_SWR_PRI_JOIN_MAP_2_VALUE; + } + if (reg_table[i].addr == I40E_GL_SWR_PM_UP_THR) { if (I40E_PHY_TYPE_SUPPORT_40G(hw->phy.phy_types) || /* For XL710 */ I40E_PHY_TYPE_SUPPORT_25G(hw->phy.phy_types)) /* For XXV710 */ @@ -9761,8 +9825,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, old_cfg->etsrec = old_cfg->etscfg; ret = i40e_set_dcb_config(hw); if (ret) { - PMD_INIT_LOG(ERR, - "Set DCB Config failed, err %s aq_err %s\n", + PMD_INIT_LOG(ERR, "Set DCB Config failed, err %s aq_err %s", i40e_stat_str(hw, ret), i40e_aq_str(hw, hw->aq.asq_last_status)); return ret; @@ -9794,7 +9857,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map); if (ret) PMD_INIT_LOG(WARNING, - "Failed configuring TC for VEB seid=%d\n", + "Failed configuring TC for VEB seid=%d", main_vsi->veb->seid); } /* Update each VSI */ @@ -9812,7 +9875,7 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, I40E_DEFAULT_TCMAP); if (ret) PMD_INIT_LOG(WARNING, - "Failed configuring TC for VSI seid=%d\n", + "Failed configuring TC for VSI seid=%d", vsi_list->vsi->seid); /* continue */ } @@ -9832,7 +9895,7 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); - int ret = 0; + int i, ret = 0; if ((pf->flags & I40E_FLAG_DCB) == 0) { PMD_INIT_LOG(ERR, "HW doesn't support DCB"); @@ -9859,11 +9922,16 @@ i40e_dcb_init_configure(struct rte_eth_dev *dev, bool sw_dcb) hw->local_dcbx_config.etscfg.tcbwtable[0] = 100; hw->local_dcbx_config.etscfg.tsatable[0] = I40E_IEEE_TSA_ETS; + /* all UPs mapping to TC0 */ + for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) + hw->local_dcbx_config.etscfg.prioritytable[i] = 0; hw->local_dcbx_config.etsrec = hw->local_dcbx_config.etscfg; hw->local_dcbx_config.pfc.willing = 0; hw->local_dcbx_config.pfc.pfccap = I40E_MAX_TRAFFIC_CLASS; + hw->local_dcbx_config.pfc.pfcenable = + I40E_DEFAULT_TCMAP; /* FW needs one App to configure HW */ hw->local_dcbx_config.numapps = 1; hw->local_dcbx_config.app[0].selector = @@ -10175,8 +10243,7 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) /* mtu setting is forbidden if port is start */ if (dev_data->dev_started) { - PMD_DRV_LOG(ERR, - "port %d must be stopped before configuration\n", + PMD_DRV_LOG(ERR, "port %d must be stopped before configuration", 
dev_data->port_id); return -EBUSY; } @@ -10219,7 +10286,7 @@ i40e_ethertype_filter_restore(struct i40e_pf *pf) } PMD_DRV_LOG(INFO, "Ethertype filter:" " mac_etype_used = %u, etype_used = %u," - " mac_etype_free = %u, etype_free = %u\n", + " mac_etype_free = %u, etype_free = %u", stats.mac_etype_used, stats.etype_used, stats.mac_etype_free, stats.etype_free); } @@ -10251,16 +10318,14 @@ i40e_filter_restore(struct i40e_pf *pf) i40e_fdir_filter_restore(pf); } -static int -is_i40e_pmd(const char *driver_name) +static bool +is_device_supported(struct rte_eth_dev *dev, struct eth_driver *drv) { - if (!strstr(driver_name, "i40e")) - return -ENOTSUP; - - if (strstr(driver_name, "i40e_vf")) - return -ENOTSUP; + if (strcmp(dev->driver->pci_drv.driver.name, + drv->pci_drv.driver.name)) + return false; - return 0; + return true; } int @@ -10273,7 +10338,7 @@ rte_pmd_i40e_ping_vfs(uint8_t port, uint16_t vf) dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10302,7 +10367,7 @@ rte_pmd_i40e_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10406,7 +10471,7 @@ rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10427,10 +10492,12 @@ rte_pmd_i40e_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf_id, uint8_t on) return 0; /* already on or off */ vsi->vlan_anti_spoof_on = on; - ret = i40e_add_rm_all_vlan_filter(vsi, on); - if (ret) { - PMD_DRV_LOG(ERR, "Failed to remove VLAN filters."); - return -ENOTSUP; + if (!vsi->vlan_filter_on) { + ret = i40e_add_rm_all_vlan_filter(vsi, on); + if (ret) { + PMD_DRV_LOG(ERR, "Failed to add/remove VLAN filters."); + return -ENOTSUP; + } } vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID); @@ -10470,8 +10537,7 @@ i40e_vsi_rm_mac_filter(struct i40e_vsi *vsi) if (filter_type == RTE_MACVLAN_PERFECT_MATCH || filter_type == RTE_MACVLAN_HASH_MATCH) { if (vlan_num == 0) { - PMD_DRV_LOG(ERR, - "VLAN number shouldn't be 0\n"); + PMD_DRV_LOG(ERR, "VLAN number shouldn't be 0"); return I40E_ERR_PARAM; } } else if (filter_type == RTE_MAC_PERFECT_MATCH || @@ -10614,7 +10680,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on) PMD_INIT_LOG(ERR, "Failed to remove MAC filters."); return ret; } - if (vsi->vlan_anti_spoof_on) { + if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) { ret = i40e_add_rm_all_vlan_filter(vsi, 0); if (ret) { PMD_INIT_LOG(ERR, "Failed to remove VLAN filters."); @@ -10642,7 +10708,7 @@ i40e_vsi_set_tx_loopback(struct i40e_vsi *vsi, uint8_t on) ret = i40e_vsi_restore_mac_filter(vsi); if (ret) return ret; - if (vsi->vlan_anti_spoof_on) { + if (vsi->vlan_anti_spoof_on || vsi->vlan_filter_on) { ret = i40e_add_rm_all_vlan_filter(vsi, 1); if (ret) return ret; @@ -10665,7 +10731,7 @@ rte_pmd_i40e_set_tx_loopback(uint8_t port, uint8_t on) dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10707,7 +10773,7 @@ 
rte_pmd_i40e_set_vf_unicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10748,7 +10814,7 @@ rte_pmd_i40e_set_vf_multicast_promisc(uint8_t port, uint16_t vf_id, uint8_t on) dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10794,7 +10860,7 @@ rte_pmd_i40e_set_vf_mac_addr(uint8_t port, uint16_t vf_id, dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10831,7 +10897,7 @@ rte_pmd_i40e_set_vf_vlan_stripq(uint8_t port, uint16_t vf_id, uint8_t on) dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10874,7 +10940,7 @@ int rte_pmd_i40e_set_vf_vlan_insert(uint8_t port, uint16_t vf_id, dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10927,6 +10993,9 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, struct i40e_pf *pf; struct i40e_vsi *vsi; struct i40e_hw *hw; + struct i40e_mac_filter_info filter; + struct ether_addr broadcast = { + .addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff} }; int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); @@ -10938,7 +11007,7 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -10965,12 +11034,19 @@ int rte_pmd_i40e_set_vf_broadcast(uint8_t port, uint16_t vf_id, return -EINVAL; } - hw = I40E_VSI_TO_HW(vsi); + if (on) { + (void)rte_memcpy(&filter.mac_addr, &broadcast, ETHER_ADDR_LEN); + filter.filter_type = RTE_MACVLAN_PERFECT_MATCH; + ret = i40e_vsi_add_mac(vsi, &filter); + } else { + ret = i40e_vsi_delete_mac(vsi, &broadcast); + } - ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, on, NULL); - if (ret != I40E_SUCCESS) { + if (ret != I40E_SUCCESS && ret != I40E_ERR_PARAM) { ret = -ENOTSUP; PMD_DRV_LOG(ERR, "Failed to set VSI broadcast"); + } else { + ret = 0; } return ret; @@ -10994,7 +11070,7 @@ int rte_pmd_i40e_set_vf_vlan_tag(uint8_t port, uint16_t vf_id, uint8_t on) dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -11050,6 +11126,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, struct rte_eth_dev *dev; struct i40e_pf *pf; struct i40e_hw *hw; + struct i40e_vsi *vsi; uint16_t vf_idx; int ret = I40E_SUCCESS; @@ -11057,7 +11134,7 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; if (vlan_id > ETHER_MAX_VLAN_ID) { @@ -11088,14 +11165,22 @@ int rte_pmd_i40e_set_vf_vlan_filter(uint8_t port, uint16_t vlan_id, return -ENODEV; } - for (vf_idx = 0; vf_idx < 64 && ret == I40E_SUCCESS; 
vf_idx++) { + for (vf_idx = 0; vf_idx < pf->vf_num && ret == I40E_SUCCESS; vf_idx++) { if (vf_mask & ((uint64_t)(1ULL << vf_idx))) { - if (on) - ret = i40e_vsi_add_vlan(pf->vfs[vf_idx].vsi, - vlan_id); - else - ret = i40e_vsi_delete_vlan(pf->vfs[vf_idx].vsi, - vlan_id); + vsi = pf->vfs[vf_idx].vsi; + if (on) { + if (!vsi->vlan_filter_on) { + vsi->vlan_filter_on = true; + if (!vsi->vlan_anti_spoof_on) + i40e_add_rm_all_vlan_filter( + vsi, true); + } + i40e_aq_set_vsi_vlan_promisc(hw, vsi->seid, + false, NULL); + ret = i40e_vsi_add_vlan(vsi, vlan_id); + } else { + ret = i40e_vsi_delete_vlan(vsi, vlan_id); + } } } @@ -11120,7 +11205,7 @@ rte_pmd_i40e_get_vf_stats(uint8_t port, dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -11164,7 +11249,7 @@ rte_pmd_i40e_reset_vf_stats(uint8_t port, dev = &rte_eth_devices[port]; - if (is_i40e_pmd(dev->data->drv_name)) + if (!is_device_supported(dev, &rte_i40e_pmd)) return -ENOTSUP; pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -11185,3 +11270,206 @@ rte_pmd_i40e_reset_vf_stats(uint8_t port, return 0; } + +int +rte_pmd_i40e_set_vf_max_bw(uint8_t port, uint16_t vf_id, uint32_t bw) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + int ret = 0; + int i; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_i40e_pmd)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (bw > I40E_QOS_BW_MAX) { + PMD_DRV_LOG(ERR, "Bandwidth should not be larger than %dMbps.", + I40E_QOS_BW_MAX); + return -EINVAL; + } + + if (bw % I40E_QOS_BW_GRANULARITY) { + PMD_DRV_LOG(ERR, "Bandwidth should be the multiple of %dMbps.", + I40E_QOS_BW_GRANULARITY); + return -EINVAL; + } + + bw /= I40E_QOS_BW_GRANULARITY; + + hw = I40E_VSI_TO_HW(vsi); + + /* No change. */ + if (bw == vsi->bw_info.bw_limit) { + PMD_DRV_LOG(INFO, + "No change for VF max bandwidth. Nothing to do."); + return 0; + } + + /** + * VF bandwidth limitation and TC bandwidth limitation cannot be + * enabled in parallel, quit if TC bandwidth limitation is enabled. + * + * If bw is 0, means disable bandwidth limitation. Then no need to + * check TC bandwidth limitation. + */ + if (bw) { + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if ((vsi->enabled_tc & BIT_ULL(i)) && + vsi->bw_info.bw_ets_credits[i]) + break; + } + if (i != I40E_MAX_TRAFFIC_CLASS) { + PMD_DRV_LOG(ERR, + "TC max bandwidth has been set on this VF," + " please disable it first."); + return -EINVAL; + } + } + + ret = i40e_aq_config_vsi_bw_limit(hw, vsi->seid, (uint16_t)bw, 0, NULL); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to set VF %d bandwidth, err(%d).", + vf_id, ret); + return -EINVAL; + } + + /* Store the configuration. 
*/ + vsi->bw_info.bw_limit = (uint16_t)bw; + vsi->bw_info.bw_max = 0; + + return 0; +} + +int +rte_pmd_i40e_set_vf_tc_bw_alloc(uint8_t port, uint16_t vf_id, + uint8_t tc_num, uint8_t *bw_weight) +{ + struct rte_eth_dev *dev; + struct i40e_pf *pf; + struct i40e_vsi *vsi; + struct i40e_hw *hw; + struct i40e_aqc_configure_vsi_tc_bw_data tc_bw; + int ret = 0; + int i, j; + uint16_t sum; + bool b_change = false; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV); + + dev = &rte_eth_devices[port]; + + if (!is_device_supported(dev, &rte_i40e_pmd)) + return -ENOTSUP; + + pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + + if (vf_id >= pf->vf_num || !pf->vfs) { + PMD_DRV_LOG(ERR, "Invalid VF ID."); + return -EINVAL; + } + + vsi = pf->vfs[vf_id].vsi; + if (!vsi) { + PMD_DRV_LOG(ERR, "Invalid VSI."); + return -EINVAL; + } + + if (tc_num > I40E_MAX_TRAFFIC_CLASS) { + PMD_DRV_LOG(ERR, "TCs should be no more than %d.", + I40E_MAX_TRAFFIC_CLASS); + return -EINVAL; + } + + sum = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & BIT_ULL(i)) + sum++; + } + if (sum != tc_num) { + PMD_DRV_LOG(ERR, + "Weight should be set for all %d enabled TCs.", + sum); + return -EINVAL; + } + + sum = 0; + for (i = 0; i < tc_num; i++) { + if (!bw_weight[i]) { + PMD_DRV_LOG(ERR, + "The weight should be 1 at least."); + return -EINVAL; + } + sum += bw_weight[i]; + } + if (sum != 100) { + PMD_DRV_LOG(ERR, + "The summary of the TC weight should be 100."); + return -EINVAL; + } + + /** + * Create the configuration for all the TCs. + */ + memset(&tc_bw, 0, sizeof(tc_bw)); + tc_bw.tc_valid_bits = vsi->enabled_tc; + j = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & BIT_ULL(i)) { + if (bw_weight[j] != + vsi->bw_info.bw_ets_share_credits[i]) + b_change = true; + + tc_bw.tc_bw_credits[i] = bw_weight[j]; + j++; + } + } + + /* No change. */ + if (!b_change) { + PMD_DRV_LOG(INFO, + "No change for TC allocated bandwidth." + " Nothing to do."); + return 0; + } + + hw = I40E_VSI_TO_HW(vsi); + + ret = i40e_aq_config_vsi_tc_bw(hw, vsi->seid, &tc_bw, NULL); + if (ret) { + PMD_DRV_LOG(ERR, + "Failed to set VF %d TC bandwidth weight, err(%d).", + vf_id, ret); + return -EINVAL; + } + + /* Store the configuration. */ + j = 0; + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (vsi->enabled_tc & BIT_ULL(i)) { + vsi->bw_info.bw_ets_share_credits[i] = bw_weight[j]; + j++; + } + } + + return 0; +}
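
Below is a minimal usage sketch (not part of the patch above) for the two public APIs this change introduces, rte_pmd_i40e_set_vf_max_bw() and rte_pmd_i40e_set_vf_tc_bw_alloc(). It assumes the port is driven by the i40e PMD with SR-IOV VFs already created, that the VF's VSI has two traffic classes enabled (e.g. through DCB), and that the prototypes are exposed via rte_pmd_i40e.h; the port id, VF id, bandwidth and weights are illustrative values chosen to satisfy the checks added in the patch (bw in Mbps, at most 40000 and a multiple of 50; one nonzero weight per enabled TC, summing to 100).

/*
 * Usage sketch only: cap VF 0 on the given port at 2 Gbps and split its
 * bandwidth 30/70 between its two enabled TCs. Port/VF ids, the 2-TC
 * assumption and the chosen values are illustrative, not part of the patch.
 */
#include <stdio.h>
#include <stdint.h>
#include <rte_pmd_i40e.h>

static int
configure_vf0_bandwidth(uint8_t port)
{
	/* Per the patch: bw is in Mbps, <= 40000 and a multiple of 50. */
	uint32_t max_bw = 2000;
	/* Per the patch: one weight per enabled TC, weights must sum to 100. */
	uint8_t tc_weights[2] = {30, 70};
	int ret;

	ret = rte_pmd_i40e_set_vf_max_bw(port, 0, max_bw);
	if (ret) {
		printf("rte_pmd_i40e_set_vf_max_bw failed: %d\n", ret);
		return ret;
	}

	ret = rte_pmd_i40e_set_vf_tc_bw_alloc(port, 0, 2, tc_weights);
	if (ret)
		printf("rte_pmd_i40e_set_vf_tc_bw_alloc failed: %d\n", ret);

	return ret;
}

Note that, as implemented in the patch, the max-bandwidth call is rejected while per-TC bandwidth limits are active on the VSI, and the TC weight call requires tc_num to match the number of TCs currently enabled on the VF's VSI.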