X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.c;h=8c9ecc1c3274e652567ac37a2367f1d2a63c759e;hb=97ac72aa71a93d5c9bc5dc3ef1d1a324f38e61c8;hp=22b240cc6efd2c23bfa722e0070b10a32713b8e1;hpb=0d16d2694a74416ec1650dfaa921b19528f757c3;p=dpdk.git diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index 22b240cc6e..8c9ecc1c32 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -62,8 +62,6 @@ #include "i40e_rxtx.h" #include "i40e_pf.h" -/* Maximun number of MAC addresses */ -#define I40E_NUM_MACADDR_MAX 64 #define I40E_CLEAR_PXE_WAIT_MS 200 /* Maximun number of capability elements */ @@ -137,13 +135,6 @@ #define I40E_DEFAULT_DCB_APP_NUM 1 #define I40E_DEFAULT_DCB_APP_PRIO 3 -#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) -#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) -#define I40E_GLQF_FD_MSK_FIELD 0x0000FFFF -#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) -#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) -#define I40E_GLQF_HASH_MSK_FIELD 0x0000FFFF - #define I40E_INSET_NONE 0x00000000000000000ULL /* bit0 ~ bit 7 */ @@ -273,6 +264,22 @@ #define I40E_INSET_IPV6_TC_MASK 0x0009F00FUL #define I40E_INSET_IPV6_NEXT_HDR_MASK 0x000C00FFUL +#define I40E_GL_SWT_L2TAGCTRL(_i) (0x001C0A70 + ((_i) * 4)) +#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT 16 +#define I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK \ + I40E_MASK(0xFFFF, I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) + +/* PCI offset for querying capability */ +#define PCI_DEV_CAP_REG 0xA4 +/* PCI offset for enabling/disabling Extended Tag */ +#define PCI_DEV_CTRL_REG 0xA8 +/* Bit mask of Extended Tag capability */ +#define PCI_DEV_CAP_EXT_TAG_MASK 0x20 +/* Bit shift of Extended Tag enable/disable */ +#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8 +/* Bit mask of Extended Tag enable/disable */ +#define PCI_DEV_CTRL_EXT_TAG_MASK (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT) + static int eth_i40e_dev_init(struct rte_eth_dev *eth_dev); static int eth_i40e_dev_uninit(struct rte_eth_dev *eth_dev); static int i40e_dev_configure(struct rte_eth_dev *dev); @@ -299,7 +306,9 @@ static void i40e_dev_info_get(struct rte_eth_dev *dev, static int i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); -static void i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid); +static int i40e_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type, + uint16_t tpid); static void i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask); static void i40e_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, @@ -369,10 +378,10 @@ static int i40e_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); static int i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf); -static int i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev, - struct rte_eth_udp_tunnel *udp_tunnel); -static int i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev, - struct rte_eth_udp_tunnel *udp_tunnel); +static int i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); +static int i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel); static int i40e_ethertype_filter_set(struct i40e_pf *pf, struct rte_eth_ethertype_filter *filter, bool add); @@ -386,7 +395,7 @@ static int i40e_dev_filter_ctrl(struct rte_eth_dev *dev, static int i40e_dev_get_dcb_info(struct 
rte_eth_dev *dev,
struct rte_eth_dcb_info *dcb_info);
static void i40e_configure_registers(struct i40e_hw *hw);
-static void i40e_hw_init(struct i40e_hw *hw);
+static void i40e_hw_init(struct rte_eth_dev *dev);
static int i40e_config_qinq(struct i40e_hw *hw, struct i40e_vsi *vsi);
static int i40e_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
@@ -467,8 +476,8 @@ static const struct eth_dev_ops i40e_eth_dev_ops = {
.reta_query = i40e_dev_rss_reta_query,
.rss_hash_update = i40e_dev_rss_hash_update,
.rss_hash_conf_get = i40e_dev_rss_hash_conf_get,
- .udp_tunnel_add = i40e_dev_udp_tunnel_add,
- .udp_tunnel_del = i40e_dev_udp_tunnel_del,
+ .udp_tunnel_port_add = i40e_dev_udp_tunnel_port_add,
+ .udp_tunnel_port_del = i40e_dev_udp_tunnel_port_del,
.filter_ctrl = i40e_dev_filter_ctrl,
.rxq_info_get = i40e_rxq_info_get,
.txq_info_get = i40e_txq_info_get,
@@ -765,7 +774,7 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
i40e_clear_hw(hw);
/* Initialize the hardware */
- i40e_hw_init(hw);
+ i40e_hw_init(dev);
/* Reset here to make sure all is clean for each PF */
ret = i40e_pf_reset(hw);
@@ -872,6 +881,20 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
hw->fc.requested_mode = I40E_FC_NONE;
i40e_set_fc(hw, &aq_fail, TRUE);
+ /* Set the global registers with default ether type value */
+ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, ETHER_TYPE_VLAN);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to set the default outer "
+ "VLAN ether type");
+ goto err_setup_pf_switch;
+ }
+ ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, ETHER_TYPE_VLAN);
+ if (ret != I40E_SUCCESS) {
+ PMD_INIT_LOG(ERR, "Failed to set the default inner "
+ "VLAN ether type");
+ goto err_setup_pf_switch;
+ }
+
/* PF setup, which includes VSI setup */
ret = i40e_pf_setup(pf);
if (ret) {
@@ -919,6 +942,11 @@ eth_i40e_dev_init(struct rte_eth_dev *dev)
*/
i40e_add_tx_flow_control_drop_filter(pf);
+ /* Set the max frame size to 0x2600 by default,
+ * in case other drivers changed the default value.
+ */
+ i40e_aq_set_mac_config(hw, I40E_FRAME_SIZE_MAX, TRUE, 0, NULL);
+
/* initialize mirror rule list */
TAILQ_INIT(&pf->mirror_list);
@@ -2314,11 +2342,59 @@ i40e_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
return i40e_vsi_delete_vlan(vsi, vlan_id);
}
-static void
-i40e_vlan_tpid_set(__rte_unused struct rte_eth_dev *dev,
- __rte_unused uint16_t tpid)
+static int
+i40e_vlan_tpid_set(struct rte_eth_dev *dev,
+ enum rte_vlan_type vlan_type,
+ uint16_t tpid)
{
- PMD_INIT_FUNC_TRACE();
+ struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ uint64_t reg_r = 0, reg_w = 0;
+ uint16_t reg_id = 0;
+ int ret = 0;
+
+ switch (vlan_type) {
+ case ETH_VLAN_TYPE_OUTER:
+ reg_id = 2;
+ break;
+ case ETH_VLAN_TYPE_INNER:
+ reg_id = 3;
+ break;
+ default:
+ ret = -EINVAL;
+ PMD_DRV_LOG(ERR, "Unsupported vlan type %d", vlan_type);
+ return ret;
+ }
+ ret = i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ &reg_r, NULL);
+ if (ret != I40E_SUCCESS) {
+ PMD_DRV_LOG(ERR, "Failed to debug read from "
+ "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ ret = -EIO;
+ return ret;
+ }
+ PMD_DRV_LOG(DEBUG, "Debug read from I40E_GL_SWT_L2TAGCTRL[%d]: "
+ "0x%08"PRIx64"", reg_id, reg_r);
+
+ reg_w = reg_r & (~(I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_MASK));
+ reg_w |= ((uint64_t)tpid << I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT);
+ if (reg_r == reg_w) {
+ ret = 0;
+ PMD_DRV_LOG(DEBUG, "No need to write");
+ return ret;
+ }
+
+ ret = i40e_aq_debug_write_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
+ reg_w, NULL);
+ if (ret != I40E_SUCCESS) {
+ ret = -EIO;
+ PMD_DRV_LOG(ERR, "Failed to debug write to "
+ "I40E_GL_SWT_L2TAGCTRL[%d]", reg_id);
+ return ret;
+ }
+ PMD_DRV_LOG(DEBUG, "Debug write 0x%08"PRIx64" to "
+ "I40E_GL_SWT_L2TAGCTRL[%d]", reg_w, reg_id);
+
+ return ret;
}
static void
@@ -2327,6 +2403,13 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct i40e_vsi *vsi = pf->main_vsi;
+ if (mask & ETH_VLAN_FILTER_MASK) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ i40e_vsi_config_vlan_filter(vsi, TRUE);
+ else
+ i40e_vsi_config_vlan_filter(vsi, FALSE);
+ }
+
if (mask & ETH_VLAN_STRIP_MASK) {
/* Enable or disable VLAN stripping */
if (dev->data->dev_conf.rxmode.hw_vlan_strip)
@@ -2578,7 +2661,10 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
}
(void)rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
- mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
+ else
+ mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
if (pool == 0)
vsi = pf->main_vsi;
@@ -3408,7 +3494,7 @@ i40e_res_pool_alloc(struct i40e_res_pool_info *pool,
pool->num_free -= valid_entry->len;
pool->num_alloc += valid_entry->len;
- return (valid_entry->base + pool->base);
+ return valid_entry->base + pool->base;
}
/**
@@ -3631,7 +3717,7 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi)
veb->uplink_seid = vsi->uplink_seid;
ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid,
- I40E_DEFAULT_TCMAP, false, false, &veb->seid, NULL);
+ I40E_DEFAULT_TCMAP, false, &veb->seid, false, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d",
@@ -3755,7 +3841,6 @@ i40e_update_default_filter_setting(struct i40e_vsi *vsi)
return i40e_vsi_add_mac(vsi, &filter);
}
-#define I40E_3_BIT_MASK 0x7
/*
* i40e_vsi_get_bw_config - Query VSI BW Information
* @vsi: the VSI to be queried
*/
@@ -3805,7 +3890,7 @@
i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
/* 4 bits per TC, 4th bit is reserved */
vsi->bw_info.bw_ets_max[i] =
(uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) &
- I40E_3_BIT_MASK);
+ RTE_LEN2MASK(3, uint8_t));
PMD_DRV_LOG(DEBUG, "\tVSI TC%u:share credits %u", i,
vsi->bw_info.bw_ets_share_credits[i]);
PMD_DRV_LOG(DEBUG, "\tVSI TC%u:credits %u", i,
@@ -3817,6 +3902,45 @@ i40e_vsi_get_bw_config(struct i40e_vsi *vsi)
return I40E_SUCCESS;
}
+/* i40e_enable_pf_lb
+ * @pf: pointer to the pf structure
+ *
+ * allow loopback on pf
+ */
+static inline void
+i40e_enable_pf_lb(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+ struct i40e_vsi_context ctxt;
+ int ret;
+
+ /* Use the FW API if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver < 5) {
+ PMD_INIT_LOG(ERR, "FW < v5.0, cannot enable loopback");
+ return;
+ }
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->main_vsi_seid;
+ ctxt.pf_num = hw->pf_id;
+ ret = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "cannot get pf vsi config, err %d, aq_err %d",
+ ret, hw->aq.asq_last_status);
+ return;
+ }
+ ctxt.flags = I40E_AQ_VSI_TYPE_PF;
+ ctxt.info.valid_sections =
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret)
+ PMD_DRV_LOG(ERR, "update vsi switch failed, aq_err=%d\n",
+ hw->aq.asq_last_status);
+}
+
/* Setup a VSI */
struct i40e_vsi *
i40e_vsi_setup(struct i40e_pf *pf,
@@ -3852,6 +3976,8 @@ i40e_vsi_setup(struct i40e_pf *pf,
PMD_DRV_LOG(ERR, "VEB setup failed");
return NULL;
}
+ /* set ALLOWLOOPBACK on pf, when veb is created */
+ i40e_enable_pf_lb(pf);
}
vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0);
@@ -4024,14 +4150,14 @@ i40e_vsi_setup(struct i40e_pf *pf,
ctxt.connection_type = 0x1;
ctxt.flags = I40E_AQ_VSI_TYPE_VF;
- /**
- * Do not configure switch ID to enable VEB switch by
- * I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB. Because in Fortville,
- * if the source mac address of packet sent from VF is not
- * listed in the VEB's mac table, the VEB will switch the
- * packet back to the VF. Need to enable it when HW issue
- * is fixed.
- */
+ /* Use the VEB configuration if FW >= v5.0 */
+ if (hw->aq.fw_maj_ver >= 5) {
+ /* Configure switch ID */
+ ctxt.info.valid_sections |=
+ rte_cpu_to_le_16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ ctxt.info.switch_id =
+ rte_cpu_to_le_16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+ }
/* Configure port/vlan */
ctxt.info.valid_sections |=
@@ -4151,6 +4277,63 @@ fail_mem:
return NULL;
}
+/* Configure vlan filter on or off */
+int
+i40e_vsi_config_vlan_filter(struct i40e_vsi *vsi, bool on)
+{
+ int i, num;
+ struct i40e_mac_filter *f;
+ struct i40e_mac_filter_info *mac_filter;
+ enum rte_mac_filter_type desired_filter;
+ int ret = I40E_SUCCESS;
+
+ if (on) {
+ /* Filter to match MAC and VLAN */
+ desired_filter = RTE_MACVLAN_PERFECT_MATCH;
+ } else {
+ /* Filter to match only MAC */
+ desired_filter = RTE_MAC_PERFECT_MATCH;
+ }
+
+ num = vsi->mac_num;
+
+ mac_filter = rte_zmalloc("mac_filter_info_data",
+ num * sizeof(*mac_filter), 0);
+ if (mac_filter == NULL) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ return I40E_ERR_NO_MEMORY;
+ }
+
+ i = 0;
+
+ /* Remove all existing mac */
+ TAILQ_FOREACH(f, &vsi->mac_list, next) {
+ mac_filter[i] = f->mac_info;
+ ret = i40e_vsi_delete_mac(vsi, &f->mac_info.mac_addr);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter",
+ on ?
"enable" : "disable"); + goto DONE; + } + i++; + } + + /* Override with new filter */ + for (i = 0; i < num; i++) { + mac_filter[i].filter_type = desired_filter; + ret = i40e_vsi_add_mac(vsi, &mac_filter[i]); + if (ret) { + PMD_DRV_LOG(ERR, "Update VSI failed to %s vlan filter", + on ? "enable" : "disable"); + goto DONE; + } + } + +DONE: + rte_free(mac_filter); + return ret; +} + /* Configure vlan stripping on or off */ int i40e_vsi_config_vlan_stripping(struct i40e_vsi *vsi, bool on) @@ -4198,9 +4381,11 @@ i40e_dev_init_vlan(struct rte_eth_dev *dev) { struct rte_eth_dev_data *data = dev->data; int ret; + int mask = 0; /* Apply vlan offload setting */ - i40e_vlan_offload_set(dev, ETH_VLAN_STRIP_MASK); + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK; + i40e_vlan_offload_set(dev, mask); /* Apply double-vlan setting, not implemented yet */ @@ -5634,11 +5819,11 @@ i40e_pf_disable_rss(struct i40e_pf *pf) struct i40e_hw *hw = I40E_PF_TO_HW(pf); uint64_t hena; - hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32; + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; hena &= ~I40E_RSS_HENA_ALL; - I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena); - I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); I40E_WRITE_FLUSH(hw); } @@ -5671,7 +5856,7 @@ i40e_set_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t key_len) uint16_t i; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - I40E_WRITE_REG(hw, I40E_PFQF_HKEY(i), hash_key[i]); + i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), hash_key[i]); I40E_WRITE_FLUSH(hw); } @@ -5700,7 +5885,7 @@ i40e_get_rss_key(struct i40e_vsi *vsi, uint8_t *key, uint8_t *key_len) uint16_t i; for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) - key_dw[i] = I40E_READ_REG(hw, I40E_PFQF_HKEY(i)); + key_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i)); } *key_len = (I40E_PFQF_HKEY_MAX_INDEX + 1) * sizeof(uint32_t); @@ -5721,12 +5906,12 @@ i40e_hw_rss_hash_set(struct i40e_pf *pf, struct rte_eth_rss_conf *rss_conf) return ret; rss_hf = rss_conf->rss_hf; - hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32; + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; hena &= ~I40E_RSS_HENA_ALL; hena |= i40e_config_hena(rss_hf); - I40E_WRITE_REG(hw, I40E_PFQF_HENA(0), (uint32_t)hena); - I40E_WRITE_REG(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); + i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); I40E_WRITE_FLUSH(hw); return 0; @@ -5741,8 +5926,8 @@ i40e_dev_rss_hash_update(struct rte_eth_dev *dev, uint64_t rss_hf = rss_conf->rss_hf & I40E_RSS_OFFLOAD_ALL; uint64_t hena; - hena = (uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0)); - hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32; + hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)); + hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32; if (!(hena & I40E_RSS_HENA_ALL)) { /* RSS disabled */ if (rss_hf != 0) /* Enable RSS */ return -EINVAL; @@ -5766,8 +5951,8 @@ i40e_dev_rss_hash_conf_get(struct rte_eth_dev *dev, i40e_get_rss_key(pf->main_vsi, rss_conf->rss_key, &rss_conf->rss_key_len); - hena = 
(uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(0));
- hena |= ((uint64_t)I40E_READ_REG(hw, I40E_PFQF_HENA(1))) << 32;
+ hena = (uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0));
+ hena |= ((uint64_t)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1))) << 32;
rss_conf->rss_hf = i40e_parse_hena(hena);
return 0;
@@ -5792,6 +5977,12 @@ i40e_dev_get_filter_type(uint16_t filter_type, uint16_t *flag)
case ETH_TUNNEL_FILTER_IMAC:
*flag = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
break;
+ case ETH_TUNNEL_FILTER_OIP:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_OIP;
+ break;
+ case ETH_TUNNEL_FILTER_IIP:
+ *flag = I40E_AQC_ADD_CLOUD_FILTER_IIP;
+ break;
default:
PMD_DRV_LOG(ERR, "invalid tunnel filter type");
return -EINVAL;
@@ -5806,7 +5997,9 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
uint8_t add)
{
uint16_t ip_type;
- uint8_t tun_type = 0;
+ uint8_t i, tun_type = 0;
+ /* internal variables to convert ip addresses to device byte order */
+ uint32_t convert_ipv4, convert_ipv6[4];
int val, ret = 0;
struct i40e_hw *hw = I40E_PF_TO_HW(pf);
struct i40e_vsi *vsi = pf->main_vsi;
@@ -5823,32 +6016,36 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf,
}
pfilter = cld_filter;
- (void)rte_memcpy(&pfilter->outer_mac, tunnel_filter->outer_mac,
- sizeof(struct ether_addr));
- (void)rte_memcpy(&pfilter->inner_mac, tunnel_filter->inner_mac,
- sizeof(struct ether_addr));
+ ether_addr_copy(&tunnel_filter->outer_mac,
+ (struct ether_addr *)&pfilter->outer_mac);
+ ether_addr_copy(&tunnel_filter->inner_mac,
+ (struct ether_addr *)&pfilter->inner_mac);
- pfilter->inner_vlan = tunnel_filter->inner_vlan;
+ pfilter->inner_vlan = rte_cpu_to_le_16(tunnel_filter->inner_vlan);
if (tunnel_filter->ip_type == RTE_TUNNEL_IPTYPE_IPV4) {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV4;
- (void)rte_memcpy(&pfilter->ipaddr.v4.data,
- &tunnel_filter->ip_addr,
+ convert_ipv4 = rte_cpu_to_le_32(tunnel_filter->ip_addr.ipv4_addr);
+ rte_memcpy(&pfilter->ipaddr.v4.data, &convert_ipv4,
sizeof(pfilter->ipaddr.v4.data));
} else {
ip_type = I40E_AQC_ADD_CLOUD_FLAGS_IPV6;
- (void)rte_memcpy(&pfilter->ipaddr.v6.data,
- &tunnel_filter->ip_addr,
+ for (i = 0; i < 4; i++) {
+ convert_ipv6[i] =
+ rte_cpu_to_le_32(tunnel_filter->ip_addr.ipv6_addr[i]);
+ }
+ rte_memcpy(&pfilter->ipaddr.v6.data, &convert_ipv6,
sizeof(pfilter->ipaddr.v6.data));
}
/* check tunneled type */
switch (tunnel_filter->tunnel_type) {
case RTE_TUNNEL_TYPE_VXLAN:
- tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_XVLAN;
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_VXLAN;
break;
case RTE_TUNNEL_TYPE_NVGRE:
tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_NVGRE_OMAC;
break;
+ case RTE_TUNNEL_TYPE_IP_IN_GRE:
+ tun_type = I40E_AQC_ADD_CLOUD_TNL_TYPE_IP;
+ break;
default:
/* Other tunnel types is not supported.
*/ PMD_DRV_LOG(ERR, "tunnel type is not supported."); @@ -5863,10 +6060,11 @@ i40e_dev_tunnel_filter_set(struct i40e_pf *pf, return -EINVAL; } - pfilter->flags |= I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | ip_type | - (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT); - pfilter->tenant_id = tunnel_filter->tenant_id; - pfilter->queue_number = tunnel_filter->queue_id; + pfilter->flags |= rte_cpu_to_le_16( + I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE | + ip_type | (tun_type << I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT)); + pfilter->tenant_id = rte_cpu_to_le_32(tunnel_filter->tenant_id); + pfilter->queue_number = rte_cpu_to_le_16(tunnel_filter->queue_id); if (add) ret = i40e_aq_add_cloud_filters(hw, vsi->seid, cld_filter, 1); @@ -5971,8 +6169,8 @@ i40e_del_vxlan_port(struct i40e_pf *pf, uint16_t port) /* Add UDP tunneling port */ static int -i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev, - struct rte_eth_udp_tunnel *udp_tunnel) +i40e_dev_udp_tunnel_port_add(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) { int ret = 0; struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -6002,8 +6200,8 @@ i40e_dev_udp_tunnel_add(struct rte_eth_dev *dev, /* Remove UDP tunneling port */ static int -i40e_dev_udp_tunnel_del(struct rte_eth_dev *dev, - struct rte_eth_udp_tunnel *udp_tunnel) +i40e_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, + struct rte_eth_udp_tunnel *udp_tunnel) { int ret = 0; struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -6126,13 +6324,13 @@ i40e_tunnel_filter_param_check(struct i40e_pf *pf, } if ((filter->filter_type & ETH_TUNNEL_FILTER_OMAC) && - (is_zero_ether_addr(filter->outer_mac))) { + (is_zero_ether_addr(&filter->outer_mac))) { PMD_DRV_LOG(ERR, "Cannot add NULL outer MAC address"); return -EINVAL; } if ((filter->filter_type & ETH_TUNNEL_FILTER_IMAC) && - (is_zero_ether_addr(filter->inner_mac))) { + (is_zero_ether_addr(&filter->inner_mac))) { PMD_DRV_LOG(ERR, "Cannot add NULL inner MAC address"); return -EINVAL; } @@ -6268,7 +6466,7 @@ i40e_pf_config_mq_rx(struct i40e_pf *pf) static void i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) { - uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0); + uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); *enable = reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK ? 
1 : 0; } @@ -6277,7 +6475,7 @@ i40e_get_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t *enable) static void i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) { - uint32_t reg = I40E_READ_REG(hw, I40E_PRTQF_CTL_0); + uint32_t reg = i40e_read_rx_ctl(hw, I40E_PRTQF_CTL_0); if (enable > 0) { if (reg & I40E_PRTQF_CTL_0_HSYM_ENA_MASK) { @@ -6294,7 +6492,7 @@ i40e_set_symmetric_hash_enable_per_port(struct i40e_hw *hw, uint8_t enable) } reg &= ~I40E_PRTQF_CTL_0_HSYM_ENA_MASK; } - I40E_WRITE_REG(hw, I40E_PRTQF_CTL_0, reg); + i40e_write_rx_ctl(hw, I40E_PRTQF_CTL_0, reg); I40E_WRITE_FLUSH(hw); } @@ -6312,7 +6510,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, enum i40e_filter_pctype pctype; memset(g_cfg, 0, sizeof(*g_cfg)); - reg = I40E_READ_REG(hw, I40E_GLQF_CTL); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); if (reg & I40E_GLQF_CTL_HTOEP_MASK) g_cfg->hash_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ; else @@ -6327,7 +6525,7 @@ i40e_get_hash_filter_global_config(struct i40e_hw *hw, /* Bit set indicats the coresponding flow type is supported */ g_cfg->valid_bit_mask[0] |= (1UL << i); pctype = i40e_flowtype_to_pctype(i); - reg = I40E_READ_REG(hw, I40E_GLQF_HSYM(pctype)); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_HSYM(pctype)); if (reg & I40E_GLQF_HSYM_SYMH_ENA_MASK) g_cfg->sym_hash_enable_mask[0] |= (1UL << i); } @@ -6400,10 +6598,10 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, pctype = i40e_flowtype_to_pctype(i); reg = (g_cfg->sym_hash_enable_mask[0] & (1UL << i)) ? I40E_GLQF_HSYM_SYMH_ENA_MASK : 0; - I40E_WRITE_REG(hw, I40E_GLQF_HSYM(pctype), reg); + i40e_write_rx_ctl(hw, I40E_GLQF_HSYM(pctype), reg); } - reg = I40E_READ_REG(hw, I40E_GLQF_CTL); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL); if (g_cfg->hash_func == RTE_ETH_HASH_FUNCTION_TOEPLITZ) { /* Toeplitz */ if (reg & I40E_GLQF_CTL_HTOEP_MASK) { @@ -6424,7 +6622,7 @@ i40e_set_hash_filter_global_config(struct i40e_hw *hw, /* Use the default, and keep it as it is */ goto out; - I40E_WRITE_REG(hw, I40E_GLQF_CTL, reg); + i40e_write_rx_ctl(hw, I40E_GLQF_CTL, reg); out: I40E_WRITE_FLUSH(hw); @@ -6847,13 +7045,13 @@ i40e_get_reg_inset(struct i40e_hw *hw, enum rte_filter_type filter, uint64_t reg = 0; if (filter == RTE_ETH_FILTER_HASH) { - reg = I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(1, pctype)); + reg = i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, pctype)); reg <<= I40E_32_BIT_WIDTH; - reg |= I40E_READ_REG(hw, I40E_GLQF_HASH_INSET(0, pctype)); + reg |= i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, pctype)); } else if (filter == RTE_ETH_FILTER_FDIR) { - reg = I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 1)); + reg = i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 1)); reg <<= I40E_32_BIT_WIDTH; - reg |= I40E_READ_REG(hw, I40E_PRTQF_FD_INSET(pctype, 0)); + reg |= i40e_read_rx_ctl(hw, I40E_PRTQF_FD_INSET(pctype, 0)); } return reg; @@ -6862,13 +7060,13 @@ i40e_get_reg_inset(struct i40e_hw *hw, enum rte_filter_type filter, static void i40e_check_write_reg(struct i40e_hw *hw, uint32_t addr, uint32_t val) { - uint32_t reg = I40E_READ_REG(hw, addr); + uint32_t reg = i40e_read_rx_ctl(hw, addr); PMD_DRV_LOG(DEBUG, "[0x%08x] original: 0x%08x\n", addr, reg); if (reg != val) - I40E_WRITE_REG(hw, addr, val); + i40e_write_rx_ctl(hw, addr, val); PMD_DRV_LOG(DEBUG, "[0x%08x] after: 0x%08x\n", addr, - (uint32_t)I40E_READ_REG(hw, addr)); + (uint32_t)i40e_read_rx_ctl(hw, addr)); } static int @@ -6897,8 +7095,9 @@ i40e_set_hash_inset_mask(struct i40e_hw *hw, uint8_t j, count = 0; for (i = 0; i < I40E_INSET_MASK_NUM_REG; 
i++) { - reg = I40E_READ_REG(hw, I40E_GLQF_HASH_MSK(i, pctype)); - if (reg & I40E_GLQF_HASH_MSK_FIELD) + reg = i40e_read_rx_ctl(hw, + I40E_GLQF_HASH_MSK(i, pctype)); + if (reg & I40E_GLQF_HASH_MSK_MASK_MASK) count++; } if (count + num > I40E_INSET_MASK_NUM_REG) @@ -6938,8 +7137,9 @@ i40e_set_fd_inset_mask(struct i40e_hw *hw, uint8_t j, count = 0; for (i = 0; i < I40E_INSET_MASK_NUM_REG; i++) { - reg = I40E_READ_REG(hw, I40E_GLQF_FD_MSK(i, pctype)); - if (reg & I40E_GLQF_FD_MSK_FIELD) + reg = i40e_read_rx_ctl(hw, + I40E_GLQF_FD_MSK(i, pctype)); + if (reg & I40E_GLQF_FD_MSK_MASK_MASK) count++; } if (count + num > I40E_INSET_MASK_NUM_REG) @@ -7256,16 +7456,64 @@ i40e_dev_filter_ctrl(struct rte_eth_dev *dev, return ret; } +/* + * Check and enable Extended Tag. + * Enabling Extended Tag is important for 40G performance. + */ +static void +i40e_enable_extended_tag(struct rte_eth_dev *dev) +{ + uint32_t buf = 0; + int ret; + + ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + PCI_DEV_CAP_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_DEV_CAP_REG); + return; + } + if (!(buf & PCI_DEV_CAP_EXT_TAG_MASK)) { + PMD_DRV_LOG(ERR, "Does not support Extended Tag"); + return; + } + + buf = 0; + ret = rte_eal_pci_read_config(dev->pci_dev, &buf, sizeof(buf), + PCI_DEV_CTRL_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x", + PCI_DEV_CTRL_REG); + return; + } + if (buf & PCI_DEV_CTRL_EXT_TAG_MASK) { + PMD_DRV_LOG(DEBUG, "Extended Tag has already been enabled"); + return; + } + buf |= PCI_DEV_CTRL_EXT_TAG_MASK; + ret = rte_eal_pci_write_config(dev->pci_dev, &buf, sizeof(buf), + PCI_DEV_CTRL_REG); + if (ret < 0) { + PMD_DRV_LOG(ERR, "Failed to write PCI offset 0x%x", + PCI_DEV_CTRL_REG); + return; + } +} + /* * As some registers wouldn't be reset unless a global hardware reset, * hardware initialization is needed to put those registers into an * expected initial state. 
*/ static void -i40e_hw_init(struct i40e_hw *hw) +i40e_hw_init(struct rte_eth_dev *dev) { + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + i40e_enable_extended_tag(dev); + /* clear the PF Queue Filter control register */ - I40E_WRITE_REG(hw, I40E_PFQF_CTL_0, 0); + i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, 0); /* Disable symmetric hash per port */ i40e_set_symmetric_hash_enable_per_port(hw, 0); @@ -8082,6 +8330,8 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, int i, total_tc = 0; uint16_t qpnum_per_tc, bsf, qp_idx; struct rte_eth_dev_data *dev_data = I40E_VSI_TO_DEV_DATA(vsi); + struct i40e_pf *pf = I40E_VSI_TO_PF(vsi); + uint16_t used_queues; ret = validate_tcmap_parameter(vsi, enabled_tcmap); if (ret != I40E_SUCCESS) @@ -8095,7 +8345,18 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, total_tc = 1; vsi->enabled_tc = enabled_tcmap; - qpnum_per_tc = dev_data->nb_rx_queues / total_tc; + /* different VSI has different queues assigned */ + if (vsi->type == I40E_VSI_MAIN) + used_queues = dev_data->nb_rx_queues - + pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else if (vsi->type == I40E_VSI_VMDQ2) + used_queues = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM; + else { + PMD_INIT_LOG(ERR, "unsupported VSI type."); + return I40E_ERR_NO_AVAILABLE_VSI; + } + + qpnum_per_tc = used_queues / total_tc; /* Number of queues per enabled TC */ if (qpnum_per_tc == 0) { PMD_INIT_LOG(ERR, " number of queues is less that tcs."); @@ -8139,6 +8400,93 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, return I40E_SUCCESS; } +/* + * i40e_config_switch_comp_tc - Configure VEB tc setting for given TC map + * @veb: VEB to be configured + * @tc_map: enabled TC bitmap + * + * Returns 0 on success, negative value on failure + */ +static enum i40e_status_code +i40e_config_switch_comp_tc(struct i40e_veb *veb, uint8_t tc_map) +{ + struct i40e_aqc_configure_switching_comp_bw_config_data veb_bw; + struct i40e_aqc_query_switching_comp_bw_config_resp bw_query; + struct i40e_aqc_query_switching_comp_ets_config_resp ets_query; + struct i40e_hw *hw = I40E_VSI_TO_HW(veb->associate_vsi); + enum i40e_status_code ret = I40E_SUCCESS; + int i; + uint32_t bw_max; + + /* Check if enabled_tc is same as existing or new TCs */ + if (veb->enabled_tc == tc_map) + return ret; + + /* configure tc bandwidth */ + memset(&veb_bw, 0, sizeof(veb_bw)); + veb_bw.tc_valid_bits = tc_map; + /* Enable ETS TCs with equal BW Share for now across all VSIs */ + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (tc_map & BIT_ULL(i)) + veb_bw.tc_bw_share_credits[i] = 1; + } + ret = i40e_aq_config_switch_comp_bw_config(hw, veb->seid, + &veb_bw, NULL); + if (ret) { + PMD_INIT_LOG(ERR, "AQ command Config switch_comp BW allocation" + " per TC failed = %d", + hw->aq.asq_last_status); + return ret; + } + + memset(&ets_query, 0, sizeof(ets_query)); + ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid, + &ets_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch_comp ETS" + " configuration %u", hw->aq.asq_last_status); + return ret; + } + memset(&bw_query, 0, sizeof(bw_query)); + ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid, + &bw_query, NULL); + if (ret != I40E_SUCCESS) { + PMD_DRV_LOG(ERR, "Failed to get switch_comp bandwidth" + " configuration %u", hw->aq.asq_last_status); + return ret; + } + + /* store and print out BW info */ + veb->bw_info.bw_limit = rte_le_to_cpu_16(ets_query.port_bw_limit); + veb->bw_info.bw_max = ets_query.tc_bw_max; + PMD_DRV_LOG(DEBUG, "switch_comp bw 
limit:%u", veb->bw_info.bw_limit); + PMD_DRV_LOG(DEBUG, "switch_comp max_bw:%u", veb->bw_info.bw_max); + bw_max = rte_le_to_cpu_16(bw_query.tc_bw_max[0]) | + (rte_le_to_cpu_16(bw_query.tc_bw_max[1]) << + I40E_16_BIT_WIDTH); + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + veb->bw_info.bw_ets_share_credits[i] = + bw_query.tc_bw_share_credits[i]; + veb->bw_info.bw_ets_credits[i] = + rte_le_to_cpu_16(bw_query.tc_bw_limits[i]); + /* 4 bits per TC, 4th bit is reserved */ + veb->bw_info.bw_ets_max[i] = + (uint8_t)((bw_max >> (i * I40E_4_BIT_WIDTH)) & + RTE_LEN2MASK(3, uint8_t)); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u:share credits %u", i, + veb->bw_info.bw_ets_share_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u:credits %u", i, + veb->bw_info.bw_ets_credits[i]); + PMD_DRV_LOG(DEBUG, "\tVEB TC%u: max credits: %u", i, + veb->bw_info.bw_ets_max[i]); + } + + veb->enabled_tc = tc_map; + + return ret; +} + + /* * i40e_vsi_config_tc - Configure VSI tc setting for given TC map * @vsi: VSI to be configured @@ -8147,7 +8495,7 @@ i40e_vsi_update_queue_mapping(struct i40e_vsi *vsi, * Returns 0 on success, negative value on failure */ static enum i40e_status_code -i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 tc_map) +i40e_vsi_config_tc(struct i40e_vsi *vsi, uint8_t tc_map) { struct i40e_aqc_configure_vsi_tc_bw_data bw_data; struct i40e_vsi_context ctxt; @@ -8289,15 +8637,27 @@ i40e_dcb_hw_configure(struct i40e_pf *pf, i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0, &hw->local_dcbx_config); + /* if Veb is created, need to update TC of it at first */ + if (main_vsi->veb) { + ret = i40e_config_switch_comp_tc(main_vsi->veb, tc_map); + if (ret) + PMD_INIT_LOG(WARNING, + "Failed configuring TC for VEB seid=%d\n", + main_vsi->veb->seid); + } /* Update each VSI */ i40e_vsi_config_tc(main_vsi, tc_map); if (main_vsi->veb) { TAILQ_FOREACH(vsi_list, &main_vsi->veb->head, list) { - /* Beside main VSI, only enable default + /* Beside main VSI and VMDQ VSIs, only enable default * TC for other VSIs */ - ret = i40e_vsi_config_tc(vsi_list->vsi, - I40E_DEFAULT_TCMAP); + if (vsi_list->vsi->type == I40E_VSI_VMDQ2) + ret = i40e_vsi_config_tc(vsi_list->vsi, + tc_map); + else + ret = i40e_vsi_config_tc(vsi_list->vsi, + I40E_DEFAULT_TCMAP); if (ret) PMD_INIT_LOG(WARNING, "Failed configuring TC for VSI seid=%d\n", @@ -8417,9 +8777,8 @@ i40e_dcb_setup(struct rte_eth_dev *dev) return -ENOTSUP; } - if (pf->vf_num != 0 || - (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)) - PMD_INIT_LOG(DEBUG, " DCB only works on main vsi."); + if (pf->vf_num != 0) + PMD_INIT_LOG(DEBUG, " DCB only works on pf and vmdq vsis."); ret = i40e_parse_dcb_configure(dev, &dcb_cfg, &tc_map); if (ret) { @@ -8444,7 +8803,7 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, struct i40e_vsi *vsi = pf->main_vsi; struct i40e_dcbx_config *dcb_cfg = &hw->local_dcbx_config; uint16_t bsf, tc_mapping; - int i; + int i, j; if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG) dcb_info->nb_tcs = rte_bsf32(vsi->enabled_tc + 1); @@ -8455,23 +8814,27 @@ i40e_dev_get_dcb_info(struct rte_eth_dev *dev, for (i = 0; i < dcb_info->nb_tcs; i++) dcb_info->tc_bws[i] = dcb_cfg->etscfg.tcbwtable[i]; - for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - if (vsi->enabled_tc & (1 << i)) { + j = 0; + do { + for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { + if (!(vsi->enabled_tc & (1 << i))) + continue; tc_mapping = rte_le_to_cpu_16(vsi->info.tc_mapping[i]); /* only main vsi support multi TCs */ - dcb_info->tc_queue.tc_rxq[0][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base = 
(tc_mapping & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) >> I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT; - dcb_info->tc_queue.tc_txq[0][i].base = - dcb_info->tc_queue.tc_rxq[0][i].base; + dcb_info->tc_queue.tc_txq[j][i].base = + dcb_info->tc_queue.tc_rxq[j][i].base; bsf = (tc_mapping & I40E_AQ_VSI_TC_QUE_NUMBER_MASK) >> I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT; - dcb_info->tc_queue.tc_rxq[0][i].nb_queue = 1 << bsf; - dcb_info->tc_queue.tc_txq[0][i].nb_queue = - dcb_info->tc_queue.tc_rxq[0][i].nb_queue; + dcb_info->tc_queue.tc_rxq[j][i].nb_queue = 1 << bsf; + dcb_info->tc_queue.tc_txq[j][i].nb_queue = + dcb_info->tc_queue.tc_rxq[j][i].nb_queue; } - } - + vsi = pf->vmdq[j].vsi; + j++; + } while (j < RTE_MIN(pf->nb_cfg_vmdq_vsi, ETH_MAX_VMDQ_POOL)); return 0; }
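
The i40e_vlan_tpid_set() hunk above programs the TPID by read-modify-writing the upper 16 bits of I40E_GL_SWT_L2TAGCTRL[reg_id] (index 2 for the outer tag, 3 for the inner) over the debug AQ interface, and skips the write when nothing changes. A minimal standalone sketch of just that register arithmetic; the read value and the 0x88A8 QinQ TPID are hypothetical, and the two macros are local stand-ins for the I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_* definitions added by the patch:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define L2TAGCTRL_ETHERTYPE_SHIFT 16
#define L2TAGCTRL_ETHERTYPE_MASK (0xFFFFULL << L2TAGCTRL_ETHERTYPE_SHIFT)

int main(void)
{
	uint64_t reg_r = 0x0000000081000020ULL; /* hypothetical read value */
	uint16_t tpid = 0x88A8;                 /* example: 802.1ad S-tag */
	uint64_t reg_w;

	/* clear only the ethertype field, then merge in the new TPID */
	reg_w = reg_r & ~L2TAGCTRL_ETHERTYPE_MASK;
	reg_w |= (uint64_t)tpid << L2TAGCTRL_ETHERTYPE_SHIFT;

	/* the driver only issues the debug AQ write when reg_w != reg_r */
	printf("0x%016" PRIx64 " -> 0x%016" PRIx64 "\n", reg_r, reg_w);
	return 0;
}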
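Many hunks swap I40E_READ_REG/I40E_WRITE_REG for i40e_read_rx_ctl()/i40e_write_rx_ctl() on the RSS registers; the surrounding pattern is unchanged either way. The hash-enable set (HENA) is a 64-bit bitmap stored in two 32-bit registers, PFQF_HENA(0) holding the low word and PFQF_HENA(1) the high word. A sketch of the join/split, with stub accessors and a placeholder bitmap standing in for I40E_RSS_HENA_ALL:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pfqf_hena[2] = { 0xdeadbeef, 0x000c0ffe }; /* fake registers */

static uint32_t read_rx_ctl(int idx) { return pfqf_hena[idx]; }
static void write_rx_ctl(int idx, uint32_t val) { pfqf_hena[idx] = val; }

int main(void)
{
	const uint64_t rss_hena_all = 0x00000fff00000fffULL; /* placeholder mask */
	uint64_t hena;

	/* join: low word from HENA(0), high word from HENA(1) */
	hena = (uint64_t)read_rx_ctl(0);
	hena |= ((uint64_t)read_rx_ctl(1)) << 32;

	hena &= ~rss_hena_all; /* e.g. disable all RSS flow types */

	/* split back into the two 32-bit halves */
	write_rx_ctl(0, (uint32_t)hena);
	write_rx_ctl(1, (uint32_t)(hena >> 32));

	printf("hena = 0x%08" PRIx32 "%08" PRIx32 "\n",
	       pfqf_hena[1], pfqf_hena[0]);
	return 0;
}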
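i40e_enable_extended_tag() works purely on PCI config space: the dword at offset 0xA4 advertises Extended Tag support (bit 5, mask 0x20) and the dword at 0xA8 enables it (bit 8). A sketch of the bit manipulation, with hypothetical dword values standing in for the rte_eal_pci_read_config()/rte_eal_pci_write_config() round trips:

#include <stdint.h>
#include <stdio.h>

#define PCI_DEV_CAP_EXT_TAG_MASK   0x20
#define PCI_DEV_CTRL_EXT_TAG_SHIFT 8
#define PCI_DEV_CTRL_EXT_TAG_MASK  (1 << PCI_DEV_CTRL_EXT_TAG_SHIFT)

int main(void)
{
	uint32_t dev_cap = 0x00008021;  /* hypothetical read of offset 0xA4 */
	uint32_t dev_ctrl = 0x00002810; /* hypothetical read of offset 0xA8 */

	if (!(dev_cap & PCI_DEV_CAP_EXT_TAG_MASK)) {
		printf("Extended Tag not supported\n");
		return 0;
	}
	if (dev_ctrl & PCI_DEV_CTRL_EXT_TAG_MASK) {
		printf("Extended Tag already enabled\n");
		return 0;
	}
	dev_ctrl |= PCI_DEV_CTRL_EXT_TAG_MASK;
	/* the driver writes this dword back to offset 0xA8 */
	printf("control dword -> 0x%08x\n", dev_ctrl);
	return 0;
}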
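Finally, the i40e_vsi_update_queue_mapping() change stops assuming every RX queue belongs to the main VSI: each VMDQ VSI owns a fixed RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM queues, and only the remainder is divided among the main VSI's enabled TCs. A worked example with hypothetical sizes (the real values come from the port configuration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t nb_rx_queues = 64;     /* hypothetical port total */
	uint16_t nb_cfg_vmdq_vsi = 4;   /* configured VMDQ pools */
	uint16_t queue_num_per_vm = 4;  /* RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM */
	uint16_t total_tc = 4;          /* enabled traffic classes */

	/* main VSI keeps whatever the VMDQ pools do not claim */
	uint16_t used_queues = nb_rx_queues -
			       nb_cfg_vmdq_vsi * queue_num_per_vm;
	uint16_t qpnum_per_tc = used_queues / total_tc;

	printf("main VSI: %u queues, %u per TC\n",
	       (unsigned)used_queues, (unsigned)qpnum_per_tc);
	printf("each VMDQ VSI: %u queues, %u per TC\n",
	       (unsigned)queue_num_per_vm,
	       (unsigned)(queue_num_per_vm / total_tc));
	return 0;
}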