X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_ethdev.c;h=0c32e451c5b68e8a24773431d5d087cd1b61210f;hb=6f1998a4f0411ea3b2bed7c05faa81243917a76e;hp=393b5320f53d7b24877a0d37a2dfeb7e3654a97e;hpb=50ce3e7aec8f49b29a566688dff5829a3ffe1d98;p=dpdk.git

diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c
index 393b5320f5..0c32e451c5 100644
--- a/drivers/net/i40e/i40e_ethdev.c
+++ b/drivers/net/i40e/i40e_ethdev.c
@@ -26,6 +26,7 @@
 #include
 #include
 #include
+#include <rte_bitmap.h>

 #include "i40e_logs.h"
 #include "base/i40e_prototype.h"
@@ -1045,8 +1046,15 @@ static int
 i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 {
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
 	char fdir_hash_name[RTE_HASH_NAMESIZE];
+	uint32_t alloc = hw->func_caps.fd_filters_guaranteed;
+	uint32_t best = hw->func_caps.fd_filters_best_effort;
+	struct rte_bitmap *bmp = NULL;
+	uint32_t bmp_size;
+	void *mem = NULL;
+	uint32_t i = 0;
 	int ret;

 	struct rte_hash_parameters fdir_hash_params = {
@@ -1067,6 +1075,7 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
 		return -EINVAL;
 	}
+
 	fdir_info->hash_map = rte_zmalloc("i40e_fdir_hash_map",
 					  sizeof(struct i40e_fdir_filter *) *
 					  I40E_MAX_FDIR_FILTER_NUM,
 					  0);
@@ -1077,8 +1086,74 @@ i40e_init_fdir_filter_list(struct rte_eth_dev *dev)
 		ret = -ENOMEM;
 		goto err_fdir_hash_map_alloc;
 	}
+
+	fdir_info->fdir_filter_array = rte_zmalloc("fdir_filter",
+					sizeof(struct i40e_fdir_filter) *
+					I40E_MAX_FDIR_FILTER_NUM,
+					0);
+
+	if (!fdir_info->fdir_filter_array) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir filter array!");
+		ret = -ENOMEM;
+		goto err_fdir_filter_array_alloc;
+	}
+
+	fdir_info->fdir_space_size = alloc + best;
+	fdir_info->fdir_actual_cnt = 0;
+	fdir_info->fdir_guarantee_total_space = alloc;
+	fdir_info->fdir_guarantee_free_space =
+		fdir_info->fdir_guarantee_total_space;
+
+	PMD_DRV_LOG(INFO, "FDIR guarantee space: %u, best_effort space %u.",
+		    alloc, best);
+
+	fdir_info->fdir_flow_pool.pool =
+			rte_zmalloc("i40e_fdir_entry",
+				    sizeof(struct i40e_fdir_entry) *
+				    fdir_info->fdir_space_size,
+				    0);
+
+	if (!fdir_info->fdir_flow_pool.pool) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for bitmap flow!");
+		ret = -ENOMEM;
+		goto err_fdir_bitmap_flow_alloc;
+	}
+
+	for (i = 0; i < fdir_info->fdir_space_size; i++)
+		fdir_info->fdir_flow_pool.pool[i].idx = i;
+
+	bmp_size =
+		rte_bitmap_get_memory_footprint(fdir_info->fdir_space_size);
+	mem = rte_zmalloc("fdir_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
+	if (mem == NULL) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to allocate memory for fdir bitmap!");
+		ret = -ENOMEM;
+		goto err_fdir_mem_alloc;
+	}
+	bmp = rte_bitmap_init(fdir_info->fdir_space_size, mem, bmp_size);
+	if (bmp == NULL) {
+		PMD_INIT_LOG(ERR,
+			     "Failed to initialize fdir bitmap!");
+		ret = -ENOMEM;
+		goto err_fdir_bmp_alloc;
+	}
+	for (i = 0; i < fdir_info->fdir_space_size; i++)
+		rte_bitmap_set(bmp, i);
+
+	fdir_info->fdir_flow_pool.bitmap = bmp;
+
 	return 0;

+err_fdir_bmp_alloc:
+	rte_free(mem);
+err_fdir_mem_alloc:
+	rte_free(fdir_info->fdir_flow_pool.pool);
+err_fdir_bitmap_flow_alloc:
+	rte_free(fdir_info->fdir_filter_array);
+err_fdir_filter_array_alloc:
+	rte_free(fdir_info->hash_map);
 err_fdir_hash_map_alloc:
 	rte_hash_free(fdir_info->hash_table);
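The hunk above pre-populates a static pool of struct i40e_fdir_entry alongside an rte_bitmap free map, where a set bit marks a free slot. The pool accessors are not part of this file's diff; the sketch below shows the intended get/put pattern, with helper names assumed from the rest of the series rather than confirmed by this hunk:

/* Sketch only: how a consumer of fdir_flow_pool would take and return
 * entries.  The helper names are assumptions; the real accessors live
 * in the companion i40e_fdir.c changes, outside this diff.
 */
static struct i40e_fdir_entry *
i40e_fdir_entry_pool_get(struct i40e_fdir_info *fdir_info)
{
	uint64_t slab = 0;
	uint32_t pos = 0;

	/* Find a slab that still contains a free slot. */
	if (!rte_bitmap_scan(fdir_info->fdir_flow_pool.bitmap, &pos, &slab))
		return NULL;	/* pool exhausted */

	/* rte_bitmap_scan() returns the slab base position; locate the
	 * first set bit inside the slab, then mark that slot busy.
	 */
	pos += __builtin_ctzll(slab);
	rte_bitmap_clear(fdir_info->fdir_flow_pool.bitmap, pos);
	return &fdir_info->fdir_flow_pool.pool[pos];
}

static void
i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
			 struct i40e_fdir_entry *f)
{
	/* idx was assigned at init time, so freeing is just a bit set. */
	rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, f->idx);
}

Because the entries are preallocated in one array, rule add/delete no longer goes through rte_malloc/rte_free on the hot path, which is the point of the restructuring in this patch.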
@@ -1101,6 +1176,30 @@ i40e_init_customized_info(struct i40e_pf *pf)
 	pf->esp_support = false;
 }

+static void
+i40e_init_filter_invalidation(struct i40e_pf *pf)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_fdir_info *fdir_info = &pf->fdir;
+	uint32_t glqf_ctl_reg = 0;
+
+	glqf_ctl_reg = i40e_read_rx_ctl(hw, I40E_GLQF_CTL);
+	if (!pf->support_multi_driver) {
+		fdir_info->fdir_invalprio = 1;
+		glqf_ctl_reg |= I40E_GLQF_CTL_INVALPRIO_MASK;
+		PMD_DRV_LOG(INFO, "FDIR INVALPRIO set to guaranteed first");
+		i40e_write_rx_ctl(hw, I40E_GLQF_CTL, glqf_ctl_reg);
+	} else {
+		if (glqf_ctl_reg & I40E_GLQF_CTL_INVALPRIO_MASK) {
+			fdir_info->fdir_invalprio = 1;
+			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: guaranteed first");
+		} else {
+			fdir_info->fdir_invalprio = 0;
+			PMD_DRV_LOG(INFO, "FDIR INVALPRIO is: shared first");
+		}
+	}
+}
+
 void
 i40e_init_queue_region_conf(struct rte_eth_dev *dev)
 {
@@ -1654,6 +1753,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev, void *init_params __rte_unused)
 	/* Initialize customized information */
 	i40e_init_customized_info(pf);

+	/* Initialize the filter invalidation configuration */
+	i40e_init_filter_invalidation(pf);
+
 	ret = i40e_init_ethtype_filter_list(dev);
 	if (ret < 0)
 		goto err_init_ethtype_filter_list;
@@ -1749,16 +1851,30 @@ i40e_rm_fdir_filter_list(struct i40e_pf *pf)
 	struct i40e_fdir_info *fdir_info;

 	fdir_info = &pf->fdir;
-	/* Remove all flow director rules and hash */
+
+	/* Remove all flow director rules */
+	while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list)))
+		TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
+}
+
+static void
+i40e_fdir_memory_cleanup(struct i40e_pf *pf)
+{
+	struct i40e_fdir_info *fdir_info;
+
+	fdir_info = &pf->fdir;
+
+	/* flow director memory cleanup */
 	if (fdir_info->hash_map)
 		rte_free(fdir_info->hash_map);
 	if (fdir_info->hash_table)
 		rte_hash_free(fdir_info->hash_table);
-
-	while ((p_fdir = TAILQ_FIRST(&fdir_info->fdir_list))) {
-		TAILQ_REMOVE(&fdir_info->fdir_list, p_fdir, rules);
-		rte_free(p_fdir);
-	}
+	if (fdir_info->fdir_flow_pool.bitmap)
+		rte_bitmap_free(fdir_info->fdir_flow_pool.bitmap);
+	if (fdir_info->fdir_flow_pool.pool)
+		rte_free(fdir_info->fdir_flow_pool.pool);
+	if (fdir_info->fdir_filter_array)
+		rte_free(fdir_info->fdir_filter_array);
 }

 void i40e_flex_payload_reg_set_default(struct i40e_hw *hw)
@@ -2001,7 +2117,7 @@ __vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t msix_vect,
 	I40E_WRITE_FLUSH(hw);
 }

-void
+int
 i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
 {
 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
@@ -2021,10 +2137,14 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)

 	/* VF bind interrupt */
 	if (vsi->type == I40E_VSI_SRIOV) {
+		if (vsi->nb_msix == 0) {
+			PMD_DRV_LOG(ERR, "No msix resource");
+			return -EINVAL;
+		}
 		__vsi_queues_bind_intr(vsi, msix_vect,
 				       vsi->base_queue, vsi->nb_qps,
 				       itr_idx);
-		return;
+		return 0;
 	}

 	/* PF & VMDq bind interrupt */
@@ -2041,7 +2161,10 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
 	}

 	for (i = 0; i < vsi->nb_used_qps; i++) {
-		if (nb_msix <= 1) {
+		if (vsi->nb_msix == 0) {
+			PMD_DRV_LOG(ERR, "No msix resource");
+			return -EINVAL;
+		} else if (nb_msix <= 1) {
 			if (!rte_intr_allow_others(intr_handle))
 				/* allow to share MISC_VEC_ID */
 				msix_vect = I40E_MISC_VEC_ID;
@@ -2066,9 +2189,11 @@ i40e_vsi_queues_bind_intr(struct i40e_vsi *vsi, uint16_t itr_idx)
 		msix_vect++;
 		nb_msix--;
 	}
+
+	return 0;
 }

-static void
+void
 i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
 {
 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
@@ -2095,7 +2220,7 @@ i40e_vsi_enable_queues_intr(struct i40e_vsi *vsi)
 	I40E_WRITE_FLUSH(hw);
 }

-static void
+void
 i40e_vsi_disable_queues_intr(struct i40e_vsi *vsi)
 {
 	struct rte_eth_dev *dev = vsi->adapter->eth_dev;
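The init hunk earlier splits the filter budget into guaranteed slots (fd_filters_guaranteed) and a shared best-effort remainder, tracked by fdir_actual_cnt and fdir_guarantee_free_space. This diff only defines and initializes those counters; a rough sketch of how such counters could gate rule insertion follows, with hypothetical helper names (the series' actual admission logic lives outside this file):

/* Illustrative only: plausible charge/release accounting over the
 * counters set up in i40e_init_fdir_filter_list().  Helper names and
 * the exact policy are assumptions, not part of this patch.
 */
static int
i40e_fdir_space_try_charge(struct i40e_fdir_info *fdir_info)
{
	/* Guaranteed and best-effort slots are both exhausted. */
	if (fdir_info->fdir_actual_cnt >= fdir_info->fdir_space_size)
		return -ENOSPC;

	fdir_info->fdir_actual_cnt++;
	/* Consume guaranteed space first while any remains. */
	if (fdir_info->fdir_guarantee_free_space > 0)
		fdir_info->fdir_guarantee_free_space--;
	return 0;
}

static void
i40e_fdir_space_release(struct i40e_fdir_info *fdir_info)
{
	if (fdir_info->fdir_actual_cnt > 0)
		fdir_info->fdir_actual_cnt--;
	/* Refill guaranteed space up to its configured total. */
	if (fdir_info->fdir_guarantee_free_space <
	    fdir_info->fdir_guarantee_total_space)
		fdir_info->fdir_guarantee_free_space++;
}

The INVALPRIO bit configured in i40e_init_filter_invalidation() decides which of these two spaces the hardware invalidates from first; the driver records the effective setting in fdir_invalprio so the software accounting can follow the same order.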
@@ -2306,24 +2431,21 @@ i40e_dev_start(struct rte_eth_dev *dev)
 	/* Map queues with MSIX interrupt */
 	main_vsi->nb_used_qps = dev->data->nb_rx_queues -
 		pf->nb_cfg_vmdq_vsi * RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
-	i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
+	ret = i40e_vsi_queues_bind_intr(main_vsi, I40E_ITR_INDEX_DEFAULT);
+	if (ret < 0)
+		return ret;
 	i40e_vsi_enable_queues_intr(main_vsi);

 	/* Map VMDQ VSI queues with MSIX interrupt */
 	for (i = 0; i < pf->nb_cfg_vmdq_vsi; i++) {
 		pf->vmdq[i].vsi->nb_used_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VM;
-		i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
-					  I40E_ITR_INDEX_DEFAULT);
+		ret = i40e_vsi_queues_bind_intr(pf->vmdq[i].vsi,
+						I40E_ITR_INDEX_DEFAULT);
+		if (ret < 0)
+			return ret;
 		i40e_vsi_enable_queues_intr(pf->vmdq[i].vsi);
 	}

-	/* enable FDIR MSIX interrupt */
-	if (pf->fdir.fdir_vsi) {
-		i40e_vsi_queues_bind_intr(pf->fdir.fdir_vsi,
-					  I40E_ITR_INDEX_NONE);
-		i40e_vsi_enable_queues_intr(pf->fdir.fdir_vsi);
-	}
-
 	/* Enable all queues which have been configured */
 	for (nb_rxq = 0; nb_rxq < dev->data->nb_rx_queues; nb_rxq++) {
 		ret = i40e_dev_rx_queue_start(dev, nb_rxq);
@@ -2459,10 +2581,6 @@ i40e_dev_stop(struct rte_eth_dev *dev)
 			i40e_vsi_queues_unbind_intr(pf->vmdq[i].vsi);
 	}

-	if (pf->fdir.fdir_vsi) {
-		i40e_vsi_queues_unbind_intr(pf->fdir.fdir_vsi);
-		i40e_vsi_disable_queues_intr(pf->fdir.fdir_vsi);
-	}
 	/* Clear all queues and release memory */
 	i40e_dev_clear_queues(dev);

@@ -2618,9 +2736,14 @@ i40e_dev_close(struct rte_eth_dev *dev)
 	/* Remove all flows */
 	while ((p_flow = TAILQ_FIRST(&pf->flow_list))) {
 		TAILQ_REMOVE(&pf->flow_list, p_flow, node);
-		rte_free(p_flow);
+		/* Do not free FDIR flows since they are statically allocated */
+		if (p_flow->filter_type != RTE_ETH_FILTER_FDIR)
+			rte_free(p_flow);
 	}

+	/* Release the statically allocated FDIR memory */
+	i40e_fdir_memory_cleanup(pf);
+
 	/* Remove all Traffic Manager configuration */
 	i40e_tm_conf_uninit(dev);
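For reference, a minimal self-contained sketch of the rte_bitmap lifecycle that the init path (i40e_init_fdir_filter_list) and teardown path (i40e_fdir_memory_cleanup) above rely on. The function and names here are illustrative, not part of the patch; only the rte_bitmap/rte_malloc calls are the real DPDK API:

#include <errno.h>
#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_bitmap.h>

/* Size the backing store, initialize the bitmap, mark every slot free
 * (a set bit means "free", matching the driver code above), allocate
 * one slot, then tear everything down in reverse order.
 */
static int
fdir_pool_bitmap_demo(uint32_t n_slots)
{
	uint32_t bmp_size = rte_bitmap_get_memory_footprint(n_slots);
	void *mem = rte_zmalloc("demo_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
	struct rte_bitmap *bmp;
	uint64_t slab = 0;
	uint32_t pos = 0, i;

	if (mem == NULL)
		return -ENOMEM;

	bmp = rte_bitmap_init(n_slots, mem, bmp_size);
	if (bmp == NULL) {
		rte_free(mem);
		return -ENOMEM;
	}

	for (i = 0; i < n_slots; i++)
		rte_bitmap_set(bmp, i);

	/* Allocate: find a free slot and mark it busy. */
	if (rte_bitmap_scan(bmp, &pos, &slab)) {
		pos += __builtin_ctzll(slab);
		rte_bitmap_clear(bmp, pos);
	}

	/* Mirror i40e_fdir_memory_cleanup(): bitmap first, then memory. */
	rte_bitmap_free(bmp);
	rte_free(mem);
	return 0;
}

Note the same ordering discipline as the error labels in i40e_init_fdir_filter_list: each teardown step undoes exactly one allocation, most recent first.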