diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c
index b61e605364..c572d003cb 100644
--- a/drivers/net/i40e/i40e_fdir.c
+++ b/drivers/net/i40e/i40e_fdir.c
@@ -11,7 +11,7 @@
 #include <stdarg.h>
 
 #include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_log.h>
 #include <rte_memzone.h>
 #include <rte_malloc.h>
@@ -116,7 +116,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq)
 #endif
 	rx_ctx.dtype = i40e_header_split_none;
 	rx_ctx.hsplit_0 = I40E_HEADER_SPLIT_NONE;
-	rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
+	rx_ctx.rxmax = I40E_ETH_MAX_LEN;
 	rx_ctx.tphrdesc_ena = 1;
 	rx_ctx.tphwdesc_ena = 1;
 	rx_ctx.tphdata_ena = 1;
@@ -355,6 +355,7 @@ i40e_init_flx_pld(struct i40e_pf *pf)
 			I40E_PRTQF_FLX_PIT(index + 1), 0x0000FC29);/*non-used*/
 		I40E_WRITE_REG(hw,
 			I40E_PRTQF_FLX_PIT(index + 2), 0x0000FC2A);/*non-used*/
+		pf->fdir.flex_pit_flag[i] = 0;
 	}
 
 	/* initialize the masks */
@@ -1442,6 +1443,151 @@ i40e_fdir_entry_pool_put(struct i40e_fdir_info *fdir_info,
 	rte_bitmap_set(fdir_info->fdir_flow_pool.bitmap, f->idx);
 }
 
+static int
+i40e_flow_store_flex_pit(struct i40e_pf *pf,
+			 struct i40e_fdir_flex_pit *flex_pit,
+			 enum i40e_flxpld_layer_idx layer_idx,
+			 uint8_t raw_id)
+{
+	uint8_t field_idx;
+
+	field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
+	/* Check if the configuration is conflicted */
+	if (pf->fdir.flex_pit_flag[layer_idx] &&
+	    (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
+	     pf->fdir.flex_set[field_idx].size != flex_pit->size ||
+	     pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
+		return -1;
+
+	/* Check if the configuration exists. */
+	if (pf->fdir.flex_pit_flag[layer_idx] &&
+	    (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
+	     pf->fdir.flex_set[field_idx].size == flex_pit->size &&
+	     pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
+		return 1;
+
+	pf->fdir.flex_set[field_idx].src_offset =
+		flex_pit->src_offset;
+	pf->fdir.flex_set[field_idx].size =
+		flex_pit->size;
+	pf->fdir.flex_set[field_idx].dst_offset =
+		flex_pit->dst_offset;
+
+	return 0;
+}
+
+static void
+i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
+			    enum i40e_flxpld_layer_idx layer_idx,
+			    uint8_t raw_id)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	uint32_t flx_pit, flx_ort;
+	uint16_t min_next_off = 0;
+	uint8_t field_idx;
+	uint8_t i;
+
+	if (raw_id) {
+		flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
+			  (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
+			  (layer_idx * I40E_MAX_FLXPLD_FIED);
+		I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
+	}
+
+	/* Set flex pit */
+	for (i = 0; i < raw_id; i++) {
+		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
+				     pf->fdir.flex_set[field_idx].size,
+				     pf->fdir.flex_set[field_idx].dst_offset);
+
+		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+		min_next_off = pf->fdir.flex_set[field_idx].src_offset +
+			       pf->fdir.flex_set[field_idx].size;
+	}
+
+	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
+		/* set the non-used register obeying register's constrain */
+		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
+				     NONUSE_FLX_PIT_DEST_OFF);
+		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
+		min_next_off++;
+	}
+}
+
+static int
+i40e_flow_store_flex_mask(struct i40e_pf *pf,
+			  enum i40e_filter_pctype pctype,
+			  uint8_t *mask)
+{
+	struct i40e_fdir_flex_mask flex_mask;
+	uint8_t nb_bitmask = 0;
+	uint16_t mask_tmp;
+	uint8_t i;
+
+	memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
+	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
+		mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
+		if (mask_tmp) {
+			flex_mask.word_mask |=
+				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
+			if (mask_tmp != UINT16_MAX) {
+				flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
+				flex_mask.bitmask[nb_bitmask].offset =
+					i / sizeof(uint16_t);
+				nb_bitmask++;
+				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
+					return -1;
+			}
+		}
+	}
+	flex_mask.nb_bitmask = nb_bitmask;
+
+	if (pf->fdir.flex_mask_flag[pctype] &&
+	    (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+		    sizeof(struct i40e_fdir_flex_mask))))
+		return -2;
+	else if (pf->fdir.flex_mask_flag[pctype] &&
+		 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
+			  sizeof(struct i40e_fdir_flex_mask))))
+		return 1;
+
+	memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
+	       sizeof(struct i40e_fdir_flex_mask));
+	return 0;
+}
+
+static void
+i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
+			    enum i40e_filter_pctype pctype)
+{
+	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
+	struct i40e_fdir_flex_mask *flex_mask;
+	uint32_t flxinset, fd_mask;
+	uint8_t i;
+
+	/* Set flex mask */
+	flex_mask = &pf->fdir.flex_mask[pctype];
+	flxinset = (flex_mask->word_mask <<
+		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
+		   I40E_PRTQF_FD_FLXINSET_INSET_MASK;
+	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
+
+	for (i = 0; i < flex_mask->nb_bitmask; i++) {
+		fd_mask = (flex_mask->bitmask[i].mask <<
+			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
+			  I40E_PRTQF_FD_MSK_MASK_MASK;
+		fd_mask |= ((flex_mask->bitmask[i].offset +
+			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
+			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
+			   I40E_PRTQF_FD_MSK_OFFSET_MASK;
+		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
+	}
+
+	pf->fdir.flex_mask_flag[pctype] = 1;
+}
+
 static inline unsigned char *
 i40e_find_available_buffer(struct rte_eth_dev *dev)
 {
@@ -1494,13 +1640,19 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
 {
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-	unsigned char *pkt = NULL;
-	enum i40e_filter_pctype pctype;
+	enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
 	struct i40e_fdir_info *fdir_info = &pf->fdir;
-	struct i40e_fdir_filter *node;
+	uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
 	struct i40e_fdir_filter check_filter; /* Check if the filter exists */
+	struct i40e_fdir_flex_pit flex_pit;
+	enum i40e_filter_pctype pctype;
+	struct i40e_fdir_filter *node;
+	unsigned char *pkt = NULL;
+	bool cfg_flex_pit = true;
 	bool wait_status = true;
+	uint8_t field_idx;
 	int ret = 0;
+	int i;
 
 	if (pf->fdir.fdir_vsi == NULL) {
 		PMD_DRV_LOG(ERR, "FDIR is not enabled");
@@ -1533,6 +1685,47 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
 	i40e_fdir_filter_convert(filter, &check_filter);
 
 	if (add) {
+		if (filter->input.flow_ext.is_flex_flow) {
+			for (i = 0; i < filter->input.flow_ext.raw_id; i++) {
+				layer_idx = filter->input.flow_ext.layer_idx;
+				field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
+				flex_pit = filter->input.flow_ext.flex_pit[field_idx];
+
+				/* Store flex pit to SW */
+				ret = i40e_flow_store_flex_pit(pf, &flex_pit,
+							       layer_idx, i);
+				if (ret < 0) {
+					PMD_DRV_LOG(ERR, "Conflict with the"
+						    " first flexible rule.");
+					return -EINVAL;
+				} else if (ret > 0) {
+					cfg_flex_pit = false;
+				}
+			}
+
+			if (cfg_flex_pit)
+				i40e_flow_set_fdir_flex_pit(pf, layer_idx,
+						filter->input.flow_ext.raw_id);
+
+			/* Store flex mask to SW */
+			for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i++)
+				flex_mask[i] =
+					filter->input.flow_ext.flex_mask[i];
+
+			ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
+			if (ret == -1) {
+				PMD_DRV_LOG(ERR, "Exceed maximal"
+					    " number of bitmasks");
+				return -EINVAL;
+			} else if (ret == -2) {
+				PMD_DRV_LOG(ERR, "Conflict with the"
+					    " first flexible rule");
+				return -EINVAL;
+			} else if (ret == 0) {
+				i40e_flow_set_fdir_flex_msk(pf, pctype);
+			}
+		}
+
 		ret = i40e_sw_fdir_filter_insert(pf, &check_filter);
 		if (ret < 0) {
 			PMD_DRV_LOG(ERR,
@@ -1544,6 +1737,9 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
 		    fdir_info->fdir_guarantee_free_space > 0)
 			wait_status = false;
 	} else {
+		if (filter->input.flow_ext.is_flex_flow)
+			layer_idx = filter->input.flow_ext.layer_idx;
+
 		node = i40e_sw_fdir_filter_lookup(fdir_info,
 				&check_filter.fdir.input);
 		if (!node) {
@@ -1591,6 +1787,17 @@ i40e_flow_add_del_fdir_filter(struct rte_eth_dev *dev,
 		goto error_op;
 	}
 
+	if (filter->input.flow_ext.is_flex_flow) {
+		if (add) {
+			fdir_info->flex_flow_count[layer_idx]++;
+			pf->fdir.flex_pit_flag[layer_idx] = 1;
+		} else {
+			fdir_info->flex_flow_count[layer_idx]--;
+			if (!fdir_info->flex_flow_count[layer_idx])
+				pf->fdir.flex_pit_flag[layer_idx] = 0;
+		}
+	}
+
 	if (add) {
 		fdir_info->fdir_actual_cnt++;
 		if (fdir_info->fdir_invalprio == 1 &&
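
For context, the standalone sketch below illustrates the FLX_PIT programming pattern used by i40e_flow_set_fdir_flex_pit() in the hunk above: the first raw_id entries of a payload layer receive a rule's (src_offset, size, dst_offset) triple, and the remaining entries of that layer are parked at strictly increasing source offsets, mirroring the NONUSE_FLX_PIT_FSIZE/NONUSE_FLX_PIT_DEST_OFF handling. This is an illustration, not driver code; mk_flx_pit() and its bit positions are invented for readability and do not match the real register layout in i40e_register.h.

#include <stdint.h>
#include <stdio.h>

#define MAX_FLXPLD_FIED     3 /* flex fields per payload layer, as in the driver */
#define NONUSE_FSIZE        1 /* stands in for NONUSE_FLX_PIT_FSIZE */
#define NONUSE_DEST_OFF    63 /* stands in for NONUSE_FLX_PIT_DEST_OFF */

struct flex_pit {
	uint8_t src_offset; /* offset into the payload, in words */
	uint8_t size;       /* extraction size, in words */
	uint8_t dst_offset; /* destination in the field vector */
};

/* Illustrative packing only; the real shifts and masks are the
 * I40E_PRTQF_FLX_PIT_* definitions in i40e_register.h. */
static uint32_t
mk_flx_pit(uint32_t src, uint32_t size, uint32_t dst)
{
	return src | (size << 5) | (dst << 10);
}

/* Program one layer: raw_id used entries first, then park the rest
 * at increasing source offsets so the hardware constraint on
 * ordered source offsets still holds. */
static void
program_layer(const struct flex_pit *set, uint8_t raw_id)
{
	uint16_t min_next_off = 0;
	uint8_t i;

	for (i = 0; i < raw_id; i++) {
		printf("FLX_PIT[%u] = 0x%08x (used)\n", i,
		       mk_flx_pit(set[i].src_offset, set[i].size,
				  set[i].dst_offset));
		min_next_off = set[i].src_offset + set[i].size;
	}
	for (; i < MAX_FLXPLD_FIED; i++)
		printf("FLX_PIT[%u] = 0x%08x (parked)\n", i,
		       mk_flx_pit(min_next_off++, NONUSE_FSIZE,
				  NONUSE_DEST_OFF));
}

int
main(void)
{
	/* e.g. two raw fields: 2 words at offset 0, 1 word at offset 4 */
	struct flex_pit set[MAX_FLXPLD_FIED] = {
		{ 0, 2, 0 }, { 4, 1, 2 },
	};

	program_layer(set, 2);
	return 0;
}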
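Similarly, here is a minimal sketch of the folding done by i40e_flow_store_flex_mask() above: each 16-bit word of the 16-byte flex mask either matches fully (only a bit in word_mask is set), matches partially (the inverted mask is recorded, with the hardware supporting at most I40E_FDIR_BITMASK_NUM_WORD partial words), or is ignored. The constants and the bit position chosen for word_mask are local to this sketch; the driver uses I40E_FLEX_WORD_MASK() for the exact encoding.

#include <stdint.h>
#include <stdio.h>

#define FLEX_LEN          16 /* bytes, like I40E_FDIR_MAX_FLEX_LEN */
#define MAX_PARTIAL_WORDS  2 /* like I40E_FDIR_BITMASK_NUM_WORD */

/* Fold a byte-wise flex mask into per-word full/partial masks. */
static int
fold_flex_mask(const uint8_t mask[FLEX_LEN])
{
	uint8_t word_mask = 0; /* one bit per 16-bit payload word */
	unsigned int nb_partial = 0, i;

	for (i = 0; i < FLEX_LEN; i += 2) {
		/* big-endian word, like the driver's I40E_WORD(hi, lo) */
		uint16_t w = (uint16_t)((mask[i] << 8) | mask[i + 1]);

		if (w == 0)
			continue; /* word is not compared at all */
		word_mask |= (uint8_t)(1u << (i / 2)); /* word participates */
		if (w != UINT16_MAX) {
			/* partially masked word: stored inverted, and only
			 * a limited number of partial words is supported */
			if (++nb_partial > MAX_PARTIAL_WORDS)
				return -1; /* same failure as the -1 path above */
			printf("partial word %u: ~mask = 0x%04x\n",
			       i / 2, (unsigned int)(uint16_t)~w);
		}
	}
	printf("word_mask = 0x%02x, partial words = %u\n",
	       word_mask, nb_partial);
	return 0;
}

int
main(void)
{
	/* compare word 0 in full and only the low byte of word 1 */
	uint8_t mask[FLEX_LEN] = {
		0xff, 0xff, /* word 0: full match */
		0x00, 0xff, /* word 1: partial match */
	};

	return fold_flex_mask(mask);
}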