X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_flow.c;h=f303df4ad86622a9804e51c49916fa7e13d7a310;hb=35ec45adf7a49d8ac2091e9f06dcf74d92083510;hp=6f2ff874963fcd1189805afb06eb3a88d6a12694;hpb=eb158fc756a5c0c91ed05a676b2085927d76aa63;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 6f2ff87496..f303df4ad8 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -2,8 +2,6 @@
  * Copyright(c) 2018-2019 Hisilicon Limited.
  */
 
-#include <stdbool.h>
-#include <sys/queue.h>
 #include <rte_flow_driver.h>
 #include <rte_io.h>
 #include <rte_malloc.h>
@@ -168,9 +166,9 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 	if (cnt) {
 		if (!cnt->shared || cnt->shared != shared)
 			return rte_flow_error_set(error, ENOTSUP,
-				RTE_FLOW_ERROR_TYPE_ACTION,
-				cnt,
-				"Counter id is used,shared flag not match");
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+				cnt,
+				"Counter id is used, shared flag not match");
 		cnt->ref_cnt++;
 		return 0;
 	}
@@ -178,7 +176,7 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
 	if (cnt == NULL)
 		return rte_flow_error_set(error, ENOMEM,
-					  RTE_FLOW_ERROR_TYPE_ACTION, cnt,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
 					  "Alloc mem for counter failed");
 	cnt->id = id;
 	cnt->shared = shared;
@@ -206,13 +204,12 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 	cnt = hns3_counter_lookup(dev, flow->counter_id);
 	if (cnt == NULL)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 					  "Can't find counter id");
 
 	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
 	if (ret) {
-		rte_flow_error_set(error, -ret,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "Read counter fail.");
 		return ret;
 	}
@@ -269,8 +266,8 @@ hns3_handle_action_queue(struct rte_eth_dev *dev,
 	queue = (const struct rte_flow_action_queue *)action->conf;
 	if (queue->index >= hw->used_rx_queues) {
-		hns3_err(hw, "queue ID(%d) is greater than number of "
-			 "available queue (%d) in driver.",
+		hns3_err(hw, "queue ID(%u) is greater than number of "
+			 "available queue (%u) in driver.",
 			 queue->index, hw->used_rx_queues);
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
@@ -374,9 +371,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
 			    (const struct rte_flow_action_mark *)actions->conf;
 			if (mark->id >= HNS3_MAX_FILTER_ID)
 				return rte_flow_error_set(error, EINVAL,
-						RTE_FLOW_ERROR_TYPE_ACTION,
-						actions,
-						"Invalid Mark ID");
+						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+						actions,
+						"Invalid Mark ID");
 			rule->fd_id = mark->id;
 			rule->flags |= HNS3_RULE_FLAG_FDID;
 			break;
@@ -390,9 +387,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
 			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
 			if (act_count->id >= counter_num)
 				return rte_flow_error_set(error, EINVAL,
-						RTE_FLOW_ERROR_TYPE_ACTION,
-						actions,
-						"Invalid counter id");
+						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+						actions,
+						"Invalid counter id");
 			rule->act_cnt = *act_count;
 			rule->flags |= HNS3_RULE_FLAG_COUNTER;
 			break;
@@ -549,14 +546,13 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	if (item->mask) {
 		ipv4_mask = item->mask;
-
 		if (ipv4_mask->hdr.total_length ||
 		    ipv4_mask->hdr.packet_id ||
 		    ipv4_mask->hdr.fragment_offset ||
 		    ipv4_mask->hdr.time_to_live ||
 		    ipv4_mask->hdr.hdr_checksum) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst ip,tos,proto in IPV4");
 		}
@@ -618,10 +614,10 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	if (item->mask) {
 		ipv6_mask = item->mask;
-		if (ipv6_mask->hdr.vtc_flow ||
-		    ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
+		if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
+		    ipv6_mask->hdr.hop_limits) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst ip,proto in IPV6");
 		}
@@ -674,14 +670,12 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	if (item->mask) {
 		tcp_mask = item->mask;
-		if (tcp_mask->hdr.sent_seq ||
-		    tcp_mask->hdr.recv_ack ||
-		    tcp_mask->hdr.data_off ||
-		    tcp_mask->hdr.tcp_flags ||
-		    tcp_mask->hdr.rx_win ||
-		    tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
+		if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
+		    tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
+		    tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
+		    tcp_mask->hdr.tcp_urp) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst port in TCP");
 		}
@@ -728,7 +722,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		udp_mask = item->mask;
 		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst port in UDP");
 		}
@@ -775,10 +769,9 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		sctp_mask = item->mask;
 		if (sctp_mask->hdr.cksum)
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst port in SCTP");
-
 		if (sctp_mask->hdr.src_port) {
 			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
 			rule->key_conf.mask.src_port =
@@ -920,14 +913,14 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	if (vxlan_mask->flags)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "Flags is not supported in VxLAN");
 
 	/* VNI must be totally masked or not. */
 	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
 	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "VNI must be totally masked or not in VxLAN");
 	if (vxlan_mask->vni[0]) {
 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -971,14 +964,14 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "Ver/protocal is not supported in NVGRE");
 
 	/* TNI must be totally masked or not. */
 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
 	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "TNI must be totally masked or not in NVGRE");
 
 	if (nvgre_mask->tni[0]) {
@@ -1025,13 +1018,13 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "Ver/protocal is not supported in GENEVE");
 	/* VNI must be totally masked or not. */
 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "VNI must be totally masked or not in GENEVE");
 	if (geneve_mask->vni[0]) {
 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -1062,7 +1055,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		break;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_HANDLE,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  NULL, "Unsupported tunnel type!");
 	}
 	if (ret)
@@ -1071,8 +1064,7 @@
 }
 
 static int
-hns3_parse_normal(const struct rte_flow_item *item,
-		  struct hns3_fdir_rule *rule,
+hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		  struct items_step_mngr *step_mngr,
 		  struct rte_flow_error *error)
 {
@@ -1116,7 +1108,7 @@ hns3_parse_normal(const struct rte_flow_item *item,
 		break;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_HANDLE,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  NULL,
 					  "Unsupported normal type!");
 	}
@@ -1132,7 +1124,7 @@ hns3_validate_item(const struct rte_flow_item *item,
 
 	if (item->last)
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
 					  "Not supported last point for range");
 
 	for (i = 0; i < step_mngr.count; i++) {
@@ -1216,11 +1208,6 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 					  "Fdir not supported in VF");
 
-	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
-		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
-					  "fdir_conf.mode isn't perfect");
-
 	step_mngr.items = first_items;
 	step_mngr.count = ARRAY_SIZE(first_items);
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
@@ -1333,9 +1320,8 @@ hns3_rss_conf_copy(struct hns3_rss_conf *out,
 		.key_len = in->key_len,
 		.queue_num = in->queue_num,
 	};
-	out->conf.queue =
-		memcpy(out->queue, in->queue,
-		       sizeof(*in->queue) * in->queue_num);
+	out->conf.queue = memcpy(out->queue, in->queue,
+				 sizeof(*in->queue) * in->queue_num);
 	if (in->key)
 		out->conf.key = memcpy(out->key, in->key, in->key_len);
 
@@ -1356,69 +1342,62 @@ hns3_parse_rss_filter(struct rte_eth_dev *dev,
 	const struct rte_flow_action_rss *rss;
 	const struct rte_flow_action *act;
 	uint32_t act_index = 0;
-	uint64_t flow_types;
 	uint16_t n;
 
 	NEXT_ITEM_OF_ACTION(act, actions, act_index);
-	/* Get configuration args from APP cmdline input */
 	rss = act->conf;
 
-	if (rss == NULL || rss->queue_num == 0) {
+	if (rss == NULL) {
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  act, "no valid queues");
 	}
 
+	if (rss->queue_num > RTE_DIM(rss_conf->queue))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+					  "queue number configured exceeds "
+					  "queue buffer size driver supported");
+
 	for (n = 0; n < rss->queue_num; n++) {
-		if (rss->queue[n] < dev->data->nb_rx_queues)
+		if (rss->queue[n] < hw->alloc_rss_size)
 			continue;
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION,
-					  act,
-					  "queue id > max number of queues");
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+					  "queue id must be less than queue number allocated to a TC");
 	}
 
-	/* Parse flow types of RSS */
 	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  act,
 					  "Flow types is unsupported by "
 					  "hns3's RSS");
-
-	flow_types = rss->types & HNS3_ETH_RSS_SUPPORT;
-	if (flow_types != rss->types)
-		hns3_warn(hw, "RSS flow types(%" PRIx64 ") include unsupported "
-			  "flow types", rss->types);
-
-	/* Parse RSS related parameters from RSS configuration */
-	switch (rss->func) {
-	case RTE_ETH_HASH_FUNCTION_DEFAULT:
-	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
-	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
-	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
-		break;
-	default:
+	if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ACTION, act,
-					  "input RSS hash functions are not supported");
-	}
-
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+					  "RSS hash func are not supported");
 	if (rss->level)
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ACTION, act,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
 					  "a nonzero RSS encapsulation level is not supported");
 	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ACTION, act,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
 					  "RSS hash key must be exactly 40 bytes");
-	if (rss->queue_num > RTE_DIM(rss_conf->queue))
-		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ACTION, act,
-					  "too many queues for RSS context");
+
+	/*
+	 * For Kunpeng920 and Kunpeng930 NIC hardware, it is not supported to
+	 * use dst port/src port fields to RSS hash for the following packet
+	 * types.
+	 * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
+	 * Besides, for Kunpeng920, The NIC hardware is not supported to use
+	 * src/dst port fields to RSS hash for IPV6 SCTP packet type.
+	 */
 	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
-	    (rss->types & ETH_RSS_IP))
+	    (rss->types & ETH_RSS_IP ||
+	    (!hw->rss_info.ipv6_sctp_offload_supported &&
+	    rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 					  &rss->types,
@@ -1458,9 +1437,8 @@ hns3_disable_rss(struct hns3_hw *hw)
 static void
 hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
 {
-	if (rss_conf->key == NULL ||
-	    rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
-		hns3_info(hw, "Default RSS hash key to be set");
+	if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
+		hns3_warn(hw, "Default RSS hash key to be set");
 		rss_conf->key = hns3_hash_key;
 		rss_conf->key_len = HNS3_RSS_KEY_SIZE;
 	}
@@ -1501,10 +1479,8 @@ hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
 	struct hns3_rss_tuple_cfg *tuple;
 	int ret;
 
-	/* Parse hash key */
 	hns3_parse_rss_key(hw, rss_config);
 
-	/* Parse hash algorithm */
 	ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
 				       &hw->rss_info.hash_algo);
 	if (ret)
@@ -1532,31 +1508,22 @@ hns3_update_indir_table(struct rte_eth_dev *dev,
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
-	uint8_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
-	uint16_t j, allow_rss_queues;
-	uint8_t queue_id;
+	uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
+	uint16_t j;
 	uint32_t i;
 
-	if (num == 0) {
-		hns3_err(hw, "No PF queues are configured to enable RSS");
-		return -ENOTSUP;
-	}
-
-	allow_rss_queues = RTE_MIN(dev->data->nb_rx_queues, hw->rss_size_max);
 	/* Fill in redirection table */
 	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
-	       HNS3_RSS_IND_TBL_SIZE);
+	       sizeof(hw->rss_info.rss_indirection_tbl));
 	for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
 		j %= num;
-		if (conf->queue[j] >= allow_rss_queues) {
-			hns3_err(hw, "Invalid queue id(%u) to be set in "
-				 "redirection table, max number of rss "
-				 "queues: %u", conf->queue[j],
-				 allow_rss_queues);
+		if (conf->queue[j] >= hw->alloc_rss_size) {
+			hns3_err(hw, "queue id(%u) set to redirection table "
+				 "exceeds queue number(%u) allocated to a TC.",
+				 conf->queue[j], hw->alloc_rss_size);
 			return -EINVAL;
 		}
-		queue_id = conf->queue[j];
-		indir_tbl[i] = queue_id;
+		indir_tbl[i] = conf->queue[j];
 	}
 
 	return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
@@ -1566,7 +1533,9 @@ static int
 hns3_config_rss_filter(struct rte_eth_dev *dev,
 		       const struct hns3_rss_conf *conf, bool add)
 {
+	struct hns3_process_private *process_list = dev->process_private;
 	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_hw *hw = &hns->hw;
 	struct hns3_rss_conf *rss_info;
 	uint64_t flow_types;
@@ -1597,45 +1566,42 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
 	rss_info = &hw->rss_info;
 	if (!add) {
-		if (hns3_action_rss_same(&rss_info->conf, &rss_flow_conf)) {
-			ret = hns3_disable_rss(hw);
-			if (ret) {
-				hns3_err(hw, "RSS disable failed(%d)", ret);
-				return ret;
-			}
+		if (!conf->valid)
+			return 0;
 
-			if (rss_flow_conf.queue_num) {
-				/*
-				 * Due the content of queue pointer have been
-				 * reset to 0, the rss_info->conf.queue should
-				 * be set NULL.
-				 */
-				rss_info->conf.queue = NULL;
-				rss_info->conf.queue_num = 0;
-			}
+		ret = hns3_disable_rss(hw);
+		if (ret) {
+			hns3_err(hw, "RSS disable failed(%d)", ret);
+			return ret;
+		}
 
-			/* set RSS func invalid after flushed */
-			rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
-			return 0;
+		if (rss_flow_conf.queue_num) {
+			/*
+			 * Due the content of queue pointer have been reset to
+			 * 0, the rss_info->conf.queue should be set NULL
+			 */
+			rss_info->conf.queue = NULL;
+			rss_info->conf.queue_num = 0;
 		}
-		return -EINVAL;
-	}
 
-	/* Get rx queues num */
-	num = dev->data->nb_rx_queues;
+		/* set RSS func invalid after flushed */
+		rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
+		return 0;
+	}
 
 	/* Set rx queues to use */
-	num = RTE_MIN(num, rss_flow_conf.queue_num);
+	num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
 	if (rss_flow_conf.queue_num > num)
 		hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated",
 			  rss_flow_conf.queue_num);
 	hns3_info(hw, "Max of contiguous %u PF queues are configured", num);
 
 	rte_spinlock_lock(&hw->lock);
-	/* Update redirection talbe of rss */
-	ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
-	if (ret)
-		goto rss_config_err;
+	if (num) {
+		ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
+		if (ret)
+			goto rss_config_err;
+	}
 
 	/* Set hash algorithm and flow types by the user's config */
 	ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
@@ -1648,35 +1614,60 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
 		goto rss_config_err;
 	}
 
+	/*
+	 * When create a new RSS rule, the old rule will be overlaid and set
+	 * invalid.
+	 */
+	TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
+		rss_filter_ptr->filter_info.valid = false;
+
 rss_config_err:
 	rte_spinlock_unlock(&hw->lock);
 
 	return ret;
 }
 
-/* Remove the rss filter */
 static int
 hns3_clear_rss_filter(struct rte_eth_dev *dev)
 {
+	struct hns3_process_private *process_list = dev->process_private;
 	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_hw *hw = &hns->hw;
+	int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
+	int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
+	int ret = 0;
 
-	if (hw->rss_info.conf.queue_num == 0)
-		return 0;
+	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	while (rss_filter_ptr) {
+		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
+			     entries);
+		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
+					     false);
+		if (ret)
+			rss_rule_fail_cnt++;
+		else
+			rss_rule_succ_cnt++;
+		rte_free(rss_filter_ptr);
+		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	}
+
+	if (rss_rule_fail_cnt) {
+		hns3_err(hw, "fail to delete all RSS filters, success num = %d "
+			     "fail num = %d", rss_rule_succ_cnt,
+			     rss_rule_fail_cnt);
+		ret = -EIO;
+	}
 
-	return hns3_config_rss_filter(dev, &hw->rss_info, false);
+	return ret;
 }
 
-/* Restore the rss filter */
 int
 hns3_restore_rss_filter(struct rte_eth_dev *dev)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 
-	if (hw->rss_info.conf.queue_num == 0)
-		return 0;
-
 	/* When user flush all rules, it doesn't need to restore RSS rule */
 	if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
 		return 0;
@@ -1692,7 +1683,6 @@ hns3_flow_parse_rss(struct rte_eth_dev *dev,
 	struct hns3_hw *hw = &hns->hw;
 	bool ret;
 
-	/* Action rss same */
 	ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
 	if (ret) {
 		hns3_err(hw, "Enter duplicate RSS configuration : %d", ret);
%d", ret); @@ -1781,17 +1771,15 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0); if (flow == NULL) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to allocate flow memory"); + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to allocate flow memory"); return NULL; } flow_node = rte_zmalloc("hns3 flow node", sizeof(struct hns3_flow_mem), 0); if (flow_node == NULL) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Failed to allocate flow list memory"); + rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, + NULL, "Failed to allocate flow list memory"); rte_free(flow); return NULL; } @@ -1816,8 +1804,9 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, ret = -ENOMEM; goto err; } - memcpy(&rss_filter_ptr->filter_info, rss_conf, - sizeof(struct hns3_rss_conf)); + hns3_rss_conf_copy(&rss_filter_ptr->filter_info, + &rss_conf->conf); + rss_filter_ptr->filter_info.valid = true; TAILQ_INSERT_TAIL(&process_list->filter_rss_list, rss_filter_ptr, entries); @@ -1849,6 +1838,7 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, ret = -ENOMEM; goto err_fdir; } + memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule, sizeof(struct hns3_fdir_rule)); TAILQ_INSERT_TAIL(&process_list->fdir_list, @@ -1883,7 +1873,6 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, struct hns3_fdir_rule_ele *fdir_rule_ptr; struct hns3_rss_conf_ele *rss_filter_ptr; struct hns3_flow_mem *flow_node; - struct hns3_hw *hw = &hns->hw; enum rte_filter_type filter_type; struct hns3_fdir_rule fdir_rule; int ret; @@ -1913,7 +1902,8 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, break; case RTE_ETH_FILTER_HASH: rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule; - ret = hns3_config_rss_filter(dev, &hw->rss_info, false); + ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info, + false); if (ret) return rte_flow_error_set(error, EIO, RTE_FLOW_ERROR_TYPE_HANDLE,