X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_flow.c;h=4fb129e32b4e78b8c298dac14c313309206549b4;hb=d743042821de258b8034a95530200a3a9cd59678;hp=06fabcd4621dafc9bd280eeb7360dedfec2057eb;hpb=fcba820d9b9e34007223590d4c75417ed42563c1;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 06fabcd462..4fb129e32b 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -11,6 +11,15 @@
 #include "hns3_ethdev.h"
 #include "hns3_logs.h"
 
+/* Default hash keys */
+static uint8_t hns3_hash_key[] = {
+	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
 
@@ -81,6 +90,58 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
 		dst[i] = rte_be_to_cpu_32(src[i]);
 }
 
+/*
+ * This function is used to find the general RSS action.
+ * 1. RSS is used to spread packets among several queues. The flow API
+ *    provides struct rte_flow_action_rss; the user can configure its
+ *    fields, such as func/level/types/key/queue, to control RSS.
+ * 2. The flow API also supports queue region configuration for hns3. It
+ *    is implemented by FDIR + RSS in hns3 hardware: the user can create
+ *    one FDIR rule whose action is an RSS queue region.
+ * 3. When the action is RSS, the following rule distinguishes the cases:
+ *    Case 1: the pattern has ETH and the action's queue_num > 0; this is
+ *            a queue region configuration.
+ *    Otherwise: a general RSS action.
+ */
+static const struct rte_flow_action *
+hns3_find_rss_general_action(const struct rte_flow_item pattern[],
+			     const struct rte_flow_action actions[])
+{
+	const struct rte_flow_action *act = NULL;
+	const struct hns3_rss_conf *rss;
+	bool have_eth = false;
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+			act = actions;
+			break;
+		}
+	}
+	if (!act)
+		return NULL;
+
+	for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+		if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
+			have_eth = true;
+			break;
+		}
+	}
+
+	rss = act->conf;
+	if (have_eth && rss->conf.queue_num) {
+		/*
+		 * The pattern has ETH and the action's queue_num > 0, so this
+		 * is a queue region configuration.
+		 * Because queue region is implemented by FDIR + RSS in hns3
+		 * hardware, it needs to enter the FDIR process, so return
+		 * NULL here to avoid entering the RSS process.
+		 */
+		return NULL;
+	}
+
+	return act;
+}
+
 static inline struct hns3_flow_counter *
 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
 {
@@ -107,9 +168,9 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 	if (cnt) {
 		if (!cnt->shared || cnt->shared != shared)
 			return rte_flow_error_set(error, ENOTSUP,
-						  RTE_FLOW_ERROR_TYPE_ACTION,
-						  cnt,
-						  "Counter id is used,shared flag not match");
+						  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+						  cnt,
+						  "Counter id is used, shared flag not match");
 		cnt->ref_cnt++;
 		return 0;
 	}
@@ -117,7 +178,7 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
 	if (cnt == NULL)
 		return rte_flow_error_set(error, ENOMEM,
-					  RTE_FLOW_ERROR_TYPE_ACTION, cnt,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
 					  "Alloc mem for counter failed");
 	cnt->id = id;
 	cnt->shared = shared;
@@ -145,13 +206,13 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 	cnt = hns3_counter_lookup(dev, flow->counter_id);
 	if (cnt == NULL)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 					  "Can't find counter id");
 
 	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
 	if (ret) {
 		rte_flow_error_set(error, -ret,
-				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				   RTE_FLOW_ERROR_TYPE_HANDLE,
 				   NULL, "Read counter fail.");
 		return ret;
 	}
@@ -203,15 +264,62 @@ hns3_handle_action_queue(struct rte_eth_dev *dev,
 			 struct rte_flow_error *error)
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
-	struct hns3_hw *hw = &hns->hw;
 	const struct rte_flow_action_queue *queue;
+	struct hns3_hw *hw = &hns->hw;
 
 	queue = (const struct rte_flow_action_queue *)action->conf;
-	if (queue->index >= hw->data->nb_rx_queues)
+	if (queue->index >= hw->used_rx_queues) {
+		hns3_err(hw, "queue ID(%d) is greater than the number of "
+			 "available queues (%d) in the driver.",
+			 queue->index, hw->used_rx_queues);
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ACTION, action,
-					  "Invalid queue ID in PF");
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  action, "Invalid queue ID in PF");
+	}
+
+	rule->queue_id = queue->index;
+	rule->nb_queues = 1;
+	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
+	return 0;
+}
+
+static int
+hns3_handle_action_queue_region(struct rte_eth_dev *dev,
+				const struct rte_flow_action *action,
+				struct hns3_fdir_rule *rule,
+				struct rte_flow_error *error)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	const struct rte_flow_action_rss *conf = action->conf;
+	struct hns3_hw *hw = &hns->hw;
+	uint16_t idx;
+
+	if (!hns3_dev_fd_queue_region_supported(hw))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, action,
+					  "Queue region configuration is not supported!");
+
+	if ((!rte_is_power_of_2(conf->queue_num)) ||
+	    conf->queue_num > hw->rss_size_max ||
+	    conf->queue[0] >= hw->used_rx_queues ||
+	    conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
+		return rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
+			"Invalid start queue ID and queue num! The start queue "
+			"ID must be valid, and the queue num must be a power "
+			"of 2 and <= rss_size_max.");
+	}
+
+	for (idx = 1; idx < conf->queue_num; idx++) {
+		if (conf->queue[idx] != conf->queue[idx - 1] + 1)
+			return rte_flow_error_set(error, EINVAL,
+				RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
+				"Invalid queue ID sequence! The queue IDs "
+				"must increase continuously.");
+	}
+
+	rule->queue_id = conf->queue[0];
+	rule->nb_queues = conf->queue_num;
 	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
 	return 0;
 }
@@ -248,14 +356,27 @@ hns3_handle_actions(struct rte_eth_dev *dev,
 		case RTE_FLOW_ACTION_TYPE_DROP:
 			rule->action = HNS3_FD_ACTION_DROP_PACKET;
 			break;
+		/*
+		 * Here the real action of RSS is a queue region.
+		 * Queue region is implemented by FDIR + RSS in hns3 hardware:
+		 * the FDIR action selects one queue region (start_queue_id
+		 * and queue_num), then RSS spreads packets across the queue
+		 * region by the RSS algorithm.
+		 */
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			ret = hns3_handle_action_queue_region(dev, actions,
+							      rule, error);
+			if (ret)
+				return ret;
+			break;
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			mark = (const struct rte_flow_action_mark *)actions->conf;
 			if (mark->id >= HNS3_MAX_FILTER_ID)
 				return rte_flow_error_set(error, EINVAL,
-							  RTE_FLOW_ERROR_TYPE_ACTION,
-							  actions,
-							  "Invalid Mark ID");
+						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+						actions,
+						"Invalid Mark ID");
 			rule->fd_id = mark->id;
 			rule->flags |= HNS3_RULE_FLAG_FDID;
 			break;
@@ -269,9 +390,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
 			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
 			if (act_count->id >= counter_num)
 				return rte_flow_error_set(error, EINVAL,
-							  RTE_FLOW_ERROR_TYPE_ACTION,
-							  actions,
-							  "Invalid counter id");
+						RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+						actions,
+						"Invalid counter id");
 			rule->act_cnt = *act_count;
 			rule->flags |= HNS3_RULE_FLAG_COUNTER;
 			break;
@@ -435,7 +556,7 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		    ipv4_mask->hdr.time_to_live ||
 		    ipv4_mask->hdr.hdr_checksum) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst ip,tos,proto in IPV4");
 		}
@@ -500,7 +621,7 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		if (ipv6_mask->hdr.vtc_flow ||
 		    ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst ip,proto in IPV6");
 		}
@@ -560,7 +681,7 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		    tcp_mask->hdr.rx_win ||
 		    tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst port in TCP");
 		}
@@ -607,7 +728,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		udp_mask = item->mask;
 		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst port in UDP");
 		}
@@ -654,7 +775,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		sctp_mask = item->mask;
 		if (sctp_mask->hdr.cksum)
 			return rte_flow_error_set(error, EINVAL,
-						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 						  item,
 						  "Only support src & dst port in SCTP");
 
@@ -799,14 +920,14 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
 	if (vxlan_mask->flags)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "Flags is not supported in VxLAN");
 
 	/* VNI must be totally masked or not. */
 	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
 	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "VNI must be totally masked or not in VxLAN");
 	if (vxlan_mask->vni[0]) {
 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -850,14 +971,14 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
 	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "Ver/protocal is not supported in NVGRE");
 
 	/* TNI must be totally masked or not. */
 	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
 	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "TNI must be totally masked or not in NVGRE");
 
 	if (nvgre_mask->tni[0]) {
@@ -904,13 +1025,13 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 
 	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "Ver/protocal is not supported in GENEVE");
 	/* VNI must be totally masked or not. */
 	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
 	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
 		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ITEM, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 					  "VNI must be totally masked or not in GENEVE");
 	if (geneve_mask->vni[0]) {
 		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -941,7 +1062,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 		break;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_HANDLE,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  NULL, "Unsupported tunnel type!");
 	}
 	if (ret)
@@ -995,7 +1116,7 @@ hns3_parse_normal(const struct rte_flow_item *item,
 		break;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_HANDLE,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
 					  NULL, "Unsupported normal type!");
 	}
 
@@ -1011,7 +1132,7 @@ hns3_validate_item(const struct rte_flow_item *item,
 
 	if (item->last)
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
+					  RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
 					  "Not supported last point for range");
 
 	for (i = 0; i < step_mngr.count; i++) {
@@ -1097,7 +1218,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 
 	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
 		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 					  "fdir_conf.mode isn't perfect");
 
 	step_mngr.items = first_items;
@@ -1132,6 +1253,7 @@ hns3_filterlist_init(struct rte_eth_dev *dev)
 	struct hns3_process_private *process_list = dev->process_private;
 
 	TAILQ_INIT(&process_list->fdir_list);
+	TAILQ_INIT(&process_list->filter_rss_list);
 	TAILQ_INIT(&process_list->flow_list);
 }
 
@@ -1140,6 +1262,7 @@ hns3_filterlist_flush(struct rte_eth_dev *dev)
 {
 	struct hns3_process_private *process_list = dev->process_private;
 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
+	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_flow_mem *flow_node;
 
 	fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
@@ -1149,6 +1272,14 @@ hns3_filterlist_flush(struct rte_eth_dev *dev)
 		fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
 	}
 
+	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	while (rss_filter_ptr) {
+		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
+			     entries);
+		rte_free(rss_filter_ptr);
+		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	}
+
 	flow_node = TAILQ_FIRST(&process_list->flow_list);
 	while (flow_node) {
 		TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
@@ -1158,6 +1289,423 @@ hns3_filterlist_flush(struct rte_eth_dev *dev)
 	}
 }
 
+static bool
+hns3_action_rss_same(const struct rte_flow_action_rss *comp,
+		     const struct rte_flow_action_rss *with)
+{
+	bool func_is_same;
+
+	/*
+	 * When the user flushes all RSS rules, the RSS func is set to the
+	 * invalid value RTE_ETH_HASH_FUNCTION_MAX. Any flow created after
+	 * the flush therefore has a valid func that differs from the stored
+	 * one. Otherwise, when the user creates an RSS action whose func is
+	 * RTE_ETH_HASH_FUNCTION_DEFAULT, the func is treated as the same
+	 * between consecutive RSS flows.
+	 */
+	if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
+		func_is_same = false;
+	else
+		func_is_same = (with->func ? (comp->func == with->func) : true);
+
+	return (func_is_same &&
+		comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
+		comp->level == with->level && comp->key_len == with->key_len &&
+		comp->queue_num == with->queue_num &&
+		!memcmp(comp->key, with->key, with->key_len) &&
+		!memcmp(comp->queue, with->queue,
+			sizeof(*with->queue) * with->queue_num));
+}
+
+static int
+hns3_rss_conf_copy(struct hns3_rss_conf *out,
+		   const struct rte_flow_action_rss *in)
+{
+	if (in->key_len > RTE_DIM(out->key) ||
+	    in->queue_num > RTE_DIM(out->queue))
+		return -EINVAL;
+	if (in->key == NULL && in->key_len)
+		return -EINVAL;
+	out->conf = (struct rte_flow_action_rss) {
+		.func = in->func,
+		.level = in->level,
+		.types = in->types,
+		.key_len = in->key_len,
+		.queue_num = in->queue_num,
+	};
+	out->conf.queue =
+		memcpy(out->queue, in->queue,
+		       sizeof(*in->queue) * in->queue_num);
+	if (in->key)
+		out->conf.key = memcpy(out->key, in->key, in->key_len);
+
+	return 0;
+}
+
+/*
+ * This function is used to parse and validate the RSS action.
+ */
+static int
+hns3_parse_rss_filter(struct rte_eth_dev *dev,
+		      const struct rte_flow_action *actions,
+		      struct rte_flow_error *error)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_rss_conf *rss_conf = &hw->rss_info;
+	const struct rte_flow_action_rss *rss;
+	const struct rte_flow_action *act;
+	uint32_t act_index = 0;
+	uint16_t n;
+
+	NEXT_ITEM_OF_ACTION(act, actions, act_index);
+	rss = act->conf;
+
+	if (rss == NULL) {
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  act, "no valid queues");
+	}
+
+	for (n = 0; n < rss->queue_num; n++) {
+		if (rss->queue[n] < dev->data->nb_rx_queues)
+			continue;
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  act,
+					  "queue id > max number of queues");
+	}
+
+	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  act,
+					  "Flow types are unsupported by "
+					  "hns3's RSS");
+	if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+					  "RSS hash func is not supported");
+	if (rss->level)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+					  "a nonzero RSS encapsulation level is not supported");
+	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+					  "RSS hash key must be exactly 40 bytes");
+	if (rss->queue_num > RTE_DIM(rss_conf->queue))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+					  "too many queues for RSS context");
+
+	if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
+	    (rss->types & ETH_RSS_IP))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					  &rss->types,
+					  "input RSS types are not supported");
+
+	act_index++;
+
+	/* Check that the next non-void action is END */
+	NEXT_ITEM_OF_ACTION(act, actions, act_index);
+	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+		memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION,
+					  act, "Not supported action.");
+	}
+
+	return 0;
+}
+
+static int
+hns3_disable_rss(struct hns3_hw *hw)
+{
+	int ret;
+
+	/* Reset the redirection table to queue 0 */
+	ret = hns3_rss_reset_indir_table(hw);
+	if (ret)
+		return ret;
+
+	/* Disable RSS */
+	hw->rss_info.conf.types = 0;
+	hw->rss_dis_flag = true;
+
+	return 0;
+}
+
+static void
+hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
+{
+	if (rss_conf->key == NULL ||
+	    rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
+		hns3_info(hw, "Default RSS hash key to be set");
+		rss_conf->key = hns3_hash_key;
+		rss_conf->key_len = HNS3_RSS_KEY_SIZE;
+	}
+}
+
+static int
+hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
+			 uint8_t *hash_algo)
+{
+	enum rte_eth_hash_function algo_func = *func;
+
+	switch (algo_func) {
+	case RTE_ETH_HASH_FUNCTION_DEFAULT:
+		/* Keep *hash_algo as it was */
+		algo_func = hw->rss_info.conf.func;
+		break;
+	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
+		*hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
+		break;
+	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
+		*hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
+		break;
+	case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
+		*hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
+		break;
+	default:
+		hns3_err(hw, "Invalid RSS algorithm configuration(%u)",
+			 algo_func);
+		return -EINVAL;
+	}
+	*func = algo_func;
+
+	return 0;
+}
+
+static int
+hns3_hw_rss_hash_set(struct hns3_hw *hw,
+		     struct rte_flow_action_rss *rss_config)
+{
+	struct hns3_rss_tuple_cfg *tuple;
+	int ret;
+
+	/* Parse the hash key */
+	hns3_parse_rss_key(hw, rss_config);
+
+	/* Parse the hash algorithm */
+	ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
+				       &hw->rss_info.hash_algo);
+	if (ret)
+		return ret;
+
+	ret = hns3_set_rss_algo_key(hw, rss_config->key);
+	if (ret)
+		return ret;
+
+	/* Update the algorithm recorded for the hardware */
+	hw->rss_info.conf.func = rss_config->func;
+
+	/* Set the supported flow types */
+	tuple = &hw->rss_info.rss_tuple_sets;
+	ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
+	if (ret)
+		hns3_err(hw, "Update RSS tuples by rss hf failed(%d)", ret);
+
+	return ret;
+}
+
+static int
+hns3_update_indir_table(struct rte_eth_dev *dev,
+			const struct rte_flow_action_rss *conf, uint16_t num)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
+	uint16_t j, allow_rss_queues;
+	uint32_t i;
+
+	allow_rss_queues = RTE_MIN(dev->data->nb_rx_queues, hw->rss_size_max);
+	/* Fill in the redirection table */
+	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
+	       sizeof(hw->rss_info.rss_indirection_tbl));
+	for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
+		j %= num;
+		if (conf->queue[j] >= allow_rss_queues) {
+			hns3_err(hw, "Invalid queue id(%u) to be set in "
+				 "redirection table, max number of rss "
+				 "queues: %u", conf->queue[j],
+				 allow_rss_queues);
+			return -EINVAL;
+		}
+		indir_tbl[i] = conf->queue[j];
+	}
+
+	return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
+}
+
+static int
+hns3_config_rss_filter(struct rte_eth_dev *dev,
+		       const struct hns3_rss_conf *conf, bool add)
+{
+	struct hns3_process_private *process_list = dev->process_private;
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_rss_conf_ele *rss_filter_ptr;
+	struct hns3_hw *hw = &hns->hw;
+	struct hns3_rss_conf *rss_info;
+	uint64_t flow_types;
+	uint16_t num;
+	int ret;
+
+	struct rte_flow_action_rss rss_flow_conf = {
+		.func = conf->conf.func,
+		.level = conf->conf.level,
+		.types = conf->conf.types,
+		.key_len = conf->conf.key_len,
+		.queue_num = conf->conf.queue_num,
+		.key = conf->conf.key_len ?
+		       (void *)(uintptr_t)conf->conf.key : NULL,
+		.queue = conf->conf.queue,
+	};
+
+	/* Filter out the unsupported flow types */
+	flow_types = conf->conf.types ?
+		     rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
+		     hw->rss_info.conf.types;
+	if (flow_types != rss_flow_conf.types)
+		hns3_warn(hw, "modified RSS types based on hardware support, "
+			  "requested:%" PRIx64 " configured:%" PRIx64,
+			  rss_flow_conf.types, flow_types);
+	/* Update the useful flow types */
+	rss_flow_conf.types = flow_types;
+
+	rss_info = &hw->rss_info;
+	if (!add) {
+		if (!conf->valid)
+			return 0;
+
+		ret = hns3_disable_rss(hw);
+		if (ret) {
+			hns3_err(hw, "RSS disable failed(%d)", ret);
+			return ret;
+		}
+
+		if (rss_flow_conf.queue_num) {
+			/*
+			 * Because the queue contents have been reset to 0,
+			 * rss_info->conf.queue should be set to NULL.
+			 */
+			rss_info->conf.queue = NULL;
+			rss_info->conf.queue_num = 0;
+		}
+
+		/* Set the RSS func to invalid after the flush */
+		rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
+		return 0;
+	}
+
+	/* Get the number of Rx queues */
+	num = dev->data->nb_rx_queues;
+
+	/* Set the Rx queues to use */
+	num = RTE_MIN(num, rss_flow_conf.queue_num);
+	if (rss_flow_conf.queue_num > num)
+		hns3_warn(hw, "Config queue num %u exceeds the available Rx queues and is truncated",
+			  rss_flow_conf.queue_num);
+	hns3_info(hw, "A maximum of %u contiguous PF queues are configured", num);
+
+	rte_spinlock_lock(&hw->lock);
+	if (num) {
+		ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
+		if (ret)
+			goto rss_config_err;
+	}
+
+	/* Set the hash algorithm and flow types by the user's config */
+	ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
+	if (ret)
+		goto rss_config_err;
+
+	ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
+	if (ret) {
+		hns3_err(hw, "RSS config init fail(%d)", ret);
+		goto rss_config_err;
+	}
+
+	/*
+	 * When a new RSS rule is created, the old rule will be overridden
+	 * and marked invalid.
+	 */
+	TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
+		rss_filter_ptr->filter_info.valid = false;
+
+rss_config_err:
+	rte_spinlock_unlock(&hw->lock);
+
+	return ret;
+}
+
+/* Remove the rss filter */
+static int
+hns3_clear_rss_filter(struct rte_eth_dev *dev)
+{
+	struct hns3_process_private *process_list = dev->process_private;
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_rss_conf_ele *rss_filter_ptr;
+	struct hns3_hw *hw = &hns->hw;
+	int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
+	int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
+	int ret = 0;
+
+	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	while (rss_filter_ptr) {
+		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
+			     entries);
+		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
+					     false);
+		if (ret)
+			rss_rule_fail_cnt++;
+		else
+			rss_rule_succ_cnt++;
+		rte_free(rss_filter_ptr);
+		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	}
+
+	if (rss_rule_fail_cnt) {
+		hns3_err(hw, "failed to delete all RSS filters, success num = %d "
			 "fail num = %d", rss_rule_succ_cnt,
+			 rss_rule_fail_cnt);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+/* Restore the rss filter */
+int
+hns3_restore_rss_filter(struct rte_eth_dev *dev)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+
+	/* When the user flushes all rules, the RSS rule need not be restored */
+	if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
+		return 0;
+
+	return hns3_config_rss_filter(dev, &hw->rss_info, true);
+}
+
+static int
+hns3_flow_parse_rss(struct rte_eth_dev *dev,
+		    const struct hns3_rss_conf *conf, bool add)
+{
+	struct hns3_adapter *hns = dev->data->dev_private;
+	struct hns3_hw *hw = &hns->hw;
+	bool ret;
+
+	/* Reject a configuration identical to the current one */
+	ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
+	if (ret) {
+		hns3_err(hw, "Duplicate RSS configuration entered: %d", ret);
+		return -EINVAL;
+	}
+
+	return hns3_config_rss_filter(dev, conf, add);
+}
+
 static int
 hns3_flow_args_check(const struct rte_flow_attr *attr,
 		     const struct rte_flow_item pattern[],
@@ -1200,6 +1748,9 @@ hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	if (ret)
 		return ret;
 
+	if (hns3_find_rss_general_action(pattern, actions))
+		return hns3_parse_rss_filter(dev, actions, error);
+
 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
 	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
 }
@@ -1219,13 +1770,16 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	struct hns3_process_private *process_list = dev->process_private;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
+	const struct hns3_rss_conf *rss_conf;
 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
+	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_flow_mem *flow_node;
+	const struct rte_flow_action *act;
 	struct rte_flow *flow;
 	struct hns3_fdir_rule fdir_rule;
 	int ret;
 
-	ret = hns3_flow_args_check(attr, pattern, actions, error);
+	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
 	if (ret)
 		return NULL;
 
@@ -1249,6 +1803,34 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	flow_node->flow = flow;
 	TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
 
+	act = hns3_find_rss_general_action(pattern, actions);
+	if (act) {
+		rss_conf = act->conf;
+
+		ret = hns3_flow_parse_rss(dev, rss_conf, true);
+		if (ret)
+			goto err;
+
+		rss_filter_ptr = rte_zmalloc("hns3 rss filter",
+					     sizeof(struct hns3_rss_conf_ele),
+					     0);
+		if (rss_filter_ptr == NULL) {
+			hns3_err(hw,
+				 "Failed to allocate hns3_rss_filter memory");
+			ret = -ENOMEM;
+			goto err;
+		}
+		hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
+				   &rss_conf->conf);
+		rss_filter_ptr->filter_info.valid = true;
+		TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
+				  rss_filter_ptr, entries);
+
+		flow->rule = rss_filter_ptr;
+		flow->filter_type = RTE_ETH_FILTER_HASH;
+		return flow;
+	}
+
 	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
 	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
 	if (ret)
@@ -1286,6 +1868,7 @@ err_fdir:
 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
 		hns3_counter_release(dev, fdir_rule.act_cnt.id);
 
+err:
 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 			   "Failed to create flow");
 out:
@@ -1303,6 +1886,7 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 	struct hns3_process_private *process_list = dev->process_private;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
+	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_flow_mem *flow_node;
 	enum rte_filter_type filter_type;
 	struct hns3_fdir_rule fdir_rule;
@@ -1331,6 +1915,20 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 		rte_free(fdir_rule_ptr);
 		fdir_rule_ptr = NULL;
 		break;
+	case RTE_ETH_FILTER_HASH:
+		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
+		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
+					     false);
+		if (ret)
+			return rte_flow_error_set(error, EIO,
+						  RTE_FLOW_ERROR_TYPE_HANDLE,
+						  flow,
+						  "Destroy RSS fail. Try again");
+		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
+			     entries);
+		rte_free(rss_filter_ptr);
+		rss_filter_ptr = NULL;
+		break;
 	default:
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
@@ -1371,6 +1969,13 @@ hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 		hns3_counter_flush(dev);
 	}
 
+	ret = hns3_clear_rss_filter(dev);
+	if (ret) {
+		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+				   NULL, "Failed to flush rss filter");
+		return ret;
+	}
+
 	hns3_filterlist_flush(dev);
 
 	return 0;
@@ -1382,9 +1987,15 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 		const struct rte_flow_action *actions, void *data,
 		struct rte_flow_error *error)
 {
+	struct rte_flow_action_rss *rss_conf;
+	struct hns3_rss_conf_ele *rss_rule;
 	struct rte_flow_query_count *qc;
 	int ret;
 
+	if (!flow->rule)
+		return rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
+
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		switch (actions->type) {
 		case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1395,13 +2006,24 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 			if (ret)
 				return ret;
 			break;
+		case RTE_FLOW_ACTION_TYPE_RSS:
+			if (flow->filter_type != RTE_ETH_FILTER_HASH) {
+				return rte_flow_error_set(error, ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ACTION,
+					actions, "action is not supported");
+			}
+			rss_conf = (struct rte_flow_action_rss *)data;
+			rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
+			rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
+				   sizeof(struct rte_flow_action_rss));
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
-						  RTE_FLOW_ERROR_TYPE_ACTION,
-						  actions,
-						  "Query action only support count");
+						  RTE_FLOW_ERROR_TYPE_ACTION,
+						  actions,
+						  "action is not supported");
 		}
 	}
+
 	return 0;
 }
 
@@ -1428,8 +2050,6 @@ hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
 	struct hns3_hw *hw;
 	int ret = 0;
 
-	if (dev == NULL)
-		return -EINVAL;
 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	switch (filter_type) {
 	case RTE_ETH_FILTER_GENERIC:
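
For reference, the queue-region path that hns3_find_rss_general_action() and
hns3_handle_action_queue_region() implement above is selected by an
application when it combines an ETH pattern item with an RSS action whose
queue_num is nonzero. The following minimal sketch shows such a rule through
the public rte_flow API; it is illustrative only and not part of the patch.
port_id, the helper name, and the queue IDs are hypothetical, chosen to
satisfy the driver's checks (a power-of-2 count of consecutive queues).

#include <rte_flow.h>

/* Hypothetical helper: create one FDIR rule whose action is a queue region. */
static struct rte_flow *
create_queue_region_rule(uint16_t port_id)
{
	static const uint16_t queues[] = { 4, 5, 6, 7 }; /* consecutive, pow2 */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* ETH item present */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.queue_num = 4,		/* > 0: classified as queue region */
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}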
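
By contrast, an RSS action without an ETH pattern item takes the general RSS
path through hns3_parse_rss_filter() and hns3_config_rss_filter(). A minimal
sketch under the same assumptions: the all-zero 40-byte key (the size the
driver requires, HNS3_RSS_KEY_SIZE) and the RSS types are placeholders, and
level must be 0 per the checks above.

#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical helper: configure RSS hash func, types and key via a flow. */
static struct rte_flow *
create_general_rss_rule(uint16_t port_id)
{
	static const uint8_t key[40] = { 0 };	/* placeholder hash key */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },	/* no ETH item */
	};
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
		.level = 0,			/* nonzero level is rejected */
		.types = ETH_RSS_IP | ETH_RSS_TCP,
		.key_len = sizeof(key),
		.key = key,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}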
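
The RTE_FLOW_ACTION_TYPE_RSS case added to hns3_flow_query() above copies the
stored configuration back into the caller's buffer. A usage sketch, again
illustrative only with a hypothetical helper name; "flow" is a handle
returned by rte_flow_create():

#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical helper: read back the RSS configuration of a hash-type flow. */
static void
print_rss_rule(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_action_rss rss_out = { 0 };
	const struct rte_flow_action query[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	if (rte_flow_query(port_id, flow, query, &rss_out, &error) == 0)
		printf("RSS types 0x%" PRIx64 ", %u queues\n",
		       rss_out.types, rss_out.queue_num);
}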