X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fhns3%2Fhns3_flow.c;h=fc77979c5f14360a9ea865eabe9be4ce3a112470;hb=7fe741821337f3cbeecac768b8ef3a16bf21c938;hp=53708d20549fde3dfc6dbcecb089272c49d1e137;hpb=920be799dbc3fe27c77c665921d053e33d43e091;p=dpdk.git
diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 53708d2054..fc77979c5f 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1,9 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
 */
-#include
-#include
#include
#include
#include
@@ -46,8 +44,7 @@ static enum rte_flow_item_type first_items[] = {
 RTE_FLOW_ITEM_TYPE_NVGRE,
 RTE_FLOW_ITEM_TYPE_VXLAN,
 RTE_FLOW_ITEM_TYPE_GENEVE,
- RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
- RTE_FLOW_ITEM_TYPE_MPLS
+ RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type L2_next_items[] = {
@@ -67,8 +64,7 @@ static enum rte_flow_item_type L3_next_items[] = {

static enum rte_flow_item_type L4_next_items[] = {
 RTE_FLOW_ITEM_TYPE_VXLAN,
 RTE_FLOW_ITEM_TYPE_GENEVE,
- RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
- RTE_FLOW_ITEM_TYPE_MPLS
+ RTE_FLOW_ITEM_TYPE_VXLAN_GPE
};

static enum rte_flow_item_type tunnel_next_items[] = {
@@ -90,16 +86,56 @@ net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
 dst[i] = rte_be_to_cpu_32(src[i]);
}

-static inline const struct rte_flow_action *
-find_rss_action(const struct rte_flow_action actions[])
+/*
+ * This function is used to find the general RSS action.
+ * 1. RSS is used to spread packets among several queues; the flow API
+ * provides struct rte_flow_action_rss, whose fields such as
+ * func/level/types/key/queue let the user control the RSS function.
+ * 2. The flow API also supports queue region configuration for hns3. It is
+ * implemented by FDIR + RSS in hns3 hardware; the user can create one FDIR
+ * rule whose action is an RSS queue region.
+ * 3. When the action is RSS, the following rule distinguishes the two:
+ * Case 1: the pattern has ETH and the action's queue_num > 0, indicating a
+ * queue region configuration.
+ * Otherwise: a general RSS action.
+ */
+static const struct rte_flow_action *
+hns3_find_rss_general_action(const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[])
{
- const struct rte_flow_action *next = &actions[0];
+ const struct rte_flow_action *act = NULL;
+ const struct hns3_rss_conf *rss;
+ bool have_eth = false;

- for (; next->type != RTE_FLOW_ACTION_TYPE_END; next++) {
- if (next->type == RTE_FLOW_ACTION_TYPE_RSS)
- return next;
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
+ act = actions;
+ break;
+ }
 }
- return NULL;
+ if (!act)
+ return NULL;
+
+ for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
+ if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
+ have_eth = true;
+ break;
+ }
+ }
+
+ rss = act->conf;
+ if (have_eth && rss->conf.queue_num) {
+ /*
+ * The pattern has ETH and the action's queue_num > 0, so this
+ * is a queue region configuration.
+ * Because queue region is implemented by FDIR + RSS in hns3
+ * hardware, it needs to enter the FDIR process, so return NULL
+ * here to avoid entering the RSS process.
+ */
+ return NULL;
+ }
+
+ return act;
}

static inline struct hns3_flow_counter *
@@ -122,23 +158,33 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
{
 struct hns3_adapter *hns = dev->data->dev_private;
 struct hns3_pf *pf = &hns->pf;
+ struct hns3_hw *hw = &hns->hw;
 struct hns3_flow_counter *cnt;
+ uint64_t value;
+ int ret;

 cnt = hns3_counter_lookup(dev, id);
 if (cnt) {
 if (!cnt->shared || cnt->shared != shared)
 return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- cnt,
- "Counter id is used,shared flag not match");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ cnt,
+ "Counter id is used, shared flag not match");
 cnt->ref_cnt++;
 return 0;
 }

+ /* Clear the counter by read ops because the counter is read-clear */
+ ret = hns3_get_count(hw, id, &value);
+ if (ret)
+ return rte_flow_error_set(error, EIO,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+ "Clear counter failed!");
+
 cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
 if (cnt == NULL)
 return rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_ACTION, cnt,
+ RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
 "Alloc mem for counter failed");
 cnt->id = id;
 cnt->shared = shared;
@@ -166,18 +212,19 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 cnt = hns3_counter_lookup(dev, flow->counter_id);
 if (cnt == NULL)
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 "Can't find counter id");

 ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
 if (ret) {
- rte_flow_error_set(error, -ret,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
 NULL, "Read counter fail.");
 return ret;
 }
 qc->hits_set = 1;
 qc->hits = value;
+ qc->bytes_set = 0;
+ qc->bytes = 0;

 return 0;
}
@@ -229,15 +276,57 @@ hns3_handle_action_queue(struct rte_eth_dev *dev,
 queue = (const struct rte_flow_action_queue *)action->conf;
 if (queue->index >= hw->used_rx_queues) {
- hns3_err(hw, "queue ID(%d) is greater than number of "
- "available queue (%d) in driver.",
+ hns3_err(hw, "queue ID(%u) is greater than number of "
+ "available queue (%u) in driver.",
 queue->index, hw->used_rx_queues);
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, action,
- "Invalid queue ID in PF");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ action, "Invalid queue ID in PF");
 }

 rule->queue_id = queue->index;
+ rule->nb_queues = 1;
 rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
 return 0;
}

+static int
+hns3_handle_action_queue_region(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ struct hns3_fdir_rule *rule,
+ struct rte_flow_error *error)
+{
+ struct hns3_adapter *hns = dev->data->dev_private;
+ const struct rte_flow_action_rss *conf = action->conf;
+ struct hns3_hw *hw = &hns->hw;
+ uint16_t idx;
+
+ if (!hns3_dev_fd_queue_region_supported(hw))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Not support config queue region!");
+
+ if ((!rte_is_power_of_2(conf->queue_num)) ||
+ conf->queue_num > hw->rss_size_max ||
+ conf->queue[0] >= hw->used_rx_queues ||
+ conf->queue[0] + conf->queue_num > hw->used_rx_queues) {
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
+ "Invalid start queue ID and queue num! the start queue "
+ "ID must be valid, the queue num must be power of 2 and "
+ "<= rss_size_max.");
+ }
+
+ for (idx = 1; idx < conf->queue_num; idx++) {
+ if (conf->queue[idx] != conf->queue[idx - 1] + 1)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
+ "Invalid queue ID sequence! the queue IDs "
+ "must be continuously increasing.");
+ }
+
+ rule->queue_id = conf->queue[0];
+ rule->nb_queues = conf->queue_num;
 rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
 return 0;
}
@@ -274,14 +363,27 @@ hns3_handle_actions(struct rte_eth_dev *dev,
 case RTE_FLOW_ACTION_TYPE_DROP:
 rule->action = HNS3_FD_ACTION_DROP_PACKET;
 break;
+ /*
+ * Here the RSS action's real effect is a queue region.
+ * Queue region is implemented by FDIR + RSS in hns3 hardware:
+ * the FDIR's action is one queue region (start_queue_id and
+ * queue_num), then RSS spreads packets over the queue region
+ * by the RSS algorithm.
+ */
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ ret = hns3_handle_action_queue_region(dev, actions,
+ rule, error);
+ if (ret)
+ return ret;
+ break;
 case RTE_FLOW_ACTION_TYPE_MARK:
 mark = (const struct rte_flow_action_mark *)actions->conf;
 if (mark->id >= HNS3_MAX_FILTER_ID)
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "Invalid Mark ID");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ actions,
+ "Invalid Mark ID");
 rule->fd_id = mark->id;
 rule->flags |= HNS3_RULE_FLAG_FDID;
 break;
@@ -295,9 +397,9 @@ hns3_handle_actions(struct rte_eth_dev *dev,
 counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
 if (act_count->id >= counter_num)
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "Invalid counter id");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ actions,
+ "Invalid counter id");
 rule->act_cnt = *act_count;
 rule->flags |= HNS3_RULE_FLAG_COUNTER;
 break;
@@ -313,7 +415,6 @@ hns3_handle_actions(struct rte_eth_dev *dev,
 return 0;
}

-/* Parse to get the attr and action info of flow director rule. */
static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
@@ -341,17 +442,12 @@ hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
}

static int
-hns3_parse_eth(const struct rte_flow_item *item,
- struct hns3_fdir_rule *rule, struct rte_flow_error *error)
+hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
+ struct rte_flow_error *error __rte_unused)
{
 const struct rte_flow_item_eth *eth_spec;
 const struct rte_flow_item_eth *eth_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
 /* Only used to describe the protocol stack. */
 if (item->spec == NULL && item->mask == NULL)
 return 0;

@@ -391,11 +487,6 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 const struct rte_flow_item_vlan *vlan_spec;
 const struct rte_flow_item_vlan *vlan_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
 rule->key_conf.vlan_num++;
 if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
 return rte_flow_error_set(error, EINVAL,
@@ -433,6 +524,17 @@ hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 return 0;
}

+static bool
+hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
+{
+ if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
+ ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
+ ipv4_mask->hdr.hdr_checksum)
+ return false;
+
+ return true;
+}
+
static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 struct rte_flow_error *error)
@@ -440,28 +542,19 @@ hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 const struct rte_flow_item_ipv4 *ipv4_spec;
 const struct rte_flow_item_ipv4 *ipv4_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
 hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
 rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
 rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
+ /* Only used to describe the protocol stack. */
 if (item->spec == NULL && item->mask == NULL)
 return 0;

 if (item->mask) {
 ipv4_mask = item->mask;
-
- if (ipv4_mask->hdr.total_length ||
- ipv4_mask->hdr.packet_id ||
- ipv4_mask->hdr.fragment_offset ||
- ipv4_mask->hdr.time_to_live ||
- ipv4_mask->hdr.hdr_checksum) {
+ if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 item,
 "Only support src & dst ip,tos,proto in IPV4");
 }
@@ -508,11 +601,6 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 const struct rte_flow_item_ipv6 *ipv6_spec;
 const struct rte_flow_item_ipv6 *ipv6_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
 hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
 rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
 rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
@@ -523,10 +611,10 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,

 if (item->mask) {
 ipv6_mask = item->mask;
- if (ipv6_mask->hdr.vtc_flow ||
- ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
+ if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
+ ipv6_mask->hdr.hop_limits) {
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 item,
 "Only support src & dst ip,proto in IPV6");
 }
@@ -557,6 +645,18 @@ hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 return 0;
}

+static bool
+hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
+{
+ if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp)
+ return false;
+
+ return true;
+}
+
static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 struct rte_flow_error *error)
@@ -564,11 +664,6 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 const struct rte_flow_item_tcp *tcp_spec;
 const struct rte_flow_item_tcp *tcp_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
 rule->key_conf.spec.ip_proto = IPPROTO_TCP;
 rule->key_conf.mask.ip_proto = IPPROTO_MASK;
@@ -579,14 +674,9 @@ hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,

 if (item->mask) {
 tcp_mask = item->mask;
- if (tcp_mask->hdr.sent_seq ||
- tcp_mask->hdr.recv_ack ||
- tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags ||
- tcp_mask->hdr.rx_win ||
- tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
+ if (!hns3_check_tcp_mask_supported(tcp_mask)) {
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 item,
 "Only support src & dst port in TCP");
 }
@@ -617,14 +707,10 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 const struct rte_flow_item_udp *udp_spec;
 const struct rte_flow_item_udp *udp_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
 rule->key_conf.spec.ip_proto = IPPROTO_UDP;
 rule->key_conf.mask.ip_proto = IPPROTO_MASK;
+ /* Only used to describe the protocol stack. */
 if (item->spec == NULL && item->mask == NULL)
 return 0;

@@ -633,7 +719,7 @@ hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 udp_mask = item->mask;
 if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 item,
 "Only support src & dst port in UDP");
 }
@@ -663,11 +749,6 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 const struct rte_flow_item_sctp *sctp_spec;
 const struct rte_flow_item_sctp *sctp_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
-
 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
 rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
 rule->key_conf.mask.ip_proto = IPPROTO_MASK;
@@ -680,10 +761,9 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 sctp_mask = item->mask;
 if (sctp_mask->hdr.cksum)
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK,
 item,
 "Only support src & dst port in SCTP");
-
 if (sctp_mask->hdr.src_port) {
 hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
 rule->key_conf.mask.src_port =
@@ -712,7 +792,7 @@ hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 }

 /*
- * Check items before tunnel, save inner configs to outer configs,and clear
+ * Check items before tunnel, save inner configs to outer configs, and clear
 * inner configs.
 * The key consists of two parts: meta_data and tuple keys.
* Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
@@ -800,15 +880,6 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 const struct rte_flow_item_vxlan *vxlan_spec;
 const struct rte_flow_item_vxlan *vxlan_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
- else if (item->spec && (item->mask == NULL))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Tunnel packets must configure with mask");
-
 hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
 rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
@@ -825,14 +896,14 @@ hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,

 if (vxlan_mask->flags)
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 "Flags is not supported in VxLAN");

 /* VNI must be totally masked or not. */
 if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
 memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 "VNI must be totally masked or not in VxLAN");
 if (vxlan_mask->vni[0]) {
 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -851,15 +922,6 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 const struct rte_flow_item_nvgre *nvgre_spec;
 const struct rte_flow_item_nvgre *nvgre_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
- else if (item->spec && (item->mask == NULL))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Tunnel packets must configure with mask");
-
 hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
 rule->key_conf.spec.outer_proto = IPPROTO_GRE;
 rule->key_conf.mask.outer_proto = IPPROTO_MASK;
@@ -876,14 +938,14 @@ hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,

 if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 "Ver/protocol is not supported in NVGRE");

 /* TNI must be totally masked or not. */
 if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
 memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 "TNI must be totally masked or not in NVGRE");

 if (nvgre_mask->tni[0]) {
@@ -909,15 +971,6 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 const struct rte_flow_item_geneve *geneve_spec;
 const struct rte_flow_item_geneve *geneve_mask;

- if (item->spec == NULL && item->mask)
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Can't configure FDIR with mask but without spec");
- else if (item->spec && (item->mask == NULL))
- return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
- "Tunnel packets must configure with mask");
-
 hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
 rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
@@ -930,13 +983,13 @@ hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,

 if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 "Ver/protocol is not supported in GENEVE");
 /* VNI must be totally masked or not. */
 if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
 memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
 "VNI must be totally masked or not in GENEVE");
 if (geneve_mask->vni[0]) {
 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
@@ -954,6 +1007,17 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
{
 int ret;

+ if (item->spec == NULL && item->mask)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't configure FDIR with mask "
+ "but without spec");
+ else if (item->spec && (item->mask == NULL))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Tunnel packets must be configured "
+ "with mask");
+
 switch (item->type) {
 case RTE_FLOW_ITEM_TYPE_VXLAN:
 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
@@ -967,7 +1031,7 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 break;
 default:
 return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_HANDLE,
+ RTE_FLOW_ERROR_TYPE_ITEM,
 NULL, "Unsupported tunnel type!");
 }
 if (ret)
@@ -976,52 +1040,57 @@ hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
}

static int
-hns3_parse_normal(const struct rte_flow_item *item,
- struct hns3_fdir_rule *rule,
+hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 struct items_step_mngr *step_mngr,
 struct rte_flow_error *error)
{
 int ret;

+ if (item->spec == NULL && item->mask)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Can't configure FDIR with mask "
+ "but without spec");
+
 switch (item->type) {
 case RTE_FLOW_ITEM_TYPE_ETH:
 ret = hns3_parse_eth(item, rule, error);
 step_mngr->items = L2_next_items;
- step_mngr->count = ARRAY_SIZE(L2_next_items);
+ step_mngr->count = RTE_DIM(L2_next_items);
 break;
 case RTE_FLOW_ITEM_TYPE_VLAN:
 ret = hns3_parse_vlan(item, rule, error);
 step_mngr->items = L2_next_items;
- step_mngr->count = ARRAY_SIZE(L2_next_items);
+ step_mngr->count = RTE_DIM(L2_next_items);
 break;
 case RTE_FLOW_ITEM_TYPE_IPV4:
 ret = hns3_parse_ipv4(item, rule, error);
 step_mngr->items = L3_next_items;
- step_mngr->count = ARRAY_SIZE(L3_next_items);
+ step_mngr->count = RTE_DIM(L3_next_items);
 break;
 case RTE_FLOW_ITEM_TYPE_IPV6:
 ret = hns3_parse_ipv6(item, rule, error);
 step_mngr->items = L3_next_items;
- step_mngr->count = ARRAY_SIZE(L3_next_items);
+ step_mngr->count = RTE_DIM(L3_next_items);
 break;
 case RTE_FLOW_ITEM_TYPE_TCP:
 ret = hns3_parse_tcp(item, rule, error);
 step_mngr->items = L4_next_items;
- step_mngr->count = ARRAY_SIZE(L4_next_items);
+ step_mngr->count = RTE_DIM(L4_next_items);
 break;
 case RTE_FLOW_ITEM_TYPE_UDP:
 ret = hns3_parse_udp(item, rule, error);
 step_mngr->items = L4_next_items;
- step_mngr->count = ARRAY_SIZE(L4_next_items);
+ step_mngr->count = RTE_DIM(L4_next_items);
 break;
 case RTE_FLOW_ITEM_TYPE_SCTP:
 ret = hns3_parse_sctp(item, rule, error);
 step_mngr->items = L4_next_items;
- step_mngr->count = ARRAY_SIZE(L4_next_items);
+ step_mngr->count = RTE_DIM(L4_next_items);
 break;
 default:
 return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_HANDLE,
+ RTE_FLOW_ERROR_TYPE_ITEM,
 NULL, "Unsupported normal type!");
 }

@@ -1037,7 +1106,7 @@ hns3_validate_item(const struct rte_flow_item *item,

 if (item->last)
 return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
+ RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
 "Not supported last point for range");

 for (i = 0; i < step_mngr.count; i++) {
@@ -1059,49 +1128,35 @@ is_tunnel_packet(enum rte_flow_item_type type)
 if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
 type == RTE_FLOW_ITEM_TYPE_VXLAN ||
 type == RTE_FLOW_ITEM_TYPE_NVGRE ||
- type == RTE_FLOW_ITEM_TYPE_GENEVE ||
- type == RTE_FLOW_ITEM_TYPE_MPLS)
+ type == RTE_FLOW_ITEM_TYPE_GENEVE)
 return true;
 return false;
}

 /*
- * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
- * And get the flow director filter info BTW.
- * UDP/TCP/SCTP PATTERN:
- * The first not void item can be ETH or IPV4 or IPV6
- * The second not void item must be IPV4 or IPV6 if the first one is ETH.
- * The next not void item could be UDP or TCP or SCTP (optional)
- * The next not void item could be RAW (for flexbyte, optional)
- * The next not void item must be END.
- * A Fuzzy Match pattern can appear at any place before END.
- * Fuzzy Match is optional for IPV4 but is required for IPV6
- * MAC VLAN PATTERN:
- * The first not void item must be ETH.
- * The second not void item must be MAC VLAN.
- * The next not void item must be END.
- * ACTION:
- * The first not void action should be QUEUE or DROP.
- * The second not void optional action should be MARK,
- * mark_id is a uint32_t number.
- * The next not void action should be END.
- * UDP/TCP/SCTP pattern example:
- * ITEM Spec Mask
- * ETH NULL NULL
- * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
- * dst_addr 192.167.3.50 0xFFFFFFFF
- * UDP/TCP/SCTP src_port 80 0xFFFF
- * dst_port 80 0xFFFF
- * END
- * MAC VLAN pattern example:
- * ITEM Spec Mask
- * ETH dst_addr
- {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
- 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
- * MAC VLAN tci 0x2016 0xEFFF
- * END
- * Other members in mask and spec should set to 0x00.
- * Item->last should be NULL.
+ * Parse the flow director rule.
+ * The supported PATTERN:
+ * case: non-tunnel packet:
+ * ETH : src-mac, dst-mac, ethertype
+ * VLAN: tag1, tag2
+ * IPv4: src-ip, dst-ip, tos, proto
+ * IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
+ * UDP : src-port, dst-port
+ * TCP : src-port, dst-port
+ * SCTP: src-port, dst-port, tag
+ * case: tunnel packet:
+ * OUTER-ETH: ethertype
+ * OUTER-L3 : proto
+ * OUTER-L4 : src-port, dst-port
+ * TUNNEL : vni, flow-id(only valid when NVGRE)
+ * INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
+ * The supported ACTION:
+ * QUEUE
+ * DROP
+ * COUNT
+ * MARK: the id range [0, 4094]
+ * FLAG
+ * RSS: only valid if the firmware supports FD_QUEUE_REGION.
 */
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
@@ -1121,13 +1176,8 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 "Fdir not supported in VF");

- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
- "fdir_conf.mode isn't perfect");
-
 step_mngr.items = first_items;
- step_mngr.count = ARRAY_SIZE(first_items);
+ step_mngr.count = RTE_DIM(first_items);
 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
 continue;
@@ -1141,7 +1191,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 if (ret)
 return ret;
 step_mngr.items = tunnel_next_items;
- step_mngr.count = ARRAY_SIZE(tunnel_next_items);
+ step_mngr.count = RTE_DIM(tunnel_next_items);
 } else {
 ret = hns3_parse_normal(item, rule, &step_mngr, error);
 if (ret)
@@ -1152,45 +1202,34 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 return hns3_handle_actions(dev, actions, rule, error);
}

-void
-hns3_filterlist_init(struct rte_eth_dev *dev)
-{
- struct hns3_process_private *process_list = dev->process_private;
-
- TAILQ_INIT(&process_list->fdir_list);
- TAILQ_INIT(&process_list->filter_rss_list);
- TAILQ_INIT(&process_list->flow_list);
-}
-
static void
hns3_filterlist_flush(struct rte_eth_dev *dev)
{
- struct hns3_process_private *process_list = dev->process_private;
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 struct hns3_fdir_rule_ele *fdir_rule_ptr;
 struct hns3_rss_conf_ele *rss_filter_ptr;
 struct hns3_flow_mem *flow_node;

- fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
+ fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
 while (fdir_rule_ptr) {
- TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
+ TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
 rte_free(fdir_rule_ptr);
- fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
+ fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
 }

- rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+ rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
 while (rss_filter_ptr) {
- TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
- entries);
+ TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
 rte_free(rss_filter_ptr);
- rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+ rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
 }

- flow_node = TAILQ_FIRST(&process_list->flow_list);
+ flow_node = TAILQ_FIRST(&hw->flow_list);
 while (flow_node) {
- TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
+ TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
 rte_free(flow_node->flow);
 rte_free(flow_node);
- flow_node = TAILQ_FIRST(&process_list->flow_list);
+ flow_node = TAILQ_FIRST(&hw->flow_list);
 }
}

@@ -1198,10 +1237,24 @@ static bool
hns3_action_rss_same(const struct rte_flow_action_rss *comp,
 const struct rte_flow_action_rss *with)
{
- return (comp->func == with->func &&
- comp->level == with->level &&
- comp->types == with->types &&
- comp->key_len == with->key_len &&
+ bool func_is_same;
+
+ /*
+ * When the user flushes all RSS rules, the RSS func is set to the
+ * invalid value RTE_ETH_HASH_FUNCTION_MAX. If the user then creates
+ * a flow after flushing, any valid RSS func differs from the one
+ * stored before the flush. Otherwise, when the user creates an RSS
+ * action with the func specified as RTE_ETH_HASH_FUNCTION_DEFAULT,
+ * the func is considered the same between consecutive RSS flows.
+ */
+ if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
+ func_is_same = false;
+ else
+ func_is_same = with->func ? (comp->func == with->func) : true;
+
+ return (func_is_same &&
+ comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
+ comp->level == with->level && comp->key_len == with->key_len &&
 comp->queue_num == with->queue_num &&
 !memcmp(comp->key, with->key, with->key_len) &&
 !memcmp(comp->queue, with->queue,
@@ -1224,15 +1277,36 @@ hns3_rss_conf_copy(struct hns3_rss_conf *out,
 .key_len = in->key_len,
 .queue_num = in->queue_num,
 };
- out->conf.queue =
- memcpy(out->queue, in->queue,
- sizeof(*in->queue) * in->queue_num);
+ out->conf.queue = memcpy(out->queue, in->queue,
+ sizeof(*in->queue) * in->queue_num);
 if (in->key)
 out->conf.key = memcpy(out->key, in->key, in->key_len);

 return 0;
}

+static bool
+hns3_rss_input_tuple_supported(struct hns3_hw *hw,
+ const struct rte_flow_action_rss *rss)
+{
+ /*
+ * For IP packets, using the src/dst port fields in the RSS hash is
+ * not supported for the following packet types:
+ * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
+ * Besides, on Kunpeng920 the NIC hardware cannot use the src/dst
+ * port fields in the RSS hash for the IPV6 SCTP packet type, while
+ * Kunpeng930 and future Kunpeng series do support using the src/dst
+ * port fields in the RSS hash for the IPv6 SCTP packet type.
+ */
+ if (rss->types & (ETH_RSS_L4_DST_ONLY | ETH_RSS_L4_SRC_ONLY) &&
+ (rss->types & ETH_RSS_IP ||
+ (!hw->rss_info.ipv6_sctp_offload_supported &&
+ rss->types & ETH_RSS_NONFRAG_IPV6_SCTP)))
+ return false;
+
+ return true;
+}
+
/*
 * This function is used to parse and validate the RSS action.
 */
static int
hns3_parse_rss_filter(struct rte_eth_dev *dev,
 const struct rte_flow_action *actions,
 struct rte_flow_error *error)
@@ -1247,65 +1321,55 @@ hns3_parse_rss_filter(struct rte_eth_dev *dev,
 const struct rte_flow_action_rss *rss;
 const struct rte_flow_action *act;
 uint32_t act_index = 0;
- uint64_t flow_types;
 uint16_t n;

 NEXT_ITEM_OF_ACTION(act, actions, act_index);
- /* Get configuration args from APP cmdline input */
 rss = act->conf;

- if (rss == NULL || rss->queue_num == 0) {
+ if (rss == NULL) {
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 act, "no valid queues");
 }

+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+ "queue number configured exceeds "
+ "queue buffer size driver supported");
+
 for (n = 0; n < rss->queue_num; n++) {
- if (rss->queue[n] < dev->data->nb_rx_queues)
+ if (rss->queue[n] < hw->alloc_rss_size)
 continue;
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- act,
- "queue id > max number of queues");
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+ "queue id must be less than queue number allocated to a TC");
 }

- /* Parse flow types of RSS */
 if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
 return rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 act,
 "Flow types is unsupported by "
 "hns3's RSS");
-
- flow_types = rss->types & HNS3_ETH_RSS_SUPPORT;
- if (flow_types != rss->types)
- hns3_warn(hw, "RSS flow types(%" PRIx64 ") include unsupported "
- "flow types", rss->types);
-
- /* Parse RSS related parameters from RSS configuration */
- switch (rss->func) {
- case RTE_ETH_HASH_FUNCTION_DEFAULT:
- case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
- case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
- break;
- default:
+ if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
 return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "input RSS hash functions are not supported");
- }
-
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
+ "RSS hash func are not supported");
 if (rss->level)
 return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
 "a nonzero RSS encapsulation level is not supported");
 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
 return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
 "RSS hash key must be exactly 40 bytes");
- if (rss->queue_num > RTE_DIM(rss_conf->queue))
- return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION, act,
- "too many queues for RSS context");
+
+ if (!hns3_rss_input_tuple_supported(hw, rss))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ &rss->types,
+ "input RSS types are not supported");

 act_index++;

@@ -1333,6 +1397,7 @@ hns3_disable_rss(struct hns3_hw *hw)

 /* Disable RSS */
 hw->rss_info.conf.types = 0;
+ hw->rss_dis_flag = true;

 return 0;
}
@@ -1340,9 +1405,8 @@ hns3_disable_rss(struct hns3_hw *hw)
static void
hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
{
- if (rss_conf->key == NULL ||
- rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
- hns3_info(hw, "Default RSS hash key to be set");
+ if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
+ hns3_warn(hw, "Default RSS hash key to be set");
 rss_conf->key = hns3_hash_key;
 rss_conf->key_len = HNS3_RSS_KEY_SIZE;
 }
@@ -1364,8 +1428,11 @@ hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
 *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
 break;
+ case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
+ *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
+ break;
 default:
- hns3_err(hw, "Invalid RSS algorithm configuration(%u)",
+ hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
 algo_func);
 return -EINVAL;
 }
@@ -1377,28 +1444,22 @@ hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
static int
hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
{
- uint8_t hash_algo =
- (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_TOEPLITZ ?
- HNS3_RSS_HASH_ALGO_TOEPLITZ : HNS3_RSS_HASH_ALGO_SIMPLE);
 struct hns3_rss_tuple_cfg *tuple;
 int ret;

- /* Parse hash key */
 hns3_parse_rss_key(hw, rss_config);

- /* Parse hash algorithm */
- ret = hns3_parse_rss_algorithm(hw, &rss_config->func, &hash_algo);
+ ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
+ &hw->rss_info.hash_algo);
 if (ret)
 return ret;

- ret = hns3_set_rss_algo_key(hw, hash_algo, rss_config->key);
+ ret = hns3_rss_set_algo_key(hw, rss_config->key);
 if (ret)
 return ret;

- /* Update algorithm of hw */
 hw->rss_info.conf.func = rss_config->func;

- /* Set flow type supported */
 tuple = &hw->rss_info.rss_tuple_sets;
 ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
 if (ret)
@@ -1413,34 +1474,25 @@ hns3_update_indir_table(struct rte_eth_dev *dev,
{
 struct hns3_adapter *hns = dev->data->dev_private;
 struct hns3_hw *hw = &hns->hw;
- uint8_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
- uint16_t j, allow_rss_queues;
- uint8_t queue_id;
+ uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
+ uint16_t j;
 uint32_t i;

- if (num == 0) {
- hns3_err(hw, "No PF queues are configured to enable RSS");
- return -ENOTSUP;
- }
-
- allow_rss_queues = RTE_MIN(dev->data->nb_rx_queues, hw->rss_size_max);
 /* Fill in redirection table */
 memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
- HNS3_RSS_IND_TBL_SIZE);
- for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
+ sizeof(hw->rss_info.rss_indirection_tbl));
+ for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
 j %= num;
- if (conf->queue[j] >= allow_rss_queues) {
- hns3_err(hw, "Invalid queue id(%u) to be set in "
- "redirection table, max number of rss "
- "queues: %u", conf->queue[j],
- allow_rss_queues);
+ if (conf->queue[j] >= hw->alloc_rss_size) {
+ hns3_err(hw, "queue id(%u) set to redirection table "
+ "exceeds queue number(%u) allocated to a TC.",
+ conf->queue[j], hw->alloc_rss_size);
 return -EINVAL;
 }
- queue_id = conf->queue[j];
- indir_tbl[i] = queue_id;
+ indir_tbl[i] = conf->queue[j];
 }

- return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
+ return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
}

static int
@@ -1448,6 +1500,7 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
 const struct hns3_rss_conf *conf, bool add)
{
 struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_rss_conf_ele *rss_filter_ptr;
 struct hns3_hw *hw = &hns->hw;
 struct hns3_rss_conf *rss_info;
 uint64_t flow_types;
@@ -1465,56 +1518,55 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
 .queue = conf->conf.queue,
 };

- /* The types is Unsupported by hns3' RSS */
- if (!(rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT) &&
- rss_flow_conf.types) {
- hns3_err(hw,
- "Flow types(%" PRIx64 ") is unsupported by hns3's RSS",
- rss_flow_conf.types);
- return -EINVAL;
- }
-
 /* Filter the unsupported flow types */
- flow_types = rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT;
+ flow_types = conf->conf.types ?
+ rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
+ hw->rss_info.conf.types;
 if (flow_types != rss_flow_conf.types)
 hns3_warn(hw, "modified RSS types based on hardware support, "
- "requested:%" PRIx64 " configured:%" PRIx64,
+ "requested:0x%" PRIx64 " configured:0x%" PRIx64,
 rss_flow_conf.types, flow_types);
 /* Update the useful flow types */
 rss_flow_conf.types = flow_types;

- if ((rss_flow_conf.types & ETH_RSS_PROTO_MASK) == 0)
- return hns3_disable_rss(hw);
-
 rss_info = &hw->rss_info;
 if (!add) {
- if (hns3_action_rss_same(&rss_info->conf, &rss_flow_conf)) {
- ret = hns3_disable_rss(hw);
- if (ret) {
- hns3_err(hw, "RSS disable failed(%d)", ret);
- return ret;
- }
- memset(rss_info, 0, sizeof(struct hns3_rss_conf));
+ if (!conf->valid)
 return 0;
+
+ ret = hns3_disable_rss(hw);
+ if (ret) {
+ hns3_err(hw, "RSS disable failed(%d)", ret);
+ return ret;
 }
- return -EINVAL;
- }

- /* Get rx queues num */
- num = dev->data->nb_rx_queues;
+ if (rss_flow_conf.queue_num) {
+ /*
+ * Because the contents of the queue pointer have been
+ * reset to 0, rss_info->conf.queue should be set to
+ * NULL.
+ */
+ rss_info->conf.queue = NULL;
+ rss_info->conf.queue_num = 0;
+ }
+
+ /* set RSS func invalid after flushed */
+ rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
+ return 0;
+ }

 /* Set rx queues to use */
- num = RTE_MIN(num, rss_flow_conf.queue_num);
+ num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
 if (rss_flow_conf.queue_num > num)
 hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated",
 rss_flow_conf.queue_num);
 hns3_info(hw, "Max of contiguous %u PF queues are configured", num);

 rte_spinlock_lock(&hw->lock);
- /* Update redirection talbe of rss */
- ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
- if (ret)
- goto rss_config_err;
+ if (num) {
+ ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
+ if (ret)
+ goto rss_config_err;
+ }

 /* Set hash algorithm and flow types by the user's config */
 ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
@@ -1527,33 +1579,60 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
 goto rss_config_err;
 }

+ /*
+ * When a new RSS rule is created, the old rule will be overridden
+ * and marked invalid.
+ */
+ TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
+ rss_filter_ptr->filter_info.valid = false;
+
rss_config_err:
 rte_spinlock_unlock(&hw->lock);

 return ret;
}

-/* Remove the rss filter */
static int
hns3_clear_rss_filter(struct rte_eth_dev *dev)
{
 struct hns3_adapter *hns = dev->data->dev_private;
+ struct hns3_rss_conf_ele *rss_filter_ptr;
 struct hns3_hw *hw = &hns->hw;
+ int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
+ int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
+ int ret = 0;

- if (hw->rss_info.conf.queue_num == 0)
- return 0;
+ rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
+ while (rss_filter_ptr) {
+ TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
+ ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
+ false);
+ if (ret)
+ rss_rule_fail_cnt++;
+ else
+ rss_rule_succ_cnt++;
+ rte_free(rss_filter_ptr);
+ rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
+ }
+
+ if (rss_rule_fail_cnt) {
+ hns3_err(hw, "fail to delete all RSS filters, success num = %d "
+ "fail num = %d", rss_rule_succ_cnt,
+ rss_rule_fail_cnt);
+ ret = -EIO;
+ }

- return hns3_config_rss_filter(dev, &hw->rss_info, false);
+ return ret;
}

-/* Restore the rss filter */
int
hns3_restore_rss_filter(struct rte_eth_dev *dev)
{
 struct hns3_adapter *hns = dev->data->dev_private;
 struct hns3_hw *hw = &hns->hw;

- if (hw->rss_info.conf.queue_num == 0)
+ /* When the user flushes all rules, the RSS rule need not be restored */
+ if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
 return 0;

 return hns3_config_rss_filter(dev, &hw->rss_info, true);
@@ -1567,7 +1646,6 @@ hns3_flow_parse_rss(struct rte_eth_dev *dev,
 struct hns3_hw *hw = &hns->hw;
 bool ret;

- /* Action rss same */
 ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
 if (ret) {
 hns3_err(hw, "Enter duplicate RSS configuration : %d", ret);
@@ -1619,7 +1697,7 @@ hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 if (ret)
 return ret;

- if (find_rss_action(actions))
+ if (hns3_find_rss_general_action(pattern, actions))
 return hns3_parse_rss_filter(dev, actions, error);

 memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
@@ -1629,7 +1707,7 @@ hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 /*
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
- * We will let it use the filter which it hitt first.
+ * We will let it use the filter which it hit first.
 * So, the sequence matters.
 */
static struct rte_flow *
@@ -1638,7 +1716,6 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 const struct rte_flow_action actions[],
 struct rte_flow_error *error)
{
- struct hns3_process_private *process_list = dev->process_private;
 struct hns3_adapter *hns = dev->data->dev_private;
 struct hns3_hw *hw = &hns->hw;
 const struct hns3_rss_conf *rss_conf;
@@ -1650,31 +1727,29 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 struct hns3_fdir_rule fdir_rule;
 int ret;

- ret = hns3_flow_args_check(attr, pattern, actions, error);
+ ret = hns3_flow_validate(dev, attr, pattern, actions, error);
 if (ret)
 return NULL;

 flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
 if (flow == NULL) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to allocate flow memory");
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to allocate flow memory");
 return NULL;
 }
 flow_node = rte_zmalloc("hns3 flow node",
 sizeof(struct hns3_flow_mem), 0);
 if (flow_node == NULL) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
- "Failed to allocate flow list memory");
+ rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
+ NULL, "Failed to allocate flow list memory");
 rte_free(flow);
 return NULL;
 }

 flow_node->flow = flow;
- TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
+ TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);

- act = find_rss_action(actions);
+ act = hns3_find_rss_general_action(pattern, actions);
 if (act) {
 rss_conf = act->conf;

@@ -1691,10 +1766,10 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 ret = -ENOMEM;
 goto err;
 }
- memcpy(&rss_filter_ptr->filter_info, rss_conf,
- sizeof(struct hns3_rss_conf));
- TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
- rss_filter_ptr, entries);
+ hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
+ &rss_conf->conf);
+ rss_filter_ptr->filter_info.valid = true;
+ TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);

 flow->rule = rss_filter_ptr;
 flow->filter_type = RTE_ETH_FILTER_HASH;
@@ -1714,35 +1789,36 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 flow->counter_id = fdir_rule.act_cnt.id;
 }
+
+ fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
+ sizeof(struct hns3_fdir_rule_ele),
+ 0);
+ if (fdir_rule_ptr == NULL) {
+ hns3_err(hw, "failed to allocate fdir_rule memory.");
+ ret = -ENOMEM;
+ goto err_fdir;
+ }
+
 ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
 if (!ret) {
- fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
- sizeof(struct hns3_fdir_rule_ele),
- 0);
- if (fdir_rule_ptr == NULL) {
- hns3_err(hw, "Failed to allocate fdir_rule memory");
- ret = -ENOMEM;
- goto err_fdir;
- }
 memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
 sizeof(struct hns3_fdir_rule));
- TAILQ_INSERT_TAIL(&process_list->fdir_list,
- fdir_rule_ptr, entries);
+ TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
 flow->rule = fdir_rule_ptr;
 flow->filter_type = RTE_ETH_FILTER_FDIR;

 return flow;
 }

+ rte_free(fdir_rule_ptr);
err_fdir:
 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
 hns3_counter_release(dev, fdir_rule.act_cnt.id);
-
err:
 rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
 NULL, "Failed to create flow");
out:
- TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
+ TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
 rte_free(flow_node);
 rte_free(flow);
 return NULL;
@@ -1753,20 +1829,20 @@ static int
hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 struct rte_flow_error *error)
{
- struct hns3_process_private *process_list = dev->process_private;
 struct hns3_adapter *hns = dev->data->dev_private;
 struct hns3_fdir_rule_ele *fdir_rule_ptr;
 struct hns3_rss_conf_ele *rss_filter_ptr;
 struct hns3_flow_mem *flow_node;
- struct hns3_hw *hw = &hns->hw;
 enum rte_filter_type filter_type;
 struct hns3_fdir_rule fdir_rule;
+ struct hns3_hw *hw = &hns->hw;
 int ret;

 if (flow == NULL)
 return rte_flow_error_set(error, EINVAL,
 RTE_FLOW_ERROR_TYPE_HANDLE,
 flow, "Flow is NULL");
+
 filter_type = flow->filter_type;
 switch (filter_type) {
 case RTE_ETH_FILTER_FDIR:
@@ -1782,20 +1858,20 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 "Destroy FDIR fail.Try again");
 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
 hns3_counter_release(dev, fdir_rule.act_cnt.id);
- TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
+ TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
 rte_free(fdir_rule_ptr);
 fdir_rule_ptr = NULL;
 break;
 case RTE_ETH_FILTER_HASH:
 rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
- ret = hns3_config_rss_filter(dev, &hw->rss_info, false);
+ ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
+ false);
 if (ret)
 return rte_flow_error_set(error, EIO,
 RTE_FLOW_ERROR_TYPE_HANDLE,
 flow,
 "Destroy RSS fail.Try again");
- TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
- entries);
+ TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
 rte_free(rss_filter_ptr);
 rss_filter_ptr = NULL;
 break;
@@ -1805,10 +1881,9 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 "Unsupported filter type");
 }

- TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
+ TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
 if (flow_node->flow == flow) {
- TAILQ_REMOVE(&process_list->flow_list, flow_node,
- entries);
+ TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
 rte_free(flow_node);
 flow_node = NULL;
 break;
@@ -1857,9 +1932,15 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 const struct rte_flow_action *actions, void *data,
 struct rte_flow_error *error)
{
+ struct rte_flow_action_rss *rss_conf;
+ struct hns3_rss_conf_ele *rss_rule;
 struct rte_flow_query_count *qc;
 int ret;

+ if (!flow->rule)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
+
 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 switch (actions->type) {
 case RTE_FLOW_ACTION_TYPE_VOID:
@@ -1870,53 +1951,148 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 if (ret)
 return ret;
 break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ if (flow->filter_type != RTE_ETH_FILTER_HASH) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "action is not supported");
+ }
+ rss_conf = (struct rte_flow_action_rss *)data;
+ rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
+ rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
+ sizeof(struct rte_flow_action_rss));
+ break;
 default:
 return rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions,
- "Query action only support count");
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "action is not supported");
 }
 }
+
 return 0;
}

+static int
+hns3_flow_validate_wrap(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ pthread_mutex_lock(&hw->flows_lock);
+ ret = hns3_flow_validate(dev, attr, pattern, actions, error);
+ pthread_mutex_unlock(&hw->flows_lock);
+
+ return ret;
+}
+
+static struct rte_flow *
+hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct rte_flow *flow;
+
+ pthread_mutex_lock(&hw->flows_lock);
+ flow = hns3_flow_create(dev, attr, pattern, actions, error);
+ pthread_mutex_unlock(&hw->flows_lock);
+
+ return flow;
+}
+
+static int
+hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ pthread_mutex_lock(&hw->flows_lock);
+ ret = hns3_flow_destroy(dev, flow, error);
+ pthread_mutex_unlock(&hw->flows_lock);
+
+ return ret;
+}
+
+static int
+hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ pthread_mutex_lock(&hw->flows_lock);
+ ret = hns3_flow_flush(dev, error);
+ pthread_mutex_unlock(&hw->flows_lock);
+
+ return ret;
+}
+
+static int
+hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *actions, void *data,
+ struct rte_flow_error *error)
+{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ int ret;
+
+ pthread_mutex_lock(&hw->flows_lock);
+ ret = hns3_flow_query(dev, flow, actions, data, error);
+ pthread_mutex_unlock(&hw->flows_lock);
+
+ return ret;
+}
+
static const struct rte_flow_ops hns3_flow_ops = {
- .validate = hns3_flow_validate,
- .create = hns3_flow_create,
- .destroy = hns3_flow_destroy,
- .flush = hns3_flow_flush,
- .query = hns3_flow_query,
+ .validate = hns3_flow_validate_wrap,
+ .create = hns3_flow_create_wrap,
+ .destroy = hns3_flow_destroy_wrap,
+ .flush = hns3_flow_flush_wrap,
+ .query = hns3_flow_query_wrap,
 .isolate = NULL,
};

-/*
- * The entry of flow API.
- * @param dev
- * Pointer to Ethernet device.
- * @return
- * 0 on success, a negative errno value otherwise is set.
- */ int -hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type, - enum rte_filter_op filter_op, void *arg) +hns3_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops) { struct hns3_hw *hw; - int ret = 0; hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); - switch (filter_type) { - case RTE_ETH_FILTER_GENERIC: - if (filter_op != RTE_ETH_FILTER_GET) - return -EINVAL; - if (hw->adapter_state >= HNS3_NIC_CLOSED) - return -ENODEV; - *(const void **)arg = &hns3_flow_ops; - break; - default: - hns3_err(hw, "Filter type (%d) not supported", filter_type); - ret = -EOPNOTSUPP; - break; - } + if (hw->adapter_state >= HNS3_NIC_CLOSED) + return -ENODEV; - return ret; + *ops = &hns3_flow_ops; + return 0; +} + +void +hns3_flow_init(struct rte_eth_dev *dev) +{ + struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private); + pthread_mutexattr_t attr; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return; + + pthread_mutexattr_init(&attr); + pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED); + pthread_mutex_init(&hw->flows_lock, &attr); + dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; + + TAILQ_INIT(&hw->flow_fdir_list); + TAILQ_INIT(&hw->flow_rss_list); + TAILQ_INIT(&hw->flow_list); +} + +void +hns3_flow_uninit(struct rte_eth_dev *dev) +{ + struct rte_flow_error error; + if (rte_eal_process_type() == RTE_PROC_PRIMARY) + hns3_flow_flush_wrap(dev, &error); }
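
Editor's illustrative sketches (not part of the patch). The first sketch shows what a queue-region rule could look like from the application side, per the hns3_find_rss_general_action() and hns3_handle_action_queue_region() logic in the patch: an ETH item in the pattern plus an RSS action with queue_num > 0 selects the FDIR queue-region path instead of general RSS. The port ID, queue IDs and queue count are hypothetical; per the driver's checks, the queue list must increase continuously, its length must be a power of two no larger than rss_size_max, and the firmware must support FD_QUEUE_REGION. This uses only the public rte_flow API.

#include <stdint.h>
#include <rte_flow.h>

/* Hypothetical helper: steer matching traffic to the contiguous queue
 * region [4, 7] on the given port. Error handling is elided. */
static struct rte_flow *
queue_region_rule(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 4, 5, 6, 7 };
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		/* ETH in the pattern + queue_num > 0 in the action makes
		 * hns3 treat this as a queue region (FDIR), not RSS. */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_rss rss = {
		.queue_num = 4,   /* power of 2 and <= rss_size_max */
		.queue = queues,  /* must increase continuously */
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}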
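
By contrast, a general RSS action, per hns3_parse_rss_filter() in the patch, carries no ETH item (or a zero queue_num), its types must fall within HNS3_ETH_RSS_SUPPORT, and a key, when supplied, must be exactly 40 bytes. A minimal sketch under those assumptions; the key bytes and types are placeholders:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Hypothetical helper: configure a general RSS rule on the given port. */
static struct rte_flow *
general_rss_rule(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint8_t key[40] = { 0x6d, 0x5a }; /* 40-byte hash key */
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		/* No ETH item, so hns3 does not treat this as a queue region. */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
		.types = ETH_RSS_IP | ETH_RSS_TCP,
		.key_len = sizeof(key),
		.key = key,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}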
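
For the FDIR path, the supported-PATTERN notes in the patch describe matching on, e.g., IPv4 addresses and TCP ports. The sketch below builds such a rule with a QUEUE and a MARK action (mark ID within the documented [0, 4094] range). All match values are arbitrary examples:

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_ip.h>

/* Hypothetical helper: match TCP/IPv4 by addresses and ports, steer hits
 * to Rx queue 3 and tag them with mark 100. */
static struct rte_flow *
fdir_tcp4_rule(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
	};
	static const struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = RTE_BE32(0xffffffff),
		.hdr.dst_addr = RTE_BE32(0xffffffff),
	};
	static const struct rte_flow_item_tcp tcp_spec = {
		.hdr.src_port = RTE_BE16(80),
		.hdr.dst_port = RTE_BE16(80),
	};
	static const struct rte_flow_item_tcp tcp_mask = {
		.hdr.src_port = RTE_BE16(0xffff),
		.hdr.dst_port = RTE_BE16(0xffff),
	};
	static const struct rte_flow_item pattern[] = {
		/* ETH without spec/mask only describes the protocol stack. */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 3 };
	static const struct rte_flow_action_mark mark = { .id = 100 };
	static const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}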
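
The hns3_counter_query() change above reports packet hits only and explicitly clears bytes_set. A sketch of how an application would read such a counter through rte_flow_query(), assuming the flow was created with a COUNT action:

#include <inttypes.h>
#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical helper: print the hit counter of an existing flow. */
static void
print_flow_hits(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_count qc = { .reset = 0 };
	const struct rte_flow_action count_act[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	/* Per the patch, hns3 fills hits/hits_set only; bytes_set is 0. */
	if (rte_flow_query(port_id, flow, count_act, &qc, &err) == 0 &&
	    qc.hits_set)
		printf("flow hits: %" PRIu64 "\n", qc.hits);
}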
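
Finally, a sketch of the locking scheme hns3_flow_init() establishes with the new *_wrap callbacks: a process-shared pthread mutex so that flow operations from secondary processes serialize against the primary. For PTHREAD_PROCESS_SHARED to be effective, the mutex must live in memory mapped into every process; hns3 keeps it in struct hns3_hw, which resides in shared device-private data. The helper name is illustrative:

#include <pthread.h>

static void
flows_lock_init(pthread_mutex_t *lock)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	/* Allow lock/unlock from any process mapping this mutex. */
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(lock, &attr);
	pthread_mutexattr_destroy(&attr);
}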