diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index c484114b32..fc77979c5f 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
  */
 
 #include 
@@ -158,7 +158,10 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_pf *pf = &hns->pf;
+	struct hns3_hw *hw = &hns->hw;
 	struct hns3_flow_counter *cnt;
+	uint64_t value;
+	int ret;
 
 	cnt = hns3_counter_lookup(dev, id);
 	if (cnt) {
@@ -171,6 +174,13 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 		return 0;
 	}
 
+	/* Clear the counter by read ops because the counter is read-clear */
+	ret = hns3_get_count(hw, id, &value);
+	if (ret)
+		return rte_flow_error_set(error, EIO,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "Clear counter failed!");
+
 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
 	if (cnt == NULL)
 		return rte_flow_error_set(error, ENOMEM,
@@ -213,6 +223,8 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 	}
 	qc->hits_set = 1;
 	qc->hits = value;
+	qc->bytes_set = 0;
+	qc->bytes = 0;
 
 	return 0;
 }
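For context on the two counter hunks above: hns3 flow counters are read-clear and packet-only, so a fresh counter is drained by one throwaway read, and a query now reports hits while explicitly leaving bytes unset. A minimal application-side sketch (the port ID and flow handle are assumed to come from an earlier rte_flow_create() with a COUNT action):

    #include <rte_flow.h>

    /* Sketch: read the COUNT action of an existing hns3 rule. */
    static int
    query_flow_hits(uint16_t port_id, struct rte_flow *flow, uint64_t *hits)
    {
            struct rte_flow_query_count qc = { .reset = 0 };
            const struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error error;
            int ret;

            ret = rte_flow_query(port_id, flow, actions, &qc, &error);
            if (ret != 0)
                    return ret;
            /* hns3 counts packets only: hits_set is 1, bytes_set is 0. */
            *hits = qc.hits_set ? qc.hits : 0;
            return 0;
    }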
@@ -1044,37 +1056,37 @@ hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	case RTE_FLOW_ITEM_TYPE_ETH:
 		ret = hns3_parse_eth(item, rule, error);
 		step_mngr->items = L2_next_items;
-		step_mngr->count = ARRAY_SIZE(L2_next_items);
+		step_mngr->count = RTE_DIM(L2_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_VLAN:
 		ret = hns3_parse_vlan(item, rule, error);
 		step_mngr->items = L2_next_items;
-		step_mngr->count = ARRAY_SIZE(L2_next_items);
+		step_mngr->count = RTE_DIM(L2_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_IPV4:
 		ret = hns3_parse_ipv4(item, rule, error);
 		step_mngr->items = L3_next_items;
-		step_mngr->count = ARRAY_SIZE(L3_next_items);
+		step_mngr->count = RTE_DIM(L3_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_IPV6:
 		ret = hns3_parse_ipv6(item, rule, error);
 		step_mngr->items = L3_next_items;
-		step_mngr->count = ARRAY_SIZE(L3_next_items);
+		step_mngr->count = RTE_DIM(L3_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_TCP:
 		ret = hns3_parse_tcp(item, rule, error);
 		step_mngr->items = L4_next_items;
-		step_mngr->count = ARRAY_SIZE(L4_next_items);
+		step_mngr->count = RTE_DIM(L4_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_UDP:
 		ret = hns3_parse_udp(item, rule, error);
 		step_mngr->items = L4_next_items;
-		step_mngr->count = ARRAY_SIZE(L4_next_items);
+		step_mngr->count = RTE_DIM(L4_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_SCTP:
 		ret = hns3_parse_sctp(item, rule, error);
 		step_mngr->items = L4_next_items;
-		step_mngr->count = ARRAY_SIZE(L4_next_items);
+		step_mngr->count = RTE_DIM(L4_next_items);
 		break;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
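The ARRAY_SIZE()-to-RTE_DIM() churn above is mechanical: RTE_DIM() is the array-size macro DPDK already exports from rte_common.h, so the driver-local ARRAY_SIZE() duplicate can be dropped. For reference, the public definition is simply:

    /* rte_common.h */
    #define RTE_DIM(a) (sizeof(a) / sizeof((a)[0]))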
@@ -1122,42 +1134,29 @@ is_tunnel_packet(enum rte_flow_item_type type)
 }
 
 /*
- * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
- * And get the flow director filter info BTW.
- * UDP/TCP/SCTP PATTERN:
- * The first not void item can be ETH or IPV4 or IPV6
- * The second not void item must be IPV4 or IPV6 if the first one is ETH.
- * The next not void item could be UDP or TCP or SCTP (optional)
- * The next not void item could be RAW (for flexbyte, optional)
- * The next not void item must be END.
- * A Fuzzy Match pattern can appear at any place before END.
- * Fuzzy Match is optional for IPV4 but is required for IPV6
- * MAC VLAN PATTERN:
- * The first not void item must be ETH.
- * The second not void item must be MAC VLAN.
- * The next not void item must be END.
- * ACTION:
- * The first not void action should be QUEUE or DROP.
- * The second not void optional action should be MARK,
- * mark_id is a uint32_t number.
- * The next not void action should be END.
- * UDP/TCP/SCTP pattern example:
- * ITEM		Spec			Mask
- * ETH		NULL			NULL
- * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
- *		dst_addr 192.167.3.50	0xFFFFFFFF
- * UDP/TCP/SCTP	src_port	80	0xFFFF
- *		dst_port	80	0xFFFF
- * END
- * MAC VLAN pattern example:
- * ITEM		Spec			Mask
- * ETH		dst_addr
-		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
-		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
- * MAC VLAN	tci	0x2016		0xEFFF
- * END
- * Other members in mask and spec should set to 0x00.
- * Item->last should be NULL.
+ * Parse the flow director rule.
+ * The supported PATTERN:
+ *  case: non-tunnel packet:
+ *   ETH : src-mac, dst-mac, ethertype
+ *   VLAN: tag1, tag2
+ *   IPv4: src-ip, dst-ip, tos, proto
+ *   IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
+ *   UDP : src-port, dst-port
+ *   TCP : src-port, dst-port
+ *   SCTP: src-port, dst-port, tag
+ *  case: tunnel packet:
+ *   OUTER-ETH: ethertype
+ *   OUTER-L3 : proto
+ *   OUTER-L4 : src-port, dst-port
+ *   TUNNEL   : vni, flow-id(only valid when NVGRE)
+ *   INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
+ * The supported ACTION:
+ *   QUEUE
+ *   DROP
+ *   COUNT
+ *   MARK: the id range [0, 4094]
+ *   FLAG
+ *   RSS: only valid if firmware supports FD_QUEUE_REGION.
  */
 static int
 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
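To make the rewritten comment concrete, here is a hypothetical application-side rule exercising the documented non-tunnel pattern and actions (IPv4 src-ip plus UDP dst-port, counted and steered to queue 1). All names and values are illustrative, not part of the patch:

    #include <rte_flow.h>
    #include <rte_ip.h>
    #include <rte_byteorder.h>

    /* Sketch: match UDP dst-port 80 from 192.168.1.20, count and queue it. */
    static struct rte_flow *
    create_fdir_example(uint16_t port_id, struct rte_flow_error *error)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_ipv4 ip_spec = {
                    .hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
            };
            struct rte_flow_item_ipv4 ip_mask = {
                    .hdr.src_addr = RTE_BE32(0xffffffff),
            };
            struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16(80) };
            struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16(0xffff) };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                      .spec = &ip_spec, .mask = &ip_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP,
                      .spec = &udp_spec, .mask = &udp_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_count count = { .id = 0 };
            struct rte_flow_action_queue queue = { .index = 1 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };

            return rte_flow_create(port_id, &attr, pattern, actions, error);
    }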
@@ -1178,7 +1177,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 				  "Fdir not supported in VF");
 
 	step_mngr.items = first_items;
-	step_mngr.count = ARRAY_SIZE(first_items);
+	step_mngr.count = RTE_DIM(first_items);
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
 			continue;
@@ -1192,7 +1191,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 			if (ret)
 				return ret;
 			step_mngr.items = tunnel_next_items;
-			step_mngr.count = ARRAY_SIZE(tunnel_next_items);
+			step_mngr.count = RTE_DIM(tunnel_next_items);
 		} else {
 			ret = hns3_parse_normal(item, rule, &step_mngr, error);
 			if (ret)
@@ -1203,45 +1202,34 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 	return hns3_handle_actions(dev, actions, rule, error);
 }
 
-void
-hns3_filterlist_init(struct rte_eth_dev *dev)
-{
-	struct hns3_process_private *process_list = dev->process_private;
-
-	TAILQ_INIT(&process_list->fdir_list);
-	TAILQ_INIT(&process_list->filter_rss_list);
-	TAILQ_INIT(&process_list->flow_list);
-}
-
 static void
 hns3_filterlist_flush(struct rte_eth_dev *dev)
 {
-	struct hns3_process_private *process_list = dev->process_private;
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
 	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_flow_mem *flow_node;
 
-	fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
+	fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
 	while (fdir_rule_ptr) {
-		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
+		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
 		rte_free(fdir_rule_ptr);
-		fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
+		fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
 	}
 
-	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
 	while (rss_filter_ptr) {
-		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
-			     entries);
+		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
 		rte_free(rss_filter_ptr);
-		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
 	}
 
-	flow_node = TAILQ_FIRST(&process_list->flow_list);
+	flow_node = TAILQ_FIRST(&hw->flow_list);
 	while (flow_node) {
-		TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
+		TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
 		rte_free(flow_node->flow);
 		rte_free(flow_node);
-		flow_node = TAILQ_FIRST(&process_list->flow_list);
+		flow_node = TAILQ_FIRST(&hw->flow_list);
 	}
 }
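Note the bookkeeping change running through this and the following hunks: the three rule lists move from the per-process dev->process_private into struct hns3_hw, which sits in dev_private and is therefore visible to primary and secondary processes alike, so hns3_filterlist_init() disappears (the lists are now set up once in hns3_flow_init(), at the end of this patch). A rough sketch of the plumbing this assumes, reconstructed from the names used in the diff (the real definitions live in the hns3 headers and need struct hns3_fdir_rule from hns3_fdir.h):

    #include <sys/queue.h>

    /* Illustrative only; see hns3_fdir.h for the actual layout. */
    struct hns3_fdir_rule_ele {
            TAILQ_ENTRY(hns3_fdir_rule_ele) entries;
            struct hns3_fdir_rule fdir_conf;
    };
    TAILQ_HEAD(hns3_fdir_rule_list, hns3_fdir_rule_ele);

    /* struct hns3_hw gains, per this diff:
     *     a flow_fdir_list, flow_rss_list and flow_list of this pattern,
     *     plus pthread_mutex_t flows_lock (see the wrappers below).
     */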
@@ -1511,7 +1499,6 @@ static int
 hns3_config_rss_filter(struct rte_eth_dev *dev,
 		       const struct hns3_rss_conf *conf, bool add)
 {
-	struct hns3_process_private *process_list = dev->process_private;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_hw *hw = &hns->hw;
@@ -1537,7 +1524,7 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
 				 hw->rss_info.conf.types;
 	if (flow_types != rss_flow_conf.types)
 		hns3_warn(hw, "modified RSS types based on hardware support, "
-			  "requested:%" PRIx64 " configured:%" PRIx64,
+			  "requested:0x%" PRIx64 " configured:0x%" PRIx64,
 			  rss_flow_conf.types, flow_types);
 	/* Update the useful flow types */
 	rss_flow_conf.types = flow_types;
@@ -1596,7 +1583,7 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
 	 * When create a new RSS rule, the old rule will be overlaid and set
 	 * invalid.
 	 */
-	TAILQ_FOREACH(rss_filter_ptr, &process_list->filter_rss_list, entries)
+	TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
 		rss_filter_ptr->filter_info.valid = false;
 
 rss_config_err:
@@ -1608,7 +1595,6 @@ rss_config_err:
 static int
 hns3_clear_rss_filter(struct rte_eth_dev *dev)
 {
-	struct hns3_process_private *process_list = dev->process_private;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_hw *hw = &hns->hw;
@@ -1616,10 +1602,9 @@ hns3_clear_rss_filter(struct rte_eth_dev *dev)
 	int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
 	int ret = 0;
 
-	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+	rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
 	while (rss_filter_ptr) {
-		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
-			     entries);
+		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
 		ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
 					     false);
 		if (ret)
@@ -1627,7 +1612,7 @@ hns3_clear_rss_filter(struct rte_eth_dev *dev)
 		else
 			rss_rule_succ_cnt++;
 		rte_free(rss_filter_ptr);
-		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
+		rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
 	}
 
 	if (rss_rule_fail_cnt) {
@@ -1731,7 +1716,6 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		 const struct rte_flow_action actions[],
 		 struct rte_flow_error *error)
 {
-	struct hns3_process_private *process_list = dev->process_private;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_hw *hw = &hns->hw;
 	const struct hns3_rss_conf *rss_conf;
@@ -1763,7 +1747,7 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	}
 
 	flow_node->flow = flow;
-	TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);
+	TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
 
 	act = hns3_find_rss_general_action(pattern, actions);
 	if (act) {
@@ -1785,8 +1769,7 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		hns3_rss_conf_copy(&rss_filter_ptr->filter_info,
 				   &rss_conf->conf);
 		rss_filter_ptr->filter_info.valid = true;
-		TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
-				  rss_filter_ptr, entries);
+		TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
 
 		flow->rule = rss_filter_ptr;
 		flow->filter_type = RTE_ETH_FILTER_HASH;
@@ -1806,36 +1789,36 @@ hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		flow->counter_id = fdir_rule.act_cnt.id;
 	}
+
+	fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
+				    sizeof(struct hns3_fdir_rule_ele),
+				    0);
+	if (fdir_rule_ptr == NULL) {
+		hns3_err(hw, "failed to allocate fdir_rule memory.");
+		ret = -ENOMEM;
+		goto err_fdir;
+	}
+
 	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
 	if (!ret) {
-		fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
-					    sizeof(struct hns3_fdir_rule_ele),
-					    0);
-		if (fdir_rule_ptr == NULL) {
-			hns3_err(hw, "Failed to allocate fdir_rule memory");
-			ret = -ENOMEM;
-			goto err_fdir;
-		}
-
 		memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
 		       sizeof(struct hns3_fdir_rule));
-		TAILQ_INSERT_TAIL(&process_list->fdir_list,
-				  fdir_rule_ptr, entries);
+		TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
 		flow->rule = fdir_rule_ptr;
 		flow->filter_type = RTE_ETH_FILTER_FDIR;
 
 		return flow;
 	}
 
+	rte_free(fdir_rule_ptr);
 err_fdir:
 	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
 		hns3_counter_release(dev, fdir_rule.act_cnt.id);
-
 err:
 	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
 			   "Failed to create flow");
 out:
-	TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
+	TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
 	rte_free(flow_node);
 	rte_free(flow);
 	return NULL;
@@ -1846,13 +1829,13 @@ static int
 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 		  struct rte_flow_error *error)
 {
-	struct hns3_process_private *process_list = dev->process_private;
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_fdir_rule_ele *fdir_rule_ptr;
 	struct hns3_rss_conf_ele *rss_filter_ptr;
 	struct hns3_flow_mem *flow_node;
 	enum rte_filter_type filter_type;
 	struct hns3_fdir_rule fdir_rule;
+	struct hns3_hw *hw = &hns->hw;
 	int ret;
 
 	if (flow == NULL)
@@ -1875,7 +1858,7 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 					  "Destroy FDIR fail.Try again");
 		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
 			hns3_counter_release(dev, fdir_rule.act_cnt.id);
-		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
+		TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
 		rte_free(fdir_rule_ptr);
 		fdir_rule_ptr = NULL;
 		break;
@@ -1888,8 +1871,7 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 					  RTE_FLOW_ERROR_TYPE_HANDLE,
 					  flow,
 					  "Destroy RSS fail.Try again");
-		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
-			     entries);
+		TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
 		rte_free(rss_filter_ptr);
 		rss_filter_ptr = NULL;
 		break;
@@ -1899,10 +1881,9 @@ hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 				       "Unsupported filter type");
 	}
 
-	TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
+	TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
 		if (flow_node->flow == flow) {
-			TAILQ_REMOVE(&process_list->flow_list, flow_node,
-				     entries);
+			TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
 			rte_free(flow_node);
 			flow_node = NULL;
 			break;
@@ -1991,43 +1972,127 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 	return 0;
 }
 
+static int
+hns3_flow_validate_wrap(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static struct rte_flow *
+hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_flow *flow;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	flow = hns3_flow_create(dev, attr, pattern, actions, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return flow;
+}
+
+static int
+hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+		       struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_destroy(dev, flow, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static int
+hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_flush(dev, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static int
+hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+		     const struct rte_flow_action *actions, void *data,
+		     struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_query(dev, flow, actions, data, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
 static const struct rte_flow_ops hns3_flow_ops = {
-	.validate = hns3_flow_validate,
-	.create = hns3_flow_create,
-	.destroy = hns3_flow_destroy,
-	.flush = hns3_flow_flush,
-	.query = hns3_flow_query,
+	.validate = hns3_flow_validate_wrap,
+	.create = hns3_flow_create_wrap,
+	.destroy = hns3_flow_destroy_wrap,
+	.flush = hns3_flow_flush_wrap,
+	.query = hns3_flow_query_wrap,
 	.isolate = NULL,
 };
 
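All five rte_flow callbacks are now funneled through wrappers that serialize on hw->flows_lock. Because hns3_flow_init() (below) also sets RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE, the generic ethdev layer skips taking its own per-port flow lock on top. Paraphrased from lib/librte_ethdev/rte_flow.c of this era (a sketch, not the verbatim upstream source):

    /* The generic flow API only locks when the PMD has not
     * declared its flow ops thread-safe. */
    static inline void
    fts_enter(struct rte_eth_dev *dev)
    {
            if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
                    pthread_mutex_lock(&dev->data->flow_ops_mutex);
    }
    /* fts_exit() unlocks under the same condition. */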
-/*
- * The entry of flow API.
- * @param dev
- *   Pointer to Ethernet device.
- * @return
- *   0 on success, a negative errno value otherwise is set.
- */
 int
-hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
-		     enum rte_filter_op filter_op, void *arg)
+hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
 {
 	struct hns3_hw *hw;
-	int ret = 0;
 
 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	switch (filter_type) {
-	case RTE_ETH_FILTER_GENERIC:
-		if (filter_op != RTE_ETH_FILTER_GET)
-			return -EINVAL;
-		if (hw->adapter_state >= HNS3_NIC_CLOSED)
-			return -ENODEV;
-		*(const void **)arg = &hns3_flow_ops;
-		break;
-	default:
-		hns3_err(hw, "Filter type (%d) not supported", filter_type);
-		ret = -EOPNOTSUPP;
-		break;
-	}
+	if (hw->adapter_state >= HNS3_NIC_CLOSED)
+		return -ENODEV;
 
-	return ret;
+	*ops = &hns3_flow_ops;
+	return 0;
+}
+
+void
+hns3_flow_init(struct rte_eth_dev *dev)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	pthread_mutexattr_t attr;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return;
+
+	pthread_mutexattr_init(&attr);
+	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+	pthread_mutex_init(&hw->flows_lock, &attr);
+	dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+
+	TAILQ_INIT(&hw->flow_fdir_list);
+	TAILQ_INIT(&hw->flow_rss_list);
+	TAILQ_INIT(&hw->flow_list);
+}
+
+void
+hns3_flow_uninit(struct rte_eth_dev *dev)
+{
+	struct rte_flow_error error;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
+		hns3_flow_flush_wrap(dev, &error);
+}
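hns3_dev_flow_ops_get() replaces the old filter_ctrl()/RTE_ETH_FILTER_GENERIC indirection, following the ethdev rework that turned flow-ops retrieval into a dedicated callback. On the driver side it is wired up through eth_dev_ops, roughly as follows (a sketch; the matching hunk lives in hns3_ethdev.c, not in this file):

    /* hns3_ethdev.c side: the ethdev layer now fetches flow ops
     * through a dedicated callback instead of filter_ctrl(). */
    static const struct eth_dev_ops hns3_eth_dev_ops = {
            /* ... other callbacks ... */
            .flow_ops_get = hns3_dev_flow_ops_get,
    };

With that in place, rte_flow_create() on an hns3 port resolves to hns3_flow_create_wrap() and takes flows_lock before touching the shared rule lists.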