diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index 711eed3f54..82810e00e8 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -158,7 +158,10 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_pf *pf = &hns->pf;
+	struct hns3_hw *hw = &hns->hw;
 	struct hns3_flow_counter *cnt;
+	uint64_t value;
+	int ret;
 
 	cnt = hns3_counter_lookup(dev, id);
 	if (cnt) {
@@ -171,6 +174,13 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 		return 0;
 	}
 
+	/* Clear the counter by read ops because the counter is read-clear */
+	ret = hns3_get_count(hw, id, &value);
+	if (ret)
+		return rte_flow_error_set(error, EIO,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "Clear counter failed!");
+
 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
 	if (cnt == NULL)
 		return rte_flow_error_set(error, ENOMEM,
@@ -213,6 +223,8 @@ hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 	}
 	qc->hits_set = 1;
 	qc->hits = value;
+	qc->bytes_set = 0;
+	qc->bytes = 0;
 
 	return 0;
 }
@@ -1044,37 +1056,37 @@ hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
 	case RTE_FLOW_ITEM_TYPE_ETH:
 		ret = hns3_parse_eth(item, rule, error);
 		step_mngr->items = L2_next_items;
-		step_mngr->count = ARRAY_SIZE(L2_next_items);
+		step_mngr->count = RTE_DIM(L2_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_VLAN:
 		ret = hns3_parse_vlan(item, rule, error);
 		step_mngr->items = L2_next_items;
-		step_mngr->count = ARRAY_SIZE(L2_next_items);
+		step_mngr->count = RTE_DIM(L2_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_IPV4:
 		ret = hns3_parse_ipv4(item, rule, error);
 		step_mngr->items = L3_next_items;
-		step_mngr->count = ARRAY_SIZE(L3_next_items);
+		step_mngr->count = RTE_DIM(L3_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_IPV6:
 		ret = hns3_parse_ipv6(item, rule, error);
 		step_mngr->items = L3_next_items;
-		step_mngr->count = ARRAY_SIZE(L3_next_items);
+		step_mngr->count = RTE_DIM(L3_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_TCP:
 		ret = hns3_parse_tcp(item, rule, error);
 		step_mngr->items = L4_next_items;
-		step_mngr->count = ARRAY_SIZE(L4_next_items);
+		step_mngr->count = RTE_DIM(L4_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_UDP:
 		ret = hns3_parse_udp(item, rule, error);
 		step_mngr->items = L4_next_items;
-		step_mngr->count = ARRAY_SIZE(L4_next_items);
+		step_mngr->count = RTE_DIM(L4_next_items);
 		break;
 	case RTE_FLOW_ITEM_TYPE_SCTP:
 		ret = hns3_parse_sctp(item, rule, error);
 		step_mngr->items = L4_next_items;
-		step_mngr->count = ARRAY_SIZE(L4_next_items);
+		step_mngr->count = RTE_DIM(L4_next_items);
 		break;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
@@ -1178,7 +1190,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 					  "Fdir not supported in VF");
 
 	step_mngr.items = first_items;
-	step_mngr.count = ARRAY_SIZE(first_items);
+	step_mngr.count = RTE_DIM(first_items);
 	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
 			continue;
@@ -1192,7 +1204,7 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 			if (ret)
 				return ret;
 			step_mngr.items = tunnel_next_items;
-			step_mngr.count = ARRAY_SIZE(tunnel_next_items);
+			step_mngr.count = RTE_DIM(tunnel_next_items);
 		} else {
 			ret = hns3_parse_normal(item, rule, &step_mngr, error);
 			if (ret)
@@ -1204,9 +1216,18 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 }
 
 void
-hns3_filterlist_init(struct rte_eth_dev *dev)
+hns3_flow_init(struct rte_eth_dev *dev)
 {
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_process_private *process_list = dev->process_private;
+	pthread_mutexattr_t attr;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		pthread_mutexattr_init(&attr);
+		pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+		pthread_mutex_init(&hw->flows_lock, &attr);
+		dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+	}
 
 	TAILQ_INIT(&process_list->fdir_list);
 	TAILQ_INIT(&process_list->filter_rss_list);
@@ -1537,7 +1558,7 @@ hns3_config_rss_filter(struct rte_eth_dev *dev,
 			 hw->rss_info.conf.types;
 	if (flow_types != rss_flow_conf.types)
 		hns3_warn(hw, "modified RSS types based on hardware support, "
-			  "requested:%" PRIx64 " configured:%" PRIx64,
+			  "requested:0x%" PRIx64 " configured:0x%" PRIx64,
 			  rss_flow_conf.types, flow_types);
 	/* Update the useful flow types */
 	rss_flow_conf.types = flow_types;
@@ -1992,12 +2013,87 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 	return 0;
 }
 
+static int
+hns3_flow_validate_wrap(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static struct rte_flow *
+hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_flow *flow;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	flow = hns3_flow_create(dev, attr, pattern, actions, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return flow;
+}
+
+static int
+hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+		       struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_destroy(dev, flow, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static int
+hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_flush(dev, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static int
+hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+		     const struct rte_flow_action *actions, void *data,
+		     struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_query(dev, flow, actions, data, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
 static const struct rte_flow_ops hns3_flow_ops = {
-	.validate = hns3_flow_validate,
-	.create = hns3_flow_create,
-	.destroy = hns3_flow_destroy,
-	.flush = hns3_flow_flush,
-	.query = hns3_flow_query,
+	.validate = hns3_flow_validate_wrap,
+	.create = hns3_flow_create_wrap,
+	.destroy = hns3_flow_destroy_wrap,
+	.flush = hns3_flow_flush_wrap,
+	.query = hns3_flow_query_wrap,
 	.isolate = NULL,
 };
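
Note on the locking hunks above: every rte_flow op is now serialized behind hw->flows_lock, and setting RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE tells the ethdev layer it may skip its own per-port flow lock. Because DPDK primary and secondary processes share device state, the mutex is created PTHREAD_PROCESS_SHARED, and only once, by the primary. Below is a minimal standalone sketch of that initialization pattern; flow_lock_init and flows_lock are illustrative names here, not the driver's symbols, and in the driver the lock lives in shared device memory rather than a static.

/*
 * Sketch only: process-shared mutex setup as done in hns3_flow_init().
 * Build with: cc sketch.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

/* In the driver this sits in shared device state (hw->flows_lock). */
static pthread_mutex_t flows_lock;

static void
flow_lock_init(void)
{
	pthread_mutexattr_t attr;

	/* PTHREAD_PROCESS_SHARED lets secondary processes take the lock. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(&flows_lock, &attr);
	pthread_mutexattr_destroy(&attr);
}

int
main(void)
{
	flow_lock_init();

	/* Same shape as the *_wrap functions: lock, do the op, unlock. */
	pthread_mutex_lock(&flows_lock);
	printf("flow table access serialized\n");
	pthread_mutex_unlock(&flows_lock);

	return 0;
}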
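
Note on the counter hunks: the hardware counter is read-clear and counts packets only, so hns3_counter_new() now performs a throwaway hns3_get_count() read to discard any stale value before the rule starts counting, and hns3_counter_query() reports bytes_set = 0 explicitly. For context, a hedged application-side sketch of reading such a counter through the generic rte_flow API; query_flow_hits and its parameters are illustrative and not part of this patch.

#include <stdint.h>
#include <rte_flow.h>

/*
 * Sketch only: query the COUNT action of an existing flow rule. Assumes
 * "flow" was created on "port_id" with an RTE_FLOW_ACTION_TYPE_COUNT
 * action. On hns3 after this patch, hits_set is 1 and bytes_set is 0.
 */
static int
query_flow_hits(uint16_t port_id, struct rte_flow *flow, uint64_t *hits)
{
	struct rte_flow_query_count qc = { .reset = 0 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_query(port_id, flow, &action, &qc, &error);
	if (ret != 0)
		return ret; /* error.message describes the failure */

	*hits = qc.hits_set ? qc.hits : 0;
	return 0;
}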