X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fhns3%2Fhns3_flow.c;h=4511a49d9db47d3b04f18d8b64c245be2363f825;hb=871aa63542cedab7b920f2dd54fc3dc8ef82f3ca;hp=a016857aa5db2a048300304328495896840a71fd;hpb=2b9a66e1b606d3813d72dd81c626949e09706e27;p=dpdk.git

diff --git a/drivers/net/hns3/hns3_flow.c b/drivers/net/hns3/hns3_flow.c
index a016857aa5..4511a49d9d 100644
--- a/drivers/net/hns3/hns3_flow.c
+++ b/drivers/net/hns3/hns3_flow.c
@@ -1,5 +1,5 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
  */
 
 #include 
@@ -158,7 +158,10 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 {
 	struct hns3_adapter *hns = dev->data->dev_private;
 	struct hns3_pf *pf = &hns->pf;
+	struct hns3_hw *hw = &hns->hw;
 	struct hns3_flow_counter *cnt;
+	uint64_t value;
+	int ret;
 
 	cnt = hns3_counter_lookup(dev, id);
 	if (cnt) {
@@ -171,6 +174,13 @@ hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
 		return 0;
 	}
 
+	/* Clear the counter by read ops because the counter is read-clear */
+	ret = hns3_get_count(hw, id, &value);
+	if (ret)
+		return rte_flow_error_set(error, EIO,
+					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					  "Clear counter failed!");
+
 	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
 	if (cnt == NULL)
 		return rte_flow_error_set(error, ENOMEM,
@@ -1204,9 +1214,18 @@ hns3_parse_fdir_filter(struct rte_eth_dev *dev,
 }
 
 void
-hns3_filterlist_init(struct rte_eth_dev *dev)
+hns3_flow_init(struct rte_eth_dev *dev)
 {
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct hns3_process_private *process_list = dev->process_private;
+	pthread_mutexattr_t attr;
+
+	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+		pthread_mutexattr_init(&attr);
+		pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+		pthread_mutex_init(&hw->flows_lock, &attr);
+		dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+	}
 
 	TAILQ_INIT(&process_list->fdir_list);
 	TAILQ_INIT(&process_list->filter_rss_list);
@@ -1992,43 +2011,100 @@ hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
 	return 0;
 }
 
+static int
+hns3_flow_validate_wrap(struct rte_eth_dev *dev,
+			const struct rte_flow_attr *attr,
+			const struct rte_flow_item pattern[],
+			const struct rte_flow_action actions[],
+			struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_validate(dev, attr, pattern, actions, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static struct rte_flow *
+hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+		      const struct rte_flow_item pattern[],
+		      const struct rte_flow_action actions[],
+		      struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_flow *flow;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	flow = hns3_flow_create(dev, attr, pattern, actions, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return flow;
+}
+
+static int
+hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+		       struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_destroy(dev, flow, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static int
+hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_flush(dev, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
+static int
+hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+		     const struct rte_flow_action *actions, void *data,
+		     struct rte_flow_error *error)
+{
+	struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	int ret;
+
+	pthread_mutex_lock(&hw->flows_lock);
+	ret = hns3_flow_query(dev, flow, actions, data, error);
+	pthread_mutex_unlock(&hw->flows_lock);
+
+	return ret;
+}
+
 static const struct rte_flow_ops hns3_flow_ops = {
-	.validate = hns3_flow_validate,
-	.create = hns3_flow_create,
-	.destroy = hns3_flow_destroy,
-	.flush = hns3_flow_flush,
-	.query = hns3_flow_query,
+	.validate = hns3_flow_validate_wrap,
+	.create = hns3_flow_create_wrap,
+	.destroy = hns3_flow_destroy_wrap,
+	.flush = hns3_flow_flush_wrap,
+	.query = hns3_flow_query_wrap,
 	.isolate = NULL,
 };
 
-/*
- * The entry of flow API.
- * @param dev
- *   Pointer to Ethernet device.
- * @return
- *   0 on success, a negative errno value otherwise is set.
- */
 int
-hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
-		     enum rte_filter_op filter_op, void *arg)
+hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
+		      const struct rte_flow_ops **ops)
 {
 	struct hns3_hw *hw;
-	int ret = 0;
 
 	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-	switch (filter_type) {
-	case RTE_ETH_FILTER_GENERIC:
-		if (filter_op != RTE_ETH_FILTER_GET)
-			return -EINVAL;
-		if (hw->adapter_state >= HNS3_NIC_CLOSED)
-			return -ENODEV;
-		*(const void **)arg = &hns3_flow_ops;
-		break;
-	default:
-		hns3_err(hw, "Filter type (%d) not supported", filter_type);
-		ret = -EOPNOTSUPP;
-		break;
-	}
+	if (hw->adapter_state >= HNS3_NIC_CLOSED)
+		return -ENODEV;
 
-	return ret;
+	*ops = &hns3_flow_ops;
+	return 0;
 }
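
Editor's note (not part of the patch): the hns3_flow_init() hunk above initializes hw->flows_lock with the PTHREAD_PROCESS_SHARED attribute so that primary and secondary DPDK processes can serialize on the same mutex, and the new *_wrap functions take that lock around every rte_flow callback before advertising RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE. The standalone sketch below only illustrates that pthread pattern in isolation; flows_lock_init() and do_flow_op() are hypothetical names, and a plain static variable stands in for the mutex that the driver actually keeps in shared device-private memory.

/*
 * Minimal sketch of the process-shared mutex pattern used by the patch.
 * Build with: cc -o flows_lock_demo flows_lock_demo.c -pthread
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t flows_lock;

static void
flows_lock_init(void)
{
	pthread_mutexattr_t attr;

	/* Mark the mutex shareable across processes, as hns3_flow_init() does.
	 * This only has cross-process effect when the mutex lives in memory
	 * mapped by all participating processes (DPDK shared memory in the
	 * driver); here it is a static variable for demonstration only.
	 */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_mutex_init(&flows_lock, &attr);
	pthread_mutexattr_destroy(&attr);
}

static int
do_flow_op(void)
{
	/* Mirrors the *_wrap functions: lock, run the flow op, unlock. */
	pthread_mutex_lock(&flows_lock);
	/* flow table manipulation would happen here */
	pthread_mutex_unlock(&flows_lock);

	return 0;
}

int
main(void)
{
	flows_lock_init();
	printf("flow op returned %d\n", do_flow_op());
	return 0;
}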