}
void
-hns3_filterlist_init(struct rte_eth_dev *dev)
+hns3_flow_init(struct rte_eth_dev *dev)
{
+ struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 struct hns3_process_private *process_list = dev->process_private;
+ pthread_mutexattr_t attr;
+
+ if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
+ /*
+ * hw lives in memory shared with secondary processes, so the
+ * flow lock must be process-shared for cross-process
+ * serialization of rte_flow operations.
+ */
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
+ pthread_mutex_init(&hw->flows_lock, &attr);
+ /* The attr object is no longer needed once the mutex exists. */
+ pthread_mutexattr_destroy(&attr);
+ dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
+ }
 TAILQ_INIT(&process_list->fdir_list);
 TAILQ_INIT(&process_list->filter_rss_list);
}
+/* rte_flow validate callback: serialize on the per-device flow lock. */
+static int
+hns3_flow_validate_wrap(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ pthread_mutex_t *lock =
+ &HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private)->flows_lock;
+ int rc;
+
+ pthread_mutex_lock(lock);
+ rc = hns3_flow_validate(dev, attr, pattern, actions, error);
+ pthread_mutex_unlock(lock);
+
+ return rc;
+}
+
+/* rte_flow create callback: serialize on the per-device flow lock. */
+static struct rte_flow *
+hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ pthread_mutex_t *lock =
+ &HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private)->flows_lock;
+ struct rte_flow *created;
+
+ pthread_mutex_lock(lock);
+ created = hns3_flow_create(dev, attr, pattern, actions, error);
+ pthread_mutex_unlock(lock);
+
+ return created;
+}
+
+/* rte_flow destroy callback: serialize on the per-device flow lock. */
+static int
+hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ pthread_mutex_t *lock =
+ &HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private)->flows_lock;
+ int rc;
+
+ pthread_mutex_lock(lock);
+ rc = hns3_flow_destroy(dev, flow, error);
+ pthread_mutex_unlock(lock);
+
+ return rc;
+}
+
+/* rte_flow flush callback: serialize on the per-device flow lock. */
+static int
+hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+ pthread_mutex_t *lock =
+ &HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private)->flows_lock;
+ int rc;
+
+ pthread_mutex_lock(lock);
+ rc = hns3_flow_flush(dev, error);
+ pthread_mutex_unlock(lock);
+
+ return rc;
+}
+
+/* rte_flow query callback: serialize on the per-device flow lock. */
+static int
+hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
+ const struct rte_flow_action *actions, void *data,
+ struct rte_flow_error *error)
+{
+ pthread_mutex_t *lock =
+ &HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private)->flows_lock;
+ int rc;
+
+ pthread_mutex_lock(lock);
+ rc = hns3_flow_query(dev, flow, actions, data, error);
+ pthread_mutex_unlock(lock);
+
+ return rc;
+}
+
+/*
+ * All callbacks route through the _wrap variants, which take
+ * hw->flows_lock so rte_flow operations are serialized across
+ * threads and processes (RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE).
+ */
static const struct rte_flow_ops hns3_flow_ops = {
- .validate = hns3_flow_validate,
- .create = hns3_flow_create,
- .destroy = hns3_flow_destroy,
- .flush = hns3_flow_flush,
- .query = hns3_flow_query,
+ .validate = hns3_flow_validate_wrap,
+ .create = hns3_flow_create_wrap,
+ .destroy = hns3_flow_destroy_wrap,
+ .flush = hns3_flow_flush_wrap,
+ .query = hns3_flow_query_wrap,
 .isolate = NULL,
};