/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
*/
-#include <stdbool.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_io.h>
hns3_warn(hw, "Unsupported tunnel filter in 4K*200Bit");
break;
default:
- hns3_err(hw, "Unsupported flow director mode %d",
+ hns3_err(hw, "Unsupported flow director mode %u",
pf->fdir.fd_cfg.fd_mode);
return -EOPNOTSUPP;
}
key_conf->mask.ip_proto);
break;
default:
- hns3_warn(hw, "not support tuple of (%d)", tuple);
+ hns3_warn(hw, "not support tuple of (%u)", tuple);
break;
}
return true;
ret = hns3_fd_tcam_config(hw, false, rule->location, key_y, true);
if (ret) {
- hns3_err(hw, "Config fd key_y fail, loc=%d, ret=%d",
+ hns3_err(hw, "Config fd key_y fail, loc=%u, ret=%d",
rule->queue_id, ret);
return ret;
}
ret = hns3_fd_tcam_config(hw, true, rule->location, key_x, true);
if (ret)
- hns3_err(hw, "Config fd key_x fail, loc=%d, ret=%d",
+ hns3_err(hw, "Config fd key_x fail, loc=%u, ret=%d",
rule->queue_id, ret);
return ret;
}
fdir_hash_params.socket_id = rte_socket_id();
TAILQ_INIT(&fdir_info->fdir_list);
- rte_spinlock_init(&fdir_info->flows_lock);
snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, "%s", hns->hw.data->name);
fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
if (fdir_info->hash_handle == NULL) {
struct hns3_fdir_info *fdir_info = &pf->fdir;
struct hns3_fdir_rule_ele *fdir_filter;
- rte_spinlock_lock(&fdir_info->flows_lock);
if (fdir_info->hash_map) {
rte_free(fdir_info->hash_map);
fdir_info->hash_map = NULL;
rte_hash_free(fdir_info->hash_handle);
fdir_info->hash_handle = NULL;
}
- rte_spinlock_unlock(&fdir_info->flows_lock);
fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
while (fdir_filter) {
hash_sig_t sig;
int ret;
- rte_spinlock_lock(&fdir_info->flows_lock);
sig = rte_hash_crc(key, sizeof(*key), 0);
ret = rte_hash_lookup_with_hash(fdir_info->hash_handle, key, sig);
- rte_spinlock_unlock(&fdir_info->flows_lock);
return ret;
}
int ret;
key = &fdir_filter->fdir_conf.key_conf;
- rte_spinlock_lock(&fdir_info->flows_lock);
sig = rte_hash_crc(key, sizeof(*key), 0);
ret = rte_hash_add_key_with_hash(fdir_info->hash_handle, key, sig);
if (ret < 0) {
- rte_spinlock_unlock(&fdir_info->flows_lock);
hns3_err(hw, "Hash table full? err:%d(%s)!", ret,
strerror(-ret));
return ret;
fdir_info->hash_map[ret] = fdir_filter;
TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
- rte_spinlock_unlock(&fdir_info->flows_lock);
return ret;
}
hash_sig_t sig;
int ret;
- rte_spinlock_lock(&fdir_info->flows_lock);
sig = rte_hash_crc(key, sizeof(*key), 0);
ret = rte_hash_del_key_with_hash(fdir_info->hash_handle, key, sig);
if (ret < 0) {
- rte_spinlock_unlock(&fdir_info->flows_lock);
hns3_err(hw, "Delete hash key fail ret=%d", ret);
return ret;
}
fdir_filter = fdir_info->hash_map[ret];
fdir_info->hash_map[ret] = NULL;
TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
- rte_spinlock_unlock(&fdir_info->flows_lock);
rte_free(fdir_filter);
ret = hns3_fd_tcam_config(hw, true, rule->location, NULL,
false);
if (ret)
- hns3_err(hw, "Failed to delete fdir: %d src_ip:%x "
- "dst_ip:%x src_port:%d dst_port:%d ret = %d",
+ hns3_err(hw, "Failed to delete fdir: %u src_ip:%x "
+ "dst_ip:%x src_port:%u dst_port:%u ret = %d",
rule->location,
rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
rule->location = ret;
node->fdir_conf.location = ret;
- rte_spinlock_lock(&fdir_info->flows_lock);
ret = hns3_config_action(hw, rule);
if (!ret)
ret = hns3_config_key(hns, rule);
- rte_spinlock_unlock(&fdir_info->flows_lock);
if (ret) {
- hns3_err(hw, "Failed to config fdir: %d src_ip:%x dst_ip:%x "
- "src_port:%d dst_port:%d ret = %d",
+ hns3_err(hw, "Failed to config fdir: %u src_ip:%x dst_ip:%x "
+ "src_port:%u dst_port:%u ret = %d",
rule->location,
rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
int ret = 0;
/* flush flow director */
- rte_spinlock_lock(&fdir_info->flows_lock);
rte_hash_reset(fdir_info->hash_handle);
- rte_spinlock_unlock(&fdir_info->flows_lock);
fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
while (fdir_filter) {
bool err = false;
int ret;
+ /*
+ * This API is called in the reset recovery process; the parent function
+ * must hold hw->lock.
+ * A deadlock may occur if hw->flows_lock is acquired directly, because
+ * the rte flow driver ops first acquire hw->flows_lock and then may
+ * acquire hw->lock.
+ * So first release hw->lock and then acquire hw->flows_lock to avoid
+ * the deadlock.
+ rte_spinlock_unlock(&hw->lock);
+ pthread_mutex_lock(&hw->flows_lock);
TAILQ_FOREACH(fdir_filter, &fdir_info->fdir_list, entries) {
ret = hns3_config_action(hw, &fdir_filter->fdir_conf);
if (!ret)
break;
}
}
+ pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
if (err) {
hns3_err(hw, "Fail to restore FDIR filter, ret = %d", ret);