/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2018-2019 Hisilicon Limited.
+ * Copyright(c) 2018-2021 HiSilicon Limited.
*/
-#include <stdbool.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_io.h>
#define HNS3_FD_AD_DATA_S 32
#define HNS3_FD_AD_DROP_B 0
-#define HNS3_FD_AD_DIRECT_QID_B 1
+#define HNS3_FD_AD_DIRECT_QID_B 1
#define HNS3_FD_AD_QID_S 2
-#define HNS3_FD_AD_QID_M GENMASK(12, 2)
+#define HNS3_FD_AD_QID_M GENMASK(11, 2)
#define HNS3_FD_AD_USE_COUNTER_B 12
#define HNS3_FD_AD_COUNTER_NUM_S 13
-#define HNS3_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
+#define HNS3_FD_AD_COUNTER_NUM_M GENMASK(19, 13)
#define HNS3_FD_AD_NXT_STEP_B 20
#define HNS3_FD_AD_NXT_KEY_S 21
-#define HNS3_FD_AD_NXT_KEY_M GENMASK(26, 21)
-#define HNS3_FD_AD_WR_RULE_ID_B 0
+#define HNS3_FD_AD_NXT_KEY_M GENMASK(25, 21)
+#define HNS3_FD_AD_WR_RULE_ID_B 0
#define HNS3_FD_AD_RULE_ID_S 1
-#define HNS3_FD_AD_RULE_ID_M GENMASK(13, 1)
+#define HNS3_FD_AD_RULE_ID_M GENMASK(12, 1)
+#define HNS3_FD_AD_QUEUE_REGION_EN_B 16
+#define HNS3_FD_AD_QUEUE_REGION_SIZE_S 17
+#define HNS3_FD_AD_QUEUE_REGION_SIZE_M GENMASK(20, 17)
+#define HNS3_FD_AD_COUNTER_HIGH_BIT 7
+#define HNS3_FD_AD_COUNTER_HIGH_BIT_B 26
+#define HNS3_FD_AD_QUEUE_ID_HIGH_BIT 10
+#define HNS3_FD_AD_QUEUE_ID_HIGH_BIT_B 21
enum HNS3_PORT_TYPE {
HOST_PORT,
{INNER_SCTP_TAG, 32},
};
-#define HNS3_BITS_PER_BYTE 8
#define MAX_KEY_LENGTH 400
#define MAX_200B_KEY_LENGTH 200
#define MAX_META_DATA_LENGTH 16
hns3_warn(hw, "Unsupported tunnel filter in 4K*200Bit");
break;
default:
- hns3_err(hw, "Unsupported flow director mode %d",
+ hns3_err(hw, "Unsupported flow director mode %u",
pf->fdir.fd_cfg.fd_mode);
return -EOPNOTSUPP;
}
BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+ hns3_dbg(hw, "fdir tuple: inner<vlan_tag1 eth_type ip_src ip_dst "
+ "ip_proto ip_tos l4_src_port l4_dst_port>");
/* If use max 400bit key, we can support tuples for ether type */
if (pf->fdir.fd_cfg.max_key_length == MAX_KEY_LENGTH) {
BIT(OUTER_DST_PORT) | BIT(INNER_VLAN_TAG2) |
BIT(OUTER_TUN_VNI) | BIT(OUTER_TUN_FLOW_ID) |
BIT(OUTER_ETH_TYPE) | BIT(OUTER_IP_PROTO);
+ hns3_dbg(hw, "fdir tuple more: inner<dst_mac src_mac "
+ "vlan_tag2 sctp_tag> outer<eth_type ip_proto "
+ "l4_src_port l4_dst_port tun_vni tun_flow_id>");
}
/* roce_type is used to filter roce frames
*/
key_cfg->meta_data_active = BIT(DST_VPORT) | BIT(TUNNEL_PACKET) |
BIT(VLAN_NUMBER);
+ hns3_dbg(hw, "fdir meta data: dst_vport tunnel_packet vlan_number");
ret = hns3_get_fd_allocation(hw,
&pf->fdir.fd_cfg.rule_num[HNS3_FD_STAGE_1],
if (ret)
return ret;
+ hns3_dbg(hw, "fdir: stage1<rules-%u counters-%u> stage2<rules-%u "
+		 "counters-%u>",
+ pf->fdir.fd_cfg.rule_num[HNS3_FD_STAGE_1],
+ pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1],
+ pf->fdir.fd_cfg.rule_num[HNS3_FD_STAGE_2],
+ pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_2]);
+
return hns3_set_fd_key_config(hns);
}
action->write_rule_id_to_bd);
hns3_set_field(ad_data, HNS3_FD_AD_RULE_ID_M, HNS3_FD_AD_RULE_ID_S,
action->rule_id);
+ if (action->nb_queues > 1) {
+ hns3_set_bit(ad_data, HNS3_FD_AD_QUEUE_REGION_EN_B, 1);
+ hns3_set_field(ad_data, HNS3_FD_AD_QUEUE_REGION_SIZE_M,
+ HNS3_FD_AD_QUEUE_REGION_SIZE_S,
+ rte_log2_u32(action->nb_queues));
+ }
+ /* set extend bit if counter_id is in [128 ~ 255] */
+ if (action->counter_id & BIT(HNS3_FD_AD_COUNTER_HIGH_BIT))
+ hns3_set_bit(ad_data, HNS3_FD_AD_COUNTER_HIGH_BIT_B, 1);
+	/* set extend bit if queue id >= 1024 */
+ if (action->queue_id & BIT(HNS3_FD_AD_QUEUE_ID_HIGH_BIT))
+ hns3_set_bit(ad_data, HNS3_FD_AD_QUEUE_ID_HIGH_BIT_B, 1);
ad_data <<= HNS3_FD_AD_DATA_S;
hns3_set_bit(ad_data, HNS3_FD_AD_DROP_B, action->drop_packet);
- hns3_set_bit(ad_data, HNS3_FD_AD_DIRECT_QID_B,
- action->forward_to_direct_queue);
+ if (action->nb_queues == 1)
+ hns3_set_bit(ad_data, HNS3_FD_AD_DIRECT_QID_B, 1);
hns3_set_field(ad_data, HNS3_FD_AD_QID_M, HNS3_FD_AD_QID_S,
action->queue_id);
hns3_set_bit(ad_data, HNS3_FD_AD_USE_COUNTER_B, action->use_counter);
HNS3_FD_AD_COUNTER_NUM_S, action->counter_id);
hns3_set_bit(ad_data, HNS3_FD_AD_NXT_STEP_B, action->use_next_stage);
hns3_set_field(ad_data, HNS3_FD_AD_NXT_KEY_M, HNS3_FD_AD_NXT_KEY_S,
- action->counter_id);
+ action->next_input_key);
req->ad_data = rte_cpu_to_le_64(ad_data);
ret = hns3_cmd_send(hw, &desc, 1);
memcpy(val_y, &tmp_y_l, sizeof(tmp_y_l));
}
-static bool hns3_fd_convert_tuple(uint32_t tuple, uint8_t *key_x,
+static bool hns3_fd_convert_tuple(struct hns3_hw *hw,
+ uint32_t tuple, uint8_t *key_x,
uint8_t *key_y, struct hns3_fdir_rule *rule)
{
struct hns3_fdir_key_conf *key_conf;
calc_y(*key_y, key_conf->spec.ip_proto,
key_conf->mask.ip_proto);
break;
+ default:
+ hns3_warn(hw, "not support tuple of (%u)", tuple);
+ break;
}
return true;
}
uint8_t *key_x, uint8_t *key_y)
{
uint16_t meta_data = 0;
- uint16_t port_number;
+ uint32_t port_number;
uint8_t cur_pos = 0;
uint8_t tuple_size;
uint8_t shift_bits;
rule->key_conf.spec.tunnel_type ? 1 : 0);
cur_pos += tuple_size;
} else if (i == VLAN_NUMBER) {
- uint8_t vlan_tag;
+ uint32_t vlan_tag;
uint8_t vlan_num;
if (rule->key_conf.spec.tunnel_type == 0)
vlan_num = rule->key_conf.vlan_num;
tuple_size = tuple_key_info[i].key_length / HNS3_BITS_PER_BYTE;
if (key_cfg->tuple_active & BIT(i)) {
- tuple_valid = hns3_fd_convert_tuple(i, cur_key_x,
+ tuple_valid = hns3_fd_convert_tuple(hw, i, cur_key_x,
cur_key_y, rule);
if (tuple_valid) {
cur_key_x += tuple_size;
ret = hns3_fd_tcam_config(hw, false, rule->location, key_y, true);
if (ret) {
- hns3_err(hw, "Config fd key_y fail, loc=%d, ret=%d",
+ hns3_err(hw, "Config fd key_y fail, loc=%u, ret=%d",
rule->queue_id, ret);
return ret;
}
ret = hns3_fd_tcam_config(hw, true, rule->location, key_x, true);
if (ret)
- hns3_err(hw, "Config fd key_x fail, loc=%d, ret=%d",
+ hns3_err(hw, "Config fd key_x fail, loc=%u, ret=%d",
rule->queue_id, ret);
return ret;
}
if (rule->action == HNS3_FD_ACTION_DROP_PACKET) {
ad_data.drop_packet = true;
- ad_data.forward_to_direct_queue = false;
ad_data.queue_id = 0;
+ ad_data.nb_queues = 0;
} else {
ad_data.drop_packet = false;
- ad_data.forward_to_direct_queue = true;
ad_data.queue_id = rule->queue_id;
+ ad_data.nb_queues = rule->nb_queues;
}
if (unlikely(rule->flags & HNS3_RULE_FLAG_COUNTER)) {
fdir_hash_params.socket_id = rte_socket_id();
TAILQ_INIT(&fdir_info->fdir_list);
- rte_spinlock_init(&fdir_info->flows_lock);
snprintf(fdir_hash_name, RTE_HASH_NAMESIZE, "%s", hns->hw.data->name);
fdir_info->hash_handle = rte_hash_create(&fdir_hash_params);
if (fdir_info->hash_handle == NULL) {
struct hns3_fdir_info *fdir_info = &pf->fdir;
struct hns3_fdir_rule_ele *fdir_filter;
- rte_spinlock_lock(&fdir_info->flows_lock);
if (fdir_info->hash_map) {
rte_free(fdir_info->hash_map);
fdir_info->hash_map = NULL;
rte_hash_free(fdir_info->hash_handle);
fdir_info->hash_handle = NULL;
}
- rte_spinlock_unlock(&fdir_info->flows_lock);
fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
while (fdir_filter) {
hash_sig_t sig;
int ret;
- rte_spinlock_lock(&fdir_info->flows_lock);
sig = rte_hash_crc(key, sizeof(*key), 0);
ret = rte_hash_lookup_with_hash(fdir_info->hash_handle, key, sig);
- rte_spinlock_unlock(&fdir_info->flows_lock);
return ret;
}
int ret;
key = &fdir_filter->fdir_conf.key_conf;
- rte_spinlock_lock(&fdir_info->flows_lock);
sig = rte_hash_crc(key, sizeof(*key), 0);
ret = rte_hash_add_key_with_hash(fdir_info->hash_handle, key, sig);
if (ret < 0) {
- rte_spinlock_unlock(&fdir_info->flows_lock);
hns3_err(hw, "Hash table full? err:%d(%s)!", ret,
- strerror(ret));
+ strerror(-ret));
return ret;
}
fdir_info->hash_map[ret] = fdir_filter;
TAILQ_INSERT_TAIL(&fdir_info->fdir_list, fdir_filter, entries);
- rte_spinlock_unlock(&fdir_info->flows_lock);
return ret;
}
hash_sig_t sig;
int ret;
- rte_spinlock_lock(&fdir_info->flows_lock);
sig = rte_hash_crc(key, sizeof(*key), 0);
ret = rte_hash_del_key_with_hash(fdir_info->hash_handle, key, sig);
if (ret < 0) {
- rte_spinlock_unlock(&fdir_info->flows_lock);
hns3_err(hw, "Delete hash key fail ret=%d", ret);
return ret;
}
fdir_filter = fdir_info->hash_map[ret];
fdir_info->hash_map[ret] = NULL;
TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
- rte_spinlock_unlock(&fdir_info->flows_lock);
rte_free(fdir_filter);
ret = hns3_fd_tcam_config(hw, true, rule->location, NULL,
false);
if (ret)
- hns3_err(hw, "Failed to delete fdir: %d src_ip:%x "
- "dst_ip:%x src_port:%d dst_port:%d ret = %d",
+ hns3_err(hw, "Failed to delete fdir: %u src_ip:%x "
+ "dst_ip:%x src_port:%u dst_port:%u ret = %d",
rule->location,
rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
rule->location = ret;
node->fdir_conf.location = ret;
- rte_spinlock_lock(&fdir_info->flows_lock);
ret = hns3_config_action(hw, rule);
if (!ret)
ret = hns3_config_key(hns, rule);
- rte_spinlock_unlock(&fdir_info->flows_lock);
if (ret) {
- hns3_err(hw, "Failed to config fdir: %d src_ip:%x dst_ip:%x "
- "src_port:%d dst_port:%d ret = %d",
+ hns3_err(hw, "Failed to config fdir: %u src_ip:%x dst_ip:%x "
+ "src_port:%u dst_port:%u ret = %d",
rule->location,
rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
struct hns3_fdir_info *fdir_info = &pf->fdir;
struct hns3_fdir_rule_ele *fdir_filter;
struct hns3_hw *hw = &hns->hw;
+ int succ_cnt = 0;
+ int fail_cnt = 0;
int ret = 0;
/* flush flow director */
- rte_spinlock_lock(&fdir_info->flows_lock);
rte_hash_reset(fdir_info->hash_handle);
- rte_spinlock_unlock(&fdir_info->flows_lock);
+
+ memset(fdir_info->hash_map, 0,
+ sizeof(struct hns3_fdir_rule_ele *) *
+ fdir_info->fd_cfg.rule_num[HNS3_FD_STAGE_1]);
fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
while (fdir_filter) {
TAILQ_REMOVE(&fdir_info->fdir_list, fdir_filter, entries);
- ret += hns3_fd_tcam_config(hw, true,
- fdir_filter->fdir_conf.location,
- NULL, false);
+ ret = hns3_fd_tcam_config(hw, true,
+ fdir_filter->fdir_conf.location,
+ NULL, false);
+ if (ret == 0)
+ succ_cnt++;
+ else
+ fail_cnt++;
rte_free(fdir_filter);
fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list);
}
- if (ret) {
- hns3_err(hw, "Fail to delete FDIR filter, ret = %d", ret);
+ if (fail_cnt > 0) {
+ hns3_err(hw, "fail to delete all FDIR filter, success num = %d "
+ "fail num = %d", succ_cnt, fail_cnt);
ret = -EIO;
}
+
return ret;
}
bool err = false;
int ret;
+ /*
+ * This API is called in the reset recovery process, the parent function
+ * must hold hw->lock.
+ * There maybe deadlock if acquire hw->flows_lock directly because rte
+ * flow driver ops first acquire hw->flows_lock and then may acquire
+ * hw->lock.
+ * So here first release the hw->lock and then acquire the
+ * hw->flows_lock to avoid deadlock.
+ */
+ rte_spinlock_unlock(&hw->lock);
+ pthread_mutex_lock(&hw->flows_lock);
TAILQ_FOREACH(fdir_filter, &fdir_info->fdir_list, entries) {
ret = hns3_config_action(hw, &fdir_filter->fdir_conf);
if (!ret)
break;
}
}
+ pthread_mutex_unlock(&hw->flows_lock);
+ rte_spinlock_lock(&hw->lock);
if (err) {
hns3_err(hw, "Fail to restore FDIR filter, ret = %d", ret);