1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 HiSilicon Limited.
5 #include <rte_flow_driver.h>
7 #include <rte_malloc.h>
9 #include "hns3_ethdev.h"
10 #include "hns3_logs.h"
11 #include "hns3_flow.h"
13 /* Default hash keys */
14 static uint8_t hns3_hash_key[] = {
15 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
16 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
17 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
18 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
19 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
22 static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
23 static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };
25 /* Special filter ID for non-specific packet flagging. Don't change this value. */
26 #define HNS3_MAX_FILTER_ID 0x0FFF
28 #define ETHER_TYPE_MASK 0xFFFF
29 #define IPPROTO_MASK 0xFF
30 #define TUNNEL_TYPE_MASK 0xFFFF
32 #define HNS3_TUNNEL_TYPE_VXLAN 0x12B5
33 #define HNS3_TUNNEL_TYPE_VXLAN_GPE 0x12B6
34 #define HNS3_TUNNEL_TYPE_GENEVE 0x17C1
35 #define HNS3_TUNNEL_TYPE_NVGRE 0x6558
37 static enum rte_flow_item_type first_items[] = {
38 RTE_FLOW_ITEM_TYPE_ETH,
39 RTE_FLOW_ITEM_TYPE_IPV4,
40 RTE_FLOW_ITEM_TYPE_IPV6,
41 RTE_FLOW_ITEM_TYPE_TCP,
42 RTE_FLOW_ITEM_TYPE_UDP,
43 RTE_FLOW_ITEM_TYPE_SCTP,
44 RTE_FLOW_ITEM_TYPE_ICMP,
45 RTE_FLOW_ITEM_TYPE_NVGRE,
46 RTE_FLOW_ITEM_TYPE_VXLAN,
47 RTE_FLOW_ITEM_TYPE_GENEVE,
48 RTE_FLOW_ITEM_TYPE_VXLAN_GPE
51 static enum rte_flow_item_type L2_next_items[] = {
52 RTE_FLOW_ITEM_TYPE_VLAN,
53 RTE_FLOW_ITEM_TYPE_IPV4,
54 RTE_FLOW_ITEM_TYPE_IPV6
57 static enum rte_flow_item_type L3_next_items[] = {
58 RTE_FLOW_ITEM_TYPE_TCP,
59 RTE_FLOW_ITEM_TYPE_UDP,
60 RTE_FLOW_ITEM_TYPE_SCTP,
61 RTE_FLOW_ITEM_TYPE_NVGRE,
62 RTE_FLOW_ITEM_TYPE_ICMP
65 static enum rte_flow_item_type L4_next_items[] = {
66 RTE_FLOW_ITEM_TYPE_VXLAN,
67 RTE_FLOW_ITEM_TYPE_GENEVE,
68 RTE_FLOW_ITEM_TYPE_VXLAN_GPE
71 static enum rte_flow_item_type tunnel_next_items[] = {
72 RTE_FLOW_ITEM_TYPE_ETH,
73 RTE_FLOW_ITEM_TYPE_VLAN
76 struct items_step_mngr {
77 enum rte_flow_item_type *items;
82 net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
86 for (i = 0; i < len; i++)
87 dst[i] = rte_be_to_cpu_32(src[i]);
91 * This function is used to find the RSS general action.
92 * 1. As we know, RSS is used to spread packets among several queues; the flow
93 * API provides struct rte_flow_action_rss, and the user can configure its
94 * fields, such as func/level/types/key/queue, to control the RSS function.
95 * 2. The flow API also supports queue region configuration for hns3. It is
96 * implemented by FDIR + RSS in hns3 hardware: the user can create one FDIR
97 * rule whose action is an RSS queue region.
98 * 3. When the action is RSS, we use the following rule to distinguish:
99 * Case 1: the pattern has ETH and the action's queue_num > 0, indicating a
100 * queue region configuration.
101 * Case other: a general RSS action.
103 static const struct rte_flow_action *
104 hns3_find_rss_general_action(const struct rte_flow_item pattern[],
105 const struct rte_flow_action actions[])
107 const struct rte_flow_action *act = NULL;
108 const struct hns3_rss_conf *rss;
109 bool have_eth = false;
111 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
112 if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
120 for (; pattern->type != RTE_FLOW_ITEM_TYPE_END; pattern++) {
121 if (pattern->type == RTE_FLOW_ITEM_TYPE_ETH) {
128 if (have_eth && rss->conf.queue_num) {
130 * The pattern has ETH and the action's queue_num > 0, indicating this is
131 * a queue region configuration.
132 * Because the queue region is implemented by FDIR + RSS in hns3
133 * hardware, it must go through the FDIR process, so return NULL
134 * here to avoid entering the RSS process.
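/*
 * Illustrative sketch (not part of the driver): the two RSS action shapes
 * distinguished above, as an application would build them. A pattern
 * containing ETH plus an RSS action with queue_num > 0 is taken as a queue
 * region request; an RSS action without such a pattern is a general RSS
 * configuration. The queue IDs below are hypothetical.
 */
static __rte_unused void
example_rss_action_shapes(void)
{
	/* Queue region: a power-of-2 count of contiguous queues. */
	static const uint16_t region_queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss queue_region_conf = {
		.queue_num = RTE_DIM(region_queues),
		.queue = region_queues,
	};
	const struct rte_flow_item region_pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	/* General RSS: no queue list, only hash types to enable. */
	const struct rte_flow_action_rss general_conf = {
		.types = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
	};

	RTE_SET_USED(queue_region_conf);
	RTE_SET_USED(region_pattern);
	RTE_SET_USED(general_conf);
}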
142 static inline struct hns3_flow_counter *
143 hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
145 struct hns3_adapter *hns = dev->data->dev_private;
146 struct hns3_pf *pf = &hns->pf;
147 struct hns3_flow_counter *cnt;
149 LIST_FOREACH(cnt, &pf->flow_counters, next) {
157 hns3_counter_new(struct rte_eth_dev *dev, uint32_t indirect, uint32_t id,
158 struct rte_flow_error *error)
160 struct hns3_adapter *hns = dev->data->dev_private;
161 struct hns3_pf *pf = &hns->pf;
162 struct hns3_hw *hw = &hns->hw;
163 struct hns3_flow_counter *cnt;
167 cnt = hns3_counter_lookup(dev, id);
169 if (!cnt->indirect || cnt->indirect != indirect)
170 return rte_flow_error_set(error, ENOTSUP,
171 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
173 "Counter id is used, indirect flag not match");
174 /* Clear the indirect counter on first use. */
175 if (cnt->indirect && cnt->ref_cnt == 1)
176 (void)hns3_get_count(hw, id, &value);
181 /* Clear the counter by reading it, because the counter is read-clear */
182 ret = hns3_get_count(hw, id, &value);
184 return rte_flow_error_set(error, EIO,
185 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
186 "Clear counter failed!");
188 cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
190 return rte_flow_error_set(error, ENOMEM,
191 RTE_FLOW_ERROR_TYPE_HANDLE, cnt,
192 "Alloc mem for counter failed");
194 cnt->indirect = indirect;
197 LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
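/*
 * Illustrative sketch (not part of the driver): how an application reads a
 * counter attached with a COUNT action. The hardware counter is read-clear,
 * which is why it is cleared with a read at allocation time above;
 * rte_flow_query() then reports the hits accumulated since rule creation.
 * `port_id` and `flow` are assumed to come from a successful
 * rte_flow_create().
 */
static __rte_unused int
example_query_flow_counter(uint16_t port_id, struct rte_flow *flow,
			   uint64_t *hits)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count qc = { .reset = 0 };
	struct rte_flow_error qerr;
	int ret;

	ret = rte_flow_query(port_id, flow, &count_action, &qc, &qerr);
	if (ret != 0)
		return ret;
	*hits = qc.hits;
	return 0;
}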
202 hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
203 struct rte_flow_query_count *qc,
204 struct rte_flow_error *error)
206 struct hns3_adapter *hns = dev->data->dev_private;
207 struct hns3_flow_counter *cnt;
211 /* FDIR is available only in PF driver */
213 return rte_flow_error_set(error, ENOTSUP,
214 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
215 "Fdir is not supported in VF");
216 cnt = hns3_counter_lookup(dev, flow->counter_id);
218 return rte_flow_error_set(error, EINVAL,
219 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
220 "Can't find counter id");
222 ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
224 rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
225 NULL, "Read counter fail.");
237 hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
239 struct hns3_adapter *hns = dev->data->dev_private;
240 struct hns3_hw *hw = &hns->hw;
241 struct hns3_flow_counter *cnt;
243 cnt = hns3_counter_lookup(dev, id);
245 hns3_err(hw, "Can't find available counter to release");
249 if (cnt->ref_cnt == 0) {
250 LIST_REMOVE(cnt, next);
257 hns3_counter_flush(struct rte_eth_dev *dev)
259 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
260 LIST_HEAD(counters, hns3_flow_counter) indir_counters;
261 struct hns3_flow_counter *cnt_ptr;
263 LIST_INIT(&indir_counters);
264 cnt_ptr = LIST_FIRST(&pf->flow_counters);
266 LIST_REMOVE(cnt_ptr, next);
267 if (cnt_ptr->indirect)
268 LIST_INSERT_HEAD(&indir_counters, cnt_ptr, next);
271 cnt_ptr = LIST_FIRST(&pf->flow_counters);
274 /* Reset the indirect action and add to pf->flow_counters list. */
275 cnt_ptr = LIST_FIRST(&indir_counters);
277 LIST_REMOVE(cnt_ptr, next);
278 cnt_ptr->ref_cnt = 1;
280 LIST_INSERT_HEAD(&pf->flow_counters, cnt_ptr, next);
281 cnt_ptr = LIST_FIRST(&indir_counters);
286 hns3_handle_action_queue(struct rte_eth_dev *dev,
287 const struct rte_flow_action *action,
288 struct hns3_fdir_rule *rule,
289 struct rte_flow_error *error)
291 struct hns3_adapter *hns = dev->data->dev_private;
292 const struct rte_flow_action_queue *queue;
293 struct hns3_hw *hw = &hns->hw;
295 queue = (const struct rte_flow_action_queue *)action->conf;
296 if (queue->index >= hw->data->nb_rx_queues) {
297 hns3_err(hw, "queue ID(%u) is greater than number of "
298 "available queue (%u) in driver.",
299 queue->index, hw->data->nb_rx_queues);
300 return rte_flow_error_set(error, EINVAL,
301 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
302 action, "Invalid queue ID in PF");
305 rule->queue_id = queue->index;
307 rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
312 hns3_handle_action_queue_region(struct rte_eth_dev *dev,
313 const struct rte_flow_action *action,
314 struct hns3_fdir_rule *rule,
315 struct rte_flow_error *error)
317 struct hns3_adapter *hns = dev->data->dev_private;
318 const struct rte_flow_action_rss *conf = action->conf;
319 struct hns3_hw *hw = &hns->hw;
322 if (!hns3_dev_get_support(hw, FD_QUEUE_REGION))
323 return rte_flow_error_set(error, ENOTSUP,
324 RTE_FLOW_ERROR_TYPE_ACTION, action,
325 "Not support config queue region!");
327 if ((!rte_is_power_of_2(conf->queue_num)) ||
328 conf->queue_num > hw->rss_size_max ||
329 conf->queue[0] >= hw->data->nb_rx_queues ||
330 conf->queue[0] + conf->queue_num > hw->data->nb_rx_queues) {
331 return rte_flow_error_set(error, EINVAL,
332 RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
333 "Invalid start queue ID and queue num! the start queue "
334 "ID must valid, the queue num must be power of 2 and "
338 for (idx = 1; idx < conf->queue_num; idx++) {
339 if (conf->queue[idx] != conf->queue[idx - 1] + 1)
340 return rte_flow_error_set(error, EINVAL,
341 RTE_FLOW_ERROR_TYPE_ACTION_CONF, action,
342 "Invalid queue ID sequence! the queue ID "
343 "must be continuous increment.");
346 rule->queue_id = conf->queue[0];
347 rule->nb_queues = conf->queue_num;
348 rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
353 hns3_handle_action_indirect(struct rte_eth_dev *dev,
354 const struct rte_flow_action *action,
355 struct hns3_fdir_rule *rule,
356 struct rte_flow_error *error)
358 const struct rte_flow_action_handle *indir = action->conf;
360 if (indir->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT)
361 return rte_flow_error_set(error, EINVAL,
362 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
363 action, "Invalid indirect type");
365 if (hns3_counter_lookup(dev, indir->counter_id) == NULL)
366 return rte_flow_error_set(error, EINVAL,
367 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
368 action, "Counter id not exist");
370 rule->act_cnt.id = indir->counter_id;
371 rule->flags |= (HNS3_RULE_FLAG_COUNTER | HNS3_RULE_FLAG_COUNTER_INDIR);
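/*
 * Illustrative sketch (not part of the driver): creating an indirect COUNT
 * action, which is the object validated above. The returned handle is later
 * passed as the conf of an RTE_FLOW_ACTION_TYPE_INDIRECT action in a rule,
 * and released with rte_flow_action_handle_destroy(). The counter id 0 is
 * hypothetical; it must be below the stage-1 counter number of the device.
 */
static __rte_unused struct rte_flow_action_handle *
example_create_indirect_counter(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action_count count_conf = { .id = 0 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
		.conf = &count_conf,
	};

	return rte_flow_action_handle_create(port_id, &conf, &action, err);
}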
377 * Parse the actions structure from the provided list of actions.
378 * The actions are validated as they are copied.
382 * NIC-specific actions derived from the actions.
386 hns3_handle_actions(struct rte_eth_dev *dev,
387 const struct rte_flow_action actions[],
388 struct hns3_fdir_rule *rule, struct rte_flow_error *error)
390 struct hns3_adapter *hns = dev->data->dev_private;
391 const struct rte_flow_action_count *act_count;
392 const struct rte_flow_action_mark *mark;
393 struct hns3_pf *pf = &hns->pf;
394 uint32_t counter_num;
397 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
398 switch (actions->type) {
399 case RTE_FLOW_ACTION_TYPE_QUEUE:
400 ret = hns3_handle_action_queue(dev, actions, rule,
405 case RTE_FLOW_ACTION_TYPE_DROP:
406 rule->action = HNS3_FD_ACTION_DROP_PACKET;
409 * Here the RSS action's real meaning is a queue region.
410 * A queue region is implemented by FDIR + RSS in hns3 hardware:
411 * the FDIR action selects one queue region (start_queue_id and
412 * queue_num), and then RSS spreads packets across the queue region by
415 case RTE_FLOW_ACTION_TYPE_RSS:
416 ret = hns3_handle_action_queue_region(dev, actions,
421 case RTE_FLOW_ACTION_TYPE_MARK:
423 (const struct rte_flow_action_mark *)actions->conf;
424 if (mark->id >= HNS3_MAX_FILTER_ID)
425 return rte_flow_error_set(error, EINVAL,
426 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
429 rule->fd_id = mark->id;
430 rule->flags |= HNS3_RULE_FLAG_FDID;
432 case RTE_FLOW_ACTION_TYPE_FLAG:
433 rule->fd_id = HNS3_MAX_FILTER_ID;
434 rule->flags |= HNS3_RULE_FLAG_FDID;
436 case RTE_FLOW_ACTION_TYPE_COUNT:
438 (const struct rte_flow_action_count *)actions->conf;
439 counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
440 if (act_count->id >= counter_num)
441 return rte_flow_error_set(error, EINVAL,
442 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
444 "Invalid counter id");
445 rule->act_cnt = *act_count;
446 rule->flags |= HNS3_RULE_FLAG_COUNTER;
447 rule->flags &= ~HNS3_RULE_FLAG_COUNTER_INDIR;
449 case RTE_FLOW_ACTION_TYPE_INDIRECT:
450 ret = hns3_handle_action_indirect(dev, actions, rule,
455 case RTE_FLOW_ACTION_TYPE_VOID:
458 return rte_flow_error_set(error, ENOTSUP,
459 RTE_FLOW_ERROR_TYPE_ACTION,
460 NULL, "Unsupported action");
468 hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
471 return rte_flow_error_set(error, EINVAL,
472 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
473 attr, "Ingress can't be zero");
475 return rte_flow_error_set(error, ENOTSUP,
476 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
477 attr, "Not support egress");
479 return rte_flow_error_set(error, ENOTSUP,
480 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
481 attr, "No support for transfer");
483 return rte_flow_error_set(error, ENOTSUP,
484 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
485 attr, "Not support priority");
487 return rte_flow_error_set(error, ENOTSUP,
488 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
489 attr, "Not support group");
494 hns3_parse_eth(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
495 struct rte_flow_error *error __rte_unused)
497 const struct rte_flow_item_eth *eth_spec;
498 const struct rte_flow_item_eth *eth_mask;
500 /* Only used to describe the protocol stack. */
501 if (item->spec == NULL && item->mask == NULL)
505 eth_mask = item->mask;
506 if (eth_mask->type) {
507 hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
508 rule->key_conf.mask.ether_type =
509 rte_be_to_cpu_16(eth_mask->type);
511 if (!rte_is_zero_ether_addr(ð_mask->src)) {
512 hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
513 memcpy(rule->key_conf.mask.src_mac,
514 eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
516 if (!rte_is_zero_ether_addr(ð_mask->dst)) {
517 hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
518 memcpy(rule->key_conf.mask.dst_mac,
519 eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
523 eth_spec = item->spec;
524 rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
525 memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
527 memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
533 hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
534 struct rte_flow_error *error)
536 const struct rte_flow_item_vlan *vlan_spec;
537 const struct rte_flow_item_vlan *vlan_mask;
539 rule->key_conf.vlan_num++;
540 if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
541 return rte_flow_error_set(error, EINVAL,
542 RTE_FLOW_ERROR_TYPE_ITEM, item,
543 "Vlan_num is more than 2");
545 /* Only used to describe the protocol stack. */
546 if (item->spec == NULL && item->mask == NULL)
550 vlan_mask = item->mask;
551 if (vlan_mask->tci) {
552 if (rule->key_conf.vlan_num == 1) {
553 hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
555 rule->key_conf.mask.vlan_tag1 =
556 rte_be_to_cpu_16(vlan_mask->tci);
558 hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
560 rule->key_conf.mask.vlan_tag2 =
561 rte_be_to_cpu_16(vlan_mask->tci);
566 vlan_spec = item->spec;
567 if (rule->key_conf.vlan_num == 1)
568 rule->key_conf.spec.vlan_tag1 =
569 rte_be_to_cpu_16(vlan_spec->tci);
571 rule->key_conf.spec.vlan_tag2 =
572 rte_be_to_cpu_16(vlan_spec->tci);
577 hns3_check_ipv4_mask_supported(const struct rte_flow_item_ipv4 *ipv4_mask)
579 if (ipv4_mask->hdr.total_length || ipv4_mask->hdr.packet_id ||
580 ipv4_mask->hdr.fragment_offset || ipv4_mask->hdr.time_to_live ||
581 ipv4_mask->hdr.hdr_checksum)
588 hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
589 struct rte_flow_error *error)
591 const struct rte_flow_item_ipv4 *ipv4_spec;
592 const struct rte_flow_item_ipv4 *ipv4_mask;
594 hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
595 rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
596 rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
598 /* Only used to describe the protocol stack. */
599 if (item->spec == NULL && item->mask == NULL)
603 ipv4_mask = item->mask;
604 if (!hns3_check_ipv4_mask_supported(ipv4_mask)) {
605 return rte_flow_error_set(error, EINVAL,
606 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
608 "Only support src & dst ip,tos,proto in IPV4");
611 if (ipv4_mask->hdr.src_addr) {
612 hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
613 rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
614 rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
617 if (ipv4_mask->hdr.dst_addr) {
618 hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
619 rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
620 rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
623 if (ipv4_mask->hdr.type_of_service) {
624 hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
625 rule->key_conf.mask.ip_tos =
626 ipv4_mask->hdr.type_of_service;
629 if (ipv4_mask->hdr.next_proto_id) {
630 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
631 rule->key_conf.mask.ip_proto =
632 ipv4_mask->hdr.next_proto_id;
636 ipv4_spec = item->spec;
637 rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
638 rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
639 rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
640 rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
641 rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
642 rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
647 hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
648 struct rte_flow_error *error)
650 const struct rte_flow_item_ipv6 *ipv6_spec;
651 const struct rte_flow_item_ipv6 *ipv6_mask;
653 hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
654 rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
655 rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
657 /* Only used to describe the protocol stack. */
658 if (item->spec == NULL && item->mask == NULL)
662 ipv6_mask = item->mask;
663 if (ipv6_mask->hdr.vtc_flow || ipv6_mask->hdr.payload_len ||
664 ipv6_mask->hdr.hop_limits) {
665 return rte_flow_error_set(error, EINVAL,
666 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
668 "Only support src & dst ip,proto in IPV6");
670 net_addr_to_host(rule->key_conf.mask.src_ip,
671 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
673 net_addr_to_host(rule->key_conf.mask.dst_ip,
674 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
676 rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
677 if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
678 hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
679 if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
680 hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
681 if (ipv6_mask->hdr.proto)
682 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
685 ipv6_spec = item->spec;
686 net_addr_to_host(rule->key_conf.spec.src_ip,
687 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
689 net_addr_to_host(rule->key_conf.spec.dst_ip,
690 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
692 rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;
698 hns3_check_tcp_mask_supported(const struct rte_flow_item_tcp *tcp_mask)
700 if (tcp_mask->hdr.sent_seq || tcp_mask->hdr.recv_ack ||
701 tcp_mask->hdr.data_off || tcp_mask->hdr.tcp_flags ||
702 tcp_mask->hdr.rx_win || tcp_mask->hdr.cksum ||
703 tcp_mask->hdr.tcp_urp)
710 hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
711 struct rte_flow_error *error)
713 const struct rte_flow_item_tcp *tcp_spec;
714 const struct rte_flow_item_tcp *tcp_mask;
716 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
717 rule->key_conf.spec.ip_proto = IPPROTO_TCP;
718 rule->key_conf.mask.ip_proto = IPPROTO_MASK;
720 /* Only used to describe the protocol stack. */
721 if (item->spec == NULL && item->mask == NULL)
725 tcp_mask = item->mask;
726 if (!hns3_check_tcp_mask_supported(tcp_mask)) {
727 return rte_flow_error_set(error, EINVAL,
728 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
730 "Only support src & dst port in TCP");
733 if (tcp_mask->hdr.src_port) {
734 hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
735 rule->key_conf.mask.src_port =
736 rte_be_to_cpu_16(tcp_mask->hdr.src_port);
738 if (tcp_mask->hdr.dst_port) {
739 hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
740 rule->key_conf.mask.dst_port =
741 rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
745 tcp_spec = item->spec;
746 rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
747 rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);
753 hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
754 struct rte_flow_error *error)
756 const struct rte_flow_item_udp *udp_spec;
757 const struct rte_flow_item_udp *udp_mask;
759 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
760 rule->key_conf.spec.ip_proto = IPPROTO_UDP;
761 rule->key_conf.mask.ip_proto = IPPROTO_MASK;
763 /* Only used to describe the protocol stack. */
764 if (item->spec == NULL && item->mask == NULL)
768 udp_mask = item->mask;
769 if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
770 return rte_flow_error_set(error, EINVAL,
771 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
773 "Only support src & dst port in UDP");
775 if (udp_mask->hdr.src_port) {
776 hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
777 rule->key_conf.mask.src_port =
778 rte_be_to_cpu_16(udp_mask->hdr.src_port);
780 if (udp_mask->hdr.dst_port) {
781 hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
782 rule->key_conf.mask.dst_port =
783 rte_be_to_cpu_16(udp_mask->hdr.dst_port);
787 udp_spec = item->spec;
788 rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
789 rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);
795 hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
796 struct rte_flow_error *error)
798 const struct rte_flow_item_sctp *sctp_spec;
799 const struct rte_flow_item_sctp *sctp_mask;
801 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
802 rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
803 rule->key_conf.mask.ip_proto = IPPROTO_MASK;
805 /* Only used to describe the protocol stack. */
806 if (item->spec == NULL && item->mask == NULL)
810 sctp_mask = item->mask;
811 if (sctp_mask->hdr.cksum)
812 return rte_flow_error_set(error, EINVAL,
813 RTE_FLOW_ERROR_TYPE_ITEM_MASK,
815 "Only support src & dst port in SCTP");
816 if (sctp_mask->hdr.src_port) {
817 hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
818 rule->key_conf.mask.src_port =
819 rte_be_to_cpu_16(sctp_mask->hdr.src_port);
821 if (sctp_mask->hdr.dst_port) {
822 hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
823 rule->key_conf.mask.dst_port =
824 rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
826 if (sctp_mask->hdr.tag) {
827 hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
828 rule->key_conf.mask.sctp_tag =
829 rte_be_to_cpu_32(sctp_mask->hdr.tag);
833 sctp_spec = item->spec;
834 rule->key_conf.spec.src_port =
835 rte_be_to_cpu_16(sctp_spec->hdr.src_port);
836 rule->key_conf.spec.dst_port =
837 rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
838 rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);
844 * Check items before tunnel, save inner configs to outer configs, and clear
846 * The key consists of two parts: meta_data and tuple keys.
847 * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
849 * Tuple keys use 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
850 * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
851 * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
852 * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
853 * vlantag2(16bit) and sctp-tag(32bit).
856 hns3_handle_tunnel(const struct rte_flow_item *item,
857 struct hns3_fdir_rule *rule, struct rte_flow_error *error)
859 /* check eth config */
860 if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
861 return rte_flow_error_set(error, EINVAL,
862 RTE_FLOW_ERROR_TYPE_ITEM,
863 item, "Outer eth mac is unsupported");
864 if (rule->input_set & BIT(INNER_ETH_TYPE)) {
865 hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
866 rule->key_conf.spec.outer_ether_type =
867 rule->key_conf.spec.ether_type;
868 rule->key_conf.mask.outer_ether_type =
869 rule->key_conf.mask.ether_type;
870 hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
871 rule->key_conf.spec.ether_type = 0;
872 rule->key_conf.mask.ether_type = 0;
875 /* check vlan config */
876 if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
877 return rte_flow_error_set(error, EINVAL,
878 RTE_FLOW_ERROR_TYPE_ITEM,
880 "Outer vlan tags is unsupported");
882 /* clear vlan_num for inner vlan select */
883 rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
884 rule->key_conf.vlan_num = 0;
886 /* check L3 config */
887 if (rule->input_set &
888 (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
889 return rte_flow_error_set(error, EINVAL,
890 RTE_FLOW_ERROR_TYPE_ITEM,
891 item, "Outer ip is unsupported");
892 if (rule->input_set & BIT(INNER_IP_PROTO)) {
893 hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
894 rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
895 rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
896 hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
897 rule->key_conf.spec.ip_proto = 0;
898 rule->key_conf.mask.ip_proto = 0;
901 /* check L4 config */
902 if (rule->input_set & BIT(INNER_SCTP_TAG))
903 return rte_flow_error_set(error, EINVAL,
904 RTE_FLOW_ERROR_TYPE_ITEM, item,
905 "Outer sctp tag is unsupported");
907 if (rule->input_set & BIT(INNER_SRC_PORT)) {
908 hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
909 rule->key_conf.spec.outer_src_port =
910 rule->key_conf.spec.src_port;
911 rule->key_conf.mask.outer_src_port =
912 rule->key_conf.mask.src_port;
913 hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
914 rule->key_conf.spec.src_port = 0;
915 rule->key_conf.mask.src_port = 0;
917 if (rule->input_set & BIT(INNER_DST_PORT)) {
918 hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
919 rule->key_conf.spec.dst_port = 0;
920 rule->key_conf.mask.dst_port = 0;
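/*
 * Illustrative sketch (not part of the driver): a VXLAN pattern as seen by
 * the tunnel handling above. Items before the VXLAN item are moved to the
 * outer key fields by hns3_handle_tunnel(); items after it fill the inner
 * fields again. The VNI value is hypothetical.
 */
static __rte_unused void
example_vxlan_pattern(void)
{
	const struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x12, 0x34 },
	};
	const struct rte_flow_item_vxlan vxlan_mask = {
		.vni = { 0xFF, 0xFF, 0xFF }, /* VNI must be fully masked */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer L2 */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* outer L3 */
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* outer L4 */
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* inner L2 */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	RTE_SET_USED(pattern);
}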
926 hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
927 struct rte_flow_error *error)
929 const struct rte_flow_item_vxlan *vxlan_spec;
930 const struct rte_flow_item_vxlan *vxlan_mask;
932 hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
933 rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
934 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
935 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
937 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;
939 /* Only used to describe the protocol stack. */
940 if (item->spec == NULL && item->mask == NULL)
943 vxlan_mask = item->mask;
944 vxlan_spec = item->spec;
946 if (vxlan_mask->flags)
947 return rte_flow_error_set(error, EINVAL,
948 RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
949 "Flags is not supported in VxLAN");
951 /* VNI must be totally masked or not. */
952 if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
953 memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
954 return rte_flow_error_set(error, EINVAL,
955 RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
956 "VNI must be totally masked or not in VxLAN");
957 if (vxlan_mask->vni[0]) {
958 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
959 memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
962 memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
968 hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
969 struct rte_flow_error *error)
971 const struct rte_flow_item_nvgre *nvgre_spec;
972 const struct rte_flow_item_nvgre *nvgre_mask;
974 hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
975 rule->key_conf.spec.outer_proto = IPPROTO_GRE;
976 rule->key_conf.mask.outer_proto = IPPROTO_MASK;
978 hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
979 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
980 rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
981 /* Only used to describe the protocol stack. */
982 if (item->spec == NULL && item->mask == NULL)
985 nvgre_mask = item->mask;
986 nvgre_spec = item->spec;
988 if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
989 return rte_flow_error_set(error, EINVAL,
990 RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
991 "Ver/protocol is not supported in NVGRE");
993 /* TNI must be totally masked or not. */
994 if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
995 memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
996 return rte_flow_error_set(error, EINVAL,
997 RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
998 "TNI must be totally masked or not in NVGRE");
1000 if (nvgre_mask->tni[0]) {
1001 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1002 memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
1005 memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
1008 if (nvgre_mask->flow_id) {
1009 hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
1010 rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
1012 rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
1017 hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1018 struct rte_flow_error *error)
1020 const struct rte_flow_item_geneve *geneve_spec;
1021 const struct rte_flow_item_geneve *geneve_mask;
1023 hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
1024 rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
1025 rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
1026 /* Only used to describe the protocol stack. */
1027 if (item->spec == NULL && item->mask == NULL)
1030 geneve_mask = item->mask;
1031 geneve_spec = item->spec;
1033 if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
1034 return rte_flow_error_set(error, EINVAL,
1035 RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1036 "Ver/protocol is not supported in GENEVE");
1037 /* VNI must be totally masked or not. */
1038 if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
1039 memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
1040 return rte_flow_error_set(error, EINVAL,
1041 RTE_FLOW_ERROR_TYPE_ITEM_MASK, item,
1042 "VNI must be totally masked or not in GENEVE");
1043 if (geneve_mask->vni[0]) {
1044 hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
1045 memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
1048 memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
1054 hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1055 struct rte_flow_error *error)
1059 if (item->spec == NULL && item->mask)
1060 return rte_flow_error_set(error, EINVAL,
1061 RTE_FLOW_ERROR_TYPE_ITEM, item,
1062 "Can't configure FDIR with mask "
1063 "but without spec");
1064 else if (item->spec && (item->mask == NULL))
1065 return rte_flow_error_set(error, EINVAL,
1066 RTE_FLOW_ERROR_TYPE_ITEM, item,
1067 "Tunnel packets must configure "
1070 switch (item->type) {
1071 case RTE_FLOW_ITEM_TYPE_VXLAN:
1072 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1073 ret = hns3_parse_vxlan(item, rule, error);
1075 case RTE_FLOW_ITEM_TYPE_NVGRE:
1076 ret = hns3_parse_nvgre(item, rule, error);
1078 case RTE_FLOW_ITEM_TYPE_GENEVE:
1079 ret = hns3_parse_geneve(item, rule, error);
1082 return rte_flow_error_set(error, ENOTSUP,
1083 RTE_FLOW_ERROR_TYPE_ITEM,
1084 NULL, "Unsupported tunnel type!");
1088 return hns3_handle_tunnel(item, rule, error);
1092 hns3_parse_normal(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
1093 struct items_step_mngr *step_mngr,
1094 struct rte_flow_error *error)
1098 if (item->spec == NULL && item->mask)
1099 return rte_flow_error_set(error, EINVAL,
1100 RTE_FLOW_ERROR_TYPE_ITEM, item,
1101 "Can't configure FDIR with mask "
1102 "but without spec");
1104 switch (item->type) {
1105 case RTE_FLOW_ITEM_TYPE_ETH:
1106 ret = hns3_parse_eth(item, rule, error);
1107 step_mngr->items = L2_next_items;
1108 step_mngr->count = RTE_DIM(L2_next_items);
1110 case RTE_FLOW_ITEM_TYPE_VLAN:
1111 ret = hns3_parse_vlan(item, rule, error);
1112 step_mngr->items = L2_next_items;
1113 step_mngr->count = RTE_DIM(L2_next_items);
1115 case RTE_FLOW_ITEM_TYPE_IPV4:
1116 ret = hns3_parse_ipv4(item, rule, error);
1117 step_mngr->items = L3_next_items;
1118 step_mngr->count = RTE_DIM(L3_next_items);
1120 case RTE_FLOW_ITEM_TYPE_IPV6:
1121 ret = hns3_parse_ipv6(item, rule, error);
1122 step_mngr->items = L3_next_items;
1123 step_mngr->count = RTE_DIM(L3_next_items);
1125 case RTE_FLOW_ITEM_TYPE_TCP:
1126 ret = hns3_parse_tcp(item, rule, error);
1127 step_mngr->items = L4_next_items;
1128 step_mngr->count = RTE_DIM(L4_next_items);
1130 case RTE_FLOW_ITEM_TYPE_UDP:
1131 ret = hns3_parse_udp(item, rule, error);
1132 step_mngr->items = L4_next_items;
1133 step_mngr->count = RTE_DIM(L4_next_items);
1135 case RTE_FLOW_ITEM_TYPE_SCTP:
1136 ret = hns3_parse_sctp(item, rule, error);
1137 step_mngr->items = L4_next_items;
1138 step_mngr->count = RTE_DIM(L4_next_items);
1141 return rte_flow_error_set(error, ENOTSUP,
1142 RTE_FLOW_ERROR_TYPE_ITEM,
1143 NULL, "Unsupported normal type!");
1150 hns3_validate_item(const struct rte_flow_item *item,
1151 struct items_step_mngr step_mngr,
1152 struct rte_flow_error *error)
1157 return rte_flow_error_set(error, ENOTSUP,
1158 RTE_FLOW_ERROR_TYPE_ITEM_LAST, item,
1159 "Not supported last point for range");
1161 for (i = 0; i < step_mngr.count; i++) {
1162 if (item->type == step_mngr.items[i])
1166 if (i == step_mngr.count) {
1167 return rte_flow_error_set(error, EINVAL,
1168 RTE_FLOW_ERROR_TYPE_ITEM,
1169 item, "Inval or missing item");
1175 is_tunnel_packet(enum rte_flow_item_type type)
1177 if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
1178 type == RTE_FLOW_ITEM_TYPE_VXLAN ||
1179 type == RTE_FLOW_ITEM_TYPE_NVGRE ||
1180 type == RTE_FLOW_ITEM_TYPE_GENEVE)
1186 * Parse the flow director rule.
1187 * The supported PATTERN:
1188 * case: non-tunnel packet:
1189 * ETH : src-mac, dst-mac, ethertype
1191 * IPv4: src-ip, dst-ip, tos, proto
1192 * IPv6: src-ip(last 32 bit addr), dst-ip(last 32 bit addr), proto
1193 * UDP : src-port, dst-port
1194 * TCP : src-port, dst-port
1195 * SCTP: src-port, dst-port, tag
1196 * case: tunnel packet:
1197 * OUTER-ETH: ethertype
1199 * OUTER-L4 : src-port, dst-port
1200 * TUNNEL : vni, flow-id(only valid when NVGRE)
1201 * INNER-ETH/VLAN/IPv4/IPv6/UDP/TCP/SCTP: same as non-tunnel packet
1202 * The supported ACTION:
1206 * MARK: the id range [0, 4094]
1208 * RSS: only valid if the firmware supports FD_QUEUE_REGION.
1211 hns3_parse_fdir_filter(struct rte_eth_dev *dev,
1212 const struct rte_flow_item pattern[],
1213 const struct rte_flow_action actions[],
1214 struct hns3_fdir_rule *rule,
1215 struct rte_flow_error *error)
1217 struct hns3_adapter *hns = dev->data->dev_private;
1218 const struct rte_flow_item *item;
1219 struct items_step_mngr step_mngr;
1222 /* FDIR is available only in PF driver */
1224 return rte_flow_error_set(error, ENOTSUP,
1225 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1226 "Fdir not supported in VF");
1228 step_mngr.items = first_items;
1229 step_mngr.count = RTE_DIM(first_items);
1230 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1231 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1234 ret = hns3_validate_item(item, step_mngr, error);
1238 if (is_tunnel_packet(item->type)) {
1239 ret = hns3_parse_tunnel(item, rule, error);
1242 step_mngr.items = tunnel_next_items;
1243 step_mngr.count = RTE_DIM(tunnel_next_items);
1245 ret = hns3_parse_normal(item, rule, &step_mngr, error);
1251 return hns3_handle_actions(dev, actions, rule, error);
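/*
 * Illustrative sketch (not part of the driver): an FDIR rule drawn from the
 * supported shapes above, matching IPv4 TCP dst-port 80 and steering hits
 * to a queue with a mark attached. Port, queue and mark values are
 * hypothetical.
 */
static __rte_unused struct rte_flow *
example_create_fdir_rule(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item_tcp tcp_spec = {
		.hdr.dst_port = RTE_BE16(80),
	};
	const struct rte_flow_item_tcp tcp_mask = {
		.hdr.dst_port = RTE_BE16(0xFFFF),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_action_mark mark = { .id = 0x123 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}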
1255 hns3_filterlist_flush(struct rte_eth_dev *dev)
1257 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1258 struct hns3_fdir_rule_ele *fdir_rule_ptr;
1259 struct hns3_rss_conf_ele *rss_filter_ptr;
1260 struct hns3_flow_mem *flow_node;
1262 fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1263 while (fdir_rule_ptr) {
1264 TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1265 rte_free(fdir_rule_ptr);
1266 fdir_rule_ptr = TAILQ_FIRST(&hw->flow_fdir_list);
1269 rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1270 while (rss_filter_ptr) {
1271 TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1272 rte_free(rss_filter_ptr);
1273 rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1276 flow_node = TAILQ_FIRST(&hw->flow_list);
1278 TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1279 rte_free(flow_node->flow);
1280 rte_free(flow_node);
1281 flow_node = TAILQ_FIRST(&hw->flow_list);
1286 hns3_action_rss_same(const struct rte_flow_action_rss *comp,
1287 const struct rte_flow_action_rss *with)
1289 bool rss_key_is_same;
1293 * When the user flushes all RSS rules, the RSS func is invalidated with
1294 * RTE_ETH_HASH_FUNCTION_MAX. If the user then creates a flow after the
1295 * flush, any valid RSS func differs from the one recorded before the
1296 * flush. Otherwise, when the user creates an RSS action with the func
1297 * specified as RTE_ETH_HASH_FUNCTION_DEFAULT, the func is treated as the
1298 * same between consecutive RSS flows.
1300 if (comp->func == RTE_ETH_HASH_FUNCTION_MAX)
1301 func_is_same = false;
1303 func_is_same = (with->func != RTE_ETH_HASH_FUNCTION_DEFAULT) ?
1304 (comp->func == with->func) : true;
1306 if (with->key_len == 0 || with->key == NULL)
1307 rss_key_is_same = true;
1309 rss_key_is_same = comp->key_len == with->key_len &&
1310 !memcmp(comp->key, with->key, with->key_len);
1312 return (func_is_same && rss_key_is_same &&
1313 comp->types == (with->types & HNS3_ETH_RSS_SUPPORT) &&
1314 comp->level == with->level &&
1315 comp->queue_num == with->queue_num &&
1316 !memcmp(comp->queue, with->queue,
1317 sizeof(*with->queue) * with->queue_num));
1321 hns3_rss_conf_copy(struct hns3_rss_conf *out,
1322 const struct rte_flow_action_rss *in)
1324 if (in->key_len > RTE_DIM(out->key) ||
1325 in->queue_num > RTE_DIM(out->queue))
1327 if (in->key == NULL && in->key_len)
1329 out->conf = (struct rte_flow_action_rss) {
1333 .key_len = in->key_len,
1334 .queue_num = in->queue_num,
1336 out->conf.queue = memcpy(out->queue, in->queue,
1337 sizeof(*in->queue) * in->queue_num);
1339 out->conf.key = memcpy(out->key, in->key, in->key_len);
1345 hns3_rss_input_tuple_supported(struct hns3_hw *hw,
1346 const struct rte_flow_action_rss *rss)
1349 * For IP packets, the src/dst port fields cannot be used in the RSS
1350 * hash for the following packet types:
1351 * - IPV4 FRAG | IPV4 NONFRAG | IPV6 FRAG | IPV6 NONFRAG
1352 * Besides, on Kunpeng920 the NIC HW cannot use the src/dst port
1353 * fields in the RSS hash for the IPV6 SCTP packet type, whereas
1354 * Kunpeng930 and later Kunpeng series can use the src/dst port
1355 * fields in the RSS hash for the IPv6 SCTP packet type.
1357 if (rss->types & (RTE_ETH_RSS_L4_DST_ONLY | RTE_ETH_RSS_L4_SRC_ONLY) &&
1358 (rss->types & RTE_ETH_RSS_IP ||
1359 (!hw->rss_info.ipv6_sctp_offload_supported &&
1360 rss->types & RTE_ETH_RSS_NONFRAG_IPV6_SCTP)))
1367 * This function is used to validate the RSS action.
1370 hns3_parse_rss_filter(struct rte_eth_dev *dev,
1371 const struct rte_flow_action *actions,
1372 struct rte_flow_error *error)
1374 struct hns3_adapter *hns = dev->data->dev_private;
1375 struct hns3_hw *hw = &hns->hw;
1376 struct hns3_rss_conf *rss_conf = &hw->rss_info;
1377 const struct rte_flow_action_rss *rss;
1378 const struct rte_flow_action *act;
1379 uint32_t act_index = 0;
1382 NEXT_ITEM_OF_ACTION(act, actions, act_index);
1386 return rte_flow_error_set(error, EINVAL,
1387 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1388 act, "no valid queues");
1391 if (rss->queue_num > RTE_DIM(rss_conf->queue))
1392 return rte_flow_error_set(error, ENOTSUP,
1393 RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1394 "queue number configured exceeds "
1395 "queue buffer size driver supported");
1397 for (n = 0; n < rss->queue_num; n++) {
1398 if (rss->queue[n] < hw->alloc_rss_size)
1400 return rte_flow_error_set(error, EINVAL,
1401 RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1402 "queue id must be less than queue number allocated to a TC");
1405 if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
1406 return rte_flow_error_set(error, EINVAL,
1407 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1409 "Flow types is unsupported by "
1411 if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX)
1412 return rte_flow_error_set(error, ENOTSUP,
1413 RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1414 "RSS hash func are not supported");
1416 return rte_flow_error_set(error, ENOTSUP,
1417 RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1418 "a nonzero RSS encapsulation level is not supported");
1419 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1420 return rte_flow_error_set(error, ENOTSUP,
1421 RTE_FLOW_ERROR_TYPE_ACTION_CONF, act,
1422 "RSS hash key must be exactly 40 bytes");
1424 if (!hns3_rss_input_tuple_supported(hw, rss))
1425 return rte_flow_error_set(error, EINVAL,
1426 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1428 "input RSS types are not supported");
1432 /* Check that the next non-void action is END */
1433 NEXT_ITEM_OF_ACTION(act, actions, act_index);
1434 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1435 memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
1436 return rte_flow_error_set(error, EINVAL,
1437 RTE_FLOW_ERROR_TYPE_ACTION,
1438 act, "Not supported action.");
1445 hns3_disable_rss(struct hns3_hw *hw)
1449 /* Redirect the redirection table to queue 0 */
1450 ret = hns3_rss_reset_indir_table(hw);
1455 hw->rss_info.conf.types = 0;
1456 hw->rss_dis_flag = true;
1462 hns3_adjust_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
1464 if (rss_conf->key == NULL || rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
1465 hns3_warn(hw, "Default RSS hash key will be used");
1466 rss_conf->key = hns3_hash_key;
1467 rss_conf->key_len = HNS3_RSS_KEY_SIZE;
1472 hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
1475 enum rte_eth_hash_function algo_func = *func;
1476 switch (algo_func) {
1477 case RTE_ETH_HASH_FUNCTION_DEFAULT:
1478 /* Keep *hash_algo as what it used to be */
1479 algo_func = hw->rss_info.conf.func;
1481 case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
1482 *hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
1484 case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
1485 *hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
1487 case RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ:
1488 *hash_algo = HNS3_RSS_HASH_ALGO_SYMMETRIC_TOEP;
1491 hns3_err(hw, "Invalid RSS algorithm configuration(%d)",
1501 hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
1503 struct hns3_rss_tuple_cfg *tuple;
1506 hns3_adjust_rss_key(hw, rss_config);
1508 ret = hns3_parse_rss_algorithm(hw, &rss_config->func,
1509 &hw->rss_info.hash_algo);
1513 ret = hns3_rss_set_algo_key(hw, rss_config->key);
1517 hw->rss_info.conf.func = rss_config->func;
1519 tuple = &hw->rss_info.rss_tuple_sets;
1520 ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
1522 hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);
1528 hns3_update_indir_table(struct rte_eth_dev *dev,
1529 const struct rte_flow_action_rss *conf, uint16_t num)
1531 struct hns3_adapter *hns = dev->data->dev_private;
1532 struct hns3_hw *hw = &hns->hw;
1533 uint16_t indir_tbl[HNS3_RSS_IND_TBL_SIZE_MAX];
1537 /* Fill in redirection table */
1538 memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
1539 sizeof(hw->rss_info.rss_indirection_tbl));
1540 for (i = 0, j = 0; i < hw->rss_ind_tbl_size; i++, j++) {
1542 if (conf->queue[j] >= hw->alloc_rss_size) {
1543 hns3_err(hw, "queue id(%u) set to redirection table "
1544 "exceeds queue number(%u) allocated to a TC.",
1545 conf->queue[j], hw->alloc_rss_size);
1548 indir_tbl[i] = conf->queue[j];
1551 return hns3_set_rss_indir_table(hw, indir_tbl, hw->rss_ind_tbl_size);
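/*
 * Illustrative sketch (not part of the driver) of the round-robin fill
 * above: with a hypothetical 4-queue list { 8, 9, 10, 11 }, the redirection
 * table repeats the list, i.e. indir_tbl[i] == queue[i % 4] for every
 * entry. `num` must be nonzero.
 */
static __rte_unused void
example_indir_table_fill(uint16_t *indir_tbl, uint16_t tbl_size,
			 const uint16_t *queue, uint16_t num)
{
	uint16_t i;

	for (i = 0; i < tbl_size; i++)
		indir_tbl[i] = queue[i % num];
}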
1555 hns3_config_rss_filter(struct rte_eth_dev *dev,
1556 const struct hns3_rss_conf *conf, bool add)
1558 struct hns3_adapter *hns = dev->data->dev_private;
1559 struct hns3_rss_conf_ele *rss_filter_ptr;
1560 struct hns3_hw *hw = &hns->hw;
1561 struct hns3_rss_conf *rss_info;
1562 uint64_t flow_types;
1566 struct rte_flow_action_rss rss_flow_conf = {
1567 .func = conf->conf.func,
1568 .level = conf->conf.level,
1569 .types = conf->conf.types,
1570 .key_len = conf->conf.key_len,
1571 .queue_num = conf->conf.queue_num,
1572 .key = conf->conf.key_len ?
1573 (void *)(uintptr_t)conf->conf.key : NULL,
1574 .queue = conf->conf.queue,
1577 /* Filter the unsupported flow types */
1578 flow_types = conf->conf.types ?
1579 rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT :
1580 hw->rss_info.conf.types;
1581 if (flow_types != rss_flow_conf.types)
1582 hns3_warn(hw, "modified RSS types based on hardware support, "
1583 "requested:0x%" PRIx64 " configured:0x%" PRIx64,
1584 rss_flow_conf.types, flow_types);
1585 /* Update the useful flow types */
1586 rss_flow_conf.types = flow_types;
1588 rss_info = &hw->rss_info;
1593 ret = hns3_disable_rss(hw);
1595 hns3_err(hw, "RSS disable failed(%d)", ret);
1599 if (rss_flow_conf.queue_num) {
1601 * Because the content of the queue pointer has been reset to
1602 * 0, the rss_info->conf.queue should be set to NULL
1604 rss_info->conf.queue = NULL;
1605 rss_info->conf.queue_num = 0;
1608 /* set RSS func invalid after the flush */
1609 rss_info->conf.func = RTE_ETH_HASH_FUNCTION_MAX;
1613 /* Set rx queues to use */
1614 num = RTE_MIN(dev->data->nb_rx_queues, rss_flow_conf.queue_num);
1615 if (rss_flow_conf.queue_num > num)
1616 hns3_warn(hw, "Config queue numbers %u are beyond the scope of truncated",
1617 rss_flow_conf.queue_num);
1618 hns3_info(hw, "A maximum of %u contiguous PF queues are configured", num);
1620 rte_spinlock_lock(&hw->lock);
1622 ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
1624 goto rss_config_err;
1627 /* Set hash algorithm and flow types by the user's config */
1628 ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
1630 goto rss_config_err;
1632 ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
1634 hns3_err(hw, "RSS config init fail(%d)", ret);
1635 goto rss_config_err;
1639 * When a new RSS rule is created, the old rule will be overlaid and set
1642 TAILQ_FOREACH(rss_filter_ptr, &hw->flow_rss_list, entries)
1643 rss_filter_ptr->filter_info.valid = false;
1646 rte_spinlock_unlock(&hw->lock);
1652 hns3_clear_rss_filter(struct rte_eth_dev *dev)
1654 struct hns3_adapter *hns = dev->data->dev_private;
1655 struct hns3_rss_conf_ele *rss_filter_ptr;
1656 struct hns3_hw *hw = &hns->hw;
1657 int rss_rule_succ_cnt = 0; /* count for success of clearing RSS rules */
1658 int rss_rule_fail_cnt = 0; /* count for failure of clearing RSS rules */
1661 rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1662 while (rss_filter_ptr) {
1663 TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1664 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1667 rss_rule_fail_cnt++;
1669 rss_rule_succ_cnt++;
1670 rte_free(rss_filter_ptr);
1671 rss_filter_ptr = TAILQ_FIRST(&hw->flow_rss_list);
1674 if (rss_rule_fail_cnt) {
1675 hns3_err(hw, "fail to delete all RSS filters, success num = %d "
1676 "fail num = %d", rss_rule_succ_cnt,
1685 hns3_restore_rss_filter(struct rte_eth_dev *dev)
1687 struct hns3_adapter *hns = dev->data->dev_private;
1688 struct hns3_hw *hw = &hns->hw;
1690 /* When the user flushes all rules, the RSS rule need not be restored */
1691 if (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_MAX)
1694 return hns3_config_rss_filter(dev, &hw->rss_info, true);
1698 hns3_flow_parse_rss(struct rte_eth_dev *dev,
1699 const struct hns3_rss_conf *conf, bool add)
1701 struct hns3_adapter *hns = dev->data->dev_private;
1702 struct hns3_hw *hw = &hns->hw;
1705 ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
1707 hns3_err(hw, "Enter duplicate RSS configuration : %d", ret);
1711 return hns3_config_rss_filter(dev, conf, add);
1715 hns3_flow_args_check(const struct rte_flow_attr *attr,
1716 const struct rte_flow_item pattern[],
1717 const struct rte_flow_action actions[],
1718 struct rte_flow_error *error)
1720 if (pattern == NULL)
1721 return rte_flow_error_set(error, EINVAL,
1722 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1723 NULL, "NULL pattern.");
1725 if (actions == NULL)
1726 return rte_flow_error_set(error, EINVAL,
1727 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1728 NULL, "NULL action.");
1731 return rte_flow_error_set(error, EINVAL,
1732 RTE_FLOW_ERROR_TYPE_ATTR,
1733 NULL, "NULL attribute.");
1735 return hns3_check_attr(attr, error);
1739 * Check if the flow rule is supported by hns3.
1740 * It only checks the format; it does not guarantee that the rule can be
1741 * programmed into the HW, because there may not be enough room for it.
1744 hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1745 const struct rte_flow_item pattern[],
1746 const struct rte_flow_action actions[],
1747 struct rte_flow_error *error)
1749 struct hns3_fdir_rule fdir_rule;
1752 ret = hns3_flow_args_check(attr, pattern, actions, error);
1756 if (hns3_find_rss_general_action(pattern, actions))
1757 return hns3_parse_rss_filter(dev, actions, error);
1759 memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1760 return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1764 hns3_flow_create_rss_rule(struct rte_eth_dev *dev,
1765 const struct rte_flow_action *act,
1766 struct rte_flow *flow)
1768 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1769 struct hns3_rss_conf_ele *rss_filter_ptr;
1770 const struct hns3_rss_conf *rss_conf;
1773 rss_filter_ptr = rte_zmalloc("hns3 rss filter",
1774 sizeof(struct hns3_rss_conf_ele), 0);
1775 if (rss_filter_ptr == NULL) {
1776 hns3_err(hw, "failed to allocate hns3_rss_filter memory");
1781 * After all the preceding tasks have been configured successfully, program
1782 * the rule into the hardware last, to simplify the rollback of rules in the
1785 rss_conf = (const struct hns3_rss_conf *)act->conf;
1786 ret = hns3_flow_parse_rss(dev, rss_conf, true);
1788 rte_free(rss_filter_ptr);
1792 hns3_rss_conf_copy(&rss_filter_ptr->filter_info, &rss_conf->conf);
1793 rss_filter_ptr->filter_info.valid = true;
1794 TAILQ_INSERT_TAIL(&hw->flow_rss_list, rss_filter_ptr, entries);
1795 flow->rule = rss_filter_ptr;
1796 flow->filter_type = RTE_ETH_FILTER_HASH;
1802 hns3_flow_create_fdir_rule(struct rte_eth_dev *dev,
1803 const struct rte_flow_item pattern[],
1804 const struct rte_flow_action actions[],
1805 struct rte_flow_error *error,
1806 struct rte_flow *flow)
1808 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1809 struct hns3_adapter *hns = HNS3_DEV_HW_TO_ADAPTER(hw);
1810 struct hns3_fdir_rule_ele *fdir_rule_ptr;
1811 struct hns3_fdir_rule fdir_rule;
1815 memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
1816 ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
1820 indir = !!(fdir_rule.flags & HNS3_RULE_FLAG_COUNTER_INDIR);
1821 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
1822 ret = hns3_counter_new(dev, indir, fdir_rule.act_cnt.id,
1827 flow->counter_id = fdir_rule.act_cnt.id;
1830 fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
1831 sizeof(struct hns3_fdir_rule_ele), 0);
1832 if (fdir_rule_ptr == NULL) {
1833 hns3_err(hw, "failed to allocate fdir_rule memory.");
1839 * After all the preceding tasks have been configured successfully, program
1840 * the rule into the hardware last, to simplify the rollback of rules in the
1843 ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
1845 goto err_fdir_filter;
1847 memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
1848 sizeof(struct hns3_fdir_rule));
1849 TAILQ_INSERT_TAIL(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1850 flow->rule = fdir_rule_ptr;
1851 flow->filter_type = RTE_ETH_FILTER_FDIR;
1856 rte_free(fdir_rule_ptr);
1858 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1859 hns3_counter_release(dev, fdir_rule.act_cnt.id);
1865 * Create or destroy a flow rule.
1866 * Theoretically one rule can match more than one filter.
1867 * We will let it use the first filter it hits.
1868 * So, the sequence matters.
1870 static struct rte_flow *
1871 hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1872 const struct rte_flow_item pattern[],
1873 const struct rte_flow_action actions[],
1874 struct rte_flow_error *error)
1876 struct hns3_adapter *hns = dev->data->dev_private;
1877 struct hns3_hw *hw = &hns->hw;
1878 struct hns3_flow_mem *flow_node;
1879 const struct rte_flow_action *act;
1880 struct rte_flow *flow;
1883 ret = hns3_flow_validate(dev, attr, pattern, actions, error);
1887 flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
1889 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1890 NULL, "Failed to allocate flow memory");
1893 flow_node = rte_zmalloc("hns3 flow node",
1894 sizeof(struct hns3_flow_mem), 0);
1895 if (flow_node == NULL) {
1896 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1897 NULL, "Failed to allocate flow list memory");
1902 flow_node->flow = flow;
1903 TAILQ_INSERT_TAIL(&hw->flow_list, flow_node, entries);
1905 act = hns3_find_rss_general_action(pattern, actions);
1907 ret = hns3_flow_create_rss_rule(dev, act, flow);
1909 ret = hns3_flow_create_fdir_rule(dev, pattern, actions,
1914 rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1915 "Failed to create flow");
1916 TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1917 rte_free(flow_node);
1923 /* Destroy a flow rule on hns3. */
1925 hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1926 struct rte_flow_error *error)
1928 struct hns3_adapter *hns = dev->data->dev_private;
1929 struct hns3_fdir_rule_ele *fdir_rule_ptr;
1930 struct hns3_rss_conf_ele *rss_filter_ptr;
1931 struct hns3_flow_mem *flow_node;
1932 enum rte_filter_type filter_type;
1933 struct hns3_fdir_rule fdir_rule;
1934 struct hns3_hw *hw = &hns->hw;
1938 return rte_flow_error_set(error, EINVAL,
1939 RTE_FLOW_ERROR_TYPE_HANDLE,
1940 flow, "Flow is NULL");
1942 filter_type = flow->filter_type;
1943 switch (filter_type) {
1944 case RTE_ETH_FILTER_FDIR:
1945 fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
1946 memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
1947 sizeof(struct hns3_fdir_rule));
1949 ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
1951 return rte_flow_error_set(error, EIO,
1952 RTE_FLOW_ERROR_TYPE_HANDLE,
1954 "Destroy FDIR fail.Try again");
1955 if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
1956 hns3_counter_release(dev, fdir_rule.act_cnt.id);
1957 TAILQ_REMOVE(&hw->flow_fdir_list, fdir_rule_ptr, entries);
1958 rte_free(fdir_rule_ptr);
1959 fdir_rule_ptr = NULL;
1961 case RTE_ETH_FILTER_HASH:
1962 rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
1963 ret = hns3_config_rss_filter(dev, &rss_filter_ptr->filter_info,
1966 return rte_flow_error_set(error, EIO,
1967 RTE_FLOW_ERROR_TYPE_HANDLE,
1969 "Destroy RSS fail.Try again");
1970 TAILQ_REMOVE(&hw->flow_rss_list, rss_filter_ptr, entries);
1971 rte_free(rss_filter_ptr);
1972 rss_filter_ptr = NULL;
1975 return rte_flow_error_set(error, EINVAL,
1976 RTE_FLOW_ERROR_TYPE_HANDLE, flow,
1977 "Unsupported filter type");
1980 TAILQ_FOREACH(flow_node, &hw->flow_list, entries) {
1981 if (flow_node->flow == flow) {
1982 TAILQ_REMOVE(&hw->flow_list, flow_node, entries);
1983 rte_free(flow_node);
1993 /* Destroy all flow rules associated with a port on hns3. */
1995 hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1997 struct hns3_adapter *hns = dev->data->dev_private;
2000 /* FDIR is available only in PF driver */
2002 ret = hns3_clear_all_fdir_filter(hns);
2004 rte_flow_error_set(error, ret,
2005 RTE_FLOW_ERROR_TYPE_HANDLE,
2006 NULL, "Failed to flush rule");
2009 hns3_counter_flush(dev);
2012 ret = hns3_clear_rss_filter(dev);
2014 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
2015 NULL, "Failed to flush rss filter");
2019 hns3_filterlist_flush(dev);
2024 /* Query an existing flow rule. */
2026 hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
2027 const struct rte_flow_action *actions, void *data,
2028 struct rte_flow_error *error)
2030 struct rte_flow_action_rss *rss_conf;
2031 struct hns3_rss_conf_ele *rss_rule;
2032 struct rte_flow_query_count *qc;
2036 return rte_flow_error_set(error, EINVAL,
2037 RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "invalid rule");
2039 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2040 switch (actions->type) {
2041 case RTE_FLOW_ACTION_TYPE_VOID:
2043 case RTE_FLOW_ACTION_TYPE_COUNT:
2044 qc = (struct rte_flow_query_count *)data;
2045 ret = hns3_counter_query(dev, flow, qc, error);
2049 case RTE_FLOW_ACTION_TYPE_RSS:
2050 if (flow->filter_type != RTE_ETH_FILTER_HASH) {
2051 return rte_flow_error_set(error, ENOTSUP,
2052 RTE_FLOW_ERROR_TYPE_ACTION,
2053 actions, "action is not supported");
2055 rss_conf = (struct rte_flow_action_rss *)data;
2056 rss_rule = (struct hns3_rss_conf_ele *)flow->rule;
2057 rte_memcpy(rss_conf, &rss_rule->filter_info.conf,
2058 sizeof(struct rte_flow_action_rss));
2061 return rte_flow_error_set(error, ENOTSUP,
2062 RTE_FLOW_ERROR_TYPE_ACTION,
2063 actions, "action is not supported");
2071 hns3_flow_validate_wrap(struct rte_eth_dev *dev,
2072 const struct rte_flow_attr *attr,
2073 const struct rte_flow_item pattern[],
2074 const struct rte_flow_action actions[],
2075 struct rte_flow_error *error)
2077 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2080 pthread_mutex_lock(&hw->flows_lock);
2081 ret = hns3_flow_validate(dev, attr, pattern, actions, error);
2082 pthread_mutex_unlock(&hw->flows_lock);
2087 static struct rte_flow *
2088 hns3_flow_create_wrap(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2089 const struct rte_flow_item pattern[],
2090 const struct rte_flow_action actions[],
2091 struct rte_flow_error *error)
2093 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2094 struct rte_flow *flow;
2096 pthread_mutex_lock(&hw->flows_lock);
2097 flow = hns3_flow_create(dev, attr, pattern, actions, error);
2098 pthread_mutex_unlock(&hw->flows_lock);
2104 hns3_flow_destroy_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2105 struct rte_flow_error *error)
2107 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2110 pthread_mutex_lock(&hw->flows_lock);
2111 ret = hns3_flow_destroy(dev, flow, error);
2112 pthread_mutex_unlock(&hw->flows_lock);
2118 hns3_flow_flush_wrap(struct rte_eth_dev *dev, struct rte_flow_error *error)
2120 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2123 pthread_mutex_lock(&hw->flows_lock);
2124 ret = hns3_flow_flush(dev, error);
2125 pthread_mutex_unlock(&hw->flows_lock);
2131 hns3_flow_query_wrap(struct rte_eth_dev *dev, struct rte_flow *flow,
2132 const struct rte_flow_action *actions, void *data,
2133 struct rte_flow_error *error)
2135 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2138 pthread_mutex_lock(&hw->flows_lock);
2139 ret = hns3_flow_query(dev, flow, actions, data, error);
2140 pthread_mutex_unlock(&hw->flows_lock);
2146 hns3_check_indir_action(const struct rte_flow_indir_action_conf *conf,
2147 const struct rte_flow_action *action,
2148 struct rte_flow_error *error)
2151 return rte_flow_error_set(error, EINVAL,
2152 RTE_FLOW_ERROR_TYPE_ACTION,
2153 NULL, "Indir action ingress can't be zero");
2156 return rte_flow_error_set(error, EINVAL,
2157 RTE_FLOW_ERROR_TYPE_ACTION,
2158 NULL, "Indir action not support egress");
2161 return rte_flow_error_set(error, EINVAL,
2162 RTE_FLOW_ERROR_TYPE_ACTION,
2163 NULL, "Indir action not support transfer");
2165 if (action->type != RTE_FLOW_ACTION_TYPE_COUNT)
2166 return rte_flow_error_set(error, EINVAL,
2167 RTE_FLOW_ERROR_TYPE_ACTION,
2168 NULL, "Indir action only support count");
2173 static struct rte_flow_action_handle *
2174 hns3_flow_action_create(struct rte_eth_dev *dev,
2175 const struct rte_flow_indir_action_conf *conf,
2176 const struct rte_flow_action *action,
2177 struct rte_flow_error *error)
2179 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2180 struct hns3_pf *pf = HNS3_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2181 const struct rte_flow_action_count *act_count;
2182 struct rte_flow_action_handle *handle = NULL;
2183 struct hns3_flow_counter *counter;
2185 if (hns3_check_indir_action(conf, action, error))
2188 handle = rte_zmalloc("hns3 action handle",
2189 sizeof(struct rte_flow_action_handle), 0);
2190 if (handle == NULL) {
2191 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
2192 NULL, "Failed to allocate action memory");
2196 pthread_mutex_lock(&hw->flows_lock);
2198 act_count = (const struct rte_flow_action_count *)action->conf;
2199 if (act_count->id >= pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1]) {
2200 rte_flow_error_set(error, EINVAL,
2201 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2202 action, "Invalid counter id");
2206 if (hns3_counter_new(dev, false, act_count->id, error))
2209 counter = hns3_counter_lookup(dev, act_count->id);
2210 if (counter == NULL) {
2211 rte_flow_error_set(error, EINVAL,
2212 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2213 action, "Counter id not found");
2217 counter->indirect = true;
2218 handle->indirect_type = HNS3_INDIRECT_ACTION_TYPE_COUNT;
2219 handle->counter_id = counter->id;
2221 pthread_mutex_unlock(&hw->flows_lock);
2225 pthread_mutex_unlock(&hw->flows_lock);
2231 hns3_flow_action_destroy(struct rte_eth_dev *dev,
2232 struct rte_flow_action_handle *handle,
2233 struct rte_flow_error *error)
2235 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2236 struct hns3_flow_counter *counter;
2238 pthread_mutex_lock(&hw->flows_lock);
2240 if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2241 pthread_mutex_unlock(&hw->flows_lock);
2242 return rte_flow_error_set(error, EINVAL,
2243 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2244 handle, "Invalid indirect type");
2247 counter = hns3_counter_lookup(dev, handle->counter_id);
2248 if (counter == NULL) {
2249 pthread_mutex_unlock(&hw->flows_lock);
2250 return rte_flow_error_set(error, EINVAL,
2251 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2252 handle, "Counter id not exist");
2255 if (counter->ref_cnt > 1) {
2256 pthread_mutex_unlock(&hw->flows_lock);
2257 return rte_flow_error_set(error, EBUSY,
2258 RTE_FLOW_ERROR_TYPE_HANDLE,
2259 handle, "Counter id in use");
2262 (void)hns3_counter_release(dev, handle->counter_id);
2265 pthread_mutex_unlock(&hw->flows_lock);
2270 hns3_flow_action_query(struct rte_eth_dev *dev,
2271 const struct rte_flow_action_handle *handle,
2273 struct rte_flow_error *error)
2275 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2276 struct rte_flow flow;
2279 pthread_mutex_lock(&hw->flows_lock);
2281 if (handle->indirect_type != HNS3_INDIRECT_ACTION_TYPE_COUNT) {
2282 pthread_mutex_unlock(&hw->flows_lock);
2283 return rte_flow_error_set(error, EINVAL,
2284 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2285 handle, "Invalid indirect type");
2288 memset(&flow, 0, sizeof(flow));
2289 flow.counter_id = handle->counter_id;
2290 ret = hns3_counter_query(dev, &flow,
2291 (struct rte_flow_query_count *)data, error);
2292 pthread_mutex_unlock(&hw->flows_lock);
2296 static const struct rte_flow_ops hns3_flow_ops = {
2297 .validate = hns3_flow_validate_wrap,
2298 .create = hns3_flow_create_wrap,
2299 .destroy = hns3_flow_destroy_wrap,
2300 .flush = hns3_flow_flush_wrap,
2301 .query = hns3_flow_query_wrap,
2303 .action_handle_create = hns3_flow_action_create,
2304 .action_handle_destroy = hns3_flow_action_destroy,
2305 .action_handle_query = hns3_flow_action_query,
2309 hns3_dev_flow_ops_get(struct rte_eth_dev *dev,
2310 const struct rte_flow_ops **ops)
2314 hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2315 if (hw->adapter_state >= HNS3_NIC_CLOSED)
2318 *ops = &hns3_flow_ops;
2323 hns3_flow_init(struct rte_eth_dev *dev)
2325 struct hns3_hw *hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2326 pthread_mutexattr_t attr;
2328 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2331 pthread_mutexattr_init(&attr);
2332 pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
2333 pthread_mutex_init(&hw->flows_lock, &attr);
2334 dev->data->dev_flags |= RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE;
2336 TAILQ_INIT(&hw->flow_fdir_list);
2337 TAILQ_INIT(&hw->flow_rss_list);
2338 TAILQ_INIT(&hw->flow_list);
2342 hns3_flow_uninit(struct rte_eth_dev *dev)
2344 struct rte_flow_error error;
2345 if (rte_eal_process_type() == RTE_PROC_PRIMARY)
2346 hns3_flow_flush_wrap(dev, &error);