/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Hisilicon Limited.
 */

#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include "hns3_ethdev.h"
#include "hns3_logs.h"

/* Default hash key */
static uint8_t hns3_hash_key[] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
};

static const uint8_t full_mask[VNI_OR_TNI_LEN] = { 0xFF, 0xFF, 0xFF };
static const uint8_t zero_mask[VNI_OR_TNI_LEN] = { 0x00, 0x00, 0x00 };

/* Special filter ID for non-specific packet flagging. Don't change the value. */
#define HNS3_MAX_FILTER_ID 0x0FFF

#define ETHER_TYPE_MASK 0xFFFF
#define IPPROTO_MASK 0xFF
#define TUNNEL_TYPE_MASK 0xFFFF

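/*
 * Tunnel-type codes programmed into the flow director key. The values come
 * from the protocols' well-known identifiers: 0x12B5/0x12B6 are the VXLAN
 * and VXLAN-GPE UDP destination ports (4789/4790), 0x17C1 is the GENEVE UDP
 * port (6081), and 0x6558 is the NVGRE (Transparent Ethernet Bridging)
 * protocol type.
 */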
#define HNS3_TUNNEL_TYPE_VXLAN 0x12B5
#define HNS3_TUNNEL_TYPE_VXLAN_GPE 0x12B6
#define HNS3_TUNNEL_TYPE_GENEVE 0x17C1
#define HNS3_TUNNEL_TYPE_NVGRE 0x6558

static enum rte_flow_item_type first_items[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_GENEVE,
	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	RTE_FLOW_ITEM_TYPE_MPLS
};

static enum rte_flow_item_type L2_next_items[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_IPV6
};

static enum rte_flow_item_type L3_next_items[] = {
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ICMP
};

static enum rte_flow_item_type L4_next_items[] = {
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_GENEVE,
	RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	RTE_FLOW_ITEM_TYPE_MPLS
};

static enum rte_flow_item_type tunnel_next_items[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN
};

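/*
 * The tables above list which item types may legally follow each protocol
 * layer; items_step_mngr (below) carries the table that constrains the next
 * item while a pattern is being walked.
 */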
struct items_step_mngr {
	enum rte_flow_item_type *items;
	int count;
};

static inline void
net_addr_to_host(uint32_t *dst, const rte_be32_t *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		dst[i] = rte_be_to_cpu_32(src[i]);
}

static inline const struct rte_flow_action *
find_rss_action(const struct rte_flow_action actions[])
{
	const struct rte_flow_action *next = &actions[0];

	for (; next->type != RTE_FLOW_ACTION_TYPE_END; next++) {
		if (next->type == RTE_FLOW_ACTION_TYPE_RSS)
			return next;
	}

	return NULL;
}

static inline struct hns3_flow_counter *
hns3_counter_lookup(struct rte_eth_dev *dev, uint32_t id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt;

	LIST_FOREACH(cnt, &pf->flow_counters, next) {
		if (cnt->id == id)
			return cnt;
	}
	return NULL;
}

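/*
 * Create a flow counter or, when the ID is already in use, take another
 * reference on the existing counter. Re-use is only allowed when the shared
 * flag of the existing counter matches the request.
 */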
static int
hns3_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
		 struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt;

	cnt = hns3_counter_lookup(dev, id);
	if (cnt) {
		if (!cnt->shared || cnt->shared != shared)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  cnt,
						  "Counter id is used, shared flag not match");
		cnt->ref_cnt++;
		return 0;
	}

	cnt = rte_zmalloc("hns3 counter", sizeof(*cnt), 0);
	if (cnt == NULL)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_ACTION, cnt,
					  "Alloc mem for counter failed");
	cnt->id = id;
	cnt->shared = shared;
	cnt->ref_cnt = 1;
	cnt->hits = 0;
	LIST_INSERT_HEAD(&pf->flow_counters, cnt, next);
	return 0;
}

static int
hns3_counter_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		   struct rte_flow_query_count *qc,
		   struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_flow_counter *cnt;
	uint64_t value;
	int ret;

	/* FDIR is available only in PF driver */
	if (hns->is_vf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Fdir is not supported in VF");

	cnt = hns3_counter_lookup(dev, flow->counter_id);
	if (cnt == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "Can't find counter id");

	ret = hns3_get_count(&hns->hw, flow->counter_id, &value);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "Read counter fail.");
		return ret;
	}
	qc->hits_set = 1;
	qc->hits = value;

	return 0;
}

static void
hns3_counter_release(struct rte_eth_dev *dev, uint32_t id)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_flow_counter *cnt;

	cnt = hns3_counter_lookup(dev, id);
	if (cnt == NULL) {
		hns3_err(hw, "Can't find available counter to release");
		return;
	}
	cnt->ref_cnt--;
	if (cnt->ref_cnt == 0) {
		LIST_REMOVE(cnt, next);
		rte_free(cnt);
	}
}

static void
hns3_counter_flush(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_pf *pf = &hns->pf;
	struct hns3_flow_counter *cnt_ptr;

	cnt_ptr = LIST_FIRST(&pf->flow_counters);
	while (cnt_ptr) {
		LIST_REMOVE(cnt_ptr, next);
		rte_free(cnt_ptr);
		cnt_ptr = LIST_FIRST(&pf->flow_counters);
	}
}

static int
hns3_handle_action_queue(struct rte_eth_dev *dev,
			 const struct rte_flow_action *action,
			 struct hns3_fdir_rule *rule,
			 struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_queue *queue;
	struct hns3_hw *hw = &hns->hw;

	queue = (const struct rte_flow_action_queue *)action->conf;
	if (queue->index >= hw->used_rx_queues) {
		hns3_err(hw, "queue ID(%d) is greater than number of "
			 "available queues (%d) in driver.",
			 queue->index, hw->used_rx_queues);
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "Invalid queue ID in PF");
	}

	rule->queue_id = queue->index;
	rule->action = HNS3_FD_ACTION_ACCEPT_PACKET;
	return 0;
}

/*
 * Parse the action list into the flow director rule.
 * Each action is validated as it is copied.
 *
 * @param actions[in]
 * @param rule[out]
 *   NIC specific actions derived from the actions.
 * @param error[out]
 */
static int
hns3_handle_actions(struct rte_eth_dev *dev,
		    const struct rte_flow_action actions[],
		    struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_action_count *act_count;
	const struct rte_flow_action_mark *mark;
	struct hns3_pf *pf = &hns->pf;
	uint32_t counter_num;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = hns3_handle_action_queue(dev, actions, rule,
						       error);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			rule->action = HNS3_FD_ACTION_DROP_PACKET;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark =
			    (const struct rte_flow_action_mark *)actions->conf;
			if (mark->id >= HNS3_MAX_FILTER_ID)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						actions,
						"Invalid Mark ID");
			rule->fd_id = mark->id;
			rule->flags |= HNS3_RULE_FLAG_FDID;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			rule->fd_id = HNS3_MAX_FILTER_ID;
			rule->flags |= HNS3_RULE_FLAG_FDID;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			act_count =
			    (const struct rte_flow_action_count *)actions->conf;
			counter_num = pf->fdir.fd_cfg.cnt_num[HNS3_FD_STAGE_1];
			if (act_count->id >= counter_num)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						actions,
						"Invalid counter id");
			rule->act_cnt = *act_count;
			rule->flags |= HNS3_RULE_FLAG_COUNTER;
			break;
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL, "Unsupported action");
		}
	}

	return 0;
}

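/*
 * Illustrative example (values hypothetical): with the standard testpmd flow
 * syntax, an action list such as
 *   ... actions queue index 3 / mark id 5 / count / end
 * results in HNS3_FD_ACTION_ACCEPT_PACKET on queue 3 with both
 * HNS3_RULE_FLAG_FDID and HNS3_RULE_FLAG_COUNTER set in rule->flags.
 */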
/* Check the validity of the attributes of a flow director rule. */
static int
hns3_check_attr(const struct rte_flow_attr *attr, struct rte_flow_error *error)
{
	if (!attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  attr, "Ingress can't be zero");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "Not support egress");
	if (attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  attr, "No support for transfer");
	if (attr->priority)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  attr, "Not support priority");
	if (attr->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  attr, "Not support group");
	return 0;
}

static int
hns3_parse_eth(const struct rte_flow_item *item,
	       struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		eth_mask = item->mask;
		if (eth_mask->type) {
			hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
			rule->key_conf.mask.ether_type =
			    rte_be_to_cpu_16(eth_mask->type);
		}
		if (!rte_is_zero_ether_addr(&eth_mask->src)) {
			hns3_set_bit(rule->input_set, INNER_SRC_MAC, 1);
			memcpy(rule->key_conf.mask.src_mac,
			       eth_mask->src.addr_bytes, RTE_ETHER_ADDR_LEN);
		}
		if (!rte_is_zero_ether_addr(&eth_mask->dst)) {
			hns3_set_bit(rule->input_set, INNER_DST_MAC, 1);
			memcpy(rule->key_conf.mask.dst_mac,
			       eth_mask->dst.addr_bytes, RTE_ETHER_ADDR_LEN);
		}
	}

	eth_spec = item->spec;
	rule->key_conf.spec.ether_type = rte_be_to_cpu_16(eth_spec->type);
	memcpy(rule->key_conf.spec.src_mac, eth_spec->src.addr_bytes,
	       RTE_ETHER_ADDR_LEN);
	memcpy(rule->key_conf.spec.dst_mac, eth_spec->dst.addr_bytes,
	       RTE_ETHER_ADDR_LEN);
	return 0;
}

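/*
 * hns3_parse_eth() above and the hns3_parse_* helpers below share one
 * convention: reject a mask without a spec, treat a spec-less and mask-less
 * item as a pure protocol-stack marker, flag the matched fields in
 * rule->input_set, and store spec/mask values converted from network to
 * host byte order.
 */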
static int
hns3_parse_vlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	rule->key_conf.vlan_num++;
	if (rule->key_conf.vlan_num > VLAN_TAG_NUM_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Vlan_num is more than 2");

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		vlan_mask = item->mask;
		if (vlan_mask->tci) {
			if (rule->key_conf.vlan_num == 1) {
				hns3_set_bit(rule->input_set, INNER_VLAN_TAG1,
					     1);
				rule->key_conf.mask.vlan_tag1 =
				    rte_be_to_cpu_16(vlan_mask->tci);
			} else {
				hns3_set_bit(rule->input_set, INNER_VLAN_TAG2,
					     1);
				rule->key_conf.mask.vlan_tag2 =
				    rte_be_to_cpu_16(vlan_mask->tci);
			}
		}
	}

	vlan_spec = item->spec;
	if (rule->key_conf.vlan_num == 1)
		rule->key_conf.spec.vlan_tag1 =
		    rte_be_to_cpu_16(vlan_spec->tci);
	else
		rule->key_conf.spec.vlan_tag2 =
		    rte_be_to_cpu_16(vlan_spec->tci);
	return 0;
}

static int
hns3_parse_ipv4(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV4;
	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		ipv4_mask = item->mask;

		if (ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.hdr_checksum) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst ip,tos,proto in IPV4");
		}

		if (ipv4_mask->hdr.src_addr) {
			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
			rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID] =
			    rte_be_to_cpu_32(ipv4_mask->hdr.src_addr);
		}

		if (ipv4_mask->hdr.dst_addr) {
			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
			rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID] =
			    rte_be_to_cpu_32(ipv4_mask->hdr.dst_addr);
		}

		if (ipv4_mask->hdr.type_of_service) {
			hns3_set_bit(rule->input_set, INNER_IP_TOS, 1);
			rule->key_conf.mask.ip_tos =
			    ipv4_mask->hdr.type_of_service;
		}

		if (ipv4_mask->hdr.next_proto_id) {
			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
			rule->key_conf.mask.ip_proto =
			    ipv4_mask->hdr.next_proto_id;
		}
	}

	ipv4_spec = item->spec;
	rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID] =
	    rte_be_to_cpu_32(ipv4_spec->hdr.src_addr);
	rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID] =
	    rte_be_to_cpu_32(ipv4_spec->hdr.dst_addr);
	rule->key_conf.spec.ip_tos = ipv4_spec->hdr.type_of_service;
	rule->key_conf.spec.ip_proto = ipv4_spec->hdr.next_proto_id;
	return 0;
}

static int
hns3_parse_ipv6(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 1);
	rule->key_conf.spec.ether_type = RTE_ETHER_TYPE_IPV6;
	rule->key_conf.mask.ether_type = ETHER_TYPE_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		ipv6_mask = item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len || ipv6_mask->hdr.hop_limits) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst ip,proto in IPV6");
		}
		net_addr_to_host(rule->key_conf.mask.src_ip,
				 (const rte_be32_t *)ipv6_mask->hdr.src_addr,
				 IP_ADDR_LEN);
		net_addr_to_host(rule->key_conf.mask.dst_ip,
				 (const rte_be32_t *)ipv6_mask->hdr.dst_addr,
				 IP_ADDR_LEN);
		rule->key_conf.mask.ip_proto = ipv6_mask->hdr.proto;
		if (rule->key_conf.mask.src_ip[IP_ADDR_KEY_ID])
			hns3_set_bit(rule->input_set, INNER_SRC_IP, 1);
		if (rule->key_conf.mask.dst_ip[IP_ADDR_KEY_ID])
			hns3_set_bit(rule->input_set, INNER_DST_IP, 1);
		if (ipv6_mask->hdr.proto)
			hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	}

	ipv6_spec = item->spec;
	net_addr_to_host(rule->key_conf.spec.src_ip,
			 (const rte_be32_t *)ipv6_spec->hdr.src_addr,
			 IP_ADDR_LEN);
	net_addr_to_host(rule->key_conf.spec.dst_ip,
			 (const rte_be32_t *)ipv6_spec->hdr.dst_addr,
			 IP_ADDR_LEN);
	rule->key_conf.spec.ip_proto = ipv6_spec->hdr.proto;

	return 0;
}

static int
hns3_parse_tcp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_TCP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		tcp_mask = item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum || tcp_mask->hdr.tcp_urp) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst port in TCP");
		}

		if (tcp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(tcp_mask->hdr.src_port);
		}
		if (tcp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(tcp_mask->hdr.dst_port);
		}
	}

	tcp_spec = item->spec;
	rule->key_conf.spec.src_port = rte_be_to_cpu_16(tcp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(tcp_spec->hdr.dst_port);

	return 0;
}

static int
hns3_parse_udp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
	       struct rte_flow_error *error)
{
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_UDP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		udp_mask = item->mask;
		if (udp_mask->hdr.dgram_len || udp_mask->hdr.dgram_cksum) {
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst port in UDP");
		}
		if (udp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(udp_mask->hdr.src_port);
		}
		if (udp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(udp_mask->hdr.dst_port);
		}
	}

	udp_spec = item->spec;
	rule->key_conf.spec.src_port = rte_be_to_cpu_16(udp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port = rte_be_to_cpu_16(udp_spec->hdr.dst_port);

	return 0;
}

static int
hns3_parse_sctp(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		struct rte_flow_error *error)
{
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");

	hns3_set_bit(rule->input_set, INNER_IP_PROTO, 1);
	rule->key_conf.spec.ip_proto = IPPROTO_SCTP;
	rule->key_conf.mask.ip_proto = IPPROTO_MASK;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	if (item->mask) {
		sctp_mask = item->mask;
		if (sctp_mask->hdr.cksum)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "Only support src & dst port in SCTP");

		if (sctp_mask->hdr.src_port) {
			hns3_set_bit(rule->input_set, INNER_SRC_PORT, 1);
			rule->key_conf.mask.src_port =
			    rte_be_to_cpu_16(sctp_mask->hdr.src_port);
		}
		if (sctp_mask->hdr.dst_port) {
			hns3_set_bit(rule->input_set, INNER_DST_PORT, 1);
			rule->key_conf.mask.dst_port =
			    rte_be_to_cpu_16(sctp_mask->hdr.dst_port);
		}
		if (sctp_mask->hdr.tag) {
			hns3_set_bit(rule->input_set, INNER_SCTP_TAG, 1);
			rule->key_conf.mask.sctp_tag =
			    rte_be_to_cpu_32(sctp_mask->hdr.tag);
		}
	}

	sctp_spec = item->spec;
	rule->key_conf.spec.src_port =
	    rte_be_to_cpu_16(sctp_spec->hdr.src_port);
	rule->key_conf.spec.dst_port =
	    rte_be_to_cpu_16(sctp_spec->hdr.dst_port);
	rule->key_conf.spec.sctp_tag = rte_be_to_cpu_32(sctp_spec->hdr.tag);

	return 0;
}

/*
 * Check items before tunnel, save inner configs to outer configs, and clear
 * inner configs.
 * The key consists of two parts: meta_data and tuple keys.
 * Meta data uses 15 bits, including vlan_num(2bit), des_port(12bit) and tunnel
 * packet(1bit).
 * The tuple key uses 384 bits, including ot_dst-mac(48bit), ot_dst-port(16bit),
 * ot_tun_vni(24bit), ot_flow_id(8bit), src-mac(48bit), dst-mac(48bit),
 * src-ip(32/128bit), dst-ip(32/128bit), src-port(16bit), dst-port(16bit),
 * tos(8bit), ether-proto(16bit), ip-proto(8bit), vlantag1(16bit),
 * vlantag2(16bit) and sctp-tag(32bit).
 */
static int
hns3_handle_tunnel(const struct rte_flow_item *item,
		   struct hns3_fdir_rule *rule, struct rte_flow_error *error)
{
	/* check eth config */
	if (rule->input_set & (BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Outer eth mac is unsupported");
	if (rule->input_set & BIT(INNER_ETH_TYPE)) {
		hns3_set_bit(rule->input_set, OUTER_ETH_TYPE, 1);
		rule->key_conf.spec.outer_ether_type =
		    rule->key_conf.spec.ether_type;
		rule->key_conf.mask.outer_ether_type =
		    rule->key_conf.mask.ether_type;
		hns3_set_bit(rule->input_set, INNER_ETH_TYPE, 0);
		rule->key_conf.spec.ether_type = 0;
		rule->key_conf.mask.ether_type = 0;
	}

	/* check vlan config */
	if (rule->input_set & (BIT(INNER_VLAN_TAG1) | BIT(INNER_VLAN_TAG2)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item,
					  "Outer vlan tags is unsupported");

	/* clear vlan_num for inner vlan select */
	rule->key_conf.outer_vlan_num = rule->key_conf.vlan_num;
	rule->key_conf.vlan_num = 0;

	/* check L3 config */
	if (rule->input_set &
	    (BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) | BIT(INNER_IP_TOS)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Outer ip is unsupported");
	if (rule->input_set & BIT(INNER_IP_PROTO)) {
		hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
		rule->key_conf.spec.outer_proto = rule->key_conf.spec.ip_proto;
		rule->key_conf.mask.outer_proto = rule->key_conf.mask.ip_proto;
		hns3_set_bit(rule->input_set, INNER_IP_PROTO, 0);
		rule->key_conf.spec.ip_proto = 0;
		rule->key_conf.mask.ip_proto = 0;
	}

	/* check L4 config */
	if (rule->input_set & BIT(INNER_SCTP_TAG))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Outer sctp tag is unsupported");

	if (rule->input_set & BIT(INNER_SRC_PORT)) {
		hns3_set_bit(rule->input_set, OUTER_SRC_PORT, 1);
		rule->key_conf.spec.outer_src_port =
		    rule->key_conf.spec.src_port;
		rule->key_conf.mask.outer_src_port =
		    rule->key_conf.mask.src_port;
		hns3_set_bit(rule->input_set, INNER_SRC_PORT, 0);
		rule->key_conf.spec.src_port = 0;
		rule->key_conf.mask.src_port = 0;
	}
	if (rule->input_set & BIT(INNER_DST_PORT)) {
		hns3_set_bit(rule->input_set, INNER_DST_PORT, 0);
		rule->key_conf.spec.dst_port = 0;
		rule->key_conf.mask.dst_port = 0;
	}
	return 0;
}

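/*
 * For example, with the pattern "eth / ipv4 / udp / vxlan / eth / ipv4",
 * the IPv4/UDP fields parsed before the VXLAN item actually describe the
 * outer headers, so hns3_handle_tunnel() moves them to the outer_* fields
 * and the items after VXLAN fill the inner fields afresh.
 */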
static int
hns3_parse_vxlan(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		 struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure with mask");

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN;
	else
		rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_VXLAN_GPE;

	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	vxlan_mask = item->mask;
	vxlan_spec = item->spec;

	if (vxlan_mask->flags)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Flags is not supported in VxLAN");

	/* VNI must be totally masked or not. */
	if (memcmp(vxlan_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(vxlan_mask->vni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VNI must be totally masked or not in VxLAN");
	if (vxlan_mask->vni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, vxlan_mask->vni,
		       VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, vxlan_spec->vni,
	       VNI_OR_TNI_LEN);
	return 0;
}

static int
hns3_parse_nvgre(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		 struct rte_flow_error *error)
{
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure with mask");

	hns3_set_bit(rule->input_set, OUTER_IP_PROTO, 1);
	rule->key_conf.spec.outer_proto = IPPROTO_GRE;
	rule->key_conf.mask.outer_proto = IPPROTO_MASK;

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_NVGRE;
	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	nvgre_mask = item->mask;
	nvgre_spec = item->spec;

	if (nvgre_mask->protocol || nvgre_mask->c_k_s_rsvd0_ver)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Ver/protocol is not supported in NVGRE");

	/* TNI must be totally masked or not. */
	if (memcmp(nvgre_mask->tni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(nvgre_mask->tni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "TNI must be totally masked or not in NVGRE");

	if (nvgre_mask->tni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, nvgre_mask->tni,
		       VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, nvgre_spec->tni,
	       VNI_OR_TNI_LEN);
	if (nvgre_mask->flow_id) {
		hns3_set_bit(rule->input_set, OUTER_TUN_FLOW_ID, 1);
		rule->key_conf.mask.outer_tun_flow_id = nvgre_mask->flow_id;
	}
	rule->key_conf.spec.outer_tun_flow_id = nvgre_spec->flow_id;
	return 0;
}

static int
hns3_parse_geneve(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct rte_flow_error *error)
{
	const struct rte_flow_item_geneve *geneve_spec;
	const struct rte_flow_item_geneve *geneve_mask;

	if (item->spec == NULL && item->mask)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Can't configure FDIR with mask but without spec");
	else if (item->spec && (item->mask == NULL))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Tunnel packets must configure with mask");

	hns3_set_bit(rule->input_set, OUTER_DST_PORT, 1);
	rule->key_conf.spec.tunnel_type = HNS3_TUNNEL_TYPE_GENEVE;
	rule->key_conf.mask.tunnel_type = TUNNEL_TYPE_MASK;
	/* Only used to describe the protocol stack. */
	if (item->spec == NULL && item->mask == NULL)
		return 0;

	geneve_mask = item->mask;
	geneve_spec = item->spec;

	if (geneve_mask->ver_opt_len_o_c_rsvd0 || geneve_mask->protocol)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "Ver/protocol is not supported in GENEVE");
	/* VNI must be totally masked or not. */
	if (memcmp(geneve_mask->vni, full_mask, VNI_OR_TNI_LEN) &&
	    memcmp(geneve_mask->vni, zero_mask, VNI_OR_TNI_LEN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VNI must be totally masked or not in GENEVE");
	if (geneve_mask->vni[0]) {
		hns3_set_bit(rule->input_set, OUTER_TUN_VNI, 1);
		memcpy(rule->key_conf.mask.outer_tun_vni, geneve_mask->vni,
		       VNI_OR_TNI_LEN);
	}
	memcpy(rule->key_conf.spec.outer_tun_vni, geneve_spec->vni,
	       VNI_OR_TNI_LEN);
	return 0;
}

static int
hns3_parse_tunnel(const struct rte_flow_item *item, struct hns3_fdir_rule *rule,
		  struct rte_flow_error *error)
{
	int ret;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_VXLAN:
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		ret = hns3_parse_vxlan(item, rule, error);
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		ret = hns3_parse_nvgre(item, rule, error);
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE:
		ret = hns3_parse_geneve(item, rule, error);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "Unsupported tunnel type!");
	}
	if (ret)
		return ret;
	return hns3_handle_tunnel(item, rule, error);
}

static int
hns3_parse_normal(const struct rte_flow_item *item,
		  struct hns3_fdir_rule *rule,
		  struct items_step_mngr *step_mngr,
		  struct rte_flow_error *error)
{
	int ret;

	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		ret = hns3_parse_eth(item, rule, error);
		step_mngr->items = L2_next_items;
		step_mngr->count = ARRAY_SIZE(L2_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		ret = hns3_parse_vlan(item, rule, error);
		step_mngr->items = L2_next_items;
		step_mngr->count = ARRAY_SIZE(L2_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		ret = hns3_parse_ipv4(item, rule, error);
		step_mngr->items = L3_next_items;
		step_mngr->count = ARRAY_SIZE(L3_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		ret = hns3_parse_ipv6(item, rule, error);
		step_mngr->items = L3_next_items;
		step_mngr->count = ARRAY_SIZE(L3_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		ret = hns3_parse_tcp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = ARRAY_SIZE(L4_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		ret = hns3_parse_udp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = ARRAY_SIZE(L4_next_items);
		break;
	case RTE_FLOW_ITEM_TYPE_SCTP:
		ret = hns3_parse_sctp(item, rule, error);
		step_mngr->items = L4_next_items;
		step_mngr->count = ARRAY_SIZE(L4_next_items);
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE,
					  NULL, "Unsupported normal type!");
	}

	return ret;
}

static int
hns3_validate_item(const struct rte_flow_item *item,
		   struct items_step_mngr step_mngr,
		   struct rte_flow_error *error)
{
	int i;

	if (item->last)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
					  "Not supported last point for range");

	for (i = 0; i < step_mngr.count; i++) {
		if (item->type == step_mngr.items[i])
			break;
	}

	if (i == step_mngr.count) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  item, "Invalid or missing item");
	}
	return 0;
}

static bool
is_tunnel_packet(enum rte_flow_item_type type)
{
	if (type == RTE_FLOW_ITEM_TYPE_VXLAN_GPE ||
	    type == RTE_FLOW_ITEM_TYPE_VXLAN ||
	    type == RTE_FLOW_ITEM_TYPE_NVGRE ||
	    type == RTE_FLOW_ITEM_TYPE_GENEVE ||
	    type == RTE_FLOW_ITEM_TYPE_MPLS)
		return true;
	return false;
}

/*
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule, and
 * collect the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional)
 * The next not void item could be RAW (for flexbyte, optional)
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
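/*
 * Minimal application-side sketch of a rule this parser accepts (all values
 * hypothetical):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */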
static int
hns3_parse_fdir_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct hns3_fdir_rule *rule,
		       struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	const struct rte_flow_item *item;
	struct items_step_mngr step_mngr;
	int ret;

	/* FDIR is available only in PF driver */
	if (hns->is_vf)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "Fdir not supported in VF");

	if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_PERFECT)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM_NUM, NULL,
					  "fdir_conf.mode isn't perfect");

	step_mngr.items = first_items;
	step_mngr.count = ARRAY_SIZE(first_items);
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
			continue;

		ret = hns3_validate_item(item, step_mngr, error);
		if (ret)
			return ret;

		if (is_tunnel_packet(item->type)) {
			ret = hns3_parse_tunnel(item, rule, error);
			if (ret)
				return ret;
			step_mngr.items = tunnel_next_items;
			step_mngr.count = ARRAY_SIZE(tunnel_next_items);
		} else {
			ret = hns3_parse_normal(item, rule, &step_mngr, error);
			if (ret)
				return ret;
		}
	}

	return hns3_handle_actions(dev, actions, rule, error);
}

void
hns3_filterlist_init(struct rte_eth_dev *dev)
{
	struct hns3_process_private *process_list = dev->process_private;

	TAILQ_INIT(&process_list->fdir_list);
	TAILQ_INIT(&process_list->filter_rss_list);
	TAILQ_INIT(&process_list->flow_list);
}

static void
hns3_filterlist_flush(struct rte_eth_dev *dev)
{
	struct hns3_process_private *process_list = dev->process_private;
	struct hns3_fdir_rule_ele *fdir_rule_ptr;
	struct hns3_rss_conf_ele *rss_filter_ptr;
	struct hns3_flow_mem *flow_node;

	fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
	while (fdir_rule_ptr) {
		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
		fdir_rule_ptr = TAILQ_FIRST(&process_list->fdir_list);
	}

	rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
	while (rss_filter_ptr) {
		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
			     entries);
		rte_free(rss_filter_ptr);
		rss_filter_ptr = TAILQ_FIRST(&process_list->filter_rss_list);
	}

	flow_node = TAILQ_FIRST(&process_list->flow_list);
	while (flow_node) {
		TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
		rte_free(flow_node->flow);
		rte_free(flow_node);
		flow_node = TAILQ_FIRST(&process_list->flow_list);
	}
}

static bool
hns3_action_rss_same(const struct rte_flow_action_rss *comp,
		     const struct rte_flow_action_rss *with)
{
	return (comp->func == with->func &&
		comp->level == with->level &&
		comp->types == with->types &&
		comp->key_len == with->key_len &&
		comp->queue_num == with->queue_num &&
		!memcmp(comp->key, with->key, with->key_len) &&
		!memcmp(comp->queue, with->queue,
			sizeof(*with->queue) * with->queue_num));
}

static int
hns3_rss_conf_copy(struct hns3_rss_conf *out,
		   const struct rte_flow_action_rss *in)
{
	if (in->key_len > RTE_DIM(out->key) ||
	    in->queue_num > RTE_DIM(out->queue))
		return -EINVAL;
	if (in->key == NULL && in->key_len)
		return -EINVAL;
	out->conf = (struct rte_flow_action_rss) {
		.func = in->func,
		.level = in->level,
		.types = in->types,
		.key_len = in->key_len,
		.queue_num = in->queue_num,
	};
	out->conf.queue =
		memcpy(out->queue, in->queue,
		       sizeof(*in->queue) * in->queue_num);
	if (in->key)
		out->conf.key = memcpy(out->key, in->key, in->key_len);

	return 0;
}

/*
 * This function is used to parse the RSS action and validate it.
 */
static int
hns3_parse_rss_filter(struct rte_eth_dev *dev,
		      const struct rte_flow_action *actions,
		      struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rss_conf *rss_conf = &hw->rss_info;
	const struct rte_flow_action_rss *rss;
	const struct rte_flow_action *act;
	uint32_t act_index = 0;
	uint64_t flow_types;
	uint16_t n;

	NEXT_ITEM_OF_ACTION(act, actions, act_index);
	/* Get configuration args from APP cmdline input */
	rss = act->conf;

	if (rss == NULL || rss->queue_num == 0) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "no valid queues");
	}

	for (n = 0; n < rss->queue_num; n++) {
		if (rss->queue[n] < dev->data->nb_rx_queues)
			continue;
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  &rss->queue[n],
					  "queue id > max number of queues");
	}

	/* Parse flow types of RSS */
	if (!(rss->types & HNS3_ETH_RSS_SUPPORT) && rss->types)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act,
					  "Flow types are unsupported by "
					  "hns3's RSS");

	flow_types = rss->types & HNS3_ETH_RSS_SUPPORT;
	if (flow_types != rss->types)
		hns3_warn(hw, "RSS flow types(%" PRIx64 ") include unsupported "
			  "flow types", rss->types);

	/* Parse RSS related parameters from RSS configuration */
	switch (rss->func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		break;
	default:
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "input RSS hash functions are not supported");
	}

	if (rss->level)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "a nonzero RSS encapsulation level is not supported");
	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "RSS hash key must be exactly 40 bytes");
	if (rss->queue_num > RTE_DIM(rss_conf->queue))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, act,
					  "too many queues for RSS context");

	act_index++;

	/* Check if the next not void action is END */
	NEXT_ITEM_OF_ACTION(act, actions, act_index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_conf, 0, sizeof(struct hns3_rss_conf));
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  act, "Not supported action.");
	}

	return 0;
}

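/*
 * Illustrative testpmd command that exercises this parser (standard testpmd
 * flow syntax; queue and type values hypothetical):
 *   flow create 0 ingress pattern end actions rss types ipv4-tcp end \
 *       queues 0 1 2 3 end / end
 */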
static int
hns3_disable_rss(struct hns3_hw *hw)
{
	int ret;

	/* Redirect the redirection table to queue 0 */
	ret = hns3_rss_reset_indir_table(hw);
	if (ret)
		return ret;

	/* Disable RSS */
	hw->rss_info.conf.types = 0;
	hw->rss_dis_flag = true;

	return 0;
}

static void
hns3_parse_rss_key(struct hns3_hw *hw, struct rte_flow_action_rss *rss_conf)
{
	if (rss_conf->key == NULL ||
	    rss_conf->key_len < HNS3_RSS_KEY_SIZE) {
		hns3_info(hw, "Default RSS hash key to be set");
		rss_conf->key = hns3_hash_key;
		rss_conf->key_len = HNS3_RSS_KEY_SIZE;
	}
}

static int
hns3_parse_rss_algorithm(struct hns3_hw *hw, enum rte_eth_hash_function *func,
			 uint8_t *hash_algo)
{
	enum rte_eth_hash_function algo_func = *func;

	switch (algo_func) {
	case RTE_ETH_HASH_FUNCTION_DEFAULT:
		/* Keep *hash_algo as what it used to be */
		algo_func = hw->rss_info.conf.func;
		break;
	case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
		*hash_algo = HNS3_RSS_HASH_ALGO_TOEPLITZ;
		break;
	case RTE_ETH_HASH_FUNCTION_SIMPLE_XOR:
		*hash_algo = HNS3_RSS_HASH_ALGO_SIMPLE;
		break;
	default:
		hns3_err(hw, "Invalid RSS algorithm configuration(%u)",
			 algo_func);
		return -EINVAL;
	}

	*func = algo_func;

	return 0;
}

static int
hns3_hw_rss_hash_set(struct hns3_hw *hw, struct rte_flow_action_rss *rss_config)
{
	uint8_t hash_algo =
	    (hw->rss_info.conf.func == RTE_ETH_HASH_FUNCTION_TOEPLITZ ?
	     HNS3_RSS_HASH_ALGO_TOEPLITZ : HNS3_RSS_HASH_ALGO_SIMPLE);
	struct hns3_rss_tuple_cfg *tuple;
	int ret;

	/* Parse hash key */
	hns3_parse_rss_key(hw, rss_config);

	/* Parse hash algorithm */
	ret = hns3_parse_rss_algorithm(hw, &rss_config->func, &hash_algo);
	if (ret)
		return ret;

	ret = hns3_set_rss_algo_key(hw, hash_algo, rss_config->key);
	if (ret)
		return ret;

	/* Update algorithm of hw */
	hw->rss_info.conf.func = rss_config->func;

	/* Set flow type supported */
	tuple = &hw->rss_info.rss_tuple_sets;
	ret = hns3_set_rss_tuple_by_rss_hf(hw, tuple, rss_config->types);
	if (ret)
		hns3_err(hw, "Update RSS tuples by rss hf failed %d", ret);

	return ret;
}

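/*
 * hns3_update_indir_table() below fills the RSS indirection table
 * round-robin: entry i receives conf->queue[i % num], so the configured
 * queue IDs repeat cyclically across the whole table.
 */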
static int
hns3_update_indir_table(struct rte_eth_dev *dev,
			const struct rte_flow_action_rss *conf, uint16_t num)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	uint8_t indir_tbl[HNS3_RSS_IND_TBL_SIZE];
	uint16_t j, allow_rss_queues;
	uint8_t queue_id;
	uint32_t i;

	if (num == 0) {
		hns3_err(hw, "No PF queues are configured to enable RSS");
		return -ENOTSUP;
	}

	allow_rss_queues = RTE_MIN(dev->data->nb_rx_queues, hw->rss_size_max);
	/* Fill in redirection table */
	memcpy(indir_tbl, hw->rss_info.rss_indirection_tbl,
	       HNS3_RSS_IND_TBL_SIZE);
	for (i = 0, j = 0; i < HNS3_RSS_IND_TBL_SIZE; i++, j++) {
		j %= num;
		if (conf->queue[j] >= allow_rss_queues) {
			hns3_err(hw, "Invalid queue id(%u) to be set in "
				 "redirection table, max number of rss "
				 "queues: %u", conf->queue[j],
				 allow_rss_queues);
			return -EINVAL;
		}
		queue_id = conf->queue[j];
		indir_tbl[i] = queue_id;
	}

	return hns3_set_rss_indir_table(hw, indir_tbl, HNS3_RSS_IND_TBL_SIZE);
}

static int
hns3_config_rss_filter(struct rte_eth_dev *dev,
		       const struct hns3_rss_conf *conf, bool add)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	struct hns3_rss_conf *rss_info;
	uint64_t flow_types;
	uint16_t num;
	int ret;

	struct rte_flow_action_rss rss_flow_conf = {
		.func = conf->conf.func,
		.level = conf->conf.level,
		.types = conf->conf.types,
		.key_len = conf->conf.key_len,
		.queue_num = conf->conf.queue_num,
		.key = conf->conf.key_len ?
		       (void *)(uintptr_t)conf->conf.key : NULL,
		.queue = conf->conf.queue,
	};

	/* These types are unsupported by hns3's RSS */
	if (!(rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT) &&
	    rss_flow_conf.types) {
		hns3_err(hw,
			 "Flow types(%" PRIx64 ") is unsupported by hns3's RSS",
			 rss_flow_conf.types);
		return -EINVAL;
	}

	rss_info = &hw->rss_info;
	if (rss_flow_conf.key_len &&
	    rss_flow_conf.key_len > RTE_DIM(rss_info->key)) {
		hns3_err(hw,
			 "input hash key(%u) greater than supported len(%zu)",
			 rss_flow_conf.key_len, RTE_DIM(rss_info->key));
		return -EINVAL;
	}

	/* Filter the unsupported flow types */
	flow_types = rss_flow_conf.types & HNS3_ETH_RSS_SUPPORT;
	if (flow_types != rss_flow_conf.types)
		hns3_warn(hw, "modified RSS types based on hardware support, "
			  "requested:%" PRIx64 " configured:%" PRIx64,
			  rss_flow_conf.types, flow_types);
	/* Update the useful flow types */
	rss_flow_conf.types = flow_types;

	if ((rss_flow_conf.types & ETH_RSS_PROTO_MASK) == 0)
		return hns3_disable_rss(hw);

	if (!add) {
		if (hns3_action_rss_same(&rss_info->conf, &rss_flow_conf)) {
			ret = hns3_disable_rss(hw);
			if (ret) {
				hns3_err(hw, "RSS disable failed(%d)", ret);
				return ret;
			}
			memset(rss_info, 0, sizeof(struct hns3_rss_conf));
			return 0;
		}
		return -EINVAL;
	}

	/* Get rx queues num */
	num = dev->data->nb_rx_queues;

	/* Set rx queues to use */
	num = RTE_MIN(num, rss_flow_conf.queue_num);
	if (rss_flow_conf.queue_num > num)
		hns3_warn(hw, "Requested %u RSS queues exceed the supported range and are truncated",
			  rss_flow_conf.queue_num);
	hns3_info(hw, "Max of contiguous %u PF queues are configured", num);

	rte_spinlock_lock(&hw->lock);
	/* Update redirection table of rss */
	ret = hns3_update_indir_table(dev, &rss_flow_conf, num);
	if (ret)
		goto rss_config_err;

	/* Set hash algorithm and flow types by the user's config */
	ret = hns3_hw_rss_hash_set(hw, &rss_flow_conf);
	if (ret)
		goto rss_config_err;

	ret = hns3_rss_conf_copy(rss_info, &rss_flow_conf);
	if (ret)
		hns3_err(hw, "RSS config init fail(%d)", ret);

rss_config_err:
	rte_spinlock_unlock(&hw->lock);

	return ret;
}

/* Remove the rss filter */
static int
hns3_clear_rss_filter(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	if (hw->rss_info.conf.queue_num == 0)
		return 0;

	return hns3_config_rss_filter(dev, &hw->rss_info, false);
}

/* Restore the rss filter */
int
hns3_restore_rss_filter(struct rte_eth_dev *dev)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;

	if (hw->rss_info.conf.queue_num == 0)
		return 0;

	return hns3_config_rss_filter(dev, &hw->rss_info, true);
}

static int
hns3_flow_parse_rss(struct rte_eth_dev *dev,
		    const struct hns3_rss_conf *conf, bool add)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	bool ret;

	/* Reject a configuration identical to the current one */
	ret = hns3_action_rss_same(&hw->rss_info.conf, &conf->conf);
	if (ret) {
		hns3_err(hw, "Duplicate RSS configuration entered: %d", ret);
		return -EINVAL;
	}

	return hns3_config_rss_filter(dev, conf, add);
}

static int
hns3_flow_args_check(const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	if (pattern == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_NUM,
					  NULL, "NULL pattern.");

	if (actions == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_NUM,
					  NULL, "NULL action.");

	if (attr == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  NULL, "NULL attribute.");

	return hns3_check_attr(attr, error);
}

/*
 * Check if the flow rule is supported by hns3.
 * It only checks the format. It does not guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
static int
hns3_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct hns3_fdir_rule fdir_rule;
	int ret;

	ret = hns3_flow_args_check(attr, pattern, actions, error);
	if (ret)
		return ret;

	if (find_rss_action(actions))
		return hns3_parse_rss_filter(dev, actions, error);

	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
	return hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
}

/*
 * Create a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *
hns3_flow_create(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct hns3_process_private *process_list = dev->process_private;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_hw *hw = &hns->hw;
	const struct hns3_rss_conf *rss_conf;
	struct hns3_fdir_rule_ele *fdir_rule_ptr;
	struct hns3_rss_conf_ele *rss_filter_ptr;
	struct hns3_flow_mem *flow_node;
	const struct rte_flow_action *act;
	struct rte_flow *flow;
	struct hns3_fdir_rule fdir_rule;
	int ret;

	ret = hns3_flow_args_check(attr, pattern, actions, error);
	if (ret)
		return NULL;

	flow = rte_zmalloc("hns3 flow", sizeof(struct rte_flow), 0);
	if (flow == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate flow memory");
		return NULL;
	}
	flow_node = rte_zmalloc("hns3 flow node",
				sizeof(struct hns3_flow_mem), 0);
	if (flow_node == NULL) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate flow list memory");
		rte_free(flow);
		return NULL;
	}

	flow_node->flow = flow;
	TAILQ_INSERT_TAIL(&process_list->flow_list, flow_node, entries);

	act = find_rss_action(actions);
	if (act) {
		rss_conf = act->conf;

		ret = hns3_flow_parse_rss(dev, rss_conf, true);
		if (ret)
			goto err;

		rss_filter_ptr = rte_zmalloc("hns3 rss filter",
					     sizeof(struct hns3_rss_conf_ele),
					     0);
		if (rss_filter_ptr == NULL) {
			hns3_err(hw,
				 "Failed to allocate hns3_rss_filter memory");
			ret = -ENOMEM;
			goto err;
		}
		memcpy(&rss_filter_ptr->filter_info, rss_conf,
		       sizeof(struct hns3_rss_conf));
		TAILQ_INSERT_TAIL(&process_list->filter_rss_list,
				  rss_filter_ptr, entries);

		flow->rule = rss_filter_ptr;
		flow->filter_type = RTE_ETH_FILTER_HASH;
		return flow;
	}

	memset(&fdir_rule, 0, sizeof(struct hns3_fdir_rule));
	ret = hns3_parse_fdir_filter(dev, pattern, actions, &fdir_rule, error);
	if (ret)
		goto out;

	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER) {
		ret = hns3_counter_new(dev, fdir_rule.act_cnt.shared,
				       fdir_rule.act_cnt.id, error);
		if (ret)
			goto out;

		flow->counter_id = fdir_rule.act_cnt.id;
	}
	ret = hns3_fdir_filter_program(hns, &fdir_rule, false);
	if (!ret) {
		fdir_rule_ptr = rte_zmalloc("hns3 fdir rule",
					    sizeof(struct hns3_fdir_rule_ele),
					    0);
		if (fdir_rule_ptr == NULL) {
			hns3_err(hw, "Failed to allocate fdir_rule memory");
			ret = -ENOMEM;
			goto err_fdir;
		}
		memcpy(&fdir_rule_ptr->fdir_conf, &fdir_rule,
		       sizeof(struct hns3_fdir_rule));
		TAILQ_INSERT_TAIL(&process_list->fdir_list,
				  fdir_rule_ptr, entries);
		flow->rule = fdir_rule_ptr;
		flow->filter_type = RTE_ETH_FILTER_FDIR;

		return flow;
	}

err_fdir:
	if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
		hns3_counter_release(dev, fdir_rule.act_cnt.id);
err:
	rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow");
out:
	TAILQ_REMOVE(&process_list->flow_list, flow_node, entries);
	rte_free(flow_node);
	rte_free(flow);
	return NULL;
}

/* Destroy a flow rule on hns3. */
static int
hns3_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct hns3_process_private *process_list = dev->process_private;
	struct hns3_adapter *hns = dev->data->dev_private;
	struct hns3_fdir_rule_ele *fdir_rule_ptr;
	struct hns3_rss_conf_ele *rss_filter_ptr;
	struct hns3_flow_mem *flow_node;
	struct hns3_hw *hw = &hns->hw;
	enum rte_filter_type filter_type;
	struct hns3_fdir_rule fdir_rule;
	int ret;

	if (flow == NULL)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE,
					  flow, "Flow is NULL");
	filter_type = flow->filter_type;
	switch (filter_type) {
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct hns3_fdir_rule_ele *)flow->rule;
		memcpy(&fdir_rule, &fdir_rule_ptr->fdir_conf,
		       sizeof(struct hns3_fdir_rule));

		ret = hns3_fdir_filter_program(hns, &fdir_rule, true);
		if (ret)
			return rte_flow_error_set(error, EIO,
						  RTE_FLOW_ERROR_TYPE_HANDLE,
						  flow,
						  "Destroy FDIR fail. Try again");
		if (fdir_rule.flags & HNS3_RULE_FLAG_COUNTER)
			hns3_counter_release(dev, fdir_rule.act_cnt.id);
		TAILQ_REMOVE(&process_list->fdir_list, fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
		fdir_rule_ptr = NULL;
		break;
	case RTE_ETH_FILTER_HASH:
		rss_filter_ptr = (struct hns3_rss_conf_ele *)flow->rule;
		ret = hns3_config_rss_filter(dev, &hw->rss_info, false);
		if (ret)
			return rte_flow_error_set(error, EIO,
						  RTE_FLOW_ERROR_TYPE_HANDLE,
						  flow,
						  "Destroy RSS fail. Try again");
		TAILQ_REMOVE(&process_list->filter_rss_list, rss_filter_ptr,
			     entries);
		rte_free(rss_filter_ptr);
		rss_filter_ptr = NULL;
		break;
	default:
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, flow,
					  "Unsupported filter type");
	}

	TAILQ_FOREACH(flow_node, &process_list->flow_list, entries) {
		if (flow_node->flow == flow) {
			TAILQ_REMOVE(&process_list->flow_list, flow_node,
				     entries);
			rte_free(flow_node);
			flow_node = NULL;
			break;
		}
	}
	rte_free(flow);
	flow = NULL;

	return 0;
}

/* Destroy all flow rules associated with a port on hns3. */
static int
hns3_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct hns3_adapter *hns = dev->data->dev_private;
	int ret;

	/* FDIR is available only in PF driver */
	if (!hns->is_vf) {
		ret = hns3_clear_all_fdir_filter(hns);
		if (ret) {
			rte_flow_error_set(error, ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE,
					   NULL, "Failed to flush rule");
			return ret;
		}
		hns3_counter_flush(dev);
	}

	ret = hns3_clear_rss_filter(dev);
	if (ret) {
		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush rss filter");
		return ret;
	}

	hns3_filterlist_flush(dev);

	return 0;
}

/* Query an existing flow rule. */
static int
hns3_flow_query(struct rte_eth_dev *dev, struct rte_flow *flow,
		const struct rte_flow_action *actions, void *data,
		struct rte_flow_error *error)
{
	struct rte_flow_query_count *qc;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			qc = (struct rte_flow_query_count *)data;
			ret = hns3_counter_query(dev, flow, qc, error);
			if (ret)
				return ret;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "Query action only support count");
		}
	}
	return 0;
}

static const struct rte_flow_ops hns3_flow_ops = {
	.validate = hns3_flow_validate,
	.create = hns3_flow_create,
	.destroy = hns3_flow_destroy,
	.flush = hns3_flow_flush,
	.query = hns3_flow_query,
};

/*
 * The entry of the flow API.
 * @param dev
 *   Pointer to Ethernet device.
 * @return
 *   0 on success, a negative errno value otherwise.
 */
int
hns3_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
		     enum rte_filter_op filter_op, void *arg)
{
	struct hns3_hw *hw;
	int ret = 0;

	hw = HNS3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	switch (filter_type) {
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		if (hw->adapter_state >= HNS3_NIC_CLOSED)
			return -ENODEV;
		*(const void **)arg = &hns3_flow_ops;
		break;
	default:
		hns3_err(hw, "Filter type (%d) not supported", filter_type);
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
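
/*
 * Usage note: applications reach the flow ops above through the generic
 * filter control path, e.g. (sketch, error handling omitted):
 *
 *	const struct rte_flow_ops *ops;
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				RTE_ETH_FILTER_GET, &ops);
 */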