/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <stdio.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"

#define HINIC_MAX_RX_QUEUE_NUM		64

#ifndef UINT8_MAX
#define UINT8_MAX	(u8)(~((u8)0))		/* 0xFF               */
#define UINT16_MAX	(u16)(~((u16)0))	/* 0xFFFF             */
#define UINT32_MAX	(u32)(~((u32)0))	/* 0xFFFFFFFF         */
#define UINT64_MAX	(u64)(~((u64)0))	/* 0xFFFFFFFFFFFFFFFF */
#define ASCII_MAX	(0x7F)
#endif

#define PA_ETH_TYPE_ROCE		0
#define PA_ETH_TYPE_IPV4		1
#define PA_ETH_TYPE_IPV6		2
#define PA_ETH_TYPE_OTHER		3

#define PA_IP_PROTOCOL_TYPE_TCP		1
#define PA_IP_PROTOCOL_TYPE_UDP		2
#define PA_IP_PROTOCOL_TYPE_ICMP	3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP	4
#define PA_IP_PROTOCOL_TYPE_SCTP	5
#define PA_IP_PROTOCOL_TYPE_VRRP	112

#define IP_HEADER_PROTOCOL_TYPE_TCP	6
#define IP_HEADER_PROTOCOL_TYPE_UDP	17
#define IP_HEADER_PROTOCOL_TYPE_ICMP	1
#define IP_HEADER_PROTOCOL_TYPE_ICMPV6	58

#define FDIR_TCAM_NORMAL_PACKET		0
#define FDIR_TCAM_TUNNEL_PACKET		1

#define HINIC_MIN_N_TUPLE_PRIO		1
#define HINIC_MAX_N_TUPLE_PRIO		7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT	1
#define TCAM_PKT_VRRP		2
#define TCAM_PKT_BGP_DPORT	3
#define TCAM_PKT_LACP		4

#define TCAM_DIP_IPV4_TYPE	0
#define TCAM_DIP_IPV6_TYPE	1

#define BGP_DPORT_ID		179
#define IPPROTO_VRRP		112

/* Packet type defined in hardware to perform filter */
#define PKT_IGMP_IPV4_TYPE	64
#define PKT_ICMP_IPV4_TYPE	65
#define PKT_ICMP_IPV6_TYPE	66
#define PKT_ICMP_IPV6RS_TYPE	67
#define PKT_ICMP_IPV6RA_TYPE	68
#define PKT_ICMP_IPV6NS_TYPE	69
#define PKT_ICMP_IPV6NA_TYPE	70
#define PKT_ICMP_IPV6RE_TYPE	71
#define PKT_DHCP_IPV4_TYPE	72
#define PKT_DHCP_IPV6_TYPE	73
#define PKT_LACP_TYPE		74
#define PKT_ARP_REQ_TYPE	79
#define PKT_ARP_REP_TYPE	80
#define PKT_ARP_TYPE		81
#define PKT_BGPD_DPORT_TYPE	83
#define PKT_BGPD_SPORT_TYPE	84
#define PKT_VRRP_TYPE		85

#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
	(&((struct hinic_nic_dev *)nic_dev)->filter)

#define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
	(&((struct hinic_nic_dev *)nic_dev)->tcam)

enum hinic_atr_flow_type {
	HINIC_ATR_FLOW_TYPE_IPV4_DIP = 0x1,
	HINIC_ATR_FLOW_TYPE_IPV4_SIP = 0x2,
	HINIC_ATR_FLOW_TYPE_DPORT = 0x3,
	HINIC_ATR_FLOW_TYPE_SPORT = 0x4,
};

/* Structure to store fdir's info. */
struct hinic_fdir_info {
	uint8_t fdir_flag;
	uint8_t qid;
	uint32_t fdir_key;
};

/**
 * An endless loop can never happen under the assumptions below:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}

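/*
 * Illustrative note (a sketch, not part of the driver): an application
 * pattern list may carry VOID placeholders that the helpers above skip:
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * next_no_void_pattern(pattern, NULL) returns the ETH item, while
 * next_no_void_pattern(pattern, &pattern[0]) steps over the VOID entry
 * and returns the IPV4 item.
 */
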
static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
					struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
				const struct rte_flow_item *pattern,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}

static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
					struct rte_flow_error *error)
{
	/* The first non-void item should be MAC */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	return 0;
}

static int
hinic_parse_ethertype_action(const struct rte_flow_action *actions,
			const struct rte_flow_action *act,
			const struct rte_flow_action_queue *act_q,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	/* Parse action */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
		act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */

static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act = NULL;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))
		return -rte_errno;

	item = next_no_void_pattern(pattern, NULL);
	if (hinic_check_ethertype_first_item(item, error))
		return -rte_errno;

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/*
	 * Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/*
	 * If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
		return -rte_errno;

	if (hinic_check_ethertype_attr_ele(attr, error))
		return -rte_errno;

	return 0;
}

static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
		return -rte_errno;

	/* NIC doesn't support MAC address matching. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Drop option is unsupported");
		return -rte_errno;
	}

	/* Hinic only supports LACP/ARP ether types */
	if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
		filter->ether_type != RTE_ETHER_TYPE_ARP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"only lacp/arp type supported by ethertype filter");
		return -rte_errno;
	}

	return 0;
}

static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
		attr->priority > HINIC_MAX_N_TUPLE_PRIO)
		filter->priority = 1;
	else
		filter->priority = (uint16_t)attr->priority;

	return 0;
}

static int
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;

	/*
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Flow action type is not QUEUE.");
		return -rte_errno;
	}

	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* Check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Next not void item is not END.");
		return -rte_errno;
	}

	return 0;
}

static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
					const struct rte_flow_item pattern[],
					struct rte_flow_error *error)
{
	const struct rte_flow_item *item;

	/* The first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
		item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	*ipv4_item = item;
	return 0;
}

static int
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item *item = *in_out_item;

	/* Get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/*
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
		ipv4_mask->hdr.type_of_service ||
		ipv4_mask->hdr.total_length ||
		ipv4_mask->hdr.packet_id ||
		ipv4_mask->hdr.fragment_offset ||
		ipv4_mask->hdr.time_to_live ||
		ipv4_mask->hdr.hdr_checksum ||
		!ipv4_mask->hdr.next_proto_id) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* Get next no void item */
	*in_out_item = next_no_void_pattern(pattern, item);
	return 0;
}

static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_icmp *icmp_mask;
	const struct rte_flow_item *item = *in_out_item;
	u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

	if (item->type == RTE_FLOW_ITEM_TYPE_END)
		return 0;

	/* Get TCP or UDP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
		(!item->spec || !item->mask)) {
		memset(filter, 0, ntuple_filter_size);
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, ntuple_filter_size);
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/*
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
			tcp_mask->hdr.recv_ack ||
			tcp_mask->hdr.data_off ||
			tcp_mask->hdr.rx_win ||
			tcp_mask->hdr.cksum ||
			tcp_mask->hdr.tcp_urp) {
			memset(filter, 0, ntuple_filter_size);
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, ntuple_filter_size);
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
		icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

		/* ICMP all should be masked. */
		if (icmp_mask->hdr.icmp_cksum ||
			icmp_mask->hdr.icmp_ident ||
			icmp_mask->hdr.icmp_seq_nb ||
			icmp_mask->hdr.icmp_type ||
			icmp_mask->hdr.icmp_code) {
			memset(filter, 0, ntuple_filter_size);
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* Get next no void item */
	*in_out_item = next_no_void_pattern(pattern, item);
	return 0;
}

static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	/* Check if the next not void item is END */
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	return 0;
}

static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
			const struct rte_flow_item pattern[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
		hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
		hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
		hinic_ntuple_item_check_end(item, filter, error))
		return -rte_errno;

	return 0;
}

/**
 * Parse the rule to see if it is a n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec				Mask
 * ETH		NULL				NULL
 * IPV4		src_addr  192.168.1.20		0xFFFFFFFF
 *		dst_addr  192.167.3.50		0xFFFFFFFF
 *		next_proto_id	17		0xFF
 * UDP/TCP/	src_port	80		0xFFFF
 * SCTP		dst_port	80		0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item is using big endian, while rte_flow_attr and
 * rte_flow_action are using CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */

static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))
		return -rte_errno;

	if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
		return -rte_errno;

	if (hinic_check_ntuple_act_ele(item, actions, filter, error))
		return -rte_errno;

	if (hinic_check_ntuple_attr_ele(attr, filter, error))
		return -rte_errno;

	return 0;
}

static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* Hinic doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Hinic doesn't support many priorities */
	if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
		filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* Fixed value for hinic */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}

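/*
 * Illustrative sketch (not part of the driver): a 5-tuple rule this
 * parser accepts -- TCP (proto 6) between two fixed IPv4 hosts with
 * dst port 80, steered to RX queue 2. Addresses, port and queue index
 * are assumptions for the example; spec/mask fields are big endian.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *		.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *		.next_proto_id = 6 } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = RTE_BE32(0xFFFFFFFF),
 *		.dst_addr = RTE_BE32(0xFFFFFFFF),
 *		.next_proto_id = 0xFF } };
 *	struct rte_flow_item_tcp tcp_spec = { .hdr = {
 *		.dst_port = RTE_BE16(80) } };
 *	struct rte_flow_item_tcp tcp_mask = { .hdr = {
 *		.dst_port = RTE_BE16(0xFFFF) } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 */
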
static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
					const struct rte_flow_item pattern[],
					struct rte_flow_error *error)
{
	const struct rte_flow_item *item;

	/* The first not void item can be MAC or IPv4 or TCP or UDP */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
		item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Not supported by fdir filter, support mac, ipv4, tcp, udp");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
			"Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* All should be masked. */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support mac");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
			item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support mac, ipv4");
			return -rte_errno;
		}
	}

	*ip_item = item;
	return 0;
}

static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item *item = *in_out_item;
	int i;

	/* Get the IPv4 info */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		if (!item->mask) {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid fdir filter mask");
			return -rte_errno;
		}

		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
		/*
		 * Only support src & dst addresses,
		 * others should be masked.
		 */
		if (ipv4_mask->hdr.version_ihl ||
			ipv4_mask->hdr.type_of_service ||
			ipv4_mask->hdr.total_length ||
			ipv4_mask->hdr.packet_id ||
			ipv4_mask->hdr.fragment_offset ||
			ipv4_mask->hdr.time_to_live ||
			ipv4_mask->hdr.next_proto_id ||
			ipv4_mask->hdr.hdr_checksum) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support src, dst ip");
			return -rte_errno;
		}

		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
		rule->mode = HINIC_FDIR_MODE_NORMAL;

		if (item->spec) {
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
			rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
		}

		/*
		 * Check if the next not void item is
		 * TCP or UDP or END.
		 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
			item->type != RTE_FLOW_ITEM_TYPE_UDP &&
			item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
			item->type != RTE_FLOW_ITEM_TYPE_ANY &&
			item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support tcp, udp, end");
			return -rte_errno;
		}
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		if (!item->mask) {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid fdir filter mask");
			return -rte_errno;
		}

		ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;

		/* Only support dst addresses, others should be masked */
		if (ipv6_mask->hdr.vtc_flow ||
			ipv6_mask->hdr.payload_len ||
			ipv6_mask->hdr.proto ||
			ipv6_mask->hdr.hop_limits) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support dst ipv6");
			return -rte_errno;
		}

		/* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
		for (i = 0; i < 16; i++) {
			if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Not supported by fdir filter, do not support src ipv6");
				return -rte_errno;
			}
		}

		if (!item->spec) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, ipv6 spec is NULL");
			return -rte_errno;
		}

		for (i = 0; i < 16; i++) {
			if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
				rule->mask.dst_ipv6_mask |= 1 << i;
		}

		ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
		rte_memcpy(rule->hinic_fdir.dst_ipv6,
			   ipv6_spec->hdr.dst_addr, 16);

		/* Check if the next not void item is TCP or UDP or ICMP. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
			item->type != RTE_FLOW_ITEM_TYPE_UDP &&
			item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
			item->type != RTE_FLOW_ITEM_TYPE_ICMP6) {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support tcp, udp, icmp");
			return -rte_errno;
		}
	}

	*in_out_item = item;
	return 0;
}

static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
			__rte_unused const struct rte_flow_item pattern[],
			__rte_unused struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = *in_out_item;

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by normal fdir filter, not support l4");
		return -rte_errno;
	}

	return 0;
}

static int hinic_normal_item_check_end(const struct rte_flow_item *item,
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	/* Check if the next not void item is END */
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter, support end");
		return -rte_errno;
	}

	return 0;
}

static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	if (hinic_normal_item_check_ether(&item, pattern, error) ||
		hinic_normal_item_check_ip(&item, pattern, rule, error) ||
		hinic_normal_item_check_l4(&item, pattern, rule, error) ||
		hinic_normal_item_check_end(item, rule, error))
		return -rte_errno;

	return 0;
}

static int
hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = *in_out_item;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;

	if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
		rule->mode = HINIC_FDIR_MODE_TCAM;
		rule->mask.proto_mask = UINT16_MAX;
		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6) {
		rule->mode = HINIC_FDIR_MODE_TCAM;
		rule->mask.proto_mask = UINT16_MAX;
		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMPV6;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
		rule->mode = HINIC_FDIR_MODE_TCAM;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		if (!item->mask) {
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support src, dst ports");
			return -rte_errno;
		}

		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/*
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
			tcp_mask->hdr.recv_ack ||
			tcp_mask->hdr.data_off ||
			tcp_mask->hdr.rx_win ||
			tcp_mask->hdr.cksum ||
			tcp_mask->hdr.tcp_urp) {
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir normal tcam filter");
			return -rte_errno;
		}

		rule->mode = HINIC_FDIR_MODE_TCAM;
		rule->mask.proto_mask = UINT16_MAX;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;

		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
		if (item->spec) {
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
			rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
		}
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/*
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support src, dst ports");
			return -rte_errno;
		}

		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
			udp_mask->hdr.dgram_cksum) {
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support udp");
			return -rte_errno;
		}

		rule->mode = HINIC_FDIR_MODE_TCAM;
		rule->mask.proto_mask = UINT16_MAX;
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
		if (item->spec) {
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
			rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
		}
	} else {
		(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter tcam normal, l4 only support icmp, tcp");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter tcam normal, support end");
		return -rte_errno;
	}

	/* get next no void item */
	*in_out_item = item;
	return 0;
}

static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	if (hinic_normal_item_check_ether(&item, pattern, error) ||
		hinic_normal_item_check_ip(&item, pattern, rule, error) ||
		hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
		hinic_normal_item_check_end(item, rule, error))
		return -rte_errno;

	return 0;
}

static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = *in_out_item;

	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support vxlan");
			return -rte_errno;
		}

		*in_out_item = item;
	} else {
		(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp");
		return -rte_errno;
	}

	return 0;
}

static int
hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = *in_out_item;

	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
			item->type != RTE_FLOW_ITEM_TYPE_UDP &&
			item->type != RTE_FLOW_ITEM_TYPE_ANY) {
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support tcp/udp");
			return -rte_errno;
		}

		*in_out_item = item;
	}

	return 0;
}

static int
hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item *item = *in_out_item;

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* Not supported last point for range */
		if (item->last) {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* get the TCP/UDP info */
		if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
			/*
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct hinic_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter, support src, dst ports");
				return -rte_errno;
			}

			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
			if (tcp_mask->hdr.sent_seq ||
				tcp_mask->hdr.recv_ack ||
				tcp_mask->hdr.data_off ||
				tcp_mask->hdr.tcp_flags ||
				tcp_mask->hdr.rx_win ||
				tcp_mask->hdr.cksum ||
				tcp_mask->hdr.tcp_urp) {
				(void)memset(rule, 0,
					sizeof(struct hinic_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter, support tcp");
				return -rte_errno;
			}

			rule->mode = HINIC_FDIR_MODE_TCAM;
			rule->mask.tunnel_flag = UINT16_MAX;
			rule->mask.tunnel_inner_src_port_mask =
							tcp_mask->hdr.src_port;
			rule->mask.tunnel_inner_dst_port_mask =
							tcp_mask->hdr.dst_port;
			rule->mask.proto_mask = UINT16_MAX;

			rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
			if (item->spec) {
				tcp_spec =
				    (const struct rte_flow_item_tcp *)item->spec;
				rule->hinic_fdir.tunnel_inner_src_port =
							tcp_spec->hdr.src_port;
				rule->hinic_fdir.tunnel_inner_dst_port =
							tcp_spec->hdr.dst_port;
			}
		} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
			/*
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct hinic_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter, support src, dst ports");
				return -rte_errno;
			}

			udp_mask = (const struct rte_flow_item_udp *)item->mask;
			if (udp_mask->hdr.dgram_len ||
				udp_mask->hdr.dgram_cksum) {
				memset(rule, 0, sizeof(struct hinic_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter, support udp");
				return -rte_errno;
			}

			rule->mode = HINIC_FDIR_MODE_TCAM;
			rule->mask.tunnel_flag = UINT16_MAX;
			rule->mask.tunnel_inner_src_port_mask =
							udp_mask->hdr.src_port;
			rule->mask.tunnel_inner_dst_port_mask =
							udp_mask->hdr.dst_port;
			rule->mask.proto_mask = UINT16_MAX;

			rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
			if (item->spec) {
				udp_spec =
				    (const struct rte_flow_item_udp *)item->spec;
				rule->hinic_fdir.tunnel_inner_src_port =
							udp_spec->hdr.src_port;
				rule->hinic_fdir.tunnel_inner_dst_port =
							udp_spec->hdr.dst_port;
			}
		} else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
			rule->mode = HINIC_FDIR_MODE_TCAM;
			rule->mask.tunnel_flag = UINT16_MAX;
		} else {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support tcp/udp");
			return -rte_errno;
		}

		/* get next no void item */
		*in_out_item = next_no_void_pattern(pattern, item);
	}

	return 0;
}

static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	if (hinic_normal_item_check_ether(&item, pattern, error) ||
		hinic_normal_item_check_ip(&item, pattern, rule, error) ||
		hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
		hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
		hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
		hinic_normal_item_check_end(item, rule, error))
		return -rte_errno;

	return 0;
}

static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
			const struct rte_flow_action actions[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;

	/* Check if the first not void action is QUEUE */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -rte_errno;
	}

	rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;

	/* Check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP (optional).
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr  1.2.3.6	0xFFFFFFFF
 *		dst_addr  1.2.3.5	0xFFFFFFFF
 * UDP/TCP	src_port  80		0xFFFF
 *		dst_port  80		0xFFFF
 * END
 * Other members in mask and spec should set to 0x00.
 * Item->last should be NULL.
 */

static int
hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))
		return -rte_errno;

	if (hinic_check_normal_item_ele(item, pattern, rule, error))
		return -rte_errno;

	if (hinic_check_normal_attr_ele(attr, rule, error))
		return -rte_errno;

	if (hinic_check_normal_act_ele(item, actions, rule, error))
		return -rte_errno;

	return 0;
}

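/*
 * Illustrative sketch (not part of the driver): a normal-mode fdir
 * rule this parser accepts -- exact match on source and destination
 * IPv4 addresses, steered to RX queue 1. Addresses and queue index
 * are assumptions for the example.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(RTE_IPV4(1, 2, 3, 6)),
 *		.dst_addr = rte_cpu_to_be_32(RTE_IPV4(1, 2, 3, 5)) } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = RTE_BE32(0xFFFFFFFF),
 *		.dst_addr = RTE_BE32(0xFFFFFFFF) } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 */
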
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item can be ANY/TCP/UDP.
 * ACTION:
 * The first not void action should be QUEUE.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr  1.2.3.6	0xFFFFFFFF
 *		dst_addr  1.2.3.5	0xFFFFFFFF
 * UDP/TCP	src_port  80		0xFFFF
 *		dst_port  80		0xFFFF
 * END
 * Other members in mask and spec should set to 0x00.
 * Item->last should be NULL.
 */

static int
hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))
		return -rte_errno;

	if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
		return -rte_errno;

	if (hinic_check_normal_attr_ele(attr, rule, error))
		return -rte_errno;

	if (hinic_check_normal_act_ele(item, actions, rule, error))
		return -rte_errno;

	return 0;
}

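/*
 * Illustrative sketch (not part of the driver): a TCAM-mode rule this
 * parser accepts -- any L4 protocol towards a fixed IPv4 destination,
 * steered to RX queue 3. Address and queue index are assumptions for
 * the example.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.dst_addr = rte_cpu_to_be_32(RTE_IPV4(1, 2, 3, 5)) } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.dst_addr = RTE_BE32(0xFFFFFFFF) } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ANY },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_attr attr = { .ingress = 1 };
 */
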
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item must be UDP.
 * The next not void item must be VXLAN (optional).
 * The first not void item can be ETH or IPV4 or IPV6.
 * The next not void item could be ANY or UDP or TCP (optional).
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr  1.2.3.6	0xFFFFFFFF
 *		dst_addr  1.2.3.5	0xFFFFFFFF
 * UDP		NULL			NULL
 * VXLAN	NULL			NULL
 * UDP/TCP	src_port  80		0xFFFF
 *		dst_port  80		0xFFFF
 * END
 * Other members in mask and spec should set to 0x00.
 * Item->last should be NULL.
 */

static int
hinic_parse_fdir_filter_tcam_tunnel(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))
		return -rte_errno;

	if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
		return -rte_errno;

	if (hinic_check_normal_attr_ele(attr, rule, error))
		return -rte_errno;

	if (hinic_check_normal_act_ele(item, actions, rule, error))
		return -rte_errno;

	return 0;
}

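/*
 * Illustrative sketch (not part of the driver): a TCAM tunnel rule
 * this parser accepts -- VXLAN-encapsulated traffic whose inner UDP
 * destination port is 80, steered to RX queue 4. All values are
 * assumptions for the example.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.dst_addr = rte_cpu_to_be_32(RTE_IPV4(1, 2, 3, 5)) } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.dst_addr = RTE_BE32(0xFFFFFFFF) } };
 *	struct rte_flow_item_udp inner_spec = { .hdr = {
 *		.dst_port = RTE_BE16(80) } };
 *	struct rte_flow_item_udp inner_mask = { .hdr = {
 *		.dst_port = RTE_BE16(0xFFFF) } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &inner_spec, .mask = &inner_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 4 };
 *	struct rte_flow_attr attr = { .ingress = 1 };
 */
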
static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;

	/* Try the three fdir parsers in turn; the first match wins. */
	ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
						rule, error);
	if (ret)
		ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern,
						actions, rule, error);
	if (ret)
		ret = hinic_parse_fdir_filter_tcam_tunnel(attr, pattern,
						actions, rule, error);
	if (ret)
		return ret;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -EINVAL;

	return 0;
}

/**
 * Check if the flow rule is supported by nic.
 * It only checks the format; it does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int hinic_flow_validate(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct hinic_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = hinic_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = hinic_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
	ret = hinic_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);

	return ret;
}

static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
		struct hinic_5tuple_filter_info *hinic_filter_info)
{
	switch (filter->dst_ip_mask) {
	case UINT32_MAX:
		hinic_filter_info->dst_ip_mask = 0;
		hinic_filter_info->dst_ip = filter->dst_ip;
		break;
	case 0:
		hinic_filter_info->dst_ip_mask = 1;
		hinic_filter_info->dst_ip = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
		return -EINVAL;
	}

	switch (filter->src_ip_mask) {
	case UINT32_MAX:
		hinic_filter_info->src_ip_mask = 0;
		hinic_filter_info->src_ip = filter->src_ip;
		break;
	case 0:
		hinic_filter_info->src_ip_mask = 1;
		hinic_filter_info->src_ip = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
		return -EINVAL;
	}

	return 0;
}

static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
		struct hinic_5tuple_filter_info *hinic_filter_info)
{
	switch (filter->dst_port_mask) {
	case UINT16_MAX:
		hinic_filter_info->dst_port_mask = 0;
		hinic_filter_info->dst_port = filter->dst_port;
		break;
	case 0:
		hinic_filter_info->dst_port_mask = 1;
		hinic_filter_info->dst_port = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
		return -EINVAL;
	}

	switch (filter->src_port_mask) {
	case UINT16_MAX:
		hinic_filter_info->src_port_mask = 0;
		hinic_filter_info->src_port = filter->src_port;
		break;
	case 0:
		hinic_filter_info->src_port_mask = 1;
		hinic_filter_info->src_port = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid src_port mask.");
		return -EINVAL;
	}

	return 0;
}

static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
		struct hinic_5tuple_filter_info *hinic_filter_info)
{
	switch (filter->proto_mask) {
	case UINT8_MAX:
		hinic_filter_info->proto_mask = 0;
		hinic_filter_info->proto = filter->proto;
		break;
	case 0:
		hinic_filter_info->proto_mask = 1;
		hinic_filter_info->proto = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid protocol mask.");
		return -EINVAL;
	}

	return 0;
}

static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
			struct hinic_5tuple_filter_info *filter_info)
{
	if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
		filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
		filter->priority < HINIC_MIN_N_TUPLE_PRIO)
		return -EINVAL;

	if (ntuple_ip_filter(filter, filter_info) ||
		ntuple_port_filter(filter, filter_info) ||
		ntuple_proto_filter(filter, filter_info))
		return -EINVAL;

	filter_info->priority = (uint8_t)filter->priority;
	return 0;
}

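/*
 * Illustrative note (an interpretation, not part of the driver): the
 * hardware mask convention used above appears inverted with respect to
 * rte_flow -- a fully-set rte_flow mask (exact match) maps to a
 * hardware mask bit of 0, while a zero rte_flow mask (wildcard) maps
 * to 1. For example, for the destination IP:
 *
 *	filter->dst_ip_mask == UINT32_MAX
 *	  -> hinic_filter_info->dst_ip_mask = 0 (compare dst_ip)
 *	filter->dst_ip_mask == 0
 *	  -> hinic_filter_info->dst_ip_mask = 1 (ignore dst_ip)
 */
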
static inline struct hinic_5tuple_filter *
hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
			struct hinic_5tuple_filter_info *key)
{
	struct hinic_5tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct hinic_5tuple_filter_info)) == 0) {
			return it;
		}
	}

	return NULL;
}

static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule lacp_rule;
	struct tag_pa_action lacp_action;

	memset(&lacp_rule, 0, sizeof(lacp_rule));
	memset(&lacp_action, 0, sizeof(lacp_action));

	/* LACP TCAM rule */
	lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
	lacp_rule.l2_header.eth_type.val16 = 0x8809;
	lacp_rule.l2_header.eth_type.mask16 = 0xffff;

	/* LACP TCAM action */
	lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
	lacp_action.fwd_action = 0x7; /* 0x3: drop; 0x7: not convert */
	lacp_action.pkt_type = PKT_LACP_TYPE;
	lacp_action.pri = 0x0;
	lacp_action.push_len = 0xf; /* push_len: 0xf, not convert */

	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
				&lacp_rule, &lacp_action);
}

static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule bgp_rule;
	struct tag_pa_action bgp_action;

	memset(&bgp_rule, 0, sizeof(bgp_rule));
	memset(&bgp_action, 0, sizeof(bgp_action));

	/* BGP TCAM rule */
	bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
	bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
	bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
	bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
	bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
	bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;

	/* BGP TCAM action */
	bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
	bgp_action.fwd_action = 0x7; /* 0x3: drop; 0x7: not convert */
	bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
	/*
	 * The priority of BGP is 0xf; it comes from the ipsu parse
	 * results and does not need to be converted.
	 */
	bgp_action.pri = 0xf;
	bgp_action.push_len = 0xf; /* push_len: 0xf, not convert */

	return hinic_set_fdir_tcam(nic_dev->hwdev,
			TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
}

static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule bgp_rule;
	struct tag_pa_action bgp_action;

	memset(&bgp_rule, 0, sizeof(bgp_rule));
	memset(&bgp_action, 0, sizeof(bgp_action));

	/* BGP TCAM rule */
	bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
	bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
	bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
	bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
	bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
	bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;

	/* BGP TCAM action */
	bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
	bgp_action.fwd_action = 0x7; /* 0x3: drop; 0x7: not convert */
	bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp sport: 84 */
	/*
	 * The priority of BGP is 0xf; it comes from the ipsu parse
	 * results and does not need to be converted.
	 */
	bgp_action.pri = 0xf;
	bgp_action.push_len = 0xf; /* push_len: 0xf, not convert */

	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
			&bgp_rule, &bgp_action);
}

static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule vrrp_rule;
	struct tag_pa_action vrrp_action;

	memset(&vrrp_rule, 0, sizeof(vrrp_rule));
	memset(&vrrp_action, 0, sizeof(vrrp_action));

	/* VRRP TCAM rule */
	vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
	vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
	vrrp_rule.ip_header.protocol.mask8 = 0xff;
	vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;

	/* VRRP TCAM action */
	vrrp_action.err_type = 0x3f;
	vrrp_action.fwd_action = 0x7;
	vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
	vrrp_action.pri = 0xf;
	vrrp_action.push_len = 0xf;

	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
				&vrrp_rule, &vrrp_action);
}

/**
 * Clear all fdir configuration.
 *
 * @param nic_dev
 *   The hardware interface of an Ethernet device.
 */
void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
{
	(void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);

	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);

	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);

	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);

	(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);

	(void)hinic_flush_tcam_rule(nic_dev->hwdev);
}

static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
		       struct hinic_filter_info *filter_info)
{
	switch (filter->filter_info.proto) {
	case IPPROTO_TCP:
		/* Filter type is bgp type if dst_port or src_port is 179 */
		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
			!(filter->filter_info.dst_port_mask)) {
			filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
		} else if (filter->filter_info.src_port ==
			RTE_BE16(BGP_DPORT_ID) &&
			!(filter->filter_info.src_port_mask)) {
			filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
		} else {
			PMD_DRV_LOG(INFO, "TCP PROTOCOL: 5tuple filters"
				" only support BGP now, proto: 0x%x, "
				"dst_port: 0x%x, dst_port_mask: 0x%x, "
				"src_port: 0x%x, src_port_mask: 0x%x.",
				filter->filter_info.proto,
				filter->filter_info.dst_port,
				filter->filter_info.dst_port_mask,
				filter->filter_info.src_port,
				filter->filter_info.src_port_mask);
			return -EINVAL;
		}
		break;

	case IPPROTO_VRRP:
		filter_info->pkt_type = PKT_VRRP_TYPE;
		break;

	case IPPROTO_ICMP:
		filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
		break;

	case IPPROTO_ICMPV6:
		filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
		break;

	default:
		PMD_DRV_LOG(ERR, "5tuple filters only support BGP/VRRP/ICMP now, "
			"proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x, "
			"src_port: 0x%x, src_port_mask: 0x%x.",
			filter->filter_info.proto, filter->filter_info.dst_port,
			filter->filter_info.dst_port_mask,
			filter->filter_info.src_port,
			filter->filter_info.src_port_mask);
		return -EINVAL;
	}

	filter_info->qid = filter->queue;
	return 0;
}

static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
			struct hinic_filter_info *filter_info, int *index)
{
	int type_id;

	type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);

	if (type_id > HINIC_MAX_Q_FILTERS - 1) {
		PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type.");
		return -EINVAL;
	}

	if (!(filter_info->type_mask & (1 << type_id))) {
		filter_info->type_mask |= 1 << type_id;
		filter->index = type_id;
		filter_info->pkt_filters[type_id].enable = true;
		filter_info->pkt_filters[type_id].pkt_proto =
						filter->filter_info.proto;
		TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
				  filter, entries);
	} else {
		PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
		return -EINVAL;
	}

	*index = type_id;
	return 0;
}

/**
 * Add a 5tuple filter
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 * @param filter
 *   Pointer to the filter that will be added.
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
				struct hinic_5tuple_filter *filter)
{
	struct hinic_filter_info *filter_info =
		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
	int i, ret_fw;

	if (hinic_filter_info_init(filter, filter_info) ||
		hinic_lookup_new_filter(filter, filter_info, &i))
		return -EFAULT;

	ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
					filter_info->qid,
					filter_info->pkt_filters[i].enable,
					true);
	if (ret_fw) {
		PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
			filter_info->pkt_type, filter->queue,
			filter_info->pkt_filters[i].enable);
		return -EFAULT;
	}

	PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
			filter_info->pkt_type, filter_info->qid,
			filter_info->pkt_filters[filter->index].enable);

	switch (filter->filter_info.proto) {
	case IPPROTO_TCP:
		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
			ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
			if (ret_fw) {
				PMD_DRV_LOG(ERR, "Set dport bgp failed, "
					"type: 0x%x, qid: 0x%x, enable: 0x%x",
					filter_info->pkt_type, filter->queue,
					filter_info->pkt_filters[i].enable);
				return -EFAULT;
			}

			PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
				filter->queue,
				filter_info->pkt_filters[i].enable);
		} else if (filter->filter_info.src_port ==
			RTE_BE16(BGP_DPORT_ID)) {
			ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
			if (ret_fw) {
				PMD_DRV_LOG(ERR, "Set sport bgp failed, "
					"type: 0x%x, qid: 0x%x, enable: 0x%x",
					filter_info->pkt_type, filter->queue,
					filter_info->pkt_filters[i].enable);
				return -EFAULT;
			}

			PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
					filter->queue,
					filter_info->pkt_filters[i].enable);
		}
		break;

	case IPPROTO_VRRP:
		ret_fw = hinic_set_vrrp_tcam(nic_dev);
		if (ret_fw) {
			PMD_DRV_LOG(ERR, "Set VRRP failed, "
				"type: 0x%x, qid: 0x%x, enable: 0x%x",
				filter_info->pkt_type, filter->queue,
				filter_info->pkt_filters[i].enable);
			return -EFAULT;
		}

		PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
				filter->queue,
				filter_info->pkt_filters[i].enable);
		break;

	default:
		break;
	}

	return 0;
}

2089 * Remove a 5tuple filter
2092 * Pointer to struct rte_eth_dev.
2094 * The pointer of the filter will be removed.
2096 static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
2097 struct hinic_5tuple_filter *filter)
2099 struct hinic_filter_info *filter_info =
2100 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2101 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2103 switch (filter->filter_info.proto) {
2105 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
2109 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
2110 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2111 TCAM_PKT_BGP_DPORT);
2112 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
2113 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2114 TCAM_PKT_BGP_SPORT);
2121 hinic_filter_info_init(filter, filter_info);
2123 filter_info->pkt_filters[filter->index].enable = false;
2124 filter_info->pkt_filters[filter->index].pkt_proto = 0;
2126 PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2127 filter_info->pkt_type,
2128 filter_info->pkt_filters[filter->index].qid,
2129 filter_info->pkt_filters[filter->index].enable);
2130 (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2131 filter_info->pkt_filters[filter->index].qid,
2132 filter_info->pkt_filters[filter->index].enable,
2135 filter_info->pkt_type = 0;
2136 filter_info->qid = 0;
2137 filter_info->pkt_filters[filter->index].qid = 0;
2138 filter_info->type_mask &= ~(1 << (filter->index));
2139 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
2145 * Add or delete a ntuple filter
2148 * Pointer to struct rte_eth_dev.
2149 * @param ntuple_filter
2150 * Pointer to struct rte_eth_ntuple_filter
2152 * If true, add filter; if false, remove filter
2154 * - On success, zero.
2155 * - On failure, a negative value.
2157 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
2158 struct rte_eth_ntuple_filter *ntuple_filter,
2161 struct hinic_filter_info *filter_info =
2162 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2163 struct hinic_5tuple_filter_info filter_5tuple;
2164 struct hinic_5tuple_filter *filter;
2167 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
2168 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
2172 memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
2173 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
2177 filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
2179 if (filter != NULL && add) {
2180 PMD_DRV_LOG(ERR, "Filter exists.");
2183 if (filter == NULL && !add) {
2184 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2189 filter = rte_zmalloc("hinic_5tuple_filter",
2190 sizeof(struct hinic_5tuple_filter), 0);
2193 rte_memcpy(&filter->filter_info, &filter_5tuple,
2194 sizeof(struct hinic_5tuple_filter_info));
2195 filter->queue = ntuple_filter->queue;
2197 filter_info->qid = ntuple_filter->queue;
2199 ret = hinic_add_5tuple_filter(dev, filter);
2206 hinic_remove_5tuple_filter(dev, filter);
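/* Validate the queue index, ether type and flags of an ethertype filter. */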
2212 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
2214 if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
2217 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2218 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
2219 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
2220 " ethertype filter", filter->ether_type);
2224 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
2225 PMD_DRV_LOG(ERR, "Mac compare is not supported");
2228 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2229 PMD_DRV_LOG(ERR, "Drop option is not supported");
2237 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
2238 struct hinic_pkt_filter *ethertype_filter)
2240 switch (ethertype_filter->pkt_proto) {
2241 case RTE_ETHER_TYPE_SLOW:
2242 filter_info->pkt_type = PKT_LACP_TYPE;
2245 case RTE_ETHER_TYPE_ARP:
2246 filter_info->pkt_type = PKT_ARP_TYPE;
2250 PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters");
2254 return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
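/*
 * Occupy the slot matching the filter's packet type; fails if that
 * type id is already in use.
 */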
2258 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
2259 struct hinic_pkt_filter *ethertype_filter)
2263 /* Find LACP or ARP type id */
2264 id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
2268 if (!(filter_info->type_mask & (1 << id))) {
2269 filter_info->type_mask |= 1 << id;
2270 filter_info->pkt_filters[id].pkt_proto =
2271 ethertype_filter->pkt_proto;
2272 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
2273 filter_info->qid = ethertype_filter->qid;
2277 PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
2282 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
2285 if (idx >= HINIC_MAX_Q_FILTERS)
2288 filter_info->pkt_type = 0;
2289 filter_info->type_mask &= ~(1 << idx);
2290 filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
2291 filter_info->pkt_filters[idx].enable = FALSE;
2292 filter_info->pkt_filters[idx].qid = 0;
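/*
 * Add or delete an ethertype filter: program the fdir filter in hardware
 * and, for LACP, the matching TCAM entry.
 */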
2296 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
2297 struct rte_eth_ethertype_filter *filter,
2300 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2301 struct hinic_filter_info *filter_info =
2302 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2303 struct hinic_pkt_filter ethertype_filter;
2307 if (hinic_check_ethertype_filter(filter))
2311 ethertype_filter.pkt_proto = filter->ether_type;
2312 ethertype_filter.enable = TRUE;
2313 ethertype_filter.qid = (u8)filter->queue;
2314 i = hinic_ethertype_filter_insert(filter_info,
2319 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
2320 filter_info->pkt_type, filter_info->qid,
2321 filter_info->pkt_filters[i].enable, true);
2323 PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2324 filter_info->pkt_type, filter->queue,
2325 filter_info->pkt_filters[i].enable);
2327 hinic_ethertype_filter_remove(filter_info, i);
2330 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2331 filter_info->pkt_type, filter->queue,
2332 filter_info->pkt_filters[i].enable);
2334 switch (ethertype_filter.pkt_proto) {
2335 case RTE_ETHER_TYPE_SLOW:
2336 ret_fw = hinic_set_lacp_tcam(nic_dev);
2338 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
2339 hinic_ethertype_filter_remove(filter_info, i);
2343 PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
2349 ethertype_filter.pkt_proto = filter->ether_type;
2350 i = hinic_ethertype_filter_lookup(filter_info,
2353 if ((filter_info->type_mask & (1 << i))) {
2354 filter_info->pkt_filters[i].enable = FALSE;
2355 (void)hinic_set_fdir_filter(nic_dev->hwdev,
2356 filter_info->pkt_type,
2357 filter_info->pkt_filters[i].qid,
2358 filter_info->pkt_filters[i].enable,
2361 PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2362 filter_info->pkt_type,
2363 filter_info->pkt_filters[i].qid,
2364 filter_info->pkt_filters[i].enable);
2366 switch (ethertype_filter.pkt_proto) {
2367 case RTE_ETHER_TYPE_SLOW:
2368 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2370 PMD_DRV_LOG(INFO, "Del lacp tcam succeed");
2376 hinic_ethertype_filter_remove(filter_info, i);
2379 PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
2380 filter_info->pkt_type, filter->queue,
2381 filter_info->pkt_filters[i].enable);
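/* Fill fdir_info (flag, queue id and key) from the rule's IPv4 masks. */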
2389 static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
2390 struct hinic_fdir_info *fdir_info)
2392 switch (rule->mask.src_ipv4_mask) {
2394 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
2395 fdir_info->qid = rule->queue;
2396 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
2403 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
2407 switch (rule->mask.dst_ipv4_mask) {
2409 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
2410 fdir_info->qid = rule->queue;
2411 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
2418 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2422 if (fdir_info->fdir_flag == 0) {
2423 PMD_DRV_LOG(ERR, "All support mask is NULL.");
2430 static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
2431 struct hinic_fdir_rule *rule, bool add)
2433 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2434 struct hinic_fdir_info fdir_info;
2437 memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2439 ret = hinic_fdir_info_init(rule, &fdir_info);
2441 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2446 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2447 true, fdir_info.fdir_key,
2448 true, fdir_info.fdir_flag);
2450 PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2451 fdir_info.fdir_flag, fdir_info.qid,
2452 fdir_info.fdir_key);
2455 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2456 fdir_info.fdir_flag, fdir_info.qid,
2457 fdir_info.fdir_key);
2459 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2460 false, fdir_info.fdir_key, true,
2461 fdir_info.fdir_flag);
2463 PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2464 fdir_info.fdir_flag, fdir_info.qid,
2465 fdir_info.fdir_key);
2468 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2469 fdir_info.fdir_flag, fdir_info.qid,
2470 fdir_info.fdir_key);
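/*
 * TCAM keys use the x/y encoding: for each "care" bit (mask bit set),
 * key_y holds the match value and key_x its complement; for "don't care"
 * bits both are zero. A minimal example with assumed values: src_input
 * 0xAB with mask 0xF0 gives key_y = 0xAB & 0xF0 = 0xA0 and
 * key_x = 0xA0 ^ 0xF0 = 0x50, so only the high nibble is matched.
 */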
2476 static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
2480 for (idx = 0; idx < len; idx++)
2481 key_y[idx] = src_input[idx] & mask[idx];
2484 static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
2488 for (idx = 0; idx < len; idx++)
2489 key_x[idx] = key_y[idx] ^ mask[idx];
2492 static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
2493 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2495 tcam_translate_key_y(fdir_tcam_rule->key.y,
2496 (u8 *)(&tcam_key->key_info),
2497 (u8 *)(&tcam_key->key_mask),
2498 TCAM_FLOW_KEY_SIZE);
2499 tcam_translate_key_x(fdir_tcam_rule->key.x,
2500 fdir_tcam_rule->key.y,
2501 (u8 *)(&tcam_key->key_mask),
2502 TCAM_FLOW_KEY_SIZE);
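/*
 * Build the TCAM key info/mask for an IPv4 flow director rule:
 * destination address, ports, tunnel flag, protocol and function id.
 */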
2505 static int hinic_fdir_tcam_ipv4_init(struct rte_eth_dev *dev,
2506 struct hinic_fdir_rule *rule,
2507 struct tag_tcam_key *tcam_key)
2509 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2511 switch (rule->mask.dst_ipv4_mask) {
2513 tcam_key->key_info.ext_dip_h =
2514 (rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
2515 tcam_key->key_info.ext_dip_l =
2516 rule->hinic_fdir.dst_ip & 0xffffU;
2517 tcam_key->key_mask.ext_dip_h =
2518 (rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
2519 tcam_key->key_mask.ext_dip_l =
2520 rule->mask.dst_ipv4_mask & 0xffffU;
2527 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
2531 if (rule->mask.dst_port_mask > 0) {
2532 tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
2533 tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
2536 if (rule->mask.src_port_mask > 0) {
2537 tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
2538 tcam_key->key_mask.src_port = rule->mask.src_port_mask;
2541 switch (rule->mask.tunnel_flag) {
2543 tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
2544 tcam_key->key_mask.tunnel_flag = UINT8_MAX;
2548 tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
2549 tcam_key->key_mask.tunnel_flag = 0;
2553 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
2557 if (rule->mask.tunnel_inner_dst_port_mask > 0) {
2558 tcam_key->key_info.dst_port =
2559 rule->hinic_fdir.tunnel_inner_dst_port;
2560 tcam_key->key_mask.dst_port =
2561 rule->mask.tunnel_inner_dst_port_mask;
2564 if (rule->mask.tunnel_inner_src_port_mask > 0) {
2565 tcam_key->key_info.src_port =
2566 rule->hinic_fdir.tunnel_inner_src_port;
2567 tcam_key->key_mask.src_port =
2568 rule->mask.tunnel_inner_src_port_mask;
2571 switch (rule->mask.proto_mask) {
2573 tcam_key->key_info.protocol = rule->hinic_fdir.proto;
2574 tcam_key->key_mask.protocol = UINT8_MAX;
2581 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
2585 tcam_key->key_mask.function_id = UINT16_MAX;
2586 tcam_key->key_info.function_id =
2587 hinic_global_func_id(nic_dev->hwdev) & 0x7fff;
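/*
 * Build the TCAM key info/mask for an IPv6 flow director rule. The
 * destination address bytes are packed big-endian into eight 16-bit
 * keys, e.g. dst_ipv6[0..1] = {0x20, 0x01} yields ipv6_key0 = 0x2001.
 */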
2592 static int hinic_fdir_tcam_ipv6_init(struct rte_eth_dev *dev,
2593 struct hinic_fdir_rule *rule,
2594 struct tag_tcam_key *tcam_key)
2596 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2598 switch (rule->mask.dst_ipv6_mask) {
2600 tcam_key->key_info_ipv6.ipv6_key0 =
2601 ((rule->hinic_fdir.dst_ipv6[0] << 8) & 0xff00) |
2602 rule->hinic_fdir.dst_ipv6[1];
2603 tcam_key->key_info_ipv6.ipv6_key1 =
2604 ((rule->hinic_fdir.dst_ipv6[2] << 8) & 0xff00) |
2605 rule->hinic_fdir.dst_ipv6[3];
2606 tcam_key->key_info_ipv6.ipv6_key2 =
2607 ((rule->hinic_fdir.dst_ipv6[4] << 8) & 0xff00) |
2608 rule->hinic_fdir.dst_ipv6[5];
2609 tcam_key->key_info_ipv6.ipv6_key3 =
2610 ((rule->hinic_fdir.dst_ipv6[6] << 8) & 0xff00) |
2611 rule->hinic_fdir.dst_ipv6[7];
2612 tcam_key->key_info_ipv6.ipv6_key4 =
2613 ((rule->hinic_fdir.dst_ipv6[8] << 8) & 0xff00) |
2614 rule->hinic_fdir.dst_ipv6[9];
2615 tcam_key->key_info_ipv6.ipv6_key5 =
2616 ((rule->hinic_fdir.dst_ipv6[10] << 8) & 0xff00) |
2617 rule->hinic_fdir.dst_ipv6[11];
2618 tcam_key->key_info_ipv6.ipv6_key6 =
2619 ((rule->hinic_fdir.dst_ipv6[12] << 8) & 0xff00) |
2620 rule->hinic_fdir.dst_ipv6[13];
2621 tcam_key->key_info_ipv6.ipv6_key7 =
2622 ((rule->hinic_fdir.dst_ipv6[14] << 8) & 0xff00) |
2623 rule->hinic_fdir.dst_ipv6[15];
2624 tcam_key->key_mask_ipv6.ipv6_key0 = UINT16_MAX;
2625 tcam_key->key_mask_ipv6.ipv6_key1 = UINT16_MAX;
2626 tcam_key->key_mask_ipv6.ipv6_key2 = UINT16_MAX;
2627 tcam_key->key_mask_ipv6.ipv6_key3 = UINT16_MAX;
2628 tcam_key->key_mask_ipv6.ipv6_key4 = UINT16_MAX;
2629 tcam_key->key_mask_ipv6.ipv6_key5 = UINT16_MAX;
2630 tcam_key->key_mask_ipv6.ipv6_key6 = UINT16_MAX;
2631 tcam_key->key_mask_ipv6.ipv6_key7 = UINT16_MAX;
2638 PMD_DRV_LOG(ERR, "invalid dst_ipv6 mask");
2642 if (rule->mask.dst_port_mask > 0) {
2643 tcam_key->key_info_ipv6.dst_port = rule->hinic_fdir.dst_port;
2644 tcam_key->key_mask_ipv6.dst_port = rule->mask.dst_port_mask;
2647 switch (rule->mask.proto_mask) {
2649 tcam_key->key_info_ipv6.protocol =
2650 (rule->hinic_fdir.proto) & 0x7F;
2651 tcam_key->key_mask_ipv6.protocol = 0x7F;
2658 PMD_DRV_LOG(ERR, "invalid tunnel flag mask");
2662 tcam_key->key_info_ipv6.ipv6_flag = 1;
2663 tcam_key->key_mask_ipv6.ipv6_flag = 1;
2665 tcam_key->key_mask_ipv6.function_id = UINT8_MAX;
2666 tcam_key->key_info_ipv6.function_id =
2667 (u8)hinic_global_func_id(nic_dev->hwdev);
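/*
 * Dispatch to the IPv4 or IPv6 TCAM key builder, set the rule's queue,
 * then derive the x/y keys from the assembled info and mask.
 */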
2672 static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
2673 struct hinic_fdir_rule *rule,
2674 struct tag_tcam_key *tcam_key,
2675 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2679 if (rule->mask.dst_ipv4_mask == UINT32_MAX)
2680 ret = hinic_fdir_tcam_ipv4_init(dev, rule, tcam_key);
2681 else if (rule->mask.dst_ipv6_mask == UINT16_MAX)
2682 ret = hinic_fdir_tcam_ipv6_init(dev, rule, tcam_key);
2687 fdir_tcam_rule->data.qid = rule->queue;
2689 tcam_key_calculate(tcam_key, fdir_tcam_rule);
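/* Find a software TCAM filter whose key matches byte-for-byte. */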
2694 static inline struct hinic_tcam_filter *
2695 hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
2696 struct tag_tcam_key *key)
2698 struct hinic_tcam_filter *it;
2700 TAILQ_FOREACH(it, filter_list, entries) {
2701 if (memcmp(key, &it->tcam_key,
2702 sizeof(struct tag_tcam_key)) == 0) {
2710 static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
2711 struct hinic_tcam_info *tcam_info,
2712 struct hinic_tcam_filter *tcam_filter,
2717 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2719 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2720 max_index = HINIC_VF_MAX_TCAM_FILTERS;
2722 max_index = HINIC_PF_MAX_TCAM_FILTERS;
2724 for (index = 0; index < max_index; index++) {
2725 if (tcam_info->tcam_index_array[index] == 0)
2729 if (index == max_index) {
2730 PMD_DRV_LOG(ERR, "function 0x%x tcam filters only support %d filter rules",
2731 hinic_global_func_id(nic_dev->hwdev), max_index);
2735 tcam_filter->index = index;
2736 *tcam_index = index;
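/*
 * Install a TCAM rule in hardware: allocate the function's TCAM block
 * for the first rule, compute the absolute rule index, add the rule and
 * enable fdir filtering if no rule was present before.
 */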
2741 static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
2742 struct hinic_tcam_filter *tcam_filter,
2743 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2745 struct hinic_tcam_info *tcam_info =
2746 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2747 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2749 u16 tcam_block_index = 0;
2752 if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
2755 if (tcam_info->tcam_rule_nums == 0) {
2756 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2757 rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2758 HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
2760 PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
2764 rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2765 HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
2767 PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
2772 tcam_info->tcam_block_index = tcam_block_index;
2774 tcam_block_index = tcam_info->tcam_block_index;
2777 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2778 fdir_tcam_rule->index =
2779 HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
2781 fdir_tcam_rule->index =
2782 tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
2785 rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
2787 PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!");
2791 PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x,"
2792 "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeed",
2793 hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
2794 fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
2795 tcam_info->tcam_rule_nums + 1);
2797 if (tcam_info->tcam_rule_nums == 0) {
2798 rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
2800 (void)hinic_del_tcam_rule(nic_dev->hwdev,
2801 fdir_tcam_rule->index);
2806 TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);
2808 tcam_info->tcam_index_array[index] = 1;
2809 tcam_info->tcam_rule_nums++;
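/*
 * Remove a TCAM rule from hardware and free the function's TCAM block
 * once the last rule is gone.
 */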
2814 static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
2815 struct hinic_tcam_filter *tcam_filter)
2817 struct hinic_tcam_info *tcam_info =
2818 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2819 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2821 u16 tcam_block_index = tcam_info->tcam_block_index;
2825 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2826 index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
2828 block_type = HINIC_TCAM_BLOCK_TYPE_VF;
2830 index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
2832 block_type = HINIC_TCAM_BLOCK_TYPE_PF;
2835 rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
2837 PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!");
2841 PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
2842 "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeed",
2843 hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
2844 tcam_info->tcam_rule_nums - 1);
2846 TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);
2848 tcam_info->tcam_index_array[tcam_filter->index] = 0;
2850 rte_free(tcam_filter);
2852 tcam_info->tcam_rule_nums--;
2854 if (tcam_info->tcam_rule_nums == 0) {
2855 (void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
2862 static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
2863 struct hinic_fdir_rule *rule, bool add)
2865 struct hinic_tcam_info *tcam_info =
2866 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2867 struct hinic_tcam_filter *tcam_filter;
2868 struct tag_tcam_cfg_rule fdir_tcam_rule;
2869 struct tag_tcam_key tcam_key;
2872 memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
2873 memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));
2875 ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
2877 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2881 tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
2883 if (tcam_filter != NULL && add) {
2884 PMD_DRV_LOG(ERR, "Filter exists.");
2887 if (tcam_filter == NULL && !add) {
2888 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2893 tcam_filter = rte_zmalloc("hinic_5tuple_filter",
2894 sizeof(struct hinic_tcam_filter), 0);
2895 if (tcam_filter == NULL)
2897 (void)rte_memcpy(&tcam_filter->tcam_key,
2898 &tcam_key, sizeof(struct tag_tcam_key));
2899 tcam_filter->queue = fdir_tcam_rule.data.qid;
2901 ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
2903 rte_free(tcam_filter);
2907 rule->tcam_index = fdir_tcam_rule.index;
2910 PMD_DRV_LOG(INFO, "begin to hinic_del_tcam_filter");
2911 ret = hinic_del_tcam_filter(dev, tcam_filter);
2920 * Create or destroy a flow rule.
2921 * Theoretically one rule can match more than one filter.
2922 * We let the rule use the first filter it hits,
2923 * so the sequence matters.
2925 static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
2926 const struct rte_flow_attr *attr,
2927 const struct rte_flow_item pattern[],
2928 const struct rte_flow_action actions[],
2929 struct rte_flow_error *error)
2932 struct rte_eth_ntuple_filter ntuple_filter;
2933 struct rte_eth_ethertype_filter ethertype_filter;
2934 struct hinic_fdir_rule fdir_rule;
2935 struct rte_flow *flow = NULL;
2936 struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2937 struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2938 struct hinic_fdir_rule_ele *fdir_rule_ptr;
2939 struct hinic_flow_mem *hinic_flow_mem_ptr;
2940 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2942 flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
2944 PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
2948 hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
2949 sizeof(struct hinic_flow_mem), 0);
2950 if (!hinic_flow_mem_ptr) {
2951 PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
2956 hinic_flow_mem_ptr->flow = flow;
2957 TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
2960 /* Add ntuple filter */
2961 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2962 ret = hinic_parse_ntuple_filter(dev, attr, pattern,
2963 actions, &ntuple_filter, error);
2965 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2967 ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
2968 sizeof(struct hinic_ntuple_filter_ele), 0);
2969 rte_memcpy(&ntuple_filter_ptr->filter_info,
2971 sizeof(struct rte_eth_ntuple_filter));
2972 TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
2973 ntuple_filter_ptr, entries);
2974 flow->rule = ntuple_filter_ptr;
2975 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2977 PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
2978 hinic_global_func_id(nic_dev->hwdev));
2984 /* Add ethertype filter */
2985 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2986 ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
2987 ðertype_filter, error);
2989 ret = hinic_add_del_ethertype_filter(dev, ðertype_filter,
2992 ethertype_filter_ptr =
2993 rte_zmalloc("hinic_ethertype_filter",
2994 sizeof(struct hinic_ethertype_filter_ele), 0);
2995 rte_memcpy(ðertype_filter_ptr->filter_info,
2997 sizeof(struct rte_eth_ethertype_filter));
2998 TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
2999 ethertype_filter_ptr, entries);
3000 flow->rule = ethertype_filter_ptr;
3001 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3003 PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x",
3004 hinic_global_func_id(nic_dev->hwdev));
3010 /* Add fdir filter */
3011 memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
3012 ret = hinic_parse_fdir_filter(dev, attr, pattern,
3013 actions, &fdir_rule, error);
3015 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
3016 ret = hinic_add_del_fdir_filter(dev,
3018 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
3019 ret = hinic_add_del_tcam_fdir_filter(dev,
3022 PMD_DRV_LOG(INFO, "flow fdir rule create failed, rule mode wrong");
3026 fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
3027 sizeof(struct hinic_fdir_rule_ele), 0);
3028 rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
3029 sizeof(struct hinic_fdir_rule));
3030 TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
3031 fdir_rule_ptr, entries);
3032 flow->rule = fdir_rule_ptr;
3033 flow->filter_type = RTE_ETH_FILTER_FDIR;
3035 PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id : 0x%x",
3036 hinic_global_func_id(nic_dev->hwdev));
3043 TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
3044 rte_flow_error_set(error, -ret,
3045 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3046 "Failed to create flow.");
3047 rte_free(hinic_flow_mem_ptr);
3052 /* Destroy a flow rule on hinic. */
3053 static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
3054 struct rte_flow_error *error)
3057 struct rte_flow *pmd_flow = flow;
3058 enum rte_filter_type filter_type = pmd_flow->filter_type;
3059 struct rte_eth_ntuple_filter ntuple_filter;
3060 struct rte_eth_ethertype_filter ethertype_filter;
3061 struct hinic_fdir_rule fdir_rule;
3062 struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3063 struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3064 struct hinic_fdir_rule_ele *fdir_rule_ptr;
3065 struct hinic_flow_mem *hinic_flow_mem_ptr;
3066 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3068 switch (filter_type) {
3069 case RTE_ETH_FILTER_NTUPLE:
3070 ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
3072 rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
3073 sizeof(struct rte_eth_ntuple_filter));
3074 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3076 TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
3077 ntuple_filter_ptr, entries);
3078 rte_free(ntuple_filter_ptr);
3081 case RTE_ETH_FILTER_ETHERTYPE:
3082 ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
3084 rte_memcpy(ðertype_filter,
3085 ðertype_filter_ptr->filter_info,
3086 sizeof(struct rte_eth_ethertype_filter));
3087 ret = hinic_add_del_ethertype_filter(dev,
3088 ðertype_filter, FALSE);
3090 TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3091 ethertype_filter_ptr, entries);
3092 rte_free(ethertype_filter_ptr);
3095 case RTE_ETH_FILTER_FDIR:
3096 fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
3097 rte_memcpy(&fdir_rule,
3098 &fdir_rule_ptr->filter_info,
3099 sizeof(struct hinic_fdir_rule));
3100 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
3101 ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
3102 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
3103 ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
3106 PMD_DRV_LOG(ERR, "FDIR Filter type is wrong!");
3110 TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
3111 fdir_rule_ptr, entries);
3112 rte_free(fdir_rule_ptr);
3116 PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported",
3123 rte_flow_error_set(error, EINVAL,
3124 RTE_FLOW_ERROR_TYPE_HANDLE,
3125 NULL, "Failed to destroy flow");
3129 TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
3130 if (hinic_flow_mem_ptr->flow == pmd_flow) {
3131 TAILQ_REMOVE(&nic_dev->hinic_flow_list,
3132 hinic_flow_mem_ptr, entries);
3133 rte_free(hinic_flow_mem_ptr);
3139 PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
3140 hinic_global_func_id(nic_dev->hwdev));
3145 /* Remove all the n-tuple filters */
3146 static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
3148 struct hinic_filter_info *filter_info =
3149 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3150 struct hinic_5tuple_filter *p_5tuple;
3152 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
3153 hinic_remove_5tuple_filter(dev, p_5tuple);
3156 /* Remove all the ether type filters */
3157 static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
3159 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3160 struct hinic_filter_info *filter_info =
3161 HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
3164 if (filter_info->type_mask &
3165 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
3166 hinic_ethertype_filter_remove(filter_info,
3167 HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
3168 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
3169 filter_info->qid, false, true);
3171 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
3174 if (filter_info->type_mask &
3175 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
3176 hinic_ethertype_filter_remove(filter_info,
3177 HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
3178 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
3179 filter_info->qid, false, true);
3183 PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
3184 filter_info->pkt_type);
3187 /* Remove all the flow director filters */
3188 static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
3190 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3191 struct hinic_tcam_info *tcam_info =
3192 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
3193 struct hinic_tcam_filter *tcam_filter_ptr;
3195 while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
3196 (void)hinic_del_tcam_filter(dev, tcam_filter_ptr);
3198 (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
3200 (void)hinic_flush_tcam_rule(nic_dev->hwdev);
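/* Free all software filter-list entries attached to the port. */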
3203 static void hinic_filterlist_flush(struct rte_eth_dev *dev)
3205 struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3206 struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3207 struct hinic_fdir_rule_ele *fdir_rule_ptr;
3208 struct hinic_flow_mem *hinic_flow_mem_ptr;
3209 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3211 while ((ntuple_filter_ptr =
3212 TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
3213 TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
3215 rte_free(ntuple_filter_ptr);
3218 while ((ethertype_filter_ptr =
3219 TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
3220 TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3221 ethertype_filter_ptr,
3223 rte_free(ethertype_filter_ptr);
3226 while ((fdir_rule_ptr =
3227 TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
3228 TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
3230 rte_free(fdir_rule_ptr);
3233 while ((hinic_flow_mem_ptr =
3234 TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
3235 TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
3237 rte_free(hinic_flow_mem_ptr->flow);
3238 rte_free(hinic_flow_mem_ptr);
3242 /* Destroy all flow rules associated with a port on hinic. */
3243 static int hinic_flow_flush(struct rte_eth_dev *dev,
3244 __rte_unused struct rte_flow_error *error)
3246 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3248 hinic_clear_all_ntuple_filter(dev);
3249 hinic_clear_all_ethertype_filter(dev);
3250 hinic_clear_all_fdir_filter(dev);
3251 hinic_filterlist_flush(dev);
3253 PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x",
3254 hinic_global_func_id(nic_dev->hwdev));
3258 void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
3260 hinic_clear_all_ntuple_filter(dev);
3261 hinic_clear_all_ethertype_filter(dev);
3262 hinic_clear_all_fdir_filter(dev);
3263 hinic_filterlist_flush(dev);
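/*
 * Illustrative application-side sketch (not part of the driver): a flow
 * rule the ntuple path above would accept, steering TCP dst port 179
 * (BGP) to queue 3; "port_id" is a placeholder.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(179) },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */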
3266 const struct rte_flow_ops hinic_flow_ops = {
3267 .validate = hinic_flow_validate,
3268 .create = hinic_flow_create,
3269 .destroy = hinic_flow_destroy,
3270 .flush = hinic_flow_flush,