1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Huawei Technologies Co., Ltd
11 #include <rte_byteorder.h>
12 #include <rte_common.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
17 #include <rte_flow_driver.h>
18 #include "base/hinic_compat.h"
19 #include "base/hinic_pmd_hwdev.h"
20 #include "base/hinic_pmd_hwif.h"
21 #include "base/hinic_pmd_wq.h"
22 #include "base/hinic_pmd_cmdq.h"
23 #include "base/hinic_pmd_niccfg.h"
24 #include "hinic_pmd_ethdev.h"
#define HINIC_MAX_RX_QUEUE_NUM		64

/*
 * <stdint.h> already defines UINT8_MAX..UINT64_MAX; redefining them
 * unconditionally triggers macro-redefinition warnings (errors under
 * -Werror) and shadows the standard values.  Guard each definition so
 * the standard header's definition wins when present.
 */
#ifndef UINT8_MAX
#define UINT8_MAX	(u8)(~((u8)0))		/* 0xFF               */
#endif
#ifndef UINT16_MAX
#define UINT16_MAX	(u16)(~((u16)0))	/* 0xFFFF             */
#endif
#ifndef UINT32_MAX
#define UINT32_MAX	(u32)(~((u32)0))	/* 0xFFFFFFFF         */
#endif
#ifndef UINT64_MAX
#define UINT64_MAX	(u64)(~((u64)0))	/* 0xFFFFFFFFFFFFFFFF */
#endif

/* Highest 7-bit ASCII code point */
#define ASCII_MAX	(0x7F)
/* Packet-aware (PA) engine L3 ethertype classification codes */
#define PA_ETH_TYPE_ROCE		0
#define PA_ETH_TYPE_IPV4		1
#define PA_ETH_TYPE_IPV6		2
#define PA_ETH_TYPE_OTHER		3

/* PA engine IP-protocol classification codes (hardware encoding) */
#define PA_IP_PROTOCOL_TYPE_TCP		1
#define PA_IP_PROTOCOL_TYPE_UDP		2
#define PA_IP_PROTOCOL_TYPE_ICMP	3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP	4
#define PA_IP_PROTOCOL_TYPE_SCTP	5
#define PA_IP_PROTOCOL_TYPE_VRRP	112

/* IANA IP protocol numbers as they appear in the IP header */
#define IP_HEADER_PROTOCOL_TYPE_TCP	6
#define IP_HEADER_PROTOCOL_TYPE_UDP	17
#define IP_HEADER_PROTOCOL_TYPE_ICMP	1
#define IP_HEADER_PROTOCOL_TYPE_ICMPV6	58

/* TCAM entry classes: plain vs. tunnel (e.g. VXLAN) encapsulated packets */
#define FDIR_TCAM_NORMAL_PACKET		0
#define FDIR_TCAM_TUNNEL_PACKET		1

/* Valid rte_flow attr->priority range for n-tuple filters */
#define HINIC_MIN_N_TUPLE_PRIO		1
#define HINIC_MAX_N_TUPLE_PRIO		7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT	1
#define TCAM_PKT_VRRP		2
#define TCAM_PKT_BGP_DPORT	3
#define TCAM_PKT_LACP		4

/* TCAM destination-IP address family selector */
#define TCAM_DIP_IPV4_TYPE	0
#define TCAM_DIP_IPV6_TYPE	1

#define BGP_DPORT_ID		179	/* well-known BGP TCP port */
/* NOTE(review): IPPROTO_VRRP may also be defined by <netinet/in.h> on some
 * platforms — confirm no conflicting definition is pulled in.
 */
#define IPPROTO_VRRP		112

/* Packet type defined in hardware to perform filter */
#define PKT_IGMP_IPV4_TYPE	64
#define PKT_ICMP_IPV4_TYPE	65
#define PKT_ICMP_IPV6_TYPE	66
#define PKT_ICMP_IPV6RS_TYPE	67
#define PKT_ICMP_IPV6RA_TYPE	68
#define PKT_ICMP_IPV6NS_TYPE	69
#define PKT_ICMP_IPV6NA_TYPE	70
#define PKT_ICMP_IPV6RE_TYPE	71
#define PKT_DHCP_IPV4_TYPE	72
#define PKT_DHCP_IPV6_TYPE	73
#define PKT_LACP_TYPE		74
#define PKT_ARP_REQ_TYPE	79
#define PKT_ARP_REP_TYPE	80
#define PKT_ARP_TYPE		81
#define PKT_BGPD_DPORT_TYPE	83
#define PKT_BGPD_SPORT_TYPE	84
#define PKT_VRRP_TYPE		85
/*
 * Accessors from a device private-data pointer to the embedded filter /
 * TCAM bookkeeping structures.  The macro argument is parenthesized
 * before the cast so that expression arguments (e.g. `base + off`)
 * expand correctly — the original expansion used the bare argument,
 * which is a macro-hygiene bug.
 */
#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
	(&((struct hinic_nic_dev *)(nic_dev))->filter)

#define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
	(&((struct hinic_nic_dev *)(nic_dev))->tcam)
/*
 * Flow-director (ATR) match-field selectors.
 * NOTE(review): the closing of this enum and the body of the struct
 * below are elided in this excerpt; visible tokens preserved verbatim.
 */
enum hinic_atr_flow_type {
	HINIC_ATR_FLOW_TYPE_IPV4_DIP = 0x1,	/* match IPv4 destination IP */
	HINIC_ATR_FLOW_TYPE_IPV4_SIP = 0x2,	/* match IPv4 source IP */
	HINIC_ATR_FLOW_TYPE_DPORT = 0x3,	/* match L4 destination port */
	HINIC_ATR_FLOW_TYPE_SPORT = 0x4,	/* match L4 source port */

/* Structure to store fdir's info. */
struct hinic_fdir_info {
113 * Endless loop will never happen with below assumption
114 * 1. there is at least one no-void item(END)
115 * 2. cur is before END.
117 static inline const struct rte_flow_item *
118 next_no_void_pattern(const struct rte_flow_item pattern[],
119 const struct rte_flow_item *cur)
121 const struct rte_flow_item *next =
122 cur ? cur + 1 : &pattern[0];
124 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
130 static inline const struct rte_flow_action *
131 next_no_void_action(const struct rte_flow_action actions[],
132 const struct rte_flow_action *cur)
134 const struct rte_flow_action *next =
135 cur ? cur + 1 : &actions[0];
137 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
/*
 * Validate rte_flow attributes for an ethertype filter: only ingress
 * rules are accepted; egress, priority and group are rejected.
 * NOTE(review): this excerpt elides the egress/group guard conditions,
 * braces and return statements around the error paths below.
 */
static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
			struct rte_flow_error *error)
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		/* egress is not supported by this NIC */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
	/* ethertype filters carry no priority semantics */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		/* flow groups are likewise unsupported */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
/*
 * Reject NULL pattern/actions/attr arguments up front so the parsers
 * can dereference them freely.
 * NOTE(review): the NULL-check conditions and returns are elided in
 * this excerpt; only the error-reporting calls are visible.
 */
static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
				const struct rte_flow_item *pattern,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error)
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
/*
 * Validate the first non-void pattern item of an ethertype rule:
 * it must be an ETH item, must not use a range (item->last), and must
 * provide both spec and mask.
 */
static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
				struct rte_flow_error *error)
	/* The first non-void item should be MAC */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
	/* Not supported last point for range */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
/*
 * Parse the action list of an ethertype rule into @filter: the first
 * non-void action must be QUEUE (records the queue index) or DROP
 * (sets RTE_ETHTYPE_FLAGS_DROP), and the next non-void action must be
 * END.  NOTE(review): "aciton" is a typo for "action" in this function
 * name; left as-is because the caller below references the same name.
 */
hinic_parse_ethertype_aciton(const struct rte_flow_action *actions,
			const struct rte_flow_action *act,
			const struct rte_flow_action_queue *act_q,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
		/* else branch (elided): DROP action */
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
* Parse the rule to see if it is an ethertype rule.
272 * And get the ethertype filter info BTW.
274 * The first not void item can be ETH.
275 * The next not void item must be END.
277 * The first not void action should be QUEUE.
278 * The next not void action should be END.
281 * ETH type 0x0807 0xFFFF
283 * other members in mask and spec should set to 0x00.
284 * item->last should be NULL.
/*
 * Core ethertype-rule parser: validates args, the ETH item, the MAC
 * masks and the ethertype mask, then fills @filter and checks the
 * action list and attributes.  (Mojibake "ð_mask" in the extracted
 * text restored to "&eth_mask".)
 */
static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
	const struct rte_flow_item *item;
	const struct rte_flow_action *act = NULL;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))

	item = next_no_void_pattern(pattern, NULL);
	if (hinic_check_ethertype_first_item(item, error))

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/*
	 * Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0 (continuation elided in excerpt).
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");

	/* the ethertype itself must be fully masked */
	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");

	/*
	 * If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
		/* else branch (elided): clear the MAC-match flag */
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;

	/* pattern items are big-endian; convert for the filter struct */
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");

	if (hinic_parse_ethertype_aciton(actions, act, act_q, filter, error))

	if (hinic_check_ethertype_attr_ele(attr, error))
/*
 * Device-level ethertype-rule validation on top of the generic parser:
 * rejects MAC matching, out-of-range queues, IPv4/IPv6 ethertypes and
 * the DROP flag; only LACP (SLOW) and ARP ethertypes are accepted.
 * On every rejection @filter is zeroed before reporting the error.
 */
static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
	if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))

	/* NIC doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");

	/* queue index must name a configured RX queue */
	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Queue index much too big");

	/* IP traffic belongs to the n-tuple/fdir paths, not ethertype */
	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Drop option is unsupported");

	/* Hinic only support LACP/ARP for ether type */
	if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
	    filter->ether_type != RTE_ETHER_TYPE_ARP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"only lacp/arp type supported by ethertype filter");
/*
 * Validate rte_flow attributes for an n-tuple filter and record the
 * priority in @filter.  Priorities outside [HINIC_MIN_N_TUPLE_PRIO,
 * HINIC_MAX_N_TUPLE_PRIO] are clamped to 1.  @filter is zeroed on
 * every rejection.
 */
static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	/* Must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		/* egress branch (guard elided in excerpt) */
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	/* priority is stored in a u16; larger values are invalid */
	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");

	/* out-of-range priorities fall back to the minimum (1) */
	if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
	    attr->priority > HINIC_MAX_N_TUPLE_PRIO)
		filter->priority = 1;
		/* else (elided): take the caller's priority as-is */
		filter->priority = (uint16_t)attr->priority;
/*
 * Validate the action list of an n-tuple rule: exactly one QUEUE
 * action followed by END.  The queue index is copied into @filter.
 * @filter is zeroed on every rejection.
 */
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	const struct rte_flow_action *act;
	/*
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Flow action type is not QUEUE.");

		/* assignment target (filter->queue =) elided in excerpt */
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* Check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Next not void item is not END.");
/*
 * Check the leading pattern items of an n-tuple rule: the first
 * non-void item must be ETH (empty spec/mask, no range) or IPV4; when
 * it is ETH the next non-void item must be IPV4.  The IPv4 item is
 * presumably returned through @ipv4_item — the write-back line is
 * elided in this excerpt; confirm against the full source.
 */
static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
			const struct rte_flow_item pattern[],
			struct rte_flow_error *error)
	const struct rte_flow_item *item;

	/* The first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");

		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
/*
 * Parse the IPv4 item of an n-tuple rule into @filter: only source
 * address, destination address and next_proto_id may be matched; all
 * other IPv4-header fields must be zero-masked.  Advances *in_out_item
 * to the next non-void item on success.
 */
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item *item = *in_out_item;

	/* Get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");

	/* Not supported last point for range */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/*
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum ||
	    !ipv4_mask->hdr.next_proto_id) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");

	/* masks and values stay in network byte order, as in the item */
	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* Get next no void item */
	*in_out_item = next_no_void_pattern(pattern, item);
/*
 * Parse the optional L4 item of an n-tuple rule into @filter.
 * Accepts TCP (ports + tcp_flags; all other header fields must be
 * zero-masked), ICMP (everything must be zero-masked) or END (no L4
 * item at all).  Advances *in_out_item on success; zeroes @filter
 * before reporting any error.
 */
static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_icmp *icmp_mask;
	const struct rte_flow_item *item = *in_out_item;
	u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

	/* no L4 item: nothing more to parse */
	if (item->type == RTE_FLOW_ITEM_TYPE_END)

	/* Get TCP or UDP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, ntuple_filter_size);
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");

	/* Not supported last point for range */
		memset(filter, 0, ntuple_filter_size);
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/*
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0, ntuple_filter_size);
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		/* tcp_flags mask must be all-ones or all-zeros */
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
			/* else branch (elided): partial flag mask rejected */
			memset(filter, 0, ntuple_filter_size);
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
		icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

		/* ICMP all should be masked. */
		if (icmp_mask->hdr.icmp_cksum ||
		    icmp_mask->hdr.icmp_ident ||
		    icmp_mask->hdr.icmp_seq_nb ||
		    icmp_mask->hdr.icmp_type ||
		    icmp_mask->hdr.icmp_code) {
			memset(filter, 0, ntuple_filter_size);
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

	/* Get next no void item */
	*in_out_item = next_no_void_pattern(pattern, item);
/*
 * Final pattern check for an n-tuple rule: the current item must be
 * END.  Zeroes @filter before reporting an error.
 */
static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	/* Check if the next not void item is END */
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
/*
 * Run the full n-tuple pattern pipeline in order: ETH -> IPV4 ->
 * optional L4 -> END.  Each stage advances @item; short-circuits on
 * the first failure.
 */
static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
			const struct rte_flow_item pattern[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
	    hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
	    hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
	    hinic_ntuple_item_check_end(item, filter, error))
* Parse the rule to see if it is an n-tuple rule.
716 * And get the n-tuple filter info BTW.
718 * The first not void item can be ETH or IPV4.
719 * The second not void item must be IPV4 if the first one is ETH.
720 * The third not void item must be UDP or TCP.
721 * The next not void item must be END.
723 * The first not void action should be QUEUE.
724 * The next not void action should be END.
728 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
729 * dst_addr 192.167.3.50 0xFFFFFFFF
730 * next_proto_id 17 0xFF
731 * UDP/TCP/ src_port 80 0xFFFF
732 * SCTP dst_port 80 0xFFFF
734 * other members in mask and spec should set to 0x00.
735 * item->last should be NULL.
* Please be aware there's an assumption for all the parsers.
737 * rte_flow_item is using big endian, rte_flow_attr and
738 * rte_flow_action are using CPU order.
739 * Because the pattern is used to describe the packets,
740 * normally the packets should use network order.
/*
 * Core n-tuple parser: validates arguments, then the pattern items,
 * the actions and finally the attributes, filling @filter along the
 * way.  Short-circuits on the first failing stage.
 */
static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	const struct rte_flow_item *item = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))

	if (hinic_check_ntuple_item_ele(item, pattern, filter, error))

	if (hinic_check_ntuple_act_ele(item, actions, filter, error))

	if (hinic_check_ntuple_attr_ele(attr, filter, error))
/*
 * Device-level n-tuple validation on top of the generic parser:
 * rejects TCP-flag matching and out-of-range priorities/queues, then
 * forces the flags to RTE_5TUPLE_FLAGS (the only mode hinic supports).
 * @filter is zeroed before each error report.
 */
static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	/* Hinic doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");

	/* Hinic doesn't support many priorities */
	if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
	    filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");

	/* queue must name a configured RX queue */
	if (filter->queue >= dev->data->nb_rx_queues)

	/* Fixed value for hinic */
	filter->flags = RTE_5TUPLE_FLAGS;
/*
 * Check the leading pattern items of an fdir rule: the first non-void
 * item must be ETH (fully masked, no range), IPV4, TCP or UDP; after
 * an ETH item the next non-void item must be IPV4 or IPV6.  The IP
 * item is presumably returned through @ip_item — the write-back line
 * is elided in this excerpt; confirm against the full source.
 */
static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
			const struct rte_flow_item pattern[],
			struct rte_flow_error *error)
	const struct rte_flow_item *item;

	/* The first not void item can be MAC or IPv4 or TCP or UDP */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Not supported by fdir filter,support mac,ipv4,tcp,udp");

	/* Not supported last point for range */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
			"Not supported last point for range");

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* All should be masked. */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter,support mac");

		/* Check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter,support mac,ipv4");
/*
 * Parse the IP item of an fdir rule into @rule.
 * IPv4: only src/dst addresses may be matched; sets NORMAL mode.
 * IPv6: only the destination address may be matched; a per-byte
 * dst_ipv6_mask bitmap is built from bytes whose mask is 0xFF, and
 * any masked src byte is rejected.  Advances *in_out_item to the next
 * non-void item and validates it is an accepted L4 type.  @rule is
 * zeroed before each error report.
 */
static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item *item = *in_out_item;

	/* Get the IPv4 info */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/* Not supported last point for range */
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");

			/* missing-mask branch (guard elided in excerpt) */
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid fdir filter mask");

		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
		/*
		 * Only support src & dst addresses,
		 * others should be masked.
		 */
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support src,dst ip");

		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
		rule->mode = HINIC_FDIR_MODE_NORMAL;

			/* assignment target (ipv4_spec =) elided in excerpt */
			(const struct rte_flow_item_ipv4 *)item->spec;
			rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
			rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;

		/*
		 * Check if the next not void item is
		 * TCP or UDP or END.
		 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
		    item->type != RTE_FLOW_ITEM_TYPE_ANY &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support tcp, udp, end");
	} else if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Not supported last point for range */
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");

			/* missing-mask branch (guard elided in excerpt) */
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid fdir filter mask");

		ipv6_mask = (const struct rte_flow_item_ipv6 *)item->mask;

		/* Only support dst addresses, others should be masked */
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support dst ipv6");

		/* check ipv6 src addr mask, ipv6 src addr is 16 bytes */
		for (i = 0; i < 16; i++) {
			if (ipv6_mask->hdr.src_addr[i] == UINT8_MAX) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Not supported by fdir filter, do not support src ipv6");

			/* NULL-spec branch (guard elided in excerpt) */
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, ipv6 spec is NULL");

		/* build a per-byte bitmap of fully-masked dst bytes */
		for (i = 0; i < 16; i++) {
			if (ipv6_mask->hdr.dst_addr[i] == UINT8_MAX)
				rule->mask.dst_ipv6_mask |= 1 << i;

		ipv6_spec = (const struct rte_flow_item_ipv6 *)item->spec;
		rte_memcpy(rule->hinic_fdir.dst_ipv6,
			   ipv6_spec->hdr.dst_addr, 16);

		/*
		 * Check if the next not void item is TCP or UDP or ICMP.
		 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
		    item->type != RTE_FLOW_ITEM_TYPE_ICMP6){
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support tcp, udp, icmp");

	*in_out_item = item;
/*
 * Normal (non-TCAM) fdir mode matches on IP only: any L4 item is
 * rejected; the current item must already be END.
 */
static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
			__rte_unused const struct rte_flow_item pattern[],
			__rte_unused struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
	const struct rte_flow_item *item = *in_out_item;

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by normal fdir filter, not support l4");
/*
 * Final pattern check for an fdir rule: the current item must be END.
 * Zeroes @rule before reporting an error.
 */
static int hinic_normal_item_check_end(const struct rte_flow_item *item,
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
	/* Check if the next not void item is END */
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter, support end");
/*
 * Run the normal-mode fdir pattern pipeline in order: ETH -> IP ->
 * (no L4 allowed) -> END.  Each stage advances @item; short-circuits
 * on the first failure.
 */
static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
	if (hinic_normal_item_check_ether(&item, pattern, error) ||
	    hinic_normal_item_check_ip(&item, pattern, rule, error) ||
	    hinic_normal_item_check_l4(&item, pattern, rule, error) ||
	    hinic_normal_item_check_end(item, rule, error))
/*
 * Parse the L4 item for a TCAM-mode fdir rule into @rule.
 * ICMP/ICMP6 select a fixed protocol match; ANY matches any L4;
 * TCP/UDP require spec+mask and only allow src/dst port matching.
 * Every accepted case sets HINIC_FDIR_MODE_TCAM.  The item after the
 * L4 item must be END.  @rule is zeroed before each error report.
 * NOTE(review): the final error string says "only support icmp, tcp"
 * although UDP is also accepted above — looks stale; confirm upstream.
 */
hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
	const struct rte_flow_item *item = *in_out_item;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;

	if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
		rule->mode = HINIC_FDIR_MODE_TCAM;
		rule->mask.proto_mask = UINT16_MAX;	/* match protocol exactly */
		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6) {
		rule->mode = HINIC_FDIR_MODE_TCAM;
		rule->mask.proto_mask = UINT16_MAX;
		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMPV6;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
		rule->mode = HINIC_FDIR_MODE_TCAM;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
			/* missing spec/mask branch (guard elided in excerpt) */
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support src, dst ports");

		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/*
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir normal tcam filter");

		rule->mode = HINIC_FDIR_MODE_TCAM;
		rule->mask.proto_mask = UINT16_MAX;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;

		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
		rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/*
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
			/* missing mask branch (guard elided in excerpt) */
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support src, dst ports");

		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support udp");

		rule->mode = HINIC_FDIR_MODE_TCAM;
		rule->mask.proto_mask = UINT16_MAX;
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
		rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
		/* else branch (elided): unsupported L4 item type */
		(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter tcam normal, l4 only support icmp, tcp");

	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		(void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter tcam normal, support end");

	/* get next no void item */
	*in_out_item = item;
/*
 * Run the TCAM normal-mode fdir pattern pipeline in order: ETH -> IP
 * -> L4 (TCAM variant, L4 allowed) -> END.  Each stage advances
 * @item; short-circuits on the first failure.
 */
static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
			const struct rte_flow_item pattern[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
	if (hinic_normal_item_check_ether(&item, pattern, error) ||
	    hinic_normal_item_check_ip(&item, pattern, rule, error) ||
	    hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
	    hinic_normal_item_check_end(item, rule, error))
/*
 * Check the outer L4 item of a TCAM tunnel pattern. Only UDP is accepted as
 * the outer transport, and it must be immediately followed by a VXLAN item.
 * On success *in_out_item is left on the VXLAN item; on failure 'rule' is
 * cleared and 'error' is filled in.
 */
1188 static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
1189 const struct rte_flow_item pattern[],
1190 struct hinic_fdir_rule *rule,
1191 struct rte_flow_error *error)
1193 const struct rte_flow_item *item = *in_out_item;
1195 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1196 item = next_no_void_pattern(pattern, item);
1197 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1198 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1199 rte_flow_error_set(error, EINVAL,
1200 RTE_FLOW_ERROR_TYPE_ITEM,
1201 item, "Not supported by fdir filter, support vxlan")
1205 *in_out_item = item;
/* Outer L4 other than UDP cannot carry a VXLAN tunnel: reject. */
1207 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1208 rte_flow_error_set(error, EINVAL,
1209 RTE_FLOW_ERROR_TYPE_ITEM,
1210 item, "Not supported by fdir filter tcam tunnel, outer l4 only support udp")
/*
 * Consume the VXLAN item of a TCAM tunnel pattern. The item following VXLAN
 * must be an inner TCP, UDP or ANY item; anything else is rejected with
 * 'rule' cleared and 'error' populated. On success *in_out_item points at
 * the inner L4 (or ANY) item.
 */
1218 hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
1219 const struct rte_flow_item pattern[],
1220 struct hinic_fdir_rule *rule,
1221 struct rte_flow_error *error)
1223 const struct rte_flow_item *item = *in_out_item;
1226 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
1227 item = next_no_void_pattern(pattern, item);
1228 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1229 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1230 item->type != RTE_FLOW_ITEM_TYPE_ANY) {
1231 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1232 rte_flow_error_set(error, EINVAL,
1233 RTE_FLOW_ERROR_TYPE_ITEM,
1234 item, "Not supported by fdir filter, support tcp/udp")
1238 *in_out_item = item;
/*
 * Parse the inner (post-VXLAN) L4 item of a TCAM tunnel pattern and fill the
 * tunnel fields of 'rule'. Accepts inner TCP, UDP or ANY; for TCP/UDP only
 * src/dst ports may be matched (all other header fields must have zero
 * masks). On any failure 'rule' is cleared and 'error' is populated.
 * On success *in_out_item is advanced past the consumed item.
 */
1245 hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
1246 const struct rte_flow_item pattern[],
1247 struct hinic_fdir_rule *rule,
1248 struct rte_flow_error *error)
1250 const struct rte_flow_item_tcp *tcp_spec;
1251 const struct rte_flow_item_tcp *tcp_mask;
1252 const struct rte_flow_item_udp *udp_spec;
1253 const struct rte_flow_item_udp *udp_mask;
1254 const struct rte_flow_item *item = *in_out_item;
1256 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1257 /* Not supported last point for range */
1259 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1260 rte_flow_error_set(error, EINVAL,
1261 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1262 item, "Not supported last point for range")
1266 /* get the TCP/UDP info */
1267 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1269 * Only care about src & dst ports,
1270 * others should be masked.
1273 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1274 rte_flow_error_set(error, EINVAL,
1275 RTE_FLOW_ERROR_TYPE_ITEM,
1276 item, "Not supported by fdir filter, support src, dst ports")
1280 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
/* Any non-port TCP field with a non-zero mask is unsupported. */
1281 if (tcp_mask->hdr.sent_seq ||
1282 tcp_mask->hdr.recv_ack ||
1283 tcp_mask->hdr.data_off ||
1284 tcp_mask->hdr.tcp_flags ||
1285 tcp_mask->hdr.rx_win ||
1286 tcp_mask->hdr.cksum ||
1287 tcp_mask->hdr.tcp_urp) {
1288 (void)memset(rule, 0,
1289 sizeof(struct hinic_fdir_rule));
1290 rte_flow_error_set(error, EINVAL,
1291 RTE_FLOW_ERROR_TYPE_ITEM,
1292 item, "Not supported by fdir filter, support tcp")
/* Record tunnel-inner TCP port match; ports kept in big-endian. */
1296 rule->mode = HINIC_FDIR_MODE_TCAM;
1297 rule->mask.tunnel_flag = UINT16_MAX;
1298 rule->mask.tunnel_inner_src_port_mask =
1299 tcp_mask->hdr.src_port;
1300 rule->mask.tunnel_inner_dst_port_mask =
1301 tcp_mask->hdr.dst_port;
1302 rule->mask.proto_mask = UINT16_MAX;
1304 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
1307 (const struct rte_flow_item_tcp *)item->spec;
1308 rule->hinic_fdir.tunnel_inner_src_port =
1309 tcp_spec->hdr.src_port;
1310 rule->hinic_fdir.tunnel_inner_dst_port =
1311 tcp_spec->hdr.dst_port;
1313 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1315 * Only care about src & dst ports,
1316 * others should be masked.
1319 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1320 rte_flow_error_set(error, EINVAL,
1321 RTE_FLOW_ERROR_TYPE_ITEM,
1322 item, "Not supported by fdir filter, support src, dst ports")
1326 udp_mask = (const struct rte_flow_item_udp *)item->mask;
/* UDP length/checksum cannot be matched by the hardware. */
1327 if (udp_mask->hdr.dgram_len ||
1328 udp_mask->hdr.dgram_cksum) {
1329 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1330 rte_flow_error_set(error, EINVAL,
1331 RTE_FLOW_ERROR_TYPE_ITEM,
1332 item, "Not supported by fdir filter, support udp")
1336 rule->mode = HINIC_FDIR_MODE_TCAM;
1337 rule->mask.tunnel_flag = UINT16_MAX;
1338 rule->mask.tunnel_inner_src_port_mask =
1339 udp_mask->hdr.src_port;
1340 rule->mask.tunnel_inner_dst_port_mask =
1341 udp_mask->hdr.dst_port;
1342 rule->mask.proto_mask = UINT16_MAX;
1344 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
1347 (const struct rte_flow_item_udp *)item->spec;
1348 rule->hinic_fdir.tunnel_inner_src_port =
1349 udp_spec->hdr.src_port;
1350 rule->hinic_fdir.tunnel_inner_dst_port =
1351 udp_spec->hdr.dst_port;
1353 } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
/* ANY: match the tunnel itself, no inner L4 constraint is recorded. */
1354 rule->mode = HINIC_FDIR_MODE_TCAM;
1355 rule->mask.tunnel_flag = UINT16_MAX;
1357 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1358 rte_flow_error_set(error, EINVAL,
1359 RTE_FLOW_ERROR_TYPE_ITEM,
1360 item, "Not supported by fdir filter, support tcp/udp")
1364 /* get next no void item */
1365 *in_out_item = next_no_void_pattern(pattern, item);
/*
 * Validate a TCAM tunnel fdir pattern:
 * ETH -> IP -> UDP -> VXLAN -> inner TCP/UDP/ANY -> END.
 * Helpers advance 'item' through the pattern; any failure clears 'rule',
 * fills 'error' and yields a non-zero return.
 */
1371 static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
1372 const struct rte_flow_item pattern[],
1373 struct hinic_fdir_rule *rule,
1374 struct rte_flow_error *error)
1376 if (hinic_normal_item_check_ether(&item, pattern, error) ||
1377 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1378 hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
1379 hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
1380 hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
1381 hinic_normal_item_check_end(item, rule, error))
/*
 * Validate flow attributes for fdir rules: only ingress is supported;
 * egress and any non-zero priority are rejected. On rejection 'rule' is
 * cleared and 'error' describes the offending attribute.
 */
1387 static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
1388 struct hinic_fdir_rule *rule,
1389 struct rte_flow_error *error)
1391 /* Must be input direction */
1392 if (!attr->ingress) {
1393 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1394 rte_flow_error_set(error, EINVAL,
1395 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1396 attr, "Only support ingress.")
1402 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1403 rte_flow_error_set(error, EINVAL,
1404 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1405 attr, "Not support egress.")
1410 if (attr->priority) {
1411 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1412 rte_flow_error_set(error, EINVAL,
1413 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1414 attr, "Not support priority.")
/*
 * Validate the action list of an fdir rule: exactly one QUEUE action
 * followed by END. The target queue index is stored in rule->queue.
 * On failure 'rule' is cleared and 'error' is populated.
 */
1421 static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
1422 const struct rte_flow_action actions[],
1423 struct hinic_fdir_rule *rule,
1424 struct rte_flow_error *error)
1426 const struct rte_flow_action *act;
1428 /* Check if the first not void action is QUEUE */
1429 act = next_no_void_action(actions, NULL);
1430 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1431 memset(rule, 0, sizeof(struct hinic_fdir_rule));
/* NOTE(review): error cause reports 'item' rather than the offending
 * action 'act' here — looks intentional in this driver, but confirm. */
1432 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1433 item, "Not supported action.")
1437 rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;
1439 /* Check if the next not void item is END */
1440 act = next_no_void_action(actions, act);
1441 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1442 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1443 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1444 act, "Not supported action.")
1452 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1453 * And get the flow director filter info BTW.
1454 * UDP/TCP/SCTP PATTERN:
1455 * The first not void item can be ETH or IPV4 or IPV6
1456 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1457 * The next not void item could be UDP or TCP(optional)
1458 * The next not void item must be END.
1460 * The first not void action should be QUEUE.
1461 * The second not void optional action should be MARK,
1462 * mark_id is a uint32_t number.
1463 * The next not void action should be END.
1464 * UDP/TCP pattern example:
1467 * IPV4 src_addr 1.2.3.6 0xFFFFFFFF
1468 * dst_addr 1.2.3.5 0xFFFFFFFF
1469 * UDP/TCP src_port 80 0xFFFF
1470 * dst_port 80 0xFFFF
1472 * Other members in mask and spec should set to 0x00.
1473 * Item->last should be NULL.
/*
 * Parse a "normal" (exact-match, non-TCAM) fdir rule: checks arguments,
 * pattern items, attributes and actions in turn, filling 'rule' on success.
 * Any check failing returns its non-zero status with 'error' populated.
 */
1476 hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1477 const struct rte_flow_item pattern[],
1478 const struct rte_flow_action actions[],
1479 struct hinic_fdir_rule *rule,
1480 struct rte_flow_error *error)
1482 const struct rte_flow_item *item = NULL;
1484 if (hinic_check_filter_arg(attr, pattern, actions, error))
1487 if (hinic_check_normal_item_ele(item, pattern, rule, error))
1490 if (hinic_check_normal_attr_ele(attr, rule, error))
1493 if (hinic_check_normal_act_ele(item, actions, rule, error))
1500 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1501 * And get the flow director filter info BTW.
1502 * UDP/TCP/SCTP PATTERN:
1503 * The first not void item can be ETH or IPV4 or IPV6
1504 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1505 * The next not void item can be ANY/TCP/UDP
1507 * The first not void action should be QUEUE.
1508 * The second not void optional action should be MARK,
1509 * mark_id is a uint32_t number.
1510 * The next not void action should be END.
1511 * UDP/TCP pattern example:
1514 * IPV4 src_addr 1.2.3.6 0xFFFFFFFF
1515 * dst_addr 1.2.3.5 0xFFFFFFFF
1516 * UDP/TCP src_port 80 0xFFFF
1517 * dst_port 80 0xFFFF
1519 * Other members in mask and spec should set to 0x00.
1520 * Item->last should be NULL.
/*
 * Parse a TCAM "normal" (non-tunnel) fdir rule. Same structure as the
 * normal parser but uses the TCAM item checker for the pattern stage.
 */
1523 hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
1524 const struct rte_flow_item pattern[],
1525 const struct rte_flow_action actions[],
1526 struct hinic_fdir_rule *rule,
1527 struct rte_flow_error *error)
1529 const struct rte_flow_item *item = NULL;
1531 if (hinic_check_filter_arg(attr, pattern, actions, error))
1534 if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
1537 if (hinic_check_normal_attr_ele(attr, rule, error))
1540 if (hinic_check_normal_act_ele(item, actions, rule, error))
1547 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1548 * And get the flow director filter info BTW.
1549 * UDP/TCP/SCTP PATTERN:
1550 * The first not void item can be ETH or IPV4 or IPV6
1551 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1552 * The next not void item must be UDP
1553 * The next not void item must be VXLAN(optional)
1554 * The first not void item can be ETH or IPV4 or IPV6
1555 * The next not void item could be ANY or UDP or TCP(optional)
1556 * The next not void item must be END.
1558 * The first not void action should be QUEUE.
1559 * The second not void optional action should be MARK,
1560 * mark_id is a uint32_t number.
1561 * The next not void action should be END.
1562 * UDP/TCP pattern example:
1565 * IPV4 src_addr 1.2.3.6 0xFFFFFFFF
1566 * dst_addr 1.2.3.5 0xFFFFFFFF
1569 * UDP/TCP src_port 80 0xFFFF
1570 * dst_port 80 0xFFFF
1572 * Other members in mask and spec should set to 0x00.
1573 * Item->last should be NULL.
/*
 * Parse a TCAM tunnel (VXLAN) fdir rule.
 * ("tacm" in the name is a historical typo for "tcam"; kept because the
 * symbol is referenced by callers.)
 */
1576 hinic_parse_fdir_filter_tacm_tunnel(const struct rte_flow_attr *attr,
1577 const struct rte_flow_item pattern[],
1578 const struct rte_flow_action actions[],
1579 struct hinic_fdir_rule *rule,
1580 struct rte_flow_error *error)
1582 const struct rte_flow_item *item = NULL;
1584 if (hinic_check_filter_arg(attr, pattern, actions, error))
1587 if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
1590 if (hinic_check_normal_attr_ele(attr, rule, error))
1593 if (hinic_check_normal_act_ele(item, actions, rule, error))
/*
 * Try each fdir parser in order (normal, TCAM normal, TCAM tunnel) until
 * one accepts the rule, then validate the chosen queue index against the
 * device's configured Rx queue count.
 */
1599 static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
1600 const struct rte_flow_attr *attr,
1601 const struct rte_flow_item pattern[],
1602 const struct rte_flow_action actions[],
1603 struct hinic_fdir_rule *rule,
1604 struct rte_flow_error *error)
1608 ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
1613 ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
1618 ret = hinic_parse_fdir_filter_tacm_tunnel(attr, pattern, actions,
/* Reject queues beyond what the application configured. */
1624 if (rule->queue >= dev->data->nb_rx_queues)
1631 * Check if the flow rule is supported by the NIC.
1632 * It only checks the format; it does not guarantee the rule can be programmed
1633 * into the HW, because there may not be enough room for it.
/*
 * rte_flow validate callback: try the ntuple, ethertype and fdir parsers
 * in turn against scratch filter structs. Success of any parser means the
 * rule's format is acceptable (no hardware resources are reserved here).
 */
1635 static int hinic_flow_validate(struct rte_eth_dev *dev,
1636 const struct rte_flow_attr *attr,
1637 const struct rte_flow_item pattern[],
1638 const struct rte_flow_action actions[],
1639 struct rte_flow_error *error)
1641 struct rte_eth_ethertype_filter ethertype_filter;
1642 struct rte_eth_ntuple_filter ntuple_filter;
1643 struct hinic_fdir_rule fdir_rule;
1646 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1647 ret = hinic_parse_ntuple_filter(dev, attr, pattern,
1648 actions, &ntuple_filter, error);
1652 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1653 ret = hinic_parse_ethertype_filter(dev, attr, pattern,
1654 actions, &ethertype_filter, error);
1659 memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
1660 ret = hinic_parse_fdir_filter(dev, attr, pattern,
1661 actions, &fdir_rule, error);
/*
 * Convert the ntuple src/dst IP masks into hinic 5-tuple form. A full
 * (all-ones) mask becomes mask=0 with the exact address kept; an empty
 * mask becomes mask=1 (wildcard) with the address zeroed. Partial masks
 * are rejected.
 */
1666 static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
1667 struct hinic_5tuple_filter_info *hinic_filter_info)
1669 switch (filter->dst_ip_mask) {
1671 hinic_filter_info->dst_ip_mask = 0;
1672 hinic_filter_info->dst_ip = filter->dst_ip;
1675 hinic_filter_info->dst_ip_mask = 1;
1676 hinic_filter_info->dst_ip = 0;
1679 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1683 switch (filter->src_ip_mask) {
1685 hinic_filter_info->src_ip_mask = 0;
1686 hinic_filter_info->src_ip = filter->src_ip;
1689 hinic_filter_info->src_ip_mask = 1;
1690 hinic_filter_info->src_ip = 0;
1693 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
/*
 * Convert the ntuple src/dst port masks into hinic 5-tuple form, using the
 * same convention as ntuple_ip_filter: mask=0 means exact match on the
 * kept port value, mask=1 means wildcard; partial masks are rejected.
 */
1699 static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
1700 struct hinic_5tuple_filter_info *hinic_filter_info)
1702 switch (filter->dst_port_mask) {
1704 hinic_filter_info->dst_port_mask = 0;
1705 hinic_filter_info->dst_port = filter->dst_port;
1708 hinic_filter_info->dst_port_mask = 1;
1709 hinic_filter_info->dst_port = 0;
1712 PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
1716 switch (filter->src_port_mask) {
1718 hinic_filter_info->src_port_mask = 0;
1719 hinic_filter_info->src_port = filter->src_port;
1722 hinic_filter_info->src_port_mask = 1;
1723 hinic_filter_info->src_port = 0;
1726 PMD_DRV_LOG(ERR, "Invalid src_port mask.");
/*
 * Convert the ntuple IP-protocol mask into hinic 5-tuple form (same
 * exact/wildcard convention as the IP and port converters above).
 */
1733 static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
1734 struct hinic_5tuple_filter_info *hinic_filter_info)
1736 switch (filter->proto_mask) {
1738 hinic_filter_info->proto_mask = 0;
1739 hinic_filter_info->proto = filter->proto;
1742 hinic_filter_info->proto_mask = 1;
1743 hinic_filter_info->proto = 0;
1746 PMD_DRV_LOG(ERR, "Invalid protocol mask.");
/*
 * Translate an rte_eth ntuple filter into hinic 5-tuple filter info.
 * Rejects out-of-range queue numbers and priorities outside
 * [HINIC_MIN_N_TUPLE_PRIO, HINIC_MAX_N_TUPLE_PRIO], then delegates the
 * IP/port/protocol mask conversion to the helpers above.
 */
1753 static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
1754 struct hinic_5tuple_filter_info *filter_info)
1756 if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
1757 filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
1758 filter->priority < HINIC_MIN_N_TUPLE_PRIO)
1761 if (ntuple_ip_filter(filter, filter_info) ||
1762 ntuple_port_filter(filter, filter_info) ||
1763 ntuple_proto_filter(filter, filter_info))
1766 filter_info->priority = (uint8_t)filter->priority;
/*
 * Linear search of the 5-tuple filter list for an entry whose filter_info
 * is byte-identical to 'key'; returns the matching element or NULL.
 */
1770 static inline struct hinic_5tuple_filter *
1771 hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1772 struct hinic_5tuple_filter_info *key)
1774 struct hinic_5tuple_filter *it;
1776 TAILQ_FOREACH(it, filter_list, entries) {
1777 if (memcmp(key, &it->filter_info,
1778 sizeof(struct hinic_5tuple_filter_info)) == 0) {
/*
 * Install the fixed TCAM rule that steers LACP frames
 * (ethertype 0x8809, matched exactly) to the LACP packet type.
 */
1786 static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1788 struct tag_pa_rule lacp_rule;
1789 struct tag_pa_action lacp_action;
1791 memset(&lacp_rule, 0, sizeof(lacp_rule));
1792 memset(&lacp_action, 0, sizeof(lacp_action));
1793 /* LACP TCAM rule */
1794 lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
1795 lacp_rule.l2_header.eth_type.val16 = 0x8809;
1796 lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1798 /* LACP TCAM action */
1799 lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1800 lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1801 lacp_action.pkt_type = PKT_LACP_TYPE;
1802 lacp_action.pri = 0x0;
1803 lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1805 return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1806 &lacp_rule, &lacp_action);
/*
 * Install the fixed TCAM rule for BGP traffic identified by its TCP
 * destination port (179) over IPv4.
 */
1809 static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1811 struct tag_pa_rule bgp_rule;
1812 struct tag_pa_action bgp_action;
1814 memset(&bgp_rule, 0, sizeof(bgp_rule));
1815 memset(&bgp_action, 0, sizeof(bgp_action));
1817 bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1818 bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1819 bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1820 bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1821 bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1822 bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1824 /* BGP TCAM action */
1825 bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1826 bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1827 bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1828 bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
1829 * results, not need to convert
1831 bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1833 return hinic_set_fdir_tcam(nic_dev->hwdev,
1834 TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
/*
 * Install the fixed TCAM rule for BGP traffic identified by its TCP
 * source port. BGP_DPORT_ID (179) is intentionally reused here: BGP uses
 * port 179 on whichever side initiated the session.
 */
1837 static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1839 struct tag_pa_rule bgp_rule;
1840 struct tag_pa_action bgp_action;
1842 memset(&bgp_rule, 0, sizeof(bgp_rule));
1843 memset(&bgp_action, 0, sizeof(bgp_action));
1845 bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1846 bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1847 bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1848 bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1849 bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
1850 bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1852 /* BGP TCAM action */
1853 bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1854 bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1855 bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
1856 bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
1857 * results, not need to convert
1859 bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1861 return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1862 &bgp_rule, &bgp_action);
/*
 * Install the fixed TCAM rule for VRRP packets (IPv4, IP protocol 112).
 */
1865 static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1867 struct tag_pa_rule vrrp_rule;
1868 struct tag_pa_action vrrp_action;
1870 memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1871 memset(&vrrp_action, 0, sizeof(vrrp_action));
1872 /* VRRP TCAM rule */
1873 vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
/* NOTE(review): ip_protocol_type is TCP while the IP-header protocol
 * value below is VRRP (112). Looks inconsistent — confirm against the
 * hardware's PA rule semantics before changing. */
1874 vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1875 vrrp_rule.ip_header.protocol.mask8 = 0xff;
1876 vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1878 /* VRRP TCAM action */
1879 vrrp_action.err_type = 0x3f;
1880 vrrp_action.fwd_action = 0x7;
1881 vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1882 vrrp_action.pri = 0xf;
1883 vrrp_action.push_len = 0xf;
1885 return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1886 &vrrp_rule, &vrrp_action);
1890 * Clear all fdir configuration.
1893 * The hardware interface of a Ethernet device.
1897 * negative error value otherwise.
/*
 * Tear down all fdir state on the device: disable the normal and TCAM rule
 * filters, clear each fixed TCAM entry (BGP dport/sport, VRRP, LACP) and
 * flush remaining TCAM rules. All return values are deliberately ignored —
 * this is best-effort cleanup.
 */
1899 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
1901 (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
1903 (void)hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false);
1905 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
1907 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
1909 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1911 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
1913 (void)hinic_flush_tcam_rule(nic_dev->hwdev);
/*
 * Map a 5-tuple filter onto a hardware packet type. TCP is only accepted
 * for BGP (port 179 exact-matched on either side); VRRP, ICMP and ICMPv6
 * map directly. Anything else is rejected with a diagnostic log.
 */
1916 static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1917 struct hinic_filter_info *filter_info)
1919 switch (filter->filter_info.proto) {
1921 /* Filter type is bgp type if dst_port or src_port is 179 */
/* mask == 0 means "exact match on the stored port" (see
 * ntuple_port_filter), so these checks require port 179 exactly. */
1922 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1923 !(filter->filter_info.dst_port_mask)) {
1924 filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1925 } else if (filter->filter_info.src_port ==
1926 RTE_BE16(BGP_DPORT_ID) &&
1927 !(filter->filter_info.src_port_mask)) {
1928 filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1930 PMD_DRV_LOG(INFO, "TCP PROTOCOL:5tuple filters"
1931 " just support BGP now, proto:0x%x, "
1932 "dst_port:0x%x, dst_port_mask:0x%x."
1933 "src_port:0x%x, src_port_mask:0x%x.",
1934 filter->filter_info.proto,
1935 filter->filter_info.dst_port,
1936 filter->filter_info.dst_port_mask,
1937 filter->filter_info.src_port,
1938 filter->filter_info.src_port_mask);
1944 filter_info->pkt_type = PKT_VRRP_TYPE;
1948 filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1951 case IPPROTO_ICMPV6:
1952 filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1956 PMD_DRV_LOG(ERR, "5tuple filters just support BGP/VRRP/ICMP now, "
1957 "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x."
1958 "src_port: 0x%x, src_port_mask: 0x%x.",
1959 filter->filter_info.proto, filter->filter_info.dst_port,
1960 filter->filter_info.dst_port_mask,
1961 filter->filter_info.src_port,
1962 filter->filter_info.src_port_mask);
/*
 * Claim the per-type filter slot for this filter's packet type. The slot
 * index is derived from pkt_type; if the slot is free it is marked used,
 * recorded on the filter and appended to the 5-tuple list. A slot already
 * in use, or an out-of-range index, is an error.
 */
1969 static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
1970 struct hinic_filter_info *filter_info, int *index)
1974 type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1976 if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1977 PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type.");
1981 if (!(filter_info->type_mask & (1 << type_id))) {
1982 filter_info->type_mask |= 1 << type_id;
1983 filter->index = type_id;
1984 filter_info->pkt_filters[type_id].enable = true;
1985 filter_info->pkt_filters[type_id].pkt_proto =
1986 filter->filter_info.proto;
1987 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1990 PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1999 * Add a 5tuple filter
2002 * Pointer to struct rte_eth_dev.
2004 * Pointer to the filter that will be added.
2006 * - On success, zero.
2007 * - On failure, a negative value.
/*
 * Add a 5-tuple filter: derive the packet type, claim its slot, program
 * the firmware fdir filter, then — for BGP and VRRP protocols — install
 * the matching fixed TCAM rule as well.
 */
2009 static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
2010 struct hinic_5tuple_filter *filter)
2012 struct hinic_filter_info *filter_info =
2013 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2015 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2017 if (hinic_filter_info_init(filter, filter_info) ||
2018 hinic_lookup_new_filter(filter, filter_info, &i))
2021 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2023 filter_info->pkt_filters[i].enable,
2026 PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2027 filter_info->pkt_type, filter->queue,
2028 filter_info->pkt_filters[i].enable);
2032 PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2033 filter_info->pkt_type, filter_info->qid,
2034 filter_info->pkt_filters[filter->index].enable);
/* Protocol-specific fixed TCAM rules: BGP (by dst or src port) and VRRP. */
2036 switch (filter->filter_info.proto) {
2038 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
2039 ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
2041 PMD_DRV_LOG(ERR, "Set dport bgp failed, "
2042 "type: 0x%x, qid: 0x%x, enable: 0x%x",
2043 filter_info->pkt_type, filter->queue,
2044 filter_info->pkt_filters[i].enable);
2048 PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
2050 filter_info->pkt_filters[i].enable);
2051 } else if (filter->filter_info.src_port ==
2052 RTE_BE16(BGP_DPORT_ID)) {
2053 ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
2055 PMD_DRV_LOG(ERR, "Set sport bgp failed, "
2056 "type: 0x%x, qid: 0x%x, enable: 0x%x",
2057 filter_info->pkt_type, filter->queue,
2058 filter_info->pkt_filters[i].enable);
2062 PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
2064 filter_info->pkt_filters[i].enable);
2070 ret_fw = hinic_set_vrrp_tcam(nic_dev);
2072 PMD_DRV_LOG(ERR, "Set VRRP failed, "
2073 "type: 0x%x, qid: 0x%x, enable: 0x%x",
2074 filter_info->pkt_type, filter->queue,
2075 filter_info->pkt_filters[i].enable);
2078 PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
2080 filter_info->pkt_filters[i].enable);
2091 * Remove a 5tuple filter
2094 * Pointer to struct rte_eth_dev.
2096 * The pointer of the filter will be removed.
/*
 * Remove a 5-tuple filter: clear any protocol-specific fixed TCAM entry
 * (VRRP, or the matching BGP dport/sport rule), disable the firmware fdir
 * filter for the slot, then release the slot and unlink the filter from
 * the 5-tuple list.
 */
2098 static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
2099 struct hinic_5tuple_filter *filter)
2101 struct hinic_filter_info *filter_info =
2102 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2103 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2105 switch (filter->filter_info.proto) {
2107 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
2111 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
2112 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2113 TCAM_PKT_BGP_DPORT);
2114 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
2115 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2116 TCAM_PKT_BGP_SPORT);
/* Re-derive pkt_type so the disable below targets the right filter. */
2123 hinic_filter_info_init(filter, filter_info);
2125 filter_info->pkt_filters[filter->index].enable = false;
2126 filter_info->pkt_filters[filter->index].pkt_proto = 0;
2128 PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2129 filter_info->pkt_type,
2130 filter_info->pkt_filters[filter->index].qid,
2131 filter_info->pkt_filters[filter->index].enable);
2132 (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2133 filter_info->pkt_filters[filter->index].qid,
2134 filter_info->pkt_filters[filter->index].enable,
2137 filter_info->pkt_type = 0;
2138 filter_info->qid = 0;
2139 filter_info->pkt_filters[filter->index].qid = 0;
2140 filter_info->type_mask &= ~(1 << (filter->index));
2141 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
2147 * Add or delete a ntuple filter
2150 * Pointer to struct rte_eth_dev.
2151 * @param ntuple_filter
2152 * Pointer to struct rte_eth_ntuple_filter
2154 * If true, add filter; if false, remove filter
2156 * - On success, zero.
2157 * - On failure, a negative value.
/*
 * Add (add=true) or remove (add=false) an ntuple filter. Only the full
 * 5-tuple flag set is supported. Duplicate adds and removals of absent
 * filters are rejected; an add allocates and registers a new 5-tuple
 * filter object, a removal tears it down via hinic_remove_5tuple_filter.
 */
2159 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
2160 struct rte_eth_ntuple_filter *ntuple_filter,
2163 struct hinic_filter_info *filter_info =
2164 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2165 struct hinic_5tuple_filter_info filter_5tuple;
2166 struct hinic_5tuple_filter *filter;
2169 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
2170 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
2174 memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
2175 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
2179 filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
2181 if (filter != NULL && add) {
2182 PMD_DRV_LOG(ERR, "Filter exists.");
2185 if (filter == NULL && !add) {
2186 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2191 filter = rte_zmalloc("hinic_5tuple_filter",
2192 sizeof(struct hinic_5tuple_filter), 0);
2195 rte_memcpy(&filter->filter_info, &filter_5tuple,
2196 sizeof(struct hinic_5tuple_filter_info));
2197 filter->queue = ntuple_filter->queue;
2199 filter_info->qid = ntuple_filter->queue;
2201 ret = hinic_add_5tuple_filter(dev, filter);
2208 hinic_remove_5tuple_filter(dev, filter);
/*
 * Sanity-check an ethertype filter: queue in range, ethertype not IPv4 or
 * IPv6 (those belong to other filter paths), and neither MAC-compare nor
 * DROP flags set — the hardware supports only queue steering by ethertype.
 */
2214 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
2216 if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
2219 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2220 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
2221 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
2222 " ethertype filter", filter->ether_type);
2226 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
2227 PMD_DRV_LOG(ERR, "Mac compare is not supported");
2230 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2231 PMD_DRV_LOG(ERR, "Drop option is not supported");
/*
 * Map an ethertype filter's protocol to a hardware packet type (only
 * LACP/slow protocols and ARP are supported) and return the corresponding
 * filter slot id via HINIC_PKT_TYPE_FIND_ID.
 */
2239 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
2240 struct hinic_pkt_filter *ethertype_filter)
2242 switch (ethertype_filter->pkt_proto) {
2243 case RTE_ETHER_TYPE_SLOW:
2244 filter_info->pkt_type = PKT_LACP_TYPE;
2247 case RTE_ETHER_TYPE_ARP:
2248 filter_info->pkt_type = PKT_ARP_TYPE;
2252 PMD_DRV_LOG(ERR, "Just support LACP/ARP for ethertype filters");
2256 return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
/*
 * Reserve the filter slot for an ethertype filter and record its protocol,
 * enable state and queue id. Fails if the slot for this packet type is
 * already occupied. Returns the slot id on success.
 */
2260 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
2261 struct hinic_pkt_filter *ethertype_filter)
2265 /* Find LACP or VRRP type id */
2266 id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
2270 if (!(filter_info->type_mask & (1 << id))) {
2271 filter_info->type_mask |= 1 << id;
2272 filter_info->pkt_filters[id].pkt_proto =
2273 ethertype_filter->pkt_proto;
2274 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
2275 filter_info->qid = ethertype_filter->qid;
2279 PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
/*
 * Release the filter slot at 'idx': clear its type-mask bit and reset the
 * slot's protocol, enable flag and queue id. Out-of-range indices are
 * silently ignored.
 */
2284 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
2287 if (idx >= HINIC_MAX_Q_FILTERS)
2290 filter_info->pkt_type = 0;
2291 filter_info->type_mask &= ~(1 << idx);
2292 filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
2293 filter_info->pkt_filters[idx].enable = FALSE;
2294 filter_info->pkt_filters[idx].qid = 0;
/*
 * Add (add=true) or remove an ethertype filter. Adding reserves a slot,
 * programs the firmware filter and — for slow protocols — also installs
 * the LACP TCAM rule; each failure rolls back the slot reservation.
 * Removing disables the firmware filter, clears the LACP TCAM entry when
 * applicable and frees the slot.
 */
2298 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
2299 struct rte_eth_ethertype_filter *filter,
2302 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2303 struct hinic_filter_info *filter_info =
2304 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2305 struct hinic_pkt_filter ethertype_filter;
2309 if (hinic_check_ethertype_filter(filter))
2313 ethertype_filter.pkt_proto = filter->ether_type;
2314 ethertype_filter.enable = TRUE;
2315 ethertype_filter.qid = (u8)filter->queue;
2316 i = hinic_ethertype_filter_insert(filter_info,
2321 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
2322 filter_info->pkt_type, filter_info->qid,
2323 filter_info->pkt_filters[i].enable, true);
2325 PMD_DRV_LOG(ERR, "add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2326 filter_info->pkt_type, filter->queue,
2327 filter_info->pkt_filters[i].enable);
/* Roll back the slot reserved above on firmware failure. */
2329 hinic_ethertype_filter_remove(filter_info, i);
2332 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2333 filter_info->pkt_type, filter->queue,
2334 filter_info->pkt_filters[i].enable);
2336 switch (ethertype_filter.pkt_proto) {
2337 case RTE_ETHER_TYPE_SLOW:
2338 ret_fw = hinic_set_lacp_tcam(nic_dev);
2340 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
2341 hinic_ethertype_filter_remove(filter_info, i);
2345 PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
/* Delete path: locate the slot, disable in firmware, then free it. */
2351 ethertype_filter.pkt_proto = filter->ether_type;
2352 i = hinic_ethertype_filter_lookup(filter_info,
2357 if ((filter_info->type_mask & (1 << i))) {
2358 filter_info->pkt_filters[i].enable = FALSE;
2359 (void)hinic_set_fdir_filter(nic_dev->hwdev,
2360 filter_info->pkt_type,
2361 filter_info->pkt_filters[i].qid,
2362 filter_info->pkt_filters[i].enable,
2365 PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2366 filter_info->pkt_type,
2367 filter_info->pkt_filters[i].qid,
2368 filter_info->pkt_filters[i].enable);
2370 switch (ethertype_filter.pkt_proto) {
2371 case RTE_ETHER_TYPE_SLOW:
2372 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2374 PMD_DRV_LOG(INFO, "Del lacp tcam succeed");
2380 hinic_ethertype_filter_remove(filter_info, i);
2383 PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
2384 filter_info->pkt_type, filter->queue,
2385 filter_info->pkt_filters[i].enable);
/*
 * Translate a normal-mode fdir rule into hinic fdir info. Exactly one of
 * src or dst IPv4 may carry a full mask; its address becomes the match key
 * and the flow flag records which direction is matched. A rule matching
 * neither (fdir_flag still 0) is rejected.
 */
2393 static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
2394 struct hinic_fdir_info *fdir_info)
2396 switch (rule->mask.src_ipv4_mask) {
2398 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
2399 fdir_info->qid = rule->queue;
2400 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
2407 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
2411 switch (rule->mask.dst_ipv4_mask) {
2413 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
2414 fdir_info->qid = rule->queue;
2415 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
2422 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2426 if (fdir_info->fdir_flag == 0) {
2427 PMD_DRV_LOG(ERR, "All support mask is NULL.");
/*
 * hinic_add_del_fdir_filter - program (add == true) or remove
 * (add == false) a normal-mode fdir filter in hardware.
 * The rule is first translated by hinic_fdir_info_init(), then pushed
 * to firmware with hinic_set_normal_filter().
 */
2434 static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
2435 struct hinic_fdir_rule *rule, bool add)
2437 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2438 struct hinic_fdir_info fdir_info;
2441 memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2443 ret = hinic_fdir_info_init(rule, &fdir_info);
2445 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
/* add path: enable the filter with the derived key/flag/queue */
2450 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2451 true, fdir_info.fdir_key,
2452 true, fdir_info.fdir_flag);
2454 PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2455 fdir_info.fdir_flag, fdir_info.qid,
2456 fdir_info.fdir_key);
2459 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2460 fdir_info.fdir_flag, fdir_info.qid,
2461 fdir_info.fdir_key);
/* delete path: same call with enable=false for this key */
2463 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2464 false, fdir_info.fdir_key, true,
2465 fdir_info.fdir_flag);
2467 PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2468 fdir_info.fdir_flag, fdir_info.qid,
2469 fdir_info.fdir_key);
2472 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2473 fdir_info.fdir_flag, fdir_info.qid,
2474 fdir_info.fdir_key);
/* key_y = src_input with only the bits covered by mask retained. */
2480 static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
2484 for (idx = 0; idx < len; idx++)
2485 key_y[idx] = src_input[idx] & mask[idx];
/* key_x = key_y XOR mask: the complementary half of the TCAM x/y pair. */
2488 static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
2492 for (idx = 0; idx < len; idx++)
2493 key_x[idx] = key_y[idx] ^ mask[idx];
/*
 * tcam_key_calculate - derive the hardware x/y key pair from the logical
 * key_info/key_mask pair: y = info & mask, then x = y ^ mask.
 */
2496 static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
2497 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2499 tcam_translate_key_y(fdir_tcam_rule->key.y,
2500 (u8 *)(&tcam_key->key_info),
2501 (u8 *)(&tcam_key->key_mask),
2502 TCAM_FLOW_KEY_SIZE);
2503 tcam_translate_key_x(fdir_tcam_rule->key.x,
2504 fdir_tcam_rule->key.y,
2505 (u8 *)(&tcam_key->key_mask),
2506 TCAM_FLOW_KEY_SIZE);
/*
 * hinic_fdir_tcam_ipv4_init - fill the IPv4 TCAM key/mask from the rule:
 * destination IP (split into high/low 16-bit halves), L4 ports, tunnel
 * flag, IP protocol, and the function id of this port.
 * NOTE(review): case labels/returns are missing from this excerpt;
 * comments describe only the visible statements.
 */
2509 static int hinic_fdir_tcam_ipv4_init(struct rte_eth_dev *dev,
2510 struct hinic_fdir_rule *rule,
2511 struct tag_tcam_key *tcam_key)
2513 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2515 switch (rule->mask.dst_ipv4_mask) {
/* 32-bit dst IP is stored as two 16-bit fields (high then low) */
2517 tcam_key->key_info.ext_dip_h =
2518 (rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
2519 tcam_key->key_info.ext_dip_l =
2520 rule->hinic_fdir.dst_ip & 0xffffU;
2521 tcam_key->key_mask.ext_dip_h =
2522 (rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
2523 tcam_key->key_mask.ext_dip_l =
2524 rule->mask.dst_ipv4_mask & 0xffffU;
2531 PMD_DRV_LOG(ERR, "invalid src_ip mask.");
/* copy L4 ports only when the rule actually masks them */
2535 if (rule->mask.dst_port_mask > 0) {
2536 tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
2537 tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
2540 if (rule->mask.src_port_mask > 0) {
2541 tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
2542 tcam_key->key_mask.src_port = rule->mask.src_port_mask;
2545 switch (rule->mask.tunnel_flag) {
/* tunnel packet: match the tunnel flag exactly */
2547 tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
2548 tcam_key->key_mask.tunnel_flag = UINT8_MAX;
/* normal packet: tunnel flag is a don't-care (mask 0) */
2552 tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
2553 tcam_key->key_mask.tunnel_flag = 0;
2557 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
/* inner (tunnelled) ports overwrite the outer port match above */
2561 if (rule->mask.tunnel_inner_dst_port_mask > 0) {
2562 tcam_key->key_info.dst_port =
2563 rule->hinic_fdir.tunnel_inner_dst_port;
2564 tcam_key->key_mask.dst_port =
2565 rule->mask.tunnel_inner_dst_port_mask;
2568 if (rule->mask.tunnel_inner_src_port_mask > 0) {
2569 tcam_key->key_info.src_port =
2570 rule->hinic_fdir.tunnel_inner_src_port;
2571 tcam_key->key_mask.src_port =
2572 rule->mask.tunnel_inner_src_port_mask;
2575 switch (rule->mask.proto_mask) {
2577 tcam_key->key_info.protocol = rule->hinic_fdir.proto;
2578 tcam_key->key_mask.protocol = UINT8_MAX;
/* NOTE(review): message says "tunnel flag" but this is the proto_mask
 * switch -- looks copy-pasted; verify against upstream source. */
2585 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
/* always match on this function's id (top bit cleared) */
2589 tcam_key->key_mask.function_id = UINT16_MAX;
2590 tcam_key->key_info.function_id =
2591 hinic_global_func_id(nic_dev->hwdev) & 0x7fff;
/*
 * hinic_fdir_tcam_ipv6_init - fill the IPv6 TCAM key/mask from the rule.
 * The 16 address bytes are packed big-endian into eight 16-bit key words
 * (byte[2i] << 8 | byte[2i+1]); the address is always an exact match
 * (all eight mask words set to UINT16_MAX).
 * NOTE(review): case labels/returns are missing from this excerpt;
 * comments describe only the visible statements.
 */
2596 static int hinic_fdir_tcam_ipv6_init(struct rte_eth_dev *dev,
2597 struct hinic_fdir_rule *rule,
2598 struct tag_tcam_key *tcam_key)
2600 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2602 switch (rule->mask.dst_ipv6_mask) {
2604 tcam_key->key_info_ipv6.ipv6_key0 =
2605 ((rule->hinic_fdir.dst_ipv6[0] << 8) & 0xff00) |
2606 rule->hinic_fdir.dst_ipv6[1];
2607 tcam_key->key_info_ipv6.ipv6_key1 =
2608 ((rule->hinic_fdir.dst_ipv6[2] << 8) & 0xff00) |
2609 rule->hinic_fdir.dst_ipv6[3];
2610 tcam_key->key_info_ipv6.ipv6_key2 =
2611 ((rule->hinic_fdir.dst_ipv6[4] << 8) & 0xff00) |
2612 rule->hinic_fdir.dst_ipv6[5];
2613 tcam_key->key_info_ipv6.ipv6_key3 =
2614 ((rule->hinic_fdir.dst_ipv6[6] << 8) & 0xff00) |
2615 rule->hinic_fdir.dst_ipv6[7];
2616 tcam_key->key_info_ipv6.ipv6_key4 =
2617 ((rule->hinic_fdir.dst_ipv6[8] << 8) & 0xff00) |
2618 rule->hinic_fdir.dst_ipv6[9];
2619 tcam_key->key_info_ipv6.ipv6_key5 =
2620 ((rule->hinic_fdir.dst_ipv6[10] << 8) & 0xff00) |
2621 rule->hinic_fdir.dst_ipv6[11];
2622 tcam_key->key_info_ipv6.ipv6_key6 =
2623 ((rule->hinic_fdir.dst_ipv6[12] << 8) & 0xff00) |
2624 rule->hinic_fdir.dst_ipv6[13];
2625 tcam_key->key_info_ipv6.ipv6_key7 =
2626 ((rule->hinic_fdir.dst_ipv6[14] << 8) & 0xff00) |
2627 rule->hinic_fdir.dst_ipv6[15];
/* exact match across the full 128-bit destination address */
2628 tcam_key->key_mask_ipv6.ipv6_key0 = UINT16_MAX;
2629 tcam_key->key_mask_ipv6.ipv6_key1 = UINT16_MAX;
2630 tcam_key->key_mask_ipv6.ipv6_key2 = UINT16_MAX;
2631 tcam_key->key_mask_ipv6.ipv6_key3 = UINT16_MAX;
2632 tcam_key->key_mask_ipv6.ipv6_key4 = UINT16_MAX;
2633 tcam_key->key_mask_ipv6.ipv6_key5 = UINT16_MAX;
2634 tcam_key->key_mask_ipv6.ipv6_key6 = UINT16_MAX;
2635 tcam_key->key_mask_ipv6.ipv6_key7 = UINT16_MAX;
2642 PMD_DRV_LOG(ERR, "invalid dst_ipv6 mask");
2646 if (rule->mask.dst_port_mask > 0) {
2647 tcam_key->key_info_ipv6.dst_port = rule->hinic_fdir.dst_port;
2648 tcam_key->key_mask_ipv6.dst_port = rule->mask.dst_port_mask;
2651 switch (rule->mask.proto_mask) {
/* protocol field is 7 bits wide in the IPv6 key layout */
2653 tcam_key->key_info_ipv6.protocol =
2654 (rule->hinic_fdir.proto) & 0x7F;
2655 tcam_key->key_mask_ipv6.protocol = 0x7F;
2662 PMD_DRV_LOG(ERR, "invalid tunnel flag mask");
/* flag the key as IPv6 and bind it to this function id */
2666 tcam_key->key_info_ipv6.ipv6_flag = 1;
2667 tcam_key->key_mask_ipv6.ipv6_flag = 1;
2669 tcam_key->key_mask_ipv6.function_id = UINT8_MAX;
2670 tcam_key->key_info_ipv6.function_id =
2671 (u8)hinic_global_func_id(nic_dev->hwdev);
/*
 * hinic_fdir_tcam_info_init - dispatch TCAM key construction by address
 * family (full dst IPv4 mask -> v4 path, dst IPv6 mask -> v6 path), set
 * the target queue, then compute the x/y hardware key pair.
 */
2676 static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
2677 struct hinic_fdir_rule *rule,
2678 struct tag_tcam_key *tcam_key,
2679 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2683 if (rule->mask.dst_ipv4_mask == UINT32_MAX)
2684 ret = hinic_fdir_tcam_ipv4_init(dev, rule, tcam_key);
2685 else if (rule->mask.dst_ipv6_mask == UINT16_MAX)
2686 ret = hinic_fdir_tcam_ipv6_init(dev, rule, tcam_key);
2691 fdir_tcam_rule->data.qid = rule->queue;
2693 tcam_key_calculate(tcam_key, fdir_tcam_rule);
/*
 * hinic_tcam_filter_lookup - linear search of the TCAM filter list for an
 * entry whose whole tag_tcam_key matches (full-struct memcmp).
 */
2698 static inline struct hinic_tcam_filter *
2699 hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
2700 struct tag_tcam_key *key)
2702 struct hinic_tcam_filter *it;
2704 TAILQ_FOREACH(it, filter_list, entries) {
2705 if (memcmp(key, &it->tcam_key,
2706 sizeof(struct tag_tcam_key)) == 0) {
/*
 * hinic_lookup_new_tcam_filter - find the first free slot in the TCAM
 * index array.  Capacity depends on function type (VF vs PF).  On
 * success the slot index is stored in both the filter and *tcam_index.
 */
2714 static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
2715 struct hinic_tcam_info *tcam_info,
2716 struct hinic_tcam_filter *tcam_filter,
2721 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2723 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2724 max_index = HINIC_VF_MAX_TCAM_FILTERS;
2726 max_index = HINIC_PF_MAX_TCAM_FILTERS;
/* 0 in the index array marks a free slot */
2728 for (index = 0; index < max_index; index++) {
2729 if (tcam_info->tcam_index_array[index] == 0)
/* no free slot found: table is full for this function */
2733 if (index == max_index) {
2734 PMD_DRV_LOG(ERR, "function 0x%x tcam filters only support %d filter rules",
2735 hinic_global_func_id(nic_dev->hwdev), max_index);
2739 tcam_filter->index = index;
2740 *tcam_index = index;
/*
 * hinic_add_tcam_filter - install one TCAM fdir rule:
 *  1. reserve a free slot index;
 *  2. on the first rule, allocate a TCAM block (VF or PF type);
 *  3. translate the slot into a hardware rule index and add the rule;
 *  4. on the first rule, also enable the fdir/TCAM filter engines
 *     (rolled back on failure);
 *  5. link the filter into the list and update bookkeeping.
 * NOTE(review): error-check lines between calls are missing from this
 * excerpt; comments describe only the visible statements.
 */
2745 static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
2746 struct hinic_tcam_filter *tcam_filter,
2747 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2749 struct hinic_tcam_info *tcam_info =
2750 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2751 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2753 u16 tcam_block_index = 0;
2756 if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
/* first rule ever: allocate the backing TCAM block */
2759 if (tcam_info->tcam_rule_nums == 0) {
2760 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2761 rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2762 HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
2764 PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
2768 rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2769 HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
2771 PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
2776 tcam_info->tcam_block_index = tcam_block_index;
2778 tcam_block_index = tcam_info->tcam_block_index;
/* hardware rule index = block base + slot (VF and PF use different bases) */
2781 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2782 fdir_tcam_rule->index =
2783 HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
2785 fdir_tcam_rule->index =
2786 tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
2789 rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
2791 PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!");
2795 PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x,"
2796 "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeed",
2797 hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
2798 fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
2799 tcam_info->tcam_rule_nums + 1);
/* first rule: turn on the filter engines; undo the rule on failure */
2801 if (tcam_info->tcam_rule_nums == 0) {
2802 rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
2804 (void)hinic_del_tcam_rule(nic_dev->hwdev,
2805 fdir_tcam_rule->index);
/* UNSUPPORTED is tolerated here -- older firmware, presumably; confirm */
2809 rc = hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, true);
2810 if (rc && rc != HINIC_MGMT_CMD_UNSUPPORTED) {
2811 (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0,
2813 (void)hinic_del_tcam_rule(nic_dev->hwdev,
2814 fdir_tcam_rule->index);
2819 TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);
2821 tcam_info->tcam_index_array[index] = 1;
2822 tcam_info->tcam_rule_nums++;
/*
 * hinic_del_tcam_filter - remove one TCAM fdir rule: recompute its
 * hardware index (VF/PF base differs), delete the rule in firmware,
 * unlink and free the software filter, and free the TCAM block when the
 * last rule is gone.
 */
2827 static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
2828 struct hinic_tcam_filter *tcam_filter)
2830 struct hinic_tcam_info *tcam_info =
2831 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2832 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2834 u16 tcam_block_index = tcam_info->tcam_block_index;
/* same index arithmetic as the add path */
2838 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2839 index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
2841 block_type = HINIC_TCAM_BLOCK_TYPE_VF;
2843 index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
2845 block_type = HINIC_TCAM_BLOCK_TYPE_PF;
2848 rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
2850 PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!");
2854 PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
2855 "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeed",
2856 hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
2857 tcam_info->tcam_rule_nums - 1);
2859 TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);
/* release the slot before freeing the entry that records it */
2861 tcam_info->tcam_index_array[tcam_filter->index] = 0;
2863 rte_free(tcam_filter);
2865 tcam_info->tcam_rule_nums--;
/* last rule removed: give the TCAM block back to firmware */
2867 if (tcam_info->tcam_rule_nums == 0) {
2868 (void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
/*
 * hinic_add_del_tcam_fdir_filter - add (add == true) or remove
 * (add == false) a TCAM-mode fdir filter.  Builds the TCAM key, rejects
 * duplicate adds / missing deletes via list lookup, then delegates to
 * hinic_add_tcam_filter() / hinic_del_tcam_filter().
 */
2875 static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
2876 struct hinic_fdir_rule *rule, bool add)
2878 struct hinic_tcam_info *tcam_info =
2879 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2880 struct hinic_tcam_filter *tcam_filter;
2881 struct tag_tcam_cfg_rule fdir_tcam_rule;
2882 struct tag_tcam_key tcam_key;
2885 memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
2886 memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));
2888 ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
2890 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2894 tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
/* adding an existing key or deleting a missing one is an error */
2896 if (tcam_filter != NULL && add) {
2897 PMD_DRV_LOG(ERR, "Filter exists.");
2900 if (tcam_filter == NULL && !add) {
2901 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2906 tcam_filter = rte_zmalloc("hinic_5tuple_filter",
2907 sizeof(struct hinic_tcam_filter), 0);
2908 if (tcam_filter == NULL)
2910 (void)rte_memcpy(&tcam_filter->tcam_key,
2911 &tcam_key, sizeof(struct tag_tcam_key));
2912 tcam_filter->queue = fdir_tcam_rule.data.qid;
2914 ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
/* install failed: drop the half-built software entry */
2916 rte_free(tcam_filter);
/* report back the hardware index the rule landed at */
2920 rule->tcam_index = fdir_tcam_rule.index;
2923 PMD_DRV_LOG(INFO, "begin to hinic_del_tcam_filter");
2924 ret = hinic_del_tcam_filter(dev, tcam_filter);
2933 * Create or destroy a flow rule.
2934 * Theoretically one rule can match more than one filter.
2935 * We will let it use the first filter it hits.
2936 * So, the sequence matters.
/*
 * hinic_flow_create - rte_flow create callback.  Tries the parsers in a
 * fixed order (ntuple -> ethertype -> fdir); the first parser that
 * accepts the pattern decides the filter type.  On success the filter is
 * programmed in hardware, recorded in the matching per-type list, and a
 * tracking hinic_flow_mem element is kept so flush can free the flow.
 * NOTE(review): the parse/success checks between calls are missing from
 * this excerpt; comments describe only the visible statements.
 */
2938 static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
2939 const struct rte_flow_attr *attr,
2940 const struct rte_flow_item pattern[],
2941 const struct rte_flow_action actions[],
2942 struct rte_flow_error *error)
2945 struct rte_eth_ntuple_filter ntuple_filter;
2946 struct rte_eth_ethertype_filter ethertype_filter;
2947 struct hinic_fdir_rule fdir_rule;
2948 struct rte_flow *flow = NULL;
2949 struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2950 struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2951 struct hinic_fdir_rule_ele *fdir_rule_ptr;
2952 struct hinic_flow_mem *hinic_flow_mem_ptr;
2953 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2955 flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
2957 PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
2961 hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
2962 sizeof(struct hinic_flow_mem), 0);
2963 if (!hinic_flow_mem_ptr) {
2964 PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
/* track the flow so hinic_filterlist_flush() can release it */
2969 hinic_flow_mem_ptr->flow = flow;
2970 TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
2973 /* Add ntuple filter */
2974 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2975 ret = hinic_parse_ntuple_filter(dev, attr, pattern,
2976 actions, &ntuple_filter, error);
2978 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2980 ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
2981 sizeof(struct hinic_ntuple_filter_ele), 0);
2982 if (ntuple_filter_ptr == NULL) {
2983 PMD_DRV_LOG(ERR, "Failed to allocate ntuple_filter_ptr");
2986 rte_memcpy(&ntuple_filter_ptr->filter_info,
2988 sizeof(struct rte_eth_ntuple_filter));
2989 TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
2990 ntuple_filter_ptr, entries);
2991 flow->rule = ntuple_filter_ptr;
2992 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2994 PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
2995 hinic_global_func_id(nic_dev->hwdev));
3001 /* Add ethertype filter */
3002 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3003 ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
3004 &ethertype_filter, error);
3006 ret = hinic_add_del_ethertype_filter(dev, &ethertype_filter,
3009 ethertype_filter_ptr =
3010 rte_zmalloc("hinic_ethertype_filter",
3011 sizeof(struct hinic_ethertype_filter_ele), 0);
3012 if (ethertype_filter_ptr == NULL) {
3013 PMD_DRV_LOG(ERR, "Failed to allocate ethertype_filter_ptr");
3016 rte_memcpy(&ethertype_filter_ptr->filter_info,
3018 sizeof(struct rte_eth_ethertype_filter));
3019 TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
3020 ethertype_filter_ptr, entries);
3021 flow->rule = ethertype_filter_ptr;
3022 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3024 PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x",
3025 hinic_global_func_id(nic_dev->hwdev));
3031 /* Add fdir filter */
3032 memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
3033 ret = hinic_parse_fdir_filter(dev, attr, pattern,
3034 actions, &fdir_rule, error);
/* fdir has two programming modes: normal key filter or TCAM rule */
3036 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
3037 ret = hinic_add_del_fdir_filter(dev,
3039 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
3040 ret = hinic_add_del_tcam_fdir_filter(dev,
3043 PMD_DRV_LOG(INFO, "flow fdir rule create failed, rule mode wrong");
3047 fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
3048 sizeof(struct hinic_fdir_rule_ele), 0);
3049 if (fdir_rule_ptr == NULL) {
3050 PMD_DRV_LOG(ERR, "Failed to allocate fdir_rule_ptr");
3053 rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
3054 sizeof(struct hinic_fdir_rule));
3055 TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
3056 fdir_rule_ptr, entries);
3057 flow->rule = fdir_rule_ptr;
3058 flow->filter_type = RTE_ETH_FILTER_FDIR;
3060 PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id : 0x%x",
3061 hinic_global_func_id(nic_dev->hwdev));
/* common failure path: untrack, report, and free the bookkeeping node */
3068 TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
3069 rte_flow_error_set(error, -ret,
3070 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3071 "Failed to create flow.");
3072 rte_free(hinic_flow_mem_ptr);
3077 /* Destroy a flow rule on hinic. */
/*
 * hinic_flow_destroy - rte_flow destroy callback.  Dispatches on the
 * flow's recorded filter type, undoes the hardware filter via the
 * matching add/del helper with FALSE, unlinks and frees the per-type
 * list element, then removes the flow's tracking node from
 * hinic_flow_list.
 */
3078 static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
3079 struct rte_flow_error *error)
3082 struct rte_flow *pmd_flow = flow;
3083 enum rte_filter_type filter_type = pmd_flow->filter_type;
3084 struct rte_eth_ntuple_filter ntuple_filter;
3085 struct rte_eth_ethertype_filter ethertype_filter;
3086 struct hinic_fdir_rule fdir_rule;
3087 struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3088 struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3089 struct hinic_fdir_rule_ele *fdir_rule_ptr;
3090 struct hinic_flow_mem *hinic_flow_mem_ptr;
3091 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3093 switch (filter_type) {
3094 case RTE_ETH_FILTER_NTUPLE:
3095 ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
3097 rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
3098 sizeof(struct rte_eth_ntuple_filter));
3099 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3101 TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
3102 ntuple_filter_ptr, entries);
3103 rte_free(ntuple_filter_ptr);
3106 case RTE_ETH_FILTER_ETHERTYPE:
3107 ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
3109 rte_memcpy(&ethertype_filter,
3110 &ethertype_filter_ptr->filter_info,
3111 sizeof(struct rte_eth_ethertype_filter))
3112 ret = hinic_add_del_ethertype_filter(dev,
3113 &ethertype_filter, FALSE);
3115 TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3116 ethertype_filter_ptr, entries);
3117 rte_free(ethertype_filter_ptr);
3120 case RTE_ETH_FILTER_FDIR:
3121 fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
3122 rte_memcpy(&fdir_rule,
3123 &fdir_rule_ptr->filter_info,
3124 sizeof(struct hinic_fdir_rule));
/* fdir removal must use the same mode the rule was created with */
3125 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
3126 ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
3127 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
3128 ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
3131 PMD_DRV_LOG(ERR, "FDIR Filter type is wrong!");
3135 TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
3136 fdir_rule_ptr, entries);
3137 rte_free(fdir_rule_ptr);
3141 PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported",
3148 rte_flow_error_set(error, EINVAL,
3149 RTE_FLOW_ERROR_TYPE_HANDLE,
3150 NULL, "Failed to destroy flow");
/* drop this flow's tracking node from the global flow list */
3154 TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
3155 if (hinic_flow_mem_ptr->flow == pmd_flow) {
3156 TAILQ_REMOVE(&nic_dev->hinic_flow_list,
3157 hinic_flow_mem_ptr, entries);
3158 rte_free(hinic_flow_mem_ptr);
3164 PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
3165 hinic_global_func_id(nic_dev->hwdev));
3170 /* Remove all the n-tuple filters */
/*
 * hinic_clear_all_ntuple_filter - drain the 5-tuple filter list, removing
 * each entry (and its hardware state) via hinic_remove_5tuple_filter().
 */
3171 static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
3173 struct hinic_filter_info *filter_info =
3174 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
3175 struct hinic_5tuple_filter *p_5tuple;
3177 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
3178 hinic_remove_5tuple_filter(dev, p_5tuple);
3181 /* Remove all the ether type filters */
3182 static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
3184 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3185 struct hinic_filter_info *filter_info =
3186 HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
3189 if (filter_info->type_mask &
3190 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
3191 hinic_ethertype_filter_remove(filter_info,
3192 HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
3193 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
3194 filter_info->qid, false, true);
3196 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
3199 if (filter_info->type_mask &
3200 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
3201 hinic_ethertype_filter_remove(filter_info,
3202 HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
3203 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
3204 filter_info->qid, false, true);
3208 PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
3209 filter_info->pkt_type);
3212 /* Remove all the flow director filters */
/*
 * hinic_clear_all_fdir_filter - drain all TCAM fdir filters, then disable
 * both the normal fdir and TCAM filter engines and flush any residual
 * TCAM rules in firmware.  All firmware return codes are intentionally
 * ignored (best-effort teardown).
 */
3213 static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
3215 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3216 struct hinic_tcam_info *tcam_info =
3217 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
3218 struct hinic_tcam_filter *tcam_filter_ptr;
3220 while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
3221 (void)hinic_del_tcam_filter(dev, tcam_filter_ptr);
3223 (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
3225 (void)hinic_set_fdir_tcam_rule_filter(nic_dev->hwdev, false);
3227 (void)hinic_flush_tcam_rule(nic_dev->hwdev);
/*
 * hinic_filterlist_flush - free every software bookkeeping list: the
 * three per-type filter lists and the flow tracking list (including each
 * tracked rte_flow object).  Hardware state is untouched here; callers
 * clear hardware first via the hinic_clear_all_* helpers.
 */
3230 static void hinic_filterlist_flush(struct rte_eth_dev *dev)
3232 struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3233 struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3234 struct hinic_fdir_rule_ele *fdir_rule_ptr;
3235 struct hinic_flow_mem *hinic_flow_mem_ptr;
3236 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3238 while ((ntuple_filter_ptr =
3239 TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
3240 TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
3242 rte_free(ntuple_filter_ptr);
3245 while ((ethertype_filter_ptr =
3246 TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
3247 TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3248 ethertype_filter_ptr,
3250 rte_free(ethertype_filter_ptr);
3253 while ((fdir_rule_ptr =
3254 TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
3255 TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
3257 rte_free(fdir_rule_ptr);
/* each tracking node owns its rte_flow: free both */
3260 while ((hinic_flow_mem_ptr =
3261 TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
3262 TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
3264 rte_free(hinic_flow_mem_ptr->flow);
3265 rte_free(hinic_flow_mem_ptr);
3269 /* Destroy all flow rules associated with a port on hinic. */
/*
 * hinic_flow_flush - rte_flow flush callback: clear every filter type in
 * hardware, then release all software bookkeeping.
 */
3270 static int hinic_flow_flush(struct rte_eth_dev *dev,
3271 __rte_unused struct rte_flow_error *error)
3273 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3275 hinic_clear_all_ntuple_filter(dev);
3276 hinic_clear_all_ethertype_filter(dev);
3277 hinic_clear_all_fdir_filter(dev);
3278 hinic_filterlist_flush(dev);
3280 PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x",
3281 hinic_global_func_id(nic_dev->hwdev));
/*
 * hinic_destroy_fdir_filter - device-teardown entry point; performs the
 * same full cleanup as the flow-flush callback (non-static, called from
 * the ethdev close path).
 */
3285 void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
3287 hinic_clear_all_ntuple_filter(dev);
3288 hinic_clear_all_ethertype_filter(dev);
3289 hinic_clear_all_fdir_filter(dev);
3290 hinic_filterlist_flush(dev);
3293 const struct rte_flow_ops hinic_flow_ops = {
3294 .validate = hinic_flow_validate,
3295 .create = hinic_flow_create,
3296 .destroy = hinic_flow_destroy,
3297 .flush = hinic_flow_flush,