1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Huawei Technologies Co., Ltd
11 #include <rte_byteorder.h>
12 #include <rte_common.h>
13 #include <rte_ether.h>
14 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
17 #include <rte_flow_driver.h>
18 #include "base/hinic_compat.h"
19 #include "base/hinic_pmd_hwdev.h"
20 #include "base/hinic_pmd_hwif.h"
21 #include "base/hinic_pmd_wq.h"
22 #include "base/hinic_pmd_cmdq.h"
23 #include "base/hinic_pmd_niccfg.h"
24 #include "hinic_pmd_ethdev.h"
26 #define HINIC_MAX_RX_QUEUE_NUM 64
29 #define UINT8_MAX (u8)(~((u8)0)) /* 0xFF */
30 #define UINT16_MAX (u16)(~((u16)0)) /* 0xFFFF */
31 #define UINT32_MAX (u32)(~((u32)0)) /* 0xFFFFFFFF */
32 #define UINT64_MAX (u64)(~((u64)0)) /* 0xFFFFFFFFFFFFFFFF */
33 #define ASCII_MAX (0x7F)
37 #define PA_ETH_TYPE_ROCE 0
38 #define PA_ETH_TYPE_IPV4 1
39 #define PA_ETH_TYPE_IPV6 2
40 #define PA_ETH_TYPE_OTHER 3
42 #define PA_IP_PROTOCOL_TYPE_TCP 1
43 #define PA_IP_PROTOCOL_TYPE_UDP 2
44 #define PA_IP_PROTOCOL_TYPE_ICMP 3
45 #define PA_IP_PROTOCOL_TYPE_IPV4_IGMP 4
46 #define PA_IP_PROTOCOL_TYPE_SCTP 5
47 #define PA_IP_PROTOCOL_TYPE_VRRP 112
49 #define IP_HEADER_PROTOCOL_TYPE_TCP 6
50 #define IP_HEADER_PROTOCOL_TYPE_UDP 17
51 #define IP_HEADER_PROTOCOL_TYPE_ICMP 1
53 #define FDIR_TCAM_NORMAL_PACKET 0
54 #define FDIR_TCAM_TUNNEL_PACKET 1
56 #define HINIC_MIN_N_TUPLE_PRIO 1
57 #define HINIC_MAX_N_TUPLE_PRIO 7
59 /* TCAM type mask in hardware */
60 #define TCAM_PKT_BGP_SPORT 1
61 #define TCAM_PKT_VRRP 2
62 #define TCAM_PKT_BGP_DPORT 3
63 #define TCAM_PKT_LACP 4
65 #define BGP_DPORT_ID 179
66 #define IPPROTO_VRRP 112
68 /* Packet type defined in hardware to perform filter */
69 #define PKT_IGMP_IPV4_TYPE 64
70 #define PKT_ICMP_IPV4_TYPE 65
71 #define PKT_ICMP_IPV6_TYPE 66
72 #define PKT_ICMP_IPV6RS_TYPE 67
73 #define PKT_ICMP_IPV6RA_TYPE 68
74 #define PKT_ICMP_IPV6NS_TYPE 69
75 #define PKT_ICMP_IPV6NA_TYPE 70
76 #define PKT_ICMP_IPV6RE_TYPE 71
77 #define PKT_DHCP_IPV4_TYPE 72
78 #define PKT_DHCP_IPV6_TYPE 73
79 #define PKT_LACP_TYPE 74
80 #define PKT_ARP_REQ_TYPE 79
81 #define PKT_ARP_REP_TYPE 80
82 #define PKT_ARP_TYPE 81
83 #define PKT_BGPD_DPORT_TYPE 83
84 #define PKT_BGPD_SPORT_TYPE 84
85 #define PKT_VRRP_TYPE 85
87 #define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
88 (&((struct hinic_nic_dev *)nic_dev)->filter)
90 #define HINIC_DEV_PRIVATE_TO_TCAM_INFO(nic_dev) \
91 (&((struct hinic_nic_dev *)nic_dev)->tcam)
94 enum hinic_atr_flow_type {
95 HINIC_ATR_FLOW_TYPE_IPV4_DIP = 0x1,
96 HINIC_ATR_FLOW_TYPE_IPV4_SIP = 0x2,
97 HINIC_ATR_FLOW_TYPE_DPORT = 0x3,
98 HINIC_ATR_FLOW_TYPE_SPORT = 0x4,
101 /* Structure to store fdir's info. */
102 struct hinic_fdir_info {
109 * An endless loop can never happen under the assumptions below:
110 * 1. there is at least one non-void item (END)
111 * 2. cur is before END.
113 static inline const struct rte_flow_item *
114 next_no_void_pattern(const struct rte_flow_item pattern[],
115 const struct rte_flow_item *cur)
117 const struct rte_flow_item *next =
118 cur ? cur + 1 : &pattern[0];
120 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
126 static inline const struct rte_flow_action *
127 next_no_void_action(const struct rte_flow_action actions[],
128 const struct rte_flow_action *cur)
130 const struct rte_flow_action *next =
131 cur ? cur + 1 : &actions[0];
133 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
139 static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
140 struct rte_flow_error *error)
142 /* Must be input direction */
143 if (!attr->ingress) {
144 rte_flow_error_set(error, EINVAL,
145 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
146 attr, "Only support ingress.");
151 rte_flow_error_set(error, EINVAL,
152 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
153 attr, "Not support egress.");
157 if (attr->priority) {
158 rte_flow_error_set(error, EINVAL,
159 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
160 attr, "Not support priority.");
165 rte_flow_error_set(error, EINVAL,
166 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
167 attr, "Not support group.");
174 static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
175 const struct rte_flow_item *pattern,
176 const struct rte_flow_action *actions,
177 struct rte_flow_error *error)
180 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
181 NULL, "NULL pattern.");
186 rte_flow_error_set(error, EINVAL,
187 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
188 NULL, "NULL action.");
193 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
194 NULL, "NULL attribute.");
201 static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
202 struct rte_flow_error *error)
204 /* The first non-void item should be MAC */
205 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
206 rte_flow_error_set(error, EINVAL,
207 RTE_FLOW_ERROR_TYPE_ITEM,
208 item, "Not supported by ethertype filter");
212 /* Not supported last point for range */
214 rte_flow_error_set(error, EINVAL,
215 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
216 item, "Not supported last point for range");
220 /* Get the MAC info. */
221 if (!item->spec || !item->mask) {
222 rte_flow_error_set(error, EINVAL,
223 RTE_FLOW_ERROR_TYPE_ITEM,
224 item, "Not supported by ethertype filter");
231 hinic_parse_ethertype_action(const struct rte_flow_action *actions,
232 const struct rte_flow_action *act,
233 const struct rte_flow_action_queue *act_q,
234 struct rte_eth_ethertype_filter *filter,
235 struct rte_flow_error *error)
238 act = next_no_void_action(actions, NULL);
239 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
240 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
241 rte_flow_error_set(error, EINVAL,
242 RTE_FLOW_ERROR_TYPE_ACTION,
243 act, "Not supported action.");
247 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
248 act_q = (const struct rte_flow_action_queue *)act->conf;
249 filter->queue = act_q->index;
251 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
254 /* Check if the next non-void item is END */
255 act = next_no_void_action(actions, act);
256 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
257 rte_flow_error_set(error, EINVAL,
258 RTE_FLOW_ERROR_TYPE_ACTION,
259 act, "Not supported action.");
267 * Parse the rule to see if it is an ethertype rule,
268 * and fill in the ethertype filter info along the way.
270 * The first not void item can be ETH.
271 * The next not void item must be END.
273 * The first not void action should be QUEUE.
274 * The next not void action should be END.
277 * ETH type 0x0807 0xFFFF
279 * other members in mask and spec should be set to 0x00.
280 * item->last should be NULL.
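 *
 * An illustrative sketch (not part of the driver; queue index and
 * ethertype are example values) of how an application could describe
 * such a rule with the rte_flow API, steering ARP frames to queue 0:
 *
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item_eth eth_spec = {
 *              .type = RTE_BE16(RTE_ETHER_TYPE_ARP),
 *      };
 *      struct rte_flow_item_eth eth_mask = {
 *              .type = RTE_BE16(0xFFFF),
 *      };
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                .spec = &eth_spec, .mask = &eth_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };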
282 static int cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
283 const struct rte_flow_item *pattern,
284 const struct rte_flow_action *actions,
285 struct rte_eth_ethertype_filter *filter,
286 struct rte_flow_error *error)
288 const struct rte_flow_item *item;
289 const struct rte_flow_action *act = NULL;
290 const struct rte_flow_item_eth *eth_spec;
291 const struct rte_flow_item_eth *eth_mask;
292 const struct rte_flow_action_queue *act_q = NULL;
294 if (hinic_check_filter_arg(attr, pattern, actions, error))
297 item = next_no_void_pattern(pattern, NULL);
298 if (hinic_check_ethertype_first_item(item, error))
301 eth_spec = (const struct rte_flow_item_eth *)item->spec;
302 eth_mask = (const struct rte_flow_item_eth *)item->mask;
305 * Mask bits of source MAC address must be full of 0.
306 * Mask bits of destination MAC address must be full
309 if (!rte_is_zero_ether_addr(ð_mask->src) ||
310 (!rte_is_zero_ether_addr(ð_mask->dst) &&
311 !rte_is_broadcast_ether_addr(ð_mask->dst))) {
312 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
313 item, "Invalid ether address mask");
317 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
318 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
319 item, "Invalid ethertype mask");
324 * If mask bits of destination MAC address
325 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
327 if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
328 filter->mac_addr = eth_spec->dst;
329 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
331 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
333 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
335 /* Check if the next non-void item is END. */
336 item = next_no_void_pattern(pattern, item);
337 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
338 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
339 item, "Not supported by ethertype filter.");
343 if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
346 if (hinic_check_ethertype_attr_ele(attr, error))
352 static int hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
353 const struct rte_flow_attr *attr,
354 const struct rte_flow_item pattern[],
355 const struct rte_flow_action actions[],
356 struct rte_eth_ethertype_filter *filter,
357 struct rte_flow_error *error)
359 if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
362 /* NIC doesn't support MAC address. */
363 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
364 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
365 rte_flow_error_set(error, EINVAL,
366 RTE_FLOW_ERROR_TYPE_ITEM,
367 NULL, "Not supported by ethertype filter");
371 if (filter->queue >= dev->data->nb_rx_queues) {
372 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
373 rte_flow_error_set(error, EINVAL,
374 RTE_FLOW_ERROR_TYPE_ITEM,
375 NULL, "Queue index too large");
379 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
380 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
381 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
382 rte_flow_error_set(error, EINVAL,
383 RTE_FLOW_ERROR_TYPE_ITEM,
384 NULL, "IPv4/IPv6 not supported by ethertype filter");
388 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
389 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
390 rte_flow_error_set(error, EINVAL,
391 RTE_FLOW_ERROR_TYPE_ITEM,
392 NULL, "Drop option is unsupported");
396 /* Hinic only supports LACP/ARP ether types */
397 if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
398 filter->ether_type != RTE_ETHER_TYPE_ARP) {
399 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
400 rte_flow_error_set(error, EINVAL,
401 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
402 "only lacp/arp type supported by ethertype filter");
409 static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
410 struct rte_eth_ntuple_filter *filter,
411 struct rte_flow_error *error)
413 /* Must be input direction */
414 if (!attr->ingress) {
415 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
416 rte_flow_error_set(error, EINVAL,
417 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
418 attr, "Only support ingress.");
423 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
424 rte_flow_error_set(error, EINVAL,
425 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
426 attr, "Not support egress.");
430 if (attr->priority > 0xFFFF) {
431 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
432 rte_flow_error_set(error, EINVAL,
433 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
434 attr, "Error priority.");
438 if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
439 attr->priority > HINIC_MAX_N_TUPLE_PRIO)
440 filter->priority = 1;
442 filter->priority = (uint16_t)attr->priority;
448 hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
449 const struct rte_flow_action actions[],
450 struct rte_eth_ntuple_filter *filter,
451 struct rte_flow_error *error)
453 const struct rte_flow_action *act;
455 * n-tuple only supports forwarding,
456 * check if the first not void action is QUEUE.
458 act = next_no_void_action(actions, NULL);
459 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
460 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
461 rte_flow_error_set(error, EINVAL,
462 RTE_FLOW_ERROR_TYPE_ACTION,
463 act, "Flow action type is not QUEUE.");
467 ((const struct rte_flow_action_queue *)act->conf)->index;
469 /* Check if the next not void item is END */
470 act = next_no_void_action(actions, act);
471 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
472 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
473 rte_flow_error_set(error, EINVAL,
474 RTE_FLOW_ERROR_TYPE_ACTION,
475 act, "Next not void item is not END.");
482 static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
483 const struct rte_flow_item pattern[],
484 struct rte_flow_error *error)
486 const struct rte_flow_item *item;
488 /* The first not void item can be MAC or IPv4 */
489 item = next_no_void_pattern(pattern, NULL);
491 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
492 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
493 rte_flow_error_set(error, EINVAL,
494 RTE_FLOW_ERROR_TYPE_ITEM,
495 item, "Not supported by ntuple filter");
500 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
501 /* Not supported last point for range */
503 rte_flow_error_set(error,
505 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
506 item, "Not supported last point for range");
509 /* if the first item is MAC, the content should be NULL */
510 if (item->spec || item->mask) {
511 rte_flow_error_set(error, EINVAL,
512 RTE_FLOW_ERROR_TYPE_ITEM,
513 item, "Not supported by ntuple filter");
516 /* check if the next not void item is IPv4 */
517 item = next_no_void_pattern(pattern, item);
518 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
519 rte_flow_error_set(error,
520 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
521 item, "Not supported by ntuple filter");
531 hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
532 const struct rte_flow_item pattern[],
533 struct rte_eth_ntuple_filter *filter,
534 struct rte_flow_error *error)
536 const struct rte_flow_item_ipv4 *ipv4_spec;
537 const struct rte_flow_item_ipv4 *ipv4_mask;
538 const struct rte_flow_item *item = *in_out_item;
540 /* Get the IPv4 info */
541 if (!item->spec || !item->mask) {
542 rte_flow_error_set(error, EINVAL,
543 RTE_FLOW_ERROR_TYPE_ITEM,
544 item, "Invalid ntuple mask");
547 /* Not supported last point for range */
549 rte_flow_error_set(error, EINVAL,
550 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
551 item, "Not supported last point for range");
555 ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
557 * Only support src & dst addresses, protocol,
558 * others should be masked.
560 if (ipv4_mask->hdr.version_ihl ||
561 ipv4_mask->hdr.type_of_service ||
562 ipv4_mask->hdr.total_length ||
563 ipv4_mask->hdr.packet_id ||
564 ipv4_mask->hdr.fragment_offset ||
565 ipv4_mask->hdr.time_to_live ||
566 ipv4_mask->hdr.hdr_checksum ||
567 !ipv4_mask->hdr.next_proto_id) {
568 rte_flow_error_set(error,
569 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
570 item, "Not supported by ntuple filter");
574 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
575 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
576 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
578 ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
579 filter->dst_ip = ipv4_spec->hdr.dst_addr;
580 filter->src_ip = ipv4_spec->hdr.src_addr;
581 filter->proto = ipv4_spec->hdr.next_proto_id;
583 /* Get next no void item */
584 *in_out_item = next_no_void_pattern(pattern, item);
588 static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
589 const struct rte_flow_item pattern[],
590 struct rte_eth_ntuple_filter *filter,
591 struct rte_flow_error *error)
593 const struct rte_flow_item_tcp *tcp_spec;
594 const struct rte_flow_item_tcp *tcp_mask;
595 const struct rte_flow_item_icmp *icmp_mask;
596 const struct rte_flow_item *item = *in_out_item;
597 u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);
599 if (item->type == RTE_FLOW_ITEM_TYPE_END)
602 /* Get TCP or UDP info */
603 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
604 (!item->spec || !item->mask)) {
605 memset(filter, 0, ntuple_filter_size);
606 rte_flow_error_set(error, EINVAL,
607 RTE_FLOW_ERROR_TYPE_ITEM,
608 item, "Invalid ntuple mask");
612 /* Not supported last point for range */
614 memset(filter, 0, ntuple_filter_size);
615 rte_flow_error_set(error, EINVAL,
616 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
617 item, "Not supported last point for range");
621 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
622 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
625 * Only support src & dst ports, tcp flags,
626 * others should be masked.
628 if (tcp_mask->hdr.sent_seq ||
629 tcp_mask->hdr.recv_ack ||
630 tcp_mask->hdr.data_off ||
631 tcp_mask->hdr.rx_win ||
632 tcp_mask->hdr.cksum ||
633 tcp_mask->hdr.tcp_urp) {
634 memset(filter, 0, ntuple_filter_size);
635 rte_flow_error_set(error, EINVAL,
636 RTE_FLOW_ERROR_TYPE_ITEM,
637 item, "Not supported by ntuple filter");
641 filter->dst_port_mask = tcp_mask->hdr.dst_port;
642 filter->src_port_mask = tcp_mask->hdr.src_port;
643 if (tcp_mask->hdr.tcp_flags == 0xFF) {
644 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
645 } else if (!tcp_mask->hdr.tcp_flags) {
646 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
648 memset(filter, 0, ntuple_filter_size);
649 rte_flow_error_set(error, EINVAL,
650 RTE_FLOW_ERROR_TYPE_ITEM,
651 item, "Not supported by ntuple filter");
655 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
656 filter->dst_port = tcp_spec->hdr.dst_port;
657 filter->src_port = tcp_spec->hdr.src_port;
658 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
659 } else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
660 icmp_mask = (const struct rte_flow_item_icmp *)item->mask;
662 /* All ICMP fields should be masked. */
663 if (icmp_mask->hdr.icmp_cksum ||
664 icmp_mask->hdr.icmp_ident ||
665 icmp_mask->hdr.icmp_seq_nb ||
666 icmp_mask->hdr.icmp_type ||
667 icmp_mask->hdr.icmp_code) {
668 memset(filter, 0, ntuple_filter_size);
669 rte_flow_error_set(error, EINVAL,
670 RTE_FLOW_ERROR_TYPE_ITEM,
671 item, "Not supported by ntuple filter");
676 /* Get next no void item */
677 *in_out_item = next_no_void_pattern(pattern, item);
681 static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
682 struct rte_eth_ntuple_filter *filter,
683 struct rte_flow_error *error)
685 /* Check if the next not void item is END */
686 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
687 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
688 rte_flow_error_set(error, EINVAL,
689 RTE_FLOW_ERROR_TYPE_ITEM,
690 item, "Not supported by ntuple filter");
696 static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
697 const struct rte_flow_item pattern[],
698 struct rte_eth_ntuple_filter *filter,
699 struct rte_flow_error *error)
701 if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
702 hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
703 hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
704 hinic_ntuple_item_check_end(item, filter, error))
711 * Parse the rule to see if it is an n-tuple rule,
712 * and fill in the n-tuple filter info along the way.
714 * The first not void item can be ETH or IPV4.
715 * The second not void item must be IPV4 if the first one is ETH.
716 * The third not void item must be UDP or TCP.
717 * The next not void item must be END.
719 * The first not void action should be QUEUE.
720 * The next not void action should be END.
724 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
725 * dst_addr 192.167.3.50 0xFFFFFFFF
726 * next_proto_id 17 0xFF
727 * UDP/TCP/ src_port 80 0xFFFF
728 * SCTP dst_port 80 0xFFFF
730 * other members in mask and spec should be set to 0x00.
731 * item->last should be NULL.
732 * Please be aware there is an assumption for all the parsers:
733 * rte_flow_item uses big endian, while rte_flow_attr and
734 * rte_flow_action use CPU order.
735 * Because the pattern is used to describe the packets,
736 * normally the packets should use network order.
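 *
 * An illustrative sketch (not part of the driver; addresses, queue index
 * and priority are example values) of an n-tuple rule this parser
 * accepts, matching VRRP (IP protocol 112) packets between two hosts.
 * VRRP is one of the protocols the later 5-tuple setup code accepts:
 *
 *      struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *      struct rte_flow_item_ipv4 ipv4_spec = {
 *              .hdr = {
 *                      .src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *                      .dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
 *                      .next_proto_id = 112,
 *              },
 *      };
 *      struct rte_flow_item_ipv4 ipv4_mask = {
 *              .hdr = {
 *                      .src_addr = RTE_BE32(0xFFFFFFFF),
 *                      .dst_addr = RTE_BE32(0xFFFFFFFF),
 *                      .next_proto_id = 0xFF,
 *              },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ipv4_spec, .mask = &ipv4_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };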
738 static int cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
739 const struct rte_flow_item pattern[],
740 const struct rte_flow_action actions[],
741 struct rte_eth_ntuple_filter *filter,
742 struct rte_flow_error *error)
744 const struct rte_flow_item *item = NULL;
746 if (hinic_check_filter_arg(attr, pattern, actions, error))
749 if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
752 if (hinic_check_ntuple_act_ele(item, actions, filter, error))
755 if (hinic_check_ntuple_attr_ele(attr, filter, error))
761 static int hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
762 const struct rte_flow_attr *attr,
763 const struct rte_flow_item pattern[],
764 const struct rte_flow_action actions[],
765 struct rte_eth_ntuple_filter *filter,
766 struct rte_flow_error *error)
770 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
774 /* Hinic doesn't support tcp flags */
775 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
776 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
777 rte_flow_error_set(error, EINVAL,
778 RTE_FLOW_ERROR_TYPE_ITEM,
779 NULL, "Not supported by ntuple filter");
783 /* Hinic only supports a limited priority range */
784 if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
785 filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
786 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
787 rte_flow_error_set(error, EINVAL,
788 RTE_FLOW_ERROR_TYPE_ITEM,
789 NULL, "Priority not supported by ntuple filter");
793 if (filter->queue >= dev->data->nb_rx_queues)
796 /* Fixed value for hinic */
797 filter->flags = RTE_5TUPLE_FLAGS;
801 static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
802 const struct rte_flow_item pattern[],
803 struct rte_flow_error *error)
805 const struct rte_flow_item *item;
807 /* The first not void item can be MAC or IPv4 or TCP or UDP */
808 item = next_no_void_pattern(pattern, NULL);
810 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
811 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
812 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
813 item->type != RTE_FLOW_ITEM_TYPE_UDP) {
814 rte_flow_error_set(error, EINVAL,
815 RTE_FLOW_ERROR_TYPE_ITEM, item,
816 "Not supported by fdir filter, support mac, ipv4, tcp, udp");
820 /* Not supported last point for range */
822 rte_flow_error_set(error, EINVAL,
823 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
824 "Not supported last point for range");
829 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
830 /* All should be masked. */
831 if (item->spec || item->mask) {
832 rte_flow_error_set(error, EINVAL,
833 RTE_FLOW_ERROR_TYPE_ITEM,
834 item, "Not supported by fdir filter, support mac");
837 /* Check if the next not void item is IPv4 */
838 item = next_no_void_pattern(pattern, item);
839 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
840 rte_flow_error_set(error, EINVAL,
841 RTE_FLOW_ERROR_TYPE_ITEM, item,
842 "Not supported by fdir filter, support mac, ipv4");
851 static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
852 const struct rte_flow_item pattern[],
853 struct hinic_fdir_rule *rule,
854 struct rte_flow_error *error)
856 const struct rte_flow_item_ipv4 *ipv4_spec;
857 const struct rte_flow_item_ipv4 *ipv4_mask;
858 const struct rte_flow_item *item = *in_out_item;
860 /* Get the IPv4 info */
861 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
862 /* Not supported last point for range */
864 rte_flow_error_set(error, EINVAL,
865 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
866 item, "Not supported last point for range");
871 memset(rule, 0, sizeof(struct hinic_fdir_rule));
872 rte_flow_error_set(error, EINVAL,
873 RTE_FLOW_ERROR_TYPE_ITEM,
874 item, "Invalid fdir filter mask");
878 ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
880 * Only support src & dst addresses,
881 * others should be masked.
883 if (ipv4_mask->hdr.version_ihl ||
884 ipv4_mask->hdr.type_of_service ||
885 ipv4_mask->hdr.total_length ||
886 ipv4_mask->hdr.packet_id ||
887 ipv4_mask->hdr.fragment_offset ||
888 ipv4_mask->hdr.time_to_live ||
889 ipv4_mask->hdr.next_proto_id ||
890 ipv4_mask->hdr.hdr_checksum) {
891 rte_flow_error_set(error,
892 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
893 "Not supported by fdir filter, support src,dst ip");
897 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
898 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
899 rule->mode = HINIC_FDIR_MODE_NORMAL;
903 (const struct rte_flow_item_ipv4 *)item->spec;
904 rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
905 rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
909 * Check if the next not void item is
912 item = next_no_void_pattern(pattern, item);
913 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
914 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
915 item->type != RTE_FLOW_ITEM_TYPE_ICMP &&
916 item->type != RTE_FLOW_ITEM_TYPE_ANY &&
917 item->type != RTE_FLOW_ITEM_TYPE_END) {
918 memset(rule, 0, sizeof(struct hinic_fdir_rule));
919 rte_flow_error_set(error, EINVAL,
920 RTE_FLOW_ERROR_TYPE_ITEM, item,
921 "Not supported by fdir filter, support tcp, udp, end");
930 static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
931 __rte_unused const struct rte_flow_item pattern[],
932 __rte_unused struct hinic_fdir_rule *rule,
933 struct rte_flow_error *error)
935 const struct rte_flow_item *item = *in_out_item;
937 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
938 rte_flow_error_set(error, EINVAL,
939 RTE_FLOW_ERROR_TYPE_ITEM,
940 item, "Not supported by normal fdir filter, l4 not supported");
948 static int hinic_normal_item_check_end(const struct rte_flow_item *item,
949 struct hinic_fdir_rule *rule,
950 struct rte_flow_error *error)
952 /* Check if the next not void item is END */
953 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
954 memset(rule, 0, sizeof(struct hinic_fdir_rule));
955 rte_flow_error_set(error, EINVAL,
956 RTE_FLOW_ERROR_TYPE_ITEM,
957 item, "Not supported by fdir filter, support end");
964 static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
965 const struct rte_flow_item pattern[],
966 struct hinic_fdir_rule *rule,
967 struct rte_flow_error *error)
969 if (hinic_normal_item_check_ether(&item, pattern, error) ||
970 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
971 hinic_normal_item_check_l4(&item, pattern, rule, error) ||
972 hinic_normal_item_check_end(item, rule, error))
979 hinic_tcam_normal_item_check_l4(const struct rte_flow_item **in_out_item,
980 const struct rte_flow_item pattern[],
981 struct hinic_fdir_rule *rule,
982 struct rte_flow_error *error)
984 const struct rte_flow_item *item = *in_out_item;
985 const struct rte_flow_item_tcp *tcp_spec;
986 const struct rte_flow_item_tcp *tcp_mask;
987 const struct rte_flow_item_udp *udp_spec;
988 const struct rte_flow_item_udp *udp_mask;
990 if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
991 rule->mode = HINIC_FDIR_MODE_TCAM;
992 rule->mask.proto_mask = UINT16_MAX;
993 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_ICMP;
994 } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
995 rule->mode = HINIC_FDIR_MODE_TCAM;
996 } else if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
998 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
999 rte_flow_error_set(error, EINVAL,
1000 RTE_FLOW_ERROR_TYPE_ITEM,
1001 item, "Not supported by fdir filter, support src, dst ports");
1005 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1008 * Only support src & dst ports, tcp flags,
1009 * others should be masked.
1011 if (tcp_mask->hdr.sent_seq ||
1012 tcp_mask->hdr.recv_ack ||
1013 tcp_mask->hdr.data_off ||
1014 tcp_mask->hdr.rx_win ||
1015 tcp_mask->hdr.cksum ||
1016 tcp_mask->hdr.tcp_urp) {
1017 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1018 rte_flow_error_set(error, EINVAL,
1019 RTE_FLOW_ERROR_TYPE_ITEM,
1020 item, "Not supported by fdir normal tcam filter");
1024 rule->mode = HINIC_FDIR_MODE_TCAM;
1025 rule->mask.proto_mask = UINT16_MAX;
1026 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1027 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1029 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
1031 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1032 rule->hinic_fdir.dst_port = tcp_spec->hdr.dst_port;
1033 rule->hinic_fdir.src_port = tcp_spec->hdr.src_port;
1035 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1037 * Only care about src & dst ports,
1038 * others should be masked.
1041 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1042 rte_flow_error_set(error, EINVAL,
1043 RTE_FLOW_ERROR_TYPE_ITEM,
1044 item, "Not supported by fdir filter, support src, dst ports");
1048 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1049 if (udp_mask->hdr.dgram_len ||
1050 udp_mask->hdr.dgram_cksum) {
1051 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1052 rte_flow_error_set(error, EINVAL,
1053 RTE_FLOW_ERROR_TYPE_ITEM,
1054 item, "Not supported by fdir filter, support udp");
1058 rule->mode = HINIC_FDIR_MODE_TCAM;
1059 rule->mask.proto_mask = UINT16_MAX;
1060 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1061 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1063 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
1065 udp_spec = (const struct rte_flow_item_udp *)item->spec;
1066 rule->hinic_fdir.src_port = udp_spec->hdr.src_port;
1067 rule->hinic_fdir.dst_port = udp_spec->hdr.dst_port;
1070 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1071 rte_flow_error_set(error, EINVAL,
1072 RTE_FLOW_ERROR_TYPE_ITEM,
1073 item, "Not supported by fdir filter tcam normal, l4 only supports icmp, tcp, udp, any");
1077 item = next_no_void_pattern(pattern, item);
1078 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1079 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1080 rte_flow_error_set(error, EINVAL,
1081 RTE_FLOW_ERROR_TYPE_ITEM,
1082 item, "Not supported by fdir filter tcam normal, support end");
1086 /* get next no void item */
1087 *in_out_item = item;
1092 static int hinic_check_tcam_normal_item_ele(const struct rte_flow_item *item,
1093 const struct rte_flow_item pattern[],
1094 struct hinic_fdir_rule *rule,
1095 struct rte_flow_error *error)
1097 if (hinic_normal_item_check_ether(&item, pattern, error) ||
1098 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1099 hinic_tcam_normal_item_check_l4(&item, pattern, rule, error) ||
1100 hinic_normal_item_check_end(item, rule, error))
1106 static int hinic_tunnel_item_check_l4(const struct rte_flow_item **in_out_item,
1107 const struct rte_flow_item pattern[],
1108 struct hinic_fdir_rule *rule,
1109 struct rte_flow_error *error)
1111 const struct rte_flow_item *item = *in_out_item;
1113 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1114 item = next_no_void_pattern(pattern, item);
1115 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1116 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1117 rte_flow_error_set(error, EINVAL,
1118 RTE_FLOW_ERROR_TYPE_ITEM,
1119 item, "Not supported by fdir filter, support vxlan");
1123 *in_out_item = item;
1125 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1126 rte_flow_error_set(error, EINVAL,
1127 RTE_FLOW_ERROR_TYPE_ITEM,
1128 item, "Not supported by fdir filter tcam tunnel, outer l4 only supports udp");
1136 hinic_tunnel_item_check_vxlan(const struct rte_flow_item **in_out_item,
1137 const struct rte_flow_item pattern[],
1138 struct hinic_fdir_rule *rule,
1139 struct rte_flow_error *error)
1141 const struct rte_flow_item *item = *in_out_item;
1144 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
1145 item = next_no_void_pattern(pattern, item);
1146 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1147 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1148 item->type != RTE_FLOW_ITEM_TYPE_ANY) {
1149 (void)memset(rule, 0, sizeof(struct hinic_fdir_rule));
1150 rte_flow_error_set(error, EINVAL,
1151 RTE_FLOW_ERROR_TYPE_ITEM,
1152 item, "Not supported by fdir filter, support tcp/udp");
1156 *in_out_item = item;
1163 hinic_tunnel_inner_item_check_l4(const struct rte_flow_item **in_out_item,
1164 const struct rte_flow_item pattern[],
1165 struct hinic_fdir_rule *rule,
1166 struct rte_flow_error *error)
1168 const struct rte_flow_item_tcp *tcp_spec;
1169 const struct rte_flow_item_tcp *tcp_mask;
1170 const struct rte_flow_item_udp *udp_spec;
1171 const struct rte_flow_item_udp *udp_mask;
1172 const struct rte_flow_item *item = *in_out_item;
1174 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1175 /* Not supported last point for range */
1177 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1178 rte_flow_error_set(error, EINVAL,
1179 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1180 item, "Not supported last point for range");
1184 /* get the TCP/UDP info */
1185 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1187 * Only care about src & dst ports,
1188 * others should be masked.
1191 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1192 rte_flow_error_set(error, EINVAL,
1193 RTE_FLOW_ERROR_TYPE_ITEM,
1194 item, "Not supported by fdir filter, support src, dst ports");
1198 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1199 if (tcp_mask->hdr.sent_seq ||
1200 tcp_mask->hdr.recv_ack ||
1201 tcp_mask->hdr.data_off ||
1202 tcp_mask->hdr.tcp_flags ||
1203 tcp_mask->hdr.rx_win ||
1204 tcp_mask->hdr.cksum ||
1205 tcp_mask->hdr.tcp_urp) {
1206 (void)memset(rule, 0,
1207 sizeof(struct hinic_fdir_rule));
1208 rte_flow_error_set(error, EINVAL,
1209 RTE_FLOW_ERROR_TYPE_ITEM,
1210 item, "Not supported by fdir filter, support tcp");
1214 rule->mode = HINIC_FDIR_MODE_TCAM;
1215 rule->mask.tunnel_flag = UINT16_MAX;
1216 rule->mask.tunnel_inner_src_port_mask =
1217 tcp_mask->hdr.src_port;
1218 rule->mask.tunnel_inner_dst_port_mask =
1219 tcp_mask->hdr.dst_port;
1220 rule->mask.proto_mask = UINT16_MAX;
1222 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_TCP;
1225 (const struct rte_flow_item_tcp *)item->spec;
1226 rule->hinic_fdir.tunnel_inner_src_port =
1227 tcp_spec->hdr.src_port;
1228 rule->hinic_fdir.tunnel_inner_dst_port =
1229 tcp_spec->hdr.dst_port;
1231 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1233 * Only care about src & dst ports,
1234 * others should be masked.
1237 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1238 rte_flow_error_set(error, EINVAL,
1239 RTE_FLOW_ERROR_TYPE_ITEM,
1240 item, "Not supported by fdir filter, support src, dst ports");
1244 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1245 if (udp_mask->hdr.dgram_len ||
1246 udp_mask->hdr.dgram_cksum) {
1247 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1248 rte_flow_error_set(error, EINVAL,
1249 RTE_FLOW_ERROR_TYPE_ITEM,
1250 item, "Not supported by fdir filter, support udp");
1254 rule->mode = HINIC_FDIR_MODE_TCAM;
1255 rule->mask.tunnel_flag = UINT16_MAX;
1256 rule->mask.tunnel_inner_src_port_mask =
1257 udp_mask->hdr.src_port;
1258 rule->mask.tunnel_inner_dst_port_mask =
1259 udp_mask->hdr.dst_port;
1260 rule->mask.proto_mask = UINT16_MAX;
1262 rule->hinic_fdir.proto = IP_HEADER_PROTOCOL_TYPE_UDP;
1265 (const struct rte_flow_item_udp *)item->spec;
1266 rule->hinic_fdir.tunnel_inner_src_port =
1267 udp_spec->hdr.src_port;
1268 rule->hinic_fdir.tunnel_inner_dst_port =
1269 udp_spec->hdr.dst_port;
1271 } else if (item->type == RTE_FLOW_ITEM_TYPE_ANY) {
1272 rule->mode = HINIC_FDIR_MODE_TCAM;
1273 rule->mask.tunnel_flag = UINT16_MAX;
1275 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1276 rte_flow_error_set(error, EINVAL,
1277 RTE_FLOW_ERROR_TYPE_ITEM,
1278 item, "Not supported by fdir filter, support tcp/udp");
1282 /* get next no void item */
1283 *in_out_item = next_no_void_pattern(pattern, item);
1289 static int hinic_check_tcam_tunnel_item_ele(const struct rte_flow_item *item,
1290 const struct rte_flow_item pattern[],
1291 struct hinic_fdir_rule *rule,
1292 struct rte_flow_error *error)
1294 if (hinic_normal_item_check_ether(&item, pattern, error) ||
1295 hinic_normal_item_check_ip(&item, pattern, rule, error) ||
1296 hinic_tunnel_item_check_l4(&item, pattern, rule, error) ||
1297 hinic_tunnel_item_check_vxlan(&item, pattern, rule, error) ||
1298 hinic_tunnel_inner_item_check_l4(&item, pattern, rule, error) ||
1299 hinic_normal_item_check_end(item, rule, error))
1305 static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
1306 struct hinic_fdir_rule *rule,
1307 struct rte_flow_error *error)
1309 /* Must be input direction */
1310 if (!attr->ingress) {
1311 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1312 rte_flow_error_set(error, EINVAL,
1313 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1314 attr, "Only support ingress.");
1320 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1321 rte_flow_error_set(error, EINVAL,
1322 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1323 attr, "Not support egress.");
1328 if (attr->priority) {
1329 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1330 rte_flow_error_set(error, EINVAL,
1331 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1332 attr, "Not support priority.");
1339 static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
1340 const struct rte_flow_action actions[],
1341 struct hinic_fdir_rule *rule,
1342 struct rte_flow_error *error)
1344 const struct rte_flow_action *act;
1346 /* Check if the first not void action is QUEUE */
1347 act = next_no_void_action(actions, NULL);
1348 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1349 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1350 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1351 item, "Not supported action.");
1355 rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;
1357 /* Check if the next not void item is END */
1358 act = next_no_void_action(actions, act);
1359 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1360 memset(rule, 0, sizeof(struct hinic_fdir_rule));
1361 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1362 act, "Not supported action.");
1370 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1371 * and fill in the flow director filter info along the way.
1372 * UDP/TCP/SCTP PATTERN:
1373 * The first not void item can be ETH or IPV4 or IPV6
1374 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1375 * The next not void item could be UDP or TCP (optional)
1376 * The next not void item must be END.
1378 * The first not void action should be QUEUE.
1379 * The second not void optional action should be MARK,
1380 * mark_id is a uint32_t number.
1381 * The next not void action should be END.
1382 * UDP/TCP pattern example:
1385 * IPV4 src_addr 1.2.3.6 0xFFFFFFFF
1386 * dst_addr 1.2.3.5 0xFFFFFFFF
1387 * UDP/TCP src_port 80 0xFFFF
1388 * dst_port 80 0xFFFF
1390 * Other members in mask and spec should be set to 0x00.
1391 * Item->last should be NULL.
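 *
 * An illustrative sketch (not part of the driver; the address and queue
 * are example values) of a pattern this normal-mode parser accepts,
 * matching packets sent to one IPv4 destination. The ingress attribute
 * and the QUEUE/END actions are built as in the examples above:
 *
 *      struct rte_flow_item_ipv4 ipv4_spec = {
 *              .hdr = { .dst_addr = RTE_BE32(RTE_IPV4(1, 2, 3, 5)) },
 *      };
 *      struct rte_flow_item_ipv4 ipv4_mask = {
 *              .hdr = { .dst_addr = RTE_BE32(0xFFFFFFFF) },
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ipv4_spec, .mask = &ipv4_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };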
1394 hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1395 const struct rte_flow_item pattern[],
1396 const struct rte_flow_action actions[],
1397 struct hinic_fdir_rule *rule,
1398 struct rte_flow_error *error)
1400 const struct rte_flow_item *item = NULL;
1402 if (hinic_check_filter_arg(attr, pattern, actions, error))
1405 if (hinic_check_normal_item_ele(item, pattern, rule, error))
1408 if (hinic_check_normal_attr_ele(attr, rule, error))
1411 if (hinic_check_normal_act_ele(item, actions, rule, error))
1418 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1419 * and fill in the flow director filter info along the way.
1420 * UDP/TCP/SCTP PATTERN:
1421 * The first not void item can be ETH or IPV4 or IPV6
1422 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1423 * The next not void item can be ANY/TCP/UDP
1425 * The first not void action should be QUEUE.
1426 * The second not void optional action should be MARK,
1427 * mark_id is a uint32_t number.
1428 * The next not void action should be END.
1429 * UDP/TCP pattern example:
1432 * IPV4 src_addr 1.2.3.6 0xFFFFFFFF
1433 * dst_addr 1.2.3.5 0xFFFFFFFF
1434 * UDP/TCP src_port 80 0xFFFF
1435 * dst_port 80 0xFFFF
1437 * Other members in mask and spec should be set to 0x00.
1438 * Item->last should be NULL.
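 *
 * An illustrative sketch (not part of the driver; the port is an example
 * value): adding an L4 port match on top of the IPv4 match makes the rule
 * fall through to this TCAM parser, since the normal-mode parser above
 * only accepts IPv4 addresses. With ipv4_spec/ipv4_mask as in the
 * normal-mode example:
 *
 *      struct rte_flow_item_tcp tcp_spec = {
 *              .hdr = { .dst_port = RTE_BE16(80) },
 *      };
 *      struct rte_flow_item_tcp tcp_mask = {
 *              .hdr = { .dst_port = RTE_BE16(0xFFFF) },
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ipv4_spec, .mask = &ipv4_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *                .spec = &tcp_spec, .mask = &tcp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };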
1441 hinic_parse_fdir_filter_tcam_normal(const struct rte_flow_attr *attr,
1442 const struct rte_flow_item pattern[],
1443 const struct rte_flow_action actions[],
1444 struct hinic_fdir_rule *rule,
1445 struct rte_flow_error *error)
1447 const struct rte_flow_item *item = NULL;
1449 if (hinic_check_filter_arg(attr, pattern, actions, error))
1452 if (hinic_check_tcam_normal_item_ele(item, pattern, rule, error))
1455 if (hinic_check_normal_attr_ele(attr, rule, error))
1458 if (hinic_check_normal_act_ele(item, actions, rule, error))
1465 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1466 * and fill in the flow director filter info along the way.
1467 * UDP/TCP/SCTP PATTERN:
1468 * The first not void item can be ETH or IPV4 or IPV6
1469 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1470 * The next not void item must be UDP
1471 * The next not void item must be VXLAN (optional)
1472 * The first not void item can be ETH or IPV4 or IPV6
1473 * The next not void item could be ANY or UDP or TCP (optional)
1474 * The next not void item must be END.
1476 * The first not void action should be QUEUE.
1477 * The second not void optional action should be MARK,
1478 * mark_id is a uint32_t number.
1479 * The next not void action should be END.
1480 * UDP/TCP pattern example:
1483 * IPV4 src_addr 1.2.3.6 0xFFFFFFFF
1484 * dst_addr 1.2.3.5 0xFFFFFFFF
1487 * UDP/TCP src_port 80 0xFFFF
1488 * dst_port 80 0xFFFF
1490 * Other members in mask and spec should be set to 0x00.
1491 * Item->last should be NULL.
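 *
 * An illustrative sketch (not part of the driver; the inner port is an
 * example value) of a tunnel pattern this parser accepts: an outer IPv4
 * match plus UDP/VXLAN, then the inner TCP destination port. With
 * ipv4_spec/ipv4_mask as in the normal-mode example:
 *
 *      struct rte_flow_item_tcp inner_tcp_spec = {
 *              .hdr = { .dst_port = RTE_BE16(80) },
 *      };
 *      struct rte_flow_item_tcp inner_tcp_mask = {
 *              .hdr = { .dst_port = RTE_BE16(0xFFFF) },
 *      };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ipv4_spec, .mask = &ipv4_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *              { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *                .spec = &inner_tcp_spec, .mask = &inner_tcp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };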
1494 hinic_parse_fdir_filter_tcam_tunnel(const struct rte_flow_attr *attr,
1495 const struct rte_flow_item pattern[],
1496 const struct rte_flow_action actions[],
1497 struct hinic_fdir_rule *rule,
1498 struct rte_flow_error *error)
1500 const struct rte_flow_item *item = NULL;
1502 if (hinic_check_filter_arg(attr, pattern, actions, error))
1505 if (hinic_check_tcam_tunnel_item_ele(item, pattern, rule, error))
1508 if (hinic_check_normal_attr_ele(attr, rule, error))
1511 if (hinic_check_normal_act_ele(item, actions, rule, error))
1517 static int hinic_parse_fdir_filter(struct rte_eth_dev *dev,
1518 const struct rte_flow_attr *attr,
1519 const struct rte_flow_item pattern[],
1520 const struct rte_flow_action actions[],
1521 struct hinic_fdir_rule *rule,
1522 struct rte_flow_error *error)
1526 ret = hinic_parse_fdir_filter_normal(attr, pattern, actions,
1531 ret = hinic_parse_fdir_filter_tcam_normal(attr, pattern, actions,
1536 ret = hinic_parse_fdir_filter_tcam_tunnel(attr, pattern, actions,
1542 if (rule->queue >= dev->data->nb_rx_queues)
1549 * Check if the flow rule is supported by the NIC.
1550 * It only checks the format; it does not guarantee that the rule can be
1551 * programmed into the HW, because there may not be enough room for it.
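 *
 * A minimal usage sketch from the application side (port_id and the
 * attr/pattern/actions stand for example definitions like the ones
 * above); validation alone does not program the rule:
 *
 *      struct rte_flow_error err;
 *      struct rte_flow *flow = NULL;
 *
 *      if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *              flow = rte_flow_create(port_id, &attr, pattern, actions, &err);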
1553 static int hinic_flow_validate(struct rte_eth_dev *dev,
1554 const struct rte_flow_attr *attr,
1555 const struct rte_flow_item pattern[],
1556 const struct rte_flow_action actions[],
1557 struct rte_flow_error *error)
1559 struct rte_eth_ethertype_filter ethertype_filter;
1560 struct rte_eth_ntuple_filter ntuple_filter;
1561 struct hinic_fdir_rule fdir_rule;
1564 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1565 ret = hinic_parse_ntuple_filter(dev, attr, pattern,
1566 actions, &ntuple_filter, error);
1570 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1571 ret = hinic_parse_ethertype_filter(dev, attr, pattern,
1572 actions, ðertype_filter, error);
1577 memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
1578 ret = hinic_parse_fdir_filter(dev, attr, pattern,
1579 actions, &fdir_rule, error);
1584 static inline int ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
1585 struct hinic_5tuple_filter_info *hinic_filter_info)
1587 switch (filter->dst_ip_mask) {
1589 hinic_filter_info->dst_ip_mask = 0;
1590 hinic_filter_info->dst_ip = filter->dst_ip;
1593 hinic_filter_info->dst_ip_mask = 1;
1594 hinic_filter_info->dst_ip = 0;
1597 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
1601 switch (filter->src_ip_mask) {
1603 hinic_filter_info->src_ip_mask = 0;
1604 hinic_filter_info->src_ip = filter->src_ip;
1607 hinic_filter_info->src_ip_mask = 1;
1608 hinic_filter_info->src_ip = 0;
1611 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
1617 static inline int ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
1618 struct hinic_5tuple_filter_info *hinic_filter_info)
1620 switch (filter->dst_port_mask) {
1622 hinic_filter_info->dst_port_mask = 0;
1623 hinic_filter_info->dst_port = filter->dst_port;
1626 hinic_filter_info->dst_port_mask = 1;
1627 hinic_filter_info->dst_port = 0;
1630 PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
1634 switch (filter->src_port_mask) {
1636 hinic_filter_info->src_port_mask = 0;
1637 hinic_filter_info->src_port = filter->src_port;
1640 hinic_filter_info->src_port_mask = 1;
1641 hinic_filter_info->src_port = 0;
1644 PMD_DRV_LOG(ERR, "Invalid src_port mask.");
1651 static inline int ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
1652 struct hinic_5tuple_filter_info *hinic_filter_info)
1654 switch (filter->proto_mask) {
1656 hinic_filter_info->proto_mask = 0;
1657 hinic_filter_info->proto = filter->proto;
1660 hinic_filter_info->proto_mask = 1;
1661 hinic_filter_info->proto = 0;
1664 PMD_DRV_LOG(ERR, "Invalid protocol mask.");
1671 static inline int ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
1672 struct hinic_5tuple_filter_info *filter_info)
1674 if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
1675 filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
1676 filter->priority < HINIC_MIN_N_TUPLE_PRIO)
1679 if (ntuple_ip_filter(filter, filter_info) ||
1680 ntuple_port_filter(filter, filter_info) ||
1681 ntuple_proto_filter(filter, filter_info))
1684 filter_info->priority = (uint8_t)filter->priority;
1688 static inline struct hinic_5tuple_filter *
1689 hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
1690 struct hinic_5tuple_filter_info *key)
1692 struct hinic_5tuple_filter *it;
1694 TAILQ_FOREACH(it, filter_list, entries) {
1695 if (memcmp(key, &it->filter_info,
1696 sizeof(struct hinic_5tuple_filter_info)) == 0) {
1704 static int hinic_set_lacp_tcam(struct hinic_nic_dev *nic_dev)
1706 struct tag_pa_rule lacp_rule;
1707 struct tag_pa_action lacp_action;
1709 memset(&lacp_rule, 0, sizeof(lacp_rule));
1710 memset(&lacp_action, 0, sizeof(lacp_action));
1711 /* LACP TCAM rule */
1712 lacp_rule.eth_type = PA_ETH_TYPE_OTHER;
1713 lacp_rule.l2_header.eth_type.val16 = 0x8809;
1714 lacp_rule.l2_header.eth_type.mask16 = 0xffff;
1716 /* LACP TCAM action */
1717 lacp_action.err_type = 0x3f; /* err from ipsu, not convert */
1718 lacp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1719 lacp_action.pkt_type = PKT_LACP_TYPE;
1720 lacp_action.pri = 0x0;
1721 lacp_action.push_len = 0xf; /* push_len:0xf, not convert */
1723 return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP,
1724 &lacp_rule, &lacp_action);
1727 static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
1729 struct tag_pa_rule bgp_rule;
1730 struct tag_pa_action bgp_action;
1732 memset(&bgp_rule, 0, sizeof(bgp_rule));
1733 memset(&bgp_action, 0, sizeof(bgp_action));
1735 bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPV4 */
1736 bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1737 bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1738 bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1739 bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
1740 bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;
1742 /* BGP TCAM action */
1743 bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1744 bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1745 bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
1746 bgp_action.pri = 0xf; /* priority of BGP is 0xf, taken from the ipsu
1747 * parse result, no need to convert
1749 bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1751 return hinic_set_fdir_tcam(nic_dev->hwdev,
1752 TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
1755 static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
1757 struct tag_pa_rule bgp_rule;
1758 struct tag_pa_action bgp_action;
1760 memset(&bgp_rule, 0, sizeof(bgp_rule));
1761 memset(&bgp_action, 0, sizeof(bgp_action));
1763 bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
1764 bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
1765 bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
1766 bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1767 bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
1768 bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;
1770 /* BGP TCAM action */
1771 bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
1772 bgp_action.fwd_action = 0x7; /* 0x3:drop; 0x7: not convert */
1773 bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp:sport: 84 */
1774 bgp_action.pri = 0xf; /* priority of BGP is 0xf, taken from the ipsu
1775 * parse result, no need to convert
1777 bgp_action.push_len = 0xf; /* push_len:0xf, not convert */
1779 return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
1780 &bgp_rule, &bgp_action);
1783 static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
1785 struct tag_pa_rule vrrp_rule;
1786 struct tag_pa_action vrrp_action;
1788 memset(&vrrp_rule, 0, sizeof(vrrp_rule));
1789 memset(&vrrp_action, 0, sizeof(vrrp_action));
1790 /* VRRP TCAM rule */
1791 vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
1792 vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
1793 vrrp_rule.ip_header.protocol.mask8 = 0xff;
1794 vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;
1796 /* VRRP TCAM action */
1797 vrrp_action.err_type = 0x3f;
1798 vrrp_action.fwd_action = 0x7;
1799 vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
1800 vrrp_action.pri = 0xf;
1801 vrrp_action.push_len = 0xf;
1803 return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
1804 &vrrp_rule, &vrrp_action);
1808 * Clear all fdir configuration.
1811 * The hardware interface of an Ethernet device.
1815 * negative error value otherwise.
1817 void hinic_free_fdir_filter(struct hinic_nic_dev *nic_dev)
1819 (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
1821 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_DPORT);
1823 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT);
1825 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
1827 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
1829 (void)hinic_flush_tcam_rule(nic_dev->hwdev);
1832 static int hinic_filter_info_init(struct hinic_5tuple_filter *filter,
1833 struct hinic_filter_info *filter_info)
1835 switch (filter->filter_info.proto) {
1837 /* Filter type is bgp type if dst_port or src_port is 179 */
1838 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
1839 !(filter->filter_info.dst_port_mask)) {
1840 filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
1841 } else if (filter->filter_info.src_port ==
1842 RTE_BE16(BGP_DPORT_ID) &&
1843 !(filter->filter_info.src_port_mask)) {
1844 filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
1846 PMD_DRV_LOG(INFO, "TCP PROTOCOL: 5tuple filters"
1847 " only support BGP now, proto: 0x%x, "
1848 "dst_port: 0x%x, dst_port_mask: 0x%x, "
1849 "src_port: 0x%x, src_port_mask: 0x%x.",
1850 filter->filter_info.proto,
1851 filter->filter_info.dst_port,
1852 filter->filter_info.dst_port_mask,
1853 filter->filter_info.src_port,
1854 filter->filter_info.src_port_mask);
1860 filter_info->pkt_type = PKT_VRRP_TYPE;
1864 filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
1867 case IPPROTO_ICMPV6:
1868 filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
1872 PMD_DRV_LOG(ERR, "5tuple filters only support BGP/VRRP/ICMP now, "
1873 "proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x."
1874 "src_port: 0x%x, src_port_mask: 0x%x.",
1875 filter->filter_info.proto, filter->filter_info.dst_port,
1876 filter->filter_info.dst_port_mask,
1877 filter->filter_info.src_port,
1878 filter->filter_info.src_port_mask);
1885 static int hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
1886 struct hinic_filter_info *filter_info, int *index)
1890 type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
1892 if (type_id > HINIC_MAX_Q_FILTERS - 1) {
1893 PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter types.");
1897 if (!(filter_info->type_mask & (1 << type_id))) {
1898 filter_info->type_mask |= 1 << type_id;
1899 filter->index = type_id;
1900 filter_info->pkt_filters[type_id].enable = true;
1901 filter_info->pkt_filters[type_id].pkt_proto =
1902 filter->filter_info.proto;
1903 TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
1906 PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
1915 * Add a 5tuple filter
1918 * Pointer to struct rte_eth_dev.
1920 * Pointer to the filter that will be added.
1922 * - On success, zero.
1923 * - On failure, a negative value.
1925 static int hinic_add_5tuple_filter(struct rte_eth_dev *dev,
1926 struct hinic_5tuple_filter *filter)
1928 struct hinic_filter_info *filter_info =
1929 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1931 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
1933 if (hinic_filter_info_init(filter, filter_info) ||
1934 hinic_lookup_new_filter(filter, filter_info, &i))
1937 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
1939 filter_info->pkt_filters[i].enable,
1942 PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
1943 filter_info->pkt_type, filter->queue,
1944 filter_info->pkt_filters[i].enable);
1948 PMD_DRV_LOG(INFO, "Add 5tuple succeeded, type: 0x%x, qid: 0x%x, enable: 0x%x",
1949 filter_info->pkt_type, filter_info->qid,
1950 filter_info->pkt_filters[filter->index].enable);
1952 switch (filter->filter_info.proto) {
1954 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
1955 ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
1957 PMD_DRV_LOG(ERR, "Set dport bgp failed, "
1958 "type: 0x%x, qid: 0x%x, enable: 0x%x",
1959 filter_info->pkt_type, filter->queue,
1960 filter_info->pkt_filters[i].enable);
1964 PMD_DRV_LOG(INFO, "Set dport bgp succeeded, qid: 0x%x, enable: 0x%x",
1966 filter_info->pkt_filters[i].enable);
1967 } else if (filter->filter_info.src_port ==
1968 RTE_BE16(BGP_DPORT_ID)) {
1969 ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
1971 PMD_DRV_LOG(ERR, "Set sport bgp failed, "
1972 "type: 0x%x, qid: 0x%x, enable: 0x%x",
1973 filter_info->pkt_type, filter->queue,
1974 filter_info->pkt_filters[i].enable);
1978 PMD_DRV_LOG(INFO, "Set sport bgp succeeded, qid: 0x%x, enable: 0x%x",
1980 filter_info->pkt_filters[i].enable);
1986 ret_fw = hinic_set_vrrp_tcam(nic_dev);
1988 PMD_DRV_LOG(ERR, "Set VRRP failed, "
1989 "type: 0x%x, qid: 0x%x, enable: 0x%x",
1990 filter_info->pkt_type, filter->queue,
1991 filter_info->pkt_filters[i].enable);
1994 PMD_DRV_LOG(INFO, "Set VRRP succeeded, qid: 0x%x, enable: 0x%x",
1996 filter_info->pkt_filters[i].enable);
2007 * Remove a 5tuple filter
2010 * Pointer to struct rte_eth_dev.
2012 * Pointer to the filter that will be removed.
2014 static void hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
2015 struct hinic_5tuple_filter *filter)
2017 struct hinic_filter_info *filter_info =
2018 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2019 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2021 switch (filter->filter_info.proto) {
2023 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
2027 if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
2028 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2029 TCAM_PKT_BGP_DPORT);
2030 else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
2031 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2032 TCAM_PKT_BGP_SPORT);
2039 hinic_filter_info_init(filter, filter_info);
2041 filter_info->pkt_filters[filter->index].enable = false;
2042 filter_info->pkt_filters[filter->index].pkt_proto = 0;
2044 PMD_DRV_LOG(INFO, "Del 5tuple succeeded, type: 0x%x, qid: 0x%x, enable: 0x%x",
2045 filter_info->pkt_type,
2046 filter_info->pkt_filters[filter->index].qid,
2047 filter_info->pkt_filters[filter->index].enable);
2048 (void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
2049 filter_info->pkt_filters[filter->index].qid,
2050 filter_info->pkt_filters[filter->index].enable,
2053 filter_info->pkt_type = 0;
2054 filter_info->qid = 0;
2055 filter_info->pkt_filters[filter->index].qid = 0;
2056 filter_info->type_mask &= ~(1 << (filter->index));
2057 TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);
2063 * Add or delete an ntuple filter
2066 * Pointer to struct rte_eth_dev.
2067 * @param ntuple_filter
2068 * Pointer to struct rte_eth_ntuple_filter
2070 * If true, add filter; if false, remove filter
2072 * - On success, zero.
2073 * - On failure, a negative value.
2075 static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
2076 struct rte_eth_ntuple_filter *ntuple_filter,
2079 struct hinic_filter_info *filter_info =
2080 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2081 struct hinic_5tuple_filter_info filter_5tuple;
2082 struct hinic_5tuple_filter *filter;
2085 if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
2086 PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
2090 memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
2091 ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
2095 filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
2097 if (filter != NULL && add) {
2098 PMD_DRV_LOG(ERR, "Filter exists.");
2101 if (filter == NULL && !add) {
2102 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2107 filter = rte_zmalloc("hinic_5tuple_filter",
2108 sizeof(struct hinic_5tuple_filter), 0);
2111 rte_memcpy(&filter->filter_info, &filter_5tuple,
2112 sizeof(struct hinic_5tuple_filter_info));
2113 filter->queue = ntuple_filter->queue;
2115 filter_info->qid = ntuple_filter->queue;
2117 ret = hinic_add_5tuple_filter(dev, filter);
2124 hinic_remove_5tuple_filter(dev, filter);
2130 hinic_check_ethertype_filter(struct rte_eth_ethertype_filter *filter)
2132 if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM)
2135 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2136 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
2137 PMD_DRV_LOG(ERR, "Unsupported ether_type(0x%04x) in"
2138 " ethertype filter", filter->ether_type);
2142 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
2143 PMD_DRV_LOG(ERR, "Mac compare is not supported");
2146 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
2147 PMD_DRV_LOG(ERR, "Drop option is not supported");
2155 hinic_ethertype_filter_lookup(struct hinic_filter_info *filter_info,
2156 struct hinic_pkt_filter *ethertype_filter)
2158 switch (ethertype_filter->pkt_proto) {
2159 case RTE_ETHER_TYPE_SLOW:
2160 filter_info->pkt_type = PKT_LACP_TYPE;
2163 case RTE_ETHER_TYPE_ARP:
2164 filter_info->pkt_type = PKT_ARP_TYPE;
2168 PMD_DRV_LOG(ERR, "Only LACP/ARP are supported for ethertype filters");
2172 return HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);
2176 hinic_ethertype_filter_insert(struct hinic_filter_info *filter_info,
2177 struct hinic_pkt_filter *ethertype_filter)
2181 /* Find LACP or ARP type id */
2182 id = hinic_ethertype_filter_lookup(filter_info, ethertype_filter);
2186 if (!(filter_info->type_mask & (1 << id))) {
2187 filter_info->type_mask |= 1 << id;
2188 filter_info->pkt_filters[id].pkt_proto =
2189 ethertype_filter->pkt_proto;
2190 filter_info->pkt_filters[id].enable = ethertype_filter->enable;
2191 filter_info->qid = ethertype_filter->qid;
2195 PMD_DRV_LOG(ERR, "Filter type: %d exists", id);
2200 hinic_ethertype_filter_remove(struct hinic_filter_info *filter_info,
2203 if (idx >= HINIC_MAX_Q_FILTERS)
2206 filter_info->pkt_type = 0;
2207 filter_info->type_mask &= ~(1 << idx);
2208 filter_info->pkt_filters[idx].pkt_proto = (uint16_t)0;
2209 filter_info->pkt_filters[idx].enable = FALSE;
2210 filter_info->pkt_filters[idx].qid = 0;
2214 hinic_add_del_ethertype_filter(struct rte_eth_dev *dev,
2215 struct rte_eth_ethertype_filter *filter,
2218 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2219 struct hinic_filter_info *filter_info =
2220 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2221 struct hinic_pkt_filter ethertype_filter;
2225 if (hinic_check_ethertype_filter(filter))
2229 ethertype_filter.pkt_proto = filter->ether_type;
2230 ethertype_filter.enable = TRUE;
2231 ethertype_filter.qid = (u8)filter->queue;
2232 i = hinic_ethertype_filter_insert(filter_info,
2237 ret_fw = hinic_set_fdir_filter(nic_dev->hwdev,
2238 filter_info->pkt_type, filter_info->qid,
2239 filter_info->pkt_filters[i].enable, true);
2241 PMD_DRV_LOG(ERR, "Add ethertype failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2242 filter_info->pkt_type, filter->queue,
2243 filter_info->pkt_filters[i].enable);
2245 hinic_ethertype_filter_remove(filter_info, i);
2248 PMD_DRV_LOG(INFO, "Add ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2249 filter_info->pkt_type, filter->queue,
2250 filter_info->pkt_filters[i].enable);
2252 switch (ethertype_filter.pkt_proto) {
2253 case RTE_ETHER_TYPE_SLOW:
2254 ret_fw = hinic_set_lacp_tcam(nic_dev);
2256 PMD_DRV_LOG(ERR, "Add lacp tcam failed");
2257 hinic_ethertype_filter_remove(filter_info, i);
2261 PMD_DRV_LOG(INFO, "Add lacp tcam succeed");
2267 ethertype_filter.pkt_proto = filter->ether_type;
2268 i = hinic_ethertype_filter_lookup(filter_info,
2271 if ((filter_info->type_mask & (1 << i))) {
2272 filter_info->pkt_filters[i].enable = FALSE;
2273 (void)hinic_set_fdir_filter(nic_dev->hwdev,
2274 filter_info->pkt_type,
2275 filter_info->pkt_filters[i].qid,
2276 filter_info->pkt_filters[i].enable,
2279 PMD_DRV_LOG(INFO, "Del ethertype succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
2280 filter_info->pkt_type,
2281 filter_info->pkt_filters[i].qid,
2282 filter_info->pkt_filters[i].enable);
2284 switch (ethertype_filter.pkt_proto) {
2285 case RTE_ETHER_TYPE_SLOW:
2286 (void)hinic_clear_fdir_tcam(nic_dev->hwdev,
2288 PMD_DRV_LOG(INFO, "Del lacp tcam succeed");
2294 hinic_ethertype_filter_remove(filter_info, i);
2297 PMD_DRV_LOG(ERR, "Ethertype doesn't exist, type: 0x%x, qid: 0x%x, enable: 0x%x",
2298 filter_info->pkt_type, filter->queue,
2299 filter_info->pkt_filters[i].enable);
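/*
 * Editor's note: in normal (non-TCAM) fdir mode the hardware key is a single
 * 32-bit value, so the init below programs one fully masked IPv4 field per
 * rule: the source address (HINIC_ATR_FLOW_TYPE_IPV4_SIP) or the destination
 * address (HINIC_ATR_FLOW_TYPE_IPV4_DIP); rules that set neither mask are
 * rejected.
 */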
2307 static int hinic_fdir_info_init(struct hinic_fdir_rule *rule,
2308 struct hinic_fdir_info *fdir_info)
2310 switch (rule->mask.src_ipv4_mask) {
2312 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_SIP;
2313 fdir_info->qid = rule->queue;
2314 fdir_info->fdir_key = rule->hinic_fdir.src_ip;
2321 PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
2325 switch (rule->mask.dst_ipv4_mask) {
2327 fdir_info->fdir_flag = HINIC_ATR_FLOW_TYPE_IPV4_DIP;
2328 fdir_info->qid = rule->queue;
2329 fdir_info->fdir_key = rule->hinic_fdir.dst_ip;
2336 PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
2340 if (fdir_info->fdir_flag == 0) {
2341 PMD_DRV_LOG(ERR, "All supported masks are zero.");
2348 static inline int hinic_add_del_fdir_filter(struct rte_eth_dev *dev,
2349 struct hinic_fdir_rule *rule, bool add)
2351 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2352 struct hinic_fdir_info fdir_info;
2355 memset(&fdir_info, 0, sizeof(struct hinic_fdir_info));
2357 ret = hinic_fdir_info_init(rule, &fdir_info);
2359 PMD_DRV_LOG(ERR, "Init hinic fdir info failed!");
2364 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2365 true, fdir_info.fdir_key,
2366 true, fdir_info.fdir_flag);
2368 PMD_DRV_LOG(ERR, "Add fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2369 fdir_info.fdir_flag, fdir_info.qid,
2370 fdir_info.fdir_key);
2373 PMD_DRV_LOG(INFO, "Add fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2374 fdir_info.fdir_flag, fdir_info.qid,
2375 fdir_info.fdir_key);
2377 ret = hinic_set_normal_filter(nic_dev->hwdev, fdir_info.qid,
2378 false, fdir_info.fdir_key, true,
2379 fdir_info.fdir_flag);
2381 PMD_DRV_LOG(ERR, "Del fdir filter failed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2382 fdir_info.fdir_flag, fdir_info.qid,
2383 fdir_info.fdir_key);
2386 PMD_DRV_LOG(INFO, "Del fdir filter succeed, flag: 0x%x, qid: 0x%x, key: 0x%x",
2387 fdir_info.fdir_flag, fdir_info.qid,
2388 fdir_info.fdir_key);
2394 static void tcam_translate_key_y(u8 *key_y, u8 *src_input, u8 *mask, u8 len)
2398 for (idx = 0; idx < len; idx++)
2399 key_y[idx] = src_input[idx] & mask[idx];
2402 static void tcam_translate_key_x(u8 *key_x, u8 *key_y, u8 *mask, u8 len)
2406 for (idx = 0; idx < len; idx++)
2407 key_x[idx] = key_y[idx] ^ mask[idx];
2410 static void tcam_key_calculate(struct tag_tcam_key *tcam_key,
2411 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2413 tcam_translate_key_y(fdir_tcam_rule->key.y,
2414 (u8 *)(&tcam_key->key_info),
2415 (u8 *)(&tcam_key->key_mask),
2416 TCAM_FLOW_KEY_SIZE);
2417 tcam_translate_key_x(fdir_tcam_rule->key.x,
2418 fdir_tcam_rule->key.y,
2419 (u8 *)(&tcam_key->key_mask),
2420 TCAM_FLOW_KEY_SIZE);
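/*
 * Illustration (editor's note, not part of the original source): with the
 * translations above, key_y = value & mask and key_x = key_y ^ mask, so for
 * a key byte value of 0xA5 and a mask of 0xF0:
 *   key_y = 0xA5 & 0xF0 = 0xA0
 *   key_x = 0xA0 ^ 0xF0 = 0x50
 * Bits where the mask is 0 end up as key_x = key_y = 0, which the TCAM
 * treats as "don't care"; bits where the mask is 1 carry the value in key_y
 * and its complement in key_x.
 */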
2423 static int hinic_fdir_tcam_info_init(struct rte_eth_dev *dev,
2424 struct hinic_fdir_rule *rule,
2425 struct tag_tcam_key *tcam_key,
2426 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2428 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2430 switch (rule->mask.dst_ipv4_mask) {
2432 tcam_key->key_info.ext_dip_h =
2433 (rule->hinic_fdir.dst_ip >> 16) & 0xffffU;
2434 tcam_key->key_info.ext_dip_l =
2435 rule->hinic_fdir.dst_ip & 0xffffU;
2436 tcam_key->key_mask.ext_dip_h =
2437 (rule->mask.dst_ipv4_mask >> 16) & 0xffffU;
2438 tcam_key->key_mask.ext_dip_l =
2439 rule->mask.dst_ipv4_mask & 0xffffU;
2446 PMD_DRV_LOG(ERR, "invalid dst_ip mask.");
2450 if (rule->mask.dst_port_mask > 0) {
2451 tcam_key->key_info.dst_port = rule->hinic_fdir.dst_port;
2452 tcam_key->key_mask.dst_port = rule->mask.dst_port_mask;
2455 if (rule->mask.src_port_mask > 0) {
2456 tcam_key->key_info.src_port = rule->hinic_fdir.src_port;
2457 tcam_key->key_mask.src_port = rule->mask.src_port_mask;
2460 switch (rule->mask.tunnel_flag) {
2462 tcam_key->key_info.tunnel_flag = FDIR_TCAM_TUNNEL_PACKET;
2463 tcam_key->key_mask.tunnel_flag = UINT8_MAX;
2467 tcam_key->key_info.tunnel_flag = FDIR_TCAM_NORMAL_PACKET;
2468 tcam_key->key_mask.tunnel_flag = 0;
2472 PMD_DRV_LOG(ERR, "invalid tunnel flag mask.");
2476 if (rule->mask.tunnel_inner_dst_port_mask > 0) {
2477 tcam_key->key_info.dst_port =
2478 rule->hinic_fdir.tunnel_inner_dst_port;
2479 tcam_key->key_mask.dst_port =
2480 rule->mask.tunnel_inner_dst_port_mask;
2483 if (rule->mask.tunnel_inner_src_port_mask > 0) {
2484 tcam_key->key_info.src_port =
2485 rule->hinic_fdir.tunnel_inner_src_port;
2486 tcam_key->key_mask.src_port =
2487 rule->mask.tunnel_inner_src_port_mask;
2490 switch (rule->mask.proto_mask) {
2492 tcam_key->key_info.protocol = rule->hinic_fdir.proto;
2493 tcam_key->key_mask.protocol = UINT8_MAX;
2500 PMD_DRV_LOG(ERR, "invalid proto mask.");
2504 tcam_key->key_mask.function_id = UINT16_MAX;
2506 tcam_key->key_info.function_id = hinic_global_func_id(nic_dev->hwdev);
2508 fdir_tcam_rule->data.qid = rule->queue;
2510 tcam_key_calculate(tcam_key, fdir_tcam_rule);
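/*
 * Editor's note: the function_id field above is matched with a full mask, so
 * each TCAM rule only ever applies to traffic of the PF/VF that installed it.
 */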
2515 static inline struct hinic_tcam_filter *
2516 hinic_tcam_filter_lookup(struct hinic_tcam_filter_list *filter_list,
2517 struct tag_tcam_key *key)
2519 struct hinic_tcam_filter *it;
2521 TAILQ_FOREACH(it, filter_list, entries) {
2522 if (memcmp(key, &it->tcam_key,
2523 sizeof(struct tag_tcam_key)) == 0) {
2531 static int hinic_lookup_new_tcam_filter(struct rte_eth_dev *dev,
2532 struct hinic_tcam_info *tcam_info,
2533 struct hinic_tcam_filter *tcam_filter,
2538 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2540 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF)
2541 max_index = HINIC_VF_MAX_TCAM_FILTERS;
2543 max_index = HINIC_PF_MAX_TCAM_FILTERS;
2545 for (index = 0; index < max_index; index++) {
2546 if (tcam_info->tcam_index_array[index] == 0)
2550 if (index == max_index) {
2551 PMD_DRV_LOG(ERR, "Function 0x%x supports at most %d tcam filter rules",
2552 hinic_global_func_id(nic_dev->hwdev), max_index);
2556 tcam_filter->index = index;
2557 *tcam_index = index;
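/*
 * Editor's note: tcam_info->tcam_index_array[] serves as a per-function
 * occupancy map of TCAM entries; the lookup above scans it for the first
 * free slot and fails once all HINIC_PF_MAX_TCAM_FILTERS (or
 * HINIC_VF_MAX_TCAM_FILTERS on a VF) entries are in use.
 */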
2562 static int hinic_add_tcam_filter(struct rte_eth_dev *dev,
2563 struct hinic_tcam_filter *tcam_filter,
2564 struct tag_tcam_cfg_rule *fdir_tcam_rule)
2566 struct hinic_tcam_info *tcam_info =
2567 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2568 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2570 u16 tcam_block_index = 0;
2573 if (hinic_lookup_new_tcam_filter(dev, tcam_info, tcam_filter, &index))
2576 if (tcam_info->tcam_rule_nums == 0) {
2577 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2578 rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2579 HINIC_TCAM_BLOCK_TYPE_VF, &tcam_block_index);
2581 PMD_DRV_LOG(ERR, "VF fdir filter tcam alloc block failed!");
2585 rc = hinic_alloc_tcam_block(nic_dev->hwdev,
2586 HINIC_TCAM_BLOCK_TYPE_PF, &tcam_block_index);
2588 PMD_DRV_LOG(ERR, "PF fdir filter tcam alloc block failed!");
2593 tcam_info->tcam_block_index = tcam_block_index;
2595 tcam_block_index = tcam_info->tcam_block_index;
2598 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2599 fdir_tcam_rule->index =
2600 HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) + index;
2602 fdir_tcam_rule->index =
2603 tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS + index;
2606 rc = hinic_add_tcam_rule(nic_dev->hwdev, fdir_tcam_rule);
2608 PMD_DRV_LOG(ERR, "Fdir_tcam_rule add failed!");
2612 PMD_DRV_LOG(INFO, "Add fdir_tcam_rule function_id: 0x%x, "
2613 "tcam_block_id: %d, index: %d, queue: %d, tcam_rule_nums: %d succeed",
2614 hinic_global_func_id(nic_dev->hwdev), tcam_block_index,
2615 fdir_tcam_rule->index, fdir_tcam_rule->data.qid,
2616 tcam_info->tcam_rule_nums + 1);
2618 if (tcam_info->tcam_rule_nums == 0) {
2619 rc = hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, true);
2621 (void)hinic_del_tcam_rule(nic_dev->hwdev,
2622 fdir_tcam_rule->index);
2627 TAILQ_INSERT_TAIL(&tcam_info->tcam_list, tcam_filter, entries);
2629 tcam_info->tcam_index_array[index] = 1;
2630 tcam_info->tcam_rule_nums++;
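/*
 * Editor's note: the first TCAM rule of a function allocates a TCAM block and
 * globally enables fdir filtering (hinic_set_fdir_filter(..., true));
 * hinic_del_tcam_filter() below releases the block again once the last rule
 * of that function is removed.
 */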
2635 static int hinic_del_tcam_filter(struct rte_eth_dev *dev,
2636 struct hinic_tcam_filter *tcam_filter)
2638 struct hinic_tcam_info *tcam_info =
2639 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2640 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2642 u16 tcam_block_index = tcam_info->tcam_block_index;
2646 if (hinic_func_type(nic_dev->hwdev) == TYPE_VF) {
2647 index = HINIC_PKT_VF_TCAM_INDEX_START(tcam_block_index) +
2649 block_type = HINIC_TCAM_BLOCK_TYPE_VF;
2651 index = tcam_block_index * HINIC_PF_MAX_TCAM_FILTERS +
2653 block_type = HINIC_TCAM_BLOCK_TYPE_PF;
2656 rc = hinic_del_tcam_rule(nic_dev->hwdev, index);
2658 PMD_DRV_LOG(ERR, "fdir_tcam_rule del failed!");
2662 PMD_DRV_LOG(INFO, "Del fdir_tcam_rule function_id: 0x%x, "
2663 "tcam_block_id: %d, index: %d, tcam_rule_nums: %d succeed",
2664 hinic_global_func_id(nic_dev->hwdev), tcam_block_index, index,
2665 tcam_info->tcam_rule_nums - 1);
2667 TAILQ_REMOVE(&tcam_info->tcam_list, tcam_filter, entries);
2669 tcam_info->tcam_index_array[tcam_filter->index] = 0;
2671 rte_free(tcam_filter);
2673 tcam_info->tcam_rule_nums--;
2675 if (tcam_info->tcam_rule_nums == 0) {
2676 (void)hinic_free_tcam_block(nic_dev->hwdev, block_type,
2683 static int hinic_add_del_tcam_fdir_filter(struct rte_eth_dev *dev,
2684 struct hinic_fdir_rule *rule, bool add)
2686 struct hinic_tcam_info *tcam_info =
2687 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
2688 struct hinic_tcam_filter *tcam_filter;
2689 struct tag_tcam_cfg_rule fdir_tcam_rule;
2690 struct tag_tcam_key tcam_key;
2693 memset(&fdir_tcam_rule, 0, sizeof(struct tag_tcam_cfg_rule));
2694 memset((void *)&tcam_key, 0, sizeof(struct tag_tcam_key));
2696 ret = hinic_fdir_tcam_info_init(dev, rule, &tcam_key, &fdir_tcam_rule);
2698 PMD_DRV_LOG(ERR, "Init hinic fdir tcam info failed!");
2702 tcam_filter = hinic_tcam_filter_lookup(&tcam_info->tcam_list,
2704 if (tcam_filter != NULL && add) {
2705 PMD_DRV_LOG(ERR, "Filter exists.");
2708 if (tcam_filter == NULL && !add) {
2709 PMD_DRV_LOG(ERR, "Filter doesn't exist.");
2714 tcam_filter = rte_zmalloc("hinic_tcam_filter",
2715 sizeof(struct hinic_tcam_filter), 0);
2716 if (tcam_filter == NULL)
2718 (void)rte_memcpy(&tcam_filter->tcam_key,
2719 &tcam_key, sizeof(struct tag_tcam_key));
2720 tcam_filter->queue = fdir_tcam_rule.data.qid;
2722 ret = hinic_add_tcam_filter(dev, tcam_filter, &fdir_tcam_rule);
2724 rte_free(tcam_filter);
2728 rule->tcam_index = fdir_tcam_rule.index;
2731 PMD_DRV_LOG(INFO, "Begin to delete hinic tcam filter");
2732 ret = hinic_del_tcam_filter(dev, tcam_filter);
2741 * Create or destroy a flow rule.
2742 * Theoretically one rule can match more than one filter.
2743 * We let it use the first filter it hits,
2744 * so the sequence of the parsers below matters.
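/*
 * For illustration only (not from the original source): a rule such as
 *   pattern: ETH / IPV4 src 1.2.3.4 dst 5.6.7.8 / TCP src 32 dst 33 / END
 *   action:  QUEUE index 3 / END
 * (e.g. issued through testpmd's "flow create" command) is tried against the
 * ntuple parser first; only if that parser rejects it are the ethertype and
 * then the fdir parsers attempted below.
 */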
2746 static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
2747 const struct rte_flow_attr *attr,
2748 const struct rte_flow_item pattern[],
2749 const struct rte_flow_action actions[],
2750 struct rte_flow_error *error)
2753 struct rte_eth_ntuple_filter ntuple_filter;
2754 struct rte_eth_ethertype_filter ethertype_filter;
2755 struct hinic_fdir_rule fdir_rule;
2756 struct rte_flow *flow = NULL;
2757 struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2758 struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2759 struct hinic_fdir_rule_ele *fdir_rule_ptr;
2760 struct hinic_flow_mem *hinic_flow_mem_ptr;
2761 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2763 flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
2765 PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
2769 hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
2770 sizeof(struct hinic_flow_mem), 0);
2771 if (!hinic_flow_mem_ptr) {
2772 PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
2777 hinic_flow_mem_ptr->flow = flow;
2778 TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
2781 /* Add ntuple filter */
2782 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2783 ret = hinic_parse_ntuple_filter(dev, attr, pattern,
2784 actions, &ntuple_filter, error);
2786 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2788 ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
2789 sizeof(struct hinic_ntuple_filter_ele), 0);
2790 rte_memcpy(&ntuple_filter_ptr->filter_info,
2792 sizeof(struct rte_eth_ntuple_filter));
2793 TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
2794 ntuple_filter_ptr, entries);
2795 flow->rule = ntuple_filter_ptr;
2796 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2798 PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
2799 hinic_global_func_id(nic_dev->hwdev));
2805 /* Add ethertype filter */
2806 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2807 ret = hinic_parse_ethertype_filter(dev, attr, pattern, actions,
2808 ðertype_filter, error);
2810 ret = hinic_add_del_ethertype_filter(dev, ðertype_filter,
2813 ethertype_filter_ptr =
2814 rte_zmalloc("hinic_ethertype_filter",
2815 sizeof(struct hinic_ethertype_filter_ele), 0);
2816 rte_memcpy(ðertype_filter_ptr->filter_info,
2818 sizeof(struct rte_eth_ethertype_filter));
2819 TAILQ_INSERT_TAIL(&nic_dev->filter_ethertype_list,
2820 ethertype_filter_ptr, entries);
2821 flow->rule = ethertype_filter_ptr;
2822 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2824 PMD_DRV_LOG(INFO, "Create flow ethertype succeed, func_id: 0x%x",
2825 hinic_global_func_id(nic_dev->hwdev));
2831 /* Add fdir filter */
2832 memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
2833 ret = hinic_parse_fdir_filter(dev, attr, pattern,
2834 actions, &fdir_rule, error);
2836 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
2837 ret = hinic_add_del_fdir_filter(dev,
2839 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
2840 ret = hinic_add_del_tcam_fdir_filter(dev,
2843 PMD_DRV_LOG(ERR, "Create fdir rule failed, unsupported fdir mode");
2847 fdir_rule_ptr = rte_zmalloc("hinic_fdir_rule",
2848 sizeof(struct hinic_fdir_rule_ele), 0);
2849 rte_memcpy(&fdir_rule_ptr->filter_info, &fdir_rule,
2850 sizeof(struct hinic_fdir_rule));
2851 TAILQ_INSERT_TAIL(&nic_dev->filter_fdir_rule_list,
2852 fdir_rule_ptr, entries);
2853 flow->rule = fdir_rule_ptr;
2854 flow->filter_type = RTE_ETH_FILTER_FDIR;
2856 PMD_DRV_LOG(INFO, "Create flow fdir rule succeed, func_id: 0x%x",
2857 hinic_global_func_id(nic_dev->hwdev));
2864 TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
2865 rte_flow_error_set(error, -ret,
2866 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2867 "Failed to create flow.");
2868 rte_free(hinic_flow_mem_ptr);
2873 /* Destroy a flow rule on hinic. */
2874 static int hinic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
2875 struct rte_flow_error *error)
2878 struct rte_flow *pmd_flow = flow;
2879 enum rte_filter_type filter_type = pmd_flow->filter_type;
2880 struct rte_eth_ntuple_filter ntuple_filter;
2881 struct rte_eth_ethertype_filter ethertype_filter;
2882 struct hinic_fdir_rule fdir_rule;
2883 struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
2884 struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
2885 struct hinic_fdir_rule_ele *fdir_rule_ptr;
2886 struct hinic_flow_mem *hinic_flow_mem_ptr;
2887 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2889 switch (filter_type) {
2890 case RTE_ETH_FILTER_NTUPLE:
2891 ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
2893 rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
2894 sizeof(struct rte_eth_ntuple_filter));
2895 ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2897 TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
2898 ntuple_filter_ptr, entries);
2899 rte_free(ntuple_filter_ptr);
2902 case RTE_ETH_FILTER_ETHERTYPE:
2903 ethertype_filter_ptr = (struct hinic_ethertype_filter_ele *)
2905 rte_memcpy(ðertype_filter,
2906 ðertype_filter_ptr->filter_info,
2907 sizeof(struct rte_eth_ethertype_filter));
2908 ret = hinic_add_del_ethertype_filter(dev,
2909 ðertype_filter, FALSE);
2911 TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
2912 ethertype_filter_ptr, entries);
2913 rte_free(ethertype_filter_ptr);
2916 case RTE_ETH_FILTER_FDIR:
2917 fdir_rule_ptr = (struct hinic_fdir_rule_ele *)pmd_flow->rule;
2918 rte_memcpy(&fdir_rule,
2919 &fdir_rule_ptr->filter_info,
2920 sizeof(struct hinic_fdir_rule));
2921 if (fdir_rule.mode == HINIC_FDIR_MODE_NORMAL) {
2922 ret = hinic_add_del_fdir_filter(dev, &fdir_rule, FALSE);
2923 } else if (fdir_rule.mode == HINIC_FDIR_MODE_TCAM) {
2924 ret = hinic_add_del_tcam_fdir_filter(dev, &fdir_rule,
2927 PMD_DRV_LOG(ERR, "FDIR filter mode is wrong!");
2931 TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list,
2932 fdir_rule_ptr, entries);
2933 rte_free(fdir_rule_ptr);
2937 PMD_DRV_LOG(WARNING, "Filter type (%d) is not supported",
2944 rte_flow_error_set(error, EINVAL,
2945 RTE_FLOW_ERROR_TYPE_HANDLE,
2946 NULL, "Failed to destroy flow");
2950 TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
2951 if (hinic_flow_mem_ptr->flow == pmd_flow) {
2952 TAILQ_REMOVE(&nic_dev->hinic_flow_list,
2953 hinic_flow_mem_ptr, entries);
2954 rte_free(hinic_flow_mem_ptr);
2960 PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
2961 hinic_global_func_id(nic_dev->hwdev));
2966 /* Remove all the n-tuple filters */
2967 static void hinic_clear_all_ntuple_filter(struct rte_eth_dev *dev)
2969 struct hinic_filter_info *filter_info =
2970 HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2971 struct hinic_5tuple_filter *p_5tuple;
2973 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
2974 hinic_remove_5tuple_filter(dev, p_5tuple);
2977 /* Remove all the ether type filters */
2978 static void hinic_clear_all_ethertype_filter(struct rte_eth_dev *dev)
2980 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
2981 struct hinic_filter_info *filter_info =
2982 HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev);
2985 if (filter_info->type_mask &
2986 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE))) {
2987 hinic_ethertype_filter_remove(filter_info,
2988 HINIC_PKT_TYPE_FIND_ID(PKT_LACP_TYPE));
2989 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_LACP_TYPE,
2990 filter_info->qid, false, true);
2992 (void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_LACP);
2995 if (filter_info->type_mask &
2996 (1 << HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE))) {
2997 hinic_ethertype_filter_remove(filter_info,
2998 HINIC_PKT_TYPE_FIND_ID(PKT_ARP_TYPE));
2999 ret = hinic_set_fdir_filter(nic_dev->hwdev, PKT_ARP_TYPE,
3000 filter_info->qid, false, true);
3004 PMD_DRV_LOG(ERR, "Clear ethertype failed, filter type: 0x%x",
3005 filter_info->pkt_type);
3008 /* Remove all the fdir filters */
3009 static void hinic_clear_all_fdir_filter(struct rte_eth_dev *dev)
3011 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3012 struct hinic_tcam_info *tcam_info =
3013 HINIC_DEV_PRIVATE_TO_TCAM_INFO(dev->data->dev_private);
3014 struct hinic_tcam_filter *tcam_filter_ptr;
3016 while ((tcam_filter_ptr = TAILQ_FIRST(&tcam_info->tcam_list)))
3017 (void)hinic_del_tcam_filter(dev, tcam_filter_ptr);
3019 (void)hinic_set_fdir_filter(nic_dev->hwdev, 0, 0, 0, false);
3021 (void)hinic_flush_tcam_rule(nic_dev->hwdev);
3024 static void hinic_filterlist_flush(struct rte_eth_dev *dev)
3026 struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
3027 struct hinic_ethertype_filter_ele *ethertype_filter_ptr;
3028 struct hinic_fdir_rule_ele *fdir_rule_ptr;
3029 struct hinic_flow_mem *hinic_flow_mem_ptr;
3030 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3032 while ((ntuple_filter_ptr =
3033 TAILQ_FIRST(&nic_dev->filter_ntuple_list))) {
3034 TAILQ_REMOVE(&nic_dev->filter_ntuple_list, ntuple_filter_ptr,
3036 rte_free(ntuple_filter_ptr);
3039 while ((ethertype_filter_ptr =
3040 TAILQ_FIRST(&nic_dev->filter_ethertype_list))) {
3041 TAILQ_REMOVE(&nic_dev->filter_ethertype_list,
3042 ethertype_filter_ptr,
3044 rte_free(ethertype_filter_ptr);
3047 while ((fdir_rule_ptr =
3048 TAILQ_FIRST(&nic_dev->filter_fdir_rule_list))) {
3049 TAILQ_REMOVE(&nic_dev->filter_fdir_rule_list, fdir_rule_ptr,
3051 rte_free(fdir_rule_ptr);
3054 while ((hinic_flow_mem_ptr =
3055 TAILQ_FIRST(&nic_dev->hinic_flow_list))) {
3056 TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
3058 rte_free(hinic_flow_mem_ptr->flow);
3059 rte_free(hinic_flow_mem_ptr);
3063 /* Destroy all flow rules associated with a port on hinic. */
3064 static int hinic_flow_flush(struct rte_eth_dev *dev,
3065 __rte_unused struct rte_flow_error *error)
3067 struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
3069 hinic_clear_all_ntuple_filter(dev);
3070 hinic_clear_all_ethertype_filter(dev);
3071 hinic_clear_all_fdir_filter(dev);
3072 hinic_filterlist_flush(dev);
3074 PMD_DRV_LOG(INFO, "Flush flow succeed, func_id: 0x%x",
3075 hinic_global_func_id(nic_dev->hwdev));
3079 void hinic_destroy_fdir_filter(struct rte_eth_dev *dev)
3081 hinic_clear_all_ntuple_filter(dev);
3082 hinic_clear_all_ethertype_filter(dev);
3083 hinic_clear_all_fdir_filter(dev);
3084 hinic_filterlist_flush(dev);
3087 const struct rte_flow_ops hinic_flow_ops = {
3088 .validate = hinic_flow_validate,
3089 .create = hinic_flow_create,
3090 .destroy = hinic_flow_destroy,
3091 .flush = hinic_flow_flush,
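/*
 * Editor's note: these callbacks are invoked through the generic rte_flow API
 * (rte_flow_validate()/rte_flow_create()/rte_flow_destroy()/rte_flow_flush());
 * the ethdev layer obtains this ops table from the driver, typically via its
 * filter_ctrl handler for RTE_ETH_FILTER_GENERIC in this DPDK generation.
 */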