/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "base/hinic_compat.h"
#include "base/hinic_pmd_hwdev.h"
#include "base/hinic_pmd_hwif.h"
#include "base/hinic_pmd_wq.h"
#include "base/hinic_pmd_cmdq.h"
#include "base/hinic_pmd_niccfg.h"
#include "hinic_pmd_ethdev.h"
#define HINIC_MAX_RX_QUEUE_NUM		64

#ifndef UINT8_MAX
#define UINT8_MAX	(u8)(~((u8)0))		/* 0xFF */
#define UINT16_MAX	(u16)(~((u16)0))	/* 0xFFFF */
#define UINT32_MAX	(u32)(~((u32)0))	/* 0xFFFFFFFF */
#define UINT64_MAX	(u64)(~((u64)0))	/* 0xFFFFFFFFFFFFFFFF */
#define ASCII_MAX	(0x7F)
#endif
#define PA_ETH_TYPE_ROCE	0
#define PA_ETH_TYPE_IPV4	1
#define PA_ETH_TYPE_IPV6	2
#define PA_ETH_TYPE_OTHER	3

#define PA_IP_PROTOCOL_TYPE_TCP		1
#define PA_IP_PROTOCOL_TYPE_UDP		2
#define PA_IP_PROTOCOL_TYPE_ICMP	3
#define PA_IP_PROTOCOL_TYPE_IPV4_IGMP	4
#define PA_IP_PROTOCOL_TYPE_SCTP	5
#define PA_IP_PROTOCOL_TYPE_VRRP	112

#define IP_HEADER_PROTOCOL_TYPE_TCP	6

#define HINIC_MIN_N_TUPLE_PRIO		1
#define HINIC_MAX_N_TUPLE_PRIO		7

/* TCAM type mask in hardware */
#define TCAM_PKT_BGP_SPORT	1
#define TCAM_PKT_VRRP		2
#define TCAM_PKT_BGP_DPORT	3
#define TCAM_PKT_LACP		4

#define BGP_DPORT_ID		179
#define IPPROTO_VRRP		112

/* Packet type defined in hardware to perform filter */
#define PKT_IGMP_IPV4_TYPE	64
#define PKT_ICMP_IPV4_TYPE	65
#define PKT_ICMP_IPV6_TYPE	66
#define PKT_ICMP_IPV6RS_TYPE	67
#define PKT_ICMP_IPV6RA_TYPE	68
#define PKT_ICMP_IPV6NS_TYPE	69
#define PKT_ICMP_IPV6NA_TYPE	70
#define PKT_ICMP_IPV6RE_TYPE	71
#define PKT_DHCP_IPV4_TYPE	72
#define PKT_DHCP_IPV6_TYPE	73
#define PKT_LACP_TYPE		74
#define PKT_ARP_REQ_TYPE	79
#define PKT_ARP_REP_TYPE	80
#define PKT_ARP_TYPE		81
#define PKT_BGPD_DPORT_TYPE	83
#define PKT_BGPD_SPORT_TYPE	84
#define PKT_VRRP_TYPE		85

#define HINIC_DEV_PRIVATE_TO_FILTER_INFO(nic_dev) \
	(&((struct hinic_nic_dev *)nic_dev)->filter)
/**
 * An endless loop can never happen under the following assumptions:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
		     const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
		    const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
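/*
 * Illustrative sketch (not part of the driver): a parser can walk a whole
 * pattern with the helper above, visiting every non-VOID item up to END:
 *
 *	const struct rte_flow_item *it = next_no_void_pattern(pattern, NULL);
 *
 *	while (it->type != RTE_FLOW_ITEM_TYPE_END)
 *		it = next_no_void_pattern(pattern, it);
 */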
static int hinic_check_ethertype_attr_ele(const struct rte_flow_attr *attr,
					struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int hinic_check_filter_arg(const struct rte_flow_attr *attr,
				const struct rte_flow_item *pattern,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
static int hinic_check_ethertype_first_item(const struct rte_flow_item *item,
					struct rte_flow_error *error)
{
	/* The first non-void item should be MAC */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	return 0;
}
static int
hinic_parse_ethertype_action(const struct rte_flow_action *actions,
			const struct rte_flow_action *act,
			const struct rte_flow_action_queue *act_q,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
		act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule, and get the ethertype
 * filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
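/*
 * Illustrative application-side sketch (not part of the driver, values are
 * examples only): a rule of the shape described above, matching ARP frames
 * (ether type 0x0806) and steering them to queue 3, could be built as:
 *
 *	struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0806) };
 *	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xFFFF) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */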
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act = NULL;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))
		return -rte_errno;

	item = next_no_void_pattern(pattern, NULL);
	if (hinic_check_ethertype_first_item(item, error))
		return -rte_errno;

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/*
	 * Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/*
	 * If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	if (hinic_parse_ethertype_action(actions, act, act_q, filter, error))
		return -rte_errno;

	if (hinic_check_ethertype_attr_ele(attr, error))
		return -rte_errno;

	return 0;
}
static int
hinic_parse_ethertype_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	if (cons_parse_ethertype_filter(attr, pattern, actions, filter, error))
		return -rte_errno;

	/* NIC doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Queue index too big");
		return -rte_errno;
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Drop option is unsupported");
		return -rte_errno;
	}

	/* Hinic only supports LACP/ARP ether types */
	if (filter->ether_type != RTE_ETHER_TYPE_SLOW &&
		filter->ether_type != RTE_ETHER_TYPE_ARP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			"only lacp/arp type supported by ethertype filter");
		return -rte_errno;
	}

	return 0;
}
static int hinic_check_ntuple_attr_ele(const struct rte_flow_attr *attr,
				struct rte_eth_ntuple_filter *filter,
				struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	if (attr->priority < HINIC_MIN_N_TUPLE_PRIO ||
		attr->priority > HINIC_MAX_N_TUPLE_PRIO)
		filter->priority = 1;
	else
		filter->priority = (uint16_t)attr->priority;

	return 0;
}
static int
hinic_check_ntuple_act_ele(__rte_unused const struct rte_flow_item *item,
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;

	/*
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Flow action type is not QUEUE.");
		return -rte_errno;
	}

	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* Check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Next not void item is not END.");
		return -rte_errno;
	}

	return 0;
}
static int hinic_ntuple_item_check_ether(const struct rte_flow_item **ipv4_item,
					const struct rte_flow_item pattern[],
					struct rte_flow_error *error)
{
	const struct rte_flow_item *item;

	/* The first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
		item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	*ipv4_item = item;
	return 0;
}
static int
hinic_ntuple_item_check_ipv4(const struct rte_flow_item **in_out_item,
			const struct rte_flow_item pattern[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item *item = *in_out_item;

	/* Get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/*
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
		ipv4_mask->hdr.type_of_service ||
		ipv4_mask->hdr.total_length ||
		ipv4_mask->hdr.packet_id ||
		ipv4_mask->hdr.fragment_offset ||
		ipv4_mask->hdr.time_to_live ||
		ipv4_mask->hdr.hdr_checksum ||
		!ipv4_mask->hdr.next_proto_id) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* Get next no void item */
	*in_out_item = next_no_void_pattern(pattern, item);
	return 0;
}
static int hinic_ntuple_item_check_l4(const struct rte_flow_item **in_out_item,
				const struct rte_flow_item pattern[],
				struct rte_eth_ntuple_filter *filter,
				struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_icmp *icmp_mask;
	const struct rte_flow_item *item = *in_out_item;
	u32 ntuple_filter_size = sizeof(struct rte_eth_ntuple_filter);

	if (item->type == RTE_FLOW_ITEM_TYPE_END)
		return 0;

	/* Get TCP or UDP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
		(!item->spec || !item->mask)) {
		memset(filter, 0, ntuple_filter_size);
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, ntuple_filter_size);
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/*
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
			tcp_mask->hdr.recv_ack ||
			tcp_mask->hdr.data_off ||
			tcp_mask->hdr.rx_win ||
			tcp_mask->hdr.cksum ||
			tcp_mask->hdr.tcp_urp) {
			memset(filter, 0, ntuple_filter_size);
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, ntuple_filter_size);
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_ICMP) {
		icmp_mask = (const struct rte_flow_item_icmp *)item->mask;

		/* ICMP all should be masked. */
		if (icmp_mask->hdr.icmp_cksum ||
			icmp_mask->hdr.icmp_ident ||
			icmp_mask->hdr.icmp_seq_nb ||
			icmp_mask->hdr.icmp_type ||
			icmp_mask->hdr.icmp_code) {
			memset(filter, 0, ntuple_filter_size);
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* Get next no void item */
	*in_out_item = next_no_void_pattern(pattern, item);
	return 0;
}
static int hinic_ntuple_item_check_end(const struct rte_flow_item *item,
				struct rte_eth_ntuple_filter *filter,
				struct rte_flow_error *error)
{
	/* Check if the next not void item is END */
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	return 0;
}

static int hinic_check_ntuple_item_ele(const struct rte_flow_item *item,
				const struct rte_flow_item pattern[],
				struct rte_eth_ntuple_filter *filter,
				struct rte_flow_error *error)
{
	if (hinic_ntuple_item_check_ether(&item, pattern, error) ||
		hinic_ntuple_item_check_ipv4(&item, pattern, filter, error) ||
		hinic_ntuple_item_check_l4(&item, pattern, filter, error) ||
		hinic_ntuple_item_check_end(item, filter, error))
		return -rte_errno;

	return 0;
}
/**
 * Parse the rule to see if it is an n-tuple rule, and get the n-tuple
 * filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
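/*
 * Illustrative application-side sketch (not part of the driver, addresses
 * and ports are examples only): the UDP 5-tuple pattern described above
 * corresponds to items of this shape, all values in network byte order:
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = RTE_BE32(UINT32_MAX),
 *			.dst_addr = RTE_BE32(UINT32_MAX),
 *			.next_proto_id = UINT8_MAX,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) },
 *	};
 */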
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))
		return -rte_errno;

	if (hinic_check_ntuple_item_ele(item, pattern, filter, error))
		return -rte_errno;

	if (hinic_check_ntuple_act_ele(item, actions, filter, error))
		return -rte_errno;

	if (hinic_check_ntuple_attr_ele(attr, filter, error))
		return -rte_errno;

	return 0;
}
static int
hinic_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* Hinic doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Hinic doesn't support many priorities */
	if (filter->priority < HINIC_MIN_N_TUPLE_PRIO ||
		filter->priority > HINIC_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	/* Fixed value for hinic */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
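/*
 * Note: RTE_5TUPLE_FLAGS (rte_eth_ctrl.h) is the OR of the five
 * RTE_NTUPLE_FLAGS_* bits (dst/src IP, dst/src port, proto), i.e. the only
 * n-tuple mode this driver programs into hardware.
 */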
static int hinic_normal_item_check_ether(const struct rte_flow_item **ip_item,
					const struct rte_flow_item pattern[],
					struct rte_flow_error *error)
{
	const struct rte_flow_item *item;

	/* The first not void item can be MAC or IPv4 or TCP or UDP */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
		item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM, item,
			"Not supported by fdir filter, support mac, ipv4, tcp, udp");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, item,
			"Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* All should be masked. */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support mac");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support mac, ipv4");
			return -rte_errno;
		}
	}

	*ip_item = item;
	return 0;
}
static int hinic_normal_item_check_ip(const struct rte_flow_item **in_out_item,
				const struct rte_flow_item pattern[],
				struct hinic_fdir_rule *rule,
				struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item *item = *in_out_item;

	/* Get the IPv4 info */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		if (!item->mask) {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid fdir filter mask");
			return -rte_errno;
		}

		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
		/*
		 * Only support src & dst addresses,
		 * others should be masked.
		 */
		if (ipv4_mask->hdr.version_ihl ||
			ipv4_mask->hdr.type_of_service ||
			ipv4_mask->hdr.total_length ||
			ipv4_mask->hdr.packet_id ||
			ipv4_mask->hdr.fragment_offset ||
			ipv4_mask->hdr.time_to_live ||
			ipv4_mask->hdr.next_proto_id ||
			ipv4_mask->hdr.hdr_checksum) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support src, dst ip");
			return -rte_errno;
		}

		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->hinic_fdir.dst_ip = ipv4_spec->hdr.dst_addr;
			rule->hinic_fdir.src_ip = ipv4_spec->hdr.src_addr;
		}

		/*
		 * Check if the next not void item is
		 * TCP or UDP or END.
		 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
			item->type != RTE_FLOW_ITEM_TYPE_UDP &&
			item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM, item,
				"Not supported by fdir filter, support tcp, udp, end");
			return -rte_errno;
		}
	}

	*in_out_item = item;
	return 0;
}
static int hinic_normal_item_check_l4(const struct rte_flow_item **in_out_item,
				const struct rte_flow_item pattern[],
				struct hinic_fdir_rule *rule,
				struct rte_flow_error *error)
{
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item *item = *in_out_item;

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Get TCP/UDP info */
		if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
			/*
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct hinic_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM, item,
					"Not supported by fdir filter, support src, dst ports");
				return -rte_errno;
			}

			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
			if (tcp_mask->hdr.sent_seq ||
				tcp_mask->hdr.recv_ack ||
				tcp_mask->hdr.data_off ||
				tcp_mask->hdr.tcp_flags ||
				tcp_mask->hdr.rx_win ||
				tcp_mask->hdr.cksum ||
				tcp_mask->hdr.tcp_urp) {
				memset(rule, 0, sizeof(struct hinic_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter, support tcp");
				return -rte_errno;
			}

			rule->mask.src_port_mask = tcp_mask->hdr.src_port;
			rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

			if (item->spec) {
				tcp_spec =
					(const struct rte_flow_item_tcp *)
					item->spec;
				rule->hinic_fdir.src_port =
					tcp_spec->hdr.src_port;
				rule->hinic_fdir.dst_port =
					tcp_spec->hdr.dst_port;
			}
		} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
			/*
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct hinic_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter, support src, dst ports");
				return -rte_errno;
			}

			udp_mask = (const struct rte_flow_item_udp *)item->mask;
			if (udp_mask->hdr.dgram_len ||
				udp_mask->hdr.dgram_cksum) {
				memset(rule, 0, sizeof(struct hinic_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter, support udp");
				return -rte_errno;
			}

			rule->mask.src_port_mask = udp_mask->hdr.src_port;
			rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

			if (item->spec) {
				udp_spec =
					(const struct rte_flow_item_udp *)
					item->spec;
				rule->hinic_fdir.src_port =
					udp_spec->hdr.src_port;
				rule->hinic_fdir.dst_port =
					udp_spec->hdr.dst_port;
			}
		} else {
			memset(rule, 0, sizeof(struct hinic_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter, support tcp/udp");
			return -rte_errno;
		}

		/* Get next no void item */
		*in_out_item = next_no_void_pattern(pattern, item);
	}

	return 0;
}
static int hinic_normal_item_check_end(const struct rte_flow_item *item,
				struct hinic_fdir_rule *rule,
				struct rte_flow_error *error)
{
	/* Check if the next not void item is END */
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter, support end");
		return -rte_errno;
	}

	return 0;
}

static int hinic_check_normal_item_ele(const struct rte_flow_item *item,
				const struct rte_flow_item pattern[],
				struct hinic_fdir_rule *rule,
				struct rte_flow_error *error)
{
	if (hinic_normal_item_check_ether(&item, pattern, error) ||
		hinic_normal_item_check_ip(&item, pattern, rule, error) ||
		hinic_normal_item_check_l4(&item, pattern, rule, error) ||
		hinic_normal_item_check_end(item, rule, error))
		return -rte_errno;

	return 0;
}
static int hinic_check_normal_attr_ele(const struct rte_flow_attr *attr,
				struct hinic_fdir_rule *rule,
				struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	if (attr->egress) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int hinic_check_normal_act_ele(const struct rte_flow_item *item,
				const struct rte_flow_action actions[],
				struct hinic_fdir_rule *rule,
				struct rte_flow_error *error)
{
	const struct rte_flow_action *act;

	/* Check if the first not void action is QUEUE */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				item, "Not supported action.");
		return -rte_errno;
	}

	rule->queue = ((const struct rte_flow_action_queue *)act->conf)->index;

	/* Check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct hinic_fdir_rule));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an IP or MAC-VLAN flow director rule,
 * and get the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP (optional).
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 1.2.3.6	0xFFFFFFFF
 *		dst_addr 1.2.3.5	0xFFFFFFFF
 * UDP/TCP	src_port 80		0xFFFF
 *		dst_port 80		0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
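/*
 * Illustrative sketch (not part of the driver, values are examples only):
 * the TCP pattern described above, written in testpmd's flow syntax and
 * assuming port 0 and queue 6:
 *
 *	flow create 0 ingress
 *		pattern eth / ipv4 src is 1.2.3.6 dst is 1.2.3.5 /
 *			tcp src is 80 dst is 80 / end
 *		actions queue index 6 / end
 */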
static int
hinic_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = NULL;

	if (hinic_check_filter_arg(attr, pattern, actions, error))
		return -rte_errno;

	if (hinic_check_normal_item_ele(item, pattern, rule, error))
		return -rte_errno;

	if (hinic_check_normal_attr_ele(attr, rule, error))
		return -rte_errno;

	if (hinic_check_normal_act_ele(item, actions, rule, error))
		return -rte_errno;

	return 0;
}
static int
hinic_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct hinic_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;

	ret = hinic_parse_fdir_filter_normal(attr, pattern,
						actions, rule, error);
	if (ret)
		return ret;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return 0;
}
/**
 * Check if the flow rule is supported by nic.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int hinic_flow_validate(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct hinic_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = hinic_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = hinic_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct hinic_fdir_rule));
	ret = hinic_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);

	return ret;
}
static int
ntuple_ip_filter(struct rte_eth_ntuple_filter *filter,
		struct hinic_5tuple_filter_info *filter_info)
{
	switch (filter->dst_ip_mask) {
	case UINT32_MAX:
		filter_info->dst_ip_mask = 0;
		filter_info->dst_ip = filter->dst_ip;
		break;
	case 0:
		filter_info->dst_ip_mask = 1;
		filter_info->dst_ip = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid dst_ip mask.");
		return -EINVAL;
	}

	switch (filter->src_ip_mask) {
	case UINT32_MAX:
		filter_info->src_ip_mask = 0;
		filter_info->src_ip = filter->src_ip;
		break;
	case 0:
		filter_info->src_ip_mask = 1;
		filter_info->src_ip = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid src_ip mask.");
		return -EINVAL;
	}

	return 0;
}

static int
ntuple_port_filter(struct rte_eth_ntuple_filter *filter,
		struct hinic_5tuple_filter_info *filter_info)
{
	switch (filter->dst_port_mask) {
	case UINT16_MAX:
		filter_info->dst_port_mask = 0;
		filter_info->dst_port = filter->dst_port;
		break;
	case 0:
		filter_info->dst_port_mask = 1;
		filter_info->dst_port = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid dst_port mask.");
		return -EINVAL;
	}

	switch (filter->src_port_mask) {
	case UINT16_MAX:
		filter_info->src_port_mask = 0;
		filter_info->src_port = filter->src_port;
		break;
	case 0:
		filter_info->src_port_mask = 1;
		filter_info->src_port = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid src_port mask.");
		return -EINVAL;
	}

	return 0;
}

static int
ntuple_proto_filter(struct rte_eth_ntuple_filter *filter,
		struct hinic_5tuple_filter_info *filter_info)
{
	switch (filter->proto_mask) {
	case UINT8_MAX:
		filter_info->proto_mask = 0;
		filter_info->proto = filter->proto;
		break;
	case 0:
		filter_info->proto_mask = 1;
		filter_info->proto = 0;
		break;
	default:
		PMD_DRV_LOG(ERR, "Invalid protocol mask.");
		return -EINVAL;
	}

	return 0;
}

static int
ntuple_filter_to_5tuple(struct rte_eth_ntuple_filter *filter,
			struct hinic_5tuple_filter_info *filter_info)
{
	if (filter->queue >= HINIC_MAX_RX_QUEUE_NUM ||
		filter->priority > HINIC_MAX_N_TUPLE_PRIO ||
		filter->priority < HINIC_MIN_N_TUPLE_PRIO)
		return -EINVAL;

	if (ntuple_ip_filter(filter, filter_info) ||
		ntuple_port_filter(filter, filter_info) ||
		ntuple_proto_filter(filter, filter_info))
		return -EINVAL;

	filter_info->priority = (uint8_t)filter->priority;
	return 0;
}
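/*
 * Note the mask inversion performed above: in struct rte_eth_ntuple_filter
 * a mask of UINT32_MAX/UINT16_MAX/UINT8_MAX means "match this field
 * exactly", while in struct hinic_5tuple_filter_info a *_mask of 0 means
 * "compare the stored value" and 1 means "ignore the field". For example
 * (illustrative values):
 *
 *	struct rte_eth_ntuple_filter f = {
 *		.dst_ip = RTE_IPV4(192, 168, 1, 20),
 *		.dst_ip_mask = UINT32_MAX,	// exact match
 *		.src_ip_mask = 0,		// any source address
 *	};
 *
 * After ntuple_filter_to_5tuple():
 *	info.dst_ip_mask == 0, info.dst_ip == f.dst_ip
 *	info.src_ip_mask == 1, info.src_ip == 0
 */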
static inline struct hinic_5tuple_filter *
hinic_5tuple_filter_lookup(struct hinic_5tuple_filter_list *filter_list,
			struct hinic_5tuple_filter_info *key)
{
	struct hinic_5tuple_filter *it;

	TAILQ_FOREACH(it, filter_list, entries) {
		if (memcmp(key, &it->filter_info,
			sizeof(struct hinic_5tuple_filter_info)) == 0) {
			return it;
		}
	}

	return NULL;
}
static int hinic_set_bgp_dport_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule bgp_rule;
	struct tag_pa_action bgp_action;

	memset(&bgp_rule, 0, sizeof(bgp_rule));
	memset(&bgp_action, 0, sizeof(bgp_action));

	/* BGP TCAM rule */
	bgp_rule.eth_type = PA_ETH_TYPE_IPV4; /* Eth type is IPv4 */
	bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
	bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
	bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
	bgp_rule.eth_ip_tcp.dport.val16 = BGP_DPORT_ID; /* Dport is 179 */
	bgp_rule.eth_ip_tcp.dport.mask16 = UINT16_MAX;

	/* BGP TCAM action */
	bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
	bgp_action.fwd_action = 0x7; /* 0x3: drop; 0x7: not convert */
	bgp_action.pkt_type = PKT_BGPD_DPORT_TYPE; /* bgp_dport: 83 */
	bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
			       * results, no need to convert
			       */
	bgp_action.push_len = 0xf; /* push_len: 0xf, not convert */

	return hinic_set_fdir_tcam(nic_dev->hwdev,
			TCAM_PKT_BGP_DPORT, &bgp_rule, &bgp_action);
}
static int hinic_set_bgp_sport_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule bgp_rule;
	struct tag_pa_action bgp_action;

	memset(&bgp_rule, 0, sizeof(bgp_rule));
	memset(&bgp_action, 0, sizeof(bgp_action));

	/* BGP TCAM rule */
	bgp_rule.eth_type = PA_ETH_TYPE_IPV4;
	bgp_rule.ip_header.protocol.val8 = IP_HEADER_PROTOCOL_TYPE_TCP;
	bgp_rule.ip_header.protocol.mask8 = UINT8_MAX;
	bgp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
	bgp_rule.eth_ip_tcp.sport.val16 = BGP_DPORT_ID;
	bgp_rule.eth_ip_tcp.sport.mask16 = UINT16_MAX;

	/* BGP TCAM action */
	bgp_action.err_type = 0x3f; /* err from ipsu, not convert */
	bgp_action.fwd_action = 0x7; /* 0x3: drop; 0x7: not convert */
	bgp_action.pkt_type = PKT_BGPD_SPORT_TYPE; /* bgp_sport: 84 */
	bgp_action.pri = 0xf; /* pri of BGP is 0xf, result from ipsu parse
			       * results, no need to convert
			       */
	bgp_action.push_len = 0xf; /* push_len: 0xf, not convert */

	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_BGP_SPORT,
			&bgp_rule, &bgp_action);
}
static int hinic_set_vrrp_tcam(struct hinic_nic_dev *nic_dev)
{
	struct tag_pa_rule vrrp_rule;
	struct tag_pa_action vrrp_action;

	memset(&vrrp_rule, 0, sizeof(vrrp_rule));
	memset(&vrrp_action, 0, sizeof(vrrp_action));

	/* VRRP TCAM rule */
	vrrp_rule.eth_type = PA_ETH_TYPE_IPV4;
	vrrp_rule.ip_protocol_type = PA_IP_PROTOCOL_TYPE_TCP;
	vrrp_rule.ip_header.protocol.mask8 = 0xff;
	vrrp_rule.ip_header.protocol.val8 = PA_IP_PROTOCOL_TYPE_VRRP;

	/* VRRP TCAM action */
	vrrp_action.err_type = 0x3f;
	vrrp_action.fwd_action = 0x7;
	vrrp_action.pkt_type = PKT_VRRP_TYPE; /* VRRP: 85 */
	vrrp_action.pri = 0xf;
	vrrp_action.push_len = 0xf;

	return hinic_set_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP,
			&vrrp_rule, &vrrp_action);
}
static inline int
hinic_filter_info_init(struct hinic_5tuple_filter *filter,
		struct hinic_filter_info *filter_info)
{
	switch (filter->filter_info.proto) {
	case IPPROTO_TCP:
		/* Filter type is bgp type if dst_port or src_port is 179 */
		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID) &&
			!(filter->filter_info.dst_port_mask)) {
			filter_info->pkt_type = PKT_BGPD_DPORT_TYPE;
		} else if (filter->filter_info.src_port ==
			RTE_BE16(BGP_DPORT_ID) &&
			!(filter->filter_info.src_port_mask)) {
			filter_info->pkt_type = PKT_BGPD_SPORT_TYPE;
		} else {
			PMD_DRV_LOG(INFO, "TCP PROTOCOL:5tuple filters"
					" just support BGP now, proto:0x%x, "
					"dst_port:0x%x, dst_port_mask:0x%x."
					"src_port:0x%x, src_port_mask:0x%x.",
					filter->filter_info.proto,
					filter->filter_info.dst_port,
					filter->filter_info.dst_port_mask,
					filter->filter_info.src_port,
					filter->filter_info.src_port_mask);
			return -EINVAL;
		}
		break;

	case IPPROTO_VRRP:
		filter_info->pkt_type = PKT_VRRP_TYPE;
		break;

	case IPPROTO_ICMP:
		filter_info->pkt_type = PKT_ICMP_IPV4_TYPE;
		break;

	case IPPROTO_ICMPV6:
		filter_info->pkt_type = PKT_ICMP_IPV6_TYPE;
		break;

	default:
		PMD_DRV_LOG(ERR, "5tuple filters just support BGP/VRRP/ICMP now, "
				"proto: 0x%x, dst_port: 0x%x, dst_port_mask: 0x%x."
				"src_port: 0x%x, src_port_mask: 0x%x.",
				filter->filter_info.proto,
				filter->filter_info.dst_port,
				filter->filter_info.dst_port_mask,
				filter->filter_info.src_port,
				filter->filter_info.src_port_mask);
		return -EINVAL;
	}

	return 0;
}
static inline int
hinic_lookup_new_filter(struct hinic_5tuple_filter *filter,
			struct hinic_filter_info *filter_info,
			int *index)
{
	int type_id;

	type_id = HINIC_PKT_TYPE_FIND_ID(filter_info->pkt_type);

	if (type_id > HINIC_MAX_Q_FILTERS - 1) {
		PMD_DRV_LOG(ERR, "Pkt filters only support 64 filter type.");
		return -EINVAL;
	}

	if (!(filter_info->type_mask & (1 << type_id))) {
		filter_info->type_mask |= 1 << type_id;
		filter->index = type_id;
		filter_info->pkt_filters[type_id].enable = true;
		filter_info->pkt_filters[type_id].pkt_proto =
			filter->filter_info.proto;
		TAILQ_INSERT_TAIL(&filter_info->fivetuple_list,
				filter, entries);
	} else {
		PMD_DRV_LOG(ERR, "Filter type: %d exists.", type_id);
		return -EINVAL;
	}

	*index = type_id;
	return 0;
}
/*
 * Add a 5tuple filter
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 * @param filter
 *   Pointer to the filter that will be added.
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int
hinic_add_5tuple_filter(struct rte_eth_dev *dev,
			struct hinic_5tuple_filter *filter)
{
	struct hinic_filter_info *filter_info =
		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	int i, ret_fw;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	if (hinic_filter_info_init(filter, filter_info) ||
		hinic_lookup_new_filter(filter, filter_info, &i))
		return -EFAULT;

	ret_fw = hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
					filter_info->qid,
					filter_info->pkt_filters[i].enable,
					true);
	if (ret_fw) {
		PMD_DRV_LOG(ERR, "Set fdir filter failed, type: 0x%x, qid: 0x%x, enable: 0x%x",
			filter_info->pkt_type, filter->queue,
			filter_info->pkt_filters[i].enable);
		return -EFAULT;
	}

	PMD_DRV_LOG(INFO, "Add 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
			filter_info->pkt_type, filter_info->qid,
			filter_info->pkt_filters[filter->index].enable);

	switch (filter->filter_info.proto) {
	case IPPROTO_TCP:
		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID)) {
			ret_fw = hinic_set_bgp_dport_tcam(nic_dev);
			if (ret_fw) {
				PMD_DRV_LOG(ERR, "Set dport bgp failed, "
					"type: 0x%x, qid: 0x%x, enable: 0x%x",
					filter_info->pkt_type, filter->queue,
					filter_info->pkt_filters[i].enable);
				return -EFAULT;
			}

			PMD_DRV_LOG(INFO, "Set dport bgp succeed, qid: 0x%x, enable: 0x%x",
				filter->queue,
				filter_info->pkt_filters[i].enable);
		} else if (filter->filter_info.src_port ==
			RTE_BE16(BGP_DPORT_ID)) {
			ret_fw = hinic_set_bgp_sport_tcam(nic_dev);
			if (ret_fw) {
				PMD_DRV_LOG(ERR, "Set sport bgp failed, "
					"type: 0x%x, qid: 0x%x, enable: 0x%x",
					filter_info->pkt_type, filter->queue,
					filter_info->pkt_filters[i].enable);
				return -EFAULT;
			}

			PMD_DRV_LOG(INFO, "Set sport bgp succeed, qid: 0x%x, enable: 0x%x",
					filter->queue,
					filter_info->pkt_filters[i].enable);
		}
		break;

	case IPPROTO_VRRP:
		ret_fw = hinic_set_vrrp_tcam(nic_dev);
		if (ret_fw) {
			PMD_DRV_LOG(ERR, "Set VRRP failed, "
				"type: 0x%x, qid: 0x%x, enable: 0x%x",
				filter_info->pkt_type, filter->queue,
				filter_info->pkt_filters[i].enable);
			return -EFAULT;
		}

		PMD_DRV_LOG(INFO, "Set VRRP succeed, qid: 0x%x, enable: 0x%x",
				filter->queue,
				filter_info->pkt_filters[i].enable);
		break;

	default:
		break;
	}

	return 0;
}
/*
 * Remove a 5tuple filter
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 * @param filter
 *   Pointer to the filter that will be removed.
 */
static void
hinic_remove_5tuple_filter(struct rte_eth_dev *dev,
			struct hinic_5tuple_filter *filter)
{
	struct hinic_filter_info *filter_info =
		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	switch (filter->filter_info.proto) {
	case IPPROTO_VRRP:
		(void)hinic_clear_fdir_tcam(nic_dev->hwdev, TCAM_PKT_VRRP);
		break;

	case IPPROTO_TCP:
		if (filter->filter_info.dst_port == RTE_BE16(BGP_DPORT_ID))
			(void)hinic_clear_fdir_tcam(nic_dev->hwdev,
						TCAM_PKT_BGP_DPORT);
		else if (filter->filter_info.src_port == RTE_BE16(BGP_DPORT_ID))
			(void)hinic_clear_fdir_tcam(nic_dev->hwdev,
						TCAM_PKT_BGP_SPORT);
		break;

	default:
		break;
	}

	hinic_filter_info_init(filter, filter_info);

	filter_info->pkt_filters[filter->index].enable = false;
	filter_info->pkt_filters[filter->index].pkt_proto = 0;

	PMD_DRV_LOG(INFO, "Del 5tuple succeed, type: 0x%x, qid: 0x%x, enable: 0x%x",
		filter_info->pkt_type,
		filter_info->pkt_filters[filter->index].qid,
		filter_info->pkt_filters[filter->index].enable);
	(void)hinic_set_fdir_filter(nic_dev->hwdev, filter_info->pkt_type,
				filter_info->pkt_filters[filter->index].qid,
				filter_info->pkt_filters[filter->index].enable,
				true);

	filter_info->pkt_type = 0;
	filter_info->qid = 0;
	filter_info->pkt_filters[filter->index].qid = 0;
	filter_info->type_mask &= ~(1 << (filter->index));
	TAILQ_REMOVE(&filter_info->fivetuple_list, filter, entries);

	rte_free(filter);
}
/*
 * Add or delete an ntuple filter
 *
 * @param dev
 *   Pointer to struct rte_eth_dev.
 * @param ntuple_filter
 *   Pointer to struct rte_eth_ntuple_filter
 * @param add
 *   If true, add filter; if false, remove filter
 * @return
 *   - On success, zero.
 *   - On failure, a negative value.
 */
static int hinic_add_del_ntuple_filter(struct rte_eth_dev *dev,
				struct rte_eth_ntuple_filter *ntuple_filter,
				bool add)
{
	struct hinic_filter_info *filter_info =
		HINIC_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct hinic_5tuple_filter_info filter_5tuple;
	struct hinic_5tuple_filter *filter;
	int ret;

	if (ntuple_filter->flags != RTE_5TUPLE_FLAGS) {
		PMD_DRV_LOG(ERR, "Only 5tuple is supported.");
		return -EINVAL;
	}

	memset(&filter_5tuple, 0, sizeof(struct hinic_5tuple_filter_info));
	ret = ntuple_filter_to_5tuple(ntuple_filter, &filter_5tuple);
	if (ret < 0)
		return ret;

	filter = hinic_5tuple_filter_lookup(&filter_info->fivetuple_list,
					&filter_5tuple);
	if (filter != NULL && add) {
		PMD_DRV_LOG(ERR, "Filter exists.");
		return -EEXIST;
	}
	if (filter == NULL && !add) {
		PMD_DRV_LOG(ERR, "Filter doesn't exist.");
		return -ENOENT;
	}

	if (add) {
		filter = rte_zmalloc("hinic_5tuple_filter",
				sizeof(struct hinic_5tuple_filter), 0);
		if (filter == NULL)
			return -ENOMEM;
		rte_memcpy(&filter->filter_info, &filter_5tuple,
				sizeof(struct hinic_5tuple_filter_info));
		filter->queue = ntuple_filter->queue;

		filter_info->qid = ntuple_filter->queue;

		ret = hinic_add_5tuple_filter(dev, filter);
		if (ret)
			rte_free(filter);

		return ret;
	}

	hinic_remove_5tuple_filter(dev, filter);

	return 0;
}
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter;
 * we let it use the first filter it hits, so the sequence matters.
 */
static struct rte_flow *hinic_flow_create(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_flow *flow = NULL;
	struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
	struct hinic_flow_mem *hinic_flow_mem_ptr;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	flow = rte_zmalloc("hinic_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "Failed to allocate flow memory");
		return NULL;
	}

	hinic_flow_mem_ptr = rte_zmalloc("hinic_flow_mem",
			sizeof(struct hinic_flow_mem), 0);
	if (!hinic_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "Failed to allocate hinic_flow_mem_ptr");
		rte_free(flow);
		return NULL;
	}

	hinic_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr,
				entries);

	/* Add ntuple filter */
	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = hinic_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("hinic_ntuple_filter",
				sizeof(struct hinic_ntuple_filter_ele), 0);
			rte_memcpy(&ntuple_filter_ptr->filter_info,
					&ntuple_filter,
					sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&nic_dev->filter_ntuple_list,
					ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;

			PMD_DRV_LOG(INFO, "Create flow ntuple succeed, func_id: 0x%x",
				hinic_global_func_id(nic_dev->hwdev));
			return flow;
		}
	}

	TAILQ_REMOVE(&nic_dev->hinic_flow_list, hinic_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"Failed to create flow.");
	rte_free(hinic_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
/* Destroy a flow rule on hinic. */
static int hinic_flow_destroy(struct rte_eth_dev *dev,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct hinic_ntuple_filter_ele *ntuple_filter_ptr;
	struct hinic_flow_mem *hinic_flow_mem_ptr;
	struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct hinic_ntuple_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ntuple_filter, &ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = hinic_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&nic_dev->filter_ntuple_list,
					ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;

	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
				filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(hinic_flow_mem_ptr, &nic_dev->hinic_flow_list, entries) {
		if (hinic_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&nic_dev->hinic_flow_list,
					hinic_flow_mem_ptr, entries);
			rte_free(hinic_flow_mem_ptr);
			break;
		}
	}

	rte_free(flow);

	PMD_DRV_LOG(INFO, "Destroy flow succeed, func_id: 0x%x",
			hinic_global_func_id(nic_dev->hwdev));

	return ret;
}
const struct rte_flow_ops hinic_flow_ops = {
	.validate = hinic_flow_validate,
	.create = hinic_flow_create,
	.destroy = hinic_flow_destroy,
};
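/*
 * Illustrative usage sketch (application side, not part of the driver):
 * the callbacks above are reached through the generic rte_flow API, e.g.:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	...
 *	rte_flow_destroy(port_id, flow, &err);
 */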