/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <rte_bus_pci.h>
#include <rte_flow_driver.h>

#include "txgbe_ethdev.h"

#define TXGBE_MIN_N_TUPLE_PRIO 1
#define TXGBE_MAX_N_TUPLE_PRIO 7
#define TXGBE_MAX_FLX_SOURCE_OFF 62
/**
 * Endless loop will never happen with below assumption
 * 1. there is at least one no-void item(END)
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}
static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/**
 * Please be aware there's an assumption for all the parsers.
 * rte_flow_item is using big endian, rte_flow_attr and
 * rte_flow_action are using CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
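
/*
 * Illustrative sketch of that convention (documentation only, not part of
 * the driver): an application matching TCP dst_port 80 fills the item
 * fields in network order but leaves the attribute in CPU order, e.g.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 */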
/**
 * Parse the rule to see if it is a n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
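
/*
 * A minimal application-side sketch of a rule this parser accepts
 * (illustrative only; it mirrors the example table above):
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *		.hdr.next_proto_id = 17,
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = UINT32_MAX,
 *		.hdr.dst_addr = UINT32_MAX,
 *		.hdr.next_proto_id = UINT8_MAX,
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */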
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = item->spec;
		eth_mask = item->mask;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(eth_spec, &eth_null,
				sizeof(struct rte_flow_item_eth)) ||
			 memcmp(eth_mask, &eth_null,
				sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 or Vlan */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = item->spec;
		vlan_mask = item->mask;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(vlan_spec, &vlan_null,
				sizeof(struct rte_flow_item_vlan)) ||
			 memcmp(vlan_mask, &vlan_null,
				sizeof(struct rte_flow_item_vlan)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	if ((ipv4_mask->hdr.src_addr != 0 &&
	     ipv4_mask->hdr.src_addr != UINT32_MAX) ||
	    (ipv4_mask->hdr.dst_addr != 0 &&
	     ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
	    (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
	     ipv4_mask->hdr.next_proto_id != 0)) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec && !item->mask)) {
		goto action;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((tcp_mask->hdr.src_port != 0 &&
		     tcp_mask->hdr.src_port != UINT16_MAX) ||
		    (tcp_mask->hdr.dst_port != 0 &&
		     tcp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((udp_mask->hdr.src_port != 0 &&
		     udp_mask->hdr.src_port != UINT16_MAX) ||
		    (udp_mask->hdr.dst_port != 0 &&
		     udp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	} else {
		goto action;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:
	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* a specific function for txgbe because the flags are specific */
static int
txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* txgbe doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* txgbe doesn't support many priorities */
	if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* fixed value for txgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
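
/*
 * A minimal sketch of a matching application rule (illustrative only;
 * 0x0807 is the example ethertype from the table above):
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = rte_cpu_to_be_16(0xFFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */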
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = item->spec;
	eth_mask = item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					  actions, filter, error);
	if (ret)
		return ret;

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
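
/*
 * A minimal sketch of a matching application rule (illustrative only):
 * only the SYN bit is set in both spec and mask, all other TCP fields
 * stay zero, and the ETH/IPV4 items carry no spec or mask.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */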
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = item->spec;
	tcp_mask = item->mask;
	if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
txgbe_parse_syn_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
				    actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is a L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
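
/*
 * A minimal sketch of a matching application rule (illustrative only; it
 * assumes grp sits in bits 13:12 and e_cid_base in the low twelve bits of
 * rsvd_grp_ecid_b, which is how the parser below reads that field):
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */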
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct txgbe_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = item->spec;
	e_tag_mask = item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is VF or PF. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
	    act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct txgbe_l2_tunnel_conf *l2_tn_filter,
			 struct rte_flow_error *error)
{
	int ret = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				      actions, l2_tn_filter, error);

	vf_num = pci_dev->max_vfs;

	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;

	return ret;
}
/* Parse to get the attr and action info of flow director rule. */
static int
txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct txgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = TXGBE_FDIRPICMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
	    act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
/* search next no void pattern and skip fuzzy */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}

static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec = item->spec;
			last = item->last;
			mask = item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;
			mh = mask->thresh;
			lh = last ? last->thresh : sh;

			sh = sh & mh;
			lh = lh & mh;

			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional)
 * The next not void item could be RAW (for flexbyte, optional)
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should set to 0x00.
 * Item->last should be NULL.
 */
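
/*
 * A minimal sketch of the FLEX (RAW) item from the example above
 * (illustrative only): matching the two bytes 0x86 0xDD at offset 12.
 * The mask bit-fields are all-ones, as the parser below requires.
 *
 *	static const uint8_t flex_pattern[2] = { 0x86, 0xDD };
 *	static const uint8_t flex_full_mask[2] = { 0xFF, 0xFF };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0, .search = 0, .offset = 12,
 *		.limit = 0, .length = 2,
 *		.pattern = flex_pattern,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1, .search = 1, .offset = -1,
 *		.limit = 0xFFFF, .length = 0xFFFF,
 *		.pattern = flex_full_mask,
 *	};
 */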
static int
txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
			       const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct txgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint32_t ptype = 0;
	uint8_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct txgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					       sizeof(struct txgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/*** If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
			    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask = item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec = item->spec;
			rule->input.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->input.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV6];

		/**
		 * 1. must signature match
		 * 2. not support last
		 * 3. mask must not null
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask = item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec = item->spec;
			rte_memcpy(rule->input.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->input.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = item->spec;
			rule->input.src_port =
				tcp_spec->hdr.src_port;
			rule->input.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = item->spec;
			rule->input.src_port =
				udp_spec->hdr.src_port;
			rule->input.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask = item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec = item->spec;
			rule->input.src_port =
				sctp_spec->hdr.src_port;
			rule->input.dst_port =
				sctp_spec->hdr.dst_port;
		}
		/* others even sctp port is not supported */
		sctp_mask = item->mask;
		if (sctp_mask &&
		    (sctp_mask->hdr.src_port ||
		     sctp_mask->hdr.dst_port ||
		     sctp_mask->hdr.tag ||
		     sctp_mask->hdr.cksum)) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* mask should not be null */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->input.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));

	return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
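
/*
 * A minimal sketch of the NVGRE variant above (illustrative only;
 * inner_eth_spec and inner_eth_mask are hypothetical locals). The outer
 * items only describe the protocol stack, so their spec and mask stay
 * NULL; the inner MAC mask sets each dst.addr_bytes byte to 0xFF and
 * leaves the ether type unmasked, as the checks below require.
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_NVGRE },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */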
2087 txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2088 const struct rte_flow_item pattern[],
2089 const struct rte_flow_action actions[],
2090 struct txgbe_fdir_rule *rule,
2091 struct rte_flow_error *error)
2093 const struct rte_flow_item *item;
2094 const struct rte_flow_item_eth *eth_mask;
2098 rte_flow_error_set(error, EINVAL,
2099 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2100 NULL, "NULL pattern.");
2105 rte_flow_error_set(error, EINVAL,
2106 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2107 NULL, "NULL action.");
2112 rte_flow_error_set(error, EINVAL,
2113 RTE_FLOW_ERROR_TYPE_ATTR,
2114 NULL, "NULL attribute.");
2119 * Some fields may not be provided. Set spec to 0 and mask to default
2120 * value. So, we need not do anything for the not provided fields later.
2122 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2123 memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
2124 rule->mask.vlan_tci_mask = 0;
2127 * The first not void item should be
2128 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2130 item = next_no_void_pattern(pattern, NULL);
2131 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2132 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2133 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2134 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2135 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2136 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2137 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2138 rte_flow_error_set(error, EINVAL,
2139 RTE_FLOW_ERROR_TYPE_ITEM,
2140 item, "Not supported by fdir filter");
2144 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2147 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2148 /* Only used to describe the protocol stack. */
2149 if (item->spec || item->mask) {
2150 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2151 rte_flow_error_set(error, EINVAL,
2152 RTE_FLOW_ERROR_TYPE_ITEM,
2153 item, "Not supported by fdir filter");
2156 /* Not supported last point for range*/
2158 rte_flow_error_set(error, EINVAL,
2159 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2160 item, "Not supported last point for range");
2164 /* Check if the next not void item is IPv4 or IPv6. */
2165 item = next_no_void_pattern(pattern, item);
2166 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2167 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2168 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2169 rte_flow_error_set(error, EINVAL,
2170 RTE_FLOW_ERROR_TYPE_ITEM,
2171 item, "Not supported by fdir filter");

	/* Skip the outer IP header. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip the outer UDP header. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Check if the next not void item is the inner MAC. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only VLAN and dst MAC address are supported;
	 * everything else must be masked out.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETH_ADDR_LEN; j++) {
		/**
		 * It's a per-byte mask: bit j of mac_addr_byte_mask stands
		 * for byte j of the destination MAC. Only all-ones (0xFF)
		 * or all-zeroes mask bytes are representable, so e.g. the
		 * mask ff:ff:ff:ff:00:00 becomes 0x0F.
		 */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When there is no VLAN item, this is treated as a full mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/**
	 * If the VLAN tag is 0, the VLAN is a don't-care;
	 * nothing needs to be done.
	 */

	return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
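
/*
 * Illustrative sketch only (not part of the driver): one way an application
 * could assemble a pattern/action list matching the VxLAN layout the tunnel
 * parser above expects. The guard macro, function name, VNI, TCI, MAC mask
 * and queue index are all made-up example values; the required definitions
 * (rte_flow, rte_byteorder) are assumed to arrive via txgbe_ethdev.h.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLE
static int
example_vxlan_fdir_rule(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x32, 0x54 },	/* example VNI */
	};
	const struct rte_flow_item_vxlan vxlan_mask = {
		.vni = { 0xFF, 0xFF, 0xFF },	/* match the whole VNI */
	};
	/* Inner MAC: mask the destination address byte by byte;
	 * src and type stay zero, as the parser demands.
	 */
	const struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	const struct rte_flow_item_vlan vlan_spec = {
		.tci = RTE_BE16(0x2016),
	};
	const struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0xEFFF),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer: no spec/mask */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, err);
}
#endif /* TXGBE_FLOW_DOC_EXAMPLE */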

static int
txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct txgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	/* Try the perfect-match parser first, then the tunnel parser. */
	ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
					actions, rule, error);
	if (!ret)
		goto step_next;

	ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);
	if (ret)
		return ret;

step_next:

	if (hw->mac.type == txgbe_mac_raptor &&
	    rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
	    (rule->input.src_port != 0 || rule->input.dst_port != 0))
		return -ENOTSUP;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return ret;
}
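
/*
 * Illustrative sketch only: the mode check above means a tunnel rule can
 * only validate when the port was configured with a matching fdir mode.
 * The function name is hypothetical; the fields follow the rte_eth_conf
 * layout of this DPDK generation, and the pballoc size is just an example.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLE
static void
example_enable_perfect_tunnel_mode(struct rte_eth_conf *port_conf)
{
	/* Must equal rule->mode (RTE_FDIR_MODE_PERFECT_TUNNEL for the
	 * tunnel parser) or txgbe_parse_fdir_filter() returns -ENOTSUP.
	 */
	port_conf->fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
	port_conf->fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;
}
#endif /* TXGBE_FLOW_DOC_EXAMPLE */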

static int
txgbe_parse_rss_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct txgbe_rte_flow_rss_conf *rss_conf,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_rss *rss;
	uint16_t n;

	/**
	 * RSS only supports forwarding;
	 * check if the first not void action is RSS.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rss = (const struct rte_flow_action_rss *)act->conf;

	if (!rss || !rss->queue_num) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "no valid queues");
		return -rte_errno;
	}

	for (n = 0; n < rss->queue_num; n++) {
		if (rss->queue[n] >= dev->data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "queue id > max number of queues");
			return -rte_errno;
		}
	}

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "non-default RSS hash functions are not supported");
	if (rss->level)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "a nonzero RSS encapsulation level is not supported");
	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS hash key must be exactly 40 bytes");
	if (rss->queue_num > RTE_DIM(rss_conf->queue))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");
	if (txgbe_rss_conf_init(rss_conf, rss))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS context initialization failure");

	/* Check if the next not void action is END. */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse the attributes: must be the ingress direction. */
	if (!attr->ingress) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported. */
	if (attr->egress) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported. */
	if (attr->transfer) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	return 0;
}
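
/*
 * Illustrative sketch only: an RSS action that satisfies every check above --
 * default hash function, encapsulation level 0, a 40-byte key, and a queue
 * list that fits the device. The guard macro, function name, key bytes and
 * queue ids are arbitrary example values.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLE
static int
example_rss_rule(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const uint8_t rss_key[40] = { 0x6d, 0x5a };	/* example key */
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,	/* only value accepted */
		.level = 0,				/* outermost headers only */
		.types = ETH_RSS_IP | ETH_RSS_UDP,
		.key_len = sizeof(rss_key),		/* must be 40 (or 0) */
		.key = rss_key,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, err);
}
#endif /* TXGBE_FLOW_DOC_EXAMPLE */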

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter;
 * we let it use the first filter it hits,
 * so the sequence matters.
 */
static struct rte_flow *
txgbe_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;

/**
 * Check if the flow rule is supported by txgbe.
 * It only checks the format; it does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
txgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct txgbe_l2_tunnel_conf l2_tn_filter;
	struct txgbe_fdir_rule fdir_rule;
	struct txgbe_rte_flow_rss_conf rss_conf;
	int ret = 0;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
			actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = txgbe_parse_syn_filter(dev, attr, pattern,
			actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
	ret = txgbe_parse_fdir_filter(dev, attr, pattern,
			actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
	ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
			actions, &l2_tn_filter, error);
	if (!ret)
		return 0;

	memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
	ret = txgbe_parse_rss_filter(dev, attr,
			actions, &rss_conf, error);

	return ret;
}

/* Destroy a flow rule on txgbe. */
static int
txgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)

/* Destroy all flow rules associated with a port on txgbe. */
static int
txgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)

const struct rte_flow_ops txgbe_flow_ops = {
	.validate = txgbe_flow_validate,
	.create = txgbe_flow_create,
	.destroy = txgbe_flow_destroy,
	.flush = txgbe_flow_flush,
};
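
/*
 * Illustrative usage sketch only: applications never call txgbe_flow_ops
 * directly; the generic rte_flow API dispatches to these callbacks through
 * the ethdev layer. The guard macro, function name and control flow below
 * are example material, not driver code.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLE
static void
example_flow_api_usage(uint16_t port_id, const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[])
{
	struct rte_flow_error err;
	struct rte_flow *flow;

	/* Format check first; creation can still fail for lack of room. */
	if (rte_flow_validate(port_id, attr, pattern, actions, &err) == 0) {
		flow = rte_flow_create(port_id, attr, pattern, actions, &err);
		if (flow != NULL)
			rte_flow_destroy(port_id, flow, &err);
	}
	rte_flow_flush(port_id, &err);	/* drop every rule on the port */
}
#endif /* TXGBE_FLOW_DOC_EXAMPLE */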