/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
 * Copyright(c) 2010-2017 Intel Corporation
 */
#include <rte_bus_pci.h>
#include <rte_malloc.h>

#include <rte_flow_driver.h>

#include "txgbe_ethdev.h"

#define TXGBE_MIN_N_TUPLE_PRIO	1
#define TXGBE_MAX_N_TUPLE_PRIO	7
#define TXGBE_MAX_FLX_SOURCE_OFF	62
/* ntuple filter list structure */
struct txgbe_ntuple_filter_ele {
	TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct txgbe_ethertype_filter_ele {
	TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct txgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct txgbe_fdir_rule_ele {
	TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
	struct txgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct txgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
	struct txgbe_l2_tunnel_conf filter_info;
};
/* rss filter list structure */
struct txgbe_rss_conf_ele {
	TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
	struct txgbe_rte_flow_rss_conf filter_info;
};
/* txgbe_flow memory list structure */
struct txgbe_flow_mem {
	TAILQ_ENTRY(txgbe_flow_mem) entries;
	struct rte_flow *flow;
};

TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);

static struct txgbe_ntuple_filter_list filter_ntuple_list;
static struct txgbe_ethertype_filter_list filter_ethertype_list;
static struct txgbe_syn_filter_list filter_syn_list;
static struct txgbe_fdir_rule_filter_list filter_fdir_list;
static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct txgbe_rss_filter_list filter_rss_list;
static struct txgbe_flow_mem_list txgbe_flow_list;
/**
 * An endless loop can never happen under the assumptions below:
 * 1. there is at least one non-void item (END).
 * 2. cur is before END.
 */
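/*
 * Illustrative sketch (documentation only, compiled out by the hypothetical,
 * never-defined TXGBE_FLOW_DOC_EXAMPLES guard): a pattern that satisfies the
 * assumptions above; the VOID entry is skipped by the helpers below and END
 * terminates the walk.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard, documentation only */
static const struct rte_flow_item txgbe_doc_pattern_with_void[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VOID },	/* skipped by the helpers */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },	/* guarantees termination */
};
#endif /* TXGBE_FLOW_DOC_EXAMPLES */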
76 const struct rte_flow_item *next_no_void_pattern(
77 const struct rte_flow_item pattern[],
78 const struct rte_flow_item *cur)
80 const struct rte_flow_item *next =
81 cur ? cur + 1 : &pattern[0];
83 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
90 const struct rte_flow_action *next_no_void_action(
91 const struct rte_flow_action actions[],
92 const struct rte_flow_action *cur)
94 const struct rte_flow_action *next =
95 cur ? cur + 1 : &actions[0];
97 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
/**
 * Please be aware there's an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe packets,
 * the packets normally use network order.
 */
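/*
 * Minimal sketch of that byte-order convention (documentation only, under the
 * hypothetical TXGBE_FLOW_DOC_EXAMPLES guard; RTE_IPV4() is assumed visible
 * via rte_ip.h): item content is filled in network order, attribute fields
 * stay in CPU order.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard, documentation only */
static void
txgbe_doc_byte_order_example(void)
{
	/* pattern content: big endian (network order) */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
	};
	/* attributes: CPU order, no conversion */
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };

	RTE_SET_USED(ip_spec);
	RTE_SET_USED(attr);
}
#endif /* TXGBE_FLOW_DOC_EXAMPLES */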
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
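/*
 * Application-side sketch of the rule documented above (not driver code;
 * compiled out by the hypothetical TXGBE_FLOW_DOC_EXAMPLES guard, with
 * port_id/queue_id as placeholder parameters and RTE_IPV4() assumed visible
 * via rte_ip.h): build the documented IPV4 + UDP n-tuple pattern and a QUEUE
 * action, then ask the PMD to validate it.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard, documentation only */
static int
txgbe_doc_validate_ntuple_rule(uint16_t port_id, uint16_t queue_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
		.hdr.next_proto_id = IPPROTO_UDP,
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = UINT32_MAX,
		.hdr.dst_addr = UINT32_MAX,
		.hdr.next_proto_id = UINT8_MAX,
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(80),
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.src_port = UINT16_MAX,
		.hdr.dst_port = UINT16_MAX,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif /* TXGBE_FLOW_DOC_EXAMPLES */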
138 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
139 const struct rte_flow_item pattern[],
140 const struct rte_flow_action actions[],
141 struct rte_eth_ntuple_filter *filter,
142 struct rte_flow_error *error)
144 const struct rte_flow_item *item;
145 const struct rte_flow_action *act;
146 const struct rte_flow_item_ipv4 *ipv4_spec;
147 const struct rte_flow_item_ipv4 *ipv4_mask;
148 const struct rte_flow_item_tcp *tcp_spec;
149 const struct rte_flow_item_tcp *tcp_mask;
150 const struct rte_flow_item_udp *udp_spec;
151 const struct rte_flow_item_udp *udp_mask;
152 const struct rte_flow_item_sctp *sctp_spec;
153 const struct rte_flow_item_sctp *sctp_mask;
154 const struct rte_flow_item_eth *eth_spec;
155 const struct rte_flow_item_eth *eth_mask;
156 const struct rte_flow_item_vlan *vlan_spec;
157 const struct rte_flow_item_vlan *vlan_mask;
158 struct rte_flow_item_eth eth_null;
159 struct rte_flow_item_vlan vlan_null;
162 rte_flow_error_set(error,
163 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
164 NULL, "NULL pattern.");
169 rte_flow_error_set(error, EINVAL,
170 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
171 NULL, "NULL action.");
175 rte_flow_error_set(error, EINVAL,
176 RTE_FLOW_ERROR_TYPE_ATTR,
177 NULL, "NULL attribute.");
181 memset(ð_null, 0, sizeof(struct rte_flow_item_eth));
182 memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
184 #ifdef RTE_LIB_SECURITY
186 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
188 act = next_no_void_action(actions, NULL);
189 if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
190 const void *conf = act->conf;
191 /* check if the next not void item is END */
192 act = next_no_void_action(actions, act);
193 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
194 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
195 rte_flow_error_set(error, EINVAL,
196 RTE_FLOW_ERROR_TYPE_ACTION,
197 act, "Not supported action.");
201 /* get the IP pattern*/
202 item = next_no_void_pattern(pattern, NULL);
203 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
204 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
206 item->type == RTE_FLOW_ITEM_TYPE_END) {
207 rte_flow_error_set(error, EINVAL,
208 RTE_FLOW_ERROR_TYPE_ITEM,
209 item, "IP pattern missing.");
212 item = next_no_void_pattern(pattern, item);
215 filter->proto = IPPROTO_ESP;
216 return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
217 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
221 /* the first not void item can be MAC or IPv4 */
222 item = next_no_void_pattern(pattern, NULL);
224 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
225 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
226 rte_flow_error_set(error, EINVAL,
227 RTE_FLOW_ERROR_TYPE_ITEM,
228 item, "Not supported by ntuple filter");
232 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
233 eth_spec = item->spec;
234 eth_mask = item->mask;
235 /*Not supported last point for range*/
237 rte_flow_error_set(error,
239 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
240 item, "Not supported last point for range");
243 /* if the first item is MAC, the content should be NULL */
244 if ((item->spec && memcmp(eth_spec, ð_null,
245 sizeof(struct rte_flow_item_eth))) ||
246 (item->mask && memcmp(eth_mask, ð_null,
247 sizeof(struct rte_flow_item_eth)))) {
248 rte_flow_error_set(error, EINVAL,
249 RTE_FLOW_ERROR_TYPE_ITEM,
250 item, "Not supported by ntuple filter");
253 /* check if the next not void item is IPv4 or Vlan */
254 item = next_no_void_pattern(pattern, item);
255 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
256 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
257 rte_flow_error_set(error,
258 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
259 item, "Not supported by ntuple filter");
264 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
265 vlan_spec = item->spec;
266 vlan_mask = item->mask;
267 /*Not supported last point for range*/
269 rte_flow_error_set(error,
270 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
271 item, "Not supported last point for range");
274 /* the content should be NULL */
275 if ((item->spec && memcmp(vlan_spec, &vlan_null,
276 sizeof(struct rte_flow_item_vlan))) ||
277 (item->mask && memcmp(vlan_mask, &vlan_null,
278 sizeof(struct rte_flow_item_vlan)))) {
279 rte_flow_error_set(error, EINVAL,
280 RTE_FLOW_ERROR_TYPE_ITEM,
281 item, "Not supported by ntuple filter");
284 /* check if the next not void item is IPv4 */
285 item = next_no_void_pattern(pattern, item);
286 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
287 rte_flow_error_set(error,
288 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
289 item, "Not supported by ntuple filter");
295 /* get the IPv4 info */
296 if (!item->spec || !item->mask) {
297 rte_flow_error_set(error, EINVAL,
298 RTE_FLOW_ERROR_TYPE_ITEM,
299 item, "Invalid ntuple mask");
302 /*Not supported last point for range*/
304 rte_flow_error_set(error, EINVAL,
305 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
306 item, "Not supported last point for range");
310 ipv4_mask = item->mask;
312 * Only support src & dst addresses, protocol,
313 * others should be masked.
315 if (ipv4_mask->hdr.version_ihl ||
316 ipv4_mask->hdr.type_of_service ||
317 ipv4_mask->hdr.total_length ||
318 ipv4_mask->hdr.packet_id ||
319 ipv4_mask->hdr.fragment_offset ||
320 ipv4_mask->hdr.time_to_live ||
321 ipv4_mask->hdr.hdr_checksum) {
322 rte_flow_error_set(error,
323 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
324 item, "Not supported by ntuple filter");
327 if ((ipv4_mask->hdr.src_addr != 0 &&
328 ipv4_mask->hdr.src_addr != UINT32_MAX) ||
329 (ipv4_mask->hdr.dst_addr != 0 &&
330 ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
331 (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
332 ipv4_mask->hdr.next_proto_id != 0)) {
333 rte_flow_error_set(error,
334 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
335 item, "Not supported by ntuple filter");
339 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
340 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
341 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
343 ipv4_spec = item->spec;
344 filter->dst_ip = ipv4_spec->hdr.dst_addr;
345 filter->src_ip = ipv4_spec->hdr.src_addr;
346 filter->proto = ipv4_spec->hdr.next_proto_id;
349 /* check if the next not void item is TCP or UDP */
350 item = next_no_void_pattern(pattern, item);
351 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
352 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
353 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
354 item->type != RTE_FLOW_ITEM_TYPE_END) {
355 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
356 rte_flow_error_set(error, EINVAL,
357 RTE_FLOW_ERROR_TYPE_ITEM,
358 item, "Not supported by ntuple filter");
362 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
363 (!item->spec && !item->mask)) {
367 /* get the TCP/UDP/SCTP info */
368 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
369 (!item->spec || !item->mask)) {
370 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
371 rte_flow_error_set(error, EINVAL,
372 RTE_FLOW_ERROR_TYPE_ITEM,
373 item, "Invalid ntuple mask");
377 /*Not supported last point for range*/
379 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
380 rte_flow_error_set(error, EINVAL,
381 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
382 item, "Not supported last point for range");
386 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
387 tcp_mask = item->mask;
390 * Only support src & dst ports, tcp flags,
391 * others should be masked.
393 if (tcp_mask->hdr.sent_seq ||
394 tcp_mask->hdr.recv_ack ||
395 tcp_mask->hdr.data_off ||
396 tcp_mask->hdr.rx_win ||
397 tcp_mask->hdr.cksum ||
398 tcp_mask->hdr.tcp_urp) {
400 sizeof(struct rte_eth_ntuple_filter));
401 rte_flow_error_set(error, EINVAL,
402 RTE_FLOW_ERROR_TYPE_ITEM,
403 item, "Not supported by ntuple filter");
406 if ((tcp_mask->hdr.src_port != 0 &&
407 tcp_mask->hdr.src_port != UINT16_MAX) ||
408 (tcp_mask->hdr.dst_port != 0 &&
409 tcp_mask->hdr.dst_port != UINT16_MAX)) {
410 rte_flow_error_set(error,
411 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
412 item, "Not supported by ntuple filter");
416 filter->dst_port_mask = tcp_mask->hdr.dst_port;
417 filter->src_port_mask = tcp_mask->hdr.src_port;
418 if (tcp_mask->hdr.tcp_flags == 0xFF) {
419 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
420 } else if (!tcp_mask->hdr.tcp_flags) {
421 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
423 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
424 rte_flow_error_set(error, EINVAL,
425 RTE_FLOW_ERROR_TYPE_ITEM,
426 item, "Not supported by ntuple filter");
430 tcp_spec = item->spec;
431 filter->dst_port = tcp_spec->hdr.dst_port;
432 filter->src_port = tcp_spec->hdr.src_port;
433 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
434 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
435 udp_mask = item->mask;
438 * Only support src & dst ports,
439 * others should be masked.
441 if (udp_mask->hdr.dgram_len ||
442 udp_mask->hdr.dgram_cksum) {
444 sizeof(struct rte_eth_ntuple_filter));
445 rte_flow_error_set(error, EINVAL,
446 RTE_FLOW_ERROR_TYPE_ITEM,
447 item, "Not supported by ntuple filter");
450 if ((udp_mask->hdr.src_port != 0 &&
451 udp_mask->hdr.src_port != UINT16_MAX) ||
452 (udp_mask->hdr.dst_port != 0 &&
453 udp_mask->hdr.dst_port != UINT16_MAX)) {
454 rte_flow_error_set(error,
455 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
456 item, "Not supported by ntuple filter");
460 filter->dst_port_mask = udp_mask->hdr.dst_port;
461 filter->src_port_mask = udp_mask->hdr.src_port;
463 udp_spec = item->spec;
464 filter->dst_port = udp_spec->hdr.dst_port;
465 filter->src_port = udp_spec->hdr.src_port;
466 } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
467 sctp_mask = item->mask;
470 * Only support src & dst ports,
471 * others should be masked.
473 if (sctp_mask->hdr.tag ||
474 sctp_mask->hdr.cksum) {
476 sizeof(struct rte_eth_ntuple_filter));
477 rte_flow_error_set(error, EINVAL,
478 RTE_FLOW_ERROR_TYPE_ITEM,
479 item, "Not supported by ntuple filter");
483 filter->dst_port_mask = sctp_mask->hdr.dst_port;
484 filter->src_port_mask = sctp_mask->hdr.src_port;
486 sctp_spec = item->spec;
487 filter->dst_port = sctp_spec->hdr.dst_port;
488 filter->src_port = sctp_spec->hdr.src_port;
493 /* check if the next not void item is END */
494 item = next_no_void_pattern(pattern, item);
495 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
496 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
497 rte_flow_error_set(error, EINVAL,
498 RTE_FLOW_ERROR_TYPE_ITEM,
499 item, "Not supported by ntuple filter");
506 * n-tuple only supports forwarding,
507 * check if the first not void action is QUEUE.
509 act = next_no_void_action(actions, NULL);
510 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
511 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
512 rte_flow_error_set(error, EINVAL,
513 RTE_FLOW_ERROR_TYPE_ACTION,
514 act, "Not supported action.");
518 ((const struct rte_flow_action_queue *)act->conf)->index;
520 /* check if the next not void item is END */
521 act = next_no_void_action(actions, act);
522 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
523 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
524 rte_flow_error_set(error, EINVAL,
525 RTE_FLOW_ERROR_TYPE_ACTION,
526 act, "Not supported action.");
531 /* must be input direction */
532 if (!attr->ingress) {
533 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
534 rte_flow_error_set(error, EINVAL,
535 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
536 attr, "Only support ingress.");
542 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
543 rte_flow_error_set(error, EINVAL,
544 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
545 attr, "Not support egress.");
550 if (attr->transfer) {
551 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
552 rte_flow_error_set(error, EINVAL,
553 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
554 attr, "No support for transfer.");
558 if (attr->priority > 0xFFFF) {
559 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
560 rte_flow_error_set(error, EINVAL,
561 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
562 attr, "Error priority.");
565 filter->priority = (uint16_t)attr->priority;
566 if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
567 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
568 filter->priority = 1;
/* a specific function for txgbe because the flags are specific */
575 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
576 const struct rte_flow_attr *attr,
577 const struct rte_flow_item pattern[],
578 const struct rte_flow_action actions[],
579 struct rte_eth_ntuple_filter *filter,
580 struct rte_flow_error *error)
584 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
589 #ifdef RTE_LIB_SECURITY
590 /* ESP flow not really a flow */
591 if (filter->proto == IPPROTO_ESP)
595 /* txgbe doesn't support tcp flags */
596 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
597 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
598 rte_flow_error_set(error, EINVAL,
599 RTE_FLOW_ERROR_TYPE_ITEM,
600 NULL, "Not supported by ntuple filter");
604 /* txgbe doesn't support many priorities */
605 if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
606 filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
607 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
608 rte_flow_error_set(error, EINVAL,
609 RTE_FLOW_ERROR_TYPE_ITEM,
610 NULL, "Priority not supported by ntuple filter");
614 if (filter->queue >= dev->data->nb_rx_queues) {
615 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
616 rte_flow_error_set(error, EINVAL,
617 RTE_FLOW_ERROR_TYPE_ITEM,
618 NULL, "Not supported by ntuple filter");
622 /* fixed value for txgbe */
623 filter->flags = RTE_5TUPLE_FLAGS;
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
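/*
 * Application-side sketch of the rule documented above (documentation only,
 * under the hypothetical TXGBE_FLOW_DOC_EXAMPLES guard; port_id/queue_id are
 * placeholders): match a single ether type and steer it to one queue.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard, documentation only */
static int
txgbe_doc_validate_ethertype_rule(uint16_t port_id, uint16_t queue_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(0x0807),	/* documented example */
	};
	struct rte_flow_item_eth eth_mask = {
		.type = UINT16_MAX,	/* full ether-type mask, MACs all 0 */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif /* TXGBE_FLOW_DOC_EXAMPLES */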
644 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
645 const struct rte_flow_item *pattern,
646 const struct rte_flow_action *actions,
647 struct rte_eth_ethertype_filter *filter,
648 struct rte_flow_error *error)
650 const struct rte_flow_item *item;
651 const struct rte_flow_action *act;
652 const struct rte_flow_item_eth *eth_spec;
653 const struct rte_flow_item_eth *eth_mask;
654 const struct rte_flow_action_queue *act_q;
657 rte_flow_error_set(error, EINVAL,
658 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
659 NULL, "NULL pattern.");
664 rte_flow_error_set(error, EINVAL,
665 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
666 NULL, "NULL action.");
671 rte_flow_error_set(error, EINVAL,
672 RTE_FLOW_ERROR_TYPE_ATTR,
673 NULL, "NULL attribute.");
677 item = next_no_void_pattern(pattern, NULL);
678 /* The first non-void item should be MAC. */
679 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
680 rte_flow_error_set(error, EINVAL,
681 RTE_FLOW_ERROR_TYPE_ITEM,
682 item, "Not supported by ethertype filter");
686 /*Not supported last point for range*/
688 rte_flow_error_set(error, EINVAL,
689 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
690 item, "Not supported last point for range");
694 /* Get the MAC info. */
695 if (!item->spec || !item->mask) {
696 rte_flow_error_set(error, EINVAL,
697 RTE_FLOW_ERROR_TYPE_ITEM,
698 item, "Not supported by ethertype filter");
702 eth_spec = item->spec;
703 eth_mask = item->mask;
	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
709 if (!rte_is_zero_ether_addr(ð_mask->src) ||
710 (!rte_is_zero_ether_addr(ð_mask->dst) &&
711 !rte_is_broadcast_ether_addr(ð_mask->dst))) {
712 rte_flow_error_set(error, EINVAL,
713 RTE_FLOW_ERROR_TYPE_ITEM,
714 item, "Invalid ether address mask");
718 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
719 rte_flow_error_set(error, EINVAL,
720 RTE_FLOW_ERROR_TYPE_ITEM,
721 item, "Invalid ethertype mask");
725 /* If mask bits of destination MAC address
726 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
728 if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
729 filter->mac_addr = eth_spec->dst;
730 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
732 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
734 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
736 /* Check if the next non-void item is END. */
737 item = next_no_void_pattern(pattern, item);
738 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
739 rte_flow_error_set(error, EINVAL,
740 RTE_FLOW_ERROR_TYPE_ITEM,
741 item, "Not supported by ethertype filter.");
747 act = next_no_void_action(actions, NULL);
748 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
749 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
750 rte_flow_error_set(error, EINVAL,
751 RTE_FLOW_ERROR_TYPE_ACTION,
752 act, "Not supported action.");
756 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
757 act_q = (const struct rte_flow_action_queue *)act->conf;
758 filter->queue = act_q->index;
760 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
763 /* Check if the next non-void item is END */
764 act = next_no_void_action(actions, act);
765 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
766 rte_flow_error_set(error, EINVAL,
767 RTE_FLOW_ERROR_TYPE_ACTION,
768 act, "Not supported action.");
773 /* Must be input direction */
774 if (!attr->ingress) {
775 rte_flow_error_set(error, EINVAL,
776 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
777 attr, "Only support ingress.");
783 rte_flow_error_set(error, EINVAL,
784 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
785 attr, "Not support egress.");
790 if (attr->transfer) {
791 rte_flow_error_set(error, EINVAL,
792 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
793 attr, "No support for transfer.");
798 if (attr->priority) {
799 rte_flow_error_set(error, EINVAL,
800 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
801 attr, "Not support priority.");
807 rte_flow_error_set(error, EINVAL,
808 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
809 attr, "Not support group.");
817 txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
818 const struct rte_flow_attr *attr,
819 const struct rte_flow_item pattern[],
820 const struct rte_flow_action actions[],
821 struct rte_eth_ethertype_filter *filter,
822 struct rte_flow_error *error)
826 ret = cons_parse_ethertype_filter(attr, pattern,
827 actions, filter, error);
832 if (filter->queue >= dev->data->nb_rx_queues) {
833 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
834 rte_flow_error_set(error, EINVAL,
835 RTE_FLOW_ERROR_TYPE_ITEM,
836 NULL, "queue index much too big");
840 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
841 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
842 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
843 rte_flow_error_set(error, EINVAL,
844 RTE_FLOW_ERROR_TYPE_ITEM,
845 NULL, "IPv4/IPv6 not supported by ethertype filter");
849 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
850 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
851 rte_flow_error_set(error, EINVAL,
852 RTE_FLOW_ERROR_TYPE_ITEM,
853 NULL, "mac compare is unsupported");
857 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
858 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
859 rte_flow_error_set(error, EINVAL,
860 RTE_FLOW_ERROR_TYPE_ITEM,
861 NULL, "drop option is unsupported");
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
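/*
 * Application-side sketch of the rule documented above (documentation only,
 * under the hypothetical TXGBE_FLOW_DOC_EXAMPLES guard; port_id/queue_id are
 * placeholders): match TCP SYN packets and steer them to one queue. The TCP
 * flags mask is limited to the SYN bit, which is what the parser accepts.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard, documentation only */
static int
txgbe_doc_validate_syn_rule(uint16_t port_id, uint16_t queue_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };	/* lowest priority */
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif /* TXGBE_FLOW_DOC_EXAMPLES */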
889 cons_parse_syn_filter(const struct rte_flow_attr *attr,
890 const struct rte_flow_item pattern[],
891 const struct rte_flow_action actions[],
892 struct rte_eth_syn_filter *filter,
893 struct rte_flow_error *error)
895 const struct rte_flow_item *item;
896 const struct rte_flow_action *act;
897 const struct rte_flow_item_tcp *tcp_spec;
898 const struct rte_flow_item_tcp *tcp_mask;
899 const struct rte_flow_action_queue *act_q;
902 rte_flow_error_set(error, EINVAL,
903 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
904 NULL, "NULL pattern.");
909 rte_flow_error_set(error, EINVAL,
910 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
911 NULL, "NULL action.");
916 rte_flow_error_set(error, EINVAL,
917 RTE_FLOW_ERROR_TYPE_ATTR,
918 NULL, "NULL attribute.");
923 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
924 item = next_no_void_pattern(pattern, NULL);
925 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
926 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
927 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
928 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
929 rte_flow_error_set(error, EINVAL,
930 RTE_FLOW_ERROR_TYPE_ITEM,
931 item, "Not supported by syn filter");
934 /*Not supported last point for range*/
936 rte_flow_error_set(error, EINVAL,
937 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
938 item, "Not supported last point for range");
943 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
944 /* if the item is MAC, the content should be NULL */
945 if (item->spec || item->mask) {
946 rte_flow_error_set(error, EINVAL,
947 RTE_FLOW_ERROR_TYPE_ITEM,
948 item, "Invalid SYN address mask");
952 /* check if the next not void item is IPv4 or IPv6 */
953 item = next_no_void_pattern(pattern, item);
954 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
955 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
956 rte_flow_error_set(error, EINVAL,
957 RTE_FLOW_ERROR_TYPE_ITEM,
958 item, "Not supported by syn filter");
964 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
965 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
966 /* if the item is IP, the content should be NULL */
967 if (item->spec || item->mask) {
968 rte_flow_error_set(error, EINVAL,
969 RTE_FLOW_ERROR_TYPE_ITEM,
970 item, "Invalid SYN mask");
974 /* check if the next not void item is TCP */
975 item = next_no_void_pattern(pattern, item);
976 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
977 rte_flow_error_set(error, EINVAL,
978 RTE_FLOW_ERROR_TYPE_ITEM,
979 item, "Not supported by syn filter");
984 /* Get the TCP info. Only support SYN. */
985 if (!item->spec || !item->mask) {
986 rte_flow_error_set(error, EINVAL,
987 RTE_FLOW_ERROR_TYPE_ITEM,
988 item, "Invalid SYN mask");
991 /*Not supported last point for range*/
993 rte_flow_error_set(error, EINVAL,
994 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
995 item, "Not supported last point for range");
999 tcp_spec = item->spec;
1000 tcp_mask = item->mask;
1001 if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
1002 tcp_mask->hdr.src_port ||
1003 tcp_mask->hdr.dst_port ||
1004 tcp_mask->hdr.sent_seq ||
1005 tcp_mask->hdr.recv_ack ||
1006 tcp_mask->hdr.data_off ||
1007 tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
1008 tcp_mask->hdr.rx_win ||
1009 tcp_mask->hdr.cksum ||
1010 tcp_mask->hdr.tcp_urp) {
1011 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1012 rte_flow_error_set(error, EINVAL,
1013 RTE_FLOW_ERROR_TYPE_ITEM,
1014 item, "Not supported by syn filter");
1018 /* check if the next not void item is END */
1019 item = next_no_void_pattern(pattern, item);
1020 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1021 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1022 rte_flow_error_set(error, EINVAL,
1023 RTE_FLOW_ERROR_TYPE_ITEM,
1024 item, "Not supported by syn filter");
1028 /* check if the first not void action is QUEUE. */
1029 act = next_no_void_action(actions, NULL);
1030 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1031 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1032 rte_flow_error_set(error, EINVAL,
1033 RTE_FLOW_ERROR_TYPE_ACTION,
1034 act, "Not supported action.");
1038 act_q = (const struct rte_flow_action_queue *)act->conf;
1039 filter->queue = act_q->index;
1040 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
1041 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1042 rte_flow_error_set(error, EINVAL,
1043 RTE_FLOW_ERROR_TYPE_ACTION,
1044 act, "Not supported action.");
1048 /* check if the next not void item is END */
1049 act = next_no_void_action(actions, act);
1050 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1051 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1052 rte_flow_error_set(error, EINVAL,
1053 RTE_FLOW_ERROR_TYPE_ACTION,
1054 act, "Not supported action.");
1059 /* must be input direction */
1060 if (!attr->ingress) {
1061 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1062 rte_flow_error_set(error, EINVAL,
1063 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1064 attr, "Only support ingress.");
1070 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1071 rte_flow_error_set(error, EINVAL,
1072 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1073 attr, "Not support egress.");
1078 if (attr->transfer) {
1079 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1080 rte_flow_error_set(error, EINVAL,
1081 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1082 attr, "No support for transfer.");
1086 /* Support 2 priorities, the lowest or highest. */
1087 if (!attr->priority) {
1088 filter->hig_pri = 0;
1089 } else if (attr->priority == (uint32_t)~0U) {
1090 filter->hig_pri = 1;
1092 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1093 rte_flow_error_set(error, EINVAL,
1094 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1095 attr, "Not support priority.");
1103 txgbe_parse_syn_filter(struct rte_eth_dev *dev,
1104 const struct rte_flow_attr *attr,
1105 const struct rte_flow_item pattern[],
1106 const struct rte_flow_action actions[],
1107 struct rte_eth_syn_filter *filter,
1108 struct rte_flow_error *error)
1112 ret = cons_parse_syn_filter(attr, pattern,
1113 actions, filter, error);
1115 if (filter->queue >= dev->data->nb_rx_queues)
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info along the way.
 * Only E-tag is supported now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
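/*
 * Application-side sketch of the rule documented above (documentation only,
 * under the hypothetical TXGBE_FLOW_DOC_EXAMPLES guard; port_id is a
 * placeholder): match E-tag GRP 0x1 / e_cid_base 0x309 and direct it to the
 * PF pool. GRP and e_cid_base share the 14 low bits of rsvd_grp_ecid_b, which
 * matches the 0x3FFF mask the parser checks for.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard, documentation only */
static int
txgbe_doc_validate_l2_tunnel_rule(uint16_t port_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_e_tag etag_spec = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
	};
	struct rte_flow_item_e_tag etag_mask = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &etag_spec, .mask = &etag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PF },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif /* TXGBE_FLOW_DOC_EXAMPLES */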
1143 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1144 const struct rte_flow_attr *attr,
1145 const struct rte_flow_item pattern[],
1146 const struct rte_flow_action actions[],
1147 struct txgbe_l2_tunnel_conf *filter,
1148 struct rte_flow_error *error)
1150 const struct rte_flow_item *item;
1151 const struct rte_flow_item_e_tag *e_tag_spec;
1152 const struct rte_flow_item_e_tag *e_tag_mask;
1153 const struct rte_flow_action *act;
1154 const struct rte_flow_action_vf *act_vf;
1155 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1158 rte_flow_error_set(error, EINVAL,
1159 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1160 NULL, "NULL pattern.");
1165 rte_flow_error_set(error, EINVAL,
1166 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1167 NULL, "NULL action.");
1172 rte_flow_error_set(error, EINVAL,
1173 RTE_FLOW_ERROR_TYPE_ATTR,
1174 NULL, "NULL attribute.");
1178 /* The first not void item should be e-tag. */
1179 item = next_no_void_pattern(pattern, NULL);
1180 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1181 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1182 rte_flow_error_set(error, EINVAL,
1183 RTE_FLOW_ERROR_TYPE_ITEM,
1184 item, "Not supported by L2 tunnel filter");
1188 if (!item->spec || !item->mask) {
1189 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1190 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1191 item, "Not supported by L2 tunnel filter");
1195 /*Not supported last point for range*/
1197 rte_flow_error_set(error, EINVAL,
1198 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1199 item, "Not supported last point for range");
1203 e_tag_spec = item->spec;
1204 e_tag_mask = item->mask;
1206 /* Only care about GRP and E cid base. */
1207 if (e_tag_mask->epcp_edei_in_ecid_b ||
1208 e_tag_mask->in_ecid_e ||
1209 e_tag_mask->ecid_e ||
1210 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1211 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1212 rte_flow_error_set(error, EINVAL,
1213 RTE_FLOW_ERROR_TYPE_ITEM,
1214 item, "Not supported by L2 tunnel filter");
1218 filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1220 * grp and e_cid_base are bit fields and only use 14 bits.
1221 * e-tag id is taken as little endian by HW.
1223 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1225 /* check if the next not void item is END */
1226 item = next_no_void_pattern(pattern, item);
1227 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1228 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1229 rte_flow_error_set(error, EINVAL,
1230 RTE_FLOW_ERROR_TYPE_ITEM,
1231 item, "Not supported by L2 tunnel filter");
1236 /* must be input direction */
1237 if (!attr->ingress) {
1238 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1239 rte_flow_error_set(error, EINVAL,
1240 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1241 attr, "Only support ingress.");
1247 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1248 rte_flow_error_set(error, EINVAL,
1249 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1250 attr, "Not support egress.");
1255 if (attr->transfer) {
1256 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1257 rte_flow_error_set(error, EINVAL,
1258 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1259 attr, "No support for transfer.");
1264 if (attr->priority) {
1265 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1266 rte_flow_error_set(error, EINVAL,
1267 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1268 attr, "Not support priority.");
1272 /* check if the first not void action is VF or PF. */
1273 act = next_no_void_action(actions, NULL);
1274 if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1275 act->type != RTE_FLOW_ACTION_TYPE_PF) {
1276 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1277 rte_flow_error_set(error, EINVAL,
1278 RTE_FLOW_ERROR_TYPE_ACTION,
1279 act, "Not supported action.");
1283 if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1284 act_vf = (const struct rte_flow_action_vf *)act->conf;
1285 filter->pool = act_vf->id;
1287 filter->pool = pci_dev->max_vfs;
1290 /* check if the next not void item is END */
1291 act = next_no_void_action(actions, act);
1292 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1293 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1294 rte_flow_error_set(error, EINVAL,
1295 RTE_FLOW_ERROR_TYPE_ACTION,
1296 act, "Not supported action.");
1304 txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1305 const struct rte_flow_attr *attr,
1306 const struct rte_flow_item pattern[],
1307 const struct rte_flow_action actions[],
1308 struct txgbe_l2_tunnel_conf *l2_tn_filter,
1309 struct rte_flow_error *error)
1312 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1315 ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1316 actions, l2_tn_filter, error);
1318 vf_num = pci_dev->max_vfs;
1320 if (l2_tn_filter->pool > vf_num)
1326 /* Parse to get the attr and action info of flow director rule. */
1328 txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1329 const struct rte_flow_action actions[],
1330 struct txgbe_fdir_rule *rule,
1331 struct rte_flow_error *error)
1333 const struct rte_flow_action *act;
1334 const struct rte_flow_action_queue *act_q;
1335 const struct rte_flow_action_mark *mark;
1338 /* must be input direction */
1339 if (!attr->ingress) {
1340 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1341 rte_flow_error_set(error, EINVAL,
1342 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1343 attr, "Only support ingress.");
1349 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1350 rte_flow_error_set(error, EINVAL,
1351 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1352 attr, "Not support egress.");
1357 if (attr->transfer) {
1358 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1359 rte_flow_error_set(error, EINVAL,
1360 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1361 attr, "No support for transfer.");
1366 if (attr->priority) {
1367 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1368 rte_flow_error_set(error, EINVAL,
1369 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1370 attr, "Not support priority.");
1374 /* check if the first not void action is QUEUE or DROP. */
1375 act = next_no_void_action(actions, NULL);
1376 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1377 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1378 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1379 rte_flow_error_set(error, EINVAL,
1380 RTE_FLOW_ERROR_TYPE_ACTION,
1381 act, "Not supported action.");
1385 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1386 act_q = (const struct rte_flow_action_queue *)act->conf;
1387 rule->queue = act_q->index;
1389 /* signature mode does not support drop action. */
1390 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1391 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1392 rte_flow_error_set(error, EINVAL,
1393 RTE_FLOW_ERROR_TYPE_ACTION,
1394 act, "Not supported action.");
1397 rule->fdirflags = TXGBE_FDIRPICMD_DROP;
1400 /* check if the next not void item is MARK */
1401 act = next_no_void_action(actions, act);
1402 if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1403 act->type != RTE_FLOW_ACTION_TYPE_END) {
1404 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1405 rte_flow_error_set(error, EINVAL,
1406 RTE_FLOW_ERROR_TYPE_ACTION,
1407 act, "Not supported action.");
1413 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1414 mark = (const struct rte_flow_action_mark *)act->conf;
1415 rule->soft_id = mark->id;
1416 act = next_no_void_action(actions, act);
1419 /* check if the next not void item is END */
1420 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1421 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1422 rte_flow_error_set(error, EINVAL,
1423 RTE_FLOW_ERROR_TYPE_ACTION,
1424 act, "Not supported action.");
1431 /* search next no void pattern and skip fuzzy */
1433 const struct rte_flow_item *next_no_fuzzy_pattern(
1434 const struct rte_flow_item pattern[],
1435 const struct rte_flow_item *cur)
1437 const struct rte_flow_item *next =
1438 next_no_void_pattern(pattern, cur);
1440 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1442 next = next_no_void_pattern(pattern, next);
1446 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1448 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1449 const struct rte_flow_item *item;
1450 uint32_t sh, lh, mh;
1455 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1458 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0x0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
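/*
 * Application-side sketch of a perfect-mode flow director rule in the shape
 * documented above (documentation only, under the hypothetical
 * TXGBE_FLOW_DOC_EXAMPLES guard; port_id/queue_id/mark_id are placeholders
 * and RTE_IPV4() is assumed visible via rte_ip.h): IPV4 src/dst plus TCP
 * ports, steered to a queue and tagged with MARK. Adding a FUZZY item to the
 * pattern would request signature mode instead.
 */
#ifdef TXGBE_FLOW_DOC_EXAMPLES	/* hypothetical guard, documentation only */
static int
txgbe_doc_validate_fdir_rule(uint16_t port_id, uint16_t queue_id,
			     uint32_t mark_id)
{
	struct rte_flow_error err;
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = UINT32_MAX,
		.hdr.dst_addr = UINT32_MAX,
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(80),
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.src_port = UINT16_MAX,
		.hdr.dst_port = UINT16_MAX,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_id };
	struct rte_flow_action_mark mark = { .id = mark_id };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif /* TXGBE_FLOW_DOC_EXAMPLES */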
1536 txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
1537 const struct rte_flow_attr *attr,
1538 const struct rte_flow_item pattern[],
1539 const struct rte_flow_action actions[],
1540 struct txgbe_fdir_rule *rule,
1541 struct rte_flow_error *error)
1543 const struct rte_flow_item *item;
1544 const struct rte_flow_item_eth *eth_mask;
1545 const struct rte_flow_item_ipv4 *ipv4_spec;
1546 const struct rte_flow_item_ipv4 *ipv4_mask;
1547 const struct rte_flow_item_ipv6 *ipv6_spec;
1548 const struct rte_flow_item_ipv6 *ipv6_mask;
1549 const struct rte_flow_item_tcp *tcp_spec;
1550 const struct rte_flow_item_tcp *tcp_mask;
1551 const struct rte_flow_item_udp *udp_spec;
1552 const struct rte_flow_item_udp *udp_mask;
1553 const struct rte_flow_item_sctp *sctp_spec;
1554 const struct rte_flow_item_sctp *sctp_mask;
1555 const struct rte_flow_item_raw *raw_mask;
1556 const struct rte_flow_item_raw *raw_spec;
1561 rte_flow_error_set(error, EINVAL,
1562 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1563 NULL, "NULL pattern.");
1568 rte_flow_error_set(error, EINVAL,
1569 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1570 NULL, "NULL action.");
1575 rte_flow_error_set(error, EINVAL,
1576 RTE_FLOW_ERROR_TYPE_ATTR,
1577 NULL, "NULL attribute.");
1582 * Some fields may not be provided. Set spec to 0 and mask to default
1583 * value. So, we need not do anything for the not provided fields later.
1585 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1586 memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
1587 rule->mask.vlan_tci_mask = 0;
1588 rule->mask.flex_bytes_mask = 0;
1591 * The first not void item should be
1592 * MAC or IPv4 or TCP or UDP or SCTP.
1594 item = next_no_fuzzy_pattern(pattern, NULL);
1595 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1596 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1597 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1598 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1599 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1600 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1601 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1602 rte_flow_error_set(error, EINVAL,
1603 RTE_FLOW_ERROR_TYPE_ITEM,
1604 item, "Not supported by fdir filter");
1608 if (signature_match(pattern))
1609 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1611 rule->mode = RTE_FDIR_MODE_PERFECT;
1613 /*Not supported last point for range*/
1615 rte_flow_error_set(error, EINVAL,
1616 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1617 item, "Not supported last point for range");
1621 /* Get the MAC info. */
1622 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1624 * Only support vlan and dst MAC address,
1625 * others should be masked.
1627 if (item->spec && !item->mask) {
1628 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1629 rte_flow_error_set(error, EINVAL,
1630 RTE_FLOW_ERROR_TYPE_ITEM,
1631 item, "Not supported by fdir filter");
1636 rule->b_mask = TRUE;
1637 eth_mask = item->mask;
1639 /* Ether type should be masked. */
1640 if (eth_mask->type ||
1641 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1642 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1643 rte_flow_error_set(error, EINVAL,
1644 RTE_FLOW_ERROR_TYPE_ITEM,
1645 item, "Not supported by fdir filter");
1649 /* If ethernet has meaning, it means MAC VLAN mode. */
1650 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
			/**
			 * src MAC address must be fully masked out (all 0),
			 * and only a full (all 0xFF) dst MAC address mask
			 * is supported.
			 */
1656 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1657 if (eth_mask->src.addr_bytes[j] ||
1658 eth_mask->dst.addr_bytes[j] != 0xFF) {
1660 sizeof(struct txgbe_fdir_rule));
1661 rte_flow_error_set(error, EINVAL,
1662 RTE_FLOW_ERROR_TYPE_ITEM,
1663 item, "Not supported by fdir filter");
1668 /* When no VLAN, considered as full mask. */
1669 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		/* If both spec and mask are NULL, it means we do not
		 * care about the ETH header.
		 */
1677 * Check if the next not void item is vlan or ipv4.
1678 * IPv6 is not supported.
1680 item = next_no_fuzzy_pattern(pattern, item);
1681 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1682 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1683 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1684 rte_flow_error_set(error, EINVAL,
1685 RTE_FLOW_ERROR_TYPE_ITEM,
1686 item, "Not supported by fdir filter");
1690 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1691 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1692 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1693 rte_flow_error_set(error, EINVAL,
1694 RTE_FLOW_ERROR_TYPE_ITEM,
1695 item, "Not supported by fdir filter");
1701 /* Get the IPV4 info. */
1702 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1704 * Set the flow type even if there's no content
1705 * as we must have a flow type.
1707 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
1708 ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
1709 /*Not supported last point for range*/
1711 rte_flow_error_set(error, EINVAL,
1712 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1713 item, "Not supported last point for range");
1717 * Only care about src & dst addresses,
1718 * others should be masked.
1721 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1722 rte_flow_error_set(error, EINVAL,
1723 RTE_FLOW_ERROR_TYPE_ITEM,
1724 item, "Not supported by fdir filter");
1727 rule->b_mask = TRUE;
1728 ipv4_mask = item->mask;
1729 if (ipv4_mask->hdr.version_ihl ||
1730 ipv4_mask->hdr.type_of_service ||
1731 ipv4_mask->hdr.total_length ||
1732 ipv4_mask->hdr.packet_id ||
1733 ipv4_mask->hdr.fragment_offset ||
1734 ipv4_mask->hdr.time_to_live ||
1735 ipv4_mask->hdr.next_proto_id ||
1736 ipv4_mask->hdr.hdr_checksum) {
1737 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1738 rte_flow_error_set(error, EINVAL,
1739 RTE_FLOW_ERROR_TYPE_ITEM,
1740 item, "Not supported by fdir filter");
1743 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1744 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1747 rule->b_spec = TRUE;
1748 ipv4_spec = item->spec;
1749 rule->input.dst_ip[0] =
1750 ipv4_spec->hdr.dst_addr;
1751 rule->input.src_ip[0] =
1752 ipv4_spec->hdr.src_addr;
1756 * Check if the next not void item is
1757 * TCP or UDP or SCTP or END.
1759 item = next_no_fuzzy_pattern(pattern, item);
1760 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1761 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1762 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1763 item->type != RTE_FLOW_ITEM_TYPE_END &&
1764 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1765 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1766 rte_flow_error_set(error, EINVAL,
1767 RTE_FLOW_ERROR_TYPE_ITEM,
1768 item, "Not supported by fdir filter");
1773 /* Get the IPV6 info. */
1774 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1776 * Set the flow type even if there's no content
1777 * as we must have a flow type.
1779 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
1780 ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
		/**
		 * 1. must be signature match mode
		 * 2. "last" is not supported
		 * 3. mask must not be NULL
		 */
1787 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1790 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1791 rte_flow_error_set(error, EINVAL,
1792 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1793 item, "Not supported last point for range");
1797 rule->b_mask = TRUE;
1798 ipv6_mask = item->mask;
1799 if (ipv6_mask->hdr.vtc_flow ||
1800 ipv6_mask->hdr.payload_len ||
1801 ipv6_mask->hdr.proto ||
1802 ipv6_mask->hdr.hop_limits) {
1803 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1804 rte_flow_error_set(error, EINVAL,
1805 RTE_FLOW_ERROR_TYPE_ITEM,
1806 item, "Not supported by fdir filter");
1810 /* check src addr mask */
1811 for (j = 0; j < 16; j++) {
1812 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1813 rule->mask.src_ipv6_mask |= 1 << j;
1814 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1815 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1816 rte_flow_error_set(error, EINVAL,
1817 RTE_FLOW_ERROR_TYPE_ITEM,
1818 item, "Not supported by fdir filter");
1823 /* check dst addr mask */
1824 for (j = 0; j < 16; j++) {
1825 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1826 rule->mask.dst_ipv6_mask |= 1 << j;
1827 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1828 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1829 rte_flow_error_set(error, EINVAL,
1830 RTE_FLOW_ERROR_TYPE_ITEM,
1831 item, "Not supported by fdir filter");
1837 rule->b_spec = TRUE;
1838 ipv6_spec = item->spec;
1839 rte_memcpy(rule->input.src_ip,
1840 ipv6_spec->hdr.src_addr, 16);
1841 rte_memcpy(rule->input.dst_ip,
1842 ipv6_spec->hdr.dst_addr, 16);
1846 * Check if the next not void item is
1847 * TCP or UDP or SCTP or END.
1849 item = next_no_fuzzy_pattern(pattern, item);
1850 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1851 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1852 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1853 item->type != RTE_FLOW_ITEM_TYPE_END &&
1854 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1855 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1856 rte_flow_error_set(error, EINVAL,
1857 RTE_FLOW_ERROR_TYPE_ITEM,
1858 item, "Not supported by fdir filter");
1863 /* Get the TCP info. */
1864 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1866 * Set the flow type even if there's no content
1867 * as we must have a flow type.
1869 rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
1870 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
1871 /*Not supported last point for range*/
1873 rte_flow_error_set(error, EINVAL,
1874 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1875 item, "Not supported last point for range");
1879 * Only care about src & dst ports,
1880 * others should be masked.
1883 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1884 rte_flow_error_set(error, EINVAL,
1885 RTE_FLOW_ERROR_TYPE_ITEM,
1886 item, "Not supported by fdir filter");
1889 rule->b_mask = TRUE;
1890 tcp_mask = item->mask;
1891 if (tcp_mask->hdr.sent_seq ||
1892 tcp_mask->hdr.recv_ack ||
1893 tcp_mask->hdr.data_off ||
1894 tcp_mask->hdr.tcp_flags ||
1895 tcp_mask->hdr.rx_win ||
1896 tcp_mask->hdr.cksum ||
1897 tcp_mask->hdr.tcp_urp) {
1898 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1899 rte_flow_error_set(error, EINVAL,
1900 RTE_FLOW_ERROR_TYPE_ITEM,
1901 item, "Not supported by fdir filter");
1904 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1905 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1908 rule->b_spec = TRUE;
1909 tcp_spec = item->spec;
1910 rule->input.src_port =
1911 tcp_spec->hdr.src_port;
1912 rule->input.dst_port =
1913 tcp_spec->hdr.dst_port;
1916 item = next_no_fuzzy_pattern(pattern, item);
1917 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1918 item->type != RTE_FLOW_ITEM_TYPE_END) {
1919 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1920 rte_flow_error_set(error, EINVAL,
1921 RTE_FLOW_ERROR_TYPE_ITEM,
1922 item, "Not supported by fdir filter");
1927 /* Get the UDP info */
1928 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1930 * Set the flow type even if there's no content
1931 * as we must have a flow type.
1933 rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
1934 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
1935 /*Not supported last point for range*/
1937 rte_flow_error_set(error, EINVAL,
1938 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1939 item, "Not supported last point for range");
1943 * Only care about src & dst ports,
1944 * others should be masked.
1947 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1948 rte_flow_error_set(error, EINVAL,
1949 RTE_FLOW_ERROR_TYPE_ITEM,
1950 item, "Not supported by fdir filter");
1953 rule->b_mask = TRUE;
1954 udp_mask = item->mask;
1955 if (udp_mask->hdr.dgram_len ||
1956 udp_mask->hdr.dgram_cksum) {
1957 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1958 rte_flow_error_set(error, EINVAL,
1959 RTE_FLOW_ERROR_TYPE_ITEM,
1960 item, "Not supported by fdir filter");
1963 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1964 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1967 rule->b_spec = TRUE;
1968 udp_spec = item->spec;
1969 rule->input.src_port =
1970 udp_spec->hdr.src_port;
1971 rule->input.dst_port =
1972 udp_spec->hdr.dst_port;
1975 item = next_no_fuzzy_pattern(pattern, item);
1976 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1977 item->type != RTE_FLOW_ITEM_TYPE_END) {
1978 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1979 rte_flow_error_set(error, EINVAL,
1980 RTE_FLOW_ERROR_TYPE_ITEM,
1981 item, "Not supported by fdir filter");
1986 /* Get the SCTP info */
1987 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1989 * Set the flow type even if there's no content
1990 * as we must have a flow type.
1992 rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
1993 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
1994 /*Not supported last point for range*/
1996 rte_flow_error_set(error, EINVAL,
1997 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1998 item, "Not supported last point for range");
2003 * Only care about src & dst ports,
2004 * others should be masked.
2007 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2008 rte_flow_error_set(error, EINVAL,
2009 RTE_FLOW_ERROR_TYPE_ITEM,
2010 item, "Not supported by fdir filter");
2013 rule->b_mask = TRUE;
2014 sctp_mask = item->mask;
2015 if (sctp_mask->hdr.tag ||
2016 sctp_mask->hdr.cksum) {
2017 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2018 rte_flow_error_set(error, EINVAL,
2019 RTE_FLOW_ERROR_TYPE_ITEM,
2020 item, "Not supported by fdir filter");
2023 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2024 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2027 rule->b_spec = TRUE;
2028 sctp_spec = item->spec;
2029 rule->input.src_port =
2030 sctp_spec->hdr.src_port;
2031 rule->input.dst_port =
2032 sctp_spec->hdr.dst_port;
			/* otherwise, no SCTP field is supported, not even the ports */
2035 sctp_mask = item->mask;
2037 (sctp_mask->hdr.src_port ||
2038 sctp_mask->hdr.dst_port ||
2039 sctp_mask->hdr.tag ||
2040 sctp_mask->hdr.cksum)) {
2041 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2042 rte_flow_error_set(error, EINVAL,
2043 RTE_FLOW_ERROR_TYPE_ITEM,
2044 item, "Not supported by fdir filter");
2048 item = next_no_fuzzy_pattern(pattern, item);
2049 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2050 item->type != RTE_FLOW_ITEM_TYPE_END) {
2051 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2052 rte_flow_error_set(error, EINVAL,
2053 RTE_FLOW_ERROR_TYPE_ITEM,
2054 item, "Not supported by fdir filter");
2059 /* Get the flex byte info */
2060 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2061 /* Not supported last point for range*/
2063 rte_flow_error_set(error, EINVAL,
2064 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2065 item, "Not supported last point for range");
2068 /* mask should not be null */
2069 if (!item->mask || !item->spec) {
2070 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2071 rte_flow_error_set(error, EINVAL,
2072 RTE_FLOW_ERROR_TYPE_ITEM,
2073 item, "Not supported by fdir filter");
2077 raw_mask = item->mask;
2080 if (raw_mask->relative != 0x1 ||
2081 raw_mask->search != 0x1 ||
2082 raw_mask->reserved != 0x0 ||
2083 (uint32_t)raw_mask->offset != 0xffffffff ||
2084 raw_mask->limit != 0xffff ||
2085 raw_mask->length != 0xffff) {
2086 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2087 rte_flow_error_set(error, EINVAL,
2088 RTE_FLOW_ERROR_TYPE_ITEM,
2089 item, "Not supported by fdir filter");
2093 raw_spec = item->spec;
2096 if (raw_spec->relative != 0 ||
2097 raw_spec->search != 0 ||
2098 raw_spec->reserved != 0 ||
2099 raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
2100 raw_spec->offset % 2 ||
2101 raw_spec->limit != 0 ||
2102 raw_spec->length != 2 ||
2103 /* pattern can't be 0xffff */
2104 (raw_spec->pattern[0] == 0xff &&
2105 raw_spec->pattern[1] == 0xff)) {
2106 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2107 rte_flow_error_set(error, EINVAL,
2108 RTE_FLOW_ERROR_TYPE_ITEM,
2109 item, "Not supported by fdir filter");
2113 /* check pattern mask */
2114 if (raw_mask->pattern[0] != 0xff ||
2115 raw_mask->pattern[1] != 0xff) {
2116 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2117 rte_flow_error_set(error, EINVAL,
2118 RTE_FLOW_ERROR_TYPE_ITEM,
2119 item, "Not supported by fdir filter");
2123 rule->mask.flex_bytes_mask = 0xffff;
2124 rule->input.flex_bytes =
2125 (((uint16_t)raw_spec->pattern[1]) << 8) |
2126 raw_spec->pattern[0];
2127 rule->flex_bytes_offset = raw_spec->offset;
2130 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2131 /* check if the next not void item is END */
2132 item = next_no_fuzzy_pattern(pattern, item);
2133 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2134 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2135 rte_flow_error_set(error, EINVAL,
2136 RTE_FLOW_ERROR_TYPE_ITEM,
2137 item, "Not supported by fdir filter");
2142 rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
2144 return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info along the way.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be VXLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni {0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni {0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
2185 txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2186 const struct rte_flow_item pattern[],
2187 const struct rte_flow_action actions[],
2188 struct txgbe_fdir_rule *rule,
2189 struct rte_flow_error *error)
2191 const struct rte_flow_item *item;
2192 const struct rte_flow_item_eth *eth_mask;
2196 rte_flow_error_set(error, EINVAL,
2197 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2198 NULL, "NULL pattern.");
2203 rte_flow_error_set(error, EINVAL,
2204 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2205 NULL, "NULL action.");
2210 rte_flow_error_set(error, EINVAL,
2211 RTE_FLOW_ERROR_TYPE_ATTR,
2212 NULL, "NULL attribute.");
2217 * Some fields may not be provided. Set spec to 0 and mask to default
2218 * value, so we need not do anything later for the fields not provided.
2220 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2221 memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
2222 rule->mask.vlan_tci_mask = 0;
2225 * The first not void item should be
2226 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2228 item = next_no_void_pattern(pattern, NULL);
2229 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2230 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2231 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2232 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2233 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2234 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2235 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2236 rte_flow_error_set(error, EINVAL,
2237 RTE_FLOW_ERROR_TYPE_ITEM,
2238 item, "Not supported by fdir filter");
2242 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2245 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2246 /* Only used to describe the protocol stack. */
2247 if (item->spec || item->mask) {
2248 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2249 rte_flow_error_set(error, EINVAL,
2250 RTE_FLOW_ERROR_TYPE_ITEM,
2251 item, "Not supported by fdir filter");
2254 /* Ranges ("last") are not supported. */
2256 rte_flow_error_set(error, EINVAL,
2257 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2258 item, "Not supported last point for range");
2262 /* Check if the next not void item is IPv4 or IPv6. */
2263 item = next_no_void_pattern(pattern, item);
2264 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2265 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2266 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2267 rte_flow_error_set(error, EINVAL,
2268 RTE_FLOW_ERROR_TYPE_ITEM,
2269 item, "Not supported by fdir filter");
2275 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2276 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2277 /* Only used to describe the protocol stack. */
2278 if (item->spec || item->mask) {
2279 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2280 rte_flow_error_set(error, EINVAL,
2281 RTE_FLOW_ERROR_TYPE_ITEM,
2282 item, "Not supported by fdir filter");
2285 /* Ranges ("last") are not supported. */
2287 rte_flow_error_set(error, EINVAL,
2288 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2289 item, "Not supported last point for range");
2293 /* Check if the next not void item is UDP or NVGRE. */
2294 item = next_no_void_pattern(pattern, item);
2295 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2296 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2297 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2298 rte_flow_error_set(error, EINVAL,
2299 RTE_FLOW_ERROR_TYPE_ITEM,
2300 item, "Not supported by fdir filter");
2306 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2307 /* Only used to describe the protocol stack. */
2308 if (item->spec || item->mask) {
2309 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2310 rte_flow_error_set(error, EINVAL,
2311 RTE_FLOW_ERROR_TYPE_ITEM,
2312 item, "Not supported by fdir filter");
2315 /* Ranges ("last") are not supported. */
2317 rte_flow_error_set(error, EINVAL,
2318 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2319 item, "Not supported last point for range");
2323 /* Check if the next not void item is VxLAN. */
2324 item = next_no_void_pattern(pattern, item);
2325 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2326 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2327 rte_flow_error_set(error, EINVAL,
2328 RTE_FLOW_ERROR_TYPE_ITEM,
2329 item, "Not supported by fdir filter");
2334 /* check if the next not void item is the inner MAC (ETH) */
2335 item = next_no_void_pattern(pattern, item);
2336 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2337 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2338 rte_flow_error_set(error, EINVAL,
2339 RTE_FLOW_ERROR_TYPE_ITEM,
2340 item, "Not supported by fdir filter");
2345 * Only VLAN and dst MAC address are supported;
2346 * other fields should be masked.
2350 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2351 rte_flow_error_set(error, EINVAL,
2352 RTE_FLOW_ERROR_TYPE_ITEM,
2353 item, "Not supported by fdir filter");
2356 /* Ranges ("last") are not supported. */
2358 rte_flow_error_set(error, EINVAL,
2359 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2360 item, "Not supported last point for range");
2363 rule->b_mask = TRUE;
2364 eth_mask = item->mask;
2366 /* Ether type should be masked. */
2367 if (eth_mask->type) {
2368 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2369 rte_flow_error_set(error, EINVAL,
2370 RTE_FLOW_ERROR_TYPE_ITEM,
2371 item, "Not supported by fdir filter");
2375 /* src MAC address should be masked. */
2376 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2377 if (eth_mask->src.addr_bytes[j]) {
2379 sizeof(struct txgbe_fdir_rule));
2380 rte_flow_error_set(error, EINVAL,
2381 RTE_FLOW_ERROR_TYPE_ITEM,
2382 item, "Not supported by fdir filter");
2386 rule->mask.mac_addr_byte_mask = 0;
2387 for (j = 0; j < ETH_ADDR_LEN; j++) {
2388 /* It's a per-byte mask: e.g. ff:ff:ff:ff:ff:ff yields 0x3F. */
2389 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2390 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2391 } else if (eth_mask->dst.addr_bytes[j]) {
2392 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2393 rte_flow_error_set(error, EINVAL,
2394 RTE_FLOW_ERROR_TYPE_ITEM,
2395 item, "Not supported by fdir filter");
2400 /* When there is no VLAN, treat it as a full mask. */
2401 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2404 * Check if the next not void item is VLAN or IPv4.
2405 * IPv6 is not supported.
2407 item = next_no_void_pattern(pattern, item);
2408 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
2409 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2410 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2411 rte_flow_error_set(error, EINVAL,
2412 RTE_FLOW_ERROR_TYPE_ITEM,
2413 item, "Not supported by fdir filter");
2416 /* Ranges ("last") are not supported. */
2418 rte_flow_error_set(error, EINVAL,
2419 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2420 item, "Not supported last point for range");
2425 * If the tag is 0, it means we don't care about the VLAN.
2429 return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
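/*
 * Illustrative sketch only, not part of the driver: the item sequence the
 * tunnel parser above accepts for a VxLAN rule. The VNI spec/mask on the
 * VXLAN item and the trailing VLAN item are omitted for brevity; the inner
 * ETH spec/mask shows the "dst MAC only, per byte" constraint checked
 * above. All values are example assumptions.
 */
static const struct rte_flow_item_eth example_inner_eth_spec = {
	.dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
};
static const struct rte_flow_item_eth example_inner_eth_mask = {
	.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	/* src and type stay zero: they must be masked out. */
};
static const struct rte_flow_item example_vxlan_fdir_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer L2, no spec/mask */
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* outer L3, no spec/mask */
	{ .type = RTE_FLOW_ITEM_TYPE_UDP },	/* no spec/mask */
	{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },	/* VNI spec/mask omitted */
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,	/* inner MAC */
	  .spec = &example_inner_eth_spec,
	  .mask = &example_inner_eth_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};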
2433 txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2434 const struct rte_flow_attr *attr,
2435 const struct rte_flow_item pattern[],
2436 const struct rte_flow_action actions[],
2437 struct txgbe_fdir_rule *rule,
2438 struct rte_flow_error *error)
2441 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2442 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2444 ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
2445 actions, rule, error);
2449 ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
2450 actions, rule, error);
2456 if (hw->mac.type == txgbe_mac_raptor &&
2457 rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
2458 (rule->input.src_port != 0 || rule->input.dst_port != 0))
2461 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2462 fdir_mode != rule->mode)
2465 if (rule->queue >= dev->data->nb_rx_queues)
2472 txgbe_parse_rss_filter(struct rte_eth_dev *dev,
2473 const struct rte_flow_attr *attr,
2474 const struct rte_flow_action actions[],
2475 struct txgbe_rte_flow_rss_conf *rss_conf,
2476 struct rte_flow_error *error)
2478 const struct rte_flow_action *act;
2479 const struct rte_flow_action_rss *rss;
2483 * RSS only supports forwarding,
2484 * so check if the first not void action is RSS.
2486 act = next_no_void_action(actions, NULL);
2487 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2488 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2489 rte_flow_error_set(error, EINVAL,
2490 RTE_FLOW_ERROR_TYPE_ACTION,
2491 act, "Not supported action.");
2495 rss = (const struct rte_flow_action_rss *)act->conf;
2497 if (!rss || !rss->queue_num) {
2498 rte_flow_error_set(error, EINVAL,
2499 RTE_FLOW_ERROR_TYPE_ACTION,
2505 for (n = 0; n < rss->queue_num; n++) {
2506 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2507 rte_flow_error_set(error, EINVAL,
2508 RTE_FLOW_ERROR_TYPE_ACTION,
2510 "queue id > max number of queues");
2515 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2516 return rte_flow_error_set
2517 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2518 "non-default RSS hash functions are not supported");
2520 return rte_flow_error_set
2521 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2522 "a nonzero RSS encapsulation level is not supported");
2523 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2524 return rte_flow_error_set
2525 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2526 "RSS hash key must be exactly 40 bytes");
2527 if (rss->queue_num > RTE_DIM(rss_conf->queue))
2528 return rte_flow_error_set
2529 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2530 "too many queues for RSS context");
2531 if (txgbe_rss_conf_init(rss_conf, rss))
2532 return rte_flow_error_set
2533 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2534 "RSS context initialization failure");
2536 /* check if the next not void action is END */
2537 act = next_no_void_action(actions, act);
2538 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2539 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2540 rte_flow_error_set(error, EINVAL,
2541 RTE_FLOW_ERROR_TYPE_ACTION,
2542 act, "Not supported action.");
2547 /* must be input direction */
2548 if (!attr->ingress) {
2549 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2550 rte_flow_error_set(error, EINVAL,
2551 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2552 attr, "Only support ingress.");
2558 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2559 rte_flow_error_set(error, EINVAL,
2560 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2561 attr, "Not support egress.");
2566 if (attr->transfer) {
2567 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2568 rte_flow_error_set(error, EINVAL,
2569 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2570 attr, "No support for transfer.");
2574 if (attr->priority > 0xFFFF) {
2575 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2576 rte_flow_error_set(error, EINVAL,
2577 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2578 attr, "Error priority.");
2585 /* remove the rss filter */
2587 txgbe_clear_rss_filter(struct rte_eth_dev *dev)
2589 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
2591 if (filter_info->rss_info.conf.queue_num)
2592 txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2596 txgbe_filterlist_init(void)
2598 TAILQ_INIT(&filter_ntuple_list);
2599 TAILQ_INIT(&filter_ethertype_list);
2600 TAILQ_INIT(&filter_syn_list);
2601 TAILQ_INIT(&filter_fdir_list);
2602 TAILQ_INIT(&filter_l2_tunnel_list);
2603 TAILQ_INIT(&filter_rss_list);
2604 TAILQ_INIT(&txgbe_flow_list);
2608 txgbe_filterlist_flush(void)
2610 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2611 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2612 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2613 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2614 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2615 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2616 struct txgbe_rss_conf_ele *rss_filter_ptr;
2618 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2619 TAILQ_REMOVE(&filter_ntuple_list,
2622 rte_free(ntuple_filter_ptr);
2625 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2626 TAILQ_REMOVE(&filter_ethertype_list,
2627 ethertype_filter_ptr,
2629 rte_free(ethertype_filter_ptr);
2632 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2633 TAILQ_REMOVE(&filter_syn_list,
2636 rte_free(syn_filter_ptr);
2639 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2640 TAILQ_REMOVE(&filter_l2_tunnel_list,
2643 rte_free(l2_tn_filter_ptr);
2646 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2647 TAILQ_REMOVE(&filter_fdir_list,
2650 rte_free(fdir_rule_ptr);
2653 while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2654 TAILQ_REMOVE(&filter_rss_list,
2657 rte_free(rss_filter_ptr);
2660 while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
2661 TAILQ_REMOVE(&txgbe_flow_list,
2664 rte_free(txgbe_flow_mem_ptr->flow);
2665 rte_free(txgbe_flow_mem_ptr);
2670 * Create or destroy a flow rule.
2671 * Theoretically one rule can match more than one kind of filter.
2672 * We will let it use the first filter type that it hits.
2673 * So, the sequence matters.
2675 static struct rte_flow *
2676 txgbe_flow_create(struct rte_eth_dev *dev,
2677 const struct rte_flow_attr *attr,
2678 const struct rte_flow_item pattern[],
2679 const struct rte_flow_action actions[],
2680 struct rte_flow_error *error)
2683 struct rte_eth_ntuple_filter ntuple_filter;
2684 struct rte_eth_ethertype_filter ethertype_filter;
2685 struct rte_eth_syn_filter syn_filter;
2686 struct txgbe_fdir_rule fdir_rule;
2687 struct txgbe_l2_tunnel_conf l2_tn_filter;
2688 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
2689 struct txgbe_rte_flow_rss_conf rss_conf;
2690 struct rte_flow *flow = NULL;
2691 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2692 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2693 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2694 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2695 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2696 struct txgbe_rss_conf_ele *rss_filter_ptr;
2697 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2698 uint8_t first_mask = FALSE;
2700 flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
2702 PMD_DRV_LOG(ERR, "failed to allocate memory");
2703 return (struct rte_flow *)flow;
2705 txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
2706 sizeof(struct txgbe_flow_mem), 0);
2707 if (!txgbe_flow_mem_ptr) {
2708 PMD_DRV_LOG(ERR, "failed to allocate memory");
2712 txgbe_flow_mem_ptr->flow = flow;
2713 TAILQ_INSERT_TAIL(&txgbe_flow_list,
2714 txgbe_flow_mem_ptr, entries);
2716 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2717 ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2718 actions, &ntuple_filter, error);
2720 #ifdef RTE_LIB_SECURITY
2721 /* An ESP flow is not really a flow. */
2722 if (ntuple_filter.proto == IPPROTO_ESP)
2727 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2729 ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
2730 sizeof(struct txgbe_ntuple_filter_ele), 0);
2731 if (!ntuple_filter_ptr) {
2732 PMD_DRV_LOG(ERR, "failed to allocate memory");
2735 rte_memcpy(&ntuple_filter_ptr->filter_info,
2737 sizeof(struct rte_eth_ntuple_filter));
2738 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2739 ntuple_filter_ptr, entries);
2740 flow->rule = ntuple_filter_ptr;
2741 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2747 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2748 ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2749 actions, ðertype_filter, error);
2751 ret = txgbe_add_del_ethertype_filter(dev,
2752 ðertype_filter, TRUE);
2754 ethertype_filter_ptr =
2755 rte_zmalloc("txgbe_ethertype_filter",
2756 sizeof(struct txgbe_ethertype_filter_ele), 0);
2757 if (!ethertype_filter_ptr) {
2758 PMD_DRV_LOG(ERR, "failed to allocate memory");
2761 rte_memcpy(ðertype_filter_ptr->filter_info,
2763 sizeof(struct rte_eth_ethertype_filter));
2764 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2765 ethertype_filter_ptr, entries);
2766 flow->rule = ethertype_filter_ptr;
2767 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2773 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2774 ret = txgbe_parse_syn_filter(dev, attr, pattern,
2775 actions, &syn_filter, error);
2777 ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
2779 syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
2780 sizeof(struct txgbe_eth_syn_filter_ele), 0);
2781 if (!syn_filter_ptr) {
2782 PMD_DRV_LOG(ERR, "failed to allocate memory");
2785 rte_memcpy(&syn_filter_ptr->filter_info,
2787 sizeof(struct rte_eth_syn_filter));
2788 TAILQ_INSERT_TAIL(&filter_syn_list,
2791 flow->rule = syn_filter_ptr;
2792 flow->filter_type = RTE_ETH_FILTER_SYN;
2798 memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2799 ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2800 actions, &fdir_rule, error);
2802 /* A mask cannot be deleted. */
2803 if (fdir_rule.b_mask) {
2804 if (!fdir_info->mask_added) {
2805 /* It's the first time the mask is set. */
2806 rte_memcpy(&fdir_info->mask,
2808 sizeof(struct txgbe_hw_fdir_mask));
2809 fdir_info->flex_bytes_offset =
2810 fdir_rule.flex_bytes_offset;
2812 if (fdir_rule.mask.flex_bytes_mask)
2813 txgbe_fdir_set_flexbytes_offset(dev,
2814 fdir_rule.flex_bytes_offset);
2816 ret = txgbe_fdir_set_input_mask(dev);
2820 fdir_info->mask_added = TRUE;
2824 * Only one global mask is supported;
2825 * all the masks should be the same.
2827 ret = memcmp(&fdir_info->mask,
2829 sizeof(struct txgbe_hw_fdir_mask));
2833 if (fdir_info->flex_bytes_offset !=
2834 fdir_rule.flex_bytes_offset)
2839 if (fdir_rule.b_spec) {
2840 ret = txgbe_fdir_filter_program(dev, &fdir_rule,
2843 fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
2844 sizeof(struct txgbe_fdir_rule_ele), 0);
2845 if (!fdir_rule_ptr) {
2847 "failed to allocate memory");
2850 rte_memcpy(&fdir_rule_ptr->filter_info,
2852 sizeof(struct txgbe_fdir_rule));
2853 TAILQ_INSERT_TAIL(&filter_fdir_list,
2854 fdir_rule_ptr, entries);
2855 flow->rule = fdir_rule_ptr;
2856 flow->filter_type = RTE_ETH_FILTER_FDIR;
2863 * clear the mask_added flag if we fail to program the rule
2867 fdir_info->mask_added = FALSE;
2875 memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2876 ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2877 actions, &l2_tn_filter, error);
2879 ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2881 l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
2882 sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
2883 if (!l2_tn_filter_ptr) {
2884 PMD_DRV_LOG(ERR, "failed to allocate memory");
2887 rte_memcpy(&l2_tn_filter_ptr->filter_info,
2889 sizeof(struct txgbe_l2_tunnel_conf));
2890 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2891 l2_tn_filter_ptr, entries);
2892 flow->rule = l2_tn_filter_ptr;
2893 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2898 memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2899 ret = txgbe_parse_rss_filter(dev, attr,
2900 actions, &rss_conf, error);
2902 ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
2904 rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
2905 sizeof(struct txgbe_rss_conf_ele), 0);
2906 if (!rss_filter_ptr) {
2907 PMD_DRV_LOG(ERR, "failed to allocate memory");
2910 txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
2912 TAILQ_INSERT_TAIL(&filter_rss_list,
2913 rss_filter_ptr, entries);
2914 flow->rule = rss_filter_ptr;
2915 flow->filter_type = RTE_ETH_FILTER_HASH;
2921 TAILQ_REMOVE(&txgbe_flow_list,
2922 txgbe_flow_mem_ptr, entries);
2923 rte_flow_error_set(error, -ret,
2924 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2925 "Failed to create flow.");
2926 rte_free(txgbe_flow_mem_ptr);
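/*
 * Illustrative sketch only, not part of the driver: how an application
 * reaches txgbe_flow_create() through the generic rte_flow API. port_id,
 * attr, pattern and actions are assumptions supplied by the caller, e.g.
 * built as in the sketches earlier in this file.
 */
static struct rte_flow *
example_install_rule(uint16_t port_id,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	/* Format check only; see txgbe_flow_validate() below. */
	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
		return NULL;

	/*
	 * The PMD tries each filter type in the order coded above
	 * (ntuple, ethertype, SYN, fdir, L2 tunnel, RSS) and uses the
	 * first one that accepts the rule. On failure, err.message
	 * explains why.
	 */
	return rte_flow_create(port_id, attr, pattern, actions, &err);
}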
2932 * Check if the flow rule is supported by txgbe.
2933 * It only checks the format; it does not guarantee that the rule can be
2934 * programmed into the HW, because there may not be enough room for it.
2937 txgbe_flow_validate(struct rte_eth_dev *dev,
2938 const struct rte_flow_attr *attr,
2939 const struct rte_flow_item pattern[],
2940 const struct rte_flow_action actions[],
2941 struct rte_flow_error *error)
2943 struct rte_eth_ntuple_filter ntuple_filter;
2944 struct rte_eth_ethertype_filter ethertype_filter;
2945 struct rte_eth_syn_filter syn_filter;
2946 struct txgbe_l2_tunnel_conf l2_tn_filter;
2947 struct txgbe_fdir_rule fdir_rule;
2948 struct txgbe_rte_flow_rss_conf rss_conf;
2951 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2952 ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2953 actions, &ntuple_filter, error);
2957 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2958 ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2959 actions, ðertype_filter, error);
2963 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2964 ret = txgbe_parse_syn_filter(dev, attr, pattern,
2965 actions, &syn_filter, error);
2969 memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2970 ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2971 actions, &fdir_rule, error);
2975 memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2976 ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2977 actions, &l2_tn_filter, error);
2981 memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2982 ret = txgbe_parse_rss_filter(dev, attr,
2983 actions, &rss_conf, error);
2988 /* Destroy a flow rule on txgbe. */
2990 txgbe_flow_destroy(struct rte_eth_dev *dev,
2991 struct rte_flow *flow,
2992 struct rte_flow_error *error)
2995 struct rte_flow *pmd_flow = flow;
2996 enum rte_filter_type filter_type = pmd_flow->filter_type;
2997 struct rte_eth_ntuple_filter ntuple_filter;
2998 struct rte_eth_ethertype_filter ethertype_filter;
2999 struct rte_eth_syn_filter syn_filter;
3000 struct txgbe_fdir_rule fdir_rule;
3001 struct txgbe_l2_tunnel_conf l2_tn_filter;
3002 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
3003 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
3004 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
3005 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3006 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
3007 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
3008 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
3009 struct txgbe_rss_conf_ele *rss_filter_ptr;
3011 switch (filter_type) {
3012 case RTE_ETH_FILTER_NTUPLE:
3013 ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
3015 rte_memcpy(&ntuple_filter,
3016 &ntuple_filter_ptr->filter_info,
3017 sizeof(struct rte_eth_ntuple_filter));
3018 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3020 TAILQ_REMOVE(&filter_ntuple_list,
3021 ntuple_filter_ptr, entries);
3022 rte_free(ntuple_filter_ptr);
3025 case RTE_ETH_FILTER_ETHERTYPE:
3026 ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
3028 rte_memcpy(ðertype_filter,
3029 ðertype_filter_ptr->filter_info,
3030 sizeof(struct rte_eth_ethertype_filter));
3031 ret = txgbe_add_del_ethertype_filter(dev,
3032 ðertype_filter, FALSE);
3034 TAILQ_REMOVE(&filter_ethertype_list,
3035 ethertype_filter_ptr, entries);
3036 rte_free(ethertype_filter_ptr);
3039 case RTE_ETH_FILTER_SYN:
3040 syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
3042 rte_memcpy(&syn_filter,
3043 &syn_filter_ptr->filter_info,
3044 sizeof(struct rte_eth_syn_filter));
3045 ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
3047 TAILQ_REMOVE(&filter_syn_list,
3048 syn_filter_ptr, entries);
3049 rte_free(syn_filter_ptr);
3052 case RTE_ETH_FILTER_FDIR:
3053 fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
3054 rte_memcpy(&fdir_rule,
3055 &fdir_rule_ptr->filter_info,
3056 sizeof(struct txgbe_fdir_rule));
3057 ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3059 TAILQ_REMOVE(&filter_fdir_list,
3060 fdir_rule_ptr, entries);
3061 rte_free(fdir_rule_ptr);
3062 if (TAILQ_EMPTY(&filter_fdir_list))
3063 fdir_info->mask_added = false;
3066 case RTE_ETH_FILTER_L2_TUNNEL:
3067 l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
3069 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3070 sizeof(struct txgbe_l2_tunnel_conf));
3071 ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3073 TAILQ_REMOVE(&filter_l2_tunnel_list,
3074 l2_tn_filter_ptr, entries);
3075 rte_free(l2_tn_filter_ptr);
3078 case RTE_ETH_FILTER_HASH:
3079 rss_filter_ptr = (struct txgbe_rss_conf_ele *)
3081 ret = txgbe_config_rss_filter(dev,
3082 &rss_filter_ptr->filter_info, FALSE);
3084 TAILQ_REMOVE(&filter_rss_list,
3085 rss_filter_ptr, entries);
3086 rte_free(rss_filter_ptr);
3090 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3097 rte_flow_error_set(error, EINVAL,
3098 RTE_FLOW_ERROR_TYPE_HANDLE,
3099 NULL, "Failed to destroy flow");
3103 TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
3104 if (txgbe_flow_mem_ptr->flow == pmd_flow) {
3105 TAILQ_REMOVE(&txgbe_flow_list,
3106 txgbe_flow_mem_ptr, entries);
3107 rte_free(txgbe_flow_mem_ptr);
3115 /* Destroy all flow rules associated with a port on txgbe. */
3117 txgbe_flow_flush(struct rte_eth_dev *dev,
3118 struct rte_flow_error *error)
3122 txgbe_clear_all_ntuple_filter(dev);
3123 txgbe_clear_all_ethertype_filter(dev);
3124 txgbe_clear_syn_filter(dev);
3126 ret = txgbe_clear_all_fdir_filter(dev);
3128 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3129 NULL, "Failed to flush rule");
3133 ret = txgbe_clear_all_l2_tn_filter(dev);
3135 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3136 NULL, "Failed to flush rule");
3140 txgbe_clear_rss_filter(dev);
3142 txgbe_filterlist_flush();
3147 const struct rte_flow_ops txgbe_flow_ops = {
3148 .validate = txgbe_flow_validate,
3149 .create = txgbe_flow_create,
3150 .destroy = txgbe_flow_destroy,
3151 .flush = txgbe_flow_flush,
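/*
 * Illustrative sketch only, not part of the driver: application-side
 * teardown. rte_flow_destroy() and rte_flow_flush() reach
 * txgbe_flow_destroy() and txgbe_flow_flush() through the ops table
 * above. port_id and flow are example assumptions.
 */
static void
example_teardown(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error err;

	if (flow != NULL)
		(void)rte_flow_destroy(port_id, flow, &err);

	/* Or remove every rule installed on the port at once. */
	(void)rte_flow_flush(port_id, &err);
}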