1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
6 #include <rte_bus_pci.h>
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
11 #include "txgbe_ethdev.h"
13 #define TXGBE_MIN_N_TUPLE_PRIO 1
14 #define TXGBE_MAX_N_TUPLE_PRIO 7
15 #define TXGBE_MAX_FLX_SOURCE_OFF 62
17 /* ntuple filter list structure */
18 struct txgbe_ntuple_filter_ele {
19 TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
20 struct rte_eth_ntuple_filter filter_info;
22 /* ethertype filter list structure */
23 struct txgbe_ethertype_filter_ele {
24 TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
25 struct rte_eth_ethertype_filter filter_info;
27 /* syn filter list structure */
28 struct txgbe_eth_syn_filter_ele {
29 TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
30 struct rte_eth_syn_filter filter_info;
32 /* fdir filter list structure */
33 struct txgbe_fdir_rule_ele {
34 TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
35 struct txgbe_fdir_rule filter_info;
37 /* l2_tunnel filter list structure */
38 struct txgbe_eth_l2_tunnel_conf_ele {
39 TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
40 struct txgbe_l2_tunnel_conf filter_info;
42 /* rss filter list structure */
43 struct txgbe_rss_conf_ele {
44 TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
45 struct txgbe_rte_flow_rss_conf filter_info;
47 /* txgbe_flow memory list structure */
48 struct txgbe_flow_mem {
49 TAILQ_ENTRY(txgbe_flow_mem) entries;
50 struct rte_flow *flow;
53 TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
54 TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
55 TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
56 TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
57 TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
58 TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
59 TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
61 static struct txgbe_ntuple_filter_list filter_ntuple_list;
62 static struct txgbe_ethertype_filter_list filter_ethertype_list;
63 static struct txgbe_syn_filter_list filter_syn_list;
64 static struct txgbe_fdir_rule_filter_list filter_fdir_list;
65 static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
66 static struct txgbe_rss_filter_list filter_rss_list;
67 static struct txgbe_flow_mem_list txgbe_flow_list;
70 * An endless loop cannot occur, given the assumptions below:
71 * 1. there is at least one non-void item (END).
72 * 2. cur is before END.
75 const struct rte_flow_item *next_no_void_pattern(
76 const struct rte_flow_item pattern[],
77 const struct rte_flow_item *cur)
79 const struct rte_flow_item *next =
80 cur ? cur + 1 : &pattern[0];
82 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
89 const struct rte_flow_action *next_no_void_action(
90 const struct rte_flow_action actions[],
91 const struct rte_flow_action *cur)
93 const struct rte_flow_action *next =
94 cur ? cur + 1 : &actions[0];
96 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
103 * Please be aware of an assumption shared by all the parsers:
104 * rte_flow_item is using big endian, rte_flow_attr and
105 * rte_flow_action are using CPU order.
106 * Because the pattern is used to describe the packets,
107 * normally the packets should use network order.
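 *
 * A minimal illustration of this convention (example values only, not
 * driver code): multi-byte pattern fields are given in network order,
 * while attr/action fields stay in CPU order.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80) };	// big endian
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_action_queue queue = { .index = 3 };	// CPU order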
111 * Parse the rule to see if it is an n-tuple rule,
112 * and get the n-tuple filter info along the way.
114 * The first not void item can be ETH or IPV4.
115 * The second not void item must be IPV4 if the first one is ETH.
116 * The third not void item must be UDP or TCP.
117 * The next not void item must be END.
119 * The first not void action should be QUEUE.
120 * The next not void action should be END.
124 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
125 * dst_addr 192.167.3.50 0xFFFFFFFF
126 * next_proto_id 17 0xFF
127 * UDP/TCP/ src_port 80 0xFFFF
128 * SCTP dst_port 80 0xFFFF
130 * other members in mask and spec should be set to 0x00.
131 * item->last should be NULL.
133 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
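 *
 * An illustrative sketch (example values only, not driver code) of how an
 * application could express the pattern/action table above through the
 * generic rte_flow API:
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *		.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *		.next_proto_id = IPPROTO_UDP } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *		.next_proto_id = UINT8_MAX } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80) } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};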
137 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
138 const struct rte_flow_item pattern[],
139 const struct rte_flow_action actions[],
140 struct rte_eth_ntuple_filter *filter,
141 struct rte_flow_error *error)
143 const struct rte_flow_item *item;
144 const struct rte_flow_action *act;
145 const struct rte_flow_item_ipv4 *ipv4_spec;
146 const struct rte_flow_item_ipv4 *ipv4_mask;
147 const struct rte_flow_item_tcp *tcp_spec;
148 const struct rte_flow_item_tcp *tcp_mask;
149 const struct rte_flow_item_udp *udp_spec;
150 const struct rte_flow_item_udp *udp_mask;
151 const struct rte_flow_item_sctp *sctp_spec;
152 const struct rte_flow_item_sctp *sctp_mask;
153 const struct rte_flow_item_eth *eth_spec;
154 const struct rte_flow_item_eth *eth_mask;
155 const struct rte_flow_item_vlan *vlan_spec;
156 const struct rte_flow_item_vlan *vlan_mask;
157 struct rte_flow_item_eth eth_null;
158 struct rte_flow_item_vlan vlan_null;
161 rte_flow_error_set(error,
162 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
163 NULL, "NULL pattern.");
168 rte_flow_error_set(error, EINVAL,
169 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
170 NULL, "NULL action.");
174 rte_flow_error_set(error, EINVAL,
175 RTE_FLOW_ERROR_TYPE_ATTR,
176 NULL, "NULL attribute.");
180 memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
181 memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
183 #ifdef RTE_LIB_SECURITY
185 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
187 act = next_no_void_action(actions, NULL);
188 if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
189 const void *conf = act->conf;
190 /* check if the next not void action is END */
191 act = next_no_void_action(actions, act);
192 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
193 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
194 rte_flow_error_set(error, EINVAL,
195 RTE_FLOW_ERROR_TYPE_ACTION,
196 act, "Not supported action.");
200 /* get the IP pattern */
201 item = next_no_void_pattern(pattern, NULL);
202 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
203 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
205 item->type == RTE_FLOW_ITEM_TYPE_END) {
206 rte_flow_error_set(error, EINVAL,
207 RTE_FLOW_ERROR_TYPE_ITEM,
208 item, "IP pattern missing.");
211 item = next_no_void_pattern(pattern, item);
214 filter->proto = IPPROTO_ESP;
215 return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
216 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
220 /* the first not void item can be MAC or IPv4 */
221 item = next_no_void_pattern(pattern, NULL);
223 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
224 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
225 rte_flow_error_set(error, EINVAL,
226 RTE_FLOW_ERROR_TYPE_ITEM,
227 item, "Not supported by ntuple filter");
231 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
232 eth_spec = item->spec;
233 eth_mask = item->mask;
234 /* Range matching via item->last is not supported */
236 rte_flow_error_set(error,
238 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
239 item, "Not supported last point for range");
242 /* if the first item is MAC, the content should be NULL */
243 if ((item->spec && memcmp(eth_spec, &eth_null,
244 sizeof(struct rte_flow_item_eth))) ||
245 (item->mask && memcmp(eth_mask, &eth_null,
246 sizeof(struct rte_flow_item_eth)))) {
247 rte_flow_error_set(error, EINVAL,
248 RTE_FLOW_ERROR_TYPE_ITEM,
249 item, "Not supported by ntuple filter");
252 /* check if the next not void item is IPv4 or Vlan */
253 item = next_no_void_pattern(pattern, item);
254 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
255 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
256 rte_flow_error_set(error,
257 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
258 item, "Not supported by ntuple filter");
263 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
264 vlan_spec = item->spec;
265 vlan_mask = item->mask;
266 /* Range matching via item->last is not supported */
268 rte_flow_error_set(error,
269 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
270 item, "Not supported last point for range");
273 /* the content should be NULL */
274 if ((item->spec && memcmp(vlan_spec, &vlan_null,
275 sizeof(struct rte_flow_item_vlan))) ||
276 (item->mask && memcmp(vlan_mask, &vlan_null,
277 sizeof(struct rte_flow_item_vlan)))) {
278 rte_flow_error_set(error, EINVAL,
279 RTE_FLOW_ERROR_TYPE_ITEM,
280 item, "Not supported by ntuple filter");
283 /* check if the next not void item is IPv4 */
284 item = next_no_void_pattern(pattern, item);
285 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
286 rte_flow_error_set(error,
287 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
288 item, "Not supported by ntuple filter");
294 /* get the IPv4 info */
295 if (!item->spec || !item->mask) {
296 rte_flow_error_set(error, EINVAL,
297 RTE_FLOW_ERROR_TYPE_ITEM,
298 item, "Invalid ntuple mask");
301 /* Range matching via item->last is not supported */
303 rte_flow_error_set(error, EINVAL,
304 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
305 item, "Not supported last point for range");
309 ipv4_mask = item->mask;
311 * Only src & dst addresses and the protocol are supported;
312 * other fields must be masked out.
314 if (ipv4_mask->hdr.version_ihl ||
315 ipv4_mask->hdr.type_of_service ||
316 ipv4_mask->hdr.total_length ||
317 ipv4_mask->hdr.packet_id ||
318 ipv4_mask->hdr.fragment_offset ||
319 ipv4_mask->hdr.time_to_live ||
320 ipv4_mask->hdr.hdr_checksum) {
321 rte_flow_error_set(error,
322 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
323 item, "Not supported by ntuple filter");
326 if ((ipv4_mask->hdr.src_addr != 0 &&
327 ipv4_mask->hdr.src_addr != UINT32_MAX) ||
328 (ipv4_mask->hdr.dst_addr != 0 &&
329 ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
330 (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
331 ipv4_mask->hdr.next_proto_id != 0)) {
332 rte_flow_error_set(error,
333 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
334 item, "Not supported by ntuple filter");
338 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
339 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
340 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
342 ipv4_spec = item->spec;
343 filter->dst_ip = ipv4_spec->hdr.dst_addr;
344 filter->src_ip = ipv4_spec->hdr.src_addr;
345 filter->proto = ipv4_spec->hdr.next_proto_id;
348 /* check if the next not void item is TCP or UDP or SCTP or END */
349 item = next_no_void_pattern(pattern, item);
350 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
351 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
352 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
353 item->type != RTE_FLOW_ITEM_TYPE_END) {
354 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
355 rte_flow_error_set(error, EINVAL,
356 RTE_FLOW_ERROR_TYPE_ITEM,
357 item, "Not supported by ntuple filter");
361 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
362 (!item->spec && !item->mask)) {
366 /* get the TCP/UDP/SCTP info */
367 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
368 (!item->spec || !item->mask)) {
369 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
370 rte_flow_error_set(error, EINVAL,
371 RTE_FLOW_ERROR_TYPE_ITEM,
372 item, "Invalid ntuple mask");
376 /* Range matching via item->last is not supported */
378 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
379 rte_flow_error_set(error, EINVAL,
380 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
381 item, "Not supported last point for range");
385 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
386 tcp_mask = item->mask;
389 * Only src & dst ports and TCP flags are supported;
390 * other fields must be masked out.
392 if (tcp_mask->hdr.sent_seq ||
393 tcp_mask->hdr.recv_ack ||
394 tcp_mask->hdr.data_off ||
395 tcp_mask->hdr.rx_win ||
396 tcp_mask->hdr.cksum ||
397 tcp_mask->hdr.tcp_urp) {
399 sizeof(struct rte_eth_ntuple_filter));
400 rte_flow_error_set(error, EINVAL,
401 RTE_FLOW_ERROR_TYPE_ITEM,
402 item, "Not supported by ntuple filter");
405 if ((tcp_mask->hdr.src_port != 0 &&
406 tcp_mask->hdr.src_port != UINT16_MAX) ||
407 (tcp_mask->hdr.dst_port != 0 &&
408 tcp_mask->hdr.dst_port != UINT16_MAX)) {
409 rte_flow_error_set(error,
410 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
411 item, "Not supported by ntuple filter");
415 filter->dst_port_mask = tcp_mask->hdr.dst_port;
416 filter->src_port_mask = tcp_mask->hdr.src_port;
417 if (tcp_mask->hdr.tcp_flags == 0xFF) {
418 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
419 } else if (!tcp_mask->hdr.tcp_flags) {
420 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
422 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
423 rte_flow_error_set(error, EINVAL,
424 RTE_FLOW_ERROR_TYPE_ITEM,
425 item, "Not supported by ntuple filter");
429 tcp_spec = item->spec;
430 filter->dst_port = tcp_spec->hdr.dst_port;
431 filter->src_port = tcp_spec->hdr.src_port;
432 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
433 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
434 udp_mask = item->mask;
437 * Only src & dst ports are supported;
438 * other fields must be masked out.
440 if (udp_mask->hdr.dgram_len ||
441 udp_mask->hdr.dgram_cksum) {
443 sizeof(struct rte_eth_ntuple_filter));
444 rte_flow_error_set(error, EINVAL,
445 RTE_FLOW_ERROR_TYPE_ITEM,
446 item, "Not supported by ntuple filter");
449 if ((udp_mask->hdr.src_port != 0 &&
450 udp_mask->hdr.src_port != UINT16_MAX) ||
451 (udp_mask->hdr.dst_port != 0 &&
452 udp_mask->hdr.dst_port != UINT16_MAX)) {
453 rte_flow_error_set(error,
454 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
455 item, "Not supported by ntuple filter");
459 filter->dst_port_mask = udp_mask->hdr.dst_port;
460 filter->src_port_mask = udp_mask->hdr.src_port;
462 udp_spec = item->spec;
463 filter->dst_port = udp_spec->hdr.dst_port;
464 filter->src_port = udp_spec->hdr.src_port;
465 } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
466 sctp_mask = item->mask;
469 * Only src & dst ports are supported;
470 * other fields must be masked out.
472 if (sctp_mask->hdr.tag ||
473 sctp_mask->hdr.cksum) {
475 sizeof(struct rte_eth_ntuple_filter));
476 rte_flow_error_set(error, EINVAL,
477 RTE_FLOW_ERROR_TYPE_ITEM,
478 item, "Not supported by ntuple filter");
482 filter->dst_port_mask = sctp_mask->hdr.dst_port;
483 filter->src_port_mask = sctp_mask->hdr.src_port;
485 sctp_spec = item->spec;
486 filter->dst_port = sctp_spec->hdr.dst_port;
487 filter->src_port = sctp_spec->hdr.src_port;
492 /* check if the next not void item is END */
493 item = next_no_void_pattern(pattern, item);
494 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
495 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
496 rte_flow_error_set(error, EINVAL,
497 RTE_FLOW_ERROR_TYPE_ITEM,
498 item, "Not supported by ntuple filter");
505 * n-tuple only supports forwarding,
506 * check if the first not void action is QUEUE.
508 act = next_no_void_action(actions, NULL);
509 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
510 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
511 rte_flow_error_set(error, EINVAL,
512 RTE_FLOW_ERROR_TYPE_ACTION,
513 act, "Not supported action.");
517 ((const struct rte_flow_action_queue *)act->conf)->index;
519 /* check if the next not void action is END */
520 act = next_no_void_action(actions, act);
521 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
522 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
523 rte_flow_error_set(error, EINVAL,
524 RTE_FLOW_ERROR_TYPE_ACTION,
525 act, "Not supported action.");
530 /* must be input direction */
531 if (!attr->ingress) {
532 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
533 rte_flow_error_set(error, EINVAL,
534 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
535 attr, "Only support ingress.");
541 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
542 rte_flow_error_set(error, EINVAL,
543 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
544 attr, "Not support egress.");
549 if (attr->transfer) {
550 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
551 rte_flow_error_set(error, EINVAL,
552 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
553 attr, "No support for transfer.");
557 if (attr->priority > 0xFFFF) {
558 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
559 rte_flow_error_set(error, EINVAL,
560 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
561 attr, "Error priority.");
564 filter->priority = (uint16_t)attr->priority;
565 if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
566 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
567 filter->priority = 1;
572 /* a txgbe-specific function, because the flags field is device-specific */
574 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
575 const struct rte_flow_attr *attr,
576 const struct rte_flow_item pattern[],
577 const struct rte_flow_action actions[],
578 struct rte_eth_ntuple_filter *filter,
579 struct rte_flow_error *error)
583 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
588 #ifdef RTE_LIB_SECURITY
589 /* ESP flow not really a flow */
590 if (filter->proto == IPPROTO_ESP)
594 /* txgbe doesn't support tcp flags */
595 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
596 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
597 rte_flow_error_set(error, EINVAL,
598 RTE_FLOW_ERROR_TYPE_ITEM,
599 NULL, "Not supported by ntuple filter");
603 /* txgbe doesn't support many priorities */
604 if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
605 filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
606 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
607 rte_flow_error_set(error, EINVAL,
608 RTE_FLOW_ERROR_TYPE_ITEM,
609 NULL, "Priority not supported by ntuple filter");
613 if (filter->queue >= dev->data->nb_rx_queues) {
614 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
615 rte_flow_error_set(error, EINVAL,
616 RTE_FLOW_ERROR_TYPE_ITEM,
617 NULL, "Not supported by ntuple filter");
621 /* fixed value for txgbe */
622 filter->flags = RTE_5TUPLE_FLAGS;
627 * Parse the rule to see if it is an ethertype rule,
628 * and get the ethertype filter info along the way.
630 * The first not void item can be ETH.
631 * The next not void item must be END.
633 * The first not void action should be QUEUE.
634 * The next not void action should be END.
637 * ETH type 0x0807 0xFFFF
639 * other members in mask and spec should be set to 0x00.
640 * item->last should be NULL.
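 *
 * An illustrative sketch (example values only, not driver code) of the
 * table above:
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807) };
 *	struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};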
643 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
644 const struct rte_flow_item *pattern,
645 const struct rte_flow_action *actions,
646 struct rte_eth_ethertype_filter *filter,
647 struct rte_flow_error *error)
649 const struct rte_flow_item *item;
650 const struct rte_flow_action *act;
651 const struct rte_flow_item_eth *eth_spec;
652 const struct rte_flow_item_eth *eth_mask;
653 const struct rte_flow_action_queue *act_q;
656 rte_flow_error_set(error, EINVAL,
657 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
658 NULL, "NULL pattern.");
663 rte_flow_error_set(error, EINVAL,
664 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
665 NULL, "NULL action.");
670 rte_flow_error_set(error, EINVAL,
671 RTE_FLOW_ERROR_TYPE_ATTR,
672 NULL, "NULL attribute.");
676 item = next_no_void_pattern(pattern, NULL);
677 /* The first non-void item should be MAC. */
678 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
679 rte_flow_error_set(error, EINVAL,
680 RTE_FLOW_ERROR_TYPE_ITEM,
681 item, "Not supported by ethertype filter");
685 /* Range matching via item->last is not supported */
687 rte_flow_error_set(error, EINVAL,
688 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
689 item, "Not supported last point for range");
693 /* Get the MAC info. */
694 if (!item->spec || !item->mask) {
695 rte_flow_error_set(error, EINVAL,
696 RTE_FLOW_ERROR_TYPE_ITEM,
697 item, "Not supported by ethertype filter");
701 eth_spec = item->spec;
702 eth_mask = item->mask;
704 /* Mask bits of source MAC address must be full of 0.
705 * Mask bits of destination MAC address must be full
708 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
709 (!rte_is_zero_ether_addr(&eth_mask->dst) &&
710 !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
711 rte_flow_error_set(error, EINVAL,
712 RTE_FLOW_ERROR_TYPE_ITEM,
713 item, "Invalid ether address mask");
717 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
718 rte_flow_error_set(error, EINVAL,
719 RTE_FLOW_ERROR_TYPE_ITEM,
720 item, "Invalid ethertype mask");
724 /* If mask bits of destination MAC address
725 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
727 if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
728 filter->mac_addr = eth_spec->dst;
729 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
731 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
733 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
735 /* Check if the next non-void item is END. */
736 item = next_no_void_pattern(pattern, item);
737 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
738 rte_flow_error_set(error, EINVAL,
739 RTE_FLOW_ERROR_TYPE_ITEM,
740 item, "Not supported by ethertype filter.");
746 act = next_no_void_action(actions, NULL);
747 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
748 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
749 rte_flow_error_set(error, EINVAL,
750 RTE_FLOW_ERROR_TYPE_ACTION,
751 act, "Not supported action.");
755 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
756 act_q = (const struct rte_flow_action_queue *)act->conf;
757 filter->queue = act_q->index;
759 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
762 /* Check if the next non-void action is END */
763 act = next_no_void_action(actions, act);
764 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
765 rte_flow_error_set(error, EINVAL,
766 RTE_FLOW_ERROR_TYPE_ACTION,
767 act, "Not supported action.");
772 /* Must be input direction */
773 if (!attr->ingress) {
774 rte_flow_error_set(error, EINVAL,
775 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
776 attr, "Only support ingress.");
782 rte_flow_error_set(error, EINVAL,
783 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
784 attr, "Not support egress.");
789 if (attr->transfer) {
790 rte_flow_error_set(error, EINVAL,
791 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
792 attr, "No support for transfer.");
797 if (attr->priority) {
798 rte_flow_error_set(error, EINVAL,
799 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
800 attr, "Not support priority.");
806 rte_flow_error_set(error, EINVAL,
807 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
808 attr, "Not support group.");
816 txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
817 const struct rte_flow_attr *attr,
818 const struct rte_flow_item pattern[],
819 const struct rte_flow_action actions[],
820 struct rte_eth_ethertype_filter *filter,
821 struct rte_flow_error *error)
825 ret = cons_parse_ethertype_filter(attr, pattern,
826 actions, filter, error);
831 if (filter->queue >= dev->data->nb_rx_queues) {
832 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
833 rte_flow_error_set(error, EINVAL,
834 RTE_FLOW_ERROR_TYPE_ITEM,
835 NULL, "queue index much too big");
839 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
840 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
841 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
842 rte_flow_error_set(error, EINVAL,
843 RTE_FLOW_ERROR_TYPE_ITEM,
844 NULL, "IPv4/IPv6 not supported by ethertype filter");
848 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
849 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
850 rte_flow_error_set(error, EINVAL,
851 RTE_FLOW_ERROR_TYPE_ITEM,
852 NULL, "mac compare is unsupported");
856 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
857 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
858 rte_flow_error_set(error, EINVAL,
859 RTE_FLOW_ERROR_TYPE_ITEM,
860 NULL, "drop option is unsupported");
868 * Parse the rule to see if it is a TCP SYN rule,
869 * and get the TCP SYN filter info along the way.
871 * The first not void item must be ETH.
872 * The second not void item must be IPV4 or IPV6.
873 * The third not void item must be TCP.
874 * The next not void item must be END.
876 * The first not void action should be QUEUE.
877 * The next not void action should be END.
881 * IPV4/IPV6 NULL NULL
882 * TCP tcp_flags 0x02 0xFF
884 * other members in mask and spec should be set to 0x00.
885 * item->last should be NULL.
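 *
 * An illustrative sketch (example values only, not driver code). Note
 * that the parser below requires the tcp_flags mask to be exactly the
 * SYN bit:
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG };
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG };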
888 cons_parse_syn_filter(const struct rte_flow_attr *attr,
889 const struct rte_flow_item pattern[],
890 const struct rte_flow_action actions[],
891 struct rte_eth_syn_filter *filter,
892 struct rte_flow_error *error)
894 const struct rte_flow_item *item;
895 const struct rte_flow_action *act;
896 const struct rte_flow_item_tcp *tcp_spec;
897 const struct rte_flow_item_tcp *tcp_mask;
898 const struct rte_flow_action_queue *act_q;
901 rte_flow_error_set(error, EINVAL,
902 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
903 NULL, "NULL pattern.");
908 rte_flow_error_set(error, EINVAL,
909 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
910 NULL, "NULL action.");
915 rte_flow_error_set(error, EINVAL,
916 RTE_FLOW_ERROR_TYPE_ATTR,
917 NULL, "NULL attribute.");
922 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
923 item = next_no_void_pattern(pattern, NULL);
924 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
925 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
926 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
927 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
928 rte_flow_error_set(error, EINVAL,
929 RTE_FLOW_ERROR_TYPE_ITEM,
930 item, "Not supported by syn filter");
933 /* Range matching via item->last is not supported */
935 rte_flow_error_set(error, EINVAL,
936 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
937 item, "Not supported last point for range");
942 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
943 /* if the item is MAC, the content should be NULL */
944 if (item->spec || item->mask) {
945 rte_flow_error_set(error, EINVAL,
946 RTE_FLOW_ERROR_TYPE_ITEM,
947 item, "Invalid SYN address mask");
951 /* check if the next not void item is IPv4 or IPv6 */
952 item = next_no_void_pattern(pattern, item);
953 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
954 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
955 rte_flow_error_set(error, EINVAL,
956 RTE_FLOW_ERROR_TYPE_ITEM,
957 item, "Not supported by syn filter");
963 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
964 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
965 /* if the item is IP, the content should be NULL */
966 if (item->spec || item->mask) {
967 rte_flow_error_set(error, EINVAL,
968 RTE_FLOW_ERROR_TYPE_ITEM,
969 item, "Invalid SYN mask");
973 /* check if the next not void item is TCP */
974 item = next_no_void_pattern(pattern, item);
975 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
976 rte_flow_error_set(error, EINVAL,
977 RTE_FLOW_ERROR_TYPE_ITEM,
978 item, "Not supported by syn filter");
983 /* Get the TCP info. Only support SYN. */
984 if (!item->spec || !item->mask) {
985 rte_flow_error_set(error, EINVAL,
986 RTE_FLOW_ERROR_TYPE_ITEM,
987 item, "Invalid SYN mask");
990 /* Range matching via item->last is not supported */
992 rte_flow_error_set(error, EINVAL,
993 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
994 item, "Not supported last point for range");
998 tcp_spec = item->spec;
999 tcp_mask = item->mask;
1000 if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
1001 tcp_mask->hdr.src_port ||
1002 tcp_mask->hdr.dst_port ||
1003 tcp_mask->hdr.sent_seq ||
1004 tcp_mask->hdr.recv_ack ||
1005 tcp_mask->hdr.data_off ||
1006 tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
1007 tcp_mask->hdr.rx_win ||
1008 tcp_mask->hdr.cksum ||
1009 tcp_mask->hdr.tcp_urp) {
1010 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1011 rte_flow_error_set(error, EINVAL,
1012 RTE_FLOW_ERROR_TYPE_ITEM,
1013 item, "Not supported by syn filter");
1017 /* check if the next not void item is END */
1018 item = next_no_void_pattern(pattern, item);
1019 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1020 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1021 rte_flow_error_set(error, EINVAL,
1022 RTE_FLOW_ERROR_TYPE_ITEM,
1023 item, "Not supported by syn filter");
1027 /* check if the first not void action is QUEUE. */
1028 act = next_no_void_action(actions, NULL);
1029 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1030 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1031 rte_flow_error_set(error, EINVAL,
1032 RTE_FLOW_ERROR_TYPE_ACTION,
1033 act, "Not supported action.");
1037 act_q = (const struct rte_flow_action_queue *)act->conf;
1038 filter->queue = act_q->index;
1039 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
1040 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1041 rte_flow_error_set(error, EINVAL,
1042 RTE_FLOW_ERROR_TYPE_ACTION,
1043 act, "Not supported action.");
1047 /* check if the next not void action is END */
1048 act = next_no_void_action(actions, act);
1049 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1050 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1051 rte_flow_error_set(error, EINVAL,
1052 RTE_FLOW_ERROR_TYPE_ACTION,
1053 act, "Not supported action.");
1058 /* must be input direction */
1059 if (!attr->ingress) {
1060 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1061 rte_flow_error_set(error, EINVAL,
1062 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1063 attr, "Only support ingress.");
1069 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1070 rte_flow_error_set(error, EINVAL,
1071 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1072 attr, "Not support egress.");
1077 if (attr->transfer) {
1078 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1079 rte_flow_error_set(error, EINVAL,
1080 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1081 attr, "No support for transfer.");
1085 /* Support 2 priorities, the lowest or highest. */
1086 if (!attr->priority) {
1087 filter->hig_pri = 0;
1088 } else if (attr->priority == (uint32_t)~0U) {
1089 filter->hig_pri = 1;
1091 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1092 rte_flow_error_set(error, EINVAL,
1093 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1094 attr, "Not support priority.");
1102 txgbe_parse_syn_filter(struct rte_eth_dev *dev,
1103 const struct rte_flow_attr *attr,
1104 const struct rte_flow_item pattern[],
1105 const struct rte_flow_action actions[],
1106 struct rte_eth_syn_filter *filter,
1107 struct rte_flow_error *error)
1111 ret = cons_parse_syn_filter(attr, pattern,
1112 actions, filter, error);
1114 if (filter->queue >= dev->data->nb_rx_queues)
1124 * Parse the rule to see if it is an L2 tunnel rule,
1125 * and get the L2 tunnel filter info along the way.
1126 * Only E-tag is supported for now.
1128 * The first not void item can be E_TAG.
1129 * The next not void item must be END.
1131 * The first not void action should be VF or PF.
1132 * The next not void action should be END.
1136 * e_cid_base 0x309 0xFFF
1138 * other members in mask and spec should be set to 0x00.
1139 * item->last should be NULL.
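 *
 * An illustrative sketch (example values only, not driver code), using
 * the 14-bit GRP/E-CID packing of rsvd_grp_ecid_b noted in the parser:
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309) };
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *	struct rte_flow_action_vf vf = { .id = 1 };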
1142 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1143 const struct rte_flow_attr *attr,
1144 const struct rte_flow_item pattern[],
1145 const struct rte_flow_action actions[],
1146 struct txgbe_l2_tunnel_conf *filter,
1147 struct rte_flow_error *error)
1149 const struct rte_flow_item *item;
1150 const struct rte_flow_item_e_tag *e_tag_spec;
1151 const struct rte_flow_item_e_tag *e_tag_mask;
1152 const struct rte_flow_action *act;
1153 const struct rte_flow_action_vf *act_vf;
1154 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1157 rte_flow_error_set(error, EINVAL,
1158 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1159 NULL, "NULL pattern.");
1164 rte_flow_error_set(error, EINVAL,
1165 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1166 NULL, "NULL action.");
1171 rte_flow_error_set(error, EINVAL,
1172 RTE_FLOW_ERROR_TYPE_ATTR,
1173 NULL, "NULL attribute.");
1177 /* The first not void item should be e-tag. */
1178 item = next_no_void_pattern(pattern, NULL);
1179 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1180 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1181 rte_flow_error_set(error, EINVAL,
1182 RTE_FLOW_ERROR_TYPE_ITEM,
1183 item, "Not supported by L2 tunnel filter");
1187 if (!item->spec || !item->mask) {
1188 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1189 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1190 item, "Not supported by L2 tunnel filter");
1194 /* Range matching via item->last is not supported */
1196 rte_flow_error_set(error, EINVAL,
1197 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1198 item, "Not supported last point for range");
1202 e_tag_spec = item->spec;
1203 e_tag_mask = item->mask;
1205 /* Only care about GRP and E-CID base. */
1206 if (e_tag_mask->epcp_edei_in_ecid_b ||
1207 e_tag_mask->in_ecid_e ||
1208 e_tag_mask->ecid_e ||
1209 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1210 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1211 rte_flow_error_set(error, EINVAL,
1212 RTE_FLOW_ERROR_TYPE_ITEM,
1213 item, "Not supported by L2 tunnel filter");
1217 filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1219 * grp and e_cid_base are bit fields and only use 14 bits.
1220 * e-tag id is taken as little endian by HW.
1222 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
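	/*
	 * Worked example (values assumed for illustration): GRP = 0x1 with
	 * e_cid_base = 0x309 is carried as big-endian 0x1309 in the spec,
	 * so tunnel_id ends up as 0x1309 after the byte swap (GRP in bits
	 * 13:12, E-CID base in bits 11:0, per the 14-bit packing above).
	 */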
1224 /* check if the next not void item is END */
1225 item = next_no_void_pattern(pattern, item);
1226 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1227 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1228 rte_flow_error_set(error, EINVAL,
1229 RTE_FLOW_ERROR_TYPE_ITEM,
1230 item, "Not supported by L2 tunnel filter");
1235 /* must be input direction */
1236 if (!attr->ingress) {
1237 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1238 rte_flow_error_set(error, EINVAL,
1239 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1240 attr, "Only support ingress.");
1246 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1247 rte_flow_error_set(error, EINVAL,
1248 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1249 attr, "Not support egress.");
1254 if (attr->transfer) {
1255 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1256 rte_flow_error_set(error, EINVAL,
1257 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1258 attr, "No support for transfer.");
1263 if (attr->priority) {
1264 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1265 rte_flow_error_set(error, EINVAL,
1266 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1267 attr, "Not support priority.");
1271 /* check if the first not void action is VF or PF. */
1272 act = next_no_void_action(actions, NULL);
1273 if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1274 act->type != RTE_FLOW_ACTION_TYPE_PF) {
1275 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1276 rte_flow_error_set(error, EINVAL,
1277 RTE_FLOW_ERROR_TYPE_ACTION,
1278 act, "Not supported action.");
1282 if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1283 act_vf = (const struct rte_flow_action_vf *)act->conf;
1284 filter->pool = act_vf->id;
1286 filter->pool = pci_dev->max_vfs;
1289 /* check if the next not void action is END */
1290 act = next_no_void_action(actions, act);
1291 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1292 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1293 rte_flow_error_set(error, EINVAL,
1294 RTE_FLOW_ERROR_TYPE_ACTION,
1295 act, "Not supported action.");
1303 txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1304 const struct rte_flow_attr *attr,
1305 const struct rte_flow_item pattern[],
1306 const struct rte_flow_action actions[],
1307 struct txgbe_l2_tunnel_conf *l2_tn_filter,
1308 struct rte_flow_error *error)
1311 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1314 ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1315 actions, l2_tn_filter, error);
1317 vf_num = pci_dev->max_vfs;
1319 if (l2_tn_filter->pool > vf_num)
1325 /* Parse to get the attr and action info of a flow director rule. */
1327 txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1328 const struct rte_flow_action actions[],
1329 struct txgbe_fdir_rule *rule,
1330 struct rte_flow_error *error)
1332 const struct rte_flow_action *act;
1333 const struct rte_flow_action_queue *act_q;
1334 const struct rte_flow_action_mark *mark;
1337 /* must be input direction */
1338 if (!attr->ingress) {
1339 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1340 rte_flow_error_set(error, EINVAL,
1341 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1342 attr, "Only support ingress.");
1348 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1349 rte_flow_error_set(error, EINVAL,
1350 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1351 attr, "Not support egress.");
1356 if (attr->transfer) {
1357 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1358 rte_flow_error_set(error, EINVAL,
1359 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1360 attr, "No support for transfer.");
1365 if (attr->priority) {
1366 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1367 rte_flow_error_set(error, EINVAL,
1368 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1369 attr, "Not support priority.");
1373 /* check if the first not void action is QUEUE or DROP. */
1374 act = next_no_void_action(actions, NULL);
1375 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1376 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1377 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1378 rte_flow_error_set(error, EINVAL,
1379 RTE_FLOW_ERROR_TYPE_ACTION,
1380 act, "Not supported action.");
1384 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1385 act_q = (const struct rte_flow_action_queue *)act->conf;
1386 rule->queue = act_q->index;
1388 /* signature mode does not support drop action. */
1389 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1390 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1391 rte_flow_error_set(error, EINVAL,
1392 RTE_FLOW_ERROR_TYPE_ACTION,
1393 act, "Not supported action.");
1396 rule->fdirflags = TXGBE_FDIRPICMD_DROP;
1399 /* check if the next not void action is MARK or END */
1400 act = next_no_void_action(actions, act);
1401 if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1402 act->type != RTE_FLOW_ACTION_TYPE_END) {
1403 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1404 rte_flow_error_set(error, EINVAL,
1405 RTE_FLOW_ERROR_TYPE_ACTION,
1406 act, "Not supported action.");
1412 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1413 mark = (const struct rte_flow_action_mark *)act->conf;
1414 rule->soft_id = mark->id;
1415 act = next_no_void_action(actions, act);
1418 /* check if the next not void action is END */
1419 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1420 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1421 rte_flow_error_set(error, EINVAL,
1422 RTE_FLOW_ERROR_TYPE_ACTION,
1423 act, "Not supported action.");
1430 /* search the next non-void pattern item, skipping FUZZY items */
1432 const struct rte_flow_item *next_no_fuzzy_pattern(
1433 const struct rte_flow_item pattern[],
1434 const struct rte_flow_item *cur)
1436 const struct rte_flow_item *next =
1437 next_no_void_pattern(pattern, cur);
1439 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1441 next = next_no_void_pattern(pattern, next);
1445 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1447 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1448 const struct rte_flow_item *item;
1449 uint32_t sh, lh, mh;
1454 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1457 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1489 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1490 * and get the flow director filter info along the way.
1491 * UDP/TCP/SCTP PATTERN:
1492 * The first not void item can be ETH or IPV4 or IPV6
1493 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1494 * The next not void item could be UDP or TCP or SCTP (optional)
1495 * The next not void item could be RAW (for flexbyte, optional)
1496 * The next not void item must be END.
1497 * A Fuzzy Match pattern can appear at any place before END.
1498 * Fuzzy Match is optional for IPV4 but is required for IPV6
1500 * The first not void item must be ETH.
1501 * The second not void item must be MAC VLAN.
1502 * The next not void item must be END.
1504 * The first not void action should be QUEUE or DROP.
1505 * The second not void optional action should be MARK,
1506 * mark_id is a uint32_t number.
1507 * The next not void action should be END.
1508 * UDP/TCP/SCTP pattern example:
1511 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1512 * dst_addr 192.167.3.50 0xFFFFFFFF
1513 * UDP/TCP/SCTP src_port 80 0xFFFF
1514 * dst_port 80 0xFFFF
1515 * FLEX relative 0 0x1
1518 * offset 12 0xFFFFFFFF
1521 * pattern[0] 0x86 0xFF
1522 * pattern[1] 0xDD 0xFF
1524 * MAC VLAN pattern example:
1527 * {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
1528 * 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
1529 * MAC VLAN tci 0x2016 0xEFFF
1531 * Other members in mask and spec should be set to 0x00.
1532 * Item->last should be NULL.
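 *
 * An illustrative sketch (example values only, not driver code) of the
 * RAW flexbyte item from the table above, as the parser below expects it:
 *
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0, .search = 0, .offset = 12,
 *		.limit = 0, .length = 2,
 *		.pattern = (const uint8_t []){ 0x86, 0xDD } };
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1, .search = 1, .offset = -1,	// 0xffffffff
 *		.limit = UINT16_MAX, .length = UINT16_MAX,
 *		.pattern = (const uint8_t []){ 0xFF, 0xFF } };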
1535 txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
1536 const struct rte_flow_attr *attr,
1537 const struct rte_flow_item pattern[],
1538 const struct rte_flow_action actions[],
1539 struct txgbe_fdir_rule *rule,
1540 struct rte_flow_error *error)
1542 const struct rte_flow_item *item;
1543 const struct rte_flow_item_eth *eth_mask;
1544 const struct rte_flow_item_ipv4 *ipv4_spec;
1545 const struct rte_flow_item_ipv4 *ipv4_mask;
1546 const struct rte_flow_item_ipv6 *ipv6_spec;
1547 const struct rte_flow_item_ipv6 *ipv6_mask;
1548 const struct rte_flow_item_tcp *tcp_spec;
1549 const struct rte_flow_item_tcp *tcp_mask;
1550 const struct rte_flow_item_udp *udp_spec;
1551 const struct rte_flow_item_udp *udp_mask;
1552 const struct rte_flow_item_sctp *sctp_spec;
1553 const struct rte_flow_item_sctp *sctp_mask;
1554 const struct rte_flow_item_raw *raw_mask;
1555 const struct rte_flow_item_raw *raw_spec;
1560 rte_flow_error_set(error, EINVAL,
1561 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1562 NULL, "NULL pattern.");
1567 rte_flow_error_set(error, EINVAL,
1568 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1569 NULL, "NULL action.");
1574 rte_flow_error_set(error, EINVAL,
1575 RTE_FLOW_ERROR_TYPE_ATTR,
1576 NULL, "NULL attribute.");
1581 * Some fields may not be provided. Set the spec to 0 and the mask to its
1582 * default value, so the unprovided fields need no further handling later.
1584 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1585 memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
1586 rule->mask.vlan_tci_mask = 0;
1587 rule->mask.flex_bytes_mask = 0;
1590 * The first not void item should be
1591 * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1593 item = next_no_fuzzy_pattern(pattern, NULL);
1594 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1595 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1596 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1597 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1598 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1599 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1600 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1601 rte_flow_error_set(error, EINVAL,
1602 RTE_FLOW_ERROR_TYPE_ITEM,
1603 item, "Not supported by fdir filter");
1607 if (signature_match(pattern))
1608 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1610 rule->mode = RTE_FDIR_MODE_PERFECT;
1612 /* Range matching via item->last is not supported */
1614 rte_flow_error_set(error, EINVAL,
1615 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1616 item, "Not supported last point for range");
1620 /* Get the MAC info. */
1621 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1623 * Only VLAN and dst MAC address are supported;
1624 * other fields must be masked out.
1626 if (item->spec && !item->mask) {
1627 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1628 rte_flow_error_set(error, EINVAL,
1629 RTE_FLOW_ERROR_TYPE_ITEM,
1630 item, "Not supported by fdir filter");
1635 rule->b_mask = TRUE;
1636 eth_mask = item->mask;
1638 /* Ether type should be masked. */
1639 if (eth_mask->type ||
1640 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1641 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1642 rte_flow_error_set(error, EINVAL,
1643 RTE_FLOW_ERROR_TYPE_ITEM,
1644 item, "Not supported by fdir filter");
1648 /* If the ethernet item carries content, it means MAC VLAN mode. */
1649 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1652 * the src MAC address mask must be all zeroes,
1653 * and the dst MAC address mask must be all ones.
1655 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1656 if (eth_mask->src.addr_bytes[j] ||
1657 eth_mask->dst.addr_bytes[j] != 0xFF) {
1659 sizeof(struct txgbe_fdir_rule));
1660 rte_flow_error_set(error, EINVAL,
1661 RTE_FLOW_ERROR_TYPE_ITEM,
1662 item, "Not supported by fdir filter");
1667 /* When there is no VLAN item, treat the TCI as fully masked. */
1668 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
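		/*
		 * In the 16-bit TCI, 0xEFFF covers the three PCP bits and
		 * the 12-bit VLAN ID, leaving bit 12 (DEI/CFI) unmatched.
		 */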
1670 /*** If both spec and mask are NULL,
1671 * it means we don't care about ETH.
1676 * Check if the next not void item is VLAN or IPv4.
1677 * IPv6 is not supported.
1679 item = next_no_fuzzy_pattern(pattern, item);
1680 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1681 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1682 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1683 rte_flow_error_set(error, EINVAL,
1684 RTE_FLOW_ERROR_TYPE_ITEM,
1685 item, "Not supported by fdir filter");
1689 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1690 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1691 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1692 rte_flow_error_set(error, EINVAL,
1693 RTE_FLOW_ERROR_TYPE_ITEM,
1694 item, "Not supported by fdir filter");
1700 /* Get the IPV4 info. */
1701 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1703 * Set the flow type even if there's no content
1704 * as we must have a flow type.
1706 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
1707 ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
1708 /* Range matching via item->last is not supported */
1710 rte_flow_error_set(error, EINVAL,
1711 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1712 item, "Not supported last point for range");
1716 * Only src & dst addresses are of interest;
1717 * other fields must be masked out.
1720 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1721 rte_flow_error_set(error, EINVAL,
1722 RTE_FLOW_ERROR_TYPE_ITEM,
1723 item, "Not supported by fdir filter");
1726 rule->b_mask = TRUE;
1727 ipv4_mask = item->mask;
1728 if (ipv4_mask->hdr.version_ihl ||
1729 ipv4_mask->hdr.type_of_service ||
1730 ipv4_mask->hdr.total_length ||
1731 ipv4_mask->hdr.packet_id ||
1732 ipv4_mask->hdr.fragment_offset ||
1733 ipv4_mask->hdr.time_to_live ||
1734 ipv4_mask->hdr.next_proto_id ||
1735 ipv4_mask->hdr.hdr_checksum) {
1736 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1737 rte_flow_error_set(error, EINVAL,
1738 RTE_FLOW_ERROR_TYPE_ITEM,
1739 item, "Not supported by fdir filter");
1742 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1743 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1746 rule->b_spec = TRUE;
1747 ipv4_spec = item->spec;
1748 rule->input.dst_ip[0] =
1749 ipv4_spec->hdr.dst_addr;
1750 rule->input.src_ip[0] =
1751 ipv4_spec->hdr.src_addr;
1755 * Check if the next not void item is
1756 * TCP or UDP or SCTP or RAW or END.
1758 item = next_no_fuzzy_pattern(pattern, item);
1759 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1760 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1761 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1762 item->type != RTE_FLOW_ITEM_TYPE_END &&
1763 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1764 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1765 rte_flow_error_set(error, EINVAL,
1766 RTE_FLOW_ERROR_TYPE_ITEM,
1767 item, "Not supported by fdir filter");
1772 /* Get the IPV6 info. */
1773 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1775 * Set the flow type even if there's no content
1776 * as we must have a flow type.
1778 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
1779 ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
1782 * 1. must be a signature match
1783 * 2. item->last is not supported
1784 * 3. mask must not be NULL
1786 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1789 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1790 rte_flow_error_set(error, EINVAL,
1791 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1792 item, "Not supported last point for range");
1796 rule->b_mask = TRUE;
1797 ipv6_mask = item->mask;
1798 if (ipv6_mask->hdr.vtc_flow ||
1799 ipv6_mask->hdr.payload_len ||
1800 ipv6_mask->hdr.proto ||
1801 ipv6_mask->hdr.hop_limits) {
1802 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1803 rte_flow_error_set(error, EINVAL,
1804 RTE_FLOW_ERROR_TYPE_ITEM,
1805 item, "Not supported by fdir filter");
1809 /* check src addr mask */
1810 for (j = 0; j < 16; j++) {
1811 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1812 rule->mask.src_ipv6_mask |= 1 << j;
1813 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1814 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1815 rte_flow_error_set(error, EINVAL,
1816 RTE_FLOW_ERROR_TYPE_ITEM,
1817 item, "Not supported by fdir filter");
1822 /* check dst addr mask */
1823 for (j = 0; j < 16; j++) {
1824 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1825 rule->mask.dst_ipv6_mask |= 1 << j;
1826 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1827 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1828 rte_flow_error_set(error, EINVAL,
1829 RTE_FLOW_ERROR_TYPE_ITEM,
1830 item, "Not supported by fdir filter");
1836 rule->b_spec = TRUE;
1837 ipv6_spec = item->spec;
1838 rte_memcpy(rule->input.src_ip,
1839 ipv6_spec->hdr.src_addr, 16);
1840 rte_memcpy(rule->input.dst_ip,
1841 ipv6_spec->hdr.dst_addr, 16);
1845 * Check if the next not void item is
1846 * TCP or UDP or SCTP or RAW or END.
1848 item = next_no_fuzzy_pattern(pattern, item);
1849 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1850 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1851 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1852 item->type != RTE_FLOW_ITEM_TYPE_END &&
1853 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1854 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1855 rte_flow_error_set(error, EINVAL,
1856 RTE_FLOW_ERROR_TYPE_ITEM,
1857 item, "Not supported by fdir filter");
1862 /* Get the TCP info. */
1863 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1865 * Set the flow type even if there's no content
1866 * as we must have a flow type.
1868 rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
1869 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
1870 /* Range matching via item->last is not supported */
1872 rte_flow_error_set(error, EINVAL,
1873 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1874 item, "Not supported last point for range");
1878 * Only src & dst ports are of interest;
1879 * other fields must be masked out.
1882 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1883 rte_flow_error_set(error, EINVAL,
1884 RTE_FLOW_ERROR_TYPE_ITEM,
1885 item, "Not supported by fdir filter");
1888 rule->b_mask = TRUE;
1889 tcp_mask = item->mask;
1890 if (tcp_mask->hdr.sent_seq ||
1891 tcp_mask->hdr.recv_ack ||
1892 tcp_mask->hdr.data_off ||
1893 tcp_mask->hdr.tcp_flags ||
1894 tcp_mask->hdr.rx_win ||
1895 tcp_mask->hdr.cksum ||
1896 tcp_mask->hdr.tcp_urp) {
1897 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1898 rte_flow_error_set(error, EINVAL,
1899 RTE_FLOW_ERROR_TYPE_ITEM,
1900 item, "Not supported by fdir filter");
1903 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1904 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1907 rule->b_spec = TRUE;
1908 tcp_spec = item->spec;
1909 rule->input.src_port =
1910 tcp_spec->hdr.src_port;
1911 rule->input.dst_port =
1912 tcp_spec->hdr.dst_port;
1915 item = next_no_fuzzy_pattern(pattern, item);
1916 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1917 item->type != RTE_FLOW_ITEM_TYPE_END) {
1918 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1919 rte_flow_error_set(error, EINVAL,
1920 RTE_FLOW_ERROR_TYPE_ITEM,
1921 item, "Not supported by fdir filter");
1926 /* Get the UDP info */
1927 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1929 * Set the flow type even if there's no content
1930 * as we must have a flow type.
1932 rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
1933 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
1934 /* Range matching via item->last is not supported */
1936 rte_flow_error_set(error, EINVAL,
1937 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1938 item, "Not supported last point for range");
1942 * Only src & dst ports are of interest;
1943 * other fields must be masked out.
1946 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1947 rte_flow_error_set(error, EINVAL,
1948 RTE_FLOW_ERROR_TYPE_ITEM,
1949 item, "Not supported by fdir filter");
1952 rule->b_mask = TRUE;
1953 udp_mask = item->mask;
1954 if (udp_mask->hdr.dgram_len ||
1955 udp_mask->hdr.dgram_cksum) {
1956 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1957 rte_flow_error_set(error, EINVAL,
1958 RTE_FLOW_ERROR_TYPE_ITEM,
1959 item, "Not supported by fdir filter");
1962 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1963 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1966 rule->b_spec = TRUE;
1967 udp_spec = item->spec;
1968 rule->input.src_port =
1969 udp_spec->hdr.src_port;
1970 rule->input.dst_port =
1971 udp_spec->hdr.dst_port;
1974 item = next_no_fuzzy_pattern(pattern, item);
1975 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1976 item->type != RTE_FLOW_ITEM_TYPE_END) {
1977 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1978 rte_flow_error_set(error, EINVAL,
1979 RTE_FLOW_ERROR_TYPE_ITEM,
1980 item, "Not supported by fdir filter");
1985 /* Get the SCTP info */
1986 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1988 * Set the flow type even if there's no content
1989 * as we must have a flow type.
1991 rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
1992 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
1993 /* Range matching via item->last is not supported */
1995 rte_flow_error_set(error, EINVAL,
1996 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1997 item, "Not supported last point for range");
2002 * Only src & dst ports are of interest;
2003 * other fields must be masked out.
2006 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2007 rte_flow_error_set(error, EINVAL,
2008 RTE_FLOW_ERROR_TYPE_ITEM,
2009 item, "Not supported by fdir filter");
2012 rule->b_mask = TRUE;
2013 sctp_mask = item->mask;
2014 if (sctp_mask->hdr.tag ||
2015 sctp_mask->hdr.cksum) {
2016 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2017 rte_flow_error_set(error, EINVAL,
2018 RTE_FLOW_ERROR_TYPE_ITEM,
2019 item, "Not supported by fdir filter");
2022 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2023 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2026 rule->b_spec = TRUE;
2027 sctp_spec = item->spec;
2028 rule->input.src_port =
2029 sctp_spec->hdr.src_port;
2030 rule->input.dst_port =
2031 sctp_spec->hdr.dst_port;
2033 /* otherwise, even the SCTP ports are not supported */
2034 sctp_mask = item->mask;
2036 (sctp_mask->hdr.src_port ||
2037 sctp_mask->hdr.dst_port ||
2038 sctp_mask->hdr.tag ||
2039 sctp_mask->hdr.cksum)) {
2040 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2041 rte_flow_error_set(error, EINVAL,
2042 RTE_FLOW_ERROR_TYPE_ITEM,
2043 item, "Not supported by fdir filter");
2047 item = next_no_fuzzy_pattern(pattern, item);
2048 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2049 item->type != RTE_FLOW_ITEM_TYPE_END) {
2050 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2051 rte_flow_error_set(error, EINVAL,
2052 RTE_FLOW_ERROR_TYPE_ITEM,
2053 item, "Not supported by fdir filter");
2058 /* Get the flex byte info */
2059 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2060 /* Range matching via item->last is not supported */
2062 rte_flow_error_set(error, EINVAL,
2063 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2064 item, "Not supported last point for range");
2067 /* mask and spec should not be NULL */
2068 if (!item->mask || !item->spec) {
2069 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2070 rte_flow_error_set(error, EINVAL,
2071 RTE_FLOW_ERROR_TYPE_ITEM,
2072 item, "Not supported by fdir filter");
2076 raw_mask = item->mask;
2079 if (raw_mask->relative != 0x1 ||
2080 raw_mask->search != 0x1 ||
2081 raw_mask->reserved != 0x0 ||
2082 (uint32_t)raw_mask->offset != 0xffffffff ||
2083 raw_mask->limit != 0xffff ||
2084 raw_mask->length != 0xffff) {
2085 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2086 rte_flow_error_set(error, EINVAL,
2087 RTE_FLOW_ERROR_TYPE_ITEM,
2088 item, "Not supported by fdir filter");
2092 raw_spec = item->spec;
2095 if (raw_spec->relative != 0 ||
2096 raw_spec->search != 0 ||
2097 raw_spec->reserved != 0 ||
2098 raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
2099 raw_spec->offset % 2 ||
2100 raw_spec->limit != 0 ||
2101 raw_spec->length != 2 ||
2102 /* pattern can't be 0xffff */
2103 (raw_spec->pattern[0] == 0xff &&
2104 raw_spec->pattern[1] == 0xff)) {
2105 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2106 rte_flow_error_set(error, EINVAL,
2107 RTE_FLOW_ERROR_TYPE_ITEM,
2108 item, "Not supported by fdir filter");
2112 /* check pattern mask */
2113 if (raw_mask->pattern[0] != 0xff ||
2114 raw_mask->pattern[1] != 0xff) {
2115 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2116 rte_flow_error_set(error, EINVAL,
2117 RTE_FLOW_ERROR_TYPE_ITEM,
2118 item, "Not supported by fdir filter");
2122 rule->mask.flex_bytes_mask = 0xffff;
2123 rule->input.flex_bytes =
2124 (((uint16_t)raw_spec->pattern[1]) << 8) |
2125 raw_spec->pattern[0];
2126 rule->flex_bytes_offset = raw_spec->offset;
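		/*
		 * Worked example: the doc-comment pattern {0x86, 0xDD} is
		 * stored as flex_bytes = (0xDD << 8) | 0x86 = 0xDD86, i.e.
		 * pattern[0] ends up in the low byte.
		 */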
2129 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2130 /* check if the next not void item is END */
2131 item = next_no_fuzzy_pattern(pattern, item);
2132 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2133 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2134 rte_flow_error_set(error, EINVAL,
2135 RTE_FLOW_ERROR_TYPE_ITEM,
2136 item, "Not supported by fdir filter");
2141 rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
2143 return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2147 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2148 * and get the flow director filter info along the way.
2150 * The first not void item must be ETH.
2151 * The second not void item must be IPV4/ IPV6.
2152 * The third not void item must be VXLAN.
2153 * The next not void item must be END.
2155 * The first not void item must be ETH.
2156 * The second not void item must be IPV4/ IPV6.
2157 * The third not void item must be NVGRE.
2158 * The next not void item must be END.
2160 * The first not void action should be QUEUE or DROP.
2161 * The second not void optional action should be MARK,
2162 * mark_id is a uint32_t number.
2163 * The next not void action should be END.
2164 * VxLAN pattern example:
2167 * IPV4/IPV6 NULL NULL
2169 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2170 * MAC VLAN tci 0x2016 0xEFFF
2172 * NEGRV pattern example:
2175 * IPV4/IPV6 NULL NULL
2176 * NVGRE protocol 0x6558 0xFFFF
2177 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2178 * MAC VLAN tci 0x2016 0xEFFF
2180 * other members in mask and spec should set to 0x00.
2181 * item->last should be NULL.
static int
txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct txgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_mask;
	uint32_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set the spec to 0 and the mask to
	 * the default value, so nothing needs to be done later for the fields
	 * that are not provided.
	 */
	memset(rule, 0, sizeof(struct txgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or UDP or VxLAN.
	 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;

	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* check if the next not void item is MAC */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only the VLAN and the dst MAC address are supported;
	 * everything else should be masked out.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETH_ADDR_LEN; j++) {
		/* It's a per byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When no VLAN is given, treat it as a full mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/**
	 * If the tag is 0, the VLAN is treated as don't-care.
	 * Do nothing.
	 */

	return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
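
/*
 * Illustrative sketch, not part of the driver: a pattern array shaped the
 * way the tunnel parser above expects -- the outer ETH/IPV4/UDP/VXLAN items
 * carry no spec/mask, the inner ETH item masks only the destination MAC
 * (each masked byte must be 0xFF, ether type 0, src MAC all zero), and the
 * trailing VLAN item may match the TCI. All names here are local examples.
 */
static __rte_unused void
txgbe_flow_example_vxlan_pattern(struct rte_flow_item item[7],
				 const struct rte_flow_item_eth *inner_eth_spec,
				 const struct rte_flow_item_eth *inner_eth_mask,
				 const struct rte_flow_item_vlan *vlan_spec,
				 const struct rte_flow_item_vlan *vlan_mask)
{
	memset(item, 0, 7 * sizeof(item[0]));
	item[0].type = RTE_FLOW_ITEM_TYPE_ETH;	/* outer: protocol stack only */
	item[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	item[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	item[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
	item[4].type = RTE_FLOW_ITEM_TYPE_ETH;	/* inner MAC: dst bytes 0xFF */
	item[4].spec = inner_eth_spec;
	item[4].mask = inner_eth_mask;
	item[5].type = RTE_FLOW_ITEM_TYPE_VLAN;
	item[5].spec = vlan_spec;
	item[5].mask = vlan_mask;
	item[6].type = RTE_FLOW_ITEM_TYPE_END;
}
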
static int
txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct txgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
					actions, rule, error);
	if (!ret)
		goto step_next;

	ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);
	if (ret)
		return ret;

step_next:
	if (hw->mac.type == txgbe_mac_raptor &&
	    rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
	    (rule->input.src_port != 0 || rule->input.dst_port != 0))
		return -ENOTSUP;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return ret;
}
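
/*
 * Illustrative sketch, not part of the driver: the dispatcher above rejects
 * any rule whose mode differs from the port's configured fdir mode, so an
 * application is expected to pick the mode before rte_eth_dev_configure().
 * This assumes the legacy struct rte_eth_conf fdir_conf field of this DPDK
 * era; the function name and queue counts are examples only.
 */
static __rte_unused int
txgbe_flow_example_enable_fdir(uint16_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* Tunnel rules would need RTE_FDIR_MODE_PERFECT_TUNNEL instead. */
	conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
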
static int
txgbe_parse_rss_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct txgbe_rte_flow_rss_conf *rss_conf,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_rss *rss;
	uint16_t n;

	/**
	 * RSS only supports forwarding;
	 * check if the first not void action is RSS.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rss = (const struct rte_flow_action_rss *)act->conf;

	if (!rss || !rss->queue_num) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "no valid queues");
		return -rte_errno;
	}

	for (n = 0; n < rss->queue_num; n++) {
		if (rss->queue[n] >= dev->data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"queue id > max number of queues");
			return -rte_errno;
		}
	}

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "non-default RSS hash functions are not supported");
	if (rss->level)
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "a nonzero RSS encapsulation level is not supported");
	if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS hash key must be exactly 40 bytes");
	if (rss->queue_num > RTE_DIM(rss_conf->queue))
		return rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "too many queues for RSS context");
	if (txgbe_rss_conf_init(rss_conf, rss))
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
			 "RSS context initialization failure");

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}

	return 0;
}
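
/*
 * Illustrative sketch, not part of the driver: an RSS action that satisfies
 * the checks above -- default hash function, encapsulation level 0, a
 * 40-byte key and a queue list no longer than the context table. The hash
 * type selection and all names here are local examples only.
 */
static __rte_unused void
txgbe_flow_example_rss_action(struct rte_flow_action action[2],
			      struct rte_flow_action_rss *rss,
			      const uint16_t *queues, uint32_t queue_num,
			      const uint8_t key[40])
{
	memset(rss, 0, sizeof(*rss));
	rss->func = RTE_ETH_HASH_FUNCTION_DEFAULT;	/* only default accepted */
	rss->level = 0;					/* outer headers only */
	rss->types = ETH_RSS_IP;			/* example hash types */
	rss->key = key;
	rss->key_len = 40;				/* exactly 40 bytes */
	rss->queue = queues;
	rss->queue_num = queue_num;

	action[0].type = RTE_FLOW_ACTION_TYPE_RSS;
	action[0].conf = rss;
	action[1].type = RTE_FLOW_ACTION_TYPE_END;
}
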
/* remove the RSS filter */
static void
txgbe_clear_rss_filter(struct rte_eth_dev *dev)
{
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);

	if (filter_info->rss_info.conf.queue_num)
		txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}
void
txgbe_filterlist_init(void)
{
	TAILQ_INIT(&filter_ntuple_list);
	TAILQ_INIT(&filter_ethertype_list);
	TAILQ_INIT(&filter_syn_list);
	TAILQ_INIT(&filter_fdir_list);
	TAILQ_INIT(&filter_l2_tunnel_list);
	TAILQ_INIT(&filter_rss_list);
	TAILQ_INIT(&txgbe_flow_list);
}
void
txgbe_filterlist_flush(void)
{
	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
	struct txgbe_rss_conf_ele *rss_filter_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
			     ntuple_filter_ptr, entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
			     ethertype_filter_ptr, entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
			     syn_filter_ptr, entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
			     l2_tn_filter_ptr, entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
			     fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
	}

	while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
		TAILQ_REMOVE(&filter_rss_list,
			     rss_filter_ptr, entries);
		rte_free(rss_filter_ptr);
	}

	while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
		TAILQ_REMOVE(&txgbe_flow_list,
			     txgbe_flow_mem_ptr, entries);
		rte_free(txgbe_flow_mem_ptr->flow);
		rte_free(txgbe_flow_mem_ptr);
	}
}
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter;
 * we let it use the first filter it hits, so the sequence matters.
 */
static struct rte_flow *
txgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct txgbe_fdir_rule fdir_rule;
	struct txgbe_l2_tunnel_conf l2_tn_filter;
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
	struct txgbe_rte_flow_rss_conf rss_conf;
	struct rte_flow *flow = NULL;
	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
	struct txgbe_rss_conf_ele *rss_filter_ptr;
	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
	uint8_t first_mask = FALSE;

	flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return (struct rte_flow *)flow;
	}
	txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
			sizeof(struct txgbe_flow_mem), 0);
	if (!txgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	txgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&txgbe_flow_list,
			  txgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);

#ifdef RTE_LIB_SECURITY
	/* An ESP flow is not really a flow */
	if (ntuple_filter.proto == IPPROTO_ESP)
		return flow;
#endif

	if (!ret) {
		ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
				sizeof(struct txgbe_ntuple_filter_ele), 0);
			if (!ntuple_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ntuple_filter_ptr->filter_info,
				   &ntuple_filter,
				   sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
					  ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = txgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr =
				rte_zmalloc("txgbe_ethertype_filter",
				sizeof(struct txgbe_ethertype_filter_ele), 0);
			if (!ethertype_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ethertype_filter_ptr->filter_info,
				   &ethertype_filter,
				   sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
					  ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = txgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
				sizeof(struct txgbe_eth_syn_filter_ele), 0);
			if (!syn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&syn_filter_ptr->filter_info,
				   &syn_filter,
				   sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
					  syn_filter_ptr,
					  entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
	ret = txgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					   &fdir_rule.mask,
					   sizeof(struct txgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					txgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = txgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
				first_mask = TRUE;
			} else {
				/**
				 * Only one global mask is supported;
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					     &fdir_rule.mask,
					     sizeof(struct txgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
				    fdir_rule.flex_bytes_offset)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = txgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
					sizeof(struct txgbe_fdir_rule_ele), 0);
				if (!fdir_rule_ptr) {
					PMD_DRV_LOG(ERR,
						"failed to allocate memory");
					goto out;
				}
				rte_memcpy(&fdir_rule_ptr->filter_info,
					   &fdir_rule,
					   sizeof(struct txgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
						  fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			if (ret) {
				/**
				 * Clear the mask_added flag if programming
				 * the rule fails.
				 */
				if (first_mask)
					fdir_info->mask_added = FALSE;
				goto out;
			}
		}

		goto out;
	}

	memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
	ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);
	if (!ret) {
		ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter,
						     FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
				sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
			if (!l2_tn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&l2_tn_filter_ptr->filter_info,
				   &l2_tn_filter,
				   sizeof(struct txgbe_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
					  l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

	memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
	ret = txgbe_parse_rss_filter(dev, attr,
				actions, &rss_conf, error);
	if (!ret) {
		ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
		if (!ret) {
			rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
				sizeof(struct txgbe_rss_conf_ele), 0);
			if (!rss_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
					    &rss_conf.conf);
			TAILQ_INSERT_TAIL(&filter_rss_list,
					  rss_filter_ptr, entries);
			flow->rule = rss_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_HASH;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&txgbe_flow_list,
		     txgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(txgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
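
/*
 * Illustrative sketch, not part of the driver: how an application reaches
 * txgbe_flow_create() -- always through the generic rte_flow API rather
 * than by calling the PMD entry point directly. The function name is a
 * local example only.
 */
static __rte_unused struct rte_flow *
txgbe_flow_example_create(uint16_t port_id,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[])
{
	struct rte_flow_attr attr;
	struct rte_flow_error err;
	struct rte_flow *f;

	memset(&attr, 0, sizeof(attr));
	attr.ingress = 1;	/* every filter type here is ingress-only */

	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (!f)
		PMD_DRV_LOG(ERR, "flow create failed: %s",
			    err.message ? err.message : "(no message)");
	return f;
}
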
/**
 * Check if the flow rule is supported by txgbe.
 * It only checks the format; it does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
txgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct txgbe_l2_tunnel_conf l2_tn_filter;
	struct txgbe_fdir_rule fdir_rule;
	struct txgbe_rte_flow_rss_conf rss_conf;
	int ret = 0;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = txgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
	ret = txgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
	ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);
	if (!ret)
		return 0;

	memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
	ret = txgbe_parse_rss_filter(dev, attr,
				actions, &rss_conf, error);

	return ret;
}
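
/*
 * Illustrative sketch, not part of the driver: validation only checks the
 * rule format, so a rule that validates can still fail at creation time
 * when the hardware tables are full. A cautious caller checks both results.
 * The function name is a local example only.
 */
static __rte_unused int
txgbe_flow_example_validate_then_create(uint16_t port_id,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow **flow)
{
	struct rte_flow_error err;
	int ret;

	ret = rte_flow_validate(port_id, attr, pattern, actions, &err);
	if (ret)
		return ret;	/* malformed rule; creation would also fail */

	*flow = rte_flow_create(port_id, attr, pattern, actions, &err);
	return *flow ? 0 : -rte_errno;
}
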
/* Destroy a flow rule on txgbe. */
static int
txgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret = 0;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct txgbe_fdir_rule fdir_rule;
	struct txgbe_l2_tunnel_conf l2_tn_filter;
	struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct txgbe_fdir_rule_ele *fdir_rule_ptr;
	struct txgbe_flow_mem *txgbe_flow_mem_ptr;
	struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
	struct txgbe_rss_conf_ele *rss_filter_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ntuple_filter,
			   &ntuple_filter_ptr->filter_info,
			   sizeof(struct rte_eth_ntuple_filter));
		ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				     ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ethertype_filter,
			   &ethertype_filter_ptr->filter_info,
			   sizeof(struct rte_eth_ethertype_filter));
		ret = txgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				     ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&syn_filter,
			   &syn_filter_ptr->filter_info,
			   sizeof(struct rte_eth_syn_filter));
		ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				     syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
		rte_memcpy(&fdir_rule,
			   &fdir_rule_ptr->filter_info,
			   sizeof(struct txgbe_fdir_rule));
		ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				     fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
					pmd_flow->rule;
		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			   sizeof(struct txgbe_l2_tunnel_conf));
		ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				     l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_HASH:
		rss_filter_ptr = (struct txgbe_rss_conf_ele *)
					pmd_flow->rule;
		ret = txgbe_config_rss_filter(dev,
				&rss_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_rss_list,
				     rss_filter_ptr, entries);
			rte_free(rss_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
		if (txgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&txgbe_flow_list,
				     txgbe_flow_mem_ptr, entries);
			rte_free(txgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}
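
/*
 * Illustrative sketch, not part of the driver: destroying one rule through
 * the generic API. Since the PMD frees the rule memory above, the handle
 * must not be reused after a successful call. The name is an example only.
 */
static __rte_unused int
txgbe_flow_example_destroy(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error err;

	return rte_flow_destroy(port_id, flow, &err);
}
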
/* Destroy all flow rules associated with a port on txgbe. */
static int
txgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	txgbe_clear_all_ntuple_filter(dev);
	txgbe_clear_all_ethertype_filter(dev);
	txgbe_clear_syn_filter(dev);

	ret = txgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush rule");
		return ret;
	}

	ret = txgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush rule");
		return ret;
	}

	txgbe_clear_rss_filter(dev);
	txgbe_filterlist_flush();
	return 0;
}
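
/*
 * Illustrative sketch, not part of the driver: flushing a port through the
 * generic API, which lands in txgbe_flow_flush() above and releases every
 * filter list entry in one call. The name is a local example only.
 */
static __rte_unused int
txgbe_flow_example_flush(uint16_t port_id)
{
	struct rte_flow_error err;

	return rte_flow_flush(port_id, &err);
}
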
const struct rte_flow_ops txgbe_flow_ops = {
	.validate = txgbe_flow_validate,
	.create = txgbe_flow_create,
	.destroy = txgbe_flow_destroy,
	.flush = txgbe_flow_flush,
};