1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
11 #include <rte_common.h>
12 #include <rte_interrupts.h>
13 #include <rte_byteorder.h>
15 #include <rte_debug.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_memory.h>
22 #include <rte_atomic.h>
23 #include <rte_malloc.h>
26 #include <rte_flow_driver.h>
28 #include "e1000_logs.h"
29 #include "base/e1000_api.h"
30 #include "e1000_ethdev.h"
32 #define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
34 item = (pattern) + (index); \
35 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
37 item = (pattern) + (index); \
41 #define NEXT_ITEM_OF_ACTION(act, actions, index) \
43 act = (actions) + (index); \
44 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
46 act = (actions) + (index); \
50 #define IGB_FLEX_RAW_NUM 12
52 struct igb_flow_mem_list igb_flow_list;
53 struct igb_ntuple_filter_list igb_filter_ntuple_list;
54 struct igb_ethertype_filter_list igb_filter_ethertype_list;
55 struct igb_syn_filter_list igb_filter_syn_list;
56 struct igb_flex_filter_list igb_filter_flex_list;
57 struct igb_rss_filter_list igb_filter_rss_list;
60 * Please be aware there is an assumption for all the parsers:
61 * rte_flow_item uses big endian, while rte_flow_attr and
62 * rte_flow_action use CPU order.
63 * Because the pattern is used to describe the packets,
64 * normally the packets should use network order.
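 *
 * A minimal illustration (a hypothetical application-side snippet, not part
 * of this driver): port numbers inside an item spec are stored in network
 * order, while the queue index of an action stays in CPU order, e.g.
 *
 *	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);	// big-endian item field
 *	queue_conf.index = 1;				// CPU-order action field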
68 * Parse the rule to see if it is an n-tuple rule,
69 * and get the n-tuple filter info along the way.
71 * The first not void item can be ETH or IPV4.
72 * The second not void item must be IPV4 if the first one is ETH.
73 * The third not void item must be UDP or TCP or SCTP.
74 * The next not void item must be END.
76 * The first not void action should be QUEUE.
77 * The next not void action should be END.
81 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
82 * dst_addr 192.167.3.50 0xFFFFFFFF
83 * next_proto_id 17 0xFF
84 * UDP/TCP/ src_port 80 0xFFFF
85 * SCTP dst_port 80 0xFFFF
87 * other members in mask and spec should be set to 0x00.
88 * item->last should be NULL.
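 *
 * Purely as an illustration (a hypothetical application-side sketch, not part
 * of this driver), the example above could be expressed through the generic
 * rte_flow API roughly as follows:
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *		.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *		.next_proto_id = 17 } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *		.next_proto_id = UINT8_MAX } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80) } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};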
91 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
92 const struct rte_flow_item pattern[],
93 const struct rte_flow_action actions[],
94 struct rte_eth_ntuple_filter *filter,
95 struct rte_flow_error *error)
97 const struct rte_flow_item *item;
98 const struct rte_flow_action *act;
99 const struct rte_flow_item_ipv4 *ipv4_spec;
100 const struct rte_flow_item_ipv4 *ipv4_mask;
101 const struct rte_flow_item_tcp *tcp_spec;
102 const struct rte_flow_item_tcp *tcp_mask;
103 const struct rte_flow_item_udp *udp_spec;
104 const struct rte_flow_item_udp *udp_mask;
105 const struct rte_flow_item_sctp *sctp_spec;
106 const struct rte_flow_item_sctp *sctp_mask;
110 rte_flow_error_set(error,
111 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
112 NULL, "NULL pattern.");
117 rte_flow_error_set(error, EINVAL,
118 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
119 NULL, "NULL action.");
123 rte_flow_error_set(error, EINVAL,
124 RTE_FLOW_ERROR_TYPE_ATTR,
125 NULL, "NULL attribute.");
132 /* the first not void item can be MAC or IPv4 */
133 NEXT_ITEM_OF_PATTERN(item, pattern, index);
135 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
136 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
137 rte_flow_error_set(error, EINVAL,
138 RTE_FLOW_ERROR_TYPE_ITEM,
139 item, "Not supported by ntuple filter");
143 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
144 /* Not supported last point for range */
146 rte_flow_error_set(error,
148 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
149 item, "Not supported last point for range");
152 /* if the first item is MAC, the content should be NULL */
153 if (item->spec || item->mask) {
154 rte_flow_error_set(error, EINVAL,
155 RTE_FLOW_ERROR_TYPE_ITEM,
156 item, "Not supported by ntuple filter");
159 /* check if the next not void item is IPv4 */
161 NEXT_ITEM_OF_PATTERN(item, pattern, index);
162 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
163 rte_flow_error_set(error,
164 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
165 item, "Not supported by ntuple filter");
170 /* get the IPv4 info */
171 if (!item->spec || !item->mask) {
172 rte_flow_error_set(error, EINVAL,
173 RTE_FLOW_ERROR_TYPE_ITEM,
174 item, "Invalid ntuple mask");
177 /* Not supported last point for range */
179 rte_flow_error_set(error, EINVAL,
180 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
181 item, "Not supported last point for range");
185 ipv4_mask = item->mask;
187 * Only support src & dst addresses, protocol,
188 * others should be masked.
191 if (ipv4_mask->hdr.version_ihl ||
192 ipv4_mask->hdr.type_of_service ||
193 ipv4_mask->hdr.total_length ||
194 ipv4_mask->hdr.packet_id ||
195 ipv4_mask->hdr.fragment_offset ||
196 ipv4_mask->hdr.time_to_live ||
197 ipv4_mask->hdr.hdr_checksum) {
198 rte_flow_error_set(error,
199 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
200 item, "Not supported by ntuple filter");
204 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
205 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
206 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
208 ipv4_spec = item->spec;
209 filter->dst_ip = ipv4_spec->hdr.dst_addr;
210 filter->src_ip = ipv4_spec->hdr.src_addr;
211 filter->proto = ipv4_spec->hdr.next_proto_id;
213 /* check if the next not void item is TCP or UDP or SCTP */
215 NEXT_ITEM_OF_PATTERN(item, pattern, index);
216 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
217 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
218 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
219 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
220 rte_flow_error_set(error, EINVAL,
221 RTE_FLOW_ERROR_TYPE_ITEM,
222 item, "Not supported by ntuple filter");
226 /* Not supported last point for range */
228 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
229 rte_flow_error_set(error, EINVAL,
230 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
231 item, "Not supported last point for range");
235 /* get the TCP/UDP/SCTP info */
236 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
237 if (item->spec && item->mask) {
238 tcp_mask = item->mask;
241 * Only support src & dst ports, tcp flags,
242 * others should be masked.
244 if (tcp_mask->hdr.sent_seq ||
245 tcp_mask->hdr.recv_ack ||
246 tcp_mask->hdr.data_off ||
247 tcp_mask->hdr.rx_win ||
248 tcp_mask->hdr.cksum ||
249 tcp_mask->hdr.tcp_urp) {
251 sizeof(struct rte_eth_ntuple_filter));
252 rte_flow_error_set(error, EINVAL,
253 RTE_FLOW_ERROR_TYPE_ITEM,
254 item, "Not supported by ntuple filter");
258 filter->dst_port_mask = tcp_mask->hdr.dst_port;
259 filter->src_port_mask = tcp_mask->hdr.src_port;
260 if (tcp_mask->hdr.tcp_flags == 0xFF) {
261 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
262 } else if (!tcp_mask->hdr.tcp_flags) {
263 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
266 sizeof(struct rte_eth_ntuple_filter));
267 rte_flow_error_set(error, EINVAL,
268 RTE_FLOW_ERROR_TYPE_ITEM,
269 item, "Not supported by ntuple filter");
273 tcp_spec = item->spec;
274 filter->dst_port = tcp_spec->hdr.dst_port;
275 filter->src_port = tcp_spec->hdr.src_port;
276 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
278 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
279 if (item->spec && item->mask) {
280 udp_mask = item->mask;
283 * Only support src & dst ports,
284 * others should be masked.
286 if (udp_mask->hdr.dgram_len ||
287 udp_mask->hdr.dgram_cksum) {
289 sizeof(struct rte_eth_ntuple_filter));
290 rte_flow_error_set(error, EINVAL,
291 RTE_FLOW_ERROR_TYPE_ITEM,
292 item, "Not supported by ntuple filter");
296 filter->dst_port_mask = udp_mask->hdr.dst_port;
297 filter->src_port_mask = udp_mask->hdr.src_port;
299 udp_spec = item->spec;
300 filter->dst_port = udp_spec->hdr.dst_port;
301 filter->src_port = udp_spec->hdr.src_port;
304 if (item->spec && item->mask) {
305 sctp_mask = item->mask;
308 * Only support src & dst ports,
309 * others should be masked.
311 if (sctp_mask->hdr.tag ||
312 sctp_mask->hdr.cksum) {
314 sizeof(struct rte_eth_ntuple_filter));
315 rte_flow_error_set(error, EINVAL,
316 RTE_FLOW_ERROR_TYPE_ITEM,
317 item, "Not supported by ntuple filter");
321 filter->dst_port_mask = sctp_mask->hdr.dst_port;
322 filter->src_port_mask = sctp_mask->hdr.src_port;
324 sctp_spec = (const struct rte_flow_item_sctp *)
326 filter->dst_port = sctp_spec->hdr.dst_port;
327 filter->src_port = sctp_spec->hdr.src_port;
330 /* check if the next not void item is END */
332 NEXT_ITEM_OF_PATTERN(item, pattern, index);
333 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
334 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
335 rte_flow_error_set(error, EINVAL,
336 RTE_FLOW_ERROR_TYPE_ITEM,
337 item, "Not supported by ntuple filter");
345 * n-tuple only supports forwarding,
346 * check if the first not void action is QUEUE.
348 NEXT_ITEM_OF_ACTION(act, actions, index);
349 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
350 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
351 rte_flow_error_set(error, EINVAL,
352 RTE_FLOW_ERROR_TYPE_ACTION,
353 item, "Not supported action.");
357 ((const struct rte_flow_action_queue *)act->conf)->index;
359 /* check if the next not void item is END */
361 NEXT_ITEM_OF_ACTION(act, actions, index);
362 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
363 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
364 rte_flow_error_set(error, EINVAL,
365 RTE_FLOW_ERROR_TYPE_ACTION,
366 act, "Not supported action.");
371 /* must be input direction */
372 if (!attr->ingress) {
373 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
374 rte_flow_error_set(error, EINVAL,
375 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
376 attr, "Only support ingress.");
382 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
383 rte_flow_error_set(error, EINVAL,
384 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
385 attr, "Not support egress.");
390 if (attr->transfer) {
391 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
392 rte_flow_error_set(error, EINVAL,
393 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
394 attr, "No support for transfer.");
398 if (attr->priority > 0xFFFF) {
399 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400 rte_flow_error_set(error, EINVAL,
401 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
402 attr, "Error priority.");
405 filter->priority = (uint16_t)attr->priority;
410 /* a specific function for igb because the flags are specific */
412 igb_parse_ntuple_filter(struct rte_eth_dev *dev,
413 const struct rte_flow_attr *attr,
414 const struct rte_flow_item pattern[],
415 const struct rte_flow_action actions[],
416 struct rte_eth_ntuple_filter *filter,
417 struct rte_flow_error *error)
419 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
422 MAC_TYPE_FILTER_SUP(hw->mac.type);
424 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
429 /* Igb doesn't support many priorities. */
430 if (filter->priority > E1000_2TUPLE_MAX_PRI) {
431 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
432 rte_flow_error_set(error, EINVAL,
433 RTE_FLOW_ERROR_TYPE_ITEM,
434 NULL, "Priority not supported by ntuple filter");
438 if (hw->mac.type == e1000_82576) {
439 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
440 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
441 rte_flow_error_set(error, EINVAL,
442 RTE_FLOW_ERROR_TYPE_ITEM,
443 NULL, "queue number not "
444 "supported by ntuple filter");
447 filter->flags |= RTE_5TUPLE_FLAGS;
449 if (filter->src_ip_mask || filter->dst_ip_mask ||
450 filter->src_port_mask) {
451 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
452 rte_flow_error_set(error, EINVAL,
453 RTE_FLOW_ERROR_TYPE_ITEM,
454 NULL, "only two tuple are "
455 "supported by this filter");
458 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
459 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
460 rte_flow_error_set(error, EINVAL,
461 RTE_FLOW_ERROR_TYPE_ITEM,
462 NULL, "queue number not "
463 "supported by ntuple filter");
466 filter->flags |= RTE_2TUPLE_FLAGS;
473 * Parse the rule to see if it is an ethertype rule,
474 * and get the ethertype filter info along the way.
476 * The first not void item can be ETH.
477 * The next not void item must be END.
479 * The first not void action should be QUEUE.
480 * The next not void action should be END.
483 * ETH type 0x0807 0xFFFF
485 * other members in mask and spec should be set to 0x00.
486 * item->last should be NULL.
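 *
 * Purely as an illustration (a hypothetical application-side sketch, not part
 * of this driver), the ETH item for the example above carries only the
 * ethertype and leaves the MAC addresses unmasked:
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807) };
 *	struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};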
489 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
490 const struct rte_flow_item *pattern,
491 const struct rte_flow_action *actions,
492 struct rte_eth_ethertype_filter *filter,
493 struct rte_flow_error *error)
495 const struct rte_flow_item *item;
496 const struct rte_flow_action *act;
497 const struct rte_flow_item_eth *eth_spec;
498 const struct rte_flow_item_eth *eth_mask;
499 const struct rte_flow_action_queue *act_q;
503 rte_flow_error_set(error, EINVAL,
504 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
505 NULL, "NULL pattern.");
510 rte_flow_error_set(error, EINVAL,
511 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
512 NULL, "NULL action.");
517 rte_flow_error_set(error, EINVAL,
518 RTE_FLOW_ERROR_TYPE_ATTR,
519 NULL, "NULL attribute.");
526 /* The first non-void item should be MAC. */
527 NEXT_ITEM_OF_PATTERN(item, pattern, index);
528 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
529 rte_flow_error_set(error, EINVAL,
530 RTE_FLOW_ERROR_TYPE_ITEM,
531 item, "Not supported by ethertype filter");
535 /* Not supported last point for range */
537 rte_flow_error_set(error, EINVAL,
538 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
539 item, "Not supported last point for range");
543 /* Get the MAC info. */
544 if (!item->spec || !item->mask) {
545 rte_flow_error_set(error, EINVAL,
546 RTE_FLOW_ERROR_TYPE_ITEM,
547 item, "Not supported by ethertype filter");
551 eth_spec = item->spec;
552 eth_mask = item->mask;
554 /* Mask bits of source MAC address must be full of 0.
555 * Mask bits of destination MAC address must be full of 1 or full of 0.
558 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
559 (!rte_is_zero_ether_addr(&eth_mask->dst) &&
560 !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
561 rte_flow_error_set(error, EINVAL,
562 RTE_FLOW_ERROR_TYPE_ITEM,
563 item, "Invalid ether address mask");
567 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
568 rte_flow_error_set(error, EINVAL,
569 RTE_FLOW_ERROR_TYPE_ITEM,
570 item, "Invalid ethertype mask");
574 /* If mask bits of destination MAC address
575 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
577 if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
578 filter->mac_addr = eth_spec->dst;
579 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
581 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
583 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
585 /* Check if the next non-void item is END. */
587 NEXT_ITEM_OF_PATTERN(item, pattern, index);
588 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
589 rte_flow_error_set(error, EINVAL,
590 RTE_FLOW_ERROR_TYPE_ITEM,
591 item, "Not supported by ethertype filter.");
598 /* Check if the first non-void action is QUEUE or DROP. */
599 NEXT_ITEM_OF_ACTION(act, actions, index);
600 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
601 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
602 rte_flow_error_set(error, EINVAL,
603 RTE_FLOW_ERROR_TYPE_ACTION,
604 act, "Not supported action.");
608 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
609 act_q = (const struct rte_flow_action_queue *)act->conf;
610 filter->queue = act_q->index;
612 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
615 /* Check if the next non-void item is END */
617 NEXT_ITEM_OF_ACTION(act, actions, index);
618 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
619 rte_flow_error_set(error, EINVAL,
620 RTE_FLOW_ERROR_TYPE_ACTION,
621 act, "Not supported action.");
626 /* Must be input direction */
627 if (!attr->ingress) {
628 rte_flow_error_set(error, EINVAL,
629 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
630 attr, "Only support ingress.");
636 rte_flow_error_set(error, EINVAL,
637 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
638 attr, "Not support egress.");
643 if (attr->transfer) {
644 rte_flow_error_set(error, EINVAL,
645 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
646 attr, "No support for transfer.");
651 if (attr->priority) {
652 rte_flow_error_set(error, EINVAL,
653 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
654 attr, "Not support priority.");
660 rte_flow_error_set(error, EINVAL,
661 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
662 attr, "Not support group.");
670 igb_parse_ethertype_filter(struct rte_eth_dev *dev,
671 const struct rte_flow_attr *attr,
672 const struct rte_flow_item pattern[],
673 const struct rte_flow_action actions[],
674 struct rte_eth_ethertype_filter *filter,
675 struct rte_flow_error *error)
677 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
680 MAC_TYPE_FILTER_SUP(hw->mac.type);
682 ret = cons_parse_ethertype_filter(attr, pattern,
683 actions, filter, error);
688 if (hw->mac.type == e1000_82576) {
689 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
690 memset(filter, 0, sizeof(
691 struct rte_eth_ethertype_filter));
692 rte_flow_error_set(error, EINVAL,
693 RTE_FLOW_ERROR_TYPE_ITEM,
694 NULL, "queue number not supported "
695 "by ethertype filter");
699 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
700 memset(filter, 0, sizeof(
701 struct rte_eth_ethertype_filter));
702 rte_flow_error_set(error, EINVAL,
703 RTE_FLOW_ERROR_TYPE_ITEM,
704 NULL, "queue number not supported "
705 "by ethertype filter");
710 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
711 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
712 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
713 rte_flow_error_set(error, EINVAL,
714 RTE_FLOW_ERROR_TYPE_ITEM,
715 NULL, "IPv4/IPv6 not supported by ethertype filter");
719 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
720 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
721 rte_flow_error_set(error, EINVAL,
722 RTE_FLOW_ERROR_TYPE_ITEM,
723 NULL, "mac compare is unsupported");
727 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
728 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
729 rte_flow_error_set(error, EINVAL,
730 RTE_FLOW_ERROR_TYPE_ITEM,
731 NULL, "drop option is unsupported");
739 * Parse the rule to see if it is a TCP SYN rule,
740 * and get the TCP SYN filter info along the way.
742 * The first not void item must be ETH.
743 * The second not void item must be IPV4 or IPV6.
744 * The third not void item must be TCP.
745 * The next not void item must be END.
747 * The first not void action should be QUEUE.
748 * The next not void action should be END.
752 * IPV4/IPV6 NULL NULL
753 * TCP tcp_flags 0x02 0xFF
755 * other members in mask and spec should be set to 0x00.
756 * item->last should be NULL.
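 *
 * Purely as an illustration (a hypothetical application-side sketch, not part
 * of this driver), the SYN rule above only masks the TCP flags field:
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG };
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};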
759 cons_parse_syn_filter(const struct rte_flow_attr *attr,
760 const struct rte_flow_item pattern[],
761 const struct rte_flow_action actions[],
762 struct rte_eth_syn_filter *filter,
763 struct rte_flow_error *error)
765 const struct rte_flow_item *item;
766 const struct rte_flow_action *act;
767 const struct rte_flow_item_tcp *tcp_spec;
768 const struct rte_flow_item_tcp *tcp_mask;
769 const struct rte_flow_action_queue *act_q;
773 rte_flow_error_set(error, EINVAL,
774 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
775 NULL, "NULL pattern.");
780 rte_flow_error_set(error, EINVAL,
781 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
782 NULL, "NULL action.");
787 rte_flow_error_set(error, EINVAL,
788 RTE_FLOW_ERROR_TYPE_ATTR,
789 NULL, "NULL attribute.");
796 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
797 NEXT_ITEM_OF_PATTERN(item, pattern, index);
798 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
799 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
800 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
801 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
802 rte_flow_error_set(error, EINVAL,
803 RTE_FLOW_ERROR_TYPE_ITEM,
804 item, "Not supported by syn filter");
807 /* Not supported last point for range */
809 rte_flow_error_set(error, EINVAL,
810 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
811 item, "Not supported last point for range");
816 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
817 /* if the item is MAC, the content should be NULL */
818 if (item->spec || item->mask) {
819 rte_flow_error_set(error, EINVAL,
820 RTE_FLOW_ERROR_TYPE_ITEM,
821 item, "Invalid SYN address mask");
825 /* check if the next not void item is IPv4 or IPv6 */
827 NEXT_ITEM_OF_PATTERN(item, pattern, index);
828 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
829 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
830 rte_flow_error_set(error, EINVAL,
831 RTE_FLOW_ERROR_TYPE_ITEM,
832 item, "Not supported by syn filter");
838 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
839 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
840 /* if the item is IP, the content should be NULL */
841 if (item->spec || item->mask) {
842 rte_flow_error_set(error, EINVAL,
843 RTE_FLOW_ERROR_TYPE_ITEM,
844 item, "Invalid SYN mask");
848 /* check if the next not void item is TCP */
850 NEXT_ITEM_OF_PATTERN(item, pattern, index);
851 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
852 rte_flow_error_set(error, EINVAL,
853 RTE_FLOW_ERROR_TYPE_ITEM,
854 item, "Not supported by syn filter");
859 /* Get the TCP info. Only support SYN. */
860 if (!item->spec || !item->mask) {
861 rte_flow_error_set(error, EINVAL,
862 RTE_FLOW_ERROR_TYPE_ITEM,
863 item, "Invalid SYN mask");
866 /* Not supported last point for range */
868 rte_flow_error_set(error, EINVAL,
869 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
870 item, "Not supported last point for range");
874 tcp_spec = item->spec;
875 tcp_mask = item->mask;
876 if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
877 tcp_mask->hdr.src_port ||
878 tcp_mask->hdr.dst_port ||
879 tcp_mask->hdr.sent_seq ||
880 tcp_mask->hdr.recv_ack ||
881 tcp_mask->hdr.data_off ||
882 tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
883 tcp_mask->hdr.rx_win ||
884 tcp_mask->hdr.cksum ||
885 tcp_mask->hdr.tcp_urp) {
886 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
887 rte_flow_error_set(error, EINVAL,
888 RTE_FLOW_ERROR_TYPE_ITEM,
889 item, "Not supported by syn filter");
893 /* check if the next not void item is END */
895 NEXT_ITEM_OF_PATTERN(item, pattern, index);
896 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
897 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
898 rte_flow_error_set(error, EINVAL,
899 RTE_FLOW_ERROR_TYPE_ITEM,
900 item, "Not supported by syn filter");
907 /* check if the first not void action is QUEUE. */
908 NEXT_ITEM_OF_ACTION(act, actions, index);
909 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
910 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
911 rte_flow_error_set(error, EINVAL,
912 RTE_FLOW_ERROR_TYPE_ACTION,
913 act, "Not supported action.");
917 act_q = (const struct rte_flow_action_queue *)act->conf;
918 filter->queue = act_q->index;
920 /* check if the next not void item is END */
922 NEXT_ITEM_OF_ACTION(act, actions, index);
923 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
924 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
925 rte_flow_error_set(error, EINVAL,
926 RTE_FLOW_ERROR_TYPE_ACTION,
927 act, "Not supported action.");
932 /* must be input direction */
933 if (!attr->ingress) {
934 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
935 rte_flow_error_set(error, EINVAL,
936 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
937 attr, "Only support ingress.");
943 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
944 rte_flow_error_set(error, EINVAL,
945 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
946 attr, "Not support egress.");
951 if (attr->transfer) {
952 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
953 rte_flow_error_set(error, EINVAL,
954 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
955 attr, "No support for transfer.");
959 /* Support 2 priorities, the lowest or highest. */
960 if (!attr->priority) {
962 } else if (attr->priority == (uint32_t)~0U) {
965 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
966 rte_flow_error_set(error, EINVAL,
967 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
968 attr, "Not support priority.");
976 igb_parse_syn_filter(struct rte_eth_dev *dev,
977 const struct rte_flow_attr *attr,
978 const struct rte_flow_item pattern[],
979 const struct rte_flow_action actions[],
980 struct rte_eth_syn_filter *filter,
981 struct rte_flow_error *error)
983 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
986 MAC_TYPE_FILTER_SUP(hw->mac.type);
988 ret = cons_parse_syn_filter(attr, pattern,
989 actions, filter, error);
991 if (hw->mac.type == e1000_82576) {
992 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
993 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
994 rte_flow_error_set(error, EINVAL,
995 RTE_FLOW_ERROR_TYPE_ITEM,
996 NULL, "queue number not "
997 "supported by syn filter");
1001 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
1002 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1003 rte_flow_error_set(error, EINVAL,
1004 RTE_FLOW_ERROR_TYPE_ITEM,
1005 NULL, "queue number not "
1006 "supported by syn filter");
1018 * Parse the rule to see if it is a flex byte rule,
1019 * and get the flex byte filter info along the way.
1021 * The first not void item must be RAW.
1022 * The second not void item can be RAW or END.
1023 * The third not void item can be RAW or END.
1024 * The last not void item must be END.
1026 * The first not void action should be QUEUE.
1027 * The next not void action should be END.
1030 * RAW relative 0 0x1
1031 * offset 0 0xFFFFFFFF
1032 * pattern {0x08, 0x06} {0xFF, 0xFF}
1033 * RAW relative 1 0x1
1034 * offset 100 0xFFFFFFFF
1035 * pattern {0x11, 0x22, 0x33} {0xFF, 0xFF, 0xFF}
1037 * other members in mask and spec should be set to 0x00.
1038 * item->last should be NULL.
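 *
 * Purely as an illustration (a hypothetical application-side sketch, not part
 * of this driver), the first RAW item of the example above could look like:
 *
 *	static const uint8_t raw_pat[] = { 0x08, 0x06 };
 *	static const uint8_t raw_msk[] = { 0xFF, 0xFF };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0, .offset = 0,
 *		.length = sizeof(raw_pat), .pattern = raw_pat };
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1, .offset = 0xFFFFFFFF,
 *		.length = UINT16_MAX, .pattern = raw_msk };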
1041 cons_parse_flex_filter(const struct rte_flow_attr *attr,
1042 const struct rte_flow_item pattern[],
1043 const struct rte_flow_action actions[],
1044 struct rte_eth_flex_filter *filter,
1045 struct rte_flow_error *error)
1047 const struct rte_flow_item *item;
1048 const struct rte_flow_action *act;
1049 const struct rte_flow_item_raw *raw_spec;
1050 const struct rte_flow_item_raw *raw_mask;
1051 const struct rte_flow_action_queue *act_q;
1052 uint32_t index, i, offset, total_offset;
1053 uint32_t max_offset = 0;
1054 int32_t shift, j, raw_index = 0;
1055 int32_t relative[IGB_FLEX_RAW_NUM] = {0};
1056 int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
1059 rte_flow_error_set(error, EINVAL,
1060 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1061 NULL, "NULL pattern.");
1066 rte_flow_error_set(error, EINVAL,
1067 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1068 NULL, "NULL action.");
1073 rte_flow_error_set(error, EINVAL,
1074 RTE_FLOW_ERROR_TYPE_ATTR,
1075 NULL, "NULL attribute.");
1084 /* the first not void item should be RAW */
1085 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1086 if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1087 rte_flow_error_set(error, EINVAL,
1088 RTE_FLOW_ERROR_TYPE_ITEM,
1089 item, "Not supported by flex filter");
1092 /* Not supported last point for range */
1094 rte_flow_error_set(error, EINVAL,
1095 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1096 item, "Not supported last point for range");
1100 raw_spec = item->spec;
1101 raw_mask = item->mask;
1103 if (!raw_mask->length ||
1104 !raw_mask->relative) {
1105 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1106 rte_flow_error_set(error, EINVAL,
1107 RTE_FLOW_ERROR_TYPE_ITEM,
1108 item, "Not supported by flex filter");
1112 if (raw_mask->offset)
1113 offset = raw_spec->offset;
1117 for (j = 0; j < raw_spec->length; j++) {
1118 if (raw_mask->pattern[j] != 0xFF) {
1119 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1120 rte_flow_error_set(error, EINVAL,
1121 RTE_FLOW_ERROR_TYPE_ITEM,
1122 item, "Not supported by flex filter");
1129 if (raw_spec->relative) {
1130 for (j = raw_index; j > 0; j--) {
1131 total_offset += raw_offset[j - 1];
1132 if (!relative[j - 1])
1135 if (total_offset + raw_spec->length + offset > max_offset)
1136 max_offset = total_offset + raw_spec->length + offset;
1138 if (raw_spec->length + offset > max_offset)
1139 max_offset = raw_spec->length + offset;
1142 if ((raw_spec->length + offset + total_offset) >
1143 RTE_FLEX_FILTER_MAXLEN) {
1144 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1145 rte_flow_error_set(error, EINVAL,
1146 RTE_FLOW_ERROR_TYPE_ITEM,
1147 item, "Not supported by flex filter");
1151 if (raw_spec->relative == 0) {
1152 for (j = 0; j < raw_spec->length; j++)
1153 filter->bytes[offset + j] =
1154 raw_spec->pattern[j];
1155 j = offset / CHAR_BIT;
1156 shift = offset % CHAR_BIT;
1158 for (j = 0; j < raw_spec->length; j++)
1159 filter->bytes[total_offset + offset + j] =
1160 raw_spec->pattern[j];
1161 j = (total_offset + offset) / CHAR_BIT;
1162 shift = (total_offset + offset) % CHAR_BIT;
1167 for ( ; shift < CHAR_BIT; shift++) {
1168 filter->mask[j] |= (0x80 >> shift);
1170 if (i == raw_spec->length)
1172 if (shift == (CHAR_BIT - 1)) {
1178 relative[raw_index] = raw_spec->relative;
1179 raw_offset[raw_index] = offset + raw_spec->length;
1182 /* check if the next not void item is RAW */
1184 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1185 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1186 item->type != RTE_FLOW_ITEM_TYPE_END) {
1187 rte_flow_error_set(error, EINVAL,
1188 RTE_FLOW_ERROR_TYPE_ITEM,
1189 item, "Not supported by flex filter");
1193 /* go back to parser */
1194 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1195 /* if the item is RAW, the content should be parsed */
1199 filter->len = RTE_ALIGN(max_offset, 8);
1204 /* check if the first not void action is QUEUE. */
1205 NEXT_ITEM_OF_ACTION(act, actions, index);
1206 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1207 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1208 rte_flow_error_set(error, EINVAL,
1209 RTE_FLOW_ERROR_TYPE_ACTION,
1210 act, "Not supported action.");
1214 act_q = (const struct rte_flow_action_queue *)act->conf;
1215 filter->queue = act_q->index;
1217 /* check if the next not void item is END */
1219 NEXT_ITEM_OF_ACTION(act, actions, index);
1220 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1221 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1222 rte_flow_error_set(error, EINVAL,
1223 RTE_FLOW_ERROR_TYPE_ACTION,
1224 act, "Not supported action.");
1229 /* must be input direction */
1230 if (!attr->ingress) {
1231 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1232 rte_flow_error_set(error, EINVAL,
1233 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1234 attr, "Only support ingress.");
1240 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1241 rte_flow_error_set(error, EINVAL,
1242 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1243 attr, "Not support egress.");
1248 if (attr->transfer) {
1249 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1250 rte_flow_error_set(error, EINVAL,
1251 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1252 attr, "No support for transfer.");
1256 if (attr->priority > 0xFFFF) {
1257 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1258 rte_flow_error_set(error, EINVAL,
1259 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1260 attr, "Error priority.");
1264 filter->priority = (uint16_t)attr->priority;
1270 igb_parse_flex_filter(struct rte_eth_dev *dev,
1271 const struct rte_flow_attr *attr,
1272 const struct rte_flow_item pattern[],
1273 const struct rte_flow_action actions[],
1274 struct rte_eth_flex_filter *filter,
1275 struct rte_flow_error *error)
1277 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1280 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
1282 ret = cons_parse_flex_filter(attr, pattern,
1283 actions, filter, error);
1285 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
1286 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1287 rte_flow_error_set(error, EINVAL,
1288 RTE_FLOW_ERROR_TYPE_ITEM,
1289 NULL, "queue number not supported by flex filter");
1293 if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
1294 filter->len % sizeof(uint64_t) != 0) {
1295 PMD_DRV_LOG(ERR, "filter's length is out of range");
1299 if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
1300 PMD_DRV_LOG(ERR, "filter's priority is out of range");
1311 igb_parse_rss_filter(struct rte_eth_dev *dev,
1312 const struct rte_flow_attr *attr,
1313 const struct rte_flow_action actions[],
1314 struct igb_rte_flow_rss_conf *rss_conf,
1315 struct rte_flow_error *error)
1317 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1318 const struct rte_flow_action *act;
1319 const struct rte_flow_action_rss *rss;
1323 * RSS only supports forwarding,
1324 * check if the first not void action is RSS.
1327 NEXT_ITEM_OF_ACTION(act, actions, index);
1328 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1329 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1330 rte_flow_error_set(error, EINVAL,
1331 RTE_FLOW_ERROR_TYPE_ACTION,
1332 act, "Not supported action.");
1336 rss = (const struct rte_flow_action_rss *)act->conf;
1338 if (!rss || !rss->queue_num) {
1339 rte_flow_error_set(error, EINVAL,
1340 RTE_FLOW_ERROR_TYPE_ACTION,
1346 for (n = 0; n < rss->queue_num; n++) {
1347 if (rss->queue[n] >= dev->data->nb_rx_queues) {
1348 rte_flow_error_set(error, EINVAL,
1349 RTE_FLOW_ERROR_TYPE_ACTION,
1351 "queue id > max number of queues");
1356 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
1357 return rte_flow_error_set
1358 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
1359 "non-default RSS hash functions are not supported");
1361 return rte_flow_error_set
1362 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
1363 "a nonzero RSS encapsulation level is not supported");
1364 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1365 return rte_flow_error_set
1366 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
1367 "RSS hash key must be exactly 40 bytes");
1368 if (((hw->mac.type == e1000_82576) &&
1369 (rss->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
1370 ((hw->mac.type != e1000_82576) &&
1371 (rss->queue_num > IGB_MAX_RX_QUEUE_NUM)))
1372 return rte_flow_error_set
1373 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
1374 "too many queues for RSS context");
1375 if (igb_rss_conf_init(dev, rss_conf, rss))
1376 return rte_flow_error_set
1377 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
1378 "RSS context initialization failure");
1380 /* check if the next not void item is END */
1382 NEXT_ITEM_OF_ACTION(act, actions, index);
1383 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1384 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1385 rte_flow_error_set(error, EINVAL,
1386 RTE_FLOW_ERROR_TYPE_ACTION,
1387 act, "Not supported action.");
1392 /* must be input direction */
1393 if (!attr->ingress) {
1394 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1395 rte_flow_error_set(error, EINVAL,
1396 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1397 attr, "Only support ingress.");
1403 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1404 rte_flow_error_set(error, EINVAL,
1405 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1406 attr, "Not support egress.");
1411 if (attr->transfer) {
1412 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1413 rte_flow_error_set(error, EINVAL,
1414 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1415 attr, "No support for transfer.");
1419 if (attr->priority > 0xFFFF) {
1420 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1421 rte_flow_error_set(error, EINVAL,
1422 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1423 attr, "Error priority.");
1431 * Create a flow rule.
1432 * Theoretically one rule can match more than one filter.
1433 * We will let it use the filter it hits first,
1434 * so the sequence of the checks matters.
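 * (The candidate filter types below are tried in this order: n-tuple,
 * ethertype, TCP SYN, flex byte and finally RSS.)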
1436 static struct rte_flow *
1437 igb_flow_create(struct rte_eth_dev *dev,
1438 const struct rte_flow_attr *attr,
1439 const struct rte_flow_item pattern[],
1440 const struct rte_flow_action actions[],
1441 struct rte_flow_error *error)
1444 struct rte_eth_ntuple_filter ntuple_filter;
1445 struct rte_eth_ethertype_filter ethertype_filter;
1446 struct rte_eth_syn_filter syn_filter;
1447 struct rte_eth_flex_filter flex_filter;
1448 struct igb_rte_flow_rss_conf rss_conf;
1449 struct rte_flow *flow = NULL;
1450 struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1451 struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1452 struct igb_eth_syn_filter_ele *syn_filter_ptr;
1453 struct igb_flex_filter_ele *flex_filter_ptr;
1454 struct igb_rss_conf_ele *rss_filter_ptr;
1455 struct igb_flow_mem *igb_flow_mem_ptr;
1457 flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
1459 PMD_DRV_LOG(ERR, "failed to allocate memory");
1460 return (struct rte_flow *)flow;
1462 igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
1463 sizeof(struct igb_flow_mem), 0);
1464 if (!igb_flow_mem_ptr) {
1465 PMD_DRV_LOG(ERR, "failed to allocate memory");
1469 igb_flow_mem_ptr->flow = flow;
1470 igb_flow_mem_ptr->dev = dev;
1471 TAILQ_INSERT_TAIL(&igb_flow_list,
1472 igb_flow_mem_ptr, entries);
1474 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1475 ret = igb_parse_ntuple_filter(dev, attr, pattern,
1476 actions, &ntuple_filter, error);
1478 ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
1480 ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
1481 sizeof(struct igb_ntuple_filter_ele), 0);
1482 if (!ntuple_filter_ptr) {
1483 PMD_DRV_LOG(ERR, "failed to allocate memory");
1487 rte_memcpy(&ntuple_filter_ptr->filter_info,
1489 sizeof(struct rte_eth_ntuple_filter));
1490 TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
1491 ntuple_filter_ptr, entries);
1492 flow->rule = ntuple_filter_ptr;
1493 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
1499 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1500 ret = igb_parse_ethertype_filter(dev, attr, pattern,
1501 actions, &ethertype_filter, error);
1503 ret = igb_add_del_ethertype_filter(dev,
1504 &ethertype_filter, TRUE);
1506 ethertype_filter_ptr = rte_zmalloc(
1507 "igb_ethertype_filter",
1508 sizeof(struct igb_ethertype_filter_ele), 0);
1509 if (!ethertype_filter_ptr) {
1510 PMD_DRV_LOG(ERR, "failed to allocate memory");
1514 rte_memcpy(&ethertype_filter_ptr->filter_info,
1516 sizeof(struct rte_eth_ethertype_filter));
1517 TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
1518 ethertype_filter_ptr, entries);
1519 flow->rule = ethertype_filter_ptr;
1520 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
1526 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1527 ret = igb_parse_syn_filter(dev, attr, pattern,
1528 actions, &syn_filter, error);
1530 ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
1532 syn_filter_ptr = rte_zmalloc("igb_syn_filter",
1533 sizeof(struct igb_eth_syn_filter_ele), 0);
1534 if (!syn_filter_ptr) {
1535 PMD_DRV_LOG(ERR, "failed to allocate memory");
1539 rte_memcpy(&syn_filter_ptr->filter_info,
1541 sizeof(struct rte_eth_syn_filter));
1542 TAILQ_INSERT_TAIL(&igb_filter_syn_list,
1545 flow->rule = syn_filter_ptr;
1546 flow->filter_type = RTE_ETH_FILTER_SYN;
1552 memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1553 ret = igb_parse_flex_filter(dev, attr, pattern,
1554 actions, &flex_filter, error);
1556 ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
1558 flex_filter_ptr = rte_zmalloc("igb_flex_filter",
1559 sizeof(struct igb_flex_filter_ele), 0);
1560 if (!flex_filter_ptr) {
1561 PMD_DRV_LOG(ERR, "failed to allocate memory");
1565 rte_memcpy(&flex_filter_ptr->filter_info,
1567 sizeof(struct rte_eth_flex_filter));
1568 TAILQ_INSERT_TAIL(&igb_filter_flex_list,
1569 flex_filter_ptr, entries);
1570 flow->rule = flex_filter_ptr;
1571 flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
1576 memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1577 ret = igb_parse_rss_filter(dev, attr,
1578 actions, &rss_conf, error);
1580 ret = igb_config_rss_filter(dev, &rss_conf, TRUE);
1582 rss_filter_ptr = rte_zmalloc("igb_rss_filter",
1583 sizeof(struct igb_rss_conf_ele), 0);
1584 if (!rss_filter_ptr) {
1585 PMD_DRV_LOG(ERR, "failed to allocate memory");
1588 igb_rss_conf_init(dev, &rss_filter_ptr->filter_info,
1590 TAILQ_INSERT_TAIL(&igb_filter_rss_list,
1591 rss_filter_ptr, entries);
1592 flow->rule = rss_filter_ptr;
1593 flow->filter_type = RTE_ETH_FILTER_HASH;
1599 TAILQ_REMOVE(&igb_flow_list,
1600 igb_flow_mem_ptr, entries);
1601 rte_flow_error_set(error, -ret,
1602 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1603 "Failed to create flow.");
1604 rte_free(igb_flow_mem_ptr);
1610 * Check if the flow rule is supported by igb.
1611 * It only checks the format. It doesn't guarantee that the rule can be
1612 * programmed into the HW, because there may not be enough room for it.
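 *
 * Typical application-side usage (an illustrative sketch only, not part of
 * this driver) is to validate a rule before creating it:
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);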
1615 igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
1616 const struct rte_flow_attr *attr,
1617 const struct rte_flow_item pattern[],
1618 const struct rte_flow_action actions[],
1619 struct rte_flow_error *error)
1621 struct rte_eth_ntuple_filter ntuple_filter;
1622 struct rte_eth_ethertype_filter ethertype_filter;
1623 struct rte_eth_syn_filter syn_filter;
1624 struct rte_eth_flex_filter flex_filter;
1625 struct igb_rte_flow_rss_conf rss_conf;
1628 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1629 ret = igb_parse_ntuple_filter(dev, attr, pattern,
1630 actions, &ntuple_filter, error);
1634 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1635 ret = igb_parse_ethertype_filter(dev, attr, pattern,
1636 actions, &ethertype_filter, error);
1640 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1641 ret = igb_parse_syn_filter(dev, attr, pattern,
1642 actions, &syn_filter, error);
1646 memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1647 ret = igb_parse_flex_filter(dev, attr, pattern,
1648 actions, &flex_filter, error);
1652 memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1653 ret = igb_parse_rss_filter(dev, attr,
1654 actions, &rss_conf, error);
1659 /* Destroy a flow rule on igb. */
1661 igb_flow_destroy(struct rte_eth_dev *dev,
1662 struct rte_flow *flow,
1663 struct rte_flow_error *error)
1666 struct rte_flow *pmd_flow = flow;
1667 enum rte_filter_type filter_type = pmd_flow->filter_type;
1668 struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1669 struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1670 struct igb_eth_syn_filter_ele *syn_filter_ptr;
1671 struct igb_flex_filter_ele *flex_filter_ptr;
1672 struct igb_flow_mem *igb_flow_mem_ptr;
1673 struct igb_rss_conf_ele *rss_filter_ptr;
1675 switch (filter_type) {
1676 case RTE_ETH_FILTER_NTUPLE:
1677 ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
1679 ret = igb_add_del_ntuple_filter(dev,
1680 &ntuple_filter_ptr->filter_info, FALSE);
1682 TAILQ_REMOVE(&igb_filter_ntuple_list,
1683 ntuple_filter_ptr, entries);
1684 rte_free(ntuple_filter_ptr);
1687 case RTE_ETH_FILTER_ETHERTYPE:
1688 ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
1690 ret = igb_add_del_ethertype_filter(dev,
1691 &ethertype_filter_ptr->filter_info, FALSE);
1693 TAILQ_REMOVE(&igb_filter_ethertype_list,
1694 ethertype_filter_ptr, entries);
1695 rte_free(ethertype_filter_ptr);
1698 case RTE_ETH_FILTER_SYN:
1699 syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
1701 ret = eth_igb_syn_filter_set(dev,
1702 &syn_filter_ptr->filter_info, FALSE);
1704 TAILQ_REMOVE(&igb_filter_syn_list,
1705 syn_filter_ptr, entries);
1706 rte_free(syn_filter_ptr);
1709 case RTE_ETH_FILTER_FLEXIBLE:
1710 flex_filter_ptr = (struct igb_flex_filter_ele *)
1712 ret = eth_igb_add_del_flex_filter(dev,
1713 &flex_filter_ptr->filter_info, FALSE);
1715 TAILQ_REMOVE(&igb_filter_flex_list,
1716 flex_filter_ptr, entries);
1717 rte_free(flex_filter_ptr);
1720 case RTE_ETH_FILTER_HASH:
1721 rss_filter_ptr = (struct igb_rss_conf_ele *)
1723 ret = igb_config_rss_filter(dev,
1724 &rss_filter_ptr->filter_info, FALSE);
1726 TAILQ_REMOVE(&igb_filter_rss_list,
1727 rss_filter_ptr, entries);
1728 rte_free(rss_filter_ptr);
1732 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1739 rte_flow_error_set(error, EINVAL,
1740 RTE_FLOW_ERROR_TYPE_HANDLE,
1741 NULL, "Failed to destroy flow");
1745 TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1746 if (igb_flow_mem_ptr->flow == pmd_flow) {
1747 TAILQ_REMOVE(&igb_flow_list,
1748 igb_flow_mem_ptr, entries);
1749 rte_free(igb_flow_mem_ptr);
1757 /* remove all the n-tuple filters */
1759 igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
1761 struct e1000_filter_info *filter_info =
1762 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1763 struct e1000_5tuple_filter *p_5tuple;
1764 struct e1000_2tuple_filter *p_2tuple;
1766 while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
1767 igb_delete_5tuple_filter_82576(dev, p_5tuple);
1769 while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
1770 igb_delete_2tuple_filter(dev, p_2tuple);
1773 /* remove all the ether type filters */
1775 igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
1777 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1778 struct e1000_filter_info *filter_info =
1779 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1782 for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
1783 if (filter_info->ethertype_mask & (1 << i)) {
1784 (void)igb_ethertype_filter_remove(filter_info,
1786 E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
1787 E1000_WRITE_FLUSH(hw);
1792 /* remove the SYN filter */
1794 igb_clear_syn_filter(struct rte_eth_dev *dev)
1796 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1797 struct e1000_filter_info *filter_info =
1798 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1800 if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
1801 filter_info->syn_info = 0;
1802 E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
1803 E1000_WRITE_FLUSH(hw);
1807 /* remove all the flex filters */
1809 igb_clear_all_flex_filter(struct rte_eth_dev *dev)
1811 struct e1000_filter_info *filter_info =
1812 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1813 struct e1000_flex_filter *flex_filter;
1815 while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
1816 igb_remove_flex_filter(dev, flex_filter);
1819 /* remove the rss filter */
1821 igb_clear_rss_filter(struct rte_eth_dev *dev)
1823 struct e1000_filter_info *filter =
1824 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1826 if (filter->rss_info.conf.queue_num)
1827 igb_config_rss_filter(dev, &filter->rss_info, FALSE);
1831 igb_filterlist_flush(struct rte_eth_dev *dev)
1833 struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1834 struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1835 struct igb_eth_syn_filter_ele *syn_filter_ptr;
1836 struct igb_flex_filter_ele *flex_filter_ptr;
1837 struct igb_rss_conf_ele *rss_filter_ptr;
1838 struct igb_flow_mem *igb_flow_mem_ptr;
1839 enum rte_filter_type filter_type;
1840 struct rte_flow *pmd_flow;
1842 TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1843 if (igb_flow_mem_ptr->dev == dev) {
1844 pmd_flow = igb_flow_mem_ptr->flow;
1845 filter_type = pmd_flow->filter_type;
1847 switch (filter_type) {
1848 case RTE_ETH_FILTER_NTUPLE:
1850 (struct igb_ntuple_filter_ele *)
1852 TAILQ_REMOVE(&igb_filter_ntuple_list,
1853 ntuple_filter_ptr, entries);
1854 rte_free(ntuple_filter_ptr);
1856 case RTE_ETH_FILTER_ETHERTYPE:
1857 ethertype_filter_ptr =
1858 (struct igb_ethertype_filter_ele *)
1860 TAILQ_REMOVE(&igb_filter_ethertype_list,
1861 ethertype_filter_ptr, entries);
1862 rte_free(ethertype_filter_ptr);
1864 case RTE_ETH_FILTER_SYN:
1866 (struct igb_eth_syn_filter_ele *)
1868 TAILQ_REMOVE(&igb_filter_syn_list,
1869 syn_filter_ptr, entries);
1870 rte_free(syn_filter_ptr);
1872 case RTE_ETH_FILTER_FLEXIBLE:
1874 (struct igb_flex_filter_ele *)
1876 TAILQ_REMOVE(&igb_filter_flex_list,
1877 flex_filter_ptr, entries);
1878 rte_free(flex_filter_ptr);
1880 case RTE_ETH_FILTER_HASH:
1882 (struct igb_rss_conf_ele *)
1884 TAILQ_REMOVE(&igb_filter_rss_list,
1885 rss_filter_ptr, entries);
1886 rte_free(rss_filter_ptr);
1889 PMD_DRV_LOG(WARNING, "Filter type "
1890 "(%d) not supported", filter_type);
1893 TAILQ_REMOVE(&igb_flow_list,
1896 rte_free(igb_flow_mem_ptr->flow);
1897 rte_free(igb_flow_mem_ptr);
1902 /* Destroy all flow rules associated with a port on igb. */
1904 igb_flow_flush(struct rte_eth_dev *dev,
1905 __rte_unused struct rte_flow_error *error)
1907 igb_clear_all_ntuple_filter(dev);
1908 igb_clear_all_ethertype_filter(dev);
1909 igb_clear_syn_filter(dev);
1910 igb_clear_all_flex_filter(dev);
1911 igb_clear_rss_filter(dev);
1912 igb_filterlist_flush(dev);
1917 const struct rte_flow_ops igb_flow_ops = {
1918 .validate = igb_flow_validate,
1919 .create = igb_flow_create,
1920 .destroy = igb_flow_destroy,
1921 .flush = igb_flow_flush,