/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
 */

#include <string.h>

#include <rte_errno.h>
#include <rte_flow_driver.h>

#include "txgbe_ethdev.h"

#define TXGBE_MIN_N_TUPLE_PRIO 1
#define TXGBE_MAX_N_TUPLE_PRIO 7
/**
 * An endless loop can never happen under these assumptions:
 * 1. there is at least one non-void item (END).
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}
static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
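
/*
 * Illustrative sketch, compiled out: VOID items may appear anywhere in a
 * pattern, and the helpers above simply step over them. Here
 * next_no_void_pattern(example_voids, NULL) would return the ETH item,
 * and next_no_void_pattern(example_voids, &example_voids[0]) the IPV4
 * item. The array name is hypothetical.
 */
#if 0
static const struct rte_flow_item example_voids[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
	{ .type = RTE_FLOW_ITEM_TYPE_VOID },
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif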
/**
 * Please be aware there's an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * it normally uses network (big endian) order like the packets do.
 */
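
/*
 * Illustrative sketch, compiled out: because item specs are big endian,
 * a 16-bit field such as the EtherType must go through rte_be_to_cpu_16()
 * before being compared with a CPU-order constant (the parsers below do
 * exactly this). The helper name is hypothetical.
 */
#if 0
static uint16_t
example_spec_type_to_cpu(const struct rte_flow_item_eth *spec)
{
	/* spec->type holds network (big endian) order */
	return rte_be_to_cpu_16(spec->type);
}
#endif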
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = item->spec;
		eth_mask = item->mask;
		/* not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if ((item->spec || item->mask) &&
		    (memcmp(eth_spec, &eth_null,
			    sizeof(struct rte_flow_item_eth)) ||
		     memcmp(eth_mask, &eth_null,
			    sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 or VLAN */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = item->spec;
		vlan_mask = item->mask;
		/* not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* the content should be NULL */
		if ((item->spec || item->mask) &&
		    (memcmp(vlan_spec, &vlan_null,
			    sizeof(struct rte_flow_item_vlan)) ||
		     memcmp(vlan_mask, &vlan_null,
			    sizeof(struct rte_flow_item_vlan)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = item->mask;
	/**
	 * Only support src & dst addresses and protocol;
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	if ((ipv4_mask->hdr.src_addr != 0 &&
	     ipv4_mask->hdr.src_addr != UINT32_MAX) ||
	    (ipv4_mask->hdr.dst_addr != 0 &&
	     ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
	    (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
	     ipv4_mask->hdr.next_proto_id != 0)) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* L4 spec and mask both empty: match any L4 packet, skip to actions */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec && !item->mask)) {
		goto action;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports and TCP flags;
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((tcp_mask->hdr.src_port != 0 &&
		     tcp_mask->hdr.src_port != UINT16_MAX) ||
		    (tcp_mask->hdr.dst_port != 0 &&
		     tcp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((udp_mask->hdr.src_port != 0 &&
		     udp_mask->hdr.src_port != UINT16_MAX) ||
		    (udp_mask->hdr.dst_port != 0 &&
		     udp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:
	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
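
/*
 * Illustrative sketch, compiled out: one shape of pattern/actions this
 * parser accepts, mirroring the table in the comment above
 * cons_parse_ntuple_filter(). The example_* names and the queue index
 * are hypothetical.
 */
#if 0
static const struct rte_flow_item_ipv4 example_ipv4_spec = {
	.hdr = {
		.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
		.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
		.next_proto_id = 17, /* UDP */
	},
};
static const struct rte_flow_item_ipv4 example_ipv4_mask = {
	.hdr = {
		.src_addr = RTE_BE32(UINT32_MAX),
		.dst_addr = RTE_BE32(UINT32_MAX),
		.next_proto_id = UINT8_MAX,
	},
};
static const struct rte_flow_item_udp example_udp_spec = {
	.hdr = { .src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) },
};
static const struct rte_flow_item_udp example_udp_mask = {
	.hdr = {
		.src_port = RTE_BE16(UINT16_MAX),
		.dst_port = RTE_BE16(UINT16_MAX),
	},
};
/* the leading ETH item is optional; VOID items would simply be skipped */
static const struct rte_flow_item example_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
	  .spec = &example_ipv4_spec, .mask = &example_ipv4_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_UDP,
	  .spec = &example_udp_spec, .mask = &example_udp_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
static const struct rte_flow_action_queue example_queue = { .index = 0 };
static const struct rte_flow_action example_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif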
/* a specific function for txgbe because the filter flags are specific */
static int
txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;
	/* txgbe doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* txgbe doesn't support many priorities */
	if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* fixed value for txgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}
	eth_spec = item->spec;
	eth_mask = item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
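
/*
 * Illustrative sketch, compiled out: the single-item pattern shape this
 * parser accepts, mirroring the 0x0807 example in the comment above
 * cons_parse_ethertype_filter(). The example_* names are hypothetical.
 */
#if 0
static const struct rte_flow_item_eth example_etype_spec = {
	.type = RTE_BE16(0x0807),
};
static const struct rte_flow_item_eth example_etype_mask = {
	.type = RTE_BE16(UINT16_MAX), /* EtherType must be fully masked */
};
static const struct rte_flow_item example_etype_pattern[] = {
	{ .type = RTE_FLOW_ITEM_TYPE_ETH,
	  .spec = &example_etype_spec, .mask = &example_etype_mask },
	{ .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif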
static int
txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					  actions, filter, error);
	if (ret)
		return ret;
	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index too big");
		return -rte_errno;
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *
txgbe_flow_create(struct rte_eth_dev *dev __rte_unused,
		const struct rte_flow_attr *attr __rte_unused,
		const struct rte_flow_item pattern[] __rte_unused,
		const struct rte_flow_action actions[] __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	struct rte_flow *flow = NULL;

	return flow;
}
/**
 * Check if the flow rule is supported by txgbe.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
txgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	int ret = 0;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	return ret;
}
/* Destroy a flow rule on txgbe. */
static int
txgbe_flow_destroy(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow *flow __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	int ret = 0;

	return ret;
}
/* Destroy all flow rules associated with a port on txgbe. */
static int
txgbe_flow_flush(struct rte_eth_dev *dev __rte_unused,
		struct rte_flow_error *error __rte_unused)
{
	int ret = 0;

	return ret;
}
const struct rte_flow_ops txgbe_flow_ops = {
	.validate = txgbe_flow_validate,
	.create = txgbe_flow_create,
	.destroy = txgbe_flow_destroy,
	.flush = txgbe_flow_flush,
};
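
/*
 * Illustrative sketch, compiled out: how an application reaches these ops
 * through the generic rte_flow API. Validation only checks the rule
 * format, so rte_flow_create() can still fail; port_id, attr, pattern
 * and actions are assumed to be prepared by the caller, and the helper
 * name is hypothetical.
 */
#if 0
static struct rte_flow *
example_validate_then_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
		return NULL;
	return rte_flow_create(port_id, attr, pattern, actions, &err);
}
#endif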