/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <sys/queue.h>
#include <rte_bus_pci.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "txgbe_ethdev.h"

#define TXGBE_MIN_N_TUPLE_PRIO 1
#define TXGBE_MAX_N_TUPLE_PRIO 7
#define TXGBE_MAX_FLX_SOURCE_OFF 62

/* ntuple filter list structure */
struct txgbe_ntuple_filter_ele {
	TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};

/* ethertype filter list structure */
struct txgbe_ethertype_filter_ele {
	TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};

/* syn filter list structure */
struct txgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};

/* fdir filter list structure */
struct txgbe_fdir_rule_ele {
	TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
	struct txgbe_fdir_rule filter_info;
};

/* l2_tunnel filter list structure */
struct txgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
	struct txgbe_l2_tunnel_conf filter_info;
};

/* rss filter list structure */
struct txgbe_rss_conf_ele {
	TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
	struct txgbe_rte_flow_rss_conf filter_info;
};

/* txgbe_flow memory list structure */
struct txgbe_flow_mem {
	TAILQ_ENTRY(txgbe_flow_mem) entries;
	struct rte_flow *flow;
};

TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);

static struct txgbe_ntuple_filter_list filter_ntuple_list;
static struct txgbe_ethertype_filter_list filter_ethertype_list;
static struct txgbe_syn_filter_list filter_syn_list;
static struct txgbe_fdir_rule_filter_list filter_fdir_list;
static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct txgbe_rss_filter_list filter_rss_list;
static struct txgbe_flow_mem_list txgbe_flow_list;
/**
 * An endless loop cannot occur, under two assumptions:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally use network order.
 */
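
/*
 * Editor's illustration (not driver code): what the byte-order
 * convention above means for a caller. Field values are arbitrary
 * example values; RTE_IPV4() comes from rte_ip.h.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { 0 };
 *	struct rte_flow_action_queue queue = { 0 };
 *
 *	// packet-describing item fields take network (big endian) order
 *	ip_spec.hdr.next_proto_id = 17;	// UDP, single byte
 *	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50));
 *	// attributes and actions stay in CPU order
 *	queue.index = 1;
 */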
111 * Parse the rule to see if it is a n-tuple rule.
112 * And get the n-tuple filter info BTW.
114 * The first not void item can be ETH or IPV4.
115 * The second not void item must be IPV4 if the first one is ETH.
116 * The third not void item must be UDP or TCP.
117 * The next not void item must be END.
119 * The first not void action should be QUEUE.
120 * The next not void action should be END.
124 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
125 * dst_addr 192.167.3.50 0xFFFFFFFF
126 * next_proto_id 17 0xFF
127 * UDP/TCP/ src_port 80 0xFFFF
128 * SCTP dst_port 80 0xFFFF
130 * other members in mask and spec should set to 0x00.
131 * item->last should be NULL.
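
/*
 * Editor's illustration (not driver code): a hedged sketch of an
 * application building the rule shape above through the public
 * rte_flow API. port_id, the queue index and the addresses are
 * hypothetical example values.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { 0 }, ip_mask = { 0 };
 *	struct rte_flow_item_udp udp_spec = { 0 }, udp_mask = { 0 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_error err;
 *
 *	ip_spec.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20));
 *	ip_mask.hdr.src_addr = UINT32_MAX;
 *	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50));
 *	ip_mask.hdr.dst_addr = UINT32_MAX;
 *	ip_spec.hdr.next_proto_id = 17;
 *	ip_mask.hdr.next_proto_id = UINT8_MAX;
 *	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *	udp_mask.hdr.dst_port = UINT16_MAX;
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 */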
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = item->spec;
		eth_mask = item->mask;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(eth_spec, &eth_null,
				sizeof(struct rte_flow_item_eth)) ||
			 memcmp(eth_mask, &eth_null,
				sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 or Vlan */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = item->spec;
		vlan_mask = item->mask;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(vlan_spec, &vlan_null,
				sizeof(struct rte_flow_item_vlan)) ||
			 memcmp(vlan_mask, &vlan_null,
				sizeof(struct rte_flow_item_vlan)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->mask) {
		/* get the IPv4 info */
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		ipv4_mask = item->mask;
		/**
		 * Only support src & dst addresses, protocol,
		 * others should be masked.
		 */
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.hdr_checksum) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((ipv4_mask->hdr.src_addr != 0 &&
		     ipv4_mask->hdr.src_addr != UINT32_MAX) ||
		    (ipv4_mask->hdr.dst_addr != 0 &&
		     ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
		    (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
		     ipv4_mask->hdr.next_proto_id != 0)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
		filter->proto_mask = ipv4_mask->hdr.next_proto_id;

		ipv4_spec = item->spec;
		filter->dst_ip = ipv4_spec->hdr.dst_addr;
		filter->src_ip = ipv4_spec->hdr.src_addr;
		filter->proto = ipv4_spec->hdr.next_proto_id;
	}

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec && !item->mask)) {
		goto action;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((tcp_mask->hdr.src_port != 0 &&
		     tcp_mask->hdr.src_port != UINT16_MAX) ||
		    (tcp_mask->hdr.dst_port != 0 &&
		     tcp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((udp_mask->hdr.src_port != 0 &&
		     udp_mask->hdr.src_port != UINT16_MAX) ||
		    (udp_mask->hdr.dst_port != 0 &&
		     udp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* a specific function for txgbe because the flags are device-specific */
static int
txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* txgbe doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* txgbe doesn't support many priorities */
	if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* fixed value for txgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
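
/*
 * Editor's illustration (not driver code): an rte_flow rule matching
 * the shape this parser accepts; 0x0807 and queue 2 are example values.
 *
 *	struct rte_flow_item_eth eth_spec = { 0 }, eth_mask = { 0 };
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *
 *	eth_spec.type = rte_cpu_to_be_16(0x0807);
 *	eth_mask.type = rte_cpu_to_be_16(0xFFFF);
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */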
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = item->spec;
	eth_mask = item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
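
/*
 * Editor's illustration (not driver code): the SYN rule shape above
 * expressed through the rte_flow API. Queue 0 is an example value;
 * the priority must be 0 or UINT32_MAX for this parser.
 *
 *	struct rte_flow_item_tcp tcp_spec = { 0 }, tcp_mask = { 0 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
 *
 *	tcp_spec.hdr.tcp_flags = RTE_TCP_SYN_FLAG;	// 0x02
 *	tcp_mask.hdr.tcp_flags = RTE_TCP_SYN_FLAG;
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */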
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = item->spec;
	tcp_mask = item->mask;
	if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
txgbe_parse_syn_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
				actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info along the way.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x2	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
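
/*
 * Editor's illustration (not driver code): an E-tag item matching the
 * doc above. rsvd_grp_ecid_b carries GRP in bits 13:12 and the E-CID
 * base in bits 11:0; the GRP/E-CID values and VF id are examples.
 *
 *	struct rte_flow_item_e_tag spec = { 0 }, mask = { 0 };
 *	struct rte_flow_action_vf vf = { .id = 1 };	// forward to VF 1
 *
 *	spec.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x2 << 12) | 0x309);
 *	mask.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF);
 */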
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct txgbe_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = item->spec;
	e_tag_mask = item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is VF or PF. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
	    act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct txgbe_l2_tunnel_conf *l2_tn_filter,
			 struct rte_flow_error *error)
{
	int ret = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				actions, l2_tn_filter, error);

	vf_num = pci_dev->max_vfs;

	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;

	return ret;
}
/* Parse to get the attr and action info of flow director rule. */
static int
txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct txgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else {
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = TXGBE_FDIRPICMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
	    act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
/* search next no void pattern and skip fuzzy */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}

static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec = item->spec;
			last = item->last;
			mask = item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;

			if (!last)
				lh = sh;
			else
				lh = last->thresh;

			mh = mask->thresh;
			sh = sh & mh;
			lh = lh & mh;

			/* the masked threshold range must be non-empty */
			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0x0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
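
/*
 * Editor's illustration (not driver code): a sketch of the RAW
 * (flexbyte) item matching the FLEX rows above: a 2-byte match at
 * even offset 12. The 0x86DD pattern value is an example.
 *
 *	struct rte_flow_item_raw raw_spec = { 0 }, raw_mask = { 0 };
 *	const uint8_t flex_pattern[2] = { 0x86, 0xDD };
 *	const uint8_t flex_pattern_mask[2] = { 0xFF, 0xFF };
 *
 *	raw_spec.relative = 0;
 *	raw_spec.offset = 12;	// even, <= TXGBE_MAX_FLX_SOURCE_OFF
 *	raw_spec.length = 2;
 *	raw_spec.pattern = flex_pattern;
 *
 *	raw_mask.relative = 1;
 *	raw_mask.search = 1;
 *	raw_mask.offset = -1;	// all bits set
 *	raw_mask.limit = UINT16_MAX;
 *	raw_mask.length = UINT16_MAX;
 *	raw_mask.pattern = flex_pattern_mask;
 */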
static int
txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
			       const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct txgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	u32 ptype = 0;
	uint8_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct txgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct txgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct txgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/*** If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
			    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask = item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec = item->spec;
			rule->input.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->input.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV6];

		/**
		 * 1. must signature match
		 * 2. not support last
		 * 3. mask must not null
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask = item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct txgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec = item->spec;
			rte_memcpy(rule->input.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->input.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = item->spec;
			rule->input.src_port =
				tcp_spec->hdr.src_port;
			rule->input.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = item->spec;
			rule->input.src_port =
				udp_spec->hdr.src_port;
			rule->input.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
		ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask = item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec = item->spec;
			rule->input.src_port =
				sctp_spec->hdr.src_port;
			rule->input.dst_port =
				sctp_spec->hdr.dst_port;
		}
		/* others even sctp port is not supported */
		sctp_mask = item->mask;
		if (sctp_mask &&
		    (sctp_mask->hdr.src_port ||
		     sctp_mask->hdr.dst_port ||
		     sctp_mask->hdr.tag ||
		     sctp_mask->hdr.cksum)) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* mask should not be null */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->input.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct txgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));

	return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info along the way.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VXLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
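
/*
 * Editor's illustration (not driver code): the outer part of the VxLAN
 * rule shape above. The VNI value is an example; the trailing inner ETH
 * item (MAC/VLAN spec and mask) is omitted here for brevity.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { 0 }, vxlan_mask = { 0 };
 *
 *	vxlan_spec.vni[0] = 0x00;
 *	vxlan_spec.vni[1] = 0x32;
 *	vxlan_spec.vni[2] = 0x54;
 *	memset(vxlan_mask.vni, 0xFF, sizeof(vxlan_mask.vni));
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	// plus inner MAC/VLAN
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */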
2140 txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2141 const struct rte_flow_item pattern[],
2142 const struct rte_flow_action actions[],
2143 struct txgbe_fdir_rule *rule,
2144 struct rte_flow_error *error)
2146 const struct rte_flow_item *item;
2147 const struct rte_flow_item_eth *eth_mask;
2151 rte_flow_error_set(error, EINVAL,
2152 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2153 NULL, "NULL pattern.");
2158 rte_flow_error_set(error, EINVAL,
2159 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2160 NULL, "NULL action.");
2165 rte_flow_error_set(error, EINVAL,
2166 RTE_FLOW_ERROR_TYPE_ATTR,
2167 NULL, "NULL attribute.");
2172 * Some fields may not be provided. Set spec to 0 and mask to default
2173 * value. So, we need not do anything for the not provided fields later.
2175 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2176 memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
2177 rule->mask.vlan_tci_mask = 0;
2180 * The first not void item should be
2181 * MAC, IPv4, IPv6, UDP, VxLAN or NVGRE.
2183 item = next_no_void_pattern(pattern, NULL);
2184 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2185 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2186 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2187 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2188 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2189 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2190 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2191 rte_flow_error_set(error, EINVAL,
2192 RTE_FLOW_ERROR_TYPE_ITEM,
2193 item, "Not supported by fdir filter");
2197 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2200 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2201 /* Only used to describe the protocol stack. */
2202 if (item->spec || item->mask) {
2203 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2204 rte_flow_error_set(error, EINVAL,
2205 RTE_FLOW_ERROR_TYPE_ITEM,
2206 item, "Not supported by fdir filter");
2209 /* Not supported last point for range */
2211 rte_flow_error_set(error, EINVAL,
2212 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2213 item, "Not supported last point for range");
2217 /* Check if the next not void item is IPv4 or IPv6. */
2218 item = next_no_void_pattern(pattern, item);
2219 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2220 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2221 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2222 rte_flow_error_set(error, EINVAL,
2223 RTE_FLOW_ERROR_TYPE_ITEM,
2224 item, "Not supported by fdir filter");
2230 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2231 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2232 /* Only used to describe the protocol stack. */
2233 if (item->spec || item->mask) {
2234 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2235 rte_flow_error_set(error, EINVAL,
2236 RTE_FLOW_ERROR_TYPE_ITEM,
2237 item, "Not supported by fdir filter");
2240 /* Not supported last point for range */
2242 rte_flow_error_set(error, EINVAL,
2243 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2244 item, "Not supported last point for range");
2248 /* Check if the next not void item is UDP or NVGRE. */
2249 item = next_no_void_pattern(pattern, item);
2250 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2251 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2252 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2253 rte_flow_error_set(error, EINVAL,
2254 RTE_FLOW_ERROR_TYPE_ITEM,
2255 item, "Not supported by fdir filter");
2261 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2262 /* Only used to describe the protocol stack. */
2263 if (item->spec || item->mask) {
2264 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2265 rte_flow_error_set(error, EINVAL,
2266 RTE_FLOW_ERROR_TYPE_ITEM,
2267 item, "Not supported by fdir filter");
2270 /* Not supported last point for range */
2272 rte_flow_error_set(error, EINVAL,
2273 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2274 item, "Not supported last point for range");
2278 /* Check if the next not void item is VxLAN. */
2279 item = next_no_void_pattern(pattern, item);
2280 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2281 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2282 rte_flow_error_set(error, EINVAL,
2283 RTE_FLOW_ERROR_TYPE_ITEM,
2284 item, "Not supported by fdir filter");
2289 /* check if the next not void item is MAC */
2290 item = next_no_void_pattern(pattern, item);
2291 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2292 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2293 rte_flow_error_set(error, EINVAL,
2294 RTE_FLOW_ERROR_TYPE_ITEM,
2295 item, "Not supported by fdir filter");
2300 * Only the VLAN and dst MAC address are supported;
2301 * all other fields should be masked.
2305 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2306 rte_flow_error_set(error, EINVAL,
2307 RTE_FLOW_ERROR_TYPE_ITEM,
2308 item, "Not supported by fdir filter");
2311 /* Not supported last point for range */
2313 rte_flow_error_set(error, EINVAL,
2314 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2315 item, "Not supported last point for range");
2318 rule->b_mask = TRUE;
2319 eth_mask = item->mask;
2321 /* Ether type should be masked. */
2322 if (eth_mask->type) {
2323 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2324 rte_flow_error_set(error, EINVAL,
2325 RTE_FLOW_ERROR_TYPE_ITEM,
2326 item, "Not supported by fdir filter");
2330 /* src MAC address should be masked. */
2331 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2332 if (eth_mask->src.addr_bytes[j]) {
2334 sizeof(struct txgbe_fdir_rule));
2335 rte_flow_error_set(error, EINVAL,
2336 RTE_FLOW_ERROR_TYPE_ITEM,
2337 item, "Not supported by fdir filter");
2341 rule->mask.mac_addr_byte_mask = 0;
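/*
 * Worked example (illustrative): a dst mask of ff:ff:ff:ff:ff:ff sets all
 * six bits below, i.e. mac_addr_byte_mask == 0x3F, while a per-byte mask
 * such as ff:ff:ff:ff:00:00 yields 0x0F. Any mask byte other than 0x00 or
 * 0xFF is rejected.
 */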
2342 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2343 /* It's a per byte mask. */
2344 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2345 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2346 } else if (eth_mask->dst.addr_bytes[j]) {
2347 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2348 rte_flow_error_set(error, EINVAL,
2349 RTE_FLOW_ERROR_TYPE_ITEM,
2350 item, "Not supported by fdir filter");
2355 /* When there is no VLAN, treat it as a full mask. */
2356 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
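/*
 * Note (illustrative): 0xEFFF covers the PCP and VLAN ID fields of the
 * TCI but leaves the DEI/CFI bit (bit 12) as a don't-care.
 */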
2359 * Check if the next not void item is vlan or ipv4.
2360 * IPv6 is not supported.
2362 item = next_no_void_pattern(pattern, item);
2363 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
2364 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2365 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2366 rte_flow_error_set(error, EINVAL,
2367 RTE_FLOW_ERROR_TYPE_ITEM,
2368 item, "Not supported by fdir filter");
2371 /* Not supported last point for range */
2373 rte_flow_error_set(error, EINVAL,
2374 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2375 item, "Not supported last point for range");
2380 * If the tag is 0, the VLAN is treated as a don't-care.
2384 return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
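/*
 * Illustrative sketch (not part of the driver): a VxLAN flow director
 * pattern as an application might build it for the parser above. The
 * function name, MAC address, TCI and queue index are made-up example
 * values. The outer ETH/IPV4/UDP items only describe the protocol stack
 * and therefore carry no spec/mask; the inner ETH item matches the
 * destination MAC per byte, and the VLAN item matches the TCI.
 */
static __rte_unused void
example_build_vxlan_fdir_rule(struct rte_flow_item pattern[7],
		struct rte_flow_action actions[2])
{
	static const struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
	};
	static const struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	static const struct rte_flow_item_vlan vlan_spec = {
		.tci = RTE_BE16(0x2016),
	};
	static const struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0xEFFF),
	};
	static const struct rte_flow_action_queue queue = { .index = 1 };

	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP };
	pattern[3] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_VXLAN };
	pattern[4] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &inner_eth_spec,
		.mask = &inner_eth_mask,
	};
	pattern[5] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.spec = &vlan_spec,
		.mask = &vlan_mask,
	};
	pattern[6] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };

	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &queue,
	};
	actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
}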
2388 txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2389 const struct rte_flow_attr *attr,
2390 const struct rte_flow_item pattern[],
2391 const struct rte_flow_action actions[],
2392 struct txgbe_fdir_rule *rule,
2393 struct rte_flow_error *error)
2396 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2397 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2399 ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
2400 actions, rule, error);
2404 ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
2405 actions, rule, error);
2411 if (hw->mac.type == txgbe_mac_raptor &&
2412 rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
2413 (rule->input.src_port != 0 || rule->input.dst_port != 0))
2416 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2417 fdir_mode != rule->mode)
2420 if (rule->queue >= dev->data->nb_rx_queues)
2427 txgbe_parse_rss_filter(struct rte_eth_dev *dev,
2428 const struct rte_flow_attr *attr,
2429 const struct rte_flow_action actions[],
2430 struct txgbe_rte_flow_rss_conf *rss_conf,
2431 struct rte_flow_error *error)
2433 const struct rte_flow_action *act;
2434 const struct rte_flow_action_rss *rss;
2438 * RSS only supports forwarding;
2439 * check if the first not void action is RSS.
2441 act = next_no_void_action(actions, NULL);
2442 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2443 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2444 rte_flow_error_set(error, EINVAL,
2445 RTE_FLOW_ERROR_TYPE_ACTION,
2446 act, "Not supported action.");
2450 rss = (const struct rte_flow_action_rss *)act->conf;
2452 if (!rss || !rss->queue_num) {
2453 rte_flow_error_set(error, EINVAL,
2454 RTE_FLOW_ERROR_TYPE_ACTION,
2460 for (n = 0; n < rss->queue_num; n++) {
2461 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2462 rte_flow_error_set(error, EINVAL,
2463 RTE_FLOW_ERROR_TYPE_ACTION,
2465 "queue id > max number of queues");
2470 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2471 return rte_flow_error_set
2472 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2473 "non-default RSS hash functions are not supported");
2474 if (rss->level)
2475 return rte_flow_error_set
2476 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2477 "a nonzero RSS encapsulation level is not supported");
2478 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2479 return rte_flow_error_set
2480 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2481 "RSS hash key must be exactly 40 bytes");
2482 if (rss->queue_num > RTE_DIM(rss_conf->queue))
2483 return rte_flow_error_set
2484 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2485 "too many queues for RSS context");
2486 if (txgbe_rss_conf_init(rss_conf, rss))
2487 return rte_flow_error_set
2488 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2489 "RSS context initialization failure");
2491 /* check if the next not void action is END */
2492 act = next_no_void_action(actions, act);
2493 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2494 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2495 rte_flow_error_set(error, EINVAL,
2496 RTE_FLOW_ERROR_TYPE_ACTION,
2497 act, "Not supported action.");
2502 /* must be input direction */
2503 if (!attr->ingress) {
2504 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2505 rte_flow_error_set(error, EINVAL,
2506 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2507 attr, "Only support ingress.");
2513 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2514 rte_flow_error_set(error, EINVAL,
2515 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2516 attr, "Not support egress.");
2521 if (attr->transfer) {
2522 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2523 rte_flow_error_set(error, EINVAL,
2524 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2525 attr, "No support for transfer.");
2529 if (attr->priority > 0xFFFF) {
2530 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2531 rte_flow_error_set(error, EINVAL,
2532 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2533 attr, "Error priority.");
2541 txgbe_filterlist_init(void)
2543 TAILQ_INIT(&filter_ntuple_list);
2544 TAILQ_INIT(&filter_ethertype_list);
2545 TAILQ_INIT(&filter_syn_list);
2546 TAILQ_INIT(&filter_fdir_list);
2547 TAILQ_INIT(&filter_l2_tunnel_list);
2548 TAILQ_INIT(&filter_rss_list);
2549 TAILQ_INIT(&txgbe_flow_list);
2553 * Create or destroy a flow rule.
2554 * Theoretically one rule can match more than one kind of filter.
2555 * We will let it use the first filter type that it hits.
2556 * So, the order of the parsers below matters.
2558 static struct rte_flow *
2559 txgbe_flow_create(struct rte_eth_dev *dev,
2560 const struct rte_flow_attr *attr,
2561 const struct rte_flow_item pattern[],
2562 const struct rte_flow_action actions[],
2563 struct rte_flow_error *error)
2566 struct rte_eth_ntuple_filter ntuple_filter;
2567 struct rte_eth_ethertype_filter ethertype_filter;
2568 struct rte_eth_syn_filter syn_filter;
2569 struct txgbe_fdir_rule fdir_rule;
2570 struct txgbe_l2_tunnel_conf l2_tn_filter;
2571 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
2572 struct txgbe_rte_flow_rss_conf rss_conf;
2573 struct rte_flow *flow = NULL;
2574 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2575 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2576 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2577 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2578 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2579 struct txgbe_rss_conf_ele *rss_filter_ptr;
2580 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2581 uint8_t first_mask = FALSE;
2583 flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
2584 if (!flow) {
2585 PMD_DRV_LOG(ERR, "failed to allocate memory");
2586 return NULL;
2588 txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
2589 sizeof(struct txgbe_flow_mem), 0);
2590 if (!txgbe_flow_mem_ptr) {
2591 PMD_DRV_LOG(ERR, "failed to allocate memory");
2595 txgbe_flow_mem_ptr->flow = flow;
2596 TAILQ_INSERT_TAIL(&txgbe_flow_list,
2597 txgbe_flow_mem_ptr, entries);
2599 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2600 ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2601 actions, &ntuple_filter, error);
2604 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2606 ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
2607 sizeof(struct txgbe_ntuple_filter_ele), 0);
2608 if (!ntuple_filter_ptr) {
2609 PMD_DRV_LOG(ERR, "failed to allocate memory");
2612 rte_memcpy(&ntuple_filter_ptr->filter_info,
2614 sizeof(struct rte_eth_ntuple_filter));
2615 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2616 ntuple_filter_ptr, entries);
2617 flow->rule = ntuple_filter_ptr;
2618 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2624 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2625 ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2626 actions, ðertype_filter, error);
2628 ret = txgbe_add_del_ethertype_filter(dev,
2629 ðertype_filter, TRUE);
2631 ethertype_filter_ptr =
2632 rte_zmalloc("txgbe_ethertype_filter",
2633 sizeof(struct txgbe_ethertype_filter_ele), 0);
2634 if (!ethertype_filter_ptr) {
2635 PMD_DRV_LOG(ERR, "failed to allocate memory");
2638 rte_memcpy(ðertype_filter_ptr->filter_info,
2640 sizeof(struct rte_eth_ethertype_filter));
2641 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2642 ethertype_filter_ptr, entries);
2643 flow->rule = ethertype_filter_ptr;
2644 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2650 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2651 ret = txgbe_parse_syn_filter(dev, attr, pattern,
2652 actions, &syn_filter, error);
2654 ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
2656 syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
2657 sizeof(struct txgbe_eth_syn_filter_ele), 0);
2658 if (!syn_filter_ptr) {
2659 PMD_DRV_LOG(ERR, "failed to allocate memory");
2662 rte_memcpy(&syn_filter_ptr->filter_info,
2664 sizeof(struct rte_eth_syn_filter));
2665 TAILQ_INSERT_TAIL(&filter_syn_list,
2668 flow->rule = syn_filter_ptr;
2669 flow->filter_type = RTE_ETH_FILTER_SYN;
2675 memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2676 ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2677 actions, &fdir_rule, error);
2679 /* A mask cannot be deleted. */
2680 if (fdir_rule.b_mask) {
2681 if (!fdir_info->mask_added) {
2682 /* It's the first time the mask is set. */
2683 rte_memcpy(&fdir_info->mask,
2685 sizeof(struct txgbe_hw_fdir_mask));
2686 fdir_info->flex_bytes_offset =
2687 fdir_rule.flex_bytes_offset;
2689 if (fdir_rule.mask.flex_bytes_mask)
2690 txgbe_fdir_set_flexbytes_offset(dev,
2691 fdir_rule.flex_bytes_offset);
2693 ret = txgbe_fdir_set_input_mask(dev);
2697 fdir_info->mask_added = TRUE;
2701 * Only one global mask is supported,
2702 * so all rules' masks must be identical.
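/*
 * Illustrative example: if the first rule was created with a mask
 * covering only the source IP, a later rule that also masks the
 * destination port differs from the stored global mask and is
 * rejected by the comparison below.
 */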
2704 ret = memcmp(&fdir_info->mask,
2706 sizeof(struct txgbe_hw_fdir_mask));
2710 if (fdir_info->flex_bytes_offset !=
2711 fdir_rule.flex_bytes_offset)
2716 if (fdir_rule.b_spec) {
2717 ret = txgbe_fdir_filter_program(dev, &fdir_rule,
2720 fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
2721 sizeof(struct txgbe_fdir_rule_ele), 0);
2722 if (!fdir_rule_ptr) {
2724 "failed to allocate memory");
2727 rte_memcpy(&fdir_rule_ptr->filter_info,
2729 sizeof(struct txgbe_fdir_rule));
2730 TAILQ_INSERT_TAIL(&filter_fdir_list,
2731 fdir_rule_ptr, entries);
2732 flow->rule = fdir_rule_ptr;
2733 flow->filter_type = RTE_ETH_FILTER_FDIR;
2740 * clear the mask_added flag if we failed to
2741 * program the rule
2742 */
2743 if (first_mask)
2744 fdir_info->mask_added = FALSE;
2752 memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2753 ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2754 actions, &l2_tn_filter, error);
2756 ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2758 l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
2759 sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
2760 if (!l2_tn_filter_ptr) {
2761 PMD_DRV_LOG(ERR, "failed to allocate memory");
2764 rte_memcpy(&l2_tn_filter_ptr->filter_info,
2766 sizeof(struct txgbe_l2_tunnel_conf));
2767 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2768 l2_tn_filter_ptr, entries);
2769 flow->rule = l2_tn_filter_ptr;
2770 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2775 memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2776 ret = txgbe_parse_rss_filter(dev, attr,
2777 actions, &rss_conf, error);
2779 ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
2781 rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
2782 sizeof(struct txgbe_rss_conf_ele), 0);
2783 if (!rss_filter_ptr) {
2784 PMD_DRV_LOG(ERR, "failed to allocate memory");
2787 txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
2789 TAILQ_INSERT_TAIL(&filter_rss_list,
2790 rss_filter_ptr, entries);
2791 flow->rule = rss_filter_ptr;
2792 flow->filter_type = RTE_ETH_FILTER_HASH;
2798 TAILQ_REMOVE(&txgbe_flow_list,
2799 txgbe_flow_mem_ptr, entries);
2800 rte_flow_error_set(error, -ret,
2801 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2802 "Failed to create flow.");
2803 rte_free(txgbe_flow_mem_ptr);
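/*
 * Illustrative sketch (not part of the driver): how an application reaches
 * txgbe_flow_create() through the generic rte_flow API. The function name
 * is made up for the example; rte_flow_create() looks up the PMD's
 * rte_flow_ops and dispatches to the .create callback implemented above.
 */
static __rte_unused struct rte_flow *
example_create_flow(uint16_t port_id,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[])
{
	struct rte_flow_error error;
	const struct rte_flow_attr attr = { .ingress = 1 };

	/* Optional pre-check: validates the rule format only (see below). */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) != 0)
		return NULL;

	/* On failure this returns NULL and error.message gives the cause. */
	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}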
2809 * Check if the flow rule is supported by txgbe.
2810 * It only checks the format. It doesn't guarantee that the rule can be
2811 * programmed into the HW, because there may not be enough room for it.
2814 txgbe_flow_validate(struct rte_eth_dev *dev,
2815 const struct rte_flow_attr *attr,
2816 const struct rte_flow_item pattern[],
2817 const struct rte_flow_action actions[],
2818 struct rte_flow_error *error)
2820 struct rte_eth_ntuple_filter ntuple_filter;
2821 struct rte_eth_ethertype_filter ethertype_filter;
2822 struct rte_eth_syn_filter syn_filter;
2823 struct txgbe_l2_tunnel_conf l2_tn_filter;
2824 struct txgbe_fdir_rule fdir_rule;
2825 struct txgbe_rte_flow_rss_conf rss_conf;
2828 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2829 ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2830 actions, &ntuple_filter, error);
2834 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2835 ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2836 actions, ðertype_filter, error);
2840 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2841 ret = txgbe_parse_syn_filter(dev, attr, pattern,
2842 actions, &syn_filter, error);
2846 memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2847 ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2848 actions, &fdir_rule, error);
2852 memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2853 ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2854 actions, &l2_tn_filter, error);
2858 memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2859 ret = txgbe_parse_rss_filter(dev, attr,
2860 actions, &rss_conf, error);
2865 /* Destroy a flow rule on txgbe. */
2867 txgbe_flow_destroy(struct rte_eth_dev *dev,
2868 struct rte_flow *flow,
2869 struct rte_flow_error *error)
2872 struct rte_flow *pmd_flow = flow;
2873 enum rte_filter_type filter_type = pmd_flow->filter_type;
2874 struct rte_eth_ntuple_filter ntuple_filter;
2875 struct rte_eth_ethertype_filter ethertype_filter;
2876 struct rte_eth_syn_filter syn_filter;
2877 struct txgbe_fdir_rule fdir_rule;
2878 struct txgbe_l2_tunnel_conf l2_tn_filter;
2879 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2880 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2881 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2882 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2883 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2884 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2885 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
2886 struct txgbe_rss_conf_ele *rss_filter_ptr;
2888 switch (filter_type) {
2889 case RTE_ETH_FILTER_NTUPLE:
2890 ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
2892 rte_memcpy(&ntuple_filter,
2893 &ntuple_filter_ptr->filter_info,
2894 sizeof(struct rte_eth_ntuple_filter));
2895 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2897 TAILQ_REMOVE(&filter_ntuple_list,
2898 ntuple_filter_ptr, entries);
2899 rte_free(ntuple_filter_ptr);
2902 case RTE_ETH_FILTER_ETHERTYPE:
2903 ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
2905 rte_memcpy(ðertype_filter,
2906 ðertype_filter_ptr->filter_info,
2907 sizeof(struct rte_eth_ethertype_filter));
2908 ret = txgbe_add_del_ethertype_filter(dev,
2909 ðertype_filter, FALSE);
2911 TAILQ_REMOVE(&filter_ethertype_list,
2912 ethertype_filter_ptr, entries);
2913 rte_free(ethertype_filter_ptr);
2916 case RTE_ETH_FILTER_SYN:
2917 syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
2919 rte_memcpy(&syn_filter,
2920 &syn_filter_ptr->filter_info,
2921 sizeof(struct rte_eth_syn_filter));
2922 ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
2924 TAILQ_REMOVE(&filter_syn_list,
2925 syn_filter_ptr, entries);
2926 rte_free(syn_filter_ptr);
2929 case RTE_ETH_FILTER_FDIR:
2930 fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
2931 rte_memcpy(&fdir_rule,
2932 &fdir_rule_ptr->filter_info,
2933 sizeof(struct txgbe_fdir_rule));
2934 ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2936 TAILQ_REMOVE(&filter_fdir_list,
2937 fdir_rule_ptr, entries);
2938 rte_free(fdir_rule_ptr);
2939 if (TAILQ_EMPTY(&filter_fdir_list))
2940 fdir_info->mask_added = false;
2943 case RTE_ETH_FILTER_L2_TUNNEL:
2944 l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
2946 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2947 sizeof(struct txgbe_l2_tunnel_conf));
2948 ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2950 TAILQ_REMOVE(&filter_l2_tunnel_list,
2951 l2_tn_filter_ptr, entries);
2952 rte_free(l2_tn_filter_ptr);
2955 case RTE_ETH_FILTER_HASH:
2956 rss_filter_ptr = (struct txgbe_rss_conf_ele *)
2958 ret = txgbe_config_rss_filter(dev,
2959 &rss_filter_ptr->filter_info, FALSE);
2961 TAILQ_REMOVE(&filter_rss_list,
2962 rss_filter_ptr, entries);
2963 rte_free(rss_filter_ptr);
2967 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2974 rte_flow_error_set(error, EINVAL,
2975 RTE_FLOW_ERROR_TYPE_HANDLE,
2976 NULL, "Failed to destroy flow");
2980 TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
2981 if (txgbe_flow_mem_ptr->flow == pmd_flow) {
2982 TAILQ_REMOVE(&txgbe_flow_list,
2983 txgbe_flow_mem_ptr, entries);
2984 rte_free(txgbe_flow_mem_ptr);
2992 /* Destroy all flow rules associated with a port on txgbe. */
2994 txgbe_flow_flush(struct rte_eth_dev *dev,
2995 struct rte_flow_error *error)
3002 const struct rte_flow_ops txgbe_flow_ops = {
3003 .validate = txgbe_flow_validate,
3004 .create = txgbe_flow_create,
3005 .destroy = txgbe_flow_destroy,
3006 .flush = txgbe_flow_flush,
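/*
 * Illustrative sketch (not part of the driver): the ops table above is what
 * the generic rte_flow layer dispatches to. For example, rte_flow_flush()
 * ends up in txgbe_flow_flush(), removing every rule created on the port.
 * The wrapper name below is made up for the example.
 */
static __rte_unused int
example_flush_port_flows(uint16_t port_id)
{
	struct rte_flow_error error;

	return rte_flow_flush(port_id, &error);
}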