1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
6 #include <rte_bus_pci.h>
7 #include <rte_malloc.h>
9 #include <rte_flow_driver.h>
11 #include "txgbe_ethdev.h"
13 #define TXGBE_MIN_N_TUPLE_PRIO 1
14 #define TXGBE_MAX_N_TUPLE_PRIO 7
15 #define TXGBE_MAX_FLX_SOURCE_OFF 62
17 /* ntuple filter list structure */
18 struct txgbe_ntuple_filter_ele {
19 TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
20 struct rte_eth_ntuple_filter filter_info;
22 /* ethertype filter list structure */
23 struct txgbe_ethertype_filter_ele {
24 TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
25 struct rte_eth_ethertype_filter filter_info;
27 /* syn filter list structure */
28 struct txgbe_eth_syn_filter_ele {
29 TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
30 struct rte_eth_syn_filter filter_info;
32 /* fdir filter list structure */
33 struct txgbe_fdir_rule_ele {
34 TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
35 struct txgbe_fdir_rule filter_info;
37 /* l2_tunnel filter list structure */
38 struct txgbe_eth_l2_tunnel_conf_ele {
39 TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
40 struct txgbe_l2_tunnel_conf filter_info;
42 /* rss filter list structure */
43 struct txgbe_rss_conf_ele {
44 TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
45 struct txgbe_rte_flow_rss_conf filter_info;
47 /* txgbe_flow memory list structure */
48 struct txgbe_flow_mem {
49 TAILQ_ENTRY(txgbe_flow_mem) entries;
50 struct rte_flow *flow;
53 TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
54 TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
55 TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
56 TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
57 TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
58 TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
59 TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
61 static struct txgbe_ntuple_filter_list filter_ntuple_list;
62 static struct txgbe_ethertype_filter_list filter_ethertype_list;
63 static struct txgbe_syn_filter_list filter_syn_list;
64 static struct txgbe_fdir_rule_filter_list filter_fdir_list;
65 static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
66 static struct txgbe_rss_filter_list filter_rss_list;
67 static struct txgbe_flow_mem_list txgbe_flow_list;
70 * An endless loop will never happen with the assumptions below:
71 * 1. there is at least one non-void item (END)
72 * 2. cur is before END.
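 *
 * A typical walk over a pattern array with this helper looks roughly
 * like the sketch below (illustrative only, not part of the driver):
 *
 *	item = next_no_void_pattern(pattern, NULL);
 *	while (item->type != RTE_FLOW_ITEM_TYPE_END)
 *		item = next_no_void_pattern(pattern, item);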
75 const struct rte_flow_item *next_no_void_pattern(
76 const struct rte_flow_item pattern[],
77 const struct rte_flow_item *cur)
79 const struct rte_flow_item *next =
80 cur ? cur + 1 : &pattern[0];
82 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
89 const struct rte_flow_action *next_no_void_action(
90 const struct rte_flow_action actions[],
91 const struct rte_flow_action *cur)
93 const struct rte_flow_action *next =
94 cur ? cur + 1 : &actions[0];
96 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
103 * Please be aware there's an assumption for all the parsers:
104 * rte_flow_item uses big (network) endian, while rte_flow_attr and
105 * rte_flow_action use CPU order.
106 * Because the pattern is used to describe packets, the packet fields
107 * normally follow network byte order.
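 *
 * For example, an application matching IPv4 dst 192.168.1.20 would
 * normally fill the spec in network order (a sketch only, the variable
 * name is hypothetical):
 *
 *	ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20));
 *
 * while attr.priority and the QUEUE action index stay in CPU order.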
111 * Parse the rule to see if it is an n-tuple rule,
112 * and get the n-tuple filter info along the way.
114 * The first not void item can be ETH or IPV4.
115 * The second not void item must be IPV4 if the first one is ETH.
116 * The third not void item must be UDP or TCP.
117 * The next not void item must be END.
119 * The first not void action should be QUEUE.
120 * The next not void action should be END.
124 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
125 * dst_addr 192.167.3.50 0xFFFFFFFF
126 * next_proto_id 17 0xFF
127 * UDP/TCP/ src_port 80 0xFFFF
128 * SCTP dst_port 80 0xFFFF
130 * other members in mask and spec should be set to 0x00.
131 * item->last should be NULL.
133 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
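 *
 * An application-side sketch of such a rule (illustrative only, the
 * variable names are hypothetical and not part of this driver):
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr.next_proto_id = IPPROTO_UDP };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr.next_proto_id = UINT8_MAX };
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(80),
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = UINT16_MAX, .hdr.dst_port = UINT16_MAX,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};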
137 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
138 const struct rte_flow_item pattern[],
139 const struct rte_flow_action actions[],
140 struct rte_eth_ntuple_filter *filter,
141 struct rte_flow_error *error)
143 const struct rte_flow_item *item;
144 const struct rte_flow_action *act;
145 const struct rte_flow_item_ipv4 *ipv4_spec;
146 const struct rte_flow_item_ipv4 *ipv4_mask;
147 const struct rte_flow_item_tcp *tcp_spec;
148 const struct rte_flow_item_tcp *tcp_mask;
149 const struct rte_flow_item_udp *udp_spec;
150 const struct rte_flow_item_udp *udp_mask;
151 const struct rte_flow_item_sctp *sctp_spec;
152 const struct rte_flow_item_sctp *sctp_mask;
153 const struct rte_flow_item_eth *eth_spec;
154 const struct rte_flow_item_eth *eth_mask;
155 const struct rte_flow_item_vlan *vlan_spec;
156 const struct rte_flow_item_vlan *vlan_mask;
157 struct rte_flow_item_eth eth_null;
158 struct rte_flow_item_vlan vlan_null;
161 rte_flow_error_set(error,
162 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
163 NULL, "NULL pattern.");
168 rte_flow_error_set(error, EINVAL,
169 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
170 NULL, "NULL action.");
174 rte_flow_error_set(error, EINVAL,
175 RTE_FLOW_ERROR_TYPE_ATTR,
176 NULL, "NULL attribute.");
180 memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
181 memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
183 #ifdef RTE_LIB_SECURITY
185 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
187 act = next_no_void_action(actions, NULL);
188 if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
189 const void *conf = act->conf;
190 /* check if the next not void item is END */
191 act = next_no_void_action(actions, act);
192 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
193 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
194 rte_flow_error_set(error, EINVAL,
195 RTE_FLOW_ERROR_TYPE_ACTION,
196 act, "Not supported action.");
200 /* get the IP pattern*/
201 item = next_no_void_pattern(pattern, NULL);
202 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
203 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
205 item->type == RTE_FLOW_ITEM_TYPE_END) {
206 rte_flow_error_set(error, EINVAL,
207 RTE_FLOW_ERROR_TYPE_ITEM,
208 item, "IP pattern missing.");
211 item = next_no_void_pattern(pattern, item);
214 filter->proto = IPPROTO_ESP;
215 return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
216 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
220 /* the first not void item can be MAC or IPv4 */
221 item = next_no_void_pattern(pattern, NULL);
223 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
224 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
225 rte_flow_error_set(error, EINVAL,
226 RTE_FLOW_ERROR_TYPE_ITEM,
227 item, "Not supported by ntuple filter");
231 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
232 eth_spec = item->spec;
233 eth_mask = item->mask;
234 /*Not supported last point for range*/
236 rte_flow_error_set(error,
238 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
239 item, "Not supported last point for range");
242 /* if the first item is MAC, the content should be NULL */
243 if ((item->spec || item->mask) &&
244 (memcmp(eth_spec, &eth_null,
245 sizeof(struct rte_flow_item_eth)) ||
246 memcmp(eth_mask, &eth_null,
247 sizeof(struct rte_flow_item_eth)))) {
248 rte_flow_error_set(error, EINVAL,
249 RTE_FLOW_ERROR_TYPE_ITEM,
250 item, "Not supported by ntuple filter");
253 /* check if the next not void item is IPv4 or Vlan */
254 item = next_no_void_pattern(pattern, item);
255 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
256 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
257 rte_flow_error_set(error,
258 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
259 item, "Not supported by ntuple filter");
264 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
265 vlan_spec = item->spec;
266 vlan_mask = item->mask;
267 /*Not supported last point for range*/
269 rte_flow_error_set(error,
270 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
271 item, "Not supported last point for range");
274 /* the content should be NULL */
275 if ((item->spec || item->mask) &&
276 (memcmp(vlan_spec, &vlan_null,
277 sizeof(struct rte_flow_item_vlan)) ||
278 memcmp(vlan_mask, &vlan_null,
279 sizeof(struct rte_flow_item_vlan)))) {
280 rte_flow_error_set(error, EINVAL,
281 RTE_FLOW_ERROR_TYPE_ITEM,
282 item, "Not supported by ntuple filter");
285 /* check if the next not void item is IPv4 */
286 item = next_no_void_pattern(pattern, item);
287 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
288 rte_flow_error_set(error,
289 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
290 item, "Not supported by ntuple filter");
296 /* get the IPv4 info */
297 if (!item->spec || !item->mask) {
298 rte_flow_error_set(error, EINVAL,
299 RTE_FLOW_ERROR_TYPE_ITEM,
300 item, "Invalid ntuple mask");
303 /*Not supported last point for range*/
305 rte_flow_error_set(error, EINVAL,
306 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
307 item, "Not supported last point for range");
311 ipv4_mask = item->mask;
313 * Only support src & dst addresses, protocol,
314 * others should be masked.
316 if (ipv4_mask->hdr.version_ihl ||
317 ipv4_mask->hdr.type_of_service ||
318 ipv4_mask->hdr.total_length ||
319 ipv4_mask->hdr.packet_id ||
320 ipv4_mask->hdr.fragment_offset ||
321 ipv4_mask->hdr.time_to_live ||
322 ipv4_mask->hdr.hdr_checksum) {
323 rte_flow_error_set(error,
324 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
325 item, "Not supported by ntuple filter");
328 if ((ipv4_mask->hdr.src_addr != 0 &&
329 ipv4_mask->hdr.src_addr != UINT32_MAX) ||
330 (ipv4_mask->hdr.dst_addr != 0 &&
331 ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
332 (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
333 ipv4_mask->hdr.next_proto_id != 0)) {
334 rte_flow_error_set(error,
335 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
336 item, "Not supported by ntuple filter");
340 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
341 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
342 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
344 ipv4_spec = item->spec;
345 filter->dst_ip = ipv4_spec->hdr.dst_addr;
346 filter->src_ip = ipv4_spec->hdr.src_addr;
347 filter->proto = ipv4_spec->hdr.next_proto_id;
350 /* check if the next not void item is TCP or UDP */
351 item = next_no_void_pattern(pattern, item);
352 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
353 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
354 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
355 item->type != RTE_FLOW_ITEM_TYPE_END) {
356 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
357 rte_flow_error_set(error, EINVAL,
358 RTE_FLOW_ERROR_TYPE_ITEM,
359 item, "Not supported by ntuple filter");
363 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
364 (!item->spec && !item->mask)) {
368 /* get the TCP/UDP/SCTP info */
369 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
370 (!item->spec || !item->mask)) {
371 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
372 rte_flow_error_set(error, EINVAL,
373 RTE_FLOW_ERROR_TYPE_ITEM,
374 item, "Invalid ntuple mask");
378 /*Not supported last point for range*/
380 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
381 rte_flow_error_set(error, EINVAL,
382 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
383 item, "Not supported last point for range");
387 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
388 tcp_mask = item->mask;
391 * Only support src & dst ports, tcp flags,
392 * others should be masked.
394 if (tcp_mask->hdr.sent_seq ||
395 tcp_mask->hdr.recv_ack ||
396 tcp_mask->hdr.data_off ||
397 tcp_mask->hdr.rx_win ||
398 tcp_mask->hdr.cksum ||
399 tcp_mask->hdr.tcp_urp) {
401 sizeof(struct rte_eth_ntuple_filter));
402 rte_flow_error_set(error, EINVAL,
403 RTE_FLOW_ERROR_TYPE_ITEM,
404 item, "Not supported by ntuple filter");
407 if ((tcp_mask->hdr.src_port != 0 &&
408 tcp_mask->hdr.src_port != UINT16_MAX) ||
409 (tcp_mask->hdr.dst_port != 0 &&
410 tcp_mask->hdr.dst_port != UINT16_MAX)) {
411 rte_flow_error_set(error,
412 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
413 item, "Not supported by ntuple filter");
417 filter->dst_port_mask = tcp_mask->hdr.dst_port;
418 filter->src_port_mask = tcp_mask->hdr.src_port;
419 if (tcp_mask->hdr.tcp_flags == 0xFF) {
420 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
421 } else if (!tcp_mask->hdr.tcp_flags) {
422 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
424 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
425 rte_flow_error_set(error, EINVAL,
426 RTE_FLOW_ERROR_TYPE_ITEM,
427 item, "Not supported by ntuple filter");
431 tcp_spec = item->spec;
432 filter->dst_port = tcp_spec->hdr.dst_port;
433 filter->src_port = tcp_spec->hdr.src_port;
434 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
435 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
436 udp_mask = item->mask;
439 * Only support src & dst ports,
440 * others should be masked.
442 if (udp_mask->hdr.dgram_len ||
443 udp_mask->hdr.dgram_cksum) {
445 sizeof(struct rte_eth_ntuple_filter));
446 rte_flow_error_set(error, EINVAL,
447 RTE_FLOW_ERROR_TYPE_ITEM,
448 item, "Not supported by ntuple filter");
451 if ((udp_mask->hdr.src_port != 0 &&
452 udp_mask->hdr.src_port != UINT16_MAX) ||
453 (udp_mask->hdr.dst_port != 0 &&
454 udp_mask->hdr.dst_port != UINT16_MAX)) {
455 rte_flow_error_set(error,
456 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
457 item, "Not supported by ntuple filter");
461 filter->dst_port_mask = udp_mask->hdr.dst_port;
462 filter->src_port_mask = udp_mask->hdr.src_port;
464 udp_spec = item->spec;
465 filter->dst_port = udp_spec->hdr.dst_port;
466 filter->src_port = udp_spec->hdr.src_port;
467 } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
468 sctp_mask = item->mask;
471 * Only support src & dst ports,
472 * others should be masked.
474 if (sctp_mask->hdr.tag ||
475 sctp_mask->hdr.cksum) {
477 sizeof(struct rte_eth_ntuple_filter));
478 rte_flow_error_set(error, EINVAL,
479 RTE_FLOW_ERROR_TYPE_ITEM,
480 item, "Not supported by ntuple filter");
484 filter->dst_port_mask = sctp_mask->hdr.dst_port;
485 filter->src_port_mask = sctp_mask->hdr.src_port;
487 sctp_spec = item->spec;
488 filter->dst_port = sctp_spec->hdr.dst_port;
489 filter->src_port = sctp_spec->hdr.src_port;
494 /* check if the next not void item is END */
495 item = next_no_void_pattern(pattern, item);
496 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
497 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
498 rte_flow_error_set(error, EINVAL,
499 RTE_FLOW_ERROR_TYPE_ITEM,
500 item, "Not supported by ntuple filter");
507 * n-tuple only supports forwarding,
508 * check if the first not void action is QUEUE.
510 act = next_no_void_action(actions, NULL);
511 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
512 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
513 rte_flow_error_set(error, EINVAL,
514 RTE_FLOW_ERROR_TYPE_ACTION,
515 act, "Not supported action.");
519 ((const struct rte_flow_action_queue *)act->conf)->index;
521 /* check if the next not void item is END */
522 act = next_no_void_action(actions, act);
523 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
524 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
525 rte_flow_error_set(error, EINVAL,
526 RTE_FLOW_ERROR_TYPE_ACTION,
527 act, "Not supported action.");
532 /* must be input direction */
533 if (!attr->ingress) {
534 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
535 rte_flow_error_set(error, EINVAL,
536 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
537 attr, "Only support ingress.");
543 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
544 rte_flow_error_set(error, EINVAL,
545 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
546 attr, "Not support egress.");
551 if (attr->transfer) {
552 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
553 rte_flow_error_set(error, EINVAL,
554 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
555 attr, "No support for transfer.");
559 if (attr->priority > 0xFFFF) {
560 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
561 rte_flow_error_set(error, EINVAL,
562 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
563 attr, "Error priority.");
566 filter->priority = (uint16_t)attr->priority;
567 if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
568 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
569 filter->priority = 1;
574 /* a specific function for txgbe because the flags are specific */
576 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
577 const struct rte_flow_attr *attr,
578 const struct rte_flow_item pattern[],
579 const struct rte_flow_action actions[],
580 struct rte_eth_ntuple_filter *filter,
581 struct rte_flow_error *error)
585 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
590 #ifdef RTE_LIB_SECURITY
591 /* ESP flow not really a flow */
592 if (filter->proto == IPPROTO_ESP)
596 /* txgbe doesn't support tcp flags */
597 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
598 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
599 rte_flow_error_set(error, EINVAL,
600 RTE_FLOW_ERROR_TYPE_ITEM,
601 NULL, "Not supported by ntuple filter");
605 /* txgbe doesn't support many priorities */
606 if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
607 filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
608 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
609 rte_flow_error_set(error, EINVAL,
610 RTE_FLOW_ERROR_TYPE_ITEM,
611 NULL, "Priority not supported by ntuple filter");
615 if (filter->queue >= dev->data->nb_rx_queues) {
616 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
617 rte_flow_error_set(error, EINVAL,
618 RTE_FLOW_ERROR_TYPE_ITEM,
619 NULL, "Not supported by ntuple filter");
623 /* fixed value for txgbe */
624 filter->flags = RTE_5TUPLE_FLAGS;
629 * Parse the rule to see if it is an ethertype rule,
630 * and get the ethertype filter info along the way.
632 * The first not void item can be ETH.
633 * The next not void item must be END.
635 * The first not void action should be QUEUE.
636 * The next not void action should be END.
639 * ETH type 0x0807 0xFFFF
641 * other members in mask and spec should be set to 0x00.
642 * item->last should be NULL.
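 *
 * An application-side sketch of such a rule (illustrative only, names
 * are hypothetical):
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};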
645 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
646 const struct rte_flow_item *pattern,
647 const struct rte_flow_action *actions,
648 struct rte_eth_ethertype_filter *filter,
649 struct rte_flow_error *error)
651 const struct rte_flow_item *item;
652 const struct rte_flow_action *act;
653 const struct rte_flow_item_eth *eth_spec;
654 const struct rte_flow_item_eth *eth_mask;
655 const struct rte_flow_action_queue *act_q;
658 rte_flow_error_set(error, EINVAL,
659 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
660 NULL, "NULL pattern.");
665 rte_flow_error_set(error, EINVAL,
666 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
667 NULL, "NULL action.");
672 rte_flow_error_set(error, EINVAL,
673 RTE_FLOW_ERROR_TYPE_ATTR,
674 NULL, "NULL attribute.");
678 item = next_no_void_pattern(pattern, NULL);
679 /* The first non-void item should be MAC. */
680 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
681 rte_flow_error_set(error, EINVAL,
682 RTE_FLOW_ERROR_TYPE_ITEM,
683 item, "Not supported by ethertype filter");
687 /*Not supported last point for range*/
689 rte_flow_error_set(error, EINVAL,
690 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
691 item, "Not supported last point for range");
695 /* Get the MAC info. */
696 if (!item->spec || !item->mask) {
697 rte_flow_error_set(error, EINVAL,
698 RTE_FLOW_ERROR_TYPE_ITEM,
699 item, "Not supported by ethertype filter");
703 eth_spec = item->spec;
704 eth_mask = item->mask;
706 /* Mask bits of source MAC address must be full of 0.
707 * Mask bits of destination MAC address must be full
708 * of 1 or full of 0.
710 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
711 (!rte_is_zero_ether_addr(&eth_mask->dst) &&
712 !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
713 rte_flow_error_set(error, EINVAL,
714 RTE_FLOW_ERROR_TYPE_ITEM,
715 item, "Invalid ether address mask");
719 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
720 rte_flow_error_set(error, EINVAL,
721 RTE_FLOW_ERROR_TYPE_ITEM,
722 item, "Invalid ethertype mask");
726 /* If mask bits of destination MAC address
727 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
729 if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
730 filter->mac_addr = eth_spec->dst;
731 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
733 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
735 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
737 /* Check if the next non-void item is END. */
738 item = next_no_void_pattern(pattern, item);
739 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
740 rte_flow_error_set(error, EINVAL,
741 RTE_FLOW_ERROR_TYPE_ITEM,
742 item, "Not supported by ethertype filter.");
748 act = next_no_void_action(actions, NULL);
749 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
750 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
751 rte_flow_error_set(error, EINVAL,
752 RTE_FLOW_ERROR_TYPE_ACTION,
753 act, "Not supported action.");
757 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
758 act_q = (const struct rte_flow_action_queue *)act->conf;
759 filter->queue = act_q->index;
761 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
764 /* Check if the next non-void item is END */
765 act = next_no_void_action(actions, act);
766 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
767 rte_flow_error_set(error, EINVAL,
768 RTE_FLOW_ERROR_TYPE_ACTION,
769 act, "Not supported action.");
774 /* Must be input direction */
775 if (!attr->ingress) {
776 rte_flow_error_set(error, EINVAL,
777 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
778 attr, "Only support ingress.");
784 rte_flow_error_set(error, EINVAL,
785 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
786 attr, "Not support egress.");
791 if (attr->transfer) {
792 rte_flow_error_set(error, EINVAL,
793 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
794 attr, "No support for transfer.");
799 if (attr->priority) {
800 rte_flow_error_set(error, EINVAL,
801 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
802 attr, "Not support priority.");
808 rte_flow_error_set(error, EINVAL,
809 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
810 attr, "Not support group.");
818 txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
819 const struct rte_flow_attr *attr,
820 const struct rte_flow_item pattern[],
821 const struct rte_flow_action actions[],
822 struct rte_eth_ethertype_filter *filter,
823 struct rte_flow_error *error)
827 ret = cons_parse_ethertype_filter(attr, pattern,
828 actions, filter, error);
833 if (filter->queue >= dev->data->nb_rx_queues) {
834 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
835 rte_flow_error_set(error, EINVAL,
836 RTE_FLOW_ERROR_TYPE_ITEM,
837 NULL, "queue index much too big");
841 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
842 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
843 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
844 rte_flow_error_set(error, EINVAL,
845 RTE_FLOW_ERROR_TYPE_ITEM,
846 NULL, "IPv4/IPv6 not supported by ethertype filter");
850 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
851 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
852 rte_flow_error_set(error, EINVAL,
853 RTE_FLOW_ERROR_TYPE_ITEM,
854 NULL, "mac compare is unsupported");
858 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
859 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
860 rte_flow_error_set(error, EINVAL,
861 RTE_FLOW_ERROR_TYPE_ITEM,
862 NULL, "drop option is unsupported");
870 * Parse the rule to see if it is a TCP SYN rule,
871 * and get the TCP SYN filter info along the way.
873 * The first not void item must be ETH.
874 * The second not void item must be IPV4 or IPV6.
875 * The third not void item must be TCP.
876 * The next not void item must be END.
878 * The first not void action should be QUEUE.
879 * The next not void action should be END.
883 * IPV4/IPV6 NULL NULL
884 * TCP tcp_flags 0x02 0xFF
886 * other members in mask and spec should be set to 0x00.
887 * item->last should be NULL.
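 *
 * An application-side sketch of such a rule (illustrative only, names
 * are hypothetical):
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};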
890 cons_parse_syn_filter(const struct rte_flow_attr *attr,
891 const struct rte_flow_item pattern[],
892 const struct rte_flow_action actions[],
893 struct rte_eth_syn_filter *filter,
894 struct rte_flow_error *error)
896 const struct rte_flow_item *item;
897 const struct rte_flow_action *act;
898 const struct rte_flow_item_tcp *tcp_spec;
899 const struct rte_flow_item_tcp *tcp_mask;
900 const struct rte_flow_action_queue *act_q;
903 rte_flow_error_set(error, EINVAL,
904 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
905 NULL, "NULL pattern.");
910 rte_flow_error_set(error, EINVAL,
911 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
912 NULL, "NULL action.");
917 rte_flow_error_set(error, EINVAL,
918 RTE_FLOW_ERROR_TYPE_ATTR,
919 NULL, "NULL attribute.");
924 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
925 item = next_no_void_pattern(pattern, NULL);
926 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
927 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
928 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
929 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
930 rte_flow_error_set(error, EINVAL,
931 RTE_FLOW_ERROR_TYPE_ITEM,
932 item, "Not supported by syn filter");
935 /*Not supported last point for range*/
937 rte_flow_error_set(error, EINVAL,
938 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
939 item, "Not supported last point for range");
944 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
945 /* if the item is MAC, the content should be NULL */
946 if (item->spec || item->mask) {
947 rte_flow_error_set(error, EINVAL,
948 RTE_FLOW_ERROR_TYPE_ITEM,
949 item, "Invalid SYN address mask");
953 /* check if the next not void item is IPv4 or IPv6 */
954 item = next_no_void_pattern(pattern, item);
955 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
956 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
957 rte_flow_error_set(error, EINVAL,
958 RTE_FLOW_ERROR_TYPE_ITEM,
959 item, "Not supported by syn filter");
965 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
966 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
967 /* if the item is IP, the content should be NULL */
968 if (item->spec || item->mask) {
969 rte_flow_error_set(error, EINVAL,
970 RTE_FLOW_ERROR_TYPE_ITEM,
971 item, "Invalid SYN mask");
975 /* check if the next not void item is TCP */
976 item = next_no_void_pattern(pattern, item);
977 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
978 rte_flow_error_set(error, EINVAL,
979 RTE_FLOW_ERROR_TYPE_ITEM,
980 item, "Not supported by syn filter");
985 /* Get the TCP info. Only support SYN. */
986 if (!item->spec || !item->mask) {
987 rte_flow_error_set(error, EINVAL,
988 RTE_FLOW_ERROR_TYPE_ITEM,
989 item, "Invalid SYN mask");
992 /*Not supported last point for range*/
994 rte_flow_error_set(error, EINVAL,
995 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
996 item, "Not supported last point for range");
1000 tcp_spec = item->spec;
1001 tcp_mask = item->mask;
1002 if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
1003 tcp_mask->hdr.src_port ||
1004 tcp_mask->hdr.dst_port ||
1005 tcp_mask->hdr.sent_seq ||
1006 tcp_mask->hdr.recv_ack ||
1007 tcp_mask->hdr.data_off ||
1008 tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
1009 tcp_mask->hdr.rx_win ||
1010 tcp_mask->hdr.cksum ||
1011 tcp_mask->hdr.tcp_urp) {
1012 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1013 rte_flow_error_set(error, EINVAL,
1014 RTE_FLOW_ERROR_TYPE_ITEM,
1015 item, "Not supported by syn filter");
1019 /* check if the next not void item is END */
1020 item = next_no_void_pattern(pattern, item);
1021 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1022 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1023 rte_flow_error_set(error, EINVAL,
1024 RTE_FLOW_ERROR_TYPE_ITEM,
1025 item, "Not supported by syn filter");
1029 /* check if the first not void action is QUEUE. */
1030 act = next_no_void_action(actions, NULL);
1031 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1032 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033 rte_flow_error_set(error, EINVAL,
1034 RTE_FLOW_ERROR_TYPE_ACTION,
1035 act, "Not supported action.");
1039 act_q = (const struct rte_flow_action_queue *)act->conf;
1040 filter->queue = act_q->index;
1041 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
1042 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1043 rte_flow_error_set(error, EINVAL,
1044 RTE_FLOW_ERROR_TYPE_ACTION,
1045 act, "Not supported action.");
1049 /* check if the next not void item is END */
1050 act = next_no_void_action(actions, act);
1051 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1052 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1053 rte_flow_error_set(error, EINVAL,
1054 RTE_FLOW_ERROR_TYPE_ACTION,
1055 act, "Not supported action.");
1060 /* must be input direction */
1061 if (!attr->ingress) {
1062 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1063 rte_flow_error_set(error, EINVAL,
1064 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1065 attr, "Only support ingress.");
1071 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1072 rte_flow_error_set(error, EINVAL,
1073 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1074 attr, "Not support egress.");
1079 if (attr->transfer) {
1080 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1081 rte_flow_error_set(error, EINVAL,
1082 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1083 attr, "No support for transfer.");
1087 /* Support 2 priorities, the lowest or highest. */
1088 if (!attr->priority) {
1089 filter->hig_pri = 0;
1090 } else if (attr->priority == (uint32_t)~0U) {
1091 filter->hig_pri = 1;
1093 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1094 rte_flow_error_set(error, EINVAL,
1095 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1096 attr, "Not support priority.");
1104 txgbe_parse_syn_filter(struct rte_eth_dev *dev,
1105 const struct rte_flow_attr *attr,
1106 const struct rte_flow_item pattern[],
1107 const struct rte_flow_action actions[],
1108 struct rte_eth_syn_filter *filter,
1109 struct rte_flow_error *error)
1113 ret = cons_parse_syn_filter(attr, pattern,
1114 actions, filter, error);
1116 if (filter->queue >= dev->data->nb_rx_queues)
1126 * Parse the rule to see if it is an L2 tunnel rule,
1127 * and get the L2 tunnel filter info along the way.
1128 * Only E-tag is supported now.
1130 * The first not void item can be E_TAG.
1131 * The next not void item must be END.
1133 * The first not void action should be VF or PF.
1134 * The next not void action should be END.
1138 *	e_cid_base	0x309	0xFFF
1140 * other members in mask and spec should be set to 0x00.
1141 * item->last should be NULL.
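 *
 * An application-side sketch of such a rule (illustrative only, names
 * are hypothetical; the grp field is assumed to sit at bits 13:12 of
 * rsvd_grp_ecid_b, matching the 0x3FFF mask checked below):
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};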
1144 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1145 const struct rte_flow_attr *attr,
1146 const struct rte_flow_item pattern[],
1147 const struct rte_flow_action actions[],
1148 struct txgbe_l2_tunnel_conf *filter,
1149 struct rte_flow_error *error)
1151 const struct rte_flow_item *item;
1152 const struct rte_flow_item_e_tag *e_tag_spec;
1153 const struct rte_flow_item_e_tag *e_tag_mask;
1154 const struct rte_flow_action *act;
1155 const struct rte_flow_action_vf *act_vf;
1156 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1159 rte_flow_error_set(error, EINVAL,
1160 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1161 NULL, "NULL pattern.");
1166 rte_flow_error_set(error, EINVAL,
1167 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1168 NULL, "NULL action.");
1173 rte_flow_error_set(error, EINVAL,
1174 RTE_FLOW_ERROR_TYPE_ATTR,
1175 NULL, "NULL attribute.");
1179 /* The first not void item should be e-tag. */
1180 item = next_no_void_pattern(pattern, NULL);
1181 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1182 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1183 rte_flow_error_set(error, EINVAL,
1184 RTE_FLOW_ERROR_TYPE_ITEM,
1185 item, "Not supported by L2 tunnel filter");
1189 if (!item->spec || !item->mask) {
1190 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1191 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1192 item, "Not supported by L2 tunnel filter");
1196 /*Not supported last point for range*/
1198 rte_flow_error_set(error, EINVAL,
1199 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1200 item, "Not supported last point for range");
1204 e_tag_spec = item->spec;
1205 e_tag_mask = item->mask;
1207 /* Only care about GRP and E cid base. */
1208 if (e_tag_mask->epcp_edei_in_ecid_b ||
1209 e_tag_mask->in_ecid_e ||
1210 e_tag_mask->ecid_e ||
1211 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1212 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1213 rte_flow_error_set(error, EINVAL,
1214 RTE_FLOW_ERROR_TYPE_ITEM,
1215 item, "Not supported by L2 tunnel filter");
1219 filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1221 * grp and e_cid_base are bit fields and only use 14 bits.
1222 * e-tag id is taken as little endian by HW.
1224 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1226 /* check if the next not void item is END */
1227 item = next_no_void_pattern(pattern, item);
1228 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1229 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1230 rte_flow_error_set(error, EINVAL,
1231 RTE_FLOW_ERROR_TYPE_ITEM,
1232 item, "Not supported by L2 tunnel filter");
1237 /* must be input direction */
1238 if (!attr->ingress) {
1239 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1240 rte_flow_error_set(error, EINVAL,
1241 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1242 attr, "Only support ingress.");
1248 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1249 rte_flow_error_set(error, EINVAL,
1250 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1251 attr, "Not support egress.");
1256 if (attr->transfer) {
1257 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1258 rte_flow_error_set(error, EINVAL,
1259 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1260 attr, "No support for transfer.");
1265 if (attr->priority) {
1266 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1267 rte_flow_error_set(error, EINVAL,
1268 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1269 attr, "Not support priority.");
1273 /* check if the first not void action is VF or PF. */
1274 act = next_no_void_action(actions, NULL);
1275 if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1276 act->type != RTE_FLOW_ACTION_TYPE_PF) {
1277 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1278 rte_flow_error_set(error, EINVAL,
1279 RTE_FLOW_ERROR_TYPE_ACTION,
1280 act, "Not supported action.");
1284 if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1285 act_vf = (const struct rte_flow_action_vf *)act->conf;
1286 filter->pool = act_vf->id;
1288 filter->pool = pci_dev->max_vfs;
1291 /* check if the next not void item is END */
1292 act = next_no_void_action(actions, act);
1293 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1294 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1295 rte_flow_error_set(error, EINVAL,
1296 RTE_FLOW_ERROR_TYPE_ACTION,
1297 act, "Not supported action.");
1305 txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1306 const struct rte_flow_attr *attr,
1307 const struct rte_flow_item pattern[],
1308 const struct rte_flow_action actions[],
1309 struct txgbe_l2_tunnel_conf *l2_tn_filter,
1310 struct rte_flow_error *error)
1313 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1316 ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1317 actions, l2_tn_filter, error);
1319 vf_num = pci_dev->max_vfs;
1321 if (l2_tn_filter->pool > vf_num)
1327 /* Parse to get the attr and action info of flow director rule. */
1329 txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1330 const struct rte_flow_action actions[],
1331 struct txgbe_fdir_rule *rule,
1332 struct rte_flow_error *error)
1334 const struct rte_flow_action *act;
1335 const struct rte_flow_action_queue *act_q;
1336 const struct rte_flow_action_mark *mark;
1339 /* must be input direction */
1340 if (!attr->ingress) {
1341 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1342 rte_flow_error_set(error, EINVAL,
1343 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1344 attr, "Only support ingress.");
1350 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1351 rte_flow_error_set(error, EINVAL,
1352 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1353 attr, "Not support egress.");
1358 if (attr->transfer) {
1359 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1360 rte_flow_error_set(error, EINVAL,
1361 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1362 attr, "No support for transfer.");
1367 if (attr->priority) {
1368 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1369 rte_flow_error_set(error, EINVAL,
1370 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1371 attr, "Not support priority.");
1375 /* check if the first not void action is QUEUE or DROP. */
1376 act = next_no_void_action(actions, NULL);
1377 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1378 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1379 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1380 rte_flow_error_set(error, EINVAL,
1381 RTE_FLOW_ERROR_TYPE_ACTION,
1382 act, "Not supported action.");
1386 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1387 act_q = (const struct rte_flow_action_queue *)act->conf;
1388 rule->queue = act_q->index;
1390 /* signature mode does not support drop action. */
1391 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1392 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1393 rte_flow_error_set(error, EINVAL,
1394 RTE_FLOW_ERROR_TYPE_ACTION,
1395 act, "Not supported action.");
1398 rule->fdirflags = TXGBE_FDIRPICMD_DROP;
1401 /* check if the next not void item is MARK */
1402 act = next_no_void_action(actions, act);
1403 if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1404 act->type != RTE_FLOW_ACTION_TYPE_END) {
1405 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1406 rte_flow_error_set(error, EINVAL,
1407 RTE_FLOW_ERROR_TYPE_ACTION,
1408 act, "Not supported action.");
1414 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1415 mark = (const struct rte_flow_action_mark *)act->conf;
1416 rule->soft_id = mark->id;
1417 act = next_no_void_action(actions, act);
1420 /* check if the next not void item is END */
1421 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1422 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1423 rte_flow_error_set(error, EINVAL,
1424 RTE_FLOW_ERROR_TYPE_ACTION,
1425 act, "Not supported action.");
1432 /* search the next non-void pattern item and skip fuzzy items */
1434 const struct rte_flow_item *next_no_fuzzy_pattern(
1435 const struct rte_flow_item pattern[],
1436 const struct rte_flow_item *cur)
1438 const struct rte_flow_item *next =
1439 next_no_void_pattern(pattern, cur);
1441 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1443 next = next_no_void_pattern(pattern, next);
1447 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1449 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1450 const struct rte_flow_item *item;
1451 uint32_t sh, lh, mh;
1456 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1459 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1491 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1492 * and get the flow director filter info along the way.
1493 * UDP/TCP/SCTP PATTERN:
1494 * The first not void item can be ETH or IPV4 or IPV6
1495 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1496 * The next not void item could be UDP or TCP or SCTP (optional)
1497 * The next not void item could be RAW (for flexbyte, optional)
1498 * The next not void item must be END.
1499 * A Fuzzy Match pattern can appear at any place before END.
1500 * Fuzzy Match is optional for IPV4 but is required for IPV6
1502 * The first not void item must be ETH.
1503 * The second not void item must be MAC VLAN.
1504 * The next not void item must be END.
1506 * The first not void action should be QUEUE or DROP.
1507 * The second not void optional action should be MARK,
1508 * mark_id is a uint32_t number.
1509 * The next not void action should be END.
1510 * UDP/TCP/SCTP pattern example:
1513 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1514 * dst_addr 192.167.3.50 0xFFFFFFFF
1515 * UDP/TCP/SCTP src_port 80 0xFFFF
1516 * dst_port 80 0xFFFF
1517 * FLEX relative 0 0x1
1520 * offset 12 0xFFFFFFFF
1523 * pattern[0] 0x86 0xFF
1524 * pattern[1] 0xDD 0xFF
1526 * MAC VLAN pattern example:
1529 {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
1530 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
1531 * MAC VLAN tci 0x2016 0xEFFF
1533 * Other members in mask and spec should be set to 0x00.
1534 * Item->last should be NULL.
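 *
 * An application-side sketch of the flexbyte (RAW) part of such a rule
 * (illustrative only, names are hypothetical):
 *
 *	uint8_t flex_pattern[] = { 0x86, 0xDD };
 *	uint8_t flex_pattern_mask[] = { 0xFF, 0xFF };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0, .search = 0,
 *		.offset = 12, .limit = 0, .length = 2,
 *		.pattern = flex_pattern,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1, .search = 1, .offset = 0xffffffff,
 *		.limit = 0xffff, .length = 0xffff,
 *		.pattern = flex_pattern_mask,
 *	};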
1537 txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
1538 const struct rte_flow_attr *attr,
1539 const struct rte_flow_item pattern[],
1540 const struct rte_flow_action actions[],
1541 struct txgbe_fdir_rule *rule,
1542 struct rte_flow_error *error)
1544 const struct rte_flow_item *item;
1545 const struct rte_flow_item_eth *eth_mask;
1546 const struct rte_flow_item_ipv4 *ipv4_spec;
1547 const struct rte_flow_item_ipv4 *ipv4_mask;
1548 const struct rte_flow_item_ipv6 *ipv6_spec;
1549 const struct rte_flow_item_ipv6 *ipv6_mask;
1550 const struct rte_flow_item_tcp *tcp_spec;
1551 const struct rte_flow_item_tcp *tcp_mask;
1552 const struct rte_flow_item_udp *udp_spec;
1553 const struct rte_flow_item_udp *udp_mask;
1554 const struct rte_flow_item_sctp *sctp_spec;
1555 const struct rte_flow_item_sctp *sctp_mask;
1556 const struct rte_flow_item_raw *raw_mask;
1557 const struct rte_flow_item_raw *raw_spec;
1562 rte_flow_error_set(error, EINVAL,
1563 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1564 NULL, "NULL pattern.");
1569 rte_flow_error_set(error, EINVAL,
1570 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1571 NULL, "NULL action.");
1576 rte_flow_error_set(error, EINVAL,
1577 RTE_FLOW_ERROR_TYPE_ATTR,
1578 NULL, "NULL attribute.");
1583 * Some fields may not be provided. Set spec to 0 and mask to default
1584 * value. So, we need not do anything for the not provided fields later.
1586 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1587 memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
1588 rule->mask.vlan_tci_mask = 0;
1589 rule->mask.flex_bytes_mask = 0;
1592 * The first not void item should be
1593 * MAC or IPv4 or TCP or UDP or SCTP.
1595 item = next_no_fuzzy_pattern(pattern, NULL);
1596 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1597 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1598 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1599 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1600 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1601 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1602 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1603 rte_flow_error_set(error, EINVAL,
1604 RTE_FLOW_ERROR_TYPE_ITEM,
1605 item, "Not supported by fdir filter");
1609 if (signature_match(pattern))
1610 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1612 rule->mode = RTE_FDIR_MODE_PERFECT;
1614 /*Not supported last point for range*/
1616 rte_flow_error_set(error, EINVAL,
1617 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1618 item, "Not supported last point for range");
1622 /* Get the MAC info. */
1623 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1625 * Only support vlan and dst MAC address,
1626 * others should be masked.
1628 if (item->spec && !item->mask) {
1629 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1630 rte_flow_error_set(error, EINVAL,
1631 RTE_FLOW_ERROR_TYPE_ITEM,
1632 item, "Not supported by fdir filter");
1637 rule->b_mask = TRUE;
1638 eth_mask = item->mask;
1640 /* Ether type should be masked. */
1641 if (eth_mask->type ||
1642 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1643 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1644 rte_flow_error_set(error, EINVAL,
1645 RTE_FLOW_ERROR_TYPE_ITEM,
1646 item, "Not supported by fdir filter");
1650 /* If ethernet has meaning, it means MAC VLAN mode. */
1651 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1654 * src MAC address must be masked out,
1655 * and masking the dst MAC address is not supported.
1657 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1658 if (eth_mask->src.addr_bytes[j] ||
1659 eth_mask->dst.addr_bytes[j] != 0xFF) {
1661 sizeof(struct txgbe_fdir_rule));
1662 rte_flow_error_set(error, EINVAL,
1663 RTE_FLOW_ERROR_TYPE_ITEM,
1664 item, "Not supported by fdir filter");
1669 /* When no VLAN, considered as full mask. */
1670 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1672 /*** If both spec and mask are NULL,
1673 * it means we don't care about ETH.
1678 * Check if the next not void item is vlan or ipv4.
1679 * IPv6 is not supported.
1681 item = next_no_fuzzy_pattern(pattern, item);
1682 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1683 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1684 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1685 rte_flow_error_set(error, EINVAL,
1686 RTE_FLOW_ERROR_TYPE_ITEM,
1687 item, "Not supported by fdir filter");
1691 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1692 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1693 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1694 rte_flow_error_set(error, EINVAL,
1695 RTE_FLOW_ERROR_TYPE_ITEM,
1696 item, "Not supported by fdir filter");
1702 /* Get the IPV4 info. */
1703 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1705 * Set the flow type even if there's no content
1706 * as we must have a flow type.
1708 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
1709 ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
1710 /*Not supported last point for range*/
1712 rte_flow_error_set(error, EINVAL,
1713 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1714 item, "Not supported last point for range");
1718 * Only care about src & dst addresses,
1719 * others should be masked.
1722 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1723 rte_flow_error_set(error, EINVAL,
1724 RTE_FLOW_ERROR_TYPE_ITEM,
1725 item, "Not supported by fdir filter");
1728 rule->b_mask = TRUE;
1729 ipv4_mask = item->mask;
1730 if (ipv4_mask->hdr.version_ihl ||
1731 ipv4_mask->hdr.type_of_service ||
1732 ipv4_mask->hdr.total_length ||
1733 ipv4_mask->hdr.packet_id ||
1734 ipv4_mask->hdr.fragment_offset ||
1735 ipv4_mask->hdr.time_to_live ||
1736 ipv4_mask->hdr.next_proto_id ||
1737 ipv4_mask->hdr.hdr_checksum) {
1738 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1739 rte_flow_error_set(error, EINVAL,
1740 RTE_FLOW_ERROR_TYPE_ITEM,
1741 item, "Not supported by fdir filter");
1744 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1745 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1748 rule->b_spec = TRUE;
1749 ipv4_spec = item->spec;
1750 rule->input.dst_ip[0] =
1751 ipv4_spec->hdr.dst_addr;
1752 rule->input.src_ip[0] =
1753 ipv4_spec->hdr.src_addr;
1757 * Check if the next not void item is
1758 * TCP or UDP or SCTP or END.
1760 item = next_no_fuzzy_pattern(pattern, item);
1761 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1762 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1763 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1764 item->type != RTE_FLOW_ITEM_TYPE_END &&
1765 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1766 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1767 rte_flow_error_set(error, EINVAL,
1768 RTE_FLOW_ERROR_TYPE_ITEM,
1769 item, "Not supported by fdir filter");
1774 /* Get the IPV6 info. */
1775 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1777 * Set the flow type even if there's no content
1778 * as we must have a flow type.
1780 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
1781 ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
1784 * 1. must be a signature match
1785 * 2. does not support 'last'
1786 * 3. mask must not be NULL
1788 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1791 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1792 rte_flow_error_set(error, EINVAL,
1793 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1794 item, "Not supported last point for range");
1798 rule->b_mask = TRUE;
1799 ipv6_mask = item->mask;
1800 if (ipv6_mask->hdr.vtc_flow ||
1801 ipv6_mask->hdr.payload_len ||
1802 ipv6_mask->hdr.proto ||
1803 ipv6_mask->hdr.hop_limits) {
1804 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1805 rte_flow_error_set(error, EINVAL,
1806 RTE_FLOW_ERROR_TYPE_ITEM,
1807 item, "Not supported by fdir filter");
1811 /* check src addr mask */
1812 for (j = 0; j < 16; j++) {
1813 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1814 rule->mask.src_ipv6_mask |= 1 << j;
1815 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1816 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1817 rte_flow_error_set(error, EINVAL,
1818 RTE_FLOW_ERROR_TYPE_ITEM,
1819 item, "Not supported by fdir filter");
1824 /* check dst addr mask */
1825 for (j = 0; j < 16; j++) {
1826 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1827 rule->mask.dst_ipv6_mask |= 1 << j;
1828 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1829 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1830 rte_flow_error_set(error, EINVAL,
1831 RTE_FLOW_ERROR_TYPE_ITEM,
1832 item, "Not supported by fdir filter");
1838 rule->b_spec = TRUE;
1839 ipv6_spec = item->spec;
1840 rte_memcpy(rule->input.src_ip,
1841 ipv6_spec->hdr.src_addr, 16);
1842 rte_memcpy(rule->input.dst_ip,
1843 ipv6_spec->hdr.dst_addr, 16);
1847 * Check if the next not void item is
1848 * TCP or UDP or SCTP or END.
1850 item = next_no_fuzzy_pattern(pattern, item);
1851 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1852 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1853 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1854 item->type != RTE_FLOW_ITEM_TYPE_END &&
1855 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1856 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1857 rte_flow_error_set(error, EINVAL,
1858 RTE_FLOW_ERROR_TYPE_ITEM,
1859 item, "Not supported by fdir filter");
1864 /* Get the TCP info. */
1865 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1867 * Set the flow type even if there's no content
1868 * as we must have a flow type.
1870 rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
1871 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
1872 /*Not supported last point for range*/
1874 rte_flow_error_set(error, EINVAL,
1875 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1876 item, "Not supported last point for range");
1880 * Only care about src & dst ports,
1881 * others should be masked.
1884 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1885 rte_flow_error_set(error, EINVAL,
1886 RTE_FLOW_ERROR_TYPE_ITEM,
1887 item, "Not supported by fdir filter");
1890 rule->b_mask = TRUE;
1891 tcp_mask = item->mask;
1892 if (tcp_mask->hdr.sent_seq ||
1893 tcp_mask->hdr.recv_ack ||
1894 tcp_mask->hdr.data_off ||
1895 tcp_mask->hdr.tcp_flags ||
1896 tcp_mask->hdr.rx_win ||
1897 tcp_mask->hdr.cksum ||
1898 tcp_mask->hdr.tcp_urp) {
1899 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1900 rte_flow_error_set(error, EINVAL,
1901 RTE_FLOW_ERROR_TYPE_ITEM,
1902 item, "Not supported by fdir filter");
1905 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1906 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1909 rule->b_spec = TRUE;
1910 tcp_spec = item->spec;
1911 rule->input.src_port =
1912 tcp_spec->hdr.src_port;
1913 rule->input.dst_port =
1914 tcp_spec->hdr.dst_port;
1917 item = next_no_fuzzy_pattern(pattern, item);
1918 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1919 item->type != RTE_FLOW_ITEM_TYPE_END) {
1920 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1921 rte_flow_error_set(error, EINVAL,
1922 RTE_FLOW_ERROR_TYPE_ITEM,
1923 item, "Not supported by fdir filter");
1928 /* Get the UDP info */
1929 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1931 * Set the flow type even if there's no content
1932 * as we must have a flow type.
1934 rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
1935 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
1936 /*Not supported last point for range*/
1938 rte_flow_error_set(error, EINVAL,
1939 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1940 item, "Not supported last point for range");
1944 * Only care about src & dst ports,
1945 * others should be masked.
1948 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1949 rte_flow_error_set(error, EINVAL,
1950 RTE_FLOW_ERROR_TYPE_ITEM,
1951 item, "Not supported by fdir filter");
1954 rule->b_mask = TRUE;
1955 udp_mask = item->mask;
1956 if (udp_mask->hdr.dgram_len ||
1957 udp_mask->hdr.dgram_cksum) {
1958 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1959 rte_flow_error_set(error, EINVAL,
1960 RTE_FLOW_ERROR_TYPE_ITEM,
1961 item, "Not supported by fdir filter");
1964 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1965 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1968 rule->b_spec = TRUE;
1969 udp_spec = item->spec;
1970 rule->input.src_port =
1971 udp_spec->hdr.src_port;
1972 rule->input.dst_port =
1973 udp_spec->hdr.dst_port;
1976 item = next_no_fuzzy_pattern(pattern, item);
1977 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1978 item->type != RTE_FLOW_ITEM_TYPE_END) {
1979 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1980 rte_flow_error_set(error, EINVAL,
1981 RTE_FLOW_ERROR_TYPE_ITEM,
1982 item, "Not supported by fdir filter");
1987 /* Get the SCTP info */
1988 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1990 * Set the flow type even if there's no content
1991 * as we must have a flow type.
1993 rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
1994 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
1995 /*Not supported last point for range*/
1997 rte_flow_error_set(error, EINVAL,
1998 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1999 item, "Not supported last point for range");
2004 * Only care about src & dst ports,
2005 * others should be masked.
2008 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2009 rte_flow_error_set(error, EINVAL,
2010 RTE_FLOW_ERROR_TYPE_ITEM,
2011 item, "Not supported by fdir filter");
2014 rule->b_mask = TRUE;
2015 sctp_mask = item->mask;
2016 if (sctp_mask->hdr.tag ||
2017 sctp_mask->hdr.cksum) {
2018 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2019 rte_flow_error_set(error, EINVAL,
2020 RTE_FLOW_ERROR_TYPE_ITEM,
2021 item, "Not supported by fdir filter");
2024 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2025 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2028 rule->b_spec = TRUE;
2029 sctp_spec = item->spec;
2030 rule->input.src_port =
2031 sctp_spec->hdr.src_port;
2032 rule->input.dst_port =
2033 sctp_spec->hdr.dst_port;
2035 /* otherwise, even the SCTP ports are not supported */
2036 sctp_mask = item->mask;
2038 (sctp_mask->hdr.src_port ||
2039 sctp_mask->hdr.dst_port ||
2040 sctp_mask->hdr.tag ||
2041 sctp_mask->hdr.cksum)) {
2042 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2043 rte_flow_error_set(error, EINVAL,
2044 RTE_FLOW_ERROR_TYPE_ITEM,
2045 item, "Not supported by fdir filter");
2049 item = next_no_fuzzy_pattern(pattern, item);
2050 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2051 item->type != RTE_FLOW_ITEM_TYPE_END) {
2052 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2053 rte_flow_error_set(error, EINVAL,
2054 RTE_FLOW_ERROR_TYPE_ITEM,
2055 item, "Not supported by fdir filter");
2060 /* Get the flex byte info */
2061 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2062 /* Not supported last point for range*/
2064 rte_flow_error_set(error, EINVAL,
2065 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2066 item, "Not supported last point for range");
2069 /* mask should not be null */
2070 if (!item->mask || !item->spec) {
2071 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2072 rte_flow_error_set(error, EINVAL,
2073 RTE_FLOW_ERROR_TYPE_ITEM,
2074 item, "Not supported by fdir filter");
2078 raw_mask = item->mask;
2081 if (raw_mask->relative != 0x1 ||
2082 raw_mask->search != 0x1 ||
2083 raw_mask->reserved != 0x0 ||
2084 (uint32_t)raw_mask->offset != 0xffffffff ||
2085 raw_mask->limit != 0xffff ||
2086 raw_mask->length != 0xffff) {
2087 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2088 rte_flow_error_set(error, EINVAL,
2089 RTE_FLOW_ERROR_TYPE_ITEM,
2090 item, "Not supported by fdir filter");
2094 raw_spec = item->spec;
2097 if (raw_spec->relative != 0 ||
2098 raw_spec->search != 0 ||
2099 raw_spec->reserved != 0 ||
2100 raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
2101 raw_spec->offset % 2 ||
2102 raw_spec->limit != 0 ||
2103 raw_spec->length != 2 ||
2104 /* pattern can't be 0xffff */
2105 (raw_spec->pattern[0] == 0xff &&
2106 raw_spec->pattern[1] == 0xff)) {
2107 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2108 rte_flow_error_set(error, EINVAL,
2109 RTE_FLOW_ERROR_TYPE_ITEM,
2110 item, "Not supported by fdir filter");
2114 /* check pattern mask */
2115 if (raw_mask->pattern[0] != 0xff ||
2116 raw_mask->pattern[1] != 0xff) {
2117 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2118 rte_flow_error_set(error, EINVAL,
2119 RTE_FLOW_ERROR_TYPE_ITEM,
2120 item, "Not supported by fdir filter");
2124 rule->mask.flex_bytes_mask = 0xffff;
2125 rule->input.flex_bytes =
2126 (((uint16_t)raw_spec->pattern[1]) << 8) |
2127 raw_spec->pattern[0];
2128 rule->flex_bytes_offset = raw_spec->offset;
2131 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2132 /* check if the next not void item is END */
2133 item = next_no_fuzzy_pattern(pattern, item);
2134 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2135 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2136 rte_flow_error_set(error, EINVAL,
2137 RTE_FLOW_ERROR_TYPE_ITEM,
2138 item, "Not supported by fdir filter");
2143 rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
2145 return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2149 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2150 * and get the flow director filter info along the way.
2152 * The first not void item must be ETH.
2153 * The second not void item must be IPV4/ IPV6.
2154 * The third not void item must be NVGRE.
2155 * The next not void item must be END.
2157 * The first not void item must be ETH.
2158 * The second not void item must be IPV4/ IPV6.
2159 * The third not void item must be NVGRE.
2160 * The next not void item must be END.
2162 * The first not void action should be QUEUE or DROP.
2163 * The second not void optional action should be MARK,
2164 * mark_id is a uint32_t number.
2165 * The next not void action should be END.
2166 * VxLAN pattern example:
2169 * IPV4/IPV6 NULL NULL
2171 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2172 * MAC VLAN tci 0x2016 0xEFFF
2174 * NVGRE pattern example:
2177 * IPV4/IPV6 NULL NULL
2178 * NVGRE protocol 0x6558 0xFFFF
2179 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2180 * MAC VLAN tci 0x2016 0xEFFF
2182 * Other members in mask and spec should be set to 0x00.
2183 * item->last should be NULL.
2186 txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2187 const struct rte_flow_item pattern[],
2188 const struct rte_flow_action actions[],
2189 struct txgbe_fdir_rule *rule,
2190 struct rte_flow_error *error)
2192 const struct rte_flow_item *item;
2193 const struct rte_flow_item_eth *eth_mask;
2197 rte_flow_error_set(error, EINVAL,
2198 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2199 NULL, "NULL pattern.");
2204 rte_flow_error_set(error, EINVAL,
2205 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2206 NULL, "NULL action.");
2211 rte_flow_error_set(error, EINVAL,
2212 RTE_FLOW_ERROR_TYPE_ATTR,
2213 NULL, "NULL attribute.");
2218 * Some fields may not be provided. Set spec to 0 and mask to default
2219 * values, so we need not do anything for the omitted fields later.
2221 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2222 memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
2223 rule->mask.vlan_tci_mask = 0;
2226 * The first not void item should be
2227 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2229 item = next_no_void_pattern(pattern, NULL);
2230 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2231 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2232 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2233 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2234 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2235 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2236 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2237 rte_flow_error_set(error, EINVAL,
2238 RTE_FLOW_ERROR_TYPE_ITEM,
2239 item, "Not supported by fdir filter");
2243 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
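/*
 * From here on the parser walks the tunnel pattern documented above: the
 * outer ETH, IPv4/IPv6 and UDP items only describe the protocol stack and
 * must carry no spec/mask; the inner MAC and optional VLAN items are the
 * ones that provide the actual match fields.
 */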
2246 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2247 /* Only used to describe the protocol stack. */
2248 if (item->spec || item->mask) {
2249 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2250 rte_flow_error_set(error, EINVAL,
2251 RTE_FLOW_ERROR_TYPE_ITEM,
2252 item, "Not supported by fdir filter");
2255 /* Range via item->last is not supported */
2257 rte_flow_error_set(error, EINVAL,
2258 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2259 item, "Not supported last point for range");
2263 /* Check if the next not void item is IPv4 or IPv6. */
2264 item = next_no_void_pattern(pattern, item);
2265 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2266 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2267 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2268 rte_flow_error_set(error, EINVAL,
2269 RTE_FLOW_ERROR_TYPE_ITEM,
2270 item, "Not supported by fdir filter");
2276 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2277 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2278 /* Only used to describe the protocol stack. */
2279 if (item->spec || item->mask) {
2280 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2281 rte_flow_error_set(error, EINVAL,
2282 RTE_FLOW_ERROR_TYPE_ITEM,
2283 item, "Not supported by fdir filter");
2286 /* Range via item->last is not supported */
2288 rte_flow_error_set(error, EINVAL,
2289 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2290 item, "Not supported last point for range");
2294 /* Check if the next not void item is UDP or NVGRE. */
2295 item = next_no_void_pattern(pattern, item);
2296 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2297 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2298 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2299 rte_flow_error_set(error, EINVAL,
2300 RTE_FLOW_ERROR_TYPE_ITEM,
2301 item, "Not supported by fdir filter");
2307 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2308 /* Only used to describe the protocol stack. */
2309 if (item->spec || item->mask) {
2310 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2311 rte_flow_error_set(error, EINVAL,
2312 RTE_FLOW_ERROR_TYPE_ITEM,
2313 item, "Not supported by fdir filter");
2316 /* Range via item->last is not supported */
2318 rte_flow_error_set(error, EINVAL,
2319 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2320 item, "Not supported last point for range");
2324 /* Check if the next not void item is VxLAN. */
2325 item = next_no_void_pattern(pattern, item);
2326 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2327 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2328 rte_flow_error_set(error, EINVAL,
2329 RTE_FLOW_ERROR_TYPE_ITEM,
2330 item, "Not supported by fdir filter");
2335 /* Check if the next not void item is the inner MAC (ETH). */
2336 item = next_no_void_pattern(pattern, item);
2337 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2338 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2339 rte_flow_error_set(error, EINVAL,
2340 RTE_FLOW_ERROR_TYPE_ITEM,
2341 item, "Not supported by fdir filter");
2346 * Only VLAN and dst MAC address are supported;
2347 * other fields should be masked out.
2351 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2352 rte_flow_error_set(error, EINVAL,
2353 RTE_FLOW_ERROR_TYPE_ITEM,
2354 item, "Not supported by fdir filter");
2357 /* Range via item->last is not supported */
2359 rte_flow_error_set(error, EINVAL,
2360 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2361 item, "Not supported last point for range");
2364 rule->b_mask = TRUE;
2365 eth_mask = item->mask;
2367 /* Ether type should be masked. */
2368 if (eth_mask->type) {
2369 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2370 rte_flow_error_set(error, EINVAL,
2371 RTE_FLOW_ERROR_TYPE_ITEM,
2372 item, "Not supported by fdir filter");
2376 /* src MAC address should be masked. */
2377 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2378 if (eth_mask->src.addr_bytes[j]) {
2380 sizeof(struct txgbe_fdir_rule));
2381 rte_flow_error_set(error, EINVAL,
2382 RTE_FLOW_ERROR_TYPE_ITEM,
2383 item, "Not supported by fdir filter");
2387 rule->mask.mac_addr_byte_mask = 0;
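/*
 * mac_addr_byte_mask is a bitmap with one bit per byte of the inner
 * destination MAC: a byte whose mask is 0xFF sets the corresponding bit,
 * while any partially masked byte is rejected below.
 */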
2388 for (j = 0; j < ETH_ADDR_LEN; j++) {
2389 /* It's a per-byte mask. */
2390 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2391 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2392 } else if (eth_mask->dst.addr_bytes[j]) {
2393 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2394 rte_flow_error_set(error, EINVAL,
2395 RTE_FLOW_ERROR_TYPE_ITEM,
2396 item, "Not supported by fdir filter");
2401 /* When no VLAN is given, treat it as a full mask. */
2402 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2405 * Check if the next not void item is vlan or ipv4.
2406 * IPv6 is not supported.
2408 item = next_no_void_pattern(pattern, item);
2409 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
2410 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2411 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2412 rte_flow_error_set(error, EINVAL,
2413 RTE_FLOW_ERROR_TYPE_ITEM,
2414 item, "Not supported by fdir filter");
2417 /* Range via item->last is not supported */
2419 rte_flow_error_set(error, EINVAL,
2420 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2421 item, "Not supported last point for range");
2426 * If the tag is 0, it means we don't care about the VLAN.
2430 return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
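/*
 * A minimal sketch (not from the driver) of an application-side pattern and
 * action list matching the VxLAN tunnel layout accepted above; the MAC
 * address, TCI and queue values are hypothetical.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(0x2016) };
 *	struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0xefff) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */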
2434 txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2435 const struct rte_flow_attr *attr,
2436 const struct rte_flow_item pattern[],
2437 const struct rte_flow_action actions[],
2438 struct txgbe_fdir_rule *rule,
2439 struct rte_flow_error *error)
2442 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2443 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2445 ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
2446 actions, rule, error);
2450 ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
2451 actions, rule, error);
2457 if (hw->mac.type == txgbe_mac_raptor &&
2458 rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
2459 (rule->input.src_port != 0 || rule->input.dst_port != 0))
2462 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2463 fdir_mode != rule->mode)
2466 if (rule->queue >= dev->data->nb_rx_queues)
2473 txgbe_parse_rss_filter(struct rte_eth_dev *dev,
2474 const struct rte_flow_attr *attr,
2475 const struct rte_flow_action actions[],
2476 struct txgbe_rte_flow_rss_conf *rss_conf,
2477 struct rte_flow_error *error)
2479 const struct rte_flow_action *act;
2480 const struct rte_flow_action_rss *rss;
2484 * RSS only supports forwarding;
2485 * check that the first not void action is RSS.
2487 act = next_no_void_action(actions, NULL);
2488 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2489 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2490 rte_flow_error_set(error, EINVAL,
2491 RTE_FLOW_ERROR_TYPE_ACTION,
2492 act, "Not supported action.");
2496 rss = (const struct rte_flow_action_rss *)act->conf;
2498 if (!rss || !rss->queue_num) {
2499 rte_flow_error_set(error, EINVAL,
2500 RTE_FLOW_ERROR_TYPE_ACTION,
2506 for (n = 0; n < rss->queue_num; n++) {
2507 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2508 rte_flow_error_set(error, EINVAL,
2509 RTE_FLOW_ERROR_TYPE_ACTION,
2511 "queue id > max number of queues");
2516 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2517 return rte_flow_error_set
2518 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2519 "non-default RSS hash functions are not supported");
2521 return rte_flow_error_set
2522 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2523 "a nonzero RSS encapsulation level is not supported");
2524 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2525 return rte_flow_error_set
2526 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2527 "RSS hash key must be exactly 40 bytes");
2528 if (rss->queue_num > RTE_DIM(rss_conf->queue))
2529 return rte_flow_error_set
2530 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2531 "too many queues for RSS context");
2532 if (txgbe_rss_conf_init(rss_conf, rss))
2533 return rte_flow_error_set
2534 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2535 "RSS context initialization failure");
2537 /* check if the next not void action is END */
2538 act = next_no_void_action(actions, act);
2539 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2540 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2541 rte_flow_error_set(error, EINVAL,
2542 RTE_FLOW_ERROR_TYPE_ACTION,
2543 act, "Not supported action.");
2548 /* must be input direction */
2549 if (!attr->ingress) {
2550 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2551 rte_flow_error_set(error, EINVAL,
2552 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2553 attr, "Only support ingress.");
2559 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2560 rte_flow_error_set(error, EINVAL,
2561 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2562 attr, "Egress is not supported.");
2567 if (attr->transfer) {
2568 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2569 rte_flow_error_set(error, EINVAL,
2570 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2571 attr, "No support for transfer.");
2575 if (attr->priority > 0xFFFF) {
2576 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2577 rte_flow_error_set(error, EINVAL,
2578 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2579 attr, "Invalid priority.");
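/*
 * A minimal sketch (not from the driver) of an RSS action that satisfies
 * the checks above: default hash function, level 0, a 40-byte key and a
 * queue list no longer than the context allows. The key, types and queue
 * values are hypothetical.
 *
 *	static const uint8_t rss_key[40] = { 0 };
 *	static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 0,
 *		.types = ETH_RSS_IP | ETH_RSS_TCP,
 *		.key_len = sizeof(rss_key),
 *		.key = rss_key,
 *		.queue_num = RTE_DIM(rss_queues),
 *		.queue = rss_queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */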
2586 /* remove the rss filter */
2588 txgbe_clear_rss_filter(struct rte_eth_dev *dev)
2590 struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
2592 if (filter_info->rss_info.conf.queue_num)
2593 txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2597 txgbe_filterlist_init(void)
2599 TAILQ_INIT(&filter_ntuple_list);
2600 TAILQ_INIT(&filter_ethertype_list);
2601 TAILQ_INIT(&filter_syn_list);
2602 TAILQ_INIT(&filter_fdir_list);
2603 TAILQ_INIT(&filter_l2_tunnel_list);
2604 TAILQ_INIT(&filter_rss_list);
2605 TAILQ_INIT(&txgbe_flow_list);
2609 txgbe_filterlist_flush(void)
2611 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2612 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2613 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2614 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2615 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2616 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2617 struct txgbe_rss_conf_ele *rss_filter_ptr;
2619 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2620 TAILQ_REMOVE(&filter_ntuple_list,
2623 rte_free(ntuple_filter_ptr);
2626 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2627 TAILQ_REMOVE(&filter_ethertype_list,
2628 ethertype_filter_ptr,
2630 rte_free(ethertype_filter_ptr);
2633 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2634 TAILQ_REMOVE(&filter_syn_list,
2637 rte_free(syn_filter_ptr);
2640 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2641 TAILQ_REMOVE(&filter_l2_tunnel_list,
2644 rte_free(l2_tn_filter_ptr);
2647 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2648 TAILQ_REMOVE(&filter_fdir_list,
2651 rte_free(fdir_rule_ptr);
2654 while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2655 TAILQ_REMOVE(&filter_rss_list,
2658 rte_free(rss_filter_ptr);
2661 while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
2662 TAILQ_REMOVE(&txgbe_flow_list,
2665 rte_free(txgbe_flow_mem_ptr->flow);
2666 rte_free(txgbe_flow_mem_ptr);
2671 * Create or destroy a flow rule.
2672 * Theoretically one rule can match more than one filter type.
2673 * We will let it use the filter type it hits first,
2674 * so the sequence of the parsers below matters.
2676 static struct rte_flow *
2677 txgbe_flow_create(struct rte_eth_dev *dev,
2678 const struct rte_flow_attr *attr,
2679 const struct rte_flow_item pattern[],
2680 const struct rte_flow_action actions[],
2681 struct rte_flow_error *error)
2684 struct rte_eth_ntuple_filter ntuple_filter;
2685 struct rte_eth_ethertype_filter ethertype_filter;
2686 struct rte_eth_syn_filter syn_filter;
2687 struct txgbe_fdir_rule fdir_rule;
2688 struct txgbe_l2_tunnel_conf l2_tn_filter;
2689 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
2690 struct txgbe_rte_flow_rss_conf rss_conf;
2691 struct rte_flow *flow = NULL;
2692 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2693 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2694 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2695 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2696 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2697 struct txgbe_rss_conf_ele *rss_filter_ptr;
2698 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2699 uint8_t first_mask = FALSE;
2701 flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
2703 PMD_DRV_LOG(ERR, "failed to allocate memory");
2704 return NULL;
2706 txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
2707 sizeof(struct txgbe_flow_mem), 0);
2708 if (!txgbe_flow_mem_ptr) {
2709 PMD_DRV_LOG(ERR, "failed to allocate memory");
2713 txgbe_flow_mem_ptr->flow = flow;
2714 TAILQ_INSERT_TAIL(&txgbe_flow_list,
2715 txgbe_flow_mem_ptr, entries);
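/*
 * Every flow created below is also recorded in txgbe_flow_list through this
 * txgbe_flow_mem entry, so txgbe_flow_destroy() and txgbe_filterlist_flush()
 * can find and release it later.
 */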
2717 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2718 ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2719 actions, &ntuple_filter, error);
2721 #ifdef RTE_LIB_SECURITY
2722 /* An ESP flow is not really a flow */
2723 if (ntuple_filter.proto == IPPROTO_ESP)
2728 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2730 ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
2731 sizeof(struct txgbe_ntuple_filter_ele), 0);
2732 if (!ntuple_filter_ptr) {
2733 PMD_DRV_LOG(ERR, "failed to allocate memory");
2736 rte_memcpy(&ntuple_filter_ptr->filter_info,
2738 sizeof(struct rte_eth_ntuple_filter));
2739 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2740 ntuple_filter_ptr, entries);
2741 flow->rule = ntuple_filter_ptr;
2742 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2748 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2749 ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2750 actions, ðertype_filter, error);
2752 ret = txgbe_add_del_ethertype_filter(dev,
2753 ðertype_filter, TRUE);
2755 ethertype_filter_ptr =
2756 rte_zmalloc("txgbe_ethertype_filter",
2757 sizeof(struct txgbe_ethertype_filter_ele), 0);
2758 if (!ethertype_filter_ptr) {
2759 PMD_DRV_LOG(ERR, "failed to allocate memory");
2762 rte_memcpy(ðertype_filter_ptr->filter_info,
2764 sizeof(struct rte_eth_ethertype_filter));
2765 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2766 ethertype_filter_ptr, entries);
2767 flow->rule = ethertype_filter_ptr;
2768 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2774 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2775 ret = txgbe_parse_syn_filter(dev, attr, pattern,
2776 actions, &syn_filter, error);
2778 ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
2780 syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
2781 sizeof(struct txgbe_eth_syn_filter_ele), 0);
2782 if (!syn_filter_ptr) {
2783 PMD_DRV_LOG(ERR, "failed to allocate memory");
2786 rte_memcpy(&syn_filter_ptr->filter_info,
2788 sizeof(struct rte_eth_syn_filter));
2789 TAILQ_INSERT_TAIL(&filter_syn_list,
2792 flow->rule = syn_filter_ptr;
2793 flow->filter_type = RTE_ETH_FILTER_SYN;
2799 memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2800 ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2801 actions, &fdir_rule, error);
2803 /* A mask cannot be deleted. */
2804 if (fdir_rule.b_mask) {
2805 if (!fdir_info->mask_added) {
2806 /* It's the first time the mask is set. */
2807 rte_memcpy(&fdir_info->mask,
2809 sizeof(struct txgbe_hw_fdir_mask));
2810 fdir_info->flex_bytes_offset =
2811 fdir_rule.flex_bytes_offset;
2813 if (fdir_rule.mask.flex_bytes_mask)
2814 txgbe_fdir_set_flexbytes_offset(dev,
2815 fdir_rule.flex_bytes_offset);
2817 ret = txgbe_fdir_set_input_mask(dev);
2821 fdir_info->mask_added = TRUE;
2825 * Only one global mask is supported;
2826 * all rules must use the same mask.
2828 ret = memcmp(&fdir_info->mask,
2830 sizeof(struct txgbe_hw_fdir_mask));
2834 if (fdir_info->flex_bytes_offset !=
2835 fdir_rule.flex_bytes_offset)
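/*
 * The two comparisons above enforce the single global flow director mask:
 * a rule whose mask or flex-byte offset differs from the one installed by
 * the first rule cannot be accepted, since the hardware keeps only one
 * mask at a time.
 */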
2840 if (fdir_rule.b_spec) {
2841 ret = txgbe_fdir_filter_program(dev, &fdir_rule,
2844 fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
2845 sizeof(struct txgbe_fdir_rule_ele), 0);
2846 if (!fdir_rule_ptr) {
2848 "failed to allocate memory");
2851 rte_memcpy(&fdir_rule_ptr->filter_info,
2853 sizeof(struct txgbe_fdir_rule));
2854 TAILQ_INSERT_TAIL(&filter_fdir_list,
2855 fdir_rule_ptr, entries);
2856 flow->rule = fdir_rule_ptr;
2857 flow->filter_type = RTE_ETH_FILTER_FDIR;
2864 * Clear the mask_added flag if programming the filter fails.
2868 fdir_info->mask_added = FALSE;
2876 memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2877 ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2878 actions, &l2_tn_filter, error);
2880 ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2882 l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
2883 sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
2884 if (!l2_tn_filter_ptr) {
2885 PMD_DRV_LOG(ERR, "failed to allocate memory");
2888 rte_memcpy(&l2_tn_filter_ptr->filter_info,
2890 sizeof(struct txgbe_l2_tunnel_conf));
2891 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2892 l2_tn_filter_ptr, entries);
2893 flow->rule = l2_tn_filter_ptr;
2894 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2899 memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2900 ret = txgbe_parse_rss_filter(dev, attr,
2901 actions, &rss_conf, error);
2903 ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
2905 rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
2906 sizeof(struct txgbe_rss_conf_ele), 0);
2907 if (!rss_filter_ptr) {
2908 PMD_DRV_LOG(ERR, "failed to allocate memory");
2911 txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
2913 TAILQ_INSERT_TAIL(&filter_rss_list,
2914 rss_filter_ptr, entries);
2915 flow->rule = rss_filter_ptr;
2916 flow->filter_type = RTE_ETH_FILTER_HASH;
2922 TAILQ_REMOVE(&txgbe_flow_list,
2923 txgbe_flow_mem_ptr, entries);
2924 rte_flow_error_set(error, -ret,
2925 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2926 "Failed to create flow.");
2927 rte_free(txgbe_flow_mem_ptr);
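/*
 * A minimal application-side sketch (not from the driver) of how these
 * entry points are exercised through the generic rte_flow API; port_id,
 * attr, pattern and actions are assumed to be set up by the caller.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f)
 *		rte_flow_destroy(port_id, f, &err);
 */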
2933 * Check if the flow rule is supported by txgbe.
2934 * It only checks the format; it does not guarantee that the rule can be
2935 * programmed into the HW, because there may not be enough room for it.
2938 txgbe_flow_validate(struct rte_eth_dev *dev,
2939 const struct rte_flow_attr *attr,
2940 const struct rte_flow_item pattern[],
2941 const struct rte_flow_action actions[],
2942 struct rte_flow_error *error)
2944 struct rte_eth_ntuple_filter ntuple_filter;
2945 struct rte_eth_ethertype_filter ethertype_filter;
2946 struct rte_eth_syn_filter syn_filter;
2947 struct txgbe_l2_tunnel_conf l2_tn_filter;
2948 struct txgbe_fdir_rule fdir_rule;
2949 struct txgbe_rte_flow_rss_conf rss_conf;
2952 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2953 ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2954 actions, &ntuple_filter, error);
2958 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2959 ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2960 actions, ðertype_filter, error);
2964 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2965 ret = txgbe_parse_syn_filter(dev, attr, pattern,
2966 actions, &syn_filter, error);
2970 memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2971 ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2972 actions, &fdir_rule, error);
2976 memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2977 ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2978 actions, &l2_tn_filter, error);
2982 memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2983 ret = txgbe_parse_rss_filter(dev, attr,
2984 actions, &rss_conf, error);
2989 /* Destroy a flow rule on txgbe. */
2991 txgbe_flow_destroy(struct rte_eth_dev *dev,
2992 struct rte_flow *flow,
2993 struct rte_flow_error *error)
2996 struct rte_flow *pmd_flow = flow;
2997 enum rte_filter_type filter_type = pmd_flow->filter_type;
2998 struct rte_eth_ntuple_filter ntuple_filter;
2999 struct rte_eth_ethertype_filter ethertype_filter;
3000 struct rte_eth_syn_filter syn_filter;
3001 struct txgbe_fdir_rule fdir_rule;
3002 struct txgbe_l2_tunnel_conf l2_tn_filter;
3003 struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
3004 struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
3005 struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
3006 struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3007 struct txgbe_fdir_rule_ele *fdir_rule_ptr;
3008 struct txgbe_flow_mem *txgbe_flow_mem_ptr;
3009 struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
3010 struct txgbe_rss_conf_ele *rss_filter_ptr;
3012 switch (filter_type) {
3013 case RTE_ETH_FILTER_NTUPLE:
3014 ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
3016 rte_memcpy(&ntuple_filter,
3017 &ntuple_filter_ptr->filter_info,
3018 sizeof(struct rte_eth_ntuple_filter));
3019 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3021 TAILQ_REMOVE(&filter_ntuple_list,
3022 ntuple_filter_ptr, entries);
3023 rte_free(ntuple_filter_ptr);
3026 case RTE_ETH_FILTER_ETHERTYPE:
3027 ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
3029 rte_memcpy(ðertype_filter,
3030 ðertype_filter_ptr->filter_info,
3031 sizeof(struct rte_eth_ethertype_filter));
3032 ret = txgbe_add_del_ethertype_filter(dev,
3033 ðertype_filter, FALSE);
3035 TAILQ_REMOVE(&filter_ethertype_list,
3036 ethertype_filter_ptr, entries);
3037 rte_free(ethertype_filter_ptr);
3040 case RTE_ETH_FILTER_SYN:
3041 syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
3043 rte_memcpy(&syn_filter,
3044 &syn_filter_ptr->filter_info,
3045 sizeof(struct rte_eth_syn_filter));
3046 ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
3048 TAILQ_REMOVE(&filter_syn_list,
3049 syn_filter_ptr, entries);
3050 rte_free(syn_filter_ptr);
3053 case RTE_ETH_FILTER_FDIR:
3054 fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
3055 rte_memcpy(&fdir_rule,
3056 &fdir_rule_ptr->filter_info,
3057 sizeof(struct txgbe_fdir_rule));
3058 ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3060 TAILQ_REMOVE(&filter_fdir_list,
3061 fdir_rule_ptr, entries);
3062 rte_free(fdir_rule_ptr);
3063 if (TAILQ_EMPTY(&filter_fdir_list))
3064 fdir_info->mask_added = false;
3067 case RTE_ETH_FILTER_L2_TUNNEL:
3068 l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
3070 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3071 sizeof(struct txgbe_l2_tunnel_conf));
3072 ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3074 TAILQ_REMOVE(&filter_l2_tunnel_list,
3075 l2_tn_filter_ptr, entries);
3076 rte_free(l2_tn_filter_ptr);
3079 case RTE_ETH_FILTER_HASH:
3080 rss_filter_ptr = (struct txgbe_rss_conf_ele *)
3082 ret = txgbe_config_rss_filter(dev,
3083 &rss_filter_ptr->filter_info, FALSE);
3085 TAILQ_REMOVE(&filter_rss_list,
3086 rss_filter_ptr, entries);
3087 rte_free(rss_filter_ptr);
3091 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3098 rte_flow_error_set(error, EINVAL,
3099 RTE_FLOW_ERROR_TYPE_HANDLE,
3100 NULL, "Failed to destroy flow");
3104 TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
3105 if (txgbe_flow_mem_ptr->flow == pmd_flow) {
3106 TAILQ_REMOVE(&txgbe_flow_list,
3107 txgbe_flow_mem_ptr, entries);
3108 rte_free(txgbe_flow_mem_ptr);
3116 /* Destroy all flow rules associated with a port on txgbe. */
3118 txgbe_flow_flush(struct rte_eth_dev *dev,
3119 struct rte_flow_error *error)
3123 txgbe_clear_all_ntuple_filter(dev);
3124 txgbe_clear_all_ethertype_filter(dev);
3125 txgbe_clear_syn_filter(dev);
3127 ret = txgbe_clear_all_fdir_filter(dev);
3129 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3130 NULL, "Failed to flush rule");
3134 ret = txgbe_clear_all_l2_tn_filter(dev);
3136 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3137 NULL, "Failed to flush rule");
3141 txgbe_clear_rss_filter(dev);
3143 txgbe_filterlist_flush();
3148 const struct rte_flow_ops txgbe_flow_ops = {
3149 .validate = txgbe_flow_validate,
3150 .create = txgbe_flow_create,
3151 .destroy = txgbe_flow_destroy,
3152 .flush = txgbe_flow_flush,