1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Marvell International Ltd.
3 * Copyright(c) 2018 Semihalf.
8 #include <rte_flow_driver.h>
9 #include <rte_malloc.h>
12 #include <arpa/inet.h>
18 #include "mrvl_ethdev.h"
20 #include "env/mv_common.h" /* for BIT() */
22 /** Number of rules in the classifier table. */
23 #define MRVL_CLS_MAX_NUM_RULES 20
25 /** Size of the classifier key and mask strings. */
26 #define MRVL_CLS_STR_SIZE_MAX 40
28 /** Parsed fields in processed rte_flow_item. */
29 enum mrvl_parsed_fields {
37 F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */
44 F_IP6_TC = BIT(10), /* not supported by MUSDK yet */
48 F_IP6_NEXT_HDR = BIT(14),
50 F_TCP_SPORT = BIT(15),
51 F_TCP_DPORT = BIT(16),
53 F_UDP_SPORT = BIT(17),
54 F_UDP_DPORT = BIT(18),
57 /** PMD-specific definition of a flow rule handle. */
59 LIST_ENTRY(rte_flow) next;
61 enum mrvl_parsed_fields pattern;
63 struct pp2_cls_tbl_rule rule;
64 struct pp2_cls_cos_desc cos;
65 struct pp2_cls_tbl_action action;
68 static const enum rte_flow_item_type pattern_eth[] = {
69 RTE_FLOW_ITEM_TYPE_ETH,
70 RTE_FLOW_ITEM_TYPE_END
73 static const enum rte_flow_item_type pattern_eth_vlan[] = {
74 RTE_FLOW_ITEM_TYPE_ETH,
75 RTE_FLOW_ITEM_TYPE_VLAN,
76 RTE_FLOW_ITEM_TYPE_END
79 static const enum rte_flow_item_type pattern_eth_vlan_ip[] = {
80 RTE_FLOW_ITEM_TYPE_ETH,
81 RTE_FLOW_ITEM_TYPE_VLAN,
82 RTE_FLOW_ITEM_TYPE_IPV4,
83 RTE_FLOW_ITEM_TYPE_END
86 static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = {
87 RTE_FLOW_ITEM_TYPE_ETH,
88 RTE_FLOW_ITEM_TYPE_VLAN,
89 RTE_FLOW_ITEM_TYPE_IPV6,
90 RTE_FLOW_ITEM_TYPE_END
93 static const enum rte_flow_item_type pattern_eth_ip4[] = {
94 RTE_FLOW_ITEM_TYPE_ETH,
95 RTE_FLOW_ITEM_TYPE_IPV4,
96 RTE_FLOW_ITEM_TYPE_END
99 static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = {
100 RTE_FLOW_ITEM_TYPE_ETH,
101 RTE_FLOW_ITEM_TYPE_IPV4,
102 RTE_FLOW_ITEM_TYPE_TCP,
103 RTE_FLOW_ITEM_TYPE_END
106 static const enum rte_flow_item_type pattern_eth_ip4_udp[] = {
107 RTE_FLOW_ITEM_TYPE_ETH,
108 RTE_FLOW_ITEM_TYPE_IPV4,
109 RTE_FLOW_ITEM_TYPE_UDP,
110 RTE_FLOW_ITEM_TYPE_END
113 static const enum rte_flow_item_type pattern_eth_ip6[] = {
114 RTE_FLOW_ITEM_TYPE_ETH,
115 RTE_FLOW_ITEM_TYPE_IPV6,
116 RTE_FLOW_ITEM_TYPE_END
119 static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = {
120 RTE_FLOW_ITEM_TYPE_ETH,
121 RTE_FLOW_ITEM_TYPE_IPV6,
122 RTE_FLOW_ITEM_TYPE_TCP,
123 RTE_FLOW_ITEM_TYPE_END
126 static const enum rte_flow_item_type pattern_eth_ip6_udp[] = {
127 RTE_FLOW_ITEM_TYPE_ETH,
128 RTE_FLOW_ITEM_TYPE_IPV6,
129 RTE_FLOW_ITEM_TYPE_UDP,
130 RTE_FLOW_ITEM_TYPE_END
133 static const enum rte_flow_item_type pattern_vlan[] = {
134 RTE_FLOW_ITEM_TYPE_VLAN,
135 RTE_FLOW_ITEM_TYPE_END
138 static const enum rte_flow_item_type pattern_vlan_ip[] = {
139 RTE_FLOW_ITEM_TYPE_VLAN,
140 RTE_FLOW_ITEM_TYPE_IPV4,
141 RTE_FLOW_ITEM_TYPE_END
144 static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = {
145 RTE_FLOW_ITEM_TYPE_VLAN,
146 RTE_FLOW_ITEM_TYPE_IPV4,
147 RTE_FLOW_ITEM_TYPE_TCP,
148 RTE_FLOW_ITEM_TYPE_END
151 static const enum rte_flow_item_type pattern_vlan_ip_udp[] = {
152 RTE_FLOW_ITEM_TYPE_VLAN,
153 RTE_FLOW_ITEM_TYPE_IPV4,
154 RTE_FLOW_ITEM_TYPE_UDP,
155 RTE_FLOW_ITEM_TYPE_END
158 static const enum rte_flow_item_type pattern_vlan_ip6[] = {
159 RTE_FLOW_ITEM_TYPE_VLAN,
160 RTE_FLOW_ITEM_TYPE_IPV6,
161 RTE_FLOW_ITEM_TYPE_END
164 static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = {
165 RTE_FLOW_ITEM_TYPE_VLAN,
166 RTE_FLOW_ITEM_TYPE_IPV6,
167 RTE_FLOW_ITEM_TYPE_TCP,
168 RTE_FLOW_ITEM_TYPE_END
171 static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = {
172 RTE_FLOW_ITEM_TYPE_VLAN,
173 RTE_FLOW_ITEM_TYPE_IPV6,
174 RTE_FLOW_ITEM_TYPE_UDP,
175 RTE_FLOW_ITEM_TYPE_END
178 static const enum rte_flow_item_type pattern_ip[] = {
179 RTE_FLOW_ITEM_TYPE_IPV4,
180 RTE_FLOW_ITEM_TYPE_END
183 static const enum rte_flow_item_type pattern_ip6[] = {
184 RTE_FLOW_ITEM_TYPE_IPV6,
185 RTE_FLOW_ITEM_TYPE_END
188 static const enum rte_flow_item_type pattern_ip_tcp[] = {
189 RTE_FLOW_ITEM_TYPE_IPV4,
190 RTE_FLOW_ITEM_TYPE_TCP,
191 RTE_FLOW_ITEM_TYPE_END
194 static const enum rte_flow_item_type pattern_ip6_tcp[] = {
195 RTE_FLOW_ITEM_TYPE_IPV6,
196 RTE_FLOW_ITEM_TYPE_TCP,
197 RTE_FLOW_ITEM_TYPE_END
200 static const enum rte_flow_item_type pattern_ip_udp[] = {
201 RTE_FLOW_ITEM_TYPE_IPV4,
202 RTE_FLOW_ITEM_TYPE_UDP,
203 RTE_FLOW_ITEM_TYPE_END
206 static const enum rte_flow_item_type pattern_ip6_udp[] = {
207 RTE_FLOW_ITEM_TYPE_IPV6,
208 RTE_FLOW_ITEM_TYPE_UDP,
209 RTE_FLOW_ITEM_TYPE_END
212 static const enum rte_flow_item_type pattern_tcp[] = {
213 RTE_FLOW_ITEM_TYPE_TCP,
214 RTE_FLOW_ITEM_TYPE_END
217 static const enum rte_flow_item_type pattern_udp[] = {
218 RTE_FLOW_ITEM_TYPE_UDP,
219 RTE_FLOW_ITEM_TYPE_END
/* VLAN VID: low 12 bits of the TCI. */
#define MRVL_VLAN_ID_MASK 0x0fff
/* VLAN priority bits of the TCI.
 * NOTE(review): 802.1Q PCP occupies bits 13-15 (0xe000); 0x7000 covers
 * bits 12-14 and therefore includes DEI but drops the PCP MSB — confirm
 * this is intentional for the MUSDK classifier.
 */
#define MRVL_VLAN_PRI_MASK 0x7000
/* DSCP: upper 6 bits of the IPv4 ToS byte. */
#define MRVL_IPV4_DSCP_MASK 0xfc
/* Full 32-bit IPv4 address. */
#define MRVL_IPV4_ADDR_MASK 0xffffffff
/* 20-bit IPv6 flow label (low bits of vtc_flow). */
#define MRVL_IPV6_FLOW_MASK 0x0fffff
229 * Given a flow item, return the next non-void one.
231 * @param items Pointer to the item in the table.
232 * @returns Next not-void item, NULL otherwise.
234 static const struct rte_flow_item *
235 mrvl_next_item(const struct rte_flow_item *items)
237 const struct rte_flow_item *item = items;
239 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
240 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
248 * Allocate memory for classifier rule key and mask fields.
250 * @param field Pointer to the classifier rule.
251 * @returns 0 in case of success, negative value otherwise.
254 mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
256 unsigned int id = rte_socket_id();
258 field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
262 field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
268 rte_free(field->key);
276 * Free memory allocated for classifier rule key and mask fields.
278 * @param field Pointer to the classifier rule.
281 mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
283 rte_free(field->key);
284 rte_free(field->mask);
290 * Free memory allocated for all classifier rule key and mask fields.
292 * @param rule Pointer to the classifier table rule.
295 mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
299 for (i = 0; i < rule->num_fields; i++)
300 mrvl_free_key_mask(&rule->fields[i]);
301 rule->num_fields = 0;
305 * Initialize rte flow item parsing.
307 * @param item Pointer to the flow item.
308 * @param spec_ptr Pointer to the specific item pointer.
309 * @param mask_ptr Pointer to the specific item's mask pointer.
310 * @def_mask Pointer to the default mask.
311 * @size Size of the flow item.
312 * @error Pointer to the rte flow error.
313 * @returns 0 in case of success, negative value otherwise.
316 mrvl_parse_init(const struct rte_flow_item *item,
317 const void **spec_ptr,
318 const void **mask_ptr,
319 const void *def_mask,
321 struct rte_flow_error *error)
328 memset(zeros, 0, size);
331 rte_flow_error_set(error, EINVAL,
332 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
337 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
338 rte_flow_error_set(error, EINVAL,
339 RTE_FLOW_ERROR_TYPE_ITEM, item,
340 "Mask or last is set without spec\n");
345 * If "mask" is not set, default mask is used,
346 * but if default mask is NULL, "mask" should be set.
348 if (item->mask == NULL) {
349 if (def_mask == NULL) {
350 rte_flow_error_set(error, EINVAL,
351 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
352 "Mask should be specified\n");
356 mask = (const uint8_t *)def_mask;
358 mask = (const uint8_t *)item->mask;
361 spec = (const uint8_t *)item->spec;
362 last = (const uint8_t *)item->last;
365 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
366 NULL, "Spec should be specified\n");
371 * If field values in "last" are either 0 or equal to the corresponding
372 * values in "spec" then they are ignored.
375 !memcmp(last, zeros, size) &&
376 memcmp(last, spec, size) != 0) {
377 rte_flow_error_set(error, ENOTSUP,
378 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
379 "Ranging is not supported\n");
390 * Parse the eth flow item.
392 * This will create classifier rule that matches either destination or source
395 * @param spec Pointer to the specific flow item.
396 * @param mask Pointer to the specific flow item's mask.
397 * @param mask Pointer to the flow.
398 * @return 0 in case of success, negative error value otherwise.
401 mrvl_parse_mac(const struct rte_flow_item_eth *spec,
402 const struct rte_flow_item_eth *mask,
403 int parse_dst, struct rte_flow *flow)
405 struct pp2_cls_rule_key_field *key_field;
406 const uint8_t *k, *m;
408 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
412 k = spec->dst.addr_bytes;
413 m = mask->dst.addr_bytes;
415 flow->pattern |= F_DMAC;
417 k = spec->src.addr_bytes;
418 m = mask->src.addr_bytes;
420 flow->pattern |= F_SMAC;
423 key_field = &flow->rule.fields[flow->rule.num_fields];
424 mrvl_alloc_key_mask(key_field);
427 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
428 "%02x:%02x:%02x:%02x:%02x:%02x",
429 k[0], k[1], k[2], k[3], k[4], k[5]);
431 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
432 "%02x:%02x:%02x:%02x:%02x:%02x",
433 m[0], m[1], m[2], m[3], m[4], m[5]);
435 flow->rule.num_fields += 1;
/**
 * Helper for parsing the eth flow item destination mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 1, flow);
}
/**
 * Helper for parsing the eth flow item source mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_smac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 0, flow);
}
473 * Parse the ether type field of the eth flow item.
475 * @param spec Pointer to the specific flow item.
476 * @param mask Pointer to the specific flow item's mask.
477 * @param flow Pointer to the flow.
478 * @return 0 in case of success, negative error value otherwise.
481 mrvl_parse_type(const struct rte_flow_item_eth *spec,
482 const struct rte_flow_item_eth *mask __rte_unused,
483 struct rte_flow *flow)
485 struct pp2_cls_rule_key_field *key_field;
488 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
491 key_field = &flow->rule.fields[flow->rule.num_fields];
492 mrvl_alloc_key_mask(key_field);
495 k = rte_be_to_cpu_16(spec->type);
496 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
498 flow->pattern |= F_TYPE;
499 flow->rule.num_fields += 1;
505 * Parse the vid field of the vlan rte flow item.
507 * This will create classifier rule that matches vid.
509 * @param spec Pointer to the specific flow item.
510 * @param mask Pointer to the specific flow item's mask.
511 * @param flow Pointer to the flow.
512 * @return 0 in case of success, negative error value otherwise.
515 mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
516 const struct rte_flow_item_vlan *mask __rte_unused,
517 struct rte_flow *flow)
519 struct pp2_cls_rule_key_field *key_field;
522 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
525 key_field = &flow->rule.fields[flow->rule.num_fields];
526 mrvl_alloc_key_mask(key_field);
529 k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
530 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
532 flow->pattern |= F_VLAN_ID;
533 flow->rule.num_fields += 1;
539 * Parse the pri field of the vlan rte flow item.
541 * This will create classifier rule that matches pri.
543 * @param spec Pointer to the specific flow item.
544 * @param mask Pointer to the specific flow item's mask.
545 * @param flow Pointer to the flow.
546 * @return 0 in case of success, negative error value otherwise.
549 mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
550 const struct rte_flow_item_vlan *mask __rte_unused,
551 struct rte_flow *flow)
553 struct pp2_cls_rule_key_field *key_field;
556 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
559 key_field = &flow->rule.fields[flow->rule.num_fields];
560 mrvl_alloc_key_mask(key_field);
563 k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
564 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
566 flow->pattern |= F_VLAN_PRI;
567 flow->rule.num_fields += 1;
573 * Parse the dscp field of the ipv4 rte flow item.
575 * This will create classifier rule that matches dscp field.
577 * @param spec Pointer to the specific flow item.
578 * @param mask Pointer to the specific flow item's mask.
579 * @param flow Pointer to the flow.
580 * @return 0 in case of success, negative error value otherwise.
583 mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
584 const struct rte_flow_item_ipv4 *mask,
585 struct rte_flow *flow)
587 struct pp2_cls_rule_key_field *key_field;
590 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
593 key_field = &flow->rule.fields[flow->rule.num_fields];
594 mrvl_alloc_key_mask(key_field);
597 k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
598 m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
599 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
600 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
602 flow->pattern |= F_IP4_TOS;
603 flow->rule.num_fields += 1;
609 * Parse either source or destination ip addresses of the ipv4 flow item.
611 * This will create classifier rule that matches either destination
612 * or source ip field.
614 * @param spec Pointer to the specific flow item.
615 * @param mask Pointer to the specific flow item's mask.
616 * @param flow Pointer to the flow.
617 * @return 0 in case of success, negative error value otherwise.
620 mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
621 const struct rte_flow_item_ipv4 *mask,
622 int parse_dst, struct rte_flow *flow)
624 struct pp2_cls_rule_key_field *key_field;
628 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
631 memset(&k, 0, sizeof(k));
633 k.s_addr = spec->hdr.dst_addr;
634 m = rte_be_to_cpu_32(mask->hdr.dst_addr);
636 flow->pattern |= F_IP4_DIP;
638 k.s_addr = spec->hdr.src_addr;
639 m = rte_be_to_cpu_32(mask->hdr.src_addr);
641 flow->pattern |= F_IP4_SIP;
644 key_field = &flow->rule.fields[flow->rule.num_fields];
645 mrvl_alloc_key_mask(key_field);
648 inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
649 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
651 flow->rule.num_fields += 1;
/**
 * Helper for parsing destination ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 1, flow);
}
/**
 * Helper for parsing source ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 0, flow);
}
689 * Parse the proto field of the ipv4 rte flow item.
691 * This will create classifier rule that matches proto field.
693 * @param spec Pointer to the specific flow item.
694 * @param mask Pointer to the specific flow item's mask.
695 * @param flow Pointer to the flow.
696 * @return 0 in case of success, negative error value otherwise.
699 mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
700 const struct rte_flow_item_ipv4 *mask __rte_unused,
701 struct rte_flow *flow)
703 struct pp2_cls_rule_key_field *key_field;
704 uint8_t k = spec->hdr.next_proto_id;
706 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
709 key_field = &flow->rule.fields[flow->rule.num_fields];
710 mrvl_alloc_key_mask(key_field);
713 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
715 flow->pattern |= F_IP4_PROTO;
716 flow->rule.num_fields += 1;
722 * Parse either source or destination ip addresses of the ipv6 rte flow item.
724 * This will create classifier rule that matches either destination
725 * or source ip field.
727 * @param spec Pointer to the specific flow item.
728 * @param mask Pointer to the specific flow item's mask.
729 * @param flow Pointer to the flow.
730 * @return 0 in case of success, negative error value otherwise.
733 mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
734 const struct rte_flow_item_ipv6 *mask,
735 int parse_dst, struct rte_flow *flow)
737 struct pp2_cls_rule_key_field *key_field;
738 int size = sizeof(spec->hdr.dst_addr);
739 struct in6_addr k, m;
741 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
744 memset(&k, 0, sizeof(k));
746 memcpy(k.s6_addr, spec->hdr.dst_addr, size);
747 memcpy(m.s6_addr, mask->hdr.dst_addr, size);
749 flow->pattern |= F_IP6_DIP;
751 memcpy(k.s6_addr, spec->hdr.src_addr, size);
752 memcpy(m.s6_addr, mask->hdr.src_addr, size);
754 flow->pattern |= F_IP6_SIP;
757 key_field = &flow->rule.fields[flow->rule.num_fields];
758 mrvl_alloc_key_mask(key_field);
759 key_field->size = 16;
761 inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
762 inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
764 flow->rule.num_fields += 1;
/**
 * Helper for parsing destination ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 1, flow);
}
/**
 * Helper for parsing source ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 0, flow);
}
802 * Parse the flow label of the ipv6 flow item.
804 * This will create classifier rule that matches flow field.
806 * @param spec Pointer to the specific flow item.
807 * @param mask Pointer to the specific flow item's mask.
808 * @param flow Pointer to the flow.
809 * @return 0 in case of success, negative error value otherwise.
812 mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
813 const struct rte_flow_item_ipv6 *mask,
814 struct rte_flow *flow)
816 struct pp2_cls_rule_key_field *key_field;
817 uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
818 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
820 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
823 key_field = &flow->rule.fields[flow->rule.num_fields];
824 mrvl_alloc_key_mask(key_field);
827 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
828 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
830 flow->pattern |= F_IP6_FLOW;
831 flow->rule.num_fields += 1;
837 * Parse the next header of the ipv6 flow item.
839 * This will create classifier rule that matches next header field.
841 * @param spec Pointer to the specific flow item.
842 * @param mask Pointer to the specific flow item's mask.
843 * @param flow Pointer to the flow.
844 * @return 0 in case of success, negative error value otherwise.
847 mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
848 const struct rte_flow_item_ipv6 *mask __rte_unused,
849 struct rte_flow *flow)
851 struct pp2_cls_rule_key_field *key_field;
852 uint8_t k = spec->hdr.proto;
854 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
857 key_field = &flow->rule.fields[flow->rule.num_fields];
858 mrvl_alloc_key_mask(key_field);
861 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
863 flow->pattern |= F_IP6_NEXT_HDR;
864 flow->rule.num_fields += 1;
870 * Parse destination or source port of the tcp flow item.
872 * This will create classifier rule that matches either destination or
875 * @param spec Pointer to the specific flow item.
876 * @param mask Pointer to the specific flow item's mask.
877 * @param flow Pointer to the flow.
878 * @return 0 in case of success, negative error value otherwise.
881 mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
882 const struct rte_flow_item_tcp *mask __rte_unused,
883 int parse_dst, struct rte_flow *flow)
885 struct pp2_cls_rule_key_field *key_field;
888 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
891 key_field = &flow->rule.fields[flow->rule.num_fields];
892 mrvl_alloc_key_mask(key_field);
896 k = rte_be_to_cpu_16(spec->hdr.dst_port);
898 flow->pattern |= F_TCP_DPORT;
900 k = rte_be_to_cpu_16(spec->hdr.src_port);
902 flow->pattern |= F_TCP_SPORT;
905 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
907 flow->rule.num_fields += 1;
/**
 * Helper for parsing the tcp source port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 0, flow);
}
/**
 * Helper for parsing the tcp destination port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 1, flow);
}
945 * Parse destination or source port of the udp flow item.
947 * This will create classifier rule that matches either destination or
950 * @param spec Pointer to the specific flow item.
951 * @param mask Pointer to the specific flow item's mask.
952 * @param flow Pointer to the flow.
953 * @return 0 in case of success, negative error value otherwise.
956 mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
957 const struct rte_flow_item_udp *mask __rte_unused,
958 int parse_dst, struct rte_flow *flow)
960 struct pp2_cls_rule_key_field *key_field;
963 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
966 key_field = &flow->rule.fields[flow->rule.num_fields];
967 mrvl_alloc_key_mask(key_field);
971 k = rte_be_to_cpu_16(spec->hdr.dst_port);
973 flow->pattern |= F_UDP_DPORT;
975 k = rte_be_to_cpu_16(spec->hdr.src_port);
977 flow->pattern |= F_UDP_SPORT;
980 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
982 flow->rule.num_fields += 1;
/**
 * Helper for parsing the udp source port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 0, flow);
}
/**
 * Helper for parsing the udp destination port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 1, flow);
}
1020 * Parse eth flow item.
1022 * @param item Pointer to the flow item.
1023 * @param flow Pointer to the flow.
1024 * @param error Pointer to the flow error.
1025 * @param fields Pointer to the parsed parsed fields enum.
1026 * @returns 0 on success, negative value otherwise.
1029 mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
1030 struct rte_flow_error *error)
1032 const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
1033 struct ether_addr zero;
1036 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1037 &rte_flow_item_eth_mask,
1038 sizeof(struct rte_flow_item_eth), error);
1042 memset(&zero, 0, sizeof(zero));
1044 if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
1045 ret = mrvl_parse_dmac(spec, mask, flow);
1050 if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
1051 ret = mrvl_parse_smac(spec, mask, flow);
1057 RTE_LOG(WARNING, PMD, "eth type mask is ignored\n");
1058 ret = mrvl_parse_type(spec, mask, flow);
1065 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1066 "Reached maximum number of fields in cls tbl key\n");
1071 * Parse vlan flow item.
1073 * @param item Pointer to the flow item.
1074 * @param flow Pointer to the flow.
1075 * @param error Pointer to the flow error.
1076 * @param fields Pointer to the parsed parsed fields enum.
1077 * @returns 0 on success, negative value otherwise.
1080 mrvl_parse_vlan(const struct rte_flow_item *item,
1081 struct rte_flow *flow,
1082 struct rte_flow_error *error)
1084 const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
1088 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1089 &rte_flow_item_vlan_mask,
1090 sizeof(struct rte_flow_item_vlan), error);
1094 m = rte_be_to_cpu_16(mask->tci);
1095 if (m & MRVL_VLAN_ID_MASK) {
1096 RTE_LOG(WARNING, PMD, "vlan id mask is ignored\n");
1097 ret = mrvl_parse_vlan_id(spec, mask, flow);
1102 if (m & MRVL_VLAN_PRI_MASK) {
1103 RTE_LOG(WARNING, PMD, "vlan pri mask is ignored\n");
1104 ret = mrvl_parse_vlan_pri(spec, mask, flow);
1109 if (flow->pattern & F_TYPE) {
1110 rte_flow_error_set(error, ENOTSUP,
1111 RTE_FLOW_ERROR_TYPE_ITEM, item,
1112 "VLAN TPID matching is not supported\n");
1115 if (mask->inner_type) {
1116 struct rte_flow_item_eth spec_eth = {
1117 .type = spec->inner_type,
1119 struct rte_flow_item_eth mask_eth = {
1120 .type = mask->inner_type,
1123 RTE_LOG(WARNING, PMD, "inner eth type mask is ignored\n");
1124 ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
1131 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1132 "Reached maximum number of fields in cls tbl key\n");
1137 * Parse ipv4 flow item.
1139 * @param item Pointer to the flow item.
1140 * @param flow Pointer to the flow.
1141 * @param error Pointer to the flow error.
1142 * @param fields Pointer to the parsed parsed fields enum.
1143 * @returns 0 on success, negative value otherwise.
1146 mrvl_parse_ip4(const struct rte_flow_item *item,
1147 struct rte_flow *flow,
1148 struct rte_flow_error *error)
1150 const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
1153 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1154 &rte_flow_item_ipv4_mask,
1155 sizeof(struct rte_flow_item_ipv4), error);
1159 if (mask->hdr.version_ihl ||
1160 mask->hdr.total_length ||
1161 mask->hdr.packet_id ||
1162 mask->hdr.fragment_offset ||
1163 mask->hdr.time_to_live ||
1164 mask->hdr.hdr_checksum) {
1165 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1166 NULL, "Not supported by classifier\n");
1170 if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
1171 ret = mrvl_parse_ip4_dscp(spec, mask, flow);
1176 if (mask->hdr.src_addr) {
1177 ret = mrvl_parse_ip4_sip(spec, mask, flow);
1182 if (mask->hdr.dst_addr) {
1183 ret = mrvl_parse_ip4_dip(spec, mask, flow);
1188 if (mask->hdr.next_proto_id) {
1189 RTE_LOG(WARNING, PMD, "next proto id mask is ignored\n");
1190 ret = mrvl_parse_ip4_proto(spec, mask, flow);
1197 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1198 "Reached maximum number of fields in cls tbl key\n");
1203 * Parse ipv6 flow item.
1205 * @param item Pointer to the flow item.
1206 * @param flow Pointer to the flow.
1207 * @param error Pointer to the flow error.
1208 * @param fields Pointer to the parsed parsed fields enum.
1209 * @returns 0 on success, negative value otherwise.
1212 mrvl_parse_ip6(const struct rte_flow_item *item,
1213 struct rte_flow *flow,
1214 struct rte_flow_error *error)
1216 const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
1217 struct ipv6_hdr zero;
1221 ret = mrvl_parse_init(item, (const void **)&spec,
1222 (const void **)&mask,
1223 &rte_flow_item_ipv6_mask,
1224 sizeof(struct rte_flow_item_ipv6),
1229 memset(&zero, 0, sizeof(zero));
1231 if (mask->hdr.payload_len ||
1232 mask->hdr.hop_limits) {
1233 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1234 NULL, "Not supported by classifier\n");
1238 if (memcmp(mask->hdr.src_addr,
1239 zero.src_addr, sizeof(mask->hdr.src_addr))) {
1240 ret = mrvl_parse_ip6_sip(spec, mask, flow);
1245 if (memcmp(mask->hdr.dst_addr,
1246 zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
1247 ret = mrvl_parse_ip6_dip(spec, mask, flow);
1252 flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
1254 ret = mrvl_parse_ip6_flow(spec, mask, flow);
1259 if (mask->hdr.proto) {
1260 RTE_LOG(WARNING, PMD, "next header mask is ignored\n");
1261 ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
1268 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1269 "Reached maximum number of fields in cls tbl key\n");
1274 * Parse tcp flow item.
1276 * @param item Pointer to the flow item.
1277 * @param flow Pointer to the flow.
1278 * @param error Pointer to the flow error.
1279 * @param fields Pointer to the parsed parsed fields enum.
1280 * @returns 0 on success, negative value otherwise.
1283 mrvl_parse_tcp(const struct rte_flow_item *item,
1284 struct rte_flow *flow,
1285 struct rte_flow_error *error)
1287 const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1290 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1291 &rte_flow_item_ipv4_mask,
1292 sizeof(struct rte_flow_item_ipv4), error);
1296 if (mask->hdr.sent_seq ||
1297 mask->hdr.recv_ack ||
1298 mask->hdr.data_off ||
1299 mask->hdr.tcp_flags ||
1302 mask->hdr.tcp_urp) {
1303 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1304 NULL, "Not supported by classifier\n");
1308 if (mask->hdr.src_port) {
1309 RTE_LOG(WARNING, PMD, "tcp sport mask is ignored\n");
1310 ret = mrvl_parse_tcp_sport(spec, mask, flow);
1315 if (mask->hdr.dst_port) {
1316 RTE_LOG(WARNING, PMD, "tcp dport mask is ignored\n");
1317 ret = mrvl_parse_tcp_dport(spec, mask, flow);
1324 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1325 "Reached maximum number of fields in cls tbl key\n");
1330 * Parse udp flow item.
1332 * @param item Pointer to the flow item.
1333 * @param flow Pointer to the flow.
1334 * @param error Pointer to the flow error.
1335 * @param fields Pointer to the parsed parsed fields enum.
1336 * @returns 0 on success, negative value otherwise.
1339 mrvl_parse_udp(const struct rte_flow_item *item,
1340 struct rte_flow *flow,
1341 struct rte_flow_error *error)
1343 const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1346 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1347 &rte_flow_item_ipv4_mask,
1348 sizeof(struct rte_flow_item_ipv4), error);
1352 if (mask->hdr.dgram_len ||
1353 mask->hdr.dgram_cksum) {
1354 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1355 NULL, "Not supported by classifier\n");
1359 if (mask->hdr.src_port) {
1360 RTE_LOG(WARNING, PMD, "udp sport mask is ignored\n");
1361 ret = mrvl_parse_udp_sport(spec, mask, flow);
1366 if (mask->hdr.dst_port) {
1367 RTE_LOG(WARNING, PMD, "udp dport mask is ignored\n");
1368 ret = mrvl_parse_udp_dport(spec, mask, flow);
1375 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1376 "Reached maximum number of fields in cls tbl key\n");
1381 * Parse flow pattern composed of the the eth item.
1383 * @param pattern Pointer to the flow pattern table.
1384 * @param flow Pointer to the flow.
1385 * @param error Pointer to the flow error.
1386 * @returns 0 in case of success, negative value otherwise.
1389 mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
1390 struct rte_flow *flow,
1391 struct rte_flow_error *error)
1393 return mrvl_parse_eth(pattern, flow, error);
/**
 * Parse flow pattern composed of the eth and vlan items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[],
			    struct rte_flow *flow,
			    struct rte_flow_error *error)
	/* First non-VOID item is expected to be the eth item. */
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	ret = mrvl_parse_eth(item, flow, error);

	/* Advance past eth (skipping VOIDs) to the vlan item. */
	item = mrvl_next_item(item + 1);

	return mrvl_parse_vlan(item, flow, error);
/**
 * Parse flow pattern composed of the eth, vlan and ip4/ip6 items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[],
				    struct rte_flow *flow,
				    struct rte_flow_error *error, int ip6)
	/* First non-VOID item is expected to be the eth item. */
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	ret = mrvl_parse_eth(item, flow, error);

	/* Advance to the vlan item, skipping VOIDs. */
	item = mrvl_next_item(item + 1);
	ret = mrvl_parse_vlan(item, flow, error);

	/* Advance to the L3 item and dispatch on the ip6 flag. */
	item = mrvl_next_item(item + 1);

	return ip6 ? mrvl_parse_ip6(item, flow, error) :
		     mrvl_parse_ip4(item, flow, error);
1454 * Parse flow pattern composed of the eth, vlan and ipv4 items.
1456 * @param pattern Pointer to the flow pattern table.
1457 * @param flow Pointer to the flow.
1458 * @param error Pointer to the flow error.
1459 * @returns 0 in case of success, negative value otherwise.
1462 mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[],
1463 struct rte_flow *flow,
1464 struct rte_flow_error *error)
1466 return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0);
1470 * Parse flow pattern composed of the eth, vlan and ipv6 items.
1472 * @param pattern Pointer to the flow pattern table.
1473 * @param flow Pointer to the flow.
1474 * @param error Pointer to the flow error.
1475 * @returns 0 in case of success, negative value otherwise.
1478 mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[],
1479 struct rte_flow *flow,
1480 struct rte_flow_error *error)
1482 return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1);
/**
 * Parse flow pattern composed of the eth and ip4/ip6 items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error, int ip6)
	/* First non-VOID item is expected to be the eth item. */
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	ret = mrvl_parse_eth(item, flow, error);

	/* Advance to the L3 item and dispatch on the ip6 flag. */
	item = mrvl_next_item(item + 1);

	return ip6 ? mrvl_parse_ip6(item, flow, error) :
		     mrvl_parse_ip4(item, flow, error);
1513 * Parse flow pattern composed of the eth and ipv4 items.
1515 * @param pattern Pointer to the flow pattern table.
1516 * @param flow Pointer to the flow.
1517 * @param error Pointer to the flow error.
1518 * @returns 0 in case of success, negative value otherwise.
1521 mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[],
1522 struct rte_flow *flow,
1523 struct rte_flow_error *error)
1525 return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
1529 * Parse flow pattern composed of the eth and ipv6 items.
1531 * @param pattern Pointer to the flow pattern table.
1532 * @param flow Pointer to the flow.
1533 * @param error Pointer to the flow error.
1534 * @returns 0 in case of success, negative value otherwise.
1537 mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[],
1538 struct rte_flow *flow,
1539 struct rte_flow_error *error)
1541 return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
/**
 * Parse flow pattern composed of the eth, ip4 and tcp/udp items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param tcp 1 to parse tcp item, 0 to parse udp item.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[],
				   struct rte_flow *flow,
				   struct rte_flow_error *error, int tcp)
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	/* Reuse the eth/ip parser for the first two items (IPv4 mode). */
	ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);

	/* Skip past the eth and ip items already consumed above. */
	item = mrvl_next_item(item + 1);
	item = mrvl_next_item(item + 1);

	/* Parse the L4 item according to the tcp flag. */
	return mrvl_parse_tcp(item, flow, error);

	return mrvl_parse_udp(item, flow, error);
1575 * Parse flow pattern composed of the eth, ipv4 and tcp items.
1577 * @param pattern Pointer to the flow pattern table.
1578 * @param flow Pointer to the flow.
1579 * @param error Pointer to the flow error.
1580 * @returns 0 in case of success, negative value otherwise.
1583 mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[],
1584 struct rte_flow *flow,
1585 struct rte_flow_error *error)
1587 return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1);
1591 * Parse flow pattern composed of the eth, ipv4 and udp items.
1593 * @param pattern Pointer to the flow pattern table.
1594 * @param flow Pointer to the flow.
1595 * @param error Pointer to the flow error.
1596 * @returns 0 in case of success, negative value otherwise.
1599 mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[],
1600 struct rte_flow *flow,
1601 struct rte_flow_error *error)
1603 return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0);
/**
 * Parse flow pattern composed of the eth, ipv6 and tcp/udp items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param tcp 1 to parse tcp item, 0 to parse udp item.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[],
				   struct rte_flow *flow,
				   struct rte_flow_error *error, int tcp)
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	/* Reuse the eth/ip parser for the first two items (IPv6 mode). */
	ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);

	/* Skip past the eth and ip items already consumed above. */
	item = mrvl_next_item(item + 1);
	item = mrvl_next_item(item + 1);

	/* Parse the L4 item according to the tcp flag. */
	return mrvl_parse_tcp(item, flow, error);

	return mrvl_parse_udp(item, flow, error);
1637 * Parse flow pattern composed of the eth, ipv6 and tcp items.
1639 * @param pattern Pointer to the flow pattern table.
1640 * @param flow Pointer to the flow.
1641 * @param error Pointer to the flow error.
1642 * @returns 0 in case of success, negative value otherwise.
1645 mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[],
1646 struct rte_flow *flow,
1647 struct rte_flow_error *error)
1649 return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1);
1653 * Parse flow pattern composed of the eth, ipv6 and udp items.
1655 * @param pattern Pointer to the flow pattern table.
1656 * @param flow Pointer to the flow.
1657 * @param error Pointer to the flow error.
1658 * @returns 0 in case of success, negative value otherwise.
1661 mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[],
1662 struct rte_flow *flow,
1663 struct rte_flow_error *error)
1665 return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0);
1669 * Parse flow pattern composed of the vlan item.
1671 * @param pattern Pointer to the flow pattern table.
1672 * @param flow Pointer to the flow.
1673 * @param error Pointer to the flow error.
1674 * @returns 0 in case of success, negative value otherwise.
1677 mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[],
1678 struct rte_flow *flow,
1679 struct rte_flow_error *error)
1681 const struct rte_flow_item *item = mrvl_next_item(pattern);
1683 return mrvl_parse_vlan(item, flow, error);
/**
 * Parse flow pattern composed of the vlan and ip4/ip6 items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[],
				struct rte_flow *flow,
				struct rte_flow_error *error, int ip6)
	/* First non-VOID item is expected to be the vlan item. */
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	ret = mrvl_parse_vlan(item, flow, error);

	/* Advance to the L3 item and dispatch on the ip6 flag. */
	item = mrvl_next_item(item + 1);

	return ip6 ? mrvl_parse_ip6(item, flow, error) :
		     mrvl_parse_ip4(item, flow, error);
1714 * Parse flow pattern composed of the vlan and ipv4 items.
1716 * @param pattern Pointer to the flow pattern table.
1717 * @param flow Pointer to the flow.
1718 * @param error Pointer to the flow error.
1719 * @returns 0 in case of success, negative value otherwise.
1722 mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[],
1723 struct rte_flow *flow,
1724 struct rte_flow_error *error)
1726 return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
/**
 * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param tcp 1 to parse tcp item, 0 to parse udp item.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[],
				   struct rte_flow *flow,
				   struct rte_flow_error *error, int tcp)
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	/* Reuse the vlan/ip parser for the first two items (IPv4 mode). */
	ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);

	/* Skip past the vlan and ip items already consumed above. */
	item = mrvl_next_item(item + 1);
	item = mrvl_next_item(item + 1);

	/* Parse the L4 item according to the tcp flag. */
	return mrvl_parse_tcp(item, flow, error);

	return mrvl_parse_udp(item, flow, error);
1759 * Parse flow pattern composed of the vlan, ipv4 and tcp items.
1761 * @param pattern Pointer to the flow pattern table.
1762 * @param flow Pointer to the flow.
1763 * @param error Pointer to the flow error.
1764 * @returns 0 in case of success, negative value otherwise.
1767 mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[],
1768 struct rte_flow *flow,
1769 struct rte_flow_error *error)
1771 return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1);
1775 * Parse flow pattern composed of the vlan, ipv4 and udp items.
1777 * @param pattern Pointer to the flow pattern table.
1778 * @param flow Pointer to the flow.
1779 * @param error Pointer to the flow error.
1780 * @returns 0 in case of success, negative value otherwise.
1783 mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[],
1784 struct rte_flow *flow,
1785 struct rte_flow_error *error)
1787 return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0);
1791 * Parse flow pattern composed of the vlan and ipv6 items.
1793 * @param pattern Pointer to the flow pattern table.
1794 * @param flow Pointer to the flow.
1795 * @param error Pointer to the flow error.
1796 * @returns 0 in case of success, negative value otherwise.
1799 mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[],
1800 struct rte_flow *flow,
1801 struct rte_flow_error *error)
1803 return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
/**
 * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param tcp 1 to parse tcp item, 0 to parse udp item.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[],
				    struct rte_flow *flow,
				    struct rte_flow_error *error, int tcp)
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	/* Reuse the vlan/ip parser for the first two items (IPv6 mode). */
	ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);

	/* Skip past the vlan and ip items already consumed above. */
	item = mrvl_next_item(item + 1);
	item = mrvl_next_item(item + 1);

	/* Parse the L4 item according to the tcp flag. */
	return mrvl_parse_tcp(item, flow, error);

	return mrvl_parse_udp(item, flow, error);
1836 * Parse flow pattern composed of the vlan, ipv6 and tcp items.
1838 * @param pattern Pointer to the flow pattern table.
1839 * @param flow Pointer to the flow.
1840 * @param error Pointer to the flow error.
1841 * @returns 0 in case of success, negative value otherwise.
1844 mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[],
1845 struct rte_flow *flow,
1846 struct rte_flow_error *error)
1848 return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1);
1852 * Parse flow pattern composed of the vlan, ipv6 and udp items.
1854 * @param pattern Pointer to the flow pattern table.
1855 * @param flow Pointer to the flow.
1856 * @param error Pointer to the flow error.
1857 * @returns 0 in case of success, negative value otherwise.
1860 mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[],
1861 struct rte_flow *flow,
1862 struct rte_flow_error *error)
1864 return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0);
1868 * Parse flow pattern composed of the ip4/ip6 item.
1870 * @param pattern Pointer to the flow pattern table.
1871 * @param flow Pointer to the flow.
1872 * @param error Pointer to the flow error.
1873 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1874 * @returns 0 in case of success, negative value otherwise.
1877 mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[],
1878 struct rte_flow *flow,
1879 struct rte_flow_error *error, int ip6)
1881 const struct rte_flow_item *item = mrvl_next_item(pattern);
1883 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1884 mrvl_parse_ip4(item, flow, error);
1888 * Parse flow pattern composed of the ipv4 item.
1890 * @param pattern Pointer to the flow pattern table.
1891 * @param flow Pointer to the flow.
1892 * @param error Pointer to the flow error.
1893 * @returns 0 in case of success, negative value otherwise.
1896 mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[],
1897 struct rte_flow *flow,
1898 struct rte_flow_error *error)
1900 return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0);
1904 * Parse flow pattern composed of the ipv6 item.
1906 * @param pattern Pointer to the flow pattern table.
1907 * @param flow Pointer to the flow.
1908 * @param error Pointer to the flow error.
1909 * @returns 0 in case of success, negative value otherwise.
1912 mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[],
1913 struct rte_flow *flow,
1914 struct rte_flow_error *error)
1916 return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1);
/**
 * Parse flow pattern composed of the ip4/ip6 and tcp items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error, int ip6)
	/* First non-VOID item is the L3 item; dispatch on the ip6 flag. */
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
		    mrvl_parse_ip4(item, flow, error);

	/* Advance to the tcp item, skipping VOIDs. */
	item = mrvl_next_item(item + 1);

	return mrvl_parse_tcp(item, flow, error);
1947 * Parse flow pattern composed of the ipv4 and tcp items.
1949 * @param pattern Pointer to the flow pattern table.
1950 * @param flow Pointer to the flow.
1951 * @param error Pointer to the flow error.
1952 * @returns 0 in case of success, negative value otherwise.
1955 mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[],
1956 struct rte_flow *flow,
1957 struct rte_flow_error *error)
1959 return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0);
1963 * Parse flow pattern composed of the ipv6 and tcp items.
1965 * @param pattern Pointer to the flow pattern table.
1966 * @param flow Pointer to the flow.
1967 * @param error Pointer to the flow error.
1968 * @returns 0 in case of success, negative value otherwise.
1971 mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
1972 struct rte_flow *flow,
1973 struct rte_flow_error *error)
1975 return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1);
/**
 * Parse flow pattern composed of the ipv4/ipv6 and udp items.
 *
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error, int ip6)
	/* First non-VOID item is the L3 item; dispatch on the ip6 flag. */
	const struct rte_flow_item *item = mrvl_next_item(pattern);

	ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
		    mrvl_parse_ip4(item, flow, error);

	/* Advance to the udp item, skipping VOIDs. */
	item = mrvl_next_item(item + 1);

	return mrvl_parse_udp(item, flow, error);
2005 * Parse flow pattern composed of the ipv4 and udp items.
2007 * @param pattern Pointer to the flow pattern table.
2008 * @param flow Pointer to the flow.
2009 * @param error Pointer to the flow error.
2010 * @returns 0 in case of success, negative value otherwise.
2013 mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[],
2014 struct rte_flow *flow,
2015 struct rte_flow_error *error)
2017 return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0);
2021 * Parse flow pattern composed of the ipv6 and udp items.
2023 * @param pattern Pointer to the flow pattern table.
2024 * @param flow Pointer to the flow.
2025 * @param error Pointer to the flow error.
2026 * @returns 0 in case of success, negative value otherwise.
2029 mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[],
2030 struct rte_flow *flow,
2031 struct rte_flow_error *error)
2033 return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1);
2037 * Parse flow pattern composed of the tcp item.
2039 * @param pattern Pointer to the flow pattern table.
2040 * @param flow Pointer to the flow.
2041 * @param error Pointer to the flow error.
2042 * @returns 0 in case of success, negative value otherwise.
2045 mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[],
2046 struct rte_flow *flow,
2047 struct rte_flow_error *error)
2049 const struct rte_flow_item *item = mrvl_next_item(pattern);
2051 return mrvl_parse_tcp(item, flow, error);
2055 * Parse flow pattern composed of the udp item.
2057 * @param pattern Pointer to the flow pattern table.
2058 * @param flow Pointer to the flow.
2059 * @param error Pointer to the flow error.
2060 * @returns 0 in case of success, negative value otherwise.
2063 mrvl_parse_pattern_udp(const struct rte_flow_item pattern[],
2064 struct rte_flow *flow,
2065 struct rte_flow_error *error)
2067 const struct rte_flow_item *item = mrvl_next_item(pattern);
2069 return mrvl_parse_udp(item, flow, error);
/**
 * Structure used to map specific flow pattern to the pattern parse callback
 * which will iterate over each pattern item and extract relevant data.
 *
 * The table is scanned in order by mrvl_flow_parse_pattern(); the first
 * entry whose pattern matches is used.
 */
static const struct {
	/* Expected item-type sequence, terminated by RTE_FLOW_ITEM_TYPE_END. */
	const enum rte_flow_item_type *pattern;
	/* Callback that extracts classifier fields from matching items. */
	int (*parse)(const struct rte_flow_item pattern[],
		struct rte_flow *flow,
		struct rte_flow_error *error);
} mrvl_patterns[] = {
	{ pattern_eth, mrvl_parse_pattern_eth },
	{ pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
	{ pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
	{ pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
	{ pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
	{ pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
	{ pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
	{ pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
	{ pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
	{ pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
	{ pattern_vlan, mrvl_parse_pattern_vlan },
	{ pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
	{ pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
	{ pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
	{ pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
	{ pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
	{ pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
	{ pattern_ip, mrvl_parse_pattern_ip4 },
	{ pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
	{ pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
	{ pattern_ip6, mrvl_parse_pattern_ip6 },
	{ pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
	{ pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
	{ pattern_tcp, mrvl_parse_pattern_tcp },
	{ pattern_udp, mrvl_parse_pattern_udp }
/**
 * Check whether provided pattern matches any of the supported ones.
 *
 * @param type_pattern Pointer to the pattern type.
 * @param item_pattern Pointer to the flow pattern.
 * @returns 1 in case of success, 0 value otherwise.
 */
mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
		    const struct rte_flow_item *item_pattern)
	const enum rte_flow_item_type *type = type_pattern;
	const struct rte_flow_item *item = item_pattern;

	/* VOID items are transparent: skip them without consuming
	 * an expected type from the template. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {

	/* Stop comparing once either sequence reaches its END marker. */
	if (*type == RTE_FLOW_ITEM_TYPE_END ||
	    item->type == RTE_FLOW_ITEM_TYPE_END)

	if (*type != item->type)

	/* Full match only when both sequences ended at the same point
	 * (both point at END here). */
	return *type == item->type;
/**
 * Parse flow attribute.
 *
 * This will check whether the provided attribute's flags are supported.
 *
 * @param priv Unused
 * @param attr Pointer to the flow attribute.
 * @param flow Unused
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
		     const struct rte_flow_attr *attr,
		     struct rte_flow *flow __rte_unused,
		     struct rte_flow_error *error)
	/* Reject a NULL attribute pointer up front. */
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
			   NULL, "NULL attribute");

	/* Flow groups are not supported by this classifier. */
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
			   "Groups are not supported");

	/* Rule priorities are not supported either. */
	if (attr->priority) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
				   "Priorities are not supported");

	/* Only ingress classification can be offloaded. */
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
				   "Only ingress is supported");

	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
			   "Egress is not supported");
/**
 * Parse flow pattern.
 *
 * Specific classifier rule will be created as well.
 *
 * @param priv Unused
 * @param pattern Pointer to the flow pattern.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
			const struct rte_flow_item pattern[],
			struct rte_flow *flow,
			struct rte_flow_error *error)
	/* Scan the supported-pattern table in order; the first template
	 * that matches the provided pattern is parsed. */
	for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
		if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))

		ret = mrvl_patterns[i].parse(pattern, flow, error);
		/* On parse failure release any key/mask strings already
		 * attached to the rule. */
			mrvl_free_all_key_mask(&flow->rule);

	/* No template matched - pattern is not supported. */
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			   "Unsupported pattern");
2232 * Parse flow actions.
2234 * @param priv Pointer to the port's private data.
2235 * @param actions Pointer the action table.
2236 * @param flow Pointer to the flow.
2237 * @param error Pointer to the flow error.
2238 * @returns 0 in case of success, negative value otherwise.
2241 mrvl_flow_parse_actions(struct mrvl_priv *priv,
2242 const struct rte_flow_action actions[],
2243 struct rte_flow *flow,
2244 struct rte_flow_error *error)
2246 const struct rte_flow_action *action = actions;
2249 for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
2250 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
2253 if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
2254 flow->cos.ppio = priv->ppio;
2256 flow->action.type = PP2_CLS_TBL_ACT_DROP;
2257 flow->action.cos = &flow->cos;
2259 } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2260 const struct rte_flow_action_queue *q =
2261 (const struct rte_flow_action_queue *)
2264 if (q->index > priv->nb_rx_queues) {
2265 rte_flow_error_set(error, EINVAL,
2266 RTE_FLOW_ERROR_TYPE_ACTION,
2268 "Queue index out of range");
2272 if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
2274 * Unknown TC mapping, mapping will not have
2278 "Unknown TC mapping for queue %hu eth%hhu\n",
2279 q->index, priv->ppio_id);
2281 rte_flow_error_set(error, EFAULT,
2282 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2288 "Action: Assign packets to queue %d, tc:%d, q:%d\n",
2289 q->index, priv->rxq_map[q->index].tc,
2290 priv->rxq_map[q->index].inq);
2292 flow->cos.ppio = priv->ppio;
2293 flow->cos.tc = priv->rxq_map[q->index].tc;
2294 flow->action.type = PP2_CLS_TBL_ACT_DONE;
2295 flow->action.cos = &flow->cos;
2298 rte_flow_error_set(error, ENOTSUP,
2299 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2300 "Action not supported");
2307 rte_flow_error_set(error, EINVAL,
2308 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2309 NULL, "Action not specified");
/**
 * Parse flow attribute, pattern and actions.
 *
 * @param priv Pointer to the port's private data.
 * @param attr Pointer to the flow attribute.
 * @param pattern Pointer to the flow pattern.
 * @param actions Pointer to the flow actions.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
 */
mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow *flow,
		struct rte_flow_error *error)
	/* Validate in order: attribute, then pattern, then actions;
	 * bail out on the first failure. */
	ret = mrvl_flow_parse_attr(priv, attr, flow, error);

	ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);

	return mrvl_flow_parse_actions(priv, actions, flow, error);
/**
 * Select the classifier search-engine type for a flow based on its
 * total key size.
 */
static inline enum pp2_cls_tbl_type
mrvl_engine_type(const struct rte_flow *flow)
	/* Accumulate the byte width of every field in the rule key. */
	for (i = 0; i < flow->rule.num_fields; i++)
		size += flow->rule.fields[i].size;

	/*
	 * For maskable engine type the key size must be up to 8 bytes.
	 * For keys with size bigger than 8 bytes, engine type must
	 * be set to exact match.
	 */
	return PP2_CLS_TBL_EXACT_MATCH;

	return PP2_CLS_TBL_MASKABLE;
2367 mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
2369 struct mrvl_priv *priv = dev->data->dev_private;
2370 struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
2373 if (priv->cls_tbl) {
2374 pp2_cls_tbl_deinit(priv->cls_tbl);
2375 priv->cls_tbl = NULL;
2378 memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
2380 priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
2381 RTE_LOG(INFO, PMD, "Setting cls search engine type to %s\n",
2382 priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
2383 "exact" : "maskable");
2384 priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
2385 priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
2386 priv->cls_tbl_params.default_act.cos = &first_flow->cos;
2388 if (first_flow->pattern & F_DMAC) {
2389 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2390 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
2392 key->num_fields += 1;
2395 if (first_flow->pattern & F_SMAC) {
2396 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2397 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
2399 key->num_fields += 1;
2402 if (first_flow->pattern & F_TYPE) {
2403 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2404 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
2406 key->num_fields += 1;
2409 if (first_flow->pattern & F_VLAN_ID) {
2410 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2411 key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
2413 key->num_fields += 1;
2416 if (first_flow->pattern & F_VLAN_PRI) {
2417 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2418 key->proto_field[key->num_fields].field.vlan =
2421 key->num_fields += 1;
2424 if (first_flow->pattern & F_IP4_TOS) {
2425 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2426 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS;
2428 key->num_fields += 1;
2431 if (first_flow->pattern & F_IP4_SIP) {
2432 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2433 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
2435 key->num_fields += 1;
2438 if (first_flow->pattern & F_IP4_DIP) {
2439 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2440 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
2442 key->num_fields += 1;
2445 if (first_flow->pattern & F_IP4_PROTO) {
2446 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2447 key->proto_field[key->num_fields].field.ipv4 =
2450 key->num_fields += 1;
2453 if (first_flow->pattern & F_IP6_SIP) {
2454 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2455 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
2456 key->key_size += 16;
2457 key->num_fields += 1;
2460 if (first_flow->pattern & F_IP6_DIP) {
2461 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2462 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
2463 key->key_size += 16;
2464 key->num_fields += 1;
2467 if (first_flow->pattern & F_IP6_FLOW) {
2468 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2469 key->proto_field[key->num_fields].field.ipv6 =
2472 key->num_fields += 1;
2475 if (first_flow->pattern & F_IP6_NEXT_HDR) {
2476 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2477 key->proto_field[key->num_fields].field.ipv6 =
2478 MV_NET_IP6_F_NEXT_HDR;
2480 key->num_fields += 1;
2483 if (first_flow->pattern & F_TCP_SPORT) {
2484 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2485 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2487 key->num_fields += 1;
2490 if (first_flow->pattern & F_TCP_DPORT) {
2491 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2492 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
2494 key->num_fields += 1;
2497 if (first_flow->pattern & F_UDP_SPORT) {
2498 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2499 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2501 key->num_fields += 1;
2504 if (first_flow->pattern & F_UDP_DPORT) {
2505 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2506 key->proto_field[key->num_fields].field.udp = MV_NET_TCP_F_DP;
2508 key->num_fields += 1;
2511 ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
2513 priv->cls_tbl_pattern = first_flow->pattern;
2519 * Check whether new flow can be added to the table
2521 * @param priv Pointer to the port's private data.
2522 * @param flow Pointer to the new flow.
2523 * @return 1 in case flow can be added, 0 otherwise.
2526 mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
2528 return flow->pattern == priv->cls_tbl_pattern &&
2529 mrvl_engine_type(flow) == priv->cls_tbl_params.type;
2533 * DPDK flow create callback called when flow is to be created.
2535 * @param dev Pointer to the device.
2536 * @param attr Pointer to the flow attribute.
2537 * @param pattern Pointer to the flow pattern.
2538 * @param actions Pointer to the flow actions.
2539 * @param error Pointer to the flow error.
2540 * @returns Pointer to the created flow in case of success, NULL otherwise.
2542 static struct rte_flow *
2543 mrvl_flow_create(struct rte_eth_dev *dev,
2544 const struct rte_flow_attr *attr,
2545 const struct rte_flow_item pattern[],
2546 const struct rte_flow_action actions[],
2547 struct rte_flow_error *error)
2549 struct mrvl_priv *priv = dev->data->dev_private;
2550 struct rte_flow *flow, *first;
2553 if (!dev->data->dev_started) {
2554 rte_flow_error_set(error, EINVAL,
2555 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2556 "Port must be started first\n");
2560 flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
2564 ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
2571 * 1. In case table does not exist - create one.
2572 * 2. In case table exists, is empty and new flow cannot be added
2574 * 3. In case table is not empty and new flow matches table format
2576 * 4. Otherwise flow cannot be added.
2578 first = LIST_FIRST(&priv->flows);
2579 if (!priv->cls_tbl) {
2580 ret = mrvl_create_cls_table(dev, flow);
2581 } else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
2582 ret = mrvl_create_cls_table(dev, flow);
2583 } else if (mrvl_flow_can_be_added(priv, flow)) {
2586 rte_flow_error_set(error, EINVAL,
2587 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2588 "Pattern does not match cls table format\n");
2593 rte_flow_error_set(error, EINVAL,
2594 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2595 "Failed to create cls table\n");
2599 ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
2601 rte_flow_error_set(error, EINVAL,
2602 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2603 "Failed to add rule\n");
2607 LIST_INSERT_HEAD(&priv->flows, flow, next);
2616 * Remove classifier rule associated with given flow.
2618 * @param priv Pointer to the port's private data.
2619 * @param flow Pointer to the flow.
2620 * @param error Pointer to the flow error.
2621 * @returns 0 in case of success, negative value otherwise.
2624 mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
2625 struct rte_flow_error *error)
2629 if (!priv->cls_tbl) {
2630 rte_flow_error_set(error, EINVAL,
2631 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2632 "Classifier table not initialized");
2636 ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
2638 rte_flow_error_set(error, EINVAL,
2639 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2640 "Failed to remove rule");
2644 mrvl_free_all_key_mask(&flow->rule);
2650 * DPDK flow destroy callback called when flow is to be removed.
2652 * @param priv Pointer to the port's private data.
2653 * @param flow Pointer to the flow.
2654 * @param error Pointer to the flow error.
2655 * @returns 0 in case of success, negative value otherwise.
2658 mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
2659 struct rte_flow_error *error)
2661 struct mrvl_priv *priv = dev->data->dev_private;
2665 LIST_FOREACH(f, &priv->flows, next) {
2671 rte_flow_error_set(error, EINVAL,
2672 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2673 "Rule was not found");
2677 LIST_REMOVE(f, next);
2679 ret = mrvl_flow_remove(priv, flow, error);
2689 * DPDK flow callback called to verify given attribute, pattern and actions.
2691 * @param dev Pointer to the device.
2692 * @param attr Pointer to the flow attribute.
2693 * @param pattern Pointer to the flow pattern.
2694 * @param actions Pointer to the flow actions.
2695 * @param error Pointer to the flow error.
2696 * @returns 0 on success, negative value otherwise.
2699 mrvl_flow_validate(struct rte_eth_dev *dev,
2700 const struct rte_flow_attr *attr,
2701 const struct rte_flow_item pattern[],
2702 const struct rte_flow_action actions[],
2703 struct rte_flow_error *error)
2705 static struct rte_flow *flow;
2707 flow = mrvl_flow_create(dev, attr, pattern, actions, error);
2711 mrvl_flow_destroy(dev, flow, error);
2717 * DPDK flow flush callback called when flows are to be flushed.
2719 * @param dev Pointer to the device.
2720 * @param error Pointer to the flow error.
2721 * @returns 0 in case of success, negative value otherwise.
2724 mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2726 struct mrvl_priv *priv = dev->data->dev_private;
2728 while (!LIST_EMPTY(&priv->flows)) {
2729 struct rte_flow *flow = LIST_FIRST(&priv->flows);
2730 int ret = mrvl_flow_remove(priv, flow, error);
2734 LIST_REMOVE(flow, next);
2742 * DPDK flow isolate callback called to isolate port.
2744 * @param dev Pointer to the device.
2745 * @param enable Pass 0/1 to disable/enable port isolation.
2746 * @param error Pointer to the flow error.
2747 * @returns 0 in case of success, negative value otherwise.
2750 mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
2751 struct rte_flow_error *error)
2753 struct mrvl_priv *priv = dev->data->dev_private;
2755 if (dev->data->dev_started) {
2756 rte_flow_error_set(error, EBUSY,
2757 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2758 NULL, "Port must be stopped first\n");
2762 priv->isolated = enable;
2767 const struct rte_flow_ops mrvl_flow_ops = {
2768 .validate = mrvl_flow_validate,
2769 .create = mrvl_flow_create,
2770 .destroy = mrvl_flow_destroy,
2771 .flush = mrvl_flow_flush,
2772 .isolate = mrvl_flow_isolate