1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Marvell International Ltd.
3 * Copyright(c) 2018 Semihalf.
8 #include <rte_flow_driver.h>
9 #include <rte_malloc.h>
12 #include <arpa/inet.h>
18 #include "mrvl_ethdev.h"
20 #include "env/mv_common.h" /* for BIT() */
22 /** Number of rules in the classifier table. */
23 #define MRVL_CLS_MAX_NUM_RULES 20
25 /** Size of the classifier key and mask strings. */
/* Keys and masks are stored as text (e.g. "00:11:22:33:44:55", "0xffff").
 * NOTE(review): inet_ntop(AF_INET6) may need up to INET6_ADDRSTRLEN (46)
 * bytes; 40 could be too small for the longest IPv6 textual forms -- confirm.
 */
26 #define MRVL_CLS_STR_SIZE_MAX 40
28 /** Parsed fields in processed rte_flow_item. */
/* Bit flags ORed into struct rte_flow::pattern as items are parsed; the
 * combination selects the classifier table key layout for the rule. */
29 enum mrvl_parsed_fields {
37 F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */
44 F_IP6_TC = BIT(10), /* not supported by MUSDK yet */
48 F_IP6_NEXT_HDR = BIT(14),
50 F_TCP_SPORT = BIT(15),
51 F_TCP_DPORT = BIT(16),
53 F_UDP_SPORT = BIT(17),
54 F_UDP_DPORT = BIT(18),
57 /** PMD-specific definition of a flow rule handle. */
59 LIST_ENTRY(rte_flow) next; /* linkage on the per-port list of flows */
61 enum mrvl_parsed_fields pattern; /* OR of F_* bits parsed from the items */
63 struct pp2_cls_tbl_rule rule; /* MUSDK classifier rule (key/mask fields) */
64 struct pp2_cls_cos_desc cos; /* class-of-service (target queue) */
65 struct pp2_cls_tbl_action action; /* action applied on rule hit */
/* Supported flow-pattern skeletons. Each table lists the ordered item
 * types of one accepted pattern and is terminated by ITEM_TYPE_END;
 * VOID items are skipped separately by mrvl_next_item(). */
68 static const enum rte_flow_item_type pattern_eth[] = {
69 RTE_FLOW_ITEM_TYPE_ETH,
70 RTE_FLOW_ITEM_TYPE_END
73 static const enum rte_flow_item_type pattern_eth_vlan[] = {
74 RTE_FLOW_ITEM_TYPE_ETH,
75 RTE_FLOW_ITEM_TYPE_VLAN,
76 RTE_FLOW_ITEM_TYPE_END
79 static const enum rte_flow_item_type pattern_eth_vlan_ip[] = {
80 RTE_FLOW_ITEM_TYPE_ETH,
81 RTE_FLOW_ITEM_TYPE_VLAN,
82 RTE_FLOW_ITEM_TYPE_IPV4,
83 RTE_FLOW_ITEM_TYPE_END
86 static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = {
87 RTE_FLOW_ITEM_TYPE_ETH,
88 RTE_FLOW_ITEM_TYPE_VLAN,
89 RTE_FLOW_ITEM_TYPE_IPV6,
90 RTE_FLOW_ITEM_TYPE_END
93 static const enum rte_flow_item_type pattern_eth_ip4[] = {
94 RTE_FLOW_ITEM_TYPE_ETH,
95 RTE_FLOW_ITEM_TYPE_IPV4,
96 RTE_FLOW_ITEM_TYPE_END
99 static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = {
100 RTE_FLOW_ITEM_TYPE_ETH,
101 RTE_FLOW_ITEM_TYPE_IPV4,
102 RTE_FLOW_ITEM_TYPE_TCP,
103 RTE_FLOW_ITEM_TYPE_END
106 static const enum rte_flow_item_type pattern_eth_ip4_udp[] = {
107 RTE_FLOW_ITEM_TYPE_ETH,
108 RTE_FLOW_ITEM_TYPE_IPV4,
109 RTE_FLOW_ITEM_TYPE_UDP,
110 RTE_FLOW_ITEM_TYPE_END
113 static const enum rte_flow_item_type pattern_eth_ip6[] = {
114 RTE_FLOW_ITEM_TYPE_ETH,
115 RTE_FLOW_ITEM_TYPE_IPV6,
116 RTE_FLOW_ITEM_TYPE_END
119 static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = {
120 RTE_FLOW_ITEM_TYPE_ETH,
121 RTE_FLOW_ITEM_TYPE_IPV6,
122 RTE_FLOW_ITEM_TYPE_TCP,
123 RTE_FLOW_ITEM_TYPE_END
126 static const enum rte_flow_item_type pattern_eth_ip6_udp[] = {
127 RTE_FLOW_ITEM_TYPE_ETH,
128 RTE_FLOW_ITEM_TYPE_IPV6,
129 RTE_FLOW_ITEM_TYPE_UDP,
130 RTE_FLOW_ITEM_TYPE_END
133 static const enum rte_flow_item_type pattern_vlan[] = {
134 RTE_FLOW_ITEM_TYPE_VLAN,
135 RTE_FLOW_ITEM_TYPE_END
138 static const enum rte_flow_item_type pattern_vlan_ip[] = {
139 RTE_FLOW_ITEM_TYPE_VLAN,
140 RTE_FLOW_ITEM_TYPE_IPV4,
141 RTE_FLOW_ITEM_TYPE_END
144 static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = {
145 RTE_FLOW_ITEM_TYPE_VLAN,
146 RTE_FLOW_ITEM_TYPE_IPV4,
147 RTE_FLOW_ITEM_TYPE_TCP,
148 RTE_FLOW_ITEM_TYPE_END
151 static const enum rte_flow_item_type pattern_vlan_ip_udp[] = {
152 RTE_FLOW_ITEM_TYPE_VLAN,
153 RTE_FLOW_ITEM_TYPE_IPV4,
154 RTE_FLOW_ITEM_TYPE_UDP,
155 RTE_FLOW_ITEM_TYPE_END
158 static const enum rte_flow_item_type pattern_vlan_ip6[] = {
159 RTE_FLOW_ITEM_TYPE_VLAN,
160 RTE_FLOW_ITEM_TYPE_IPV6,
161 RTE_FLOW_ITEM_TYPE_END
164 static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = {
165 RTE_FLOW_ITEM_TYPE_VLAN,
166 RTE_FLOW_ITEM_TYPE_IPV6,
167 RTE_FLOW_ITEM_TYPE_TCP,
168 RTE_FLOW_ITEM_TYPE_END
171 static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = {
172 RTE_FLOW_ITEM_TYPE_VLAN,
173 RTE_FLOW_ITEM_TYPE_IPV6,
174 RTE_FLOW_ITEM_TYPE_UDP,
175 RTE_FLOW_ITEM_TYPE_END
178 static const enum rte_flow_item_type pattern_ip[] = {
179 RTE_FLOW_ITEM_TYPE_IPV4,
180 RTE_FLOW_ITEM_TYPE_END
183 static const enum rte_flow_item_type pattern_ip6[] = {
184 RTE_FLOW_ITEM_TYPE_IPV6,
185 RTE_FLOW_ITEM_TYPE_END
188 static const enum rte_flow_item_type pattern_ip_tcp[] = {
189 RTE_FLOW_ITEM_TYPE_IPV4,
190 RTE_FLOW_ITEM_TYPE_TCP,
191 RTE_FLOW_ITEM_TYPE_END
194 static const enum rte_flow_item_type pattern_ip6_tcp[] = {
195 RTE_FLOW_ITEM_TYPE_IPV6,
196 RTE_FLOW_ITEM_TYPE_TCP,
197 RTE_FLOW_ITEM_TYPE_END
200 static const enum rte_flow_item_type pattern_ip_udp[] = {
201 RTE_FLOW_ITEM_TYPE_IPV4,
202 RTE_FLOW_ITEM_TYPE_UDP,
203 RTE_FLOW_ITEM_TYPE_END
206 static const enum rte_flow_item_type pattern_ip6_udp[] = {
207 RTE_FLOW_ITEM_TYPE_IPV6,
208 RTE_FLOW_ITEM_TYPE_UDP,
209 RTE_FLOW_ITEM_TYPE_END
212 static const enum rte_flow_item_type pattern_tcp[] = {
213 RTE_FLOW_ITEM_TYPE_TCP,
214 RTE_FLOW_ITEM_TYPE_END
217 static const enum rte_flow_item_type pattern_udp[] = {
218 RTE_FLOW_ITEM_TYPE_UDP,
219 RTE_FLOW_ITEM_TYPE_END
/* Masks applied to header fields before building textual keys.
 * NOTE(review): 802.1Q PCP occupies TCI bits 15:13 (mask 0xe000);
 * 0x7000 covers bits 14:12 (two PCP bits plus DEI) -- confirm intended. */
222 #define MRVL_VLAN_ID_MASK 0x0fff
223 #define MRVL_VLAN_PRI_MASK 0x7000
224 #define MRVL_IPV4_DSCP_MASK 0xfc
225 #define MRVL_IPV4_ADDR_MASK 0xffffffff
226 #define MRVL_IPV6_FLOW_MASK 0x0fffff
229 * Given a flow item, return the next non-void one.
231 * @param items Pointer to the item in the table.
232 * @returns Next not-void item, NULL otherwise.
234 static const struct rte_flow_item *
235 mrvl_next_item(const struct rte_flow_item *items)
237 const struct rte_flow_item *item = items;
/* VOID items are placeholders in the pattern and carry no match data. */
239 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
240 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
248 * Allocate memory for classifier rule key and mask fields.
250 * @param field Pointer to the classifier rule.
251 * @returns 0 in case of success, negative value otherwise.
254 mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
256 unsigned int id = rte_socket_id();
/* Allocate on the caller's NUMA node; zeroed so partially-filled
 * strings are always NUL-terminated. */
258 field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
262 field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
/* Mask allocation failed: release the key to avoid a leak.
 * NOTE(review): several mrvl_parse_* callers ignore this function's
 * return value; on failure key/mask stay NULL -- verify callers. */
268 rte_free(field->key);
276 * Free memory allocated for classifier rule key and mask fields.
278 * @param field Pointer to the classifier rule.
281 mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
/* rte_free(NULL) is a no-op, so unallocated fields are safe here. */
283 rte_free(field->key);
284 rte_free(field->mask);
290 * Free memory allocated for all classifier rule key and mask fields.
292 * @param rule Pointer to the classifier table rule.
295 mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
299 for (i = 0; i < rule->num_fields; i++)
300 mrvl_free_key_mask(&rule->fields[i]);
/* Reset the count so the rule can be safely re-populated or re-freed. */
301 rule->num_fields = 0;
305 * Initialize rte flow item parsing.
307 * @param item Pointer to the flow item.
308 * @param spec_ptr Pointer to the specific item pointer.
309 * @param mask_ptr Pointer to the specific item's mask pointer.
310 * @def_mask Pointer to the default mask.
311 * @size Size of the flow item.
312 * @error Pointer to the rte flow error.
313 * @returns 0 in case of success, negative value otherwise.
316 mrvl_parse_init(const struct rte_flow_item *item,
317 const void **spec_ptr,
318 const void **mask_ptr,
319 const void *def_mask,
321 struct rte_flow_error *error)
/* All-zero reference buffer used to detect "empty" last values below. */
328 memset(zeros, 0, size);
331 rte_flow_error_set(error, EINVAL,
332 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
/* "last"/"mask" without "spec" is meaningless for a match. */
337 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
338 rte_flow_error_set(error, EINVAL,
339 RTE_FLOW_ERROR_TYPE_ITEM, item,
340 "Mask or last is set without spec\n");
345 * If "mask" is not set, default mask is used,
346 * but if default mask is NULL, "mask" should be set.
348 if (item->mask == NULL) {
349 if (def_mask == NULL) {
350 rte_flow_error_set(error, EINVAL,
351 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
352 "Mask should be specified\n");
356 mask = (const uint8_t *)def_mask;
358 mask = (const uint8_t *)item->mask;
361 spec = (const uint8_t *)item->spec;
362 last = (const uint8_t *)item->last;
365 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
366 NULL, "Spec should be specified\n");
371 * If field values in "last" are either 0 or equal to the corresponding
372 * values in "spec" then they are ignored.
/* A genuine spec..last range cannot be expressed in the classifier. */
375 !memcmp(last, zeros, size) &&
376 memcmp(last, spec, size) != 0) {
377 rte_flow_error_set(error, ENOTSUP,
378 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
379 "Ranging is not supported\n");
390 * Parse the eth flow item.
392 * This will create classifier rule that matches either destination or source
395 * @param spec Pointer to the specific flow item.
396 * @param mask Pointer to the specific flow item's mask.
397 * @param parse_dst 1 to match the destination MAC, 0 for the source.
 * @param flow Pointer to the flow.
398 * @return 0 in case of success, negative error value otherwise.
401 mrvl_parse_mac(const struct rte_flow_item_eth *spec,
402 const struct rte_flow_item_eth *mask,
403 int parse_dst, struct rte_flow *flow)
405 struct pp2_cls_rule_key_field *key_field;
406 const uint8_t *k, *m;
/* The MUSDK rule has a fixed-size field table; refuse when full. */
408 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
412 k = spec->dst.addr_bytes;
413 m = mask->dst.addr_bytes;
415 flow->pattern |= F_DMAC;
417 k = spec->src.addr_bytes;
418 m = mask->src.addr_bytes;
420 flow->pattern |= F_SMAC;
423 key_field = &flow->rule.fields[flow->rule.num_fields];
424 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
/* MUSDK takes keys/masks as text; render MAC in colon notation. */
427 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
428 "%02x:%02x:%02x:%02x:%02x:%02x",
429 k[0], k[1], k[2], k[3], k[4], k[5]);
431 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
432 "%02x:%02x:%02x:%02x:%02x:%02x",
433 m[0], m[1], m[2], m[3], m[4], m[5]);
435 flow->rule.num_fields += 1;
441 * Helper for parsing the eth flow item destination mac address.
443 * @param spec Pointer to the specific flow item.
444 * @param mask Pointer to the specific flow item's mask.
445 * @param flow Pointer to the flow.
446 * @return 0 in case of success, negative error value otherwise.
449 mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
450 const struct rte_flow_item_eth *mask,
451 struct rte_flow *flow)
/* parse_dst = 1: destination MAC. */
453 return mrvl_parse_mac(spec, mask, 1, flow);
457 * Helper for parsing the eth flow item source mac address.
459 * @param spec Pointer to the specific flow item.
460 * @param mask Pointer to the specific flow item's mask.
461 * @param flow Pointer to the flow.
462 * @return 0 in case of success, negative error value otherwise.
465 mrvl_parse_smac(const struct rte_flow_item_eth *spec,
466 const struct rte_flow_item_eth *mask,
467 struct rte_flow *flow)
/* parse_dst = 0: source MAC. */
469 return mrvl_parse_mac(spec, mask, 0, flow);
473 * Parse the ether type field of the eth flow item.
475 * @param spec Pointer to the specific flow item.
476 * @param mask Pointer to the specific flow item's mask (unused; only an
 *             exact-match key is emitted).
477 * @param flow Pointer to the flow.
478 * @return 0 in case of success, negative error value otherwise.
481 mrvl_parse_type(const struct rte_flow_item_eth *spec,
482 const struct rte_flow_item_eth *mask __rte_unused,
483 struct rte_flow *flow)
485 struct pp2_cls_rule_key_field *key_field;
488 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
491 key_field = &flow->rule.fields[flow->rule.num_fields];
492 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
/* EtherType is big-endian on the wire; key is its host-order decimal. */
495 k = rte_be_to_cpu_16(spec->type);
496 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
498 flow->pattern |= F_TYPE;
499 flow->rule.num_fields += 1;
505 * Parse the vid field of the vlan rte flow item.
507 * This will create classifier rule that matches vid.
509 * @param spec Pointer to the specific flow item.
510 * @param mask Pointer to the specific flow item's mask.
511 * @param flow Pointer to the flow.
512 * @return 0 in case of success, negative error value otherwise.
515 mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
516 const struct rte_flow_item_vlan *mask __rte_unused,
517 struct rte_flow *flow)
519 struct pp2_cls_rule_key_field *key_field;
522 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
525 key_field = &flow->rule.fields[flow->rule.num_fields];
526 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
/* VID is the low 12 bits of the (big-endian) TCI. */
529 k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
530 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
532 flow->pattern |= F_VLAN_ID;
533 flow->rule.num_fields += 1;
539 * Parse the pri field of the vlan rte flow item.
541 * This will create classifier rule that matches pri.
543 * @param spec Pointer to the specific flow item.
544 * @param mask Pointer to the specific flow item's mask.
545 * @param flow Pointer to the flow.
546 * @return 0 in case of success, negative error value otherwise.
549 mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
550 const struct rte_flow_item_vlan *mask __rte_unused,
551 struct rte_flow *flow)
553 struct pp2_cls_rule_key_field *key_field;
556 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
559 key_field = &flow->rule.fields[flow->rule.num_fields];
560 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
/* NOTE(review): (tci & 0x7000) >> 13 keeps only TCI bits 14:13; the
 * standard PCP extraction is (tci & 0xe000) >> 13 -- confirm intended. */
563 k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
564 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
566 flow->pattern |= F_VLAN_PRI;
567 flow->rule.num_fields += 1;
573 * Parse the dscp field of the ipv4 rte flow item.
575 * This will create classifier rule that matches dscp field.
577 * @param spec Pointer to the specific flow item.
578 * @param mask Pointer to the specific flow item's mask.
579 * @param flow Pointer to the flow.
580 * @return 0 in case of success, negative error value otherwise.
583 mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
584 const struct rte_flow_item_ipv4 *mask,
585 struct rte_flow *flow)
587 struct pp2_cls_rule_key_field *key_field;
590 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
593 key_field = &flow->rule.fields[flow->rule.num_fields];
594 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
/* DSCP is the upper 6 bits of the ToS byte (ECN bits dropped). */
597 k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
598 m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
599 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
600 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
602 flow->pattern |= F_IP4_TOS;
603 flow->rule.num_fields += 1;
609 * Parse either source or destination ip addresses of the ipv4 flow item.
611 * This will create classifier rule that matches either destination
612 * or source ip field.
614 * @param spec Pointer to the specific flow item.
615 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst 1 to match the destination address, 0 for the source.
616 * @param flow Pointer to the flow.
617 * @return 0 in case of success, negative error value otherwise.
620 mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
621 const struct rte_flow_item_ipv4 *mask,
622 int parse_dst, struct rte_flow *flow)
624 struct pp2_cls_rule_key_field *key_field;
628 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
631 memset(&k, 0, sizeof(k));
/* Address stays big-endian for inet_ntop(); mask is printed as hex. */
633 k.s_addr = spec->hdr.dst_addr;
634 m = rte_be_to_cpu_32(mask->hdr.dst_addr);
636 flow->pattern |= F_IP4_DIP;
638 k.s_addr = spec->hdr.src_addr;
639 m = rte_be_to_cpu_32(mask->hdr.src_addr);
641 flow->pattern |= F_IP4_SIP;
644 key_field = &flow->rule.fields[flow->rule.num_fields];
645 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
648 inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
649 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
651 flow->rule.num_fields += 1;
657 * Helper for parsing destination ip of the ipv4 flow item.
659 * @param spec Pointer to the specific flow item.
660 * @param mask Pointer to the specific flow item's mask.
661 * @param flow Pointer to the flow.
662 * @return 0 in case of success, negative error value otherwise.
665 mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
666 const struct rte_flow_item_ipv4 *mask,
667 struct rte_flow *flow)
/* parse_dst = 1: destination address. */
669 return mrvl_parse_ip4_addr(spec, mask, 1, flow);
673 * Helper for parsing source ip of the ipv4 flow item.
675 * @param spec Pointer to the specific flow item.
676 * @param mask Pointer to the specific flow item's mask.
677 * @param flow Pointer to the flow.
678 * @return 0 in case of success, negative error value otherwise.
681 mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
682 const struct rte_flow_item_ipv4 *mask,
683 struct rte_flow *flow)
/* parse_dst = 0: source address. */
685 return mrvl_parse_ip4_addr(spec, mask, 0, flow);
689 * Parse the proto field of the ipv4 rte flow item.
691 * This will create classifier rule that matches proto field.
693 * @param spec Pointer to the specific flow item.
694 * @param mask Pointer to the specific flow item's mask (unused; exact match).
695 * @param flow Pointer to the flow.
696 * @return 0 in case of success, negative error value otherwise.
699 mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
700 const struct rte_flow_item_ipv4 *mask __rte_unused,
701 struct rte_flow *flow)
703 struct pp2_cls_rule_key_field *key_field;
704 uint8_t k = spec->hdr.next_proto_id;
706 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
709 key_field = &flow->rule.fields[flow->rule.num_fields];
710 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
713 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
715 flow->pattern |= F_IP4_PROTO;
716 flow->rule.num_fields += 1;
722 * Parse either source or destination ip addresses of the ipv6 rte flow item.
724 * This will create classifier rule that matches either destination
725 * or source ip field.
727 * @param spec Pointer to the specific flow item.
728 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst 1 to match the destination address, 0 for the source.
729 * @param flow Pointer to the flow.
730 * @return 0 in case of success, negative error value otherwise.
733 mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
734 const struct rte_flow_item_ipv6 *mask,
735 int parse_dst, struct rte_flow *flow)
737 struct pp2_cls_rule_key_field *key_field;
738 int size = sizeof(spec->hdr.dst_addr);
739 struct in6_addr k, m;
741 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
744 memset(&k, 0, sizeof(k));
746 memcpy(k.s6_addr, spec->hdr.dst_addr, size);
747 memcpy(m.s6_addr, mask->hdr.dst_addr, size);
749 flow->pattern |= F_IP6_DIP;
751 memcpy(k.s6_addr, spec->hdr.src_addr, size);
752 memcpy(m.s6_addr, mask->hdr.src_addr, size);
754 flow->pattern |= F_IP6_SIP;
757 key_field = &flow->rule.fields[flow->rule.num_fields];
758 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
759 key_field->size = 16;
/* NOTE(review): return values unchecked and MRVL_CLS_STR_SIZE_MAX (40)
 * is below INET6_ADDRSTRLEN (46); long textual forms may fail -- verify. */
761 inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
762 inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
764 flow->rule.num_fields += 1;
770 * Helper for parsing destination ip of the ipv6 flow item.
772 * @param spec Pointer to the specific flow item.
773 * @param mask Pointer to the specific flow item's mask.
774 * @param flow Pointer to the flow.
775 * @return 0 in case of success, negative error value otherwise.
778 mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
779 const struct rte_flow_item_ipv6 *mask,
780 struct rte_flow *flow)
/* parse_dst = 1: destination address. */
782 return mrvl_parse_ip6_addr(spec, mask, 1, flow);
786 * Helper for parsing source ip of the ipv6 flow item.
788 * @param spec Pointer to the specific flow item.
789 * @param mask Pointer to the specific flow item's mask.
790 * @param flow Pointer to the flow.
791 * @return 0 in case of success, negative error value otherwise.
794 mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
795 const struct rte_flow_item_ipv6 *mask,
796 struct rte_flow *flow)
/* parse_dst = 0: source address. */
798 return mrvl_parse_ip6_addr(spec, mask, 0, flow);
802 * Parse the flow label of the ipv6 flow item.
804 * This will create classifier rule that matches flow field.
806 * @param spec Pointer to the specific flow item.
807 * @param mask Pointer to the specific flow item's mask.
808 * @param flow Pointer to the flow.
809 * @return 0 in case of success, negative error value otherwise.
812 mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
813 const struct rte_flow_item_ipv6 *mask,
814 struct rte_flow *flow)
816 struct pp2_cls_rule_key_field *key_field;
/* Flow label is the low 20 bits of the (big-endian) vtc_flow word. */
817 uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
818 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
820 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
823 key_field = &flow->rule.fields[flow->rule.num_fields];
824 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
827 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
828 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
830 flow->pattern |= F_IP6_FLOW;
831 flow->rule.num_fields += 1;
837 * Parse the next header of the ipv6 flow item.
839 * This will create classifier rule that matches next header field.
841 * @param spec Pointer to the specific flow item.
842 * @param mask Pointer to the specific flow item's mask (unused; exact match).
843 * @param flow Pointer to the flow.
844 * @return 0 in case of success, negative error value otherwise.
847 mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
848 const struct rte_flow_item_ipv6 *mask __rte_unused,
849 struct rte_flow *flow)
851 struct pp2_cls_rule_key_field *key_field;
852 uint8_t k = spec->hdr.proto;
854 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
857 key_field = &flow->rule.fields[flow->rule.num_fields];
858 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
861 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
863 flow->pattern |= F_IP6_NEXT_HDR;
864 flow->rule.num_fields += 1;
870 * Parse destination or source port of the tcp flow item.
872 * This will create classifier rule that matches either destination or
875 * @param spec Pointer to the specific flow item.
876 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst 1 to match the destination port, 0 for the source.
877 * @param flow Pointer to the flow.
878 * @return 0 in case of success, negative error value otherwise.
881 mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
882 const struct rte_flow_item_tcp *mask __rte_unused,
883 int parse_dst, struct rte_flow *flow)
885 struct pp2_cls_rule_key_field *key_field;
888 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
891 key_field = &flow->rule.fields[flow->rule.num_fields];
892 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
896 k = rte_be_to_cpu_16(spec->hdr.dst_port);
898 flow->pattern |= F_TCP_DPORT;
900 k = rte_be_to_cpu_16(spec->hdr.src_port);
902 flow->pattern |= F_TCP_SPORT;
905 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
907 flow->rule.num_fields += 1;
913 * Helper for parsing the tcp source port of the tcp flow item.
915 * @param spec Pointer to the specific flow item.
916 * @param mask Pointer to the specific flow item's mask.
917 * @param flow Pointer to the flow.
918 * @return 0 in case of success, negative error value otherwise.
921 mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
922 const struct rte_flow_item_tcp *mask,
923 struct rte_flow *flow)
/* parse_dst = 0: source port. */
925 return mrvl_parse_tcp_port(spec, mask, 0, flow);
929 * Helper for parsing the tcp destination port of the tcp flow item.
931 * @param spec Pointer to the specific flow item.
932 * @param mask Pointer to the specific flow item's mask.
933 * @param flow Pointer to the flow.
934 * @return 0 in case of success, negative error value otherwise.
937 mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
938 const struct rte_flow_item_tcp *mask,
939 struct rte_flow *flow)
/* parse_dst = 1: destination port. */
941 return mrvl_parse_tcp_port(spec, mask, 1, flow);
945 * Parse destination or source port of the udp flow item.
947 * This will create classifier rule that matches either destination or
950 * @param spec Pointer to the specific flow item.
951 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst 1 to match the destination port, 0 for the source.
952 * @param flow Pointer to the flow.
953 * @return 0 in case of success, negative error value otherwise.
956 mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
957 const struct rte_flow_item_udp *mask __rte_unused,
958 int parse_dst, struct rte_flow *flow)
960 struct pp2_cls_rule_key_field *key_field;
963 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
966 key_field = &flow->rule.fields[flow->rule.num_fields];
967 mrvl_alloc_key_mask(key_field); /* NOTE(review): return value ignored */
971 k = rte_be_to_cpu_16(spec->hdr.dst_port);
973 flow->pattern |= F_UDP_DPORT;
975 k = rte_be_to_cpu_16(spec->hdr.src_port);
977 flow->pattern |= F_UDP_SPORT;
980 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
982 flow->rule.num_fields += 1;
988 * Helper for parsing the udp source port of the udp flow item.
990 * @param spec Pointer to the specific flow item.
991 * @param mask Pointer to the specific flow item's mask.
992 * @param flow Pointer to the flow.
993 * @return 0 in case of success, negative error value otherwise.
996 mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
997 const struct rte_flow_item_udp *mask,
998 struct rte_flow *flow)
/* parse_dst = 0: source port. */
1000 return mrvl_parse_udp_port(spec, mask, 0, flow);
1004 * Helper for parsing the udp destination port of the udp flow item.
1006 * @param spec Pointer to the specific flow item.
1007 * @param mask Pointer to the specific flow item's mask.
1008 * @param flow Pointer to the flow.
1009 * @return 0 in case of success, negative error value otherwise.
1012 mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
1013 const struct rte_flow_item_udp *mask,
1014 struct rte_flow *flow)
/* parse_dst = 1: destination port. */
1016 return mrvl_parse_udp_port(spec, mask, 1, flow);
1020 * Parse eth flow item.
1022 * @param item Pointer to the flow item.
1023 * @param flow Pointer to the flow.
1024 * @param error Pointer to the flow error.
1025 * @param fields Pointer to the parsed fields enum.
1026 * @returns 0 on success, negative value otherwise.
1029 mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
1030 struct rte_flow_error *error)
1032 const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
1033 struct ether_addr zero;
1036 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1037 &rte_flow_item_eth_mask,
1038 sizeof(struct rte_flow_item_eth), error);
/* A sub-field is matched only if its mask has at least one set bit. */
1042 memset(&zero, 0, sizeof(zero));
1044 if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
1045 ret = mrvl_parse_dmac(spec, mask, flow);
1050 if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
1051 ret = mrvl_parse_smac(spec, mask, flow);
1057 RTE_LOG(WARNING, PMD, "eth type mask is ignored\n");
1058 ret = mrvl_parse_type(spec, mask, flow);
1065 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1066 "Reached maximum number of fields in cls tbl key\n");
1071 * Parse vlan flow item.
1073 * @param item Pointer to the flow item.
1074 * @param flow Pointer to the flow.
1075 * @param error Pointer to the flow error.
1076 * @param fields Pointer to the parsed fields enum.
1077 * @returns 0 on success, negative value otherwise.
1080 mrvl_parse_vlan(const struct rte_flow_item *item,
1081 struct rte_flow *flow,
1082 struct rte_flow_error *error)
1084 const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
1088 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1089 &rte_flow_item_vlan_mask,
1090 sizeof(struct rte_flow_item_vlan), error);
1095 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1096 NULL, "Not supported by classifier\n")
1100 m = rte_be_to_cpu_16(mask->tci);
/* Only exact-match keys are supported; a partial mask is ignored. */
1101 if (m & MRVL_VLAN_ID_MASK) {
1102 RTE_LOG(WARNING, PMD, "vlan id mask is ignored\n");
1103 ret = mrvl_parse_vlan_id(spec, mask, flow);
1108 if (m & MRVL_VLAN_PRI_MASK) {
1109 RTE_LOG(WARNING, PMD, "vlan pri mask is ignored\n");
1110 ret = mrvl_parse_vlan_pri(spec, mask, flow);
1117 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1118 "Reached maximum number of fields in cls tbl key\n");
1123 * Parse ipv4 flow item.
1125 * @param item Pointer to the flow item.
1126 * @param flow Pointer to the flow.
1127 * @param error Pointer to the flow error.
1128 * @param fields Pointer to the parsed fields enum.
1129 * @returns 0 on success, negative value otherwise.
1132 mrvl_parse_ip4(const struct rte_flow_item *item,
1133 struct rte_flow *flow,
1134 struct rte_flow_error *error)
1136 const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
1139 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1140 &rte_flow_item_ipv4_mask,
1141 sizeof(struct rte_flow_item_ipv4), error);
/* Reject masks on header fields the classifier cannot key on. */
1145 if (mask->hdr.version_ihl ||
1146 mask->hdr.total_length ||
1147 mask->hdr.packet_id ||
1148 mask->hdr.fragment_offset ||
1149 mask->hdr.time_to_live ||
1150 mask->hdr.hdr_checksum) {
1151 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1152 NULL, "Not supported by classifier\n");
1156 if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
1157 ret = mrvl_parse_ip4_dscp(spec, mask, flow);
1162 if (mask->hdr.src_addr) {
1163 ret = mrvl_parse_ip4_sip(spec, mask, flow);
1168 if (mask->hdr.dst_addr) {
1169 ret = mrvl_parse_ip4_dip(spec, mask, flow);
1174 if (mask->hdr.next_proto_id) {
1175 RTE_LOG(WARNING, PMD, "next proto id mask is ignored\n");
1176 ret = mrvl_parse_ip4_proto(spec, mask, flow);
1183 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1184 "Reached maximum number of fields in cls tbl key\n");
1189 * Parse ipv6 flow item.
1191 * @param item Pointer to the flow item.
1192 * @param flow Pointer to the flow.
1193 * @param error Pointer to the flow error.
1194 * @param fields Pointer to the parsed fields enum.
1195 * @returns 0 on success, negative value otherwise.
1198 mrvl_parse_ip6(const struct rte_flow_item *item,
1199 struct rte_flow *flow,
1200 struct rte_flow_error *error)
1202 const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
1203 struct ipv6_hdr zero;
1207 ret = mrvl_parse_init(item, (const void **)&spec,
1208 (const void **)&mask,
1209 &rte_flow_item_ipv6_mask,
1210 sizeof(struct rte_flow_item_ipv6),
/* Zeroed header used as the "no mask bits set" reference below. */
1215 memset(&zero, 0, sizeof(zero));
/* Reject masks on header fields the classifier cannot key on. */
1217 if (mask->hdr.payload_len ||
1218 mask->hdr.hop_limits) {
1219 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1220 NULL, "Not supported by classifier\n");
1224 if (memcmp(mask->hdr.src_addr,
1225 zero.src_addr, sizeof(mask->hdr.src_addr))) {
1226 ret = mrvl_parse_ip6_sip(spec, mask, flow);
1231 if (memcmp(mask->hdr.dst_addr,
1232 zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
1233 ret = mrvl_parse_ip6_dip(spec, mask, flow);
1238 flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
1240 ret = mrvl_parse_ip6_flow(spec, mask, flow);
1245 if (mask->hdr.proto) {
1246 RTE_LOG(WARNING, PMD, "next header mask is ignored\n");
1247 ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
1254 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1255 "Reached maximum number of fields in cls tbl key\n");
1260 * Parse tcp flow item.
1262 * @param item Pointer to the flow item.
1263 * @param flow Pointer to the flow.
1264 * @param error Pointer to the flow error.
1265 * @param fields Pointer to the parsed parsed fields enum.
1266 * @returns 0 on success, negative value otherwise.
1269 mrvl_parse_tcp(const struct rte_flow_item *item,
1270 struct rte_flow *flow,
1271 struct rte_flow_error *error)
1273 const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1276 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1277 &rte_flow_item_ipv4_mask,
1278 sizeof(struct rte_flow_item_ipv4), error);
1282 if (mask->hdr.sent_seq ||
1283 mask->hdr.recv_ack ||
1284 mask->hdr.data_off ||
1285 mask->hdr.tcp_flags ||
1288 mask->hdr.tcp_urp) {
1289 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1290 NULL, "Not supported by classifier\n");
1294 if (mask->hdr.src_port) {
1295 RTE_LOG(WARNING, PMD, "tcp sport mask is ignored\n");
1296 ret = mrvl_parse_tcp_sport(spec, mask, flow);
1301 if (mask->hdr.dst_port) {
1302 RTE_LOG(WARNING, PMD, "tcp dport mask is ignored\n");
1303 ret = mrvl_parse_tcp_dport(spec, mask, flow);
1310 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1311 "Reached maximum number of fields in cls tbl key\n");
1316 * Parse udp flow item.
1318 * @param item Pointer to the flow item.
1319 * @param flow Pointer to the flow.
1320 * @param error Pointer to the flow error.
1321 * @param fields Pointer to the parsed parsed fields enum.
1322 * @returns 0 on success, negative value otherwise.
1325 mrvl_parse_udp(const struct rte_flow_item *item,
1326 struct rte_flow *flow,
1327 struct rte_flow_error *error)
1329 const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1332 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1333 &rte_flow_item_ipv4_mask,
1334 sizeof(struct rte_flow_item_ipv4), error);
1338 if (mask->hdr.dgram_len ||
1339 mask->hdr.dgram_cksum) {
1340 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1341 NULL, "Not supported by classifier\n");
1345 if (mask->hdr.src_port) {
1346 RTE_LOG(WARNING, PMD, "udp sport mask is ignored\n");
1347 ret = mrvl_parse_udp_sport(spec, mask, flow);
1352 if (mask->hdr.dst_port) {
1353 RTE_LOG(WARNING, PMD, "udp dport mask is ignored\n");
1354 ret = mrvl_parse_udp_dport(spec, mask, flow);
1361 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1362 "Reached maximum number of fields in cls tbl key\n");
1367 * Parse flow pattern composed of the the eth item.
1369 * @param pattern Pointer to the flow pattern table.
1370 * @param flow Pointer to the flow.
1371 * @param error Pointer to the flow error.
1372 * @returns 0 in case of success, negative value otherwise.
1375 mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
1376 struct rte_flow *flow,
1377 struct rte_flow_error *error)
1379 return mrvl_parse_eth(pattern, flow, error);
1383 * Parse flow pattern composed of the eth and vlan items.
1385 * @param pattern Pointer to the flow pattern table.
1386 * @param flow Pointer to the flow.
1387 * @param error Pointer to the flow error.
1388 * @returns 0 in case of success, negative value otherwise.
1391 mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[],
1392 struct rte_flow *flow,
1393 struct rte_flow_error *error)
/* mrvl_next_item presumably skips VOID items to the first real one — TODO confirm. */
1395 const struct rte_flow_item *item = mrvl_next_item(pattern);
1398 ret = mrvl_parse_eth(item, flow, error);
/* Step past the consumed eth item, then skip VOIDs to reach the vlan item. */
1402 item = mrvl_next_item(item + 1);
1404 return mrvl_parse_vlan(item, flow, error);
1408 * Parse flow pattern composed of the eth, vlan and ip4/ip6 items.
1410 * @param pattern Pointer to the flow pattern table.
1411 * @param flow Pointer to the flow.
1412 * @param error Pointer to the flow error.
1413 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1414 * @returns 0 in case of success, negative value otherwise.
1417 mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[],
1418 struct rte_flow *flow,
1419 struct rte_flow_error *error, int ip6)
1421 const struct rte_flow_item *item = mrvl_next_item(pattern);
/* Parse items in sequence: eth, then vlan, then the L3 item. */
1424 ret = mrvl_parse_eth(item, flow, error);
1428 item = mrvl_next_item(item + 1);
1429 ret = mrvl_parse_vlan(item, flow, error);
1433 item = mrvl_next_item(item + 1);
/* Dispatch on the ip6 flag; both parsers share the same contract. */
1435 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1436 mrvl_parse_ip4(item, flow, error);
1440 * Parse flow pattern composed of the eth, vlan and ipv4 items.
1442 * @param pattern Pointer to the flow pattern table.
1443 * @param flow Pointer to the flow.
1444 * @param error Pointer to the flow error.
1445 * @returns 0 in case of success, negative value otherwise.
1448 mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[],
1449 struct rte_flow *flow,
1450 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 0 selects the IPv4 branch. */
1452 return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0);
1456 * Parse flow pattern composed of the eth, vlan and ipv6 items.
1458 * @param pattern Pointer to the flow pattern table.
1459 * @param flow Pointer to the flow.
1460 * @param error Pointer to the flow error.
1461 * @returns 0 in case of success, negative value otherwise.
1464 mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[],
1465 struct rte_flow *flow,
1466 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 1 selects the IPv6 branch. */
1468 return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1);
1472 * Parse flow pattern composed of the eth and ip4/ip6 items.
1474 * @param pattern Pointer to the flow pattern table.
1475 * @param flow Pointer to the flow.
1476 * @param error Pointer to the flow error.
1477 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1478 * @returns 0 in case of success, negative value otherwise.
1481 mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[],
1482 struct rte_flow *flow,
1483 struct rte_flow_error *error, int ip6)
1485 const struct rte_flow_item *item = mrvl_next_item(pattern);
1488 ret = mrvl_parse_eth(item, flow, error);
/* Step past the eth item, then skip VOIDs to reach the L3 item. */
1492 item = mrvl_next_item(item + 1);
1494 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1495 mrvl_parse_ip4(item, flow, error);
1499 * Parse flow pattern composed of the eth and ipv4 items.
1501 * @param pattern Pointer to the flow pattern table.
1502 * @param flow Pointer to the flow.
1503 * @param error Pointer to the flow error.
1504 * @returns 0 in case of success, negative value otherwise.
1507 mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[],
1508 struct rte_flow *flow,
1509 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 0 selects the IPv4 branch. */
1511 return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
1515 * Parse flow pattern composed of the eth and ipv6 items.
1517 * @param pattern Pointer to the flow pattern table.
1518 * @param flow Pointer to the flow.
1519 * @param error Pointer to the flow error.
1520 * @returns 0 in case of success, negative value otherwise.
1523 mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[],
1524 struct rte_flow *flow,
1525 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 1 selects the IPv6 branch. */
1527 return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
1531 * Parse flow pattern composed of the eth, ip4 and tcp/udp items.
1533 * @param pattern Pointer to the flow pattern table.
1534 * @param flow Pointer to the flow.
1535 * @param error Pointer to the flow error.
1536 * @param tcp 1 to parse tcp item, 0 to parse udp item.
1537 * @returns 0 in case of success, negative value otherwise.
1540 mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[],
1541 struct rte_flow *flow,
1542 struct rte_flow_error *error, int tcp)
1544 const struct rte_flow_item *item = mrvl_next_item(pattern);
/* Reuse the eth+ip4 parser for the first two items. */
1547 ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
/* Advance twice — past eth and past ip4 — to land on the L4 item. */
1551 item = mrvl_next_item(item + 1);
1552 item = mrvl_next_item(item + 1);
1555 return mrvl_parse_tcp(item, flow, error);
1557 return mrvl_parse_udp(item, flow, error);
1561 * Parse flow pattern composed of the eth, ipv4 and tcp items.
1563 * @param pattern Pointer to the flow pattern table.
1564 * @param flow Pointer to the flow.
1565 * @param error Pointer to the flow error.
1566 * @returns 0 in case of success, negative value otherwise.
1569 mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[],
1570 struct rte_flow *flow,
1571 struct rte_flow_error *error)
/* Thin wrapper: tcp == 1 selects the TCP branch. */
1573 return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1);
1577 * Parse flow pattern composed of the eth, ipv4 and udp items.
1579 * @param pattern Pointer to the flow pattern table.
1580 * @param flow Pointer to the flow.
1581 * @param error Pointer to the flow error.
1582 * @returns 0 in case of success, negative value otherwise.
1585 mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[],
1586 struct rte_flow *flow,
1587 struct rte_flow_error *error)
/* Thin wrapper: tcp == 0 selects the UDP branch. */
1589 return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0);
1593 * Parse flow pattern composed of the eth, ipv6 and tcp/udp items.
1595 * @param pattern Pointer to the flow pattern table.
1596 * @param flow Pointer to the flow.
1597 * @param error Pointer to the flow error.
1598 * @param tcp 1 to parse tcp item, 0 to parse udp item.
1599 * @returns 0 in case of success, negative value otherwise.
1602 mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[],
1603 struct rte_flow *flow,
1604 struct rte_flow_error *error, int tcp)
1606 const struct rte_flow_item *item = mrvl_next_item(pattern);
/* Reuse the eth+ip6 parser for the first two items. */
1609 ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
/* Advance twice — past eth and past ip6 — to land on the L4 item. */
1613 item = mrvl_next_item(item + 1);
1614 item = mrvl_next_item(item + 1);
1617 return mrvl_parse_tcp(item, flow, error);
1619 return mrvl_parse_udp(item, flow, error);
1623 * Parse flow pattern composed of the eth, ipv6 and tcp items.
1625 * @param pattern Pointer to the flow pattern table.
1626 * @param flow Pointer to the flow.
1627 * @param error Pointer to the flow error.
1628 * @returns 0 in case of success, negative value otherwise.
1631 mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[],
1632 struct rte_flow *flow,
1633 struct rte_flow_error *error)
/* Thin wrapper: tcp == 1 selects the TCP branch. */
1635 return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1);
1639 * Parse flow pattern composed of the eth, ipv6 and udp items.
1641 * @param pattern Pointer to the flow pattern table.
1642 * @param flow Pointer to the flow.
1643 * @param error Pointer to the flow error.
1644 * @returns 0 in case of success, negative value otherwise.
1647 mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[],
1648 struct rte_flow *flow,
1649 struct rte_flow_error *error)
/* Thin wrapper: tcp == 0 selects the UDP branch. */
1651 return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0);
1655 * Parse flow pattern composed of the vlan item.
1657 * @param pattern Pointer to the flow pattern table.
1658 * @param flow Pointer to the flow.
1659 * @param error Pointer to the flow error.
1660 * @returns 0 in case of success, negative value otherwise.
1663 mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[],
1664 struct rte_flow *flow,
1665 struct rte_flow_error *error)
1667 const struct rte_flow_item *item = mrvl_next_item(pattern);
1669 return mrvl_parse_vlan(item, flow, error);
1673 * Parse flow pattern composed of the vlan and ip4/ip6 items.
1675 * @param pattern Pointer to the flow pattern table.
1676 * @param flow Pointer to the flow.
1677 * @param error Pointer to the flow error.
1678 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1679 * @returns 0 in case of success, negative value otherwise.
1682 mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[],
1683 struct rte_flow *flow,
1684 struct rte_flow_error *error, int ip6)
1686 const struct rte_flow_item *item = mrvl_next_item(pattern);
1689 ret = mrvl_parse_vlan(item, flow, error);
/* Step past the vlan item, then skip VOIDs to reach the L3 item. */
1693 item = mrvl_next_item(item + 1);
1695 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1696 mrvl_parse_ip4(item, flow, error);
1700 * Parse flow pattern composed of the vlan and ipv4 items.
1702 * @param pattern Pointer to the flow pattern table.
1703 * @param flow Pointer to the flow.
1704 * @param error Pointer to the flow error.
1705 * @returns 0 in case of success, negative value otherwise.
1708 mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[],
1709 struct rte_flow *flow,
1710 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 0 selects the IPv4 branch. */
1712 return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
1716 * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items.
1718 * @param pattern Pointer to the flow pattern table.
1719 * @param flow Pointer to the flow.
1720 * @param error Pointer to the flow error.
* @param tcp 1 to parse tcp item, 0 to parse udp item.
1721 * @returns 0 in case of success, negative value otherwise.
1724 mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[],
1725 struct rte_flow *flow,
1726 struct rte_flow_error *error, int tcp)
1728 const struct rte_flow_item *item = mrvl_next_item(pattern);
/* Reuse the vlan+ip4 parser for the first two items. */
1731 ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
/* Advance twice — past vlan and past ip4 — to land on the L4 item. */
1735 item = mrvl_next_item(item + 1);
1736 item = mrvl_next_item(item + 1);
1739 return mrvl_parse_tcp(item, flow, error);
1741 return mrvl_parse_udp(item, flow, error);
1745 * Parse flow pattern composed of the vlan, ipv4 and tcp items.
1747 * @param pattern Pointer to the flow pattern table.
1748 * @param flow Pointer to the flow.
1749 * @param error Pointer to the flow error.
1750 * @returns 0 in case of success, negative value otherwise.
1753 mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[],
1754 struct rte_flow *flow,
1755 struct rte_flow_error *error)
/* Thin wrapper: tcp == 1 selects the TCP branch. */
1757 return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1);
1761 * Parse flow pattern composed of the vlan, ipv4 and udp items.
1763 * @param pattern Pointer to the flow pattern table.
1764 * @param flow Pointer to the flow.
1765 * @param error Pointer to the flow error.
1766 * @returns 0 in case of success, negative value otherwise.
1769 mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[],
1770 struct rte_flow *flow,
1771 struct rte_flow_error *error)
/* Thin wrapper: tcp == 0 selects the UDP branch. */
1773 return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0);
1777 * Parse flow pattern composed of the vlan and ipv6 items.
1779 * @param pattern Pointer to the flow pattern table.
1780 * @param flow Pointer to the flow.
1781 * @param error Pointer to the flow error.
1782 * @returns 0 in case of success, negative value otherwise.
1785 mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[],
1786 struct rte_flow *flow,
1787 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 1 selects the IPv6 branch. */
1789 return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
1793 * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items.
1795 * @param pattern Pointer to the flow pattern table.
1796 * @param flow Pointer to the flow.
1797 * @param error Pointer to the flow error.
* @param tcp 1 to parse tcp item, 0 to parse udp item.
1798 * @returns 0 in case of success, negative value otherwise.
1801 mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[],
1802 struct rte_flow *flow,
1803 struct rte_flow_error *error, int tcp)
1805 const struct rte_flow_item *item = mrvl_next_item(pattern);
/* Reuse the vlan+ip6 parser for the first two items. */
1808 ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
/* Advance twice — past vlan and past ip6 — to land on the L4 item. */
1812 item = mrvl_next_item(item + 1);
1813 item = mrvl_next_item(item + 1);
1816 return mrvl_parse_tcp(item, flow, error);
1818 return mrvl_parse_udp(item, flow, error);
1822 * Parse flow pattern composed of the vlan, ipv6 and tcp items.
1824 * @param pattern Pointer to the flow pattern table.
1825 * @param flow Pointer to the flow.
1826 * @param error Pointer to the flow error.
1827 * @returns 0 in case of success, negative value otherwise.
1830 mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[],
1831 struct rte_flow *flow,
1832 struct rte_flow_error *error)
/* Thin wrapper: tcp == 1 selects the TCP branch. */
1834 return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1);
1838 * Parse flow pattern composed of the vlan, ipv6 and udp items.
1840 * @param pattern Pointer to the flow pattern table.
1841 * @param flow Pointer to the flow.
1842 * @param error Pointer to the flow error.
1843 * @returns 0 in case of success, negative value otherwise.
1846 mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[],
1847 struct rte_flow *flow,
1848 struct rte_flow_error *error)
/* Thin wrapper: tcp == 0 selects the UDP branch. */
1850 return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0);
1854 * Parse flow pattern composed of the ip4/ip6 item.
1856 * @param pattern Pointer to the flow pattern table.
1857 * @param flow Pointer to the flow.
1858 * @param error Pointer to the flow error.
1859 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1860 * @returns 0 in case of success, negative value otherwise.
1863 mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[],
1864 struct rte_flow *flow,
1865 struct rte_flow_error *error, int ip6)
1867 const struct rte_flow_item *item = mrvl_next_item(pattern);
/* Dispatch on the ip6 flag; both parsers share the same contract. */
1869 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1870 mrvl_parse_ip4(item, flow, error);
1874 * Parse flow pattern composed of the ipv4 item.
1876 * @param pattern Pointer to the flow pattern table.
1877 * @param flow Pointer to the flow.
1878 * @param error Pointer to the flow error.
1879 * @returns 0 in case of success, negative value otherwise.
1882 mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[],
1883 struct rte_flow *flow,
1884 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 0 selects the IPv4 branch. */
1886 return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0);
1890 * Parse flow pattern composed of the ipv6 item.
1892 * @param pattern Pointer to the flow pattern table.
1893 * @param flow Pointer to the flow.
1894 * @param error Pointer to the flow error.
1895 * @returns 0 in case of success, negative value otherwise.
1898 mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[],
1899 struct rte_flow *flow,
1900 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 1 selects the IPv6 branch. */
1902 return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1);
1906 * Parse flow pattern composed of the ip4/ip6 and tcp items.
1908 * @param pattern Pointer to the flow pattern table.
1909 * @param flow Pointer to the flow.
1910 * @param error Pointer to the flow error.
1911 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1912 * @returns 0 in case of success, negative value otherwise.
1915 mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[],
1916 struct rte_flow *flow,
1917 struct rte_flow_error *error, int ip6)
1919 const struct rte_flow_item *item = mrvl_next_item(pattern);
1922 ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
1923 mrvl_parse_ip4(item, flow, error);
/* Step past the L3 item, then skip VOIDs to reach the tcp item. */
1927 item = mrvl_next_item(item + 1);
1929 return mrvl_parse_tcp(item, flow, error);
1933 * Parse flow pattern composed of the ipv4 and tcp items.
1935 * @param pattern Pointer to the flow pattern table.
1936 * @param flow Pointer to the flow.
1937 * @param error Pointer to the flow error.
1938 * @returns 0 in case of success, negative value otherwise.
1941 mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[],
1942 struct rte_flow *flow,
1943 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 0 selects the IPv4 branch. */
1945 return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0);
1949 * Parse flow pattern composed of the ipv6 and tcp items.
1951 * @param pattern Pointer to the flow pattern table.
1952 * @param flow Pointer to the flow.
1953 * @param error Pointer to the flow error.
1954 * @returns 0 in case of success, negative value otherwise.
1957 mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
1958 struct rte_flow *flow,
1959 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 1 selects the IPv6 branch. */
1961 return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1);
1965 * Parse flow pattern composed of the ipv4/ipv6 and udp items.
1967 * @param pattern Pointer to the flow pattern table.
1968 * @param flow Pointer to the flow.
1969 * @param error Pointer to the flow error.
* @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1970 * @returns 0 in case of success, negative value otherwise.
1973 mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[],
1974 struct rte_flow *flow,
1975 struct rte_flow_error *error, int ip6)
1977 const struct rte_flow_item *item = mrvl_next_item(pattern);
1980 ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
1981 mrvl_parse_ip4(item, flow, error);
/* Step past the L3 item, then skip VOIDs to reach the udp item. */
1985 item = mrvl_next_item(item + 1);
1987 return mrvl_parse_udp(item, flow, error);
1991 * Parse flow pattern composed of the ipv4 and udp items.
1993 * @param pattern Pointer to the flow pattern table.
1994 * @param flow Pointer to the flow.
1995 * @param error Pointer to the flow error.
1996 * @returns 0 in case of success, negative value otherwise.
1999 mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[],
2000 struct rte_flow *flow,
2001 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 0 selects the IPv4 branch. */
2003 return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0);
2007 * Parse flow pattern composed of the ipv6 and udp items.
2009 * @param pattern Pointer to the flow pattern table.
2010 * @param flow Pointer to the flow.
2011 * @param error Pointer to the flow error.
2012 * @returns 0 in case of success, negative value otherwise.
2015 mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[],
2016 struct rte_flow *flow,
2017 struct rte_flow_error *error)
/* Thin wrapper: ip6 == 1 selects the IPv6 branch. */
2019 return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1);
2023 * Parse flow pattern composed of the tcp item.
2025 * @param pattern Pointer to the flow pattern table.
2026 * @param flow Pointer to the flow.
2027 * @param error Pointer to the flow error.
2028 * @returns 0 in case of success, negative value otherwise.
2031 mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[],
2032 struct rte_flow *flow,
2033 struct rte_flow_error *error)
/* Skip leading VOID items before parsing. */
2035 const struct rte_flow_item *item = mrvl_next_item(pattern);
2037 return mrvl_parse_tcp(item, flow, error);
2041 * Parse flow pattern composed of the udp item.
2043 * @param pattern Pointer to the flow pattern table.
2044 * @param flow Pointer to the flow.
2045 * @param error Pointer to the flow error.
2046 * @returns 0 in case of success, negative value otherwise.
2049 mrvl_parse_pattern_udp(const struct rte_flow_item pattern[],
2050 struct rte_flow *flow,
2051 struct rte_flow_error *error)
/* Skip leading VOID items before parsing. */
2053 const struct rte_flow_item *item = mrvl_next_item(pattern);
2055 return mrvl_parse_udp(item, flow, error);
2059 * Structure used to map specific flow pattern to the pattern parse callback
2060 * which will iterate over each pattern item and extract relevant data.
2062 static const struct {
/* Expected sequence of item types, terminated by RTE_FLOW_ITEM_TYPE_END. */
2063 const enum rte_flow_item_type *pattern;
/* Parser invoked once the pattern above matches the user-supplied items. */
2064 int (*parse)(const struct rte_flow_item pattern[],
2065 struct rte_flow *flow,
2066 struct rte_flow_error *error);
2067 } mrvl_patterns[] = {
/* The table is scanned in order; the first matching entry wins. */
2068 { pattern_eth, mrvl_parse_pattern_eth },
2069 { pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
2070 { pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
2071 { pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
2072 { pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
2073 { pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
2074 { pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
2075 { pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
2076 { pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
2077 { pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
2078 { pattern_vlan, mrvl_parse_pattern_vlan },
2079 { pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
2080 { pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
2081 { pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
2082 { pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
2083 { pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
2084 { pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
2085 { pattern_ip, mrvl_parse_pattern_ip4 },
2086 { pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
2087 { pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
2088 { pattern_ip6, mrvl_parse_pattern_ip6 },
2089 { pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
2090 { pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
2091 { pattern_tcp, mrvl_parse_pattern_tcp },
2092 { pattern_udp, mrvl_parse_pattern_udp }
2096 * Check whether provided pattern matches any of the supported ones.
2098 * @param type_pattern Pointer to the pattern type.
2099 * @param item_pattern Pointer to the flow pattern.
2100 * @returns 1 in case of success, 0 value otherwise.
2103 mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
2104 const struct rte_flow_item *item_pattern)
2106 const enum rte_flow_item_type *type = type_pattern;
2107 const struct rte_flow_item *item = item_pattern;
/* VOID items in the user pattern are skipped without consuming a type slot. */
2110 if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
/* If either sequence hits END, the loop stops — presumably both cursors
 * advanced in lockstep up to here; TODO confirm against the hidden loop body. */
2115 if (*type == RTE_FLOW_ITEM_TYPE_END ||
2116 item->type == RTE_FLOW_ITEM_TYPE_END)
2119 if (*type != item->type)
/* Match only when both sequences ended at the same position (both are END). */
2126 return *type == item->type;
2130 * Parse flow attribute.
2132 * This will check whether the provided attribute's flags are supported.
2134 * @param priv Unused
2135 * @param attr Pointer to the flow attribute.
2136 * @param flow Unused
2137 * @param error Pointer to the flow error.
2138 * @returns 0 in case of success, negative value otherwise.
2141 mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
2142 const struct rte_flow_attr *attr,
2143 struct rte_flow *flow __rte_unused,
2144 struct rte_flow_error *error)
/* A NULL attribute is rejected up front. */
2147 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
2148 NULL, "NULL attribute");
/* Only the default group (0) is supported. */
2153 rte_flow_error_set(error, ENOTSUP,
2154 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2155 "Groups are not supported");
/* Only the default priority (0) is supported. */
2158 if (attr->priority) {
2159 rte_flow_error_set(error, ENOTSUP,
2160 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
2161 "Priorities are not supported");
/* The classifier works on the receive path only. */
2164 if (!attr->ingress) {
2165 rte_flow_error_set(error, ENOTSUP,
2166 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
2167 "Only ingress is supported");
2171 rte_flow_error_set(error, ENOTSUP,
2172 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2173 "Egress is not supported");
2181 * Parse flow pattern.
2183 * Specific classifier rule will be created as well.
2185 * @param priv Unused
2186 * @param pattern Pointer to the flow pattern.
2187 * @param flow Pointer to the flow.
2188 * @param error Pointer to the flow error.
2189 * @returns 0 in case of success, negative value otherwise.
2192 mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
2193 const struct rte_flow_item pattern[],
2194 struct rte_flow *flow,
2195 struct rte_flow_error *error)
/* Find the first supported pattern template matching the user pattern. */
2200 for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
2201 if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))
2204 ret = mrvl_patterns[i].parse(pattern, flow, error);
/* On parse failure, release key/mask strings allocated so far. */
2206 mrvl_free_all_key_mask(&flow->rule);
/* No template matched: the pattern is not supported by this PMD. */
2211 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2212 "Unsupported pattern");
2218 * Parse flow actions.
2220 * @param priv Pointer to the port's private data.
2221 * @param actions Pointer the action table.
2222 * @param flow Pointer to the flow.
2223 * @param error Pointer to the flow error.
2224 * @returns 0 in case of success, negative value otherwise.
2227 mrvl_flow_parse_actions(struct mrvl_priv *priv,
2228 const struct rte_flow_action actions[],
2229 struct rte_flow *flow,
2230 struct rte_flow_error *error)
2232 const struct rte_flow_action *action = actions;
2235 for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
2236 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
2239 if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
2240 flow->cos.ppio = priv->ppio;
2242 flow->action.type = PP2_CLS_TBL_ACT_DROP;
2243 flow->action.cos = &flow->cos;
2245 } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2246 const struct rte_flow_action_queue *q =
2247 (const struct rte_flow_action_queue *)
2250 if (q->index > priv->nb_rx_queues) {
2251 rte_flow_error_set(error, EINVAL,
2252 RTE_FLOW_ERROR_TYPE_ACTION,
2254 "Queue index out of range");
2258 if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
2260 * Unknown TC mapping, mapping will not have
2264 "Unknown TC mapping for queue %hu eth%hhu\n",
2265 q->index, priv->ppio_id);
2267 rte_flow_error_set(error, EFAULT,
2268 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2274 "Action: Assign packets to queue %d, tc:%d, q:%d\n",
2275 q->index, priv->rxq_map[q->index].tc,
2276 priv->rxq_map[q->index].inq);
2278 flow->cos.ppio = priv->ppio;
2279 flow->cos.tc = priv->rxq_map[q->index].tc;
2280 flow->action.type = PP2_CLS_TBL_ACT_DONE;
2281 flow->action.cos = &flow->cos;
2284 rte_flow_error_set(error, ENOTSUP,
2285 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2286 "Action not supported");
2293 rte_flow_error_set(error, EINVAL,
2294 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2295 NULL, "Action not specified");
2303 * Parse flow attribute, pattern and actions.
2305 * @param priv Pointer to the port's private data.
2306 * @param attr Pointer to the flow attribute.
2307 * @param pattern Pointer to the flow pattern.
2308 * @param actions Pointer to the flow actions.
2309 * @param flow Pointer to the flow.
2310 * @param error Pointer to the flow error.
2311 * @returns 0 on success, negative value otherwise.
2314 mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
2315 const struct rte_flow_item pattern[],
2316 const struct rte_flow_action actions[],
2317 struct rte_flow *flow,
2318 struct rte_flow_error *error)
/* Validate in order: attribute, then pattern, then actions; stop on error. */
2322 ret = mrvl_flow_parse_attr(priv, attr, flow, error);
2326 ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
2330 return mrvl_flow_parse_actions(priv, actions, flow, error);
/* Select the classifier engine type based on the total rule key size. */
2333 static inline enum pp2_cls_tbl_type
2334 mrvl_engine_type(const struct rte_flow *flow)
/* Sum the sizes of all fields already collected into the rule key. */
2338 for (i = 0; i < flow->rule.num_fields; i++)
2339 size += flow->rule.fields[i].size;
2342 * For maskable engine type the key size must be up to 8 bytes.
2343 * For keys with size bigger than 8 bytes, engine type must
2344 * be set to exact match.
2347 return PP2_CLS_TBL_EXACT_MATCH;
2349 return PP2_CLS_TBL_MASKABLE;
2353 mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
2355 struct mrvl_priv *priv = dev->data->dev_private;
2356 struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
2359 if (priv->cls_tbl) {
2360 pp2_cls_tbl_deinit(priv->cls_tbl);
2361 priv->cls_tbl = NULL;
2364 memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
2366 priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
2367 RTE_LOG(INFO, PMD, "Setting cls search engine type to %s\n",
2368 priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
2369 "exact" : "maskable");
2370 priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
2371 priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
2372 priv->cls_tbl_params.default_act.cos = &first_flow->cos;
2374 if (first_flow->pattern & F_DMAC) {
2375 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2376 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
2378 key->num_fields += 1;
2381 if (first_flow->pattern & F_SMAC) {
2382 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2383 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
2385 key->num_fields += 1;
2388 if (first_flow->pattern & F_TYPE) {
2389 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2390 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
2392 key->num_fields += 1;
2395 if (first_flow->pattern & F_VLAN_ID) {
2396 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2397 key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
2399 key->num_fields += 1;
2402 if (first_flow->pattern & F_VLAN_PRI) {
2403 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2404 key->proto_field[key->num_fields].field.vlan =
2407 key->num_fields += 1;
2410 if (first_flow->pattern & F_IP4_TOS) {
2411 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2412 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS;
2414 key->num_fields += 1;
2417 if (first_flow->pattern & F_IP4_SIP) {
2418 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2419 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
2421 key->num_fields += 1;
2424 if (first_flow->pattern & F_IP4_DIP) {
2425 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2426 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
2428 key->num_fields += 1;
2431 if (first_flow->pattern & F_IP4_PROTO) {
2432 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2433 key->proto_field[key->num_fields].field.ipv4 =
2436 key->num_fields += 1;
2439 if (first_flow->pattern & F_IP6_SIP) {
2440 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2441 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
2442 key->key_size += 16;
2443 key->num_fields += 1;
2446 if (first_flow->pattern & F_IP6_DIP) {
2447 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2448 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
2449 key->key_size += 16;
2450 key->num_fields += 1;
2453 if (first_flow->pattern & F_IP6_FLOW) {
2454 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2455 key->proto_field[key->num_fields].field.ipv6 =
2458 key->num_fields += 1;
2461 if (first_flow->pattern & F_IP6_NEXT_HDR) {
2462 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2463 key->proto_field[key->num_fields].field.ipv6 =
2464 MV_NET_IP6_F_NEXT_HDR;
2466 key->num_fields += 1;
2469 if (first_flow->pattern & F_TCP_SPORT) {
2470 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2471 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2473 key->num_fields += 1;
2476 if (first_flow->pattern & F_TCP_DPORT) {
2477 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2478 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
2480 key->num_fields += 1;
2483 if (first_flow->pattern & F_UDP_SPORT) {
2484 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2485 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2487 key->num_fields += 1;
2490 if (first_flow->pattern & F_UDP_DPORT) {
2491 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2492 key->proto_field[key->num_fields].field.udp = MV_NET_TCP_F_DP;
2494 key->num_fields += 1;
2497 ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
2499 priv->cls_tbl_pattern = first_flow->pattern;
2505 * Check whether new flow can be added to the table
2507 * @param priv Pointer to the port's private data.
2508 * @param flow Pointer to the new flow.
2509 * @return 1 in case flow can be added, 0 otherwise.
2512 mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
/* The flow must use the exact key layout and engine type of the live table. */
2514 return flow->pattern == priv->cls_tbl_pattern &&
2515 mrvl_engine_type(flow) == priv->cls_tbl_params.type;
2519 * DPDK flow create callback called when flow is to be created.
2521 * @param dev Pointer to the device.
2522 * @param attr Pointer to the flow attribute.
2523 * @param pattern Pointer to the flow pattern.
2524 * @param actions Pointer to the flow actions.
2525 * @param error Pointer to the flow error.
2526 * @returns Pointer to the created flow in case of success, NULL otherwise.
2528 static struct rte_flow *
2529 mrvl_flow_create(struct rte_eth_dev *dev,
2530 const struct rte_flow_attr *attr,
2531 const struct rte_flow_item pattern[],
2532 const struct rte_flow_action actions[],
2533 struct rte_flow_error *error)
2535 struct mrvl_priv *priv = dev->data->dev_private;
2536 struct rte_flow *flow, *first;
/* Flows can only be installed once the port has been started. */
2539 if (!dev->data->dev_started) {
2540 rte_flow_error_set(error, EINVAL,
2541 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2542 "Port must be started first\n");
/* Zeroed allocation: rule/cos/action sub-structs start cleared. */
2546 flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
/* Parse attr/pattern/actions into flow->rule, flow->cos, flow->action. */
2550 ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
2557 * 1. In case table does not exist - create one.
2558 * 2. In case table exists, is empty and new flow cannot be added
2560 * 3. In case table is not empty and new flow matches table format
2562 * 4. Otherwise flow cannot be added.
2564 first = LIST_FIRST(&priv->flows);
2565 if (!priv->cls_tbl) {
2566 ret = mrvl_create_cls_table(dev, flow);
/* Empty table with an incompatible format: safe to rebuild it. */
2567 } else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
2568 ret = mrvl_create_cls_table(dev, flow);
2569 } else if (mrvl_flow_can_be_added(priv, flow)) {
2572 rte_flow_error_set(error, EINVAL,
2573 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2574 "Pattern does not match cls table format\n");
2579 rte_flow_error_set(error, EINVAL,
2580 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2581 "Failed to create cls table\n");
/* Install the parsed rule into the (possibly new) classifier table. */
2585 ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
2587 rte_flow_error_set(error, EINVAL,
2588 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2589 "Failed to add rule\n");
/*
 * Track the flow so destroy/flush can find it later.
 * NOTE(review): the error paths are elided from this excerpt — verify in
 * the full file that each of them frees `flow` before returning NULL.
 */
2593 LIST_INSERT_HEAD(&priv->flows, flow, next);
2602 * Remove classifier rule associated with given flow.
2604 * @param priv Pointer to the port's private data.
2605 * @param flow Pointer to the flow.
2606 * @param error Pointer to the flow error.
2607 * @returns 0 in case of success, negative value otherwise.
2610 mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
2611 struct rte_flow_error *error)
/* No table means nothing was ever installed for this port. */
2615 if (!priv->cls_tbl) {
2616 rte_flow_error_set(error, EINVAL,
2617 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2618 "Classifier table not initialized");
/* Drop the rule from the MUSDK classifier table. */
2622 ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
2624 rte_flow_error_set(error, EINVAL,
2625 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2626 "Failed to remove rule");
/* Release the key/mask strings allocated for this rule during parsing. */
2630 mrvl_free_all_key_mask(&flow->rule);
2636 * DPDK flow destroy callback called when flow is to be removed.
2638 * @param priv Pointer to the port's private data.
2639 * @param flow Pointer to the flow.
2640 * @param error Pointer to the flow error.
2641 * @returns 0 in case of success, negative value otherwise.
2644 mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
2645 struct rte_flow_error *error)
2647 struct mrvl_priv *priv = dev->data->dev_private;
/* Look the handle up in this port's flow list before touching hardware. */
2651 LIST_FOREACH(f, &priv->flows, next) {
/*
 * NOTE(review): the not-found condition is elided from this excerpt —
 * it must test the loop cursor `f` (NULL when the handle was not in the
 * list), not the caller-supplied `flow`; verify in the full file.
 */
2657 rte_flow_error_set(error, EINVAL,
2658 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2659 "Rule was not found");
/* Unlink first, then tear down the classifier rule and free the flow. */
2663 LIST_REMOVE(f, next);
2665 ret = mrvl_flow_remove(priv, flow, error);
2675 * DPDK flow callback called to verify given attribute, pattern and actions.
2677 * @param dev Pointer to the device.
2678 * @param attr Pointer to the flow attribute.
2679 * @param pattern Pointer to the flow pattern.
2680 * @param actions Pointer to the flow actions.
2681 * @param error Pointer to the flow error.
2682 * @returns 0 on success, negative value otherwise.
2685 mrvl_flow_validate(struct rte_eth_dev *dev,
2686 const struct rte_flow_attr *attr,
2687 const struct rte_flow_item pattern[],
2688 const struct rte_flow_action actions[],
2689 struct rte_flow_error *error)
2691 static struct rte_flow *flow;
2693 flow = mrvl_flow_create(dev, attr, pattern, actions, error);
2697 mrvl_flow_destroy(dev, flow, error);
2703 * DPDK flow flush callback called when flows are to be flushed.
2705 * @param dev Pointer to the device.
2706 * @param error Pointer to the flow error.
2707 * @returns 0 in case of success, negative value otherwise.
2710 mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2712 struct mrvl_priv *priv = dev->data->dev_private;
/* Pop flows from the head until the list is empty. */
2714 while (!LIST_EMPTY(&priv->flows)) {
2715 struct rte_flow *flow = LIST_FIRST(&priv->flows);
/*
 * Tear down the classifier rule first; the early-return on failure and
 * the rte_free() of `flow` are on lines elided from this excerpt —
 * verify against the full file.
 */
2716 int ret = mrvl_flow_remove(priv, flow, error);
2720 LIST_REMOVE(flow, next);
2728 * DPDK flow isolate callback called to isolate port.
2730 * @param dev Pointer to the device.
2731 * @param enable Pass 0/1 to disable/enable port isolation.
2732 * @param error Pointer to the flow error.
2733 * @returns 0 in case of success, negative value otherwise.
2736 mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
2737 struct rte_flow_error *error)
2739 struct mrvl_priv *priv = dev->data->dev_private;
/* Isolation mode may only change while the port is stopped (EBUSY). */
2741 if (dev->data->dev_started) {
2742 rte_flow_error_set(error, EBUSY,
2743 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2744 NULL, "Port must be stopped first\n");
/* Record the mode; the rest of the PMD consults priv->isolated. */
2748 priv->isolated = enable;
/*
 * rte_flow callback table exported to the ethdev layer; entries not listed
 * (e.g. query) are left NULL and reported as unsupported by the framework.
 */
2753 const struct rte_flow_ops mrvl_flow_ops = {
2754 .validate = mrvl_flow_validate,
2755 .create = mrvl_flow_create,
2756 .destroy = mrvl_flow_destroy,
2757 .flush = mrvl_flow_flush,
2758 .isolate = mrvl_flow_isolate