1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Marvell International Ltd.
3 * Copyright(c) 2018 Semihalf.
8 #include <rte_flow_driver.h>
9 #include <rte_malloc.h>
12 #include <arpa/inet.h>
18 #include "mrvl_ethdev.h"
20 #include "env/mv_common.h" /* for BIT() */
/** Number of rules in the classifier table. */
#define MRVL_CLS_MAX_NUM_RULES 20
/** Size of the classifier key and mask strings (bytes, incl. NUL). */
#define MRVL_CLS_STR_SIZE_MAX 40
28 /** Parsed fields in processed rte_flow_item. */
29 enum mrvl_parsed_fields {
37 F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */
44 F_IP6_TC = BIT(10), /* not supported by MUSDK yet */
48 F_IP6_NEXT_HDR = BIT(14),
50 F_TCP_SPORT = BIT(15),
51 F_TCP_DPORT = BIT(16),
53 F_UDP_SPORT = BIT(17),
54 F_UDP_DPORT = BIT(18),
57 /** PMD-specific definition of a flow rule handle. */
59 LIST_ENTRY(rte_flow) next;
61 enum mrvl_parsed_fields pattern;
63 struct pp2_cls_tbl_rule rule;
64 struct pp2_cls_cos_desc cos;
65 struct pp2_cls_tbl_action action;
68 static const enum rte_flow_item_type pattern_eth[] = {
69 RTE_FLOW_ITEM_TYPE_ETH,
70 RTE_FLOW_ITEM_TYPE_END
73 static const enum rte_flow_item_type pattern_eth_vlan[] = {
74 RTE_FLOW_ITEM_TYPE_ETH,
75 RTE_FLOW_ITEM_TYPE_VLAN,
76 RTE_FLOW_ITEM_TYPE_END
79 static const enum rte_flow_item_type pattern_eth_vlan_ip[] = {
80 RTE_FLOW_ITEM_TYPE_ETH,
81 RTE_FLOW_ITEM_TYPE_VLAN,
82 RTE_FLOW_ITEM_TYPE_IPV4,
83 RTE_FLOW_ITEM_TYPE_END
86 static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = {
87 RTE_FLOW_ITEM_TYPE_ETH,
88 RTE_FLOW_ITEM_TYPE_VLAN,
89 RTE_FLOW_ITEM_TYPE_IPV6,
90 RTE_FLOW_ITEM_TYPE_END
93 static const enum rte_flow_item_type pattern_eth_ip4[] = {
94 RTE_FLOW_ITEM_TYPE_ETH,
95 RTE_FLOW_ITEM_TYPE_IPV4,
96 RTE_FLOW_ITEM_TYPE_END
99 static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = {
100 RTE_FLOW_ITEM_TYPE_ETH,
101 RTE_FLOW_ITEM_TYPE_IPV4,
102 RTE_FLOW_ITEM_TYPE_TCP,
103 RTE_FLOW_ITEM_TYPE_END
106 static const enum rte_flow_item_type pattern_eth_ip4_udp[] = {
107 RTE_FLOW_ITEM_TYPE_ETH,
108 RTE_FLOW_ITEM_TYPE_IPV4,
109 RTE_FLOW_ITEM_TYPE_UDP,
110 RTE_FLOW_ITEM_TYPE_END
113 static const enum rte_flow_item_type pattern_eth_ip6[] = {
114 RTE_FLOW_ITEM_TYPE_ETH,
115 RTE_FLOW_ITEM_TYPE_IPV6,
116 RTE_FLOW_ITEM_TYPE_END
119 static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = {
120 RTE_FLOW_ITEM_TYPE_ETH,
121 RTE_FLOW_ITEM_TYPE_IPV6,
122 RTE_FLOW_ITEM_TYPE_TCP,
123 RTE_FLOW_ITEM_TYPE_END
126 static const enum rte_flow_item_type pattern_eth_ip6_udp[] = {
127 RTE_FLOW_ITEM_TYPE_ETH,
128 RTE_FLOW_ITEM_TYPE_IPV6,
129 RTE_FLOW_ITEM_TYPE_UDP,
130 RTE_FLOW_ITEM_TYPE_END
133 static const enum rte_flow_item_type pattern_vlan[] = {
134 RTE_FLOW_ITEM_TYPE_VLAN,
135 RTE_FLOW_ITEM_TYPE_END
138 static const enum rte_flow_item_type pattern_vlan_ip[] = {
139 RTE_FLOW_ITEM_TYPE_VLAN,
140 RTE_FLOW_ITEM_TYPE_IPV4,
141 RTE_FLOW_ITEM_TYPE_END
144 static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = {
145 RTE_FLOW_ITEM_TYPE_VLAN,
146 RTE_FLOW_ITEM_TYPE_IPV4,
147 RTE_FLOW_ITEM_TYPE_TCP,
148 RTE_FLOW_ITEM_TYPE_END
151 static const enum rte_flow_item_type pattern_vlan_ip_udp[] = {
152 RTE_FLOW_ITEM_TYPE_VLAN,
153 RTE_FLOW_ITEM_TYPE_IPV4,
154 RTE_FLOW_ITEM_TYPE_UDP,
155 RTE_FLOW_ITEM_TYPE_END
158 static const enum rte_flow_item_type pattern_vlan_ip6[] = {
159 RTE_FLOW_ITEM_TYPE_VLAN,
160 RTE_FLOW_ITEM_TYPE_IPV6,
161 RTE_FLOW_ITEM_TYPE_END
164 static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = {
165 RTE_FLOW_ITEM_TYPE_VLAN,
166 RTE_FLOW_ITEM_TYPE_IPV6,
167 RTE_FLOW_ITEM_TYPE_TCP,
168 RTE_FLOW_ITEM_TYPE_END
171 static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = {
172 RTE_FLOW_ITEM_TYPE_VLAN,
173 RTE_FLOW_ITEM_TYPE_IPV6,
174 RTE_FLOW_ITEM_TYPE_UDP,
175 RTE_FLOW_ITEM_TYPE_END
178 static const enum rte_flow_item_type pattern_ip[] = {
179 RTE_FLOW_ITEM_TYPE_IPV4,
180 RTE_FLOW_ITEM_TYPE_END
183 static const enum rte_flow_item_type pattern_ip6[] = {
184 RTE_FLOW_ITEM_TYPE_IPV6,
185 RTE_FLOW_ITEM_TYPE_END
188 static const enum rte_flow_item_type pattern_ip_tcp[] = {
189 RTE_FLOW_ITEM_TYPE_IPV4,
190 RTE_FLOW_ITEM_TYPE_TCP,
191 RTE_FLOW_ITEM_TYPE_END
194 static const enum rte_flow_item_type pattern_ip6_tcp[] = {
195 RTE_FLOW_ITEM_TYPE_IPV6,
196 RTE_FLOW_ITEM_TYPE_TCP,
197 RTE_FLOW_ITEM_TYPE_END
200 static const enum rte_flow_item_type pattern_ip_udp[] = {
201 RTE_FLOW_ITEM_TYPE_IPV4,
202 RTE_FLOW_ITEM_TYPE_UDP,
203 RTE_FLOW_ITEM_TYPE_END
206 static const enum rte_flow_item_type pattern_ip6_udp[] = {
207 RTE_FLOW_ITEM_TYPE_IPV6,
208 RTE_FLOW_ITEM_TYPE_UDP,
209 RTE_FLOW_ITEM_TYPE_END
212 static const enum rte_flow_item_type pattern_tcp[] = {
213 RTE_FLOW_ITEM_TYPE_TCP,
214 RTE_FLOW_ITEM_TYPE_END
217 static const enum rte_flow_item_type pattern_udp[] = {
218 RTE_FLOW_ITEM_TYPE_UDP,
219 RTE_FLOW_ITEM_TYPE_END
/* 802.1Q TCI layout: PCP (bits 15:13) | DEI (bit 12) | VID (bits 11:0). */
#define MRVL_VLAN_ID_MASK 0x0fff
/*
 * PCP occupies the TOP three bits of the TCI, so the mask is 0xe000 (not
 * 0x7000): mrvl_parse_vlan_pri() shifts the masked value right by 13.
 */
#define MRVL_VLAN_PRI_MASK 0xe000
/* DSCP is the upper six bits of the IPv4 ToS byte. */
#define MRVL_IPV4_DSCP_MASK 0xfc
#define MRVL_IPV4_ADDR_MASK 0xffffffff
/* IPv6 flow label: lower 20 bits of vtc_flow. */
#define MRVL_IPV6_FLOW_MASK 0x0fffff
229 * Given a flow item, return the next non-void one.
231 * @param items Pointer to the item in the table.
232 * @returns Next not-void item, NULL otherwise.
234 static const struct rte_flow_item *
235 mrvl_next_item(const struct rte_flow_item *items)
237 const struct rte_flow_item *item = items;
239 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
240 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
248 * Allocate memory for classifier rule key and mask fields.
250 * @param field Pointer to the classifier rule.
251 * @returns 0 in case of success, negative value otherwise.
254 mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
256 unsigned int id = rte_socket_id();
258 field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
262 field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
268 rte_free(field->key);
276 * Free memory allocated for classifier rule key and mask fields.
278 * @param field Pointer to the classifier rule.
281 mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
283 rte_free(field->key);
284 rte_free(field->mask);
290 * Free memory allocated for all classifier rule key and mask fields.
292 * @param rule Pointer to the classifier table rule.
295 mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
299 for (i = 0; i < rule->num_fields; i++)
300 mrvl_free_key_mask(&rule->fields[i]);
301 rule->num_fields = 0;
305 * Initialize rte flow item parsing.
307 * @param item Pointer to the flow item.
308 * @param spec_ptr Pointer to the specific item pointer.
309 * @param mask_ptr Pointer to the specific item's mask pointer.
310 * @def_mask Pointer to the default mask.
311 * @size Size of the flow item.
312 * @error Pointer to the rte flow error.
313 * @returns 0 in case of success, negative value otherwise.
316 mrvl_parse_init(const struct rte_flow_item *item,
317 const void **spec_ptr,
318 const void **mask_ptr,
319 const void *def_mask,
321 struct rte_flow_error *error)
328 memset(zeros, 0, size);
331 rte_flow_error_set(error, EINVAL,
332 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
337 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
338 rte_flow_error_set(error, EINVAL,
339 RTE_FLOW_ERROR_TYPE_ITEM, item,
340 "Mask or last is set without spec\n");
345 * If "mask" is not set, default mask is used,
346 * but if default mask is NULL, "mask" should be set.
348 if (item->mask == NULL) {
349 if (def_mask == NULL) {
350 rte_flow_error_set(error, EINVAL,
351 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
352 "Mask should be specified\n");
356 mask = (const uint8_t *)def_mask;
358 mask = (const uint8_t *)item->mask;
361 spec = (const uint8_t *)item->spec;
362 last = (const uint8_t *)item->last;
365 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
366 NULL, "Spec should be specified\n");
371 * If field values in "last" are either 0 or equal to the corresponding
372 * values in "spec" then they are ignored.
375 !memcmp(last, zeros, size) &&
376 memcmp(last, spec, size) != 0) {
377 rte_flow_error_set(error, ENOTSUP,
378 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
379 "Ranging is not supported\n");
390 * Parse the eth flow item.
392 * This will create classifier rule that matches either destination or source
395 * @param spec Pointer to the specific flow item.
396 * @param mask Pointer to the specific flow item's mask.
397 * @param parse_dst Parse either destination or source mac address.
398 * @param flow Pointer to the flow.
399 * @return 0 in case of success, negative error value otherwise.
402 mrvl_parse_mac(const struct rte_flow_item_eth *spec,
403 const struct rte_flow_item_eth *mask,
404 int parse_dst, struct rte_flow *flow)
406 struct pp2_cls_rule_key_field *key_field;
407 const uint8_t *k, *m;
409 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
413 k = spec->dst.addr_bytes;
414 m = mask->dst.addr_bytes;
416 flow->pattern |= F_DMAC;
418 k = spec->src.addr_bytes;
419 m = mask->src.addr_bytes;
421 flow->pattern |= F_SMAC;
424 key_field = &flow->rule.fields[flow->rule.num_fields];
425 mrvl_alloc_key_mask(key_field);
428 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
429 "%02x:%02x:%02x:%02x:%02x:%02x",
430 k[0], k[1], k[2], k[3], k[4], k[5]);
432 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
433 "%02x:%02x:%02x:%02x:%02x:%02x",
434 m[0], m[1], m[2], m[3], m[4], m[5]);
436 flow->rule.num_fields += 1;
/**
 * Helper for parsing the eth flow item destination mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 1, flow);
}
/**
 * Helper for parsing the eth flow item source mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_smac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 0, flow);
}
474 * Parse the ether type field of the eth flow item.
476 * @param spec Pointer to the specific flow item.
477 * @param mask Pointer to the specific flow item's mask.
478 * @param flow Pointer to the flow.
479 * @return 0 in case of success, negative error value otherwise.
482 mrvl_parse_type(const struct rte_flow_item_eth *spec,
483 const struct rte_flow_item_eth *mask __rte_unused,
484 struct rte_flow *flow)
486 struct pp2_cls_rule_key_field *key_field;
489 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
492 key_field = &flow->rule.fields[flow->rule.num_fields];
493 mrvl_alloc_key_mask(key_field);
496 k = rte_be_to_cpu_16(spec->type);
497 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
499 flow->pattern |= F_TYPE;
500 flow->rule.num_fields += 1;
506 * Parse the vid field of the vlan rte flow item.
508 * This will create classifier rule that matches vid.
510 * @param spec Pointer to the specific flow item.
511 * @param mask Pointer to the specific flow item's mask.
512 * @param flow Pointer to the flow.
513 * @return 0 in case of success, negative error value otherwise.
516 mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
517 const struct rte_flow_item_vlan *mask __rte_unused,
518 struct rte_flow *flow)
520 struct pp2_cls_rule_key_field *key_field;
523 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
526 key_field = &flow->rule.fields[flow->rule.num_fields];
527 mrvl_alloc_key_mask(key_field);
530 k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
531 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
533 flow->pattern |= F_VLAN_ID;
534 flow->rule.num_fields += 1;
540 * Parse the pri field of the vlan rte flow item.
542 * This will create classifier rule that matches pri.
544 * @param spec Pointer to the specific flow item.
545 * @param mask Pointer to the specific flow item's mask.
546 * @param flow Pointer to the flow.
547 * @return 0 in case of success, negative error value otherwise.
550 mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
551 const struct rte_flow_item_vlan *mask __rte_unused,
552 struct rte_flow *flow)
554 struct pp2_cls_rule_key_field *key_field;
557 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
560 key_field = &flow->rule.fields[flow->rule.num_fields];
561 mrvl_alloc_key_mask(key_field);
564 k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
565 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
567 flow->pattern |= F_VLAN_PRI;
568 flow->rule.num_fields += 1;
574 * Parse the dscp field of the ipv4 rte flow item.
576 * This will create classifier rule that matches dscp field.
578 * @param spec Pointer to the specific flow item.
579 * @param mask Pointer to the specific flow item's mask.
580 * @param flow Pointer to the flow.
581 * @return 0 in case of success, negative error value otherwise.
584 mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
585 const struct rte_flow_item_ipv4 *mask,
586 struct rte_flow *flow)
588 struct pp2_cls_rule_key_field *key_field;
591 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
594 key_field = &flow->rule.fields[flow->rule.num_fields];
595 mrvl_alloc_key_mask(key_field);
598 k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
599 m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
600 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
601 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
603 flow->pattern |= F_IP4_TOS;
604 flow->rule.num_fields += 1;
610 * Parse either source or destination ip addresses of the ipv4 flow item.
612 * This will create classifier rule that matches either destination
613 * or source ip field.
615 * @param spec Pointer to the specific flow item.
616 * @param mask Pointer to the specific flow item's mask.
617 * @param parse_dst Parse either destination or source ip address.
618 * @param flow Pointer to the flow.
619 * @return 0 in case of success, negative error value otherwise.
622 mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
623 const struct rte_flow_item_ipv4 *mask,
624 int parse_dst, struct rte_flow *flow)
626 struct pp2_cls_rule_key_field *key_field;
630 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
633 memset(&k, 0, sizeof(k));
635 k.s_addr = spec->hdr.dst_addr;
636 m = rte_be_to_cpu_32(mask->hdr.dst_addr);
638 flow->pattern |= F_IP4_DIP;
640 k.s_addr = spec->hdr.src_addr;
641 m = rte_be_to_cpu_32(mask->hdr.src_addr);
643 flow->pattern |= F_IP4_SIP;
646 key_field = &flow->rule.fields[flow->rule.num_fields];
647 mrvl_alloc_key_mask(key_field);
650 inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
651 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
653 flow->rule.num_fields += 1;
/**
 * Helper for parsing destination ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 1, flow);
}
/**
 * Helper for parsing source ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 0, flow);
}
691 * Parse the proto field of the ipv4 rte flow item.
693 * This will create classifier rule that matches proto field.
695 * @param spec Pointer to the specific flow item.
696 * @param mask Pointer to the specific flow item's mask.
697 * @param flow Pointer to the flow.
698 * @return 0 in case of success, negative error value otherwise.
701 mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
702 const struct rte_flow_item_ipv4 *mask __rte_unused,
703 struct rte_flow *flow)
705 struct pp2_cls_rule_key_field *key_field;
706 uint8_t k = spec->hdr.next_proto_id;
708 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
711 key_field = &flow->rule.fields[flow->rule.num_fields];
712 mrvl_alloc_key_mask(key_field);
715 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
717 flow->pattern |= F_IP4_PROTO;
718 flow->rule.num_fields += 1;
724 * Parse either source or destination ip addresses of the ipv6 rte flow item.
726 * This will create classifier rule that matches either destination
727 * or source ip field.
729 * @param spec Pointer to the specific flow item.
730 * @param mask Pointer to the specific flow item's mask.
731 * @param parse_dst Parse either destination or source ipv6 address.
732 * @param flow Pointer to the flow.
733 * @return 0 in case of success, negative error value otherwise.
736 mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
737 const struct rte_flow_item_ipv6 *mask,
738 int parse_dst, struct rte_flow *flow)
740 struct pp2_cls_rule_key_field *key_field;
741 int size = sizeof(spec->hdr.dst_addr);
742 struct in6_addr k, m;
744 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
747 memset(&k, 0, sizeof(k));
749 memcpy(k.s6_addr, spec->hdr.dst_addr, size);
750 memcpy(m.s6_addr, mask->hdr.dst_addr, size);
752 flow->pattern |= F_IP6_DIP;
754 memcpy(k.s6_addr, spec->hdr.src_addr, size);
755 memcpy(m.s6_addr, mask->hdr.src_addr, size);
757 flow->pattern |= F_IP6_SIP;
760 key_field = &flow->rule.fields[flow->rule.num_fields];
761 mrvl_alloc_key_mask(key_field);
762 key_field->size = 16;
764 inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
765 inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
767 flow->rule.num_fields += 1;
/**
 * Helper for parsing destination ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 1, flow);
}
/**
 * Helper for parsing source ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 0, flow);
}
805 * Parse the flow label of the ipv6 flow item.
807 * This will create classifier rule that matches flow field.
809 * @param spec Pointer to the specific flow item.
810 * @param mask Pointer to the specific flow item's mask.
811 * @param flow Pointer to the flow.
812 * @return 0 in case of success, negative error value otherwise.
815 mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
816 const struct rte_flow_item_ipv6 *mask,
817 struct rte_flow *flow)
819 struct pp2_cls_rule_key_field *key_field;
820 uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
821 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
823 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
826 key_field = &flow->rule.fields[flow->rule.num_fields];
827 mrvl_alloc_key_mask(key_field);
830 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
831 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
833 flow->pattern |= F_IP6_FLOW;
834 flow->rule.num_fields += 1;
840 * Parse the next header of the ipv6 flow item.
842 * This will create classifier rule that matches next header field.
844 * @param spec Pointer to the specific flow item.
845 * @param mask Pointer to the specific flow item's mask.
846 * @param flow Pointer to the flow.
847 * @return 0 in case of success, negative error value otherwise.
850 mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
851 const struct rte_flow_item_ipv6 *mask __rte_unused,
852 struct rte_flow *flow)
854 struct pp2_cls_rule_key_field *key_field;
855 uint8_t k = spec->hdr.proto;
857 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
860 key_field = &flow->rule.fields[flow->rule.num_fields];
861 mrvl_alloc_key_mask(key_field);
864 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
866 flow->pattern |= F_IP6_NEXT_HDR;
867 flow->rule.num_fields += 1;
873 * Parse destination or source port of the tcp flow item.
875 * This will create classifier rule that matches either destination or
878 * @param spec Pointer to the specific flow item.
879 * @param mask Pointer to the specific flow item's mask.
880 * @param parse_dst Parse either destination or source port.
881 * @param flow Pointer to the flow.
882 * @return 0 in case of success, negative error value otherwise.
885 mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
886 const struct rte_flow_item_tcp *mask __rte_unused,
887 int parse_dst, struct rte_flow *flow)
889 struct pp2_cls_rule_key_field *key_field;
892 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
895 key_field = &flow->rule.fields[flow->rule.num_fields];
896 mrvl_alloc_key_mask(key_field);
900 k = rte_be_to_cpu_16(spec->hdr.dst_port);
902 flow->pattern |= F_TCP_DPORT;
904 k = rte_be_to_cpu_16(spec->hdr.src_port);
906 flow->pattern |= F_TCP_SPORT;
909 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
911 flow->rule.num_fields += 1;
/**
 * Helper for parsing the tcp source port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 0, flow);
}
/**
 * Helper for parsing the tcp destination port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 1, flow);
}
949 * Parse destination or source port of the udp flow item.
951 * This will create classifier rule that matches either destination or
954 * @param spec Pointer to the specific flow item.
955 * @param mask Pointer to the specific flow item's mask.
956 * @param parse_dst Parse either destination or source port.
957 * @param flow Pointer to the flow.
958 * @return 0 in case of success, negative error value otherwise.
961 mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
962 const struct rte_flow_item_udp *mask __rte_unused,
963 int parse_dst, struct rte_flow *flow)
965 struct pp2_cls_rule_key_field *key_field;
968 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
971 key_field = &flow->rule.fields[flow->rule.num_fields];
972 mrvl_alloc_key_mask(key_field);
976 k = rte_be_to_cpu_16(spec->hdr.dst_port);
978 flow->pattern |= F_UDP_DPORT;
980 k = rte_be_to_cpu_16(spec->hdr.src_port);
982 flow->pattern |= F_UDP_SPORT;
985 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
987 flow->rule.num_fields += 1;
/**
 * Helper for parsing the udp source port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 0, flow);
}
/**
 * Helper for parsing the udp destination port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 1, flow);
}
1025 * Parse eth flow item.
1027 * @param item Pointer to the flow item.
1028 * @param flow Pointer to the flow.
1029 * @param error Pointer to the flow error.
1030 * @returns 0 on success, negative value otherwise.
1033 mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
1034 struct rte_flow_error *error)
1036 const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
1037 struct ether_addr zero;
1040 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1041 &rte_flow_item_eth_mask,
1042 sizeof(struct rte_flow_item_eth), error);
1046 memset(&zero, 0, sizeof(zero));
1048 if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
1049 ret = mrvl_parse_dmac(spec, mask, flow);
1054 if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
1055 ret = mrvl_parse_smac(spec, mask, flow);
1061 MRVL_LOG(WARNING, "eth type mask is ignored");
1062 ret = mrvl_parse_type(spec, mask, flow);
1069 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1070 "Reached maximum number of fields in cls tbl key\n");
1075 * Parse vlan flow item.
1077 * @param item Pointer to the flow item.
1078 * @param flow Pointer to the flow.
1079 * @param error Pointer to the flow error.
1080 * @returns 0 on success, negative value otherwise.
1083 mrvl_parse_vlan(const struct rte_flow_item *item,
1084 struct rte_flow *flow,
1085 struct rte_flow_error *error)
1087 const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
1091 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1092 &rte_flow_item_vlan_mask,
1093 sizeof(struct rte_flow_item_vlan), error);
1097 m = rte_be_to_cpu_16(mask->tci);
1098 if (m & MRVL_VLAN_ID_MASK) {
1099 MRVL_LOG(WARNING, "vlan id mask is ignored");
1100 ret = mrvl_parse_vlan_id(spec, mask, flow);
1105 if (m & MRVL_VLAN_PRI_MASK) {
1106 MRVL_LOG(WARNING, "vlan pri mask is ignored");
1107 ret = mrvl_parse_vlan_pri(spec, mask, flow);
1112 if (flow->pattern & F_TYPE) {
1113 rte_flow_error_set(error, ENOTSUP,
1114 RTE_FLOW_ERROR_TYPE_ITEM, item,
1115 "VLAN TPID matching is not supported");
1118 if (mask->inner_type) {
1119 struct rte_flow_item_eth spec_eth = {
1120 .type = spec->inner_type,
1122 struct rte_flow_item_eth mask_eth = {
1123 .type = mask->inner_type,
1126 MRVL_LOG(WARNING, "inner eth type mask is ignored");
1127 ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
1134 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1135 "Reached maximum number of fields in cls tbl key\n");
1140 * Parse ipv4 flow item.
1142 * @param item Pointer to the flow item.
1143 * @param flow Pointer to the flow.
1144 * @param error Pointer to the flow error.
1145 * @returns 0 on success, negative value otherwise.
1148 mrvl_parse_ip4(const struct rte_flow_item *item,
1149 struct rte_flow *flow,
1150 struct rte_flow_error *error)
1152 const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
1155 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1156 &rte_flow_item_ipv4_mask,
1157 sizeof(struct rte_flow_item_ipv4), error);
1161 if (mask->hdr.version_ihl ||
1162 mask->hdr.total_length ||
1163 mask->hdr.packet_id ||
1164 mask->hdr.fragment_offset ||
1165 mask->hdr.time_to_live ||
1166 mask->hdr.hdr_checksum) {
1167 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1168 NULL, "Not supported by classifier\n");
1172 if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
1173 ret = mrvl_parse_ip4_dscp(spec, mask, flow);
1178 if (mask->hdr.src_addr) {
1179 ret = mrvl_parse_ip4_sip(spec, mask, flow);
1184 if (mask->hdr.dst_addr) {
1185 ret = mrvl_parse_ip4_dip(spec, mask, flow);
1190 if (mask->hdr.next_proto_id) {
1191 MRVL_LOG(WARNING, "next proto id mask is ignored");
1192 ret = mrvl_parse_ip4_proto(spec, mask, flow);
1199 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1200 "Reached maximum number of fields in cls tbl key\n");
1205 * Parse ipv6 flow item.
1207 * @param item Pointer to the flow item.
1208 * @param flow Pointer to the flow.
1209 * @param error Pointer to the flow error.
1210 * @returns 0 on success, negative value otherwise.
1213 mrvl_parse_ip6(const struct rte_flow_item *item,
1214 struct rte_flow *flow,
1215 struct rte_flow_error *error)
1217 const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
1218 struct ipv6_hdr zero;
1222 ret = mrvl_parse_init(item, (const void **)&spec,
1223 (const void **)&mask,
1224 &rte_flow_item_ipv6_mask,
1225 sizeof(struct rte_flow_item_ipv6),
1230 memset(&zero, 0, sizeof(zero));
1232 if (mask->hdr.payload_len ||
1233 mask->hdr.hop_limits) {
1234 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1235 NULL, "Not supported by classifier\n");
1239 if (memcmp(mask->hdr.src_addr,
1240 zero.src_addr, sizeof(mask->hdr.src_addr))) {
1241 ret = mrvl_parse_ip6_sip(spec, mask, flow);
1246 if (memcmp(mask->hdr.dst_addr,
1247 zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
1248 ret = mrvl_parse_ip6_dip(spec, mask, flow);
1253 flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
1255 ret = mrvl_parse_ip6_flow(spec, mask, flow);
1260 if (mask->hdr.proto) {
1261 MRVL_LOG(WARNING, "next header mask is ignored");
1262 ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
1269 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1270 "Reached maximum number of fields in cls tbl key\n");
1275 * Parse tcp flow item.
1277 * @param item Pointer to the flow item.
1278 * @param flow Pointer to the flow.
1279 * @param error Pointer to the flow error.
1280 * @returns 0 on success, negative value otherwise.
1283 mrvl_parse_tcp(const struct rte_flow_item *item,
1284 struct rte_flow *flow,
1285 struct rte_flow_error *error)
1287 const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1290 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1291 &rte_flow_item_ipv4_mask,
1292 sizeof(struct rte_flow_item_ipv4), error);
1296 if (mask->hdr.sent_seq ||
1297 mask->hdr.recv_ack ||
1298 mask->hdr.data_off ||
1299 mask->hdr.tcp_flags ||
1302 mask->hdr.tcp_urp) {
1303 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1304 NULL, "Not supported by classifier\n");
1308 if (mask->hdr.src_port) {
1309 MRVL_LOG(WARNING, "tcp sport mask is ignored");
1310 ret = mrvl_parse_tcp_sport(spec, mask, flow);
1315 if (mask->hdr.dst_port) {
1316 MRVL_LOG(WARNING, "tcp dport mask is ignored");
1317 ret = mrvl_parse_tcp_dport(spec, mask, flow);
1324 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1325 "Reached maximum number of fields in cls tbl key\n");
1330 * Parse udp flow item.
1332 * @param item Pointer to the flow item.
1333 * @param flow Pointer to the flow.
1334 * @param error Pointer to the flow error.
1335 * @returns 0 on success, negative value otherwise.
1338 mrvl_parse_udp(const struct rte_flow_item *item,
1339 struct rte_flow *flow,
1340 struct rte_flow_error *error)
1342 const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1345 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1346 &rte_flow_item_ipv4_mask,
1347 sizeof(struct rte_flow_item_ipv4), error);
1351 if (mask->hdr.dgram_len ||
1352 mask->hdr.dgram_cksum) {
1353 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1354 NULL, "Not supported by classifier\n");
1358 if (mask->hdr.src_port) {
1359 MRVL_LOG(WARNING, "udp sport mask is ignored");
1360 ret = mrvl_parse_udp_sport(spec, mask, flow);
1365 if (mask->hdr.dst_port) {
1366 MRVL_LOG(WARNING, "udp dport mask is ignored");
1367 ret = mrvl_parse_udp_dport(spec, mask, flow);
1374 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1375 "Reached maximum number of fields in cls tbl key\n");
1380 * Parse flow pattern composed of the the eth item.
1382 * @param pattern Pointer to the flow pattern table.
1383 * @param flow Pointer to the flow.
1384 * @param error Pointer to the flow error.
1385 * @returns 0 in case of success, negative value otherwise.
1388 mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
1389 struct rte_flow *flow,
1390 struct rte_flow_error *error)
1392 return mrvl_parse_eth(pattern, flow, error);
1396 * Parse flow pattern composed of the eth and vlan items.
1398 * @param pattern Pointer to the flow pattern table.
1399 * @param flow Pointer to the flow.
1400 * @param error Pointer to the flow error.
1401 * @returns 0 in case of success, negative value otherwise.
1404 mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[],
1405 struct rte_flow *flow,
1406 struct rte_flow_error *error)
1408 const struct rte_flow_item *item = mrvl_next_item(pattern);
1411 ret = mrvl_parse_eth(item, flow, error);
1415 item = mrvl_next_item(item + 1);
1417 return mrvl_parse_vlan(item, flow, error);
1421 * Parse flow pattern composed of the eth, vlan and ip4/ip6 items.
1423 * @param pattern Pointer to the flow pattern table.
1424 * @param flow Pointer to the flow.
1425 * @param error Pointer to the flow error.
1426 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1427 * @returns 0 in case of success, negative value otherwise.
1430 mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[],
1431 struct rte_flow *flow,
1432 struct rte_flow_error *error, int ip6)
1434 const struct rte_flow_item *item = mrvl_next_item(pattern);
1437 ret = mrvl_parse_eth(item, flow, error);
1441 item = mrvl_next_item(item + 1);
1442 ret = mrvl_parse_vlan(item, flow, error);
1446 item = mrvl_next_item(item + 1);
1448 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1449 mrvl_parse_ip4(item, flow, error);
1453 * Parse flow pattern composed of the eth, vlan and ipv4 items.
1455 * @param pattern Pointer to the flow pattern table.
1456 * @param flow Pointer to the flow.
1457 * @param error Pointer to the flow error.
1458 * @returns 0 in case of success, negative value otherwise.
1461 mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[],
1462 struct rte_flow *flow,
1463 struct rte_flow_error *error)
1465 return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0);
1469 * Parse flow pattern composed of the eth, vlan and ipv6 items.
1471 * @param pattern Pointer to the flow pattern table.
1472 * @param flow Pointer to the flow.
1473 * @param error Pointer to the flow error.
1474 * @returns 0 in case of success, negative value otherwise.
1477 mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[],
1478 struct rte_flow *flow,
1479 struct rte_flow_error *error)
1481 return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1);
1485 * Parse flow pattern composed of the eth and ip4/ip6 items.
1487 * @param pattern Pointer to the flow pattern table.
1488 * @param flow Pointer to the flow.
1489 * @param error Pointer to the flow error.
1490 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1491 * @returns 0 in case of success, negative value otherwise.
1494 mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[],
1495 struct rte_flow *flow,
1496 struct rte_flow_error *error, int ip6)
1498 const struct rte_flow_item *item = mrvl_next_item(pattern);
1501 ret = mrvl_parse_eth(item, flow, error);
1505 item = mrvl_next_item(item + 1);
1507 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1508 mrvl_parse_ip4(item, flow, error);
1512 * Parse flow pattern composed of the eth and ipv4 items.
1514 * @param pattern Pointer to the flow pattern table.
1515 * @param flow Pointer to the flow.
1516 * @param error Pointer to the flow error.
1517 * @returns 0 in case of success, negative value otherwise.
1520 mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[],
1521 struct rte_flow *flow,
1522 struct rte_flow_error *error)
1524 return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
1528 * Parse flow pattern composed of the eth and ipv6 items.
1530 * @param pattern Pointer to the flow pattern table.
1531 * @param flow Pointer to the flow.
1532 * @param error Pointer to the flow error.
1533 * @returns 0 in case of success, negative value otherwise.
1536 mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[],
1537 struct rte_flow *flow,
1538 struct rte_flow_error *error)
1540 return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
1544 * Parse flow pattern composed of the eth, ip4 and tcp/udp items.
1546 * @param pattern Pointer to the flow pattern table.
1547 * @param flow Pointer to the flow.
1548 * @param error Pointer to the flow error.
1549 * @param tcp 1 to parse tcp item, 0 to parse udp item.
1550 * @returns 0 in case of success, negative value otherwise.
1553 mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[],
1554 struct rte_flow *flow,
1555 struct rte_flow_error *error, int tcp)
1557 const struct rte_flow_item *item = mrvl_next_item(pattern);
1560 ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
1564 item = mrvl_next_item(item + 1);
1565 item = mrvl_next_item(item + 1);
1568 return mrvl_parse_tcp(item, flow, error);
1570 return mrvl_parse_udp(item, flow, error);
1574 * Parse flow pattern composed of the eth, ipv4 and tcp items.
1576 * @param pattern Pointer to the flow pattern table.
1577 * @param flow Pointer to the flow.
1578 * @param error Pointer to the flow error.
1579 * @returns 0 in case of success, negative value otherwise.
1582 mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[],
1583 struct rte_flow *flow,
1584 struct rte_flow_error *error)
1586 return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1);
1590 * Parse flow pattern composed of the eth, ipv4 and udp items.
1592 * @param pattern Pointer to the flow pattern table.
1593 * @param flow Pointer to the flow.
1594 * @param error Pointer to the flow error.
1595 * @returns 0 in case of success, negative value otherwise.
1598 mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[],
1599 struct rte_flow *flow,
1600 struct rte_flow_error *error)
1602 return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0);
1606 * Parse flow pattern composed of the eth, ipv6 and tcp/udp items.
1608 * @param pattern Pointer to the flow pattern table.
1609 * @param flow Pointer to the flow.
1610 * @param error Pointer to the flow error.
1611 * @param tcp 1 to parse tcp item, 0 to parse udp item.
1612 * @returns 0 in case of success, negative value otherwise.
1615 mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[],
1616 struct rte_flow *flow,
1617 struct rte_flow_error *error, int tcp)
1619 const struct rte_flow_item *item = mrvl_next_item(pattern);
1622 ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
1626 item = mrvl_next_item(item + 1);
1627 item = mrvl_next_item(item + 1);
1630 return mrvl_parse_tcp(item, flow, error);
1632 return mrvl_parse_udp(item, flow, error);
1636 * Parse flow pattern composed of the eth, ipv6 and tcp items.
1638 * @param pattern Pointer to the flow pattern table.
1639 * @param flow Pointer to the flow.
1640 * @param error Pointer to the flow error.
1641 * @returns 0 in case of success, negative value otherwise.
1644 mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[],
1645 struct rte_flow *flow,
1646 struct rte_flow_error *error)
1648 return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1);
1652 * Parse flow pattern composed of the eth, ipv6 and udp items.
1654 * @param pattern Pointer to the flow pattern table.
1655 * @param flow Pointer to the flow.
1656 * @param error Pointer to the flow error.
1657 * @returns 0 in case of success, negative value otherwise.
1660 mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[],
1661 struct rte_flow *flow,
1662 struct rte_flow_error *error)
1664 return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0);
1668 * Parse flow pattern composed of the vlan item.
1670 * @param pattern Pointer to the flow pattern table.
1671 * @param flow Pointer to the flow.
1672 * @param error Pointer to the flow error.
1673 * @returns 0 in case of success, negative value otherwise.
1676 mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[],
1677 struct rte_flow *flow,
1678 struct rte_flow_error *error)
1680 const struct rte_flow_item *item = mrvl_next_item(pattern);
1682 return mrvl_parse_vlan(item, flow, error);
1686 * Parse flow pattern composed of the vlan and ip4/ip6 items.
1688 * @param pattern Pointer to the flow pattern table.
1689 * @param flow Pointer to the flow.
1690 * @param error Pointer to the flow error.
1691 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1692 * @returns 0 in case of success, negative value otherwise.
1695 mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[],
1696 struct rte_flow *flow,
1697 struct rte_flow_error *error, int ip6)
1699 const struct rte_flow_item *item = mrvl_next_item(pattern);
1702 ret = mrvl_parse_vlan(item, flow, error);
1706 item = mrvl_next_item(item + 1);
1708 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1709 mrvl_parse_ip4(item, flow, error);
1713 * Parse flow pattern composed of the vlan and ipv4 items.
1715 * @param pattern Pointer to the flow pattern table.
1716 * @param flow Pointer to the flow.
1717 * @param error Pointer to the flow error.
1718 * @returns 0 in case of success, negative value otherwise.
1721 mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[],
1722 struct rte_flow *flow,
1723 struct rte_flow_error *error)
1725 return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
1729 * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items.
1731 * @param pattern Pointer to the flow pattern table.
1732 * @param flow Pointer to the flow.
1733 * @param error Pointer to the flow error.
1734 * @returns 0 in case of success, negative value otherwise.
1737 mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[],
1738 struct rte_flow *flow,
1739 struct rte_flow_error *error, int tcp)
1741 const struct rte_flow_item *item = mrvl_next_item(pattern);
1744 ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
1748 item = mrvl_next_item(item + 1);
1749 item = mrvl_next_item(item + 1);
1752 return mrvl_parse_tcp(item, flow, error);
1754 return mrvl_parse_udp(item, flow, error);
1758 * Parse flow pattern composed of the vlan, ipv4 and tcp items.
1760 * @param pattern Pointer to the flow pattern table.
1761 * @param flow Pointer to the flow.
1762 * @param error Pointer to the flow error.
1763 * @returns 0 in case of success, negative value otherwise.
1766 mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[],
1767 struct rte_flow *flow,
1768 struct rte_flow_error *error)
1770 return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1);
1774 * Parse flow pattern composed of the vlan, ipv4 and udp items.
1776 * @param pattern Pointer to the flow pattern table.
1777 * @param flow Pointer to the flow.
1778 * @param error Pointer to the flow error.
1779 * @returns 0 in case of success, negative value otherwise.
1782 mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[],
1783 struct rte_flow *flow,
1784 struct rte_flow_error *error)
1786 return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0);
1790 * Parse flow pattern composed of the vlan and ipv6 items.
1792 * @param pattern Pointer to the flow pattern table.
1793 * @param flow Pointer to the flow.
1794 * @param error Pointer to the flow error.
1795 * @returns 0 in case of success, negative value otherwise.
1798 mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[],
1799 struct rte_flow *flow,
1800 struct rte_flow_error *error)
1802 return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
1806 * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items.
1808 * @param pattern Pointer to the flow pattern table.
1809 * @param flow Pointer to the flow.
1810 * @param error Pointer to the flow error.
1811 * @returns 0 in case of success, negative value otherwise.
1814 mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[],
1815 struct rte_flow *flow,
1816 struct rte_flow_error *error, int tcp)
1818 const struct rte_flow_item *item = mrvl_next_item(pattern);
1821 ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
1825 item = mrvl_next_item(item + 1);
1826 item = mrvl_next_item(item + 1);
1829 return mrvl_parse_tcp(item, flow, error);
1831 return mrvl_parse_udp(item, flow, error);
1835 * Parse flow pattern composed of the vlan, ipv6 and tcp items.
1837 * @param pattern Pointer to the flow pattern table.
1838 * @param flow Pointer to the flow.
1839 * @param error Pointer to the flow error.
1840 * @returns 0 in case of success, negative value otherwise.
1843 mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[],
1844 struct rte_flow *flow,
1845 struct rte_flow_error *error)
1847 return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1);
1851 * Parse flow pattern composed of the vlan, ipv6 and udp items.
1853 * @param pattern Pointer to the flow pattern table.
1854 * @param flow Pointer to the flow.
1855 * @param error Pointer to the flow error.
1856 * @returns 0 in case of success, negative value otherwise.
1859 mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[],
1860 struct rte_flow *flow,
1861 struct rte_flow_error *error)
1863 return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0);
1867 * Parse flow pattern composed of the ip4/ip6 item.
1869 * @param pattern Pointer to the flow pattern table.
1870 * @param flow Pointer to the flow.
1871 * @param error Pointer to the flow error.
1872 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1873 * @returns 0 in case of success, negative value otherwise.
1876 mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[],
1877 struct rte_flow *flow,
1878 struct rte_flow_error *error, int ip6)
1880 const struct rte_flow_item *item = mrvl_next_item(pattern);
1882 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1883 mrvl_parse_ip4(item, flow, error);
1887 * Parse flow pattern composed of the ipv4 item.
1889 * @param pattern Pointer to the flow pattern table.
1890 * @param flow Pointer to the flow.
1891 * @param error Pointer to the flow error.
1892 * @returns 0 in case of success, negative value otherwise.
1895 mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[],
1896 struct rte_flow *flow,
1897 struct rte_flow_error *error)
1899 return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0);
1903 * Parse flow pattern composed of the ipv6 item.
1905 * @param pattern Pointer to the flow pattern table.
1906 * @param flow Pointer to the flow.
1907 * @param error Pointer to the flow error.
1908 * @returns 0 in case of success, negative value otherwise.
1911 mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[],
1912 struct rte_flow *flow,
1913 struct rte_flow_error *error)
1915 return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1);
1919 * Parse flow pattern composed of the ip4/ip6 and tcp items.
1921 * @param pattern Pointer to the flow pattern table.
1922 * @param flow Pointer to the flow.
1923 * @param error Pointer to the flow error.
1924 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1925 * @returns 0 in case of success, negative value otherwise.
1928 mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[],
1929 struct rte_flow *flow,
1930 struct rte_flow_error *error, int ip6)
1932 const struct rte_flow_item *item = mrvl_next_item(pattern);
1935 ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
1936 mrvl_parse_ip4(item, flow, error);
1940 item = mrvl_next_item(item + 1);
1942 return mrvl_parse_tcp(item, flow, error);
1946 * Parse flow pattern composed of the ipv4 and tcp items.
1948 * @param pattern Pointer to the flow pattern table.
1949 * @param flow Pointer to the flow.
1950 * @param error Pointer to the flow error.
1951 * @returns 0 in case of success, negative value otherwise.
1954 mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[],
1955 struct rte_flow *flow,
1956 struct rte_flow_error *error)
1958 return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0);
1962 * Parse flow pattern composed of the ipv6 and tcp items.
1964 * @param pattern Pointer to the flow pattern table.
1965 * @param flow Pointer to the flow.
1966 * @param error Pointer to the flow error.
1967 * @returns 0 in case of success, negative value otherwise.
1970 mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
1971 struct rte_flow *flow,
1972 struct rte_flow_error *error)
1974 return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1);
1978 * Parse flow pattern composed of the ipv4/ipv6 and udp items.
1980 * @param pattern Pointer to the flow pattern table.
1981 * @param flow Pointer to the flow.
1982 * @param error Pointer to the flow error.
1983 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1984 * @returns 0 in case of success, negative value otherwise.
1987 mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[],
1988 struct rte_flow *flow,
1989 struct rte_flow_error *error, int ip6)
1991 const struct rte_flow_item *item = mrvl_next_item(pattern);
1994 ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
1995 mrvl_parse_ip4(item, flow, error);
1999 item = mrvl_next_item(item + 1);
2001 return mrvl_parse_udp(item, flow, error);
2005 * Parse flow pattern composed of the ipv4 and udp items.
2007 * @param pattern Pointer to the flow pattern table.
2008 * @param flow Pointer to the flow.
2009 * @param error Pointer to the flow error.
2010 * @returns 0 in case of success, negative value otherwise.
2013 mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[],
2014 struct rte_flow *flow,
2015 struct rte_flow_error *error)
2017 return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0);
2021 * Parse flow pattern composed of the ipv6 and udp items.
2023 * @param pattern Pointer to the flow pattern table.
2024 * @param flow Pointer to the flow.
2025 * @param error Pointer to the flow error.
2026 * @returns 0 in case of success, negative value otherwise.
2029 mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[],
2030 struct rte_flow *flow,
2031 struct rte_flow_error *error)
2033 return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1);
2037 * Parse flow pattern composed of the tcp item.
2039 * @param pattern Pointer to the flow pattern table.
2040 * @param flow Pointer to the flow.
2041 * @param error Pointer to the flow error.
2042 * @returns 0 in case of success, negative value otherwise.
2045 mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[],
2046 struct rte_flow *flow,
2047 struct rte_flow_error *error)
2049 const struct rte_flow_item *item = mrvl_next_item(pattern);
2051 return mrvl_parse_tcp(item, flow, error);
2055 * Parse flow pattern composed of the udp item.
2057 * @param pattern Pointer to the flow pattern table.
2058 * @param flow Pointer to the flow.
2059 * @param error Pointer to the flow error.
2060 * @returns 0 in case of success, negative value otherwise.
2063 mrvl_parse_pattern_udp(const struct rte_flow_item pattern[],
2064 struct rte_flow *flow,
2065 struct rte_flow_error *error)
2067 const struct rte_flow_item *item = mrvl_next_item(pattern);
2069 return mrvl_parse_udp(item, flow, error);
2073 * Structure used to map specific flow pattern to the pattern parse callback
2074 * which will iterate over each pattern item and extract relevant data.
2076 static const struct {
2077 const enum rte_flow_item_type *pattern;
2078 int (*parse)(const struct rte_flow_item pattern[],
2079 struct rte_flow *flow,
2080 struct rte_flow_error *error);
2081 } mrvl_patterns[] = {
2082 { pattern_eth, mrvl_parse_pattern_eth },
2083 { pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
2084 { pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
2085 { pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
2086 { pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
2087 { pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
2088 { pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
2089 { pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
2090 { pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
2091 { pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
2092 { pattern_vlan, mrvl_parse_pattern_vlan },
2093 { pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
2094 { pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
2095 { pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
2096 { pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
2097 { pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
2098 { pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
2099 { pattern_ip, mrvl_parse_pattern_ip4 },
2100 { pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
2101 { pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
2102 { pattern_ip6, mrvl_parse_pattern_ip6 },
2103 { pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
2104 { pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
2105 { pattern_tcp, mrvl_parse_pattern_tcp },
2106 { pattern_udp, mrvl_parse_pattern_udp }
2110 * Check whether provided pattern matches any of the supported ones.
2112 * @param type_pattern Pointer to the pattern type.
2113 * @param item_pattern Pointer to the flow pattern.
2114 * @returns 1 in case of success, 0 value otherwise.
2117 mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
2118 const struct rte_flow_item *item_pattern)
2120 const enum rte_flow_item_type *type = type_pattern;
2121 const struct rte_flow_item *item = item_pattern;
2124 if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
2129 if (*type == RTE_FLOW_ITEM_TYPE_END ||
2130 item->type == RTE_FLOW_ITEM_TYPE_END)
2133 if (*type != item->type)
2140 return *type == item->type;
2144 * Parse flow attribute.
2146 * This will check whether the provided attribute's flags are supported.
2148 * @param priv Unused
2149 * @param attr Pointer to the flow attribute.
2150 * @param flow Unused
2151 * @param error Pointer to the flow error.
2152 * @returns 0 in case of success, negative value otherwise.
2155 mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
2156 const struct rte_flow_attr *attr,
2157 struct rte_flow *flow __rte_unused,
2158 struct rte_flow_error *error)
2161 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
2162 NULL, "NULL attribute");
2167 rte_flow_error_set(error, ENOTSUP,
2168 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2169 "Groups are not supported");
2172 if (attr->priority) {
2173 rte_flow_error_set(error, ENOTSUP,
2174 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
2175 "Priorities are not supported");
2178 if (!attr->ingress) {
2179 rte_flow_error_set(error, ENOTSUP,
2180 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
2181 "Only ingress is supported");
2185 rte_flow_error_set(error, ENOTSUP,
2186 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2187 "Egress is not supported");
2190 if (attr->transfer) {
2191 rte_flow_error_set(error, ENOTSUP,
2192 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
2193 "Transfer is not supported");
2201 * Parse flow pattern.
2203 * Specific classifier rule will be created as well.
2205 * @param priv Unused
2206 * @param pattern Pointer to the flow pattern.
2207 * @param flow Pointer to the flow.
2208 * @param error Pointer to the flow error.
2209 * @returns 0 in case of success, negative value otherwise.
2212 mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
2213 const struct rte_flow_item pattern[],
2214 struct rte_flow *flow,
2215 struct rte_flow_error *error)
2220 for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
2221 if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))
2224 ret = mrvl_patterns[i].parse(pattern, flow, error);
2226 mrvl_free_all_key_mask(&flow->rule);
2231 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2232 "Unsupported pattern");
2238 * Parse flow actions.
2240 * @param priv Pointer to the port's private data.
2241 * @param actions Pointer the action table.
2242 * @param flow Pointer to the flow.
2243 * @param error Pointer to the flow error.
2244 * @returns 0 in case of success, negative value otherwise.
2247 mrvl_flow_parse_actions(struct mrvl_priv *priv,
2248 const struct rte_flow_action actions[],
2249 struct rte_flow *flow,
2250 struct rte_flow_error *error)
2252 const struct rte_flow_action *action = actions;
2255 for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
2256 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
2259 if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
2260 flow->cos.ppio = priv->ppio;
2262 flow->action.type = PP2_CLS_TBL_ACT_DROP;
2263 flow->action.cos = &flow->cos;
2265 } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2266 const struct rte_flow_action_queue *q =
2267 (const struct rte_flow_action_queue *)
2270 if (q->index > priv->nb_rx_queues) {
2271 rte_flow_error_set(error, EINVAL,
2272 RTE_FLOW_ERROR_TYPE_ACTION,
2274 "Queue index out of range");
2278 if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
2280 * Unknown TC mapping, mapping will not have
2284 "Unknown TC mapping for queue %hu eth%hhu",
2285 q->index, priv->ppio_id);
2287 rte_flow_error_set(error, EFAULT,
2288 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2294 "Action: Assign packets to queue %d, tc:%d, q:%d",
2295 q->index, priv->rxq_map[q->index].tc,
2296 priv->rxq_map[q->index].inq);
2298 flow->cos.ppio = priv->ppio;
2299 flow->cos.tc = priv->rxq_map[q->index].tc;
2300 flow->action.type = PP2_CLS_TBL_ACT_DONE;
2301 flow->action.cos = &flow->cos;
2304 rte_flow_error_set(error, ENOTSUP,
2305 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2306 "Action not supported");
2313 rte_flow_error_set(error, EINVAL,
2314 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2315 NULL, "Action not specified");
2323 * Parse flow attribute, pattern and actions.
2325 * @param priv Pointer to the port's private data.
2326 * @param attr Pointer to the flow attribute.
2327 * @param pattern Pointer to the flow pattern.
2328 * @param actions Pointer to the flow actions.
2329 * @param flow Pointer to the flow.
2330 * @param error Pointer to the flow error.
2331 * @returns 0 on success, negative value otherwise.
2334 mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
2335 const struct rte_flow_item pattern[],
2336 const struct rte_flow_action actions[],
2337 struct rte_flow *flow,
2338 struct rte_flow_error *error)
2342 ret = mrvl_flow_parse_attr(priv, attr, flow, error);
2346 ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
2350 return mrvl_flow_parse_actions(priv, actions, flow, error);
2354 * Get engine type for the given flow.
2356 * @param field Pointer to the flow.
2357 * @returns The type of the engine.
2359 static inline enum pp2_cls_tbl_type
2360 mrvl_engine_type(const struct rte_flow *flow)
2364 for (i = 0; i < flow->rule.num_fields; i++)
2365 size += flow->rule.fields[i].size;
2368 * For maskable engine type the key size must be up to 8 bytes.
2369 * For keys with size bigger than 8 bytes, engine type must
2370 * be set to exact match.
2373 return PP2_CLS_TBL_EXACT_MATCH;
2375 return PP2_CLS_TBL_MASKABLE;
2379 * Create classifier table.
2381 * @param dev Pointer to the device.
2382 * @param flow Pointer to the very first flow.
2383 * @returns 0 in case of success, negative value otherwise.
2386 mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
2388 struct mrvl_priv *priv = dev->data->dev_private;
2389 struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
2392 if (priv->cls_tbl) {
2393 pp2_cls_tbl_deinit(priv->cls_tbl);
2394 priv->cls_tbl = NULL;
2397 memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
2399 priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
2400 MRVL_LOG(INFO, "Setting cls search engine type to %s",
2401 priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
2402 "exact" : "maskable");
2403 priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
2404 priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
2405 priv->cls_tbl_params.default_act.cos = &first_flow->cos;
2407 if (first_flow->pattern & F_DMAC) {
2408 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2409 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
2411 key->num_fields += 1;
2414 if (first_flow->pattern & F_SMAC) {
2415 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2416 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
2418 key->num_fields += 1;
2421 if (first_flow->pattern & F_TYPE) {
2422 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2423 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
2425 key->num_fields += 1;
2428 if (first_flow->pattern & F_VLAN_ID) {
2429 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2430 key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
2432 key->num_fields += 1;
2435 if (first_flow->pattern & F_VLAN_PRI) {
2436 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2437 key->proto_field[key->num_fields].field.vlan =
2440 key->num_fields += 1;
2443 if (first_flow->pattern & F_IP4_TOS) {
2444 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2445 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS;
2447 key->num_fields += 1;
2450 if (first_flow->pattern & F_IP4_SIP) {
2451 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2452 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
2454 key->num_fields += 1;
2457 if (first_flow->pattern & F_IP4_DIP) {
2458 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2459 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
2461 key->num_fields += 1;
2464 if (first_flow->pattern & F_IP4_PROTO) {
2465 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2466 key->proto_field[key->num_fields].field.ipv4 =
2469 key->num_fields += 1;
2472 if (first_flow->pattern & F_IP6_SIP) {
2473 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2474 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
2475 key->key_size += 16;
2476 key->num_fields += 1;
2479 if (first_flow->pattern & F_IP6_DIP) {
2480 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2481 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
2482 key->key_size += 16;
2483 key->num_fields += 1;
2486 if (first_flow->pattern & F_IP6_FLOW) {
2487 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2488 key->proto_field[key->num_fields].field.ipv6 =
2491 key->num_fields += 1;
2494 if (first_flow->pattern & F_IP6_NEXT_HDR) {
2495 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2496 key->proto_field[key->num_fields].field.ipv6 =
2497 MV_NET_IP6_F_NEXT_HDR;
2499 key->num_fields += 1;
2502 if (first_flow->pattern & F_TCP_SPORT) {
2503 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2504 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2506 key->num_fields += 1;
2509 if (first_flow->pattern & F_TCP_DPORT) {
2510 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2511 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
2513 key->num_fields += 1;
2516 if (first_flow->pattern & F_UDP_SPORT) {
2517 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2518 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2520 key->num_fields += 1;
2523 if (first_flow->pattern & F_UDP_DPORT) {
2524 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2525 key->proto_field[key->num_fields].field.udp = MV_NET_TCP_F_DP;
2527 key->num_fields += 1;
2530 ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
2532 priv->cls_tbl_pattern = first_flow->pattern;
2538 * Check whether new flow can be added to the table
2540 * @param priv Pointer to the port's private data.
2541 * @param flow Pointer to the new flow.
2542 * @return 1 in case flow can be added, 0 otherwise.
2545 mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
/* A new flow fits the already-initialized classifier table only if it
 * parses exactly the same set of header fields (pattern bitmask) AND
 * resolves to the same classifier engine type as the table was created
 * with; MUSDK tables cannot mix key layouts or engine types. */
2547 return flow->pattern == priv->cls_tbl_pattern &&
2548 mrvl_engine_type(flow) == priv->cls_tbl_params.type;
2552 * DPDK flow create callback called when flow is to be created.
2554 * @param dev Pointer to the device.
2555 * @param attr Pointer to the flow attribute.
2556 * @param pattern Pointer to the flow pattern.
2557 * @param actions Pointer to the flow actions.
2558 * @param error Pointer to the flow error.
2559 * @returns Pointer to the created flow in case of success, NULL otherwise.
2561 static struct rte_flow *
2562 mrvl_flow_create(struct rte_eth_dev *dev,
2563 const struct rte_flow_attr *attr,
2564 const struct rte_flow_item pattern[],
2565 const struct rte_flow_action actions[],
2566 struct rte_flow_error *error)
2568 struct mrvl_priv *priv = dev->data->dev_private;
2569 struct rte_flow *flow, *first;
2572 if (!dev->data->dev_started) {
2573 rte_flow_error_set(error, EINVAL,
2574 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2575 "Port must be started first\n");
2579 flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
2583 ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
2590 * 1. In case table does not exist - create one.
2591 * 2. In case table exists, is empty and new flow cannot be added
2593 * 3. In case table is not empty and new flow matches table format
2595 * 4. Otherwise flow cannot be added.
2597 first = LIST_FIRST(&priv->flows);
2598 if (!priv->cls_tbl) {
2599 ret = mrvl_create_cls_table(dev, flow);
2600 } else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
2601 ret = mrvl_create_cls_table(dev, flow);
2602 } else if (mrvl_flow_can_be_added(priv, flow)) {
2605 rte_flow_error_set(error, EINVAL,
2606 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2607 "Pattern does not match cls table format\n");
2612 rte_flow_error_set(error, EINVAL,
2613 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2614 "Failed to create cls table\n");
2618 ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
2620 rte_flow_error_set(error, EINVAL,
2621 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2622 "Failed to add rule\n");
2626 LIST_INSERT_HEAD(&priv->flows, flow, next);
2635 * Remove classifier rule associated with given flow.
2637 * @param priv Pointer to the port's private data.
2638 * @param flow Pointer to the flow.
2639 * @param error Pointer to the flow error.
2640 * @returns 0 in case of success, negative value otherwise.
2643 mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
2644 struct rte_flow_error *error)
2648 if (!priv->cls_tbl) {
2649 rte_flow_error_set(error, EINVAL,
2650 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2651 "Classifier table not initialized");
2655 ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
2657 rte_flow_error_set(error, EINVAL,
2658 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2659 "Failed to remove rule");
2663 mrvl_free_all_key_mask(&flow->rule);
2669 * DPDK flow destroy callback called when flow is to be removed.
2671 * @param dev Pointer to the device.
2672 * @param flow Pointer to the flow.
2673 * @param error Pointer to the flow error.
2674 * @returns 0 in case of success, negative value otherwise.
2677 mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
2678 struct rte_flow_error *error)
2680 struct mrvl_priv *priv = dev->data->dev_private;
2684 LIST_FOREACH(f, &priv->flows, next) {
2690 rte_flow_error_set(error, EINVAL,
2691 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2692 "Rule was not found");
2696 LIST_REMOVE(f, next);
2698 ret = mrvl_flow_remove(priv, flow, error);
2708 * DPDK flow callback called to verify given attribute, pattern and actions.
2710 * @param dev Pointer to the device.
2711 * @param attr Pointer to the flow attribute.
2712 * @param pattern Pointer to the flow pattern.
2713 * @param actions Pointer to the flow actions.
2714 * @param error Pointer to the flow error.
2715 * @returns 0 on success, negative value otherwise.
2718 mrvl_flow_validate(struct rte_eth_dev *dev,
2719 const struct rte_flow_attr *attr,
2720 const struct rte_flow_item pattern[],
2721 const struct rte_flow_action actions[],
2722 struct rte_flow_error *error)
/* Validation is implemented as a dry run: actually create the flow and,
 * if creation succeeded, immediately destroy it again.  Side effect:
 * this can transiently create/re-create the classifier table. */
/* NOTE(review): `static` makes this local persist across calls and is
 * not thread-safe; it looks unnecessary since the pointer is assigned
 * before use on every call — confirm and consider dropping `static`. */
2724 static struct rte_flow *flow;
2726 flow = mrvl_flow_create(dev, attr, pattern, actions, error);
2730 mrvl_flow_destroy(dev, flow, error);
2736 * DPDK flow flush callback called when flows are to be flushed.
2738 * @param dev Pointer to the device.
2739 * @param error Pointer to the flow error.
2740 * @returns 0 in case of success, negative value otherwise.
2743 mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2745 struct mrvl_priv *priv = dev->data->dev_private;
/* Drain the flow list head-first: remove each rule from the classifier
 * table, then unlink the flow from the per-port list. */
2747 while (!LIST_EMPTY(&priv->flows)) {
2748 struct rte_flow *flow = LIST_FIRST(&priv->flows);
/* NOTE(review): ret's error handling (and freeing of the flow object)
 * is in lines not visible in this excerpt — verify flows are rte_free'd
 * after LIST_REMOVE to avoid a leak. */
2749 int ret = mrvl_flow_remove(priv, flow, error);
2753 LIST_REMOVE(flow, next);
2761 * DPDK flow isolate callback called to isolate port.
2763 * @param dev Pointer to the device.
2764 * @param enable Pass 0/1 to disable/enable port isolation.
2765 * @param error Pointer to the flow error.
2766 * @returns 0 in case of success, negative value otherwise.
2769 mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
2770 struct rte_flow_error *error)
2772 struct mrvl_priv *priv = dev->data->dev_private;
/* Isolation mode may only be toggled while the port is stopped;
 * reject with EBUSY otherwise (mirrors the started-check in create). */
2774 if (dev->data->dev_started) {
2775 rte_flow_error_set(error, EBUSY,
2776 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2777 NULL, "Port must be stopped first\n");
/* Record the mode; the rest of the PMD consults priv->isolated when
 * programming default/promiscuous traffic paths. */
2781 priv->isolated = enable;
/* rte_flow driver callbacks exported to the ethdev layer for this PMD.
 * .query is intentionally absent — flow statistics are not supported. */
2786 const struct rte_flow_ops mrvl_flow_ops = {
2787 .validate = mrvl_flow_validate,
2788 .create = mrvl_flow_create,
2789 .destroy = mrvl_flow_destroy,
2790 .flush = mrvl_flow_flush,
2791 .isolate = mrvl_flow_isolate