1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Marvell International Ltd.
3 * Copyright(c) 2018 Semihalf.
8 #include <rte_flow_driver.h>
9 #include <rte_malloc.h>
12 #include <arpa/inet.h>
14 #include "mrvl_ethdev.h"
/** Number of rules in the classifier table. */
#define MRVL_CLS_MAX_NUM_RULES 20

/** Size of the classifier key and mask strings. */
/* Keys/masks are handed to MUSDK as printable strings (see snprintf users below). */
#define MRVL_CLS_STR_SIZE_MAX 40
/** Parsed fields in processed rte_flow_item. */
enum mrvl_parsed_fields {
	/* One bit per matchable header field; ORed into flow->pattern
	 * by the mrvl_parse_*() helpers below.
	 */
	F_VLAN_TCI = BIT(5), /* not supported by MUSDK yet */
	F_IP6_TC = BIT(10), /* not supported by MUSDK yet */
	F_IP6_NEXT_HDR = BIT(14),
	F_TCP_SPORT = BIT(15),
	F_TCP_DPORT = BIT(16),
	F_UDP_SPORT = BIT(17),
	F_UDP_DPORT = BIT(18),
/** PMD-specific definition of a flow rule handle. */
	LIST_ENTRY(rte_flow) next; /* linkage in the per-port flow list */
	enum mrvl_parsed_fields pattern; /* bitmask of fields parsed from the pattern */
	struct pp2_cls_tbl_rule rule; /* MUSDK classifier rule (key/mask fields) */
	struct pp2_cls_cos_desc cos; /* class-of-service (target queue) descriptor */
	struct pp2_cls_tbl_action action; /* MUSDK action applied on rule hit */
/*
 * Flow-pattern templates accepted by this classifier.  Each table lists the
 * exact item sequence (terminated by RTE_FLOW_ITEM_TYPE_END) that a flow
 * rule's pattern may follow.
 */
static const enum rte_flow_item_type pattern_eth[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_eth_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_eth_vlan_ip[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_eth_ip4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_eth_ip4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_eth_ip6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_eth_ip6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_vlan[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_vlan_ip[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_vlan_ip_udp[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_vlan_ip6[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = {
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_ip[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_ip6[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_ip_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_ip6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_ip_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_ip6_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_tcp[] = {
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END
static const enum rte_flow_item_type pattern_udp[] = {
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END
/* 802.1Q TCI sub-fields: VLAN id occupies bits 0-11. */
#define MRVL_VLAN_ID_MASK 0x0fff
/* NOTE(review): 802.1p PCP occupies TCI bits 13-15 (0xe000); 0x7000 covers
 * bits 12-14 (DEI + two PCP bits) and is shifted by 13 below — confirm this
 * matches what the MUSDK classifier expects.
 */
#define MRVL_VLAN_PRI_MASK 0x7000
/* Upper six bits of the IPv4 ToS byte hold the DSCP. */
#define MRVL_IPV4_DSCP_MASK 0xfc
#define MRVL_IPV4_ADDR_MASK 0xffffffff
/* Low 20 bits of IPv6 vtc_flow are the flow label. */
#define MRVL_IPV6_FLOW_MASK 0x0fffff
 * Given a flow item, return the next non-void one.
 * @param items Pointer to the item in the table.
 * @returns Next not-void item, NULL otherwise.
static const struct rte_flow_item *
mrvl_next_item(const struct rte_flow_item *items)
	const struct rte_flow_item *item = items;
	/* VOID items are placeholders in the pattern; skip them until the
	 * END sentinel is reached.
	 */
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
 * Allocate memory for classifier rule key and mask fields.
 * @param field Pointer to the classifier rule.
 * @returns 0 in case of success, negative value otherwise.
mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
	/* Allocate on the caller's NUMA node; buffers hold the textual
	 * key/mask strings consumed by MUSDK.
	 */
	unsigned int id = rte_socket_id();
	field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
	field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
	/* Error path: release the key again if the mask allocation failed. */
	rte_free(field->key);
 * Free memory allocated for classifier rule key and mask fields.
 * @param field Pointer to the classifier rule.
mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
	/* rte_free(NULL) is documented as a no-op, so unset fields are safe. */
	rte_free(field->key);
	rte_free(field->mask);
 * Free memory allocated for all classifier rule key and mask fields.
 * @param rule Pointer to the classifier table rule.
mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
	for (i = 0; i < rule->num_fields; i++)
		mrvl_free_key_mask(&rule->fields[i]);
	/* Reset the count so the rule can be repopulated from scratch. */
	rule->num_fields = 0;
 * Initialize rte flow item parsing.
 * Validates the spec/mask/last combination of a flow item and selects the
 * effective mask (item's own mask or the supplied default).
 * @param item Pointer to the flow item.
 * @param spec_ptr Pointer to the specific item pointer.
 * @param mask_ptr Pointer to the specific item's mask pointer.
 * @def_mask Pointer to the default mask.
 * @size Size of the flow item.
 * @error Pointer to the rte flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_init(const struct rte_flow_item *item,
		const void **spec_ptr,
		const void **mask_ptr,
		const void *def_mask,
		struct rte_flow_error *error)
	/* "zeros" is compared against "last" below to detect range matching. */
	memset(zeros, 0, size);
	rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
	/* "spec" is mandatory whenever "mask" or "last" is supplied. */
	if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Mask or last is set without spec\n");
	 * If "mask" is not set, default mask is used,
	 * but if default mask is NULL, "mask" should be set.
	if (item->mask == NULL) {
		if (def_mask == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Mask should be specified\n");
		mask = (const uint8_t *)def_mask;
		mask = (const uint8_t *)item->mask;
	spec = (const uint8_t *)item->spec;
	last = (const uint8_t *)item->last;
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Spec should be specified\n");
	 * If field values in "last" are either 0 or equal to the corresponding
	 * values in "spec" then they are ignored.
	/* Otherwise a real range was requested, which is rejected below. */
	!memcmp(last, zeros, size) &&
	memcmp(last, spec, size) != 0) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "Ranging is not supported\n");
 * Parse the eth flow item.
 * This will create classifier rule that matches either destination or source
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source mac address.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_mac(const struct rte_flow_item_eth *spec,
	       const struct rte_flow_item_eth *mask,
	       int parse_dst, struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	const uint8_t *k, *m;
	/* Bail out once the MUSDK key already holds the maximum field count. */
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
		k = spec->dst.addr_bytes;
		m = mask->dst.addr_bytes;
		flow->pattern |= F_DMAC;
		k = spec->src.addr_bytes;
		m = mask->src.addr_bytes;
		flow->pattern |= F_SMAC;
	key_field = &flow->rule.fields[flow->rule.num_fields];
	/* NOTE(review): the return value of mrvl_alloc_key_mask() is not
	 * checked here — verify how allocation failure is handled.
	 */
	mrvl_alloc_key_mask(key_field);
	/* MUSDK consumes key/mask as text; render MAC as colon-separated hex. */
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
		 "%02x:%02x:%02x:%02x:%02x:%02x",
		 k[0], k[1], k[2], k[3], k[4], k[5]);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
		 "%02x:%02x:%02x:%02x:%02x:%02x",
		 m[0], m[1], m[2], m[3], m[4], m[5]);
	flow->rule.num_fields += 1;
 * Helper for parsing the eth flow item destination mac address.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
	/* parse_dst = 1 selects the destination address. */
	return mrvl_parse_mac(spec, mask, 1, flow);
 * Helper for parsing the eth flow item source mac address.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_smac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
	/* parse_dst = 0 selects the source address. */
	return mrvl_parse_mac(spec, mask, 0, flow);
 * Parse the ether type field of the eth flow item.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_type(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask __rte_unused,
		struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	/* Ether type is big-endian on the wire; convert before rendering.
	 * The mask is ignored (only an exact-match key is written).
	 */
	k = rte_be_to_cpu_16(spec->type);
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	flow->pattern |= F_TYPE;
	flow->rule.num_fields += 1;
 * Parse the vid field of the vlan rte flow item.
 * This will create classifier rule that matches vid.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
		   const struct rte_flow_item_vlan *mask __rte_unused,
		   struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	/* VID is the low 12 bits of the (big-endian) TCI. */
	k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	flow->pattern |= F_VLAN_ID;
	flow->rule.num_fields += 1;
 * Parse the pri field of the vlan rte flow item.
 * This will create classifier rule that matches pri.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
		    const struct rte_flow_item_vlan *mask __rte_unused,
		    struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	/* NOTE(review): mask 0x7000 with >>13 does not match 802.1Q PCP
	 * (bits 13-15, 0xe000 >> 13) — see MRVL_VLAN_PRI_MASK; verify.
	 */
	k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	flow->pattern |= F_VLAN_PRI;
	flow->rule.num_fields += 1;
 * Parse the dscp field of the ipv4 rte flow item.
 * This will create classifier rule that matches dscp field.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
		    const struct rte_flow_item_ipv4 *mask,
		    struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	/* DSCP is the top six bits of the ToS byte; shift out the ECN bits. */
	k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
	m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
	flow->pattern |= F_IP4_TOS;
	flow->rule.num_fields += 1;
 * Parse either source or destination ip addresses of the ipv4 flow item.
 * This will create classifier rule that matches either destination
 * or source ip field.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source ip address.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
		    const struct rte_flow_item_ipv4 *mask,
		    int parse_dst, struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	memset(&k, 0, sizeof(k));
		/* Address stays big-endian for inet_ntop(); the mask is
		 * converted to host order so it prints as a plain hex number.
		 */
		k.s_addr = spec->hdr.dst_addr;
		m = rte_be_to_cpu_32(mask->hdr.dst_addr);
		flow->pattern |= F_IP4_DIP;
		k.s_addr = spec->hdr.src_addr;
		m = rte_be_to_cpu_32(mask->hdr.src_addr);
		flow->pattern |= F_IP4_SIP;
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	/* Key is rendered as dotted-quad text, mask as "0x…" hex. */
	inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
	flow->rule.num_fields += 1;
 * Helper for parsing destination ip of the ipv4 flow item.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
	/* parse_dst = 1 selects the destination address. */
	return mrvl_parse_ip4_addr(spec, mask, 1, flow);
 * Helper for parsing source ip of the ipv4 flow item.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
	/* parse_dst = 0 selects the source address. */
	return mrvl_parse_ip4_addr(spec, mask, 0, flow);
 * Parse the proto field of the ipv4 rte flow item.
 * This will create classifier rule that matches proto field.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
		     const struct rte_flow_item_ipv4 *mask __rte_unused,
		     struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	uint8_t k = spec->hdr.next_proto_id;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	/* Exact-match on the protocol number; the mask is ignored. */
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	flow->pattern |= F_IP4_PROTO;
	flow->rule.num_fields += 1;
 * Parse either source or destination ip addresses of the ipv6 rte flow item.
 * This will create classifier rule that matches either destination
 * or source ip field.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source ipv6 address.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
		    const struct rte_flow_item_ipv6 *mask,
		    int parse_dst, struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	int size = sizeof(spec->hdr.dst_addr);
	struct in6_addr k, m;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	memset(&k, 0, sizeof(k));
		memcpy(k.s6_addr, spec->hdr.dst_addr, size);
		memcpy(m.s6_addr, mask->hdr.dst_addr, size);
		flow->pattern |= F_IP6_DIP;
		memcpy(k.s6_addr, spec->hdr.src_addr, size);
		memcpy(m.s6_addr, mask->hdr.src_addr, size);
		flow->pattern |= F_IP6_SIP;
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	key_field->size = 16;
	/* Both key and mask are rendered as textual IPv6 addresses. */
	inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
	inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
	flow->rule.num_fields += 1;
 * Helper for parsing destination ip of the ipv6 flow item.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
	/* parse_dst = 1 selects the destination address. */
	return mrvl_parse_ip6_addr(spec, mask, 1, flow);
 * Helper for parsing source ip of the ipv6 flow item.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
	/* parse_dst = 0 selects the source address. */
	return mrvl_parse_ip6_addr(spec, mask, 0, flow);
 * Parse the flow label of the ipv6 flow item.
 * This will create classifier rule that matches flow field.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
		    const struct rte_flow_item_ipv6 *mask,
		    struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	/* Flow label is the low 20 bits of the (big-endian) vtc_flow word. */
	uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
		 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
	flow->pattern |= F_IP6_FLOW;
	flow->rule.num_fields += 1;
 * Parse the next header of the ipv6 flow item.
 * This will create classifier rule that matches next header field.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
			const struct rte_flow_item_ipv6 *mask __rte_unused,
			struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	uint8_t k = spec->hdr.proto;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
	/* Exact-match on the next-header value; the mask is ignored. */
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	flow->pattern |= F_IP6_NEXT_HDR;
	flow->rule.num_fields += 1;
 * Parse destination or source port of the tcp flow item.
 * This will create classifier rule that matches either destination or
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source port.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
		    const struct rte_flow_item_tcp *mask __rte_unused,
		    int parse_dst, struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
		/* Ports are big-endian on the wire; convert before rendering. */
		k = rte_be_to_cpu_16(spec->hdr.dst_port);
		flow->pattern |= F_TCP_DPORT;
		k = rte_be_to_cpu_16(spec->hdr.src_port);
		flow->pattern |= F_TCP_SPORT;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	flow->rule.num_fields += 1;
 * Helper for parsing the tcp source port of the tcp flow item.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
	/* parse_dst = 0 selects the source port. */
	return mrvl_parse_tcp_port(spec, mask, 0, flow);
 * Helper for parsing the tcp destination port of the tcp flow item.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
	/* parse_dst = 1 selects the destination port. */
	return mrvl_parse_tcp_port(spec, mask, 1, flow);
 * Parse destination or source port of the udp flow item.
 * This will create classifier rule that matches either destination or
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param parse_dst Parse either destination or source port.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
		    const struct rte_flow_item_udp *mask __rte_unused,
		    int parse_dst, struct rte_flow *flow)
	struct pp2_cls_rule_key_field *key_field;
	if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
	key_field = &flow->rule.fields[flow->rule.num_fields];
	mrvl_alloc_key_mask(key_field);
		/* Ports are big-endian on the wire; convert before rendering. */
		k = rte_be_to_cpu_16(spec->hdr.dst_port);
		flow->pattern |= F_UDP_DPORT;
		k = rte_be_to_cpu_16(spec->hdr.src_port);
		flow->pattern |= F_UDP_SPORT;
	snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
	flow->rule.num_fields += 1;
 * Helper for parsing the udp source port of the udp flow item.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
	/* parse_dst = 0 selects the source port. */
	return mrvl_parse_udp_port(spec, mask, 0, flow);
 * Helper for parsing the udp destination port of the udp flow item.
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
	/* parse_dst = 1 selects the destination port. */
	return mrvl_parse_udp_port(spec, mask, 1, flow);
 * Parse eth flow item.
 * Dispatches to dmac/smac/type sub-parsers for each sub-field whose mask
 * is non-zero.
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
	       struct rte_flow_error *error)
	const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
	struct ether_addr zero;
	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_eth_mask,
			      sizeof(struct rte_flow_item_eth), error);
	memset(&zero, 0, sizeof(zero));
	/* Only sub-fields with a non-zero mask contribute key fields. */
	if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
		ret = mrvl_parse_dmac(spec, mask, flow);
	if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
		ret = mrvl_parse_smac(spec, mask, flow);
	MRVL_LOG(WARNING, "eth type mask is ignored");
	ret = mrvl_parse_type(spec, mask, flow);
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		"Reached maximum number of fields in cls tbl key\n");
 * Parse vlan flow item.
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
mrvl_parse_vlan(const struct rte_flow_item *item,
		struct rte_flow *flow,
		struct rte_flow_error *error)
	const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_vlan_mask,
			      sizeof(struct rte_flow_item_vlan), error);
	/* Inspect the TCI mask (host order) to see which sub-fields matter. */
	m = rte_be_to_cpu_16(mask->tci);
	if (m & MRVL_VLAN_ID_MASK) {
		MRVL_LOG(WARNING, "vlan id mask is ignored");
		ret = mrvl_parse_vlan_id(spec, mask, flow);
	if (m & MRVL_VLAN_PRI_MASK) {
		MRVL_LOG(WARNING, "vlan pri mask is ignored");
		ret = mrvl_parse_vlan_pri(spec, mask, flow);
	/* An ether-type field added by a preceding ETH item would clash with
	 * matching on the TPID, which the classifier cannot express.
	 */
	if (flow->pattern & F_TYPE) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "VLAN TPID matching is not supported");
	if (mask->inner_type) {
		/* Re-use the ether-type parser on the inner (encapsulated) type. */
		struct rte_flow_item_eth spec_eth = {
			.type = spec->inner_type,
		struct rte_flow_item_eth mask_eth = {
			.type = mask->inner_type,
		MRVL_LOG(WARNING, "inner eth type mask is ignored");
		ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		"Reached maximum number of fields in cls tbl key\n");
 * Parse ipv4 flow item.
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
mrvl_parse_ip4(const struct rte_flow_item *item,
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
	const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
	ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
			      &rte_flow_item_ipv4_mask,
			      sizeof(struct rte_flow_item_ipv4), error);
	/* Reject masks on header fields the classifier cannot match on. */
	if (mask->hdr.version_ihl ||
	    mask->hdr.total_length ||
	    mask->hdr.packet_id ||
	    mask->hdr.fragment_offset ||
	    mask->hdr.time_to_live ||
	    mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by classifier\n");
	if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
		ret = mrvl_parse_ip4_dscp(spec, mask, flow);
	if (mask->hdr.src_addr) {
		ret = mrvl_parse_ip4_sip(spec, mask, flow);
	if (mask->hdr.dst_addr) {
		ret = mrvl_parse_ip4_dip(spec, mask, flow);
	if (mask->hdr.next_proto_id) {
		MRVL_LOG(WARNING, "next proto id mask is ignored");
		ret = mrvl_parse_ip4_proto(spec, mask, flow);
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		"Reached maximum number of fields in cls tbl key\n");
 * Parse ipv6 flow item.
 * @param item Pointer to the flow item.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
mrvl_parse_ip6(const struct rte_flow_item *item,
	       struct rte_flow *flow,
	       struct rte_flow_error *error)
	const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
	struct ipv6_hdr zero;
	ret = mrvl_parse_init(item, (const void **)&spec,
			      (const void **)&mask,
			      &rte_flow_item_ipv6_mask,
			      sizeof(struct rte_flow_item_ipv6),
	/* "zero" serves as an all-zeroes reference for the memcmp()s below. */
	memset(&zero, 0, sizeof(zero));
	/* Reject masks on header fields the classifier cannot match on. */
	if (mask->hdr.payload_len ||
	    mask->hdr.hop_limits) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by classifier\n");
	if (memcmp(mask->hdr.src_addr,
		   zero.src_addr, sizeof(mask->hdr.src_addr))) {
		ret = mrvl_parse_ip6_sip(spec, mask, flow);
	if (memcmp(mask->hdr.dst_addr,
		   zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
		ret = mrvl_parse_ip6_dip(spec, mask, flow);
	flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
	ret = mrvl_parse_ip6_flow(spec, mask, flow);
	if (mask->hdr.proto) {
		MRVL_LOG(WARNING, "next header mask is ignored");
		ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
		"Reached maximum number of fields in cls tbl key\n");
1270 * Parse tcp flow item.
1272 * @param item Pointer to the flow item.
1273 * @param flow Pointer to the flow.
1274 * @param error Pointer to the flow error.
1275 * @returns 0 on success, negative value otherwise.
1278 mrvl_parse_tcp(const struct rte_flow_item *item,
1279 struct rte_flow *flow,
1280 struct rte_flow_error *error)
1282 const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1285 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1286 &rte_flow_item_ipv4_mask,
1287 sizeof(struct rte_flow_item_ipv4), error);
1291 if (mask->hdr.sent_seq ||
1292 mask->hdr.recv_ack ||
1293 mask->hdr.data_off ||
1294 mask->hdr.tcp_flags ||
1297 mask->hdr.tcp_urp) {
1298 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1299 NULL, "Not supported by classifier\n");
1303 if (mask->hdr.src_port) {
1304 MRVL_LOG(WARNING, "tcp sport mask is ignored");
1305 ret = mrvl_parse_tcp_sport(spec, mask, flow);
1310 if (mask->hdr.dst_port) {
1311 MRVL_LOG(WARNING, "tcp dport mask is ignored");
1312 ret = mrvl_parse_tcp_dport(spec, mask, flow);
1319 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1320 "Reached maximum number of fields in cls tbl key\n");
1325 * Parse udp flow item.
1327 * @param item Pointer to the flow item.
1328 * @param flow Pointer to the flow.
1329 * @param error Pointer to the flow error.
1330 * @returns 0 on success, negative value otherwise.
1333 mrvl_parse_udp(const struct rte_flow_item *item,
1334 struct rte_flow *flow,
1335 struct rte_flow_error *error)
1337 const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1340 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1341 &rte_flow_item_ipv4_mask,
1342 sizeof(struct rte_flow_item_ipv4), error);
1346 if (mask->hdr.dgram_len ||
1347 mask->hdr.dgram_cksum) {
1348 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1349 NULL, "Not supported by classifier\n");
1353 if (mask->hdr.src_port) {
1354 MRVL_LOG(WARNING, "udp sport mask is ignored");
1355 ret = mrvl_parse_udp_sport(spec, mask, flow);
1360 if (mask->hdr.dst_port) {
1361 MRVL_LOG(WARNING, "udp dport mask is ignored");
1362 ret = mrvl_parse_udp_dport(spec, mask, flow);
1369 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1370 "Reached maximum number of fields in cls tbl key\n");
1375 * Parse flow pattern composed of the the eth item.
1377 * @param pattern Pointer to the flow pattern table.
1378 * @param flow Pointer to the flow.
1379 * @param error Pointer to the flow error.
1380 * @returns 0 in case of success, negative value otherwise.
1383 mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
1384 struct rte_flow *flow,
1385 struct rte_flow_error *error)
1387 return mrvl_parse_eth(pattern, flow, error);
1391 * Parse flow pattern composed of the eth and vlan items.
1393 * @param pattern Pointer to the flow pattern table.
1394 * @param flow Pointer to the flow.
1395 * @param error Pointer to the flow error.
1396 * @returns 0 in case of success, negative value otherwise.
1399 mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[],
1400 struct rte_flow *flow,
1401 struct rte_flow_error *error)
1403 const struct rte_flow_item *item = mrvl_next_item(pattern);
1406 ret = mrvl_parse_eth(item, flow, error);
1410 item = mrvl_next_item(item + 1);
1412 return mrvl_parse_vlan(item, flow, error);
1416 * Parse flow pattern composed of the eth, vlan and ip4/ip6 items.
1418 * @param pattern Pointer to the flow pattern table.
1419 * @param flow Pointer to the flow.
1420 * @param error Pointer to the flow error.
1421 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1422 * @returns 0 in case of success, negative value otherwise.
1425 mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[],
1426 struct rte_flow *flow,
1427 struct rte_flow_error *error, int ip6)
1429 const struct rte_flow_item *item = mrvl_next_item(pattern);
1432 ret = mrvl_parse_eth(item, flow, error);
1436 item = mrvl_next_item(item + 1);
1437 ret = mrvl_parse_vlan(item, flow, error);
1441 item = mrvl_next_item(item + 1);
1443 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1444 mrvl_parse_ip4(item, flow, error);
1448 * Parse flow pattern composed of the eth, vlan and ipv4 items.
1450 * @param pattern Pointer to the flow pattern table.
1451 * @param flow Pointer to the flow.
1452 * @param error Pointer to the flow error.
1453 * @returns 0 in case of success, negative value otherwise.
1456 mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[],
1457 struct rte_flow *flow,
1458 struct rte_flow_error *error)
1460 return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0);
1464 * Parse flow pattern composed of the eth, vlan and ipv6 items.
1466 * @param pattern Pointer to the flow pattern table.
1467 * @param flow Pointer to the flow.
1468 * @param error Pointer to the flow error.
1469 * @returns 0 in case of success, negative value otherwise.
1472 mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[],
1473 struct rte_flow *flow,
1474 struct rte_flow_error *error)
1476 return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1);
1480 * Parse flow pattern composed of the eth and ip4/ip6 items.
1482 * @param pattern Pointer to the flow pattern table.
1483 * @param flow Pointer to the flow.
1484 * @param error Pointer to the flow error.
1485 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1486 * @returns 0 in case of success, negative value otherwise.
1489 mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[],
1490 struct rte_flow *flow,
1491 struct rte_flow_error *error, int ip6)
1493 const struct rte_flow_item *item = mrvl_next_item(pattern);
1496 ret = mrvl_parse_eth(item, flow, error);
1500 item = mrvl_next_item(item + 1);
1502 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1503 mrvl_parse_ip4(item, flow, error);
1507 * Parse flow pattern composed of the eth and ipv4 items.
1509 * @param pattern Pointer to the flow pattern table.
1510 * @param flow Pointer to the flow.
1511 * @param error Pointer to the flow error.
1512 * @returns 0 in case of success, negative value otherwise.
1515 mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[],
1516 struct rte_flow *flow,
1517 struct rte_flow_error *error)
1519 return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
1523 * Parse flow pattern composed of the eth and ipv6 items.
1525 * @param pattern Pointer to the flow pattern table.
1526 * @param flow Pointer to the flow.
1527 * @param error Pointer to the flow error.
1528 * @returns 0 in case of success, negative value otherwise.
1531 mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[],
1532 struct rte_flow *flow,
1533 struct rte_flow_error *error)
1535 return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
1539 * Parse flow pattern composed of the eth, ip4 and tcp/udp items.
1541 * @param pattern Pointer to the flow pattern table.
1542 * @param flow Pointer to the flow.
1543 * @param error Pointer to the flow error.
1544 * @param tcp 1 to parse tcp item, 0 to parse udp item.
1545 * @returns 0 in case of success, negative value otherwise.
1548 mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[],
1549 struct rte_flow *flow,
1550 struct rte_flow_error *error, int tcp)
1552 const struct rte_flow_item *item = mrvl_next_item(pattern);
1555 ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
1559 item = mrvl_next_item(item + 1);
1560 item = mrvl_next_item(item + 1);
1563 return mrvl_parse_tcp(item, flow, error);
1565 return mrvl_parse_udp(item, flow, error);
1569 * Parse flow pattern composed of the eth, ipv4 and tcp items.
1571 * @param pattern Pointer to the flow pattern table.
1572 * @param flow Pointer to the flow.
1573 * @param error Pointer to the flow error.
1574 * @returns 0 in case of success, negative value otherwise.
1577 mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[],
1578 struct rte_flow *flow,
1579 struct rte_flow_error *error)
1581 return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1);
1585 * Parse flow pattern composed of the eth, ipv4 and udp items.
1587 * @param pattern Pointer to the flow pattern table.
1588 * @param flow Pointer to the flow.
1589 * @param error Pointer to the flow error.
1590 * @returns 0 in case of success, negative value otherwise.
1593 mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[],
1594 struct rte_flow *flow,
1595 struct rte_flow_error *error)
1597 return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0);
1601 * Parse flow pattern composed of the eth, ipv6 and tcp/udp items.
1603 * @param pattern Pointer to the flow pattern table.
1604 * @param flow Pointer to the flow.
1605 * @param error Pointer to the flow error.
1606 * @param tcp 1 to parse tcp item, 0 to parse udp item.
1607 * @returns 0 in case of success, negative value otherwise.
1610 mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[],
1611 struct rte_flow *flow,
1612 struct rte_flow_error *error, int tcp)
1614 const struct rte_flow_item *item = mrvl_next_item(pattern);
1617 ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
1621 item = mrvl_next_item(item + 1);
1622 item = mrvl_next_item(item + 1);
1625 return mrvl_parse_tcp(item, flow, error);
1627 return mrvl_parse_udp(item, flow, error);
1631 * Parse flow pattern composed of the eth, ipv6 and tcp items.
1633 * @param pattern Pointer to the flow pattern table.
1634 * @param flow Pointer to the flow.
1635 * @param error Pointer to the flow error.
1636 * @returns 0 in case of success, negative value otherwise.
1639 mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[],
1640 struct rte_flow *flow,
1641 struct rte_flow_error *error)
1643 return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1);
1647 * Parse flow pattern composed of the eth, ipv6 and udp items.
1649 * @param pattern Pointer to the flow pattern table.
1650 * @param flow Pointer to the flow.
1651 * @param error Pointer to the flow error.
1652 * @returns 0 in case of success, negative value otherwise.
1655 mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[],
1656 struct rte_flow *flow,
1657 struct rte_flow_error *error)
1659 return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0);
1663 * Parse flow pattern composed of the vlan item.
1665 * @param pattern Pointer to the flow pattern table.
1666 * @param flow Pointer to the flow.
1667 * @param error Pointer to the flow error.
1668 * @returns 0 in case of success, negative value otherwise.
1671 mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[],
1672 struct rte_flow *flow,
1673 struct rte_flow_error *error)
1675 const struct rte_flow_item *item = mrvl_next_item(pattern);
1677 return mrvl_parse_vlan(item, flow, error);
1681 * Parse flow pattern composed of the vlan and ip4/ip6 items.
1683 * @param pattern Pointer to the flow pattern table.
1684 * @param flow Pointer to the flow.
1685 * @param error Pointer to the flow error.
1686 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1687 * @returns 0 in case of success, negative value otherwise.
1690 mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[],
1691 struct rte_flow *flow,
1692 struct rte_flow_error *error, int ip6)
1694 const struct rte_flow_item *item = mrvl_next_item(pattern);
1697 ret = mrvl_parse_vlan(item, flow, error);
1701 item = mrvl_next_item(item + 1);
1703 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1704 mrvl_parse_ip4(item, flow, error);
1708 * Parse flow pattern composed of the vlan and ipv4 items.
1710 * @param pattern Pointer to the flow pattern table.
1711 * @param flow Pointer to the flow.
1712 * @param error Pointer to the flow error.
1713 * @returns 0 in case of success, negative value otherwise.
1716 mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[],
1717 struct rte_flow *flow,
1718 struct rte_flow_error *error)
1720 return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
1724 * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items.
1726 * @param pattern Pointer to the flow pattern table.
1727 * @param flow Pointer to the flow.
1728 * @param error Pointer to the flow error.
1729 * @returns 0 in case of success, negative value otherwise.
1732 mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[],
1733 struct rte_flow *flow,
1734 struct rte_flow_error *error, int tcp)
1736 const struct rte_flow_item *item = mrvl_next_item(pattern);
1739 ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
1743 item = mrvl_next_item(item + 1);
1744 item = mrvl_next_item(item + 1);
1747 return mrvl_parse_tcp(item, flow, error);
1749 return mrvl_parse_udp(item, flow, error);
1753 * Parse flow pattern composed of the vlan, ipv4 and tcp items.
1755 * @param pattern Pointer to the flow pattern table.
1756 * @param flow Pointer to the flow.
1757 * @param error Pointer to the flow error.
1758 * @returns 0 in case of success, negative value otherwise.
1761 mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[],
1762 struct rte_flow *flow,
1763 struct rte_flow_error *error)
1765 return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1);
1769 * Parse flow pattern composed of the vlan, ipv4 and udp items.
1771 * @param pattern Pointer to the flow pattern table.
1772 * @param flow Pointer to the flow.
1773 * @param error Pointer to the flow error.
1774 * @returns 0 in case of success, negative value otherwise.
1777 mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[],
1778 struct rte_flow *flow,
1779 struct rte_flow_error *error)
1781 return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0);
1785 * Parse flow pattern composed of the vlan and ipv6 items.
1787 * @param pattern Pointer to the flow pattern table.
1788 * @param flow Pointer to the flow.
1789 * @param error Pointer to the flow error.
1790 * @returns 0 in case of success, negative value otherwise.
1793 mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[],
1794 struct rte_flow *flow,
1795 struct rte_flow_error *error)
1797 return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
1801 * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items.
1803 * @param pattern Pointer to the flow pattern table.
1804 * @param flow Pointer to the flow.
1805 * @param error Pointer to the flow error.
1806 * @returns 0 in case of success, negative value otherwise.
1809 mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[],
1810 struct rte_flow *flow,
1811 struct rte_flow_error *error, int tcp)
1813 const struct rte_flow_item *item = mrvl_next_item(pattern);
1816 ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
1820 item = mrvl_next_item(item + 1);
1821 item = mrvl_next_item(item + 1);
1824 return mrvl_parse_tcp(item, flow, error);
1826 return mrvl_parse_udp(item, flow, error);
1830 * Parse flow pattern composed of the vlan, ipv6 and tcp items.
1832 * @param pattern Pointer to the flow pattern table.
1833 * @param flow Pointer to the flow.
1834 * @param error Pointer to the flow error.
1835 * @returns 0 in case of success, negative value otherwise.
1838 mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[],
1839 struct rte_flow *flow,
1840 struct rte_flow_error *error)
1842 return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1);
1846 * Parse flow pattern composed of the vlan, ipv6 and udp items.
1848 * @param pattern Pointer to the flow pattern table.
1849 * @param flow Pointer to the flow.
1850 * @param error Pointer to the flow error.
1851 * @returns 0 in case of success, negative value otherwise.
1854 mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[],
1855 struct rte_flow *flow,
1856 struct rte_flow_error *error)
1858 return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0);
1862 * Parse flow pattern composed of the ip4/ip6 item.
1864 * @param pattern Pointer to the flow pattern table.
1865 * @param flow Pointer to the flow.
1866 * @param error Pointer to the flow error.
1867 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1868 * @returns 0 in case of success, negative value otherwise.
1871 mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[],
1872 struct rte_flow *flow,
1873 struct rte_flow_error *error, int ip6)
1875 const struct rte_flow_item *item = mrvl_next_item(pattern);
1877 return ip6 ? mrvl_parse_ip6(item, flow, error) :
1878 mrvl_parse_ip4(item, flow, error);
1882 * Parse flow pattern composed of the ipv4 item.
1884 * @param pattern Pointer to the flow pattern table.
1885 * @param flow Pointer to the flow.
1886 * @param error Pointer to the flow error.
1887 * @returns 0 in case of success, negative value otherwise.
1890 mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[],
1891 struct rte_flow *flow,
1892 struct rte_flow_error *error)
1894 return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0);
1898 * Parse flow pattern composed of the ipv6 item.
1900 * @param pattern Pointer to the flow pattern table.
1901 * @param flow Pointer to the flow.
1902 * @param error Pointer to the flow error.
1903 * @returns 0 in case of success, negative value otherwise.
1906 mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[],
1907 struct rte_flow *flow,
1908 struct rte_flow_error *error)
1910 return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1);
1914 * Parse flow pattern composed of the ip4/ip6 and tcp items.
1916 * @param pattern Pointer to the flow pattern table.
1917 * @param flow Pointer to the flow.
1918 * @param error Pointer to the flow error.
1919 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1920 * @returns 0 in case of success, negative value otherwise.
1923 mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[],
1924 struct rte_flow *flow,
1925 struct rte_flow_error *error, int ip6)
1927 const struct rte_flow_item *item = mrvl_next_item(pattern);
1930 ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
1931 mrvl_parse_ip4(item, flow, error);
1935 item = mrvl_next_item(item + 1);
1937 return mrvl_parse_tcp(item, flow, error);
1941 * Parse flow pattern composed of the ipv4 and tcp items.
1943 * @param pattern Pointer to the flow pattern table.
1944 * @param flow Pointer to the flow.
1945 * @param error Pointer to the flow error.
1946 * @returns 0 in case of success, negative value otherwise.
1949 mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[],
1950 struct rte_flow *flow,
1951 struct rte_flow_error *error)
1953 return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0);
1957 * Parse flow pattern composed of the ipv6 and tcp items.
1959 * @param pattern Pointer to the flow pattern table.
1960 * @param flow Pointer to the flow.
1961 * @param error Pointer to the flow error.
1962 * @returns 0 in case of success, negative value otherwise.
1965 mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
1966 struct rte_flow *flow,
1967 struct rte_flow_error *error)
1969 return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1);
1973 * Parse flow pattern composed of the ipv4/ipv6 and udp items.
1975 * @param pattern Pointer to the flow pattern table.
1976 * @param flow Pointer to the flow.
1977 * @param error Pointer to the flow error.
1978 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
1979 * @returns 0 in case of success, negative value otherwise.
1982 mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[],
1983 struct rte_flow *flow,
1984 struct rte_flow_error *error, int ip6)
1986 const struct rte_flow_item *item = mrvl_next_item(pattern);
1989 ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
1990 mrvl_parse_ip4(item, flow, error);
1994 item = mrvl_next_item(item + 1);
1996 return mrvl_parse_udp(item, flow, error);
2000 * Parse flow pattern composed of the ipv4 and udp items.
2002 * @param pattern Pointer to the flow pattern table.
2003 * @param flow Pointer to the flow.
2004 * @param error Pointer to the flow error.
2005 * @returns 0 in case of success, negative value otherwise.
2008 mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[],
2009 struct rte_flow *flow,
2010 struct rte_flow_error *error)
2012 return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0);
2016 * Parse flow pattern composed of the ipv6 and udp items.
2018 * @param pattern Pointer to the flow pattern table.
2019 * @param flow Pointer to the flow.
2020 * @param error Pointer to the flow error.
2021 * @returns 0 in case of success, negative value otherwise.
2024 mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[],
2025 struct rte_flow *flow,
2026 struct rte_flow_error *error)
2028 return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1);
2032 * Parse flow pattern composed of the tcp item.
2034 * @param pattern Pointer to the flow pattern table.
2035 * @param flow Pointer to the flow.
2036 * @param error Pointer to the flow error.
2037 * @returns 0 in case of success, negative value otherwise.
2040 mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[],
2041 struct rte_flow *flow,
2042 struct rte_flow_error *error)
2044 const struct rte_flow_item *item = mrvl_next_item(pattern);
2046 return mrvl_parse_tcp(item, flow, error);
2050 * Parse flow pattern composed of the udp item.
2052 * @param pattern Pointer to the flow pattern table.
2053 * @param flow Pointer to the flow.
2054 * @param error Pointer to the flow error.
2055 * @returns 0 in case of success, negative value otherwise.
2058 mrvl_parse_pattern_udp(const struct rte_flow_item pattern[],
2059 struct rte_flow *flow,
2060 struct rte_flow_error *error)
2062 const struct rte_flow_item *item = mrvl_next_item(pattern);
2064 return mrvl_parse_udp(item, flow, error);
2068 * Structure used to map specific flow pattern to the pattern parse callback
2069 * which will iterate over each pattern item and extract relevant data.
2071 static const struct {
2072 const enum rte_flow_item_type *pattern;
2073 int (*parse)(const struct rte_flow_item pattern[],
2074 struct rte_flow *flow,
2075 struct rte_flow_error *error);
2076 } mrvl_patterns[] = {
2077 { pattern_eth, mrvl_parse_pattern_eth },
2078 { pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
2079 { pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
2080 { pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
2081 { pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
2082 { pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
2083 { pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
2084 { pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
2085 { pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
2086 { pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
2087 { pattern_vlan, mrvl_parse_pattern_vlan },
2088 { pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
2089 { pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
2090 { pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
2091 { pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
2092 { pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
2093 { pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
2094 { pattern_ip, mrvl_parse_pattern_ip4 },
2095 { pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
2096 { pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
2097 { pattern_ip6, mrvl_parse_pattern_ip6 },
2098 { pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
2099 { pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
2100 { pattern_tcp, mrvl_parse_pattern_tcp },
2101 { pattern_udp, mrvl_parse_pattern_udp }
2105 * Check whether provided pattern matches any of the supported ones.
2107 * @param type_pattern Pointer to the pattern type.
2108 * @param item_pattern Pointer to the flow pattern.
2109 * @returns 1 in case of success, 0 value otherwise.
2112 mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
2113 const struct rte_flow_item *item_pattern)
2115 const enum rte_flow_item_type *type = type_pattern;
2116 const struct rte_flow_item *item = item_pattern;
2119 if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
2124 if (*type == RTE_FLOW_ITEM_TYPE_END ||
2125 item->type == RTE_FLOW_ITEM_TYPE_END)
2128 if (*type != item->type)
2135 return *type == item->type;
2139 * Parse flow attribute.
2141 * This will check whether the provided attribute's flags are supported.
2143 * @param priv Unused
2144 * @param attr Pointer to the flow attribute.
2145 * @param flow Unused
2146 * @param error Pointer to the flow error.
2147 * @returns 0 in case of success, negative value otherwise.
2150 mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
2151 const struct rte_flow_attr *attr,
2152 struct rte_flow *flow __rte_unused,
2153 struct rte_flow_error *error)
2156 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
2157 NULL, "NULL attribute");
2162 rte_flow_error_set(error, ENOTSUP,
2163 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2164 "Groups are not supported");
2167 if (attr->priority) {
2168 rte_flow_error_set(error, ENOTSUP,
2169 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
2170 "Priorities are not supported");
2173 if (!attr->ingress) {
2174 rte_flow_error_set(error, ENOTSUP,
2175 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
2176 "Only ingress is supported");
2180 rte_flow_error_set(error, ENOTSUP,
2181 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
2182 "Egress is not supported");
2185 if (attr->transfer) {
2186 rte_flow_error_set(error, ENOTSUP,
2187 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
2188 "Transfer is not supported");
2196 * Parse flow pattern.
2198 * Specific classifier rule will be created as well.
2200 * @param priv Unused
2201 * @param pattern Pointer to the flow pattern.
2202 * @param flow Pointer to the flow.
2203 * @param error Pointer to the flow error.
2204 * @returns 0 in case of success, negative value otherwise.
2207 mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
2208 const struct rte_flow_item pattern[],
2209 struct rte_flow *flow,
2210 struct rte_flow_error *error)
2215 for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
2216 if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))
2219 ret = mrvl_patterns[i].parse(pattern, flow, error);
2221 mrvl_free_all_key_mask(&flow->rule);
2226 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
2227 "Unsupported pattern");
2233 * Parse flow actions.
2235 * @param priv Pointer to the port's private data.
2236 * @param actions Pointer the action table.
2237 * @param flow Pointer to the flow.
2238 * @param error Pointer to the flow error.
2239 * @returns 0 in case of success, negative value otherwise.
2242 mrvl_flow_parse_actions(struct mrvl_priv *priv,
2243 const struct rte_flow_action actions[],
2244 struct rte_flow *flow,
2245 struct rte_flow_error *error)
2247 const struct rte_flow_action *action = actions;
2250 for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
2251 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
2254 if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
2255 flow->cos.ppio = priv->ppio;
2257 flow->action.type = PP2_CLS_TBL_ACT_DROP;
2258 flow->action.cos = &flow->cos;
2260 } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2261 const struct rte_flow_action_queue *q =
2262 (const struct rte_flow_action_queue *)
2265 if (q->index > priv->nb_rx_queues) {
2266 rte_flow_error_set(error, EINVAL,
2267 RTE_FLOW_ERROR_TYPE_ACTION,
2269 "Queue index out of range");
2273 if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
2275 * Unknown TC mapping, mapping will not have
2279 "Unknown TC mapping for queue %hu eth%hhu",
2280 q->index, priv->ppio_id);
2282 rte_flow_error_set(error, EFAULT,
2283 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2289 "Action: Assign packets to queue %d, tc:%d, q:%d",
2290 q->index, priv->rxq_map[q->index].tc,
2291 priv->rxq_map[q->index].inq);
2293 flow->cos.ppio = priv->ppio;
2294 flow->cos.tc = priv->rxq_map[q->index].tc;
2295 flow->action.type = PP2_CLS_TBL_ACT_DONE;
2296 flow->action.cos = &flow->cos;
2299 rte_flow_error_set(error, ENOTSUP,
2300 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2301 "Action not supported");
2308 rte_flow_error_set(error, EINVAL,
2309 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2310 NULL, "Action not specified");
2318 * Parse flow attribute, pattern and actions.
2320 * @param priv Pointer to the port's private data.
2321 * @param attr Pointer to the flow attribute.
2322 * @param pattern Pointer to the flow pattern.
2323 * @param actions Pointer to the flow actions.
2324 * @param flow Pointer to the flow.
2325 * @param error Pointer to the flow error.
2326 * @returns 0 on success, negative value otherwise.
2329 mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
2330 const struct rte_flow_item pattern[],
2331 const struct rte_flow_action actions[],
2332 struct rte_flow *flow,
2333 struct rte_flow_error *error)
2337 ret = mrvl_flow_parse_attr(priv, attr, flow, error);
2341 ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
2345 return mrvl_flow_parse_actions(priv, actions, flow, error);
2349 * Get engine type for the given flow.
2351 * @param field Pointer to the flow.
2352 * @returns The type of the engine.
2354 static inline enum pp2_cls_tbl_type
2355 mrvl_engine_type(const struct rte_flow *flow)
2359 for (i = 0; i < flow->rule.num_fields; i++)
2360 size += flow->rule.fields[i].size;
2363 * For maskable engine type the key size must be up to 8 bytes.
2364 * For keys with size bigger than 8 bytes, engine type must
2365 * be set to exact match.
2368 return PP2_CLS_TBL_EXACT_MATCH;
2370 return PP2_CLS_TBL_MASKABLE;
2374 * Create classifier table.
2376 * @param dev Pointer to the device.
2377 * @param flow Pointer to the very first flow.
2378 * @returns 0 in case of success, negative value otherwise.
2381 mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
2383 struct mrvl_priv *priv = dev->data->dev_private;
2384 struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
2387 if (priv->cls_tbl) {
2388 pp2_cls_tbl_deinit(priv->cls_tbl);
2389 priv->cls_tbl = NULL;
2392 memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
2394 priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
2395 MRVL_LOG(INFO, "Setting cls search engine type to %s",
2396 priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
2397 "exact" : "maskable");
2398 priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
2399 priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
2400 priv->cls_tbl_params.default_act.cos = &first_flow->cos;
2402 if (first_flow->pattern & F_DMAC) {
2403 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2404 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
2406 key->num_fields += 1;
2409 if (first_flow->pattern & F_SMAC) {
2410 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2411 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
2413 key->num_fields += 1;
2416 if (first_flow->pattern & F_TYPE) {
2417 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2418 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
2420 key->num_fields += 1;
2423 if (first_flow->pattern & F_VLAN_ID) {
2424 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2425 key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
2427 key->num_fields += 1;
2430 if (first_flow->pattern & F_VLAN_PRI) {
2431 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2432 key->proto_field[key->num_fields].field.vlan =
2435 key->num_fields += 1;
2438 if (first_flow->pattern & F_IP4_TOS) {
2439 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2440 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_TOS;
2442 key->num_fields += 1;
/*
 * Tail of classifier-table key construction: for every bit set in the
 * first flow's parsed pattern, one proto_field entry is appended to the
 * table key (key->num_fields advances monotonically).
 */
2445 if (first_flow->pattern & F_IP4_SIP) {
2446 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2447 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
2449 key->num_fields += 1;
2452 if (first_flow->pattern & F_IP4_DIP) {
2453 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2454 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
2456 key->num_fields += 1;
2459 if (first_flow->pattern & F_IP4_PROTO) {
2460 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
2461 key->proto_field[key->num_fields].field.ipv4 =
2464 key->num_fields += 1;
/* IPv6 addresses contribute 16 bytes each to the key size. */
2467 if (first_flow->pattern & F_IP6_SIP) {
2468 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2469 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
2470 key->key_size += 16;
2471 key->num_fields += 1;
2474 if (first_flow->pattern & F_IP6_DIP) {
2475 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2476 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
2477 key->key_size += 16;
2478 key->num_fields += 1;
2481 if (first_flow->pattern & F_IP6_FLOW) {
2482 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2483 key->proto_field[key->num_fields].field.ipv6 =
2486 key->num_fields += 1;
2489 if (first_flow->pattern & F_IP6_NEXT_HDR) {
2490 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
2491 key->proto_field[key->num_fields].field.ipv6 =
2492 MV_NET_IP6_F_NEXT_HDR;
2494 key->num_fields += 1;
2497 if (first_flow->pattern & F_TCP_SPORT) {
2498 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2499 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2501 key->num_fields += 1;
2504 if (first_flow->pattern & F_TCP_DPORT) {
2505 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
2506 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
2508 key->num_fields += 1;
/*
 * NOTE(review): the UDP source-port key is programmed through the 'tcp'
 * union member with the TCP enumerator MV_NET_TCP_F_SP; presumably
 * field.udp / MV_NET_UDP_F_SP were intended. This is only harmless if
 * the UDP and TCP field enumerators share values in MUSDK — TODO
 * confirm against MUSDK's mv_net.h and fix the member/enum here.
 */
2511 if (first_flow->pattern & F_UDP_SPORT) {
2512 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2513 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
2515 key->num_fields += 1;
/*
 * NOTE(review): same concern — MV_NET_TCP_F_DP is used for the UDP
 * destination port; verify MV_NET_UDP_F_DP is the correct enumerator.
 */
2518 if (first_flow->pattern & F_UDP_DPORT) {
2519 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
2520 key->proto_field[key->num_fields].field.udp = MV_NET_TCP_F_DP;
2522 key->num_fields += 1;
/*
 * Instantiate the classifier table from the accumulated parameters and
 * remember which pattern it was built for, so subsequent flows can be
 * checked for compatibility (see mrvl_flow_can_be_added()).
 */
2525 ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
2527 priv->cls_tbl_pattern = first_flow->pattern;
2533 * Check whether new flow can be added to the table
2535 * @param priv Pointer to the port's private data.
2536 * @param flow Pointer to the new flow.
2537 * @return 1 in case flow can be added, 0 otherwise.
2540 mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
2542 return flow->pattern == priv->cls_tbl_pattern &&
2543 mrvl_engine_type(flow) == priv->cls_tbl_params.type;
2547 * DPDK flow create callback called when flow is to be created.
2549 * @param dev Pointer to the device.
2550 * @param attr Pointer to the flow attribute.
2551 * @param pattern Pointer to the flow pattern.
2552 * @param actions Pointer to the flow actions.
2553 * @param error Pointer to the flow error.
2554 * @returns Pointer to the created flow in case of success, NULL otherwise.
2556 static struct rte_flow *
2557 mrvl_flow_create(struct rte_eth_dev *dev,
2558 const struct rte_flow_attr *attr,
2559 const struct rte_flow_item pattern[],
2560 const struct rte_flow_action actions[],
2561 struct rte_flow_error *error)
2563 struct mrvl_priv *priv = dev->data->dev_private;
2564 struct rte_flow *flow, *first;
2567 if (!dev->data->dev_started) {
2568 rte_flow_error_set(error, EINVAL,
2569 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2570 "Port must be started first\n");
2574 flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
2578 ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
2585 * 1. In case table does not exist - create one.
2586 * 2. In case table exists, is empty and new flow cannot be added
2588 * 3. In case table is not empty and new flow matches table format
2590 * 4. Otherwise flow cannot be added.
2592 first = LIST_FIRST(&priv->flows);
2593 if (!priv->cls_tbl) {
2594 ret = mrvl_create_cls_table(dev, flow);
2595 } else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
2596 ret = mrvl_create_cls_table(dev, flow);
2597 } else if (mrvl_flow_can_be_added(priv, flow)) {
2600 rte_flow_error_set(error, EINVAL,
2601 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2602 "Pattern does not match cls table format\n");
2607 rte_flow_error_set(error, EINVAL,
2608 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2609 "Failed to create cls table\n");
2613 ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
2615 rte_flow_error_set(error, EINVAL,
2616 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2617 "Failed to add rule\n");
2621 LIST_INSERT_HEAD(&priv->flows, flow, next);
2630 * Remove classifier rule associated with given flow.
2632 * @param priv Pointer to the port's private data.
2633 * @param flow Pointer to the flow.
2634 * @param error Pointer to the flow error.
2635 * @returns 0 in case of success, negative value otherwise.
2638 mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
2639 struct rte_flow_error *error)
2643 if (!priv->cls_tbl) {
2644 rte_flow_error_set(error, EINVAL,
2645 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2646 "Classifier table not initialized");
2650 ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
2652 rte_flow_error_set(error, EINVAL,
2653 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2654 "Failed to remove rule");
2658 mrvl_free_all_key_mask(&flow->rule);
2664 * DPDK flow destroy callback called when flow is to be removed.
2666 * @param dev Pointer to the device.
2667 * @param flow Pointer to the flow.
2668 * @param error Pointer to the flow error.
2669 * @returns 0 in case of success, negative value otherwise.
2672 mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
2673 struct rte_flow_error *error)
2675 struct mrvl_priv *priv = dev->data->dev_private;
2679 LIST_FOREACH(f, &priv->flows, next) {
2685 rte_flow_error_set(error, EINVAL,
2686 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2687 "Rule was not found");
2691 LIST_REMOVE(f, next);
2693 ret = mrvl_flow_remove(priv, flow, error);
2703 * DPDK flow callback called to verify given attribute, pattern and actions.
2705 * @param dev Pointer to the device.
2706 * @param attr Pointer to the flow attribute.
2707 * @param pattern Pointer to the flow pattern.
2708 * @param actions Pointer to the flow actions.
2709 * @param error Pointer to the flow error.
2710 * @returns 0 on success, negative value otherwise.
2713 mrvl_flow_validate(struct rte_eth_dev *dev,
2714 const struct rte_flow_attr *attr,
2715 const struct rte_flow_item pattern[],
2716 const struct rte_flow_action actions[],
2717 struct rte_flow_error *error)
2719 static struct rte_flow *flow;
2721 flow = mrvl_flow_create(dev, attr, pattern, actions, error);
2725 mrvl_flow_destroy(dev, flow, error);
2731 * DPDK flow flush callback called when flows are to be flushed.
2733 * @param dev Pointer to the device.
2734 * @param error Pointer to the flow error.
2735 * @returns 0 in case of success, negative value otherwise.
2738 mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2740 struct mrvl_priv *priv = dev->data->dev_private;
2742 while (!LIST_EMPTY(&priv->flows)) {
2743 struct rte_flow *flow = LIST_FIRST(&priv->flows);
2744 int ret = mrvl_flow_remove(priv, flow, error);
2748 LIST_REMOVE(flow, next);
2756 * DPDK flow isolate callback called to isolate port.
2758 * @param dev Pointer to the device.
2759 * @param enable Pass 0/1 to disable/enable port isolation.
2760 * @param error Pointer to the flow error.
2761 * @returns 0 in case of success, negative value otherwise.
2764 mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
2765 struct rte_flow_error *error)
2767 struct mrvl_priv *priv = dev->data->dev_private;
2769 if (dev->data->dev_started) {
2770 rte_flow_error_set(error, EBUSY,
2771 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2772 NULL, "Port must be stopped first\n");
2776 priv->isolated = enable;
2781 const struct rte_flow_ops mrvl_flow_ops = {
2782 .validate = mrvl_flow_validate,
2783 .create = mrvl_flow_create,
2784 .destroy = mrvl_flow_destroy,
2785 .flush = mrvl_flow_flush,
2786 .isolate = mrvl_flow_isolate