1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Marvell International Ltd.
3 * Copyright(c) 2018 Semihalf.
8 #include <rte_flow_driver.h>
9 #include <rte_malloc.h>
12 #include <arpa/inet.h>
14 #include "mrvl_flow.h"
17 /** Number of rules in the classifier table. */
18 #define MRVL_CLS_MAX_NUM_RULES 20
/* Keys and masks are stored as ASCII strings (snprintf/inet_ntop below). */
20 /** Size of the classifier key and mask strings. */
21 #define MRVL_CLS_STR_SIZE_MAX 40
/*
 * Flow-item patterns accepted by this PMD.  Each table is an
 * RTE_FLOW_ITEM_TYPE_END-terminated list describing one supported
 * combination of headers (eth / vlan / ipv4 / ipv6 / tcp / udp).
 */
23 static const enum rte_flow_item_type pattern_eth[] = {
24 RTE_FLOW_ITEM_TYPE_ETH,
25 RTE_FLOW_ITEM_TYPE_END
28 static const enum rte_flow_item_type pattern_eth_vlan[] = {
29 RTE_FLOW_ITEM_TYPE_ETH,
30 RTE_FLOW_ITEM_TYPE_VLAN,
31 RTE_FLOW_ITEM_TYPE_END
34 static const enum rte_flow_item_type pattern_eth_vlan_ip[] = {
35 RTE_FLOW_ITEM_TYPE_ETH,
36 RTE_FLOW_ITEM_TYPE_VLAN,
37 RTE_FLOW_ITEM_TYPE_IPV4,
38 RTE_FLOW_ITEM_TYPE_END
41 static const enum rte_flow_item_type pattern_eth_vlan_ip6[] = {
42 RTE_FLOW_ITEM_TYPE_ETH,
43 RTE_FLOW_ITEM_TYPE_VLAN,
44 RTE_FLOW_ITEM_TYPE_IPV6,
45 RTE_FLOW_ITEM_TYPE_END
48 static const enum rte_flow_item_type pattern_eth_ip4[] = {
49 RTE_FLOW_ITEM_TYPE_ETH,
50 RTE_FLOW_ITEM_TYPE_IPV4,
51 RTE_FLOW_ITEM_TYPE_END
54 static const enum rte_flow_item_type pattern_eth_ip4_tcp[] = {
55 RTE_FLOW_ITEM_TYPE_ETH,
56 RTE_FLOW_ITEM_TYPE_IPV4,
57 RTE_FLOW_ITEM_TYPE_TCP,
58 RTE_FLOW_ITEM_TYPE_END
61 static const enum rte_flow_item_type pattern_eth_ip4_udp[] = {
62 RTE_FLOW_ITEM_TYPE_ETH,
63 RTE_FLOW_ITEM_TYPE_IPV4,
64 RTE_FLOW_ITEM_TYPE_UDP,
65 RTE_FLOW_ITEM_TYPE_END
68 static const enum rte_flow_item_type pattern_eth_ip6[] = {
69 RTE_FLOW_ITEM_TYPE_ETH,
70 RTE_FLOW_ITEM_TYPE_IPV6,
71 RTE_FLOW_ITEM_TYPE_END
74 static const enum rte_flow_item_type pattern_eth_ip6_tcp[] = {
75 RTE_FLOW_ITEM_TYPE_ETH,
76 RTE_FLOW_ITEM_TYPE_IPV6,
77 RTE_FLOW_ITEM_TYPE_TCP,
78 RTE_FLOW_ITEM_TYPE_END
81 static const enum rte_flow_item_type pattern_eth_ip6_udp[] = {
82 RTE_FLOW_ITEM_TYPE_ETH,
83 RTE_FLOW_ITEM_TYPE_IPV6,
84 RTE_FLOW_ITEM_TYPE_UDP,
85 RTE_FLOW_ITEM_TYPE_END
88 static const enum rte_flow_item_type pattern_vlan[] = {
89 RTE_FLOW_ITEM_TYPE_VLAN,
90 RTE_FLOW_ITEM_TYPE_END
93 static const enum rte_flow_item_type pattern_vlan_ip[] = {
94 RTE_FLOW_ITEM_TYPE_VLAN,
95 RTE_FLOW_ITEM_TYPE_IPV4,
96 RTE_FLOW_ITEM_TYPE_END
99 static const enum rte_flow_item_type pattern_vlan_ip_tcp[] = {
100 RTE_FLOW_ITEM_TYPE_VLAN,
101 RTE_FLOW_ITEM_TYPE_IPV4,
102 RTE_FLOW_ITEM_TYPE_TCP,
103 RTE_FLOW_ITEM_TYPE_END
106 static const enum rte_flow_item_type pattern_vlan_ip_udp[] = {
107 RTE_FLOW_ITEM_TYPE_VLAN,
108 RTE_FLOW_ITEM_TYPE_IPV4,
109 RTE_FLOW_ITEM_TYPE_UDP,
110 RTE_FLOW_ITEM_TYPE_END
113 static const enum rte_flow_item_type pattern_vlan_ip6[] = {
114 RTE_FLOW_ITEM_TYPE_VLAN,
115 RTE_FLOW_ITEM_TYPE_IPV6,
116 RTE_FLOW_ITEM_TYPE_END
119 static const enum rte_flow_item_type pattern_vlan_ip6_tcp[] = {
120 RTE_FLOW_ITEM_TYPE_VLAN,
121 RTE_FLOW_ITEM_TYPE_IPV6,
122 RTE_FLOW_ITEM_TYPE_TCP,
123 RTE_FLOW_ITEM_TYPE_END
126 static const enum rte_flow_item_type pattern_vlan_ip6_udp[] = {
127 RTE_FLOW_ITEM_TYPE_VLAN,
128 RTE_FLOW_ITEM_TYPE_IPV6,
129 RTE_FLOW_ITEM_TYPE_UDP,
130 RTE_FLOW_ITEM_TYPE_END
133 static const enum rte_flow_item_type pattern_ip[] = {
134 RTE_FLOW_ITEM_TYPE_IPV4,
135 RTE_FLOW_ITEM_TYPE_END
138 static const enum rte_flow_item_type pattern_ip6[] = {
139 RTE_FLOW_ITEM_TYPE_IPV6,
140 RTE_FLOW_ITEM_TYPE_END
143 static const enum rte_flow_item_type pattern_ip_tcp[] = {
144 RTE_FLOW_ITEM_TYPE_IPV4,
145 RTE_FLOW_ITEM_TYPE_TCP,
146 RTE_FLOW_ITEM_TYPE_END
149 static const enum rte_flow_item_type pattern_ip6_tcp[] = {
150 RTE_FLOW_ITEM_TYPE_IPV6,
151 RTE_FLOW_ITEM_TYPE_TCP,
152 RTE_FLOW_ITEM_TYPE_END
155 static const enum rte_flow_item_type pattern_ip_udp[] = {
156 RTE_FLOW_ITEM_TYPE_IPV4,
157 RTE_FLOW_ITEM_TYPE_UDP,
158 RTE_FLOW_ITEM_TYPE_END
161 static const enum rte_flow_item_type pattern_ip6_udp[] = {
162 RTE_FLOW_ITEM_TYPE_IPV6,
163 RTE_FLOW_ITEM_TYPE_UDP,
164 RTE_FLOW_ITEM_TYPE_END
167 static const enum rte_flow_item_type pattern_tcp[] = {
168 RTE_FLOW_ITEM_TYPE_TCP,
169 RTE_FLOW_ITEM_TYPE_END
172 static const enum rte_flow_item_type pattern_udp[] = {
173 RTE_FLOW_ITEM_TYPE_UDP,
174 RTE_FLOW_ITEM_TYPE_END
/* Bit masks used when extracting individual header fields below. */
177 #define MRVL_VLAN_ID_MASK 0x0fff
/* Upper 3 bits of the TCI (PCP); shifted right by 13 when used. */
178 #define MRVL_VLAN_PRI_MASK 0x7000
/* Top 6 bits of IPv4 ToS = DSCP; shifted right by 2 when used. */
179 #define MRVL_IPV4_DSCP_MASK 0xfc
180 #define MRVL_IPV4_ADDR_MASK 0xffffffff
/* Low 20 bits of IPv6 vtc_flow = flow label. */
181 #define MRVL_IPV6_FLOW_MASK 0x0fffff
184 * Given a flow item, return the next non-void one.
186 * @param items Pointer to the item in the table.
187 * @returns Next not-void item, NULL otherwise.
189 static const struct rte_flow_item *
190 mrvl_next_item(const struct rte_flow_item *items)
192 const struct rte_flow_item *item = items;
/* VOID items are spacers allowed by the rte_flow API; skip them. */
194 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
195 if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
203 * Allocate memory for classifier rule key and mask fields.
205 * @param field Pointer to the classifier rule.
206 * @returns 0 in case of success, negative value otherwise.
209 mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
211 unsigned int id = rte_socket_id();
/* Zeroed buffers on the caller's NUMA node; filled in as strings later. */
213 field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
217 field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
/* mask allocation failed: release the key so nothing leaks. */
223 rte_free(field->key);
231 * Free memory allocated for classifier rule key and mask fields.
233 * @param field Pointer to the classifier rule.
236 mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
/* rte_free(NULL) is a no-op, so unallocated fields are safe here. */
238 rte_free(field->key);
239 rte_free(field->mask);
245 * Free memory allocated for all classifier rule key and mask fields.
247 * @param rule Pointer to the classifier table rule.
250 mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
254 for (i = 0; i < rule->num_fields; i++)
255 mrvl_free_key_mask(&rule->fields[i]);
/* Reset the count so the rule can be repopulated from scratch. */
256 rule->num_fields = 0;
260 * Initialize rte flow item parsing.
262 * @param item Pointer to the flow item.
263 * @param spec_ptr Pointer to the specific item pointer.
264 * @param mask_ptr Pointer to the specific item's mask pointer.
265 * @param def_mask Pointer to the default mask.
266 * @param size Size of the flow item.
267 * @param error Pointer to the rte flow error.
268 * @returns 0 in case of success, negative value otherwise.
271 mrvl_parse_init(const struct rte_flow_item *item,
272 const void **spec_ptr,
273 const void **mask_ptr,
274 const void *def_mask,
276 struct rte_flow_error *error)
283 memset(zeros, 0, size);
286 rte_flow_error_set(error, EINVAL,
287 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
/* last/mask are qualifiers of spec; they are meaningless without it. */
292 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
293 rte_flow_error_set(error, EINVAL,
294 RTE_FLOW_ERROR_TYPE_ITEM, item,
295 "Mask or last is set without spec\n");
300 * If "mask" is not set, default mask is used,
301 * but if default mask is NULL, "mask" should be set.
303 if (item->mask == NULL) {
304 if (def_mask == NULL) {
305 rte_flow_error_set(error, EINVAL,
306 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
307 "Mask should be specified\n");
311 mask = (const uint8_t *)def_mask;
313 mask = (const uint8_t *)item->mask;
316 spec = (const uint8_t *)item->spec;
317 last = (const uint8_t *)item->last;
320 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
321 NULL, "Spec should be specified\n");
326 * If field values in "last" are either 0 or equal to the corresponding
327 * values in "spec" then they are ignored.
/*
 * NOTE(review): the '!' in front of memcmp(last, zeros, ...) looks
 * inverted relative to the stated intent — ranging should be rejected
 * when "last" is NON-zero and differs from "spec".  Confirm against
 * the upstream implementation before relying on this path.
 */
330 !memcmp(last, zeros, size) &&
331 memcmp(last, spec, size) != 0) {
332 rte_flow_error_set(error, ENOTSUP,
333 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
334 "Ranging is not supported\n");
345 * Parse the eth flow item.
347 * This will create classifier rule that matches either destination or source
350 * @param spec Pointer to the specific flow item.
351 * @param mask Pointer to the specific flow item's mask.
352 * @param parse_dst Parse either destination (non-zero) or source (zero) mac address.
353 * @param flow Pointer to the flow.
354 * @return 0 in case of success, negative error value otherwise.
357 mrvl_parse_mac(const struct rte_flow_item_eth *spec,
358 const struct rte_flow_item_eth *mask,
359 int parse_dst, struct rte_flow *flow)
361 struct pp2_cls_rule_key_field *key_field;
362 const uint8_t *k, *m;
/* The classifier table holds a bounded number of key fields. */
364 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
368 k = spec->dst.addr_bytes;
369 m = mask->dst.addr_bytes;
371 flow->pattern |= F_DMAC;
373 k = spec->src.addr_bytes;
374 m = mask->src.addr_bytes;
376 flow->pattern |= F_SMAC;
379 key_field = &flow->rule.fields[flow->rule.num_fields];
380 mrvl_alloc_key_mask(key_field);
/* MUSDK expects key and mask as "xx:xx:xx:xx:xx:xx" strings. */
383 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
384 "%02x:%02x:%02x:%02x:%02x:%02x",
385 k[0], k[1], k[2], k[3], k[4], k[5]);
387 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
388 "%02x:%02x:%02x:%02x:%02x:%02x",
389 m[0], m[1], m[2], m[3], m[4], m[5]);
391 flow->rule.num_fields += 1;
397 * Helper for parsing the eth flow item destination mac address.
399 * @param spec Pointer to the specific flow item.
400 * @param mask Pointer to the specific flow item's mask.
401 * @param flow Pointer to the flow.
402 * @return 0 in case of success, negative error value otherwise.
405 mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
406 const struct rte_flow_item_eth *mask,
407 struct rte_flow *flow)
/* parse_dst = 1 selects the destination address in mrvl_parse_mac(). */
409 return mrvl_parse_mac(spec, mask, 1, flow);
413 * Helper for parsing the eth flow item source mac address.
415 * @param spec Pointer to the specific flow item.
416 * @param mask Pointer to the specific flow item's mask.
417 * @param flow Pointer to the flow.
418 * @return 0 in case of success, negative error value otherwise.
421 mrvl_parse_smac(const struct rte_flow_item_eth *spec,
422 const struct rte_flow_item_eth *mask,
423 struct rte_flow *flow)
/* parse_dst = 0 selects the source address in mrvl_parse_mac(). */
425 return mrvl_parse_mac(spec, mask, 0, flow);
429 * Parse the ether type field of the eth flow item.
431 * @param spec Pointer to the specific flow item.
432 * @param mask Pointer to the specific flow item's mask (ignored: exact match only).
433 * @param flow Pointer to the flow.
434 * @return 0 in case of success, negative error value otherwise.
437 mrvl_parse_type(const struct rte_flow_item_eth *spec,
438 const struct rte_flow_item_eth *mask __rte_unused,
439 struct rte_flow *flow)
441 struct pp2_cls_rule_key_field *key_field;
444 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
447 key_field = &flow->rule.fields[flow->rule.num_fields];
448 mrvl_alloc_key_mask(key_field);
/* EtherType is carried big-endian on the wire; convert before printing. */
451 k = rte_be_to_cpu_16(spec->type);
452 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
454 flow->pattern |= F_TYPE;
455 flow->rule.num_fields += 1;
461 * Parse the vid field of the vlan rte flow item.
463 * This will create classifier rule that matches vid.
465 * @param spec Pointer to the specific flow item.
466 * @param mask Pointer to the specific flow item's mask (ignored: exact match only).
467 * @param flow Pointer to the flow.
468 * @return 0 in case of success, negative error value otherwise.
471 mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
472 const struct rte_flow_item_vlan *mask __rte_unused,
473 struct rte_flow *flow)
475 struct pp2_cls_rule_key_field *key_field;
478 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
481 key_field = &flow->rule.fields[flow->rule.num_fields];
482 mrvl_alloc_key_mask(key_field);
/* The VID is the low 12 bits of the (big-endian) TCI. */
485 k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
486 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
488 flow->pattern |= F_VLAN_ID;
489 flow->rule.num_fields += 1;
495 * Parse the pri field of the vlan rte flow item.
497 * This will create classifier rule that matches pri.
499 * @param spec Pointer to the specific flow item.
500 * @param mask Pointer to the specific flow item's mask (ignored: exact match only).
501 * @param flow Pointer to the flow.
502 * @return 0 in case of success, negative error value otherwise.
505 mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
506 const struct rte_flow_item_vlan *mask __rte_unused,
507 struct rte_flow *flow)
509 struct pp2_cls_rule_key_field *key_field;
512 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
515 key_field = &flow->rule.fields[flow->rule.num_fields];
516 mrvl_alloc_key_mask(key_field);
/* PCP occupies TCI bits 15:13, hence the shift by 13. */
519 k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
520 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
522 flow->pattern |= F_VLAN_PRI;
523 flow->rule.num_fields += 1;
529 * Parse the dscp field of the ipv4 rte flow item.
531 * This will create classifier rule that matches dscp field.
533 * @param spec Pointer to the specific flow item.
534 * @param mask Pointer to the specific flow item's mask.
535 * @param flow Pointer to the flow.
536 * @return 0 in case of success, negative error value otherwise.
539 mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
540 const struct rte_flow_item_ipv4 *mask,
541 struct rte_flow *flow)
543 struct pp2_cls_rule_key_field *key_field;
546 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
549 key_field = &flow->rule.fields[flow->rule.num_fields];
550 mrvl_alloc_key_mask(key_field);
/* DSCP is the top 6 bits of the ToS byte; shift discards the ECN bits. */
553 k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
554 m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
555 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
556 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
558 flow->pattern |= F_IP4_TOS;
559 flow->rule.num_fields += 1;
565 * Parse either source or destination ip addresses of the ipv4 flow item.
567 * This will create classifier rule that matches either destination
568 * or source ip field.
570 * @param spec Pointer to the specific flow item.
571 * @param mask Pointer to the specific flow item's mask.
572 * @param parse_dst Parse either destination (non-zero) or source (zero) ip address.
573 * @param flow Pointer to the flow.
574 * @return 0 in case of success, negative error value otherwise.
577 mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
578 const struct rte_flow_item_ipv4 *mask,
579 int parse_dst, struct rte_flow *flow)
581 struct pp2_cls_rule_key_field *key_field;
585 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
588 memset(&k, 0, sizeof(k));
/* Address stays big-endian for inet_ntop(); only the mask is converted. */
590 k.s_addr = spec->hdr.dst_addr;
591 m = rte_be_to_cpu_32(mask->hdr.dst_addr);
593 flow->pattern |= F_IP4_DIP;
595 k.s_addr = spec->hdr.src_addr;
596 m = rte_be_to_cpu_32(mask->hdr.src_addr);
598 flow->pattern |= F_IP4_SIP;
601 key_field = &flow->rule.fields[flow->rule.num_fields];
602 mrvl_alloc_key_mask(key_field);
/* Key is dotted-quad text, mask is a hex string — MUSDK's expected forms. */
605 inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
606 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
608 flow->rule.num_fields += 1;
614 * Helper for parsing destination ip of the ipv4 flow item.
616 * @param spec Pointer to the specific flow item.
617 * @param mask Pointer to the specific flow item's mask.
618 * @param flow Pointer to the flow.
619 * @return 0 in case of success, negative error value otherwise.
622 mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
623 const struct rte_flow_item_ipv4 *mask,
624 struct rte_flow *flow)
/* parse_dst = 1 selects the destination address. */
626 return mrvl_parse_ip4_addr(spec, mask, 1, flow);
630 * Helper for parsing source ip of the ipv4 flow item.
632 * @param spec Pointer to the specific flow item.
633 * @param mask Pointer to the specific flow item's mask.
634 * @param flow Pointer to the flow.
635 * @return 0 in case of success, negative error value otherwise.
638 mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
639 const struct rte_flow_item_ipv4 *mask,
640 struct rte_flow *flow)
/* parse_dst = 0 selects the source address. */
642 return mrvl_parse_ip4_addr(spec, mask, 0, flow);
646 * Parse the proto field of the ipv4 rte flow item.
648 * This will create classifier rule that matches proto field.
650 * @param spec Pointer to the specific flow item.
651 * @param mask Pointer to the specific flow item's mask (ignored: exact match only).
652 * @param flow Pointer to the flow.
653 * @return 0 in case of success, negative error value otherwise.
656 mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
657 const struct rte_flow_item_ipv4 *mask __rte_unused,
658 struct rte_flow *flow)
660 struct pp2_cls_rule_key_field *key_field;
661 uint8_t k = spec->hdr.next_proto_id;
663 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
666 key_field = &flow->rule.fields[flow->rule.num_fields];
667 mrvl_alloc_key_mask(key_field);
670 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
672 flow->pattern |= F_IP4_PROTO;
673 flow->rule.num_fields += 1;
679 * Parse either source or destination ip addresses of the ipv6 rte flow item.
681 * This will create classifier rule that matches either destination
682 * or source ip field.
684 * @param spec Pointer to the specific flow item.
685 * @param mask Pointer to the specific flow item's mask.
686 * @param parse_dst Parse either destination (non-zero) or source (zero) ipv6 address.
687 * @param flow Pointer to the flow.
688 * @return 0 in case of success, negative error value otherwise.
691 mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
692 const struct rte_flow_item_ipv6 *mask,
693 int parse_dst, struct rte_flow *flow)
695 struct pp2_cls_rule_key_field *key_field;
696 int size = sizeof(spec->hdr.dst_addr);
697 struct in6_addr k, m;
699 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
702 memset(&k, 0, sizeof(k));
704 memcpy(k.s6_addr, spec->hdr.dst_addr, size);
705 memcpy(m.s6_addr, mask->hdr.dst_addr, size);
707 flow->pattern |= F_IP6_DIP;
709 memcpy(k.s6_addr, spec->hdr.src_addr, size);
710 memcpy(m.s6_addr, mask->hdr.src_addr, size);
712 flow->pattern |= F_IP6_SIP;
715 key_field = &flow->rule.fields[flow->rule.num_fields];
716 mrvl_alloc_key_mask(key_field);
/* 16-byte binary field size; key/mask texts are standard IPv6 notation. */
717 key_field->size = 16;
719 inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
720 inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
722 flow->rule.num_fields += 1;
728 * Helper for parsing destination ip of the ipv6 flow item.
730 * @param spec Pointer to the specific flow item.
731 * @param mask Pointer to the specific flow item's mask.
732 * @param flow Pointer to the flow.
733 * @return 0 in case of success, negative error value otherwise.
736 mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
737 const struct rte_flow_item_ipv6 *mask,
738 struct rte_flow *flow)
/* parse_dst = 1 selects the destination address. */
740 return mrvl_parse_ip6_addr(spec, mask, 1, flow);
744 * Helper for parsing source ip of the ipv6 flow item.
746 * @param spec Pointer to the specific flow item.
747 * @param mask Pointer to the specific flow item's mask.
748 * @param flow Pointer to the flow.
749 * @return 0 in case of success, negative error value otherwise.
752 mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
753 const struct rte_flow_item_ipv6 *mask,
754 struct rte_flow *flow)
/* parse_dst = 0 selects the source address. */
756 return mrvl_parse_ip6_addr(spec, mask, 0, flow);
760 * Parse the flow label of the ipv6 flow item.
762 * This will create classifier rule that matches flow field.
764 * @param spec Pointer to the specific flow item.
765 * @param mask Pointer to the specific flow item's mask.
766 * @param flow Pointer to the flow.
767 * @return 0 in case of success, negative error value otherwise.
770 mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
771 const struct rte_flow_item_ipv6 *mask,
772 struct rte_flow *flow)
774 struct pp2_cls_rule_key_field *key_field;
/* Flow label = low 20 bits of the (big-endian) vtc_flow word. */
775 uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
776 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
778 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
781 key_field = &flow->rule.fields[flow->rule.num_fields];
782 mrvl_alloc_key_mask(key_field);
785 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
786 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
788 flow->pattern |= F_IP6_FLOW;
789 flow->rule.num_fields += 1;
795 * Parse the next header of the ipv6 flow item.
797 * This will create classifier rule that matches next header field.
799 * @param spec Pointer to the specific flow item.
800 * @param mask Pointer to the specific flow item's mask (ignored: exact match only).
801 * @param flow Pointer to the flow.
802 * @return 0 in case of success, negative error value otherwise.
805 mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
806 const struct rte_flow_item_ipv6 *mask __rte_unused,
807 struct rte_flow *flow)
809 struct pp2_cls_rule_key_field *key_field;
810 uint8_t k = spec->hdr.proto;
812 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
815 key_field = &flow->rule.fields[flow->rule.num_fields];
816 mrvl_alloc_key_mask(key_field);
819 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
821 flow->pattern |= F_IP6_NEXT_HDR;
822 flow->rule.num_fields += 1;
828 * Parse destination or source port of the tcp flow item.
830 * This will create classifier rule that matches either destination or
833 * @param spec Pointer to the specific flow item.
834 * @param mask Pointer to the specific flow item's mask (ignored: exact match only).
835 * @param parse_dst Parse either destination (non-zero) or source (zero) port.
836 * @param flow Pointer to the flow.
837 * @return 0 in case of success, negative error value otherwise.
840 mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
841 const struct rte_flow_item_tcp *mask __rte_unused,
842 int parse_dst, struct rte_flow *flow)
844 struct pp2_cls_rule_key_field *key_field;
847 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
850 key_field = &flow->rule.fields[flow->rule.num_fields];
851 mrvl_alloc_key_mask(key_field);
/* Ports are big-endian on the wire; convert before stringifying. */
855 k = rte_be_to_cpu_16(spec->hdr.dst_port);
857 flow->pattern |= F_TCP_DPORT;
859 k = rte_be_to_cpu_16(spec->hdr.src_port);
861 flow->pattern |= F_TCP_SPORT;
864 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
866 flow->rule.num_fields += 1;
872 * Helper for parsing the tcp source port of the tcp flow item.
874 * @param spec Pointer to the specific flow item.
875 * @param mask Pointer to the specific flow item's mask.
876 * @param flow Pointer to the flow.
877 * @return 0 in case of success, negative error value otherwise.
880 mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
881 const struct rte_flow_item_tcp *mask,
882 struct rte_flow *flow)
/* parse_dst = 0 selects the source port. */
884 return mrvl_parse_tcp_port(spec, mask, 0, flow);
888 * Helper for parsing the tcp destination port of the tcp flow item.
890 * @param spec Pointer to the specific flow item.
891 * @param mask Pointer to the specific flow item's mask.
892 * @param flow Pointer to the flow.
893 * @return 0 in case of success, negative error value otherwise.
896 mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
897 const struct rte_flow_item_tcp *mask,
898 struct rte_flow *flow)
/* parse_dst = 1 selects the destination port. */
900 return mrvl_parse_tcp_port(spec, mask, 1, flow);
904 * Parse destination or source port of the udp flow item.
906 * This will create classifier rule that matches either destination or
909 * @param spec Pointer to the specific flow item.
910 * @param mask Pointer to the specific flow item's mask (ignored: exact match only).
911 * @param parse_dst Parse either destination (non-zero) or source (zero) port.
912 * @param flow Pointer to the flow.
913 * @return 0 in case of success, negative error value otherwise.
916 mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
917 const struct rte_flow_item_udp *mask __rte_unused,
918 int parse_dst, struct rte_flow *flow)
920 struct pp2_cls_rule_key_field *key_field;
923 if (flow->rule.num_fields >= PP2_CLS_TBL_MAX_NUM_FIELDS)
926 key_field = &flow->rule.fields[flow->rule.num_fields];
927 mrvl_alloc_key_mask(key_field);
/* Ports are big-endian on the wire; convert before stringifying. */
931 k = rte_be_to_cpu_16(spec->hdr.dst_port);
933 flow->pattern |= F_UDP_DPORT;
935 k = rte_be_to_cpu_16(spec->hdr.src_port);
937 flow->pattern |= F_UDP_SPORT;
940 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
942 flow->rule.num_fields += 1;
948 * Helper for parsing the udp source port of the udp flow item.
950 * @param spec Pointer to the specific flow item.
951 * @param mask Pointer to the specific flow item's mask.
952 * @param flow Pointer to the flow.
953 * @return 0 in case of success, negative error value otherwise.
956 mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
957 const struct rte_flow_item_udp *mask,
958 struct rte_flow *flow)
/* parse_dst = 0 selects the source port. */
960 return mrvl_parse_udp_port(spec, mask, 0, flow);
964 * Helper for parsing the udp destination port of the udp flow item.
966 * @param spec Pointer to the specific flow item.
967 * @param mask Pointer to the specific flow item's mask.
968 * @param flow Pointer to the flow.
969 * @return 0 in case of success, negative error value otherwise.
972 mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
973 const struct rte_flow_item_udp *mask,
974 struct rte_flow *flow)
/* parse_dst = 1 selects the destination port. */
976 return mrvl_parse_udp_port(spec, mask, 1, flow);
980 * Parse eth flow item.
982 * @param item Pointer to the flow item.
983 * @param flow Pointer to the flow.
984 * @param error Pointer to the flow error.
985 * @returns 0 on success, negative value otherwise.
988 mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
989 struct rte_flow_error *error)
991 const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
992 struct rte_ether_addr zero;
/* Validate spec/mask/last and pick the eth default mask if none given. */
995 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
996 &rte_flow_item_eth_mask,
997 sizeof(struct rte_flow_item_eth), error);
1001 memset(&zero, 0, sizeof(zero));
/* Only add key fields whose mask is non-zero (i.e. actually matched on). */
1003 if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
1004 ret = mrvl_parse_dmac(spec, mask, flow);
1009 if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
1010 ret = mrvl_parse_smac(spec, mask, flow);
1016 MRVL_LOG(WARNING, "eth type mask is ignored");
1017 ret = mrvl_parse_type(spec, mask, flow);
1024 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1025 "Reached maximum number of fields in cls tbl key\n");
1030 * Parse vlan flow item.
1032 * @param item Pointer to the flow item.
1033 * @param flow Pointer to the flow.
1034 * @param error Pointer to the flow error.
1035 * @returns 0 on success, negative value otherwise.
1038 mrvl_parse_vlan(const struct rte_flow_item *item,
1039 struct rte_flow *flow,
1040 struct rte_flow_error *error)
1042 const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
1046 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1047 &rte_flow_item_vlan_mask,
1048 sizeof(struct rte_flow_item_vlan), error);
/* Inspect the TCI mask to see which sub-fields the user wants matched. */
1052 m = rte_be_to_cpu_16(mask->tci);
1053 if (m & MRVL_VLAN_ID_MASK) {
1054 MRVL_LOG(WARNING, "vlan id mask is ignored");
1055 ret = mrvl_parse_vlan_id(spec, mask, flow);
1060 if (m & MRVL_VLAN_PRI_MASK) {
1061 MRVL_LOG(WARNING, "vlan pri mask is ignored");
1062 ret = mrvl_parse_vlan_pri(spec, mask, flow);
/* An EtherType field added by a preceding eth item would clash with TPID. */
1067 if (flow->pattern & F_TYPE) {
1068 rte_flow_error_set(error, ENOTSUP,
1069 RTE_FLOW_ERROR_TYPE_ITEM, item,
1070 "VLAN TPID matching is not supported");
1073 if (mask->inner_type) {
/* Re-use the eth type parser by wrapping the inner type in an eth item. */
1074 struct rte_flow_item_eth spec_eth = {
1075 .type = spec->inner_type,
1077 struct rte_flow_item_eth mask_eth = {
1078 .type = mask->inner_type,
1081 MRVL_LOG(WARNING, "inner eth type mask is ignored");
1082 ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
1089 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1090 "Reached maximum number of fields in cls tbl key\n");
1095 * Parse ipv4 flow item.
1097 * @param item Pointer to the flow item.
1098 * @param flow Pointer to the flow.
1099 * @param error Pointer to the flow error.
1100 * @returns 0 on success, negative value otherwise.
1103 mrvl_parse_ip4(const struct rte_flow_item *item,
1104 struct rte_flow *flow,
1105 struct rte_flow_error *error)
1107 const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
1110 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1111 &rte_flow_item_ipv4_mask,
1112 sizeof(struct rte_flow_item_ipv4), error);
/* Reject masks on header fields the hardware classifier cannot match. */
1116 if (mask->hdr.version_ihl ||
1117 mask->hdr.total_length ||
1118 mask->hdr.packet_id ||
1119 mask->hdr.fragment_offset ||
1120 mask->hdr.time_to_live ||
1121 mask->hdr.hdr_checksum) {
1122 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1123 NULL, "Not supported by classifier\n");
1127 if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
1128 ret = mrvl_parse_ip4_dscp(spec, mask, flow);
1133 if (mask->hdr.src_addr) {
1134 ret = mrvl_parse_ip4_sip(spec, mask, flow);
1139 if (mask->hdr.dst_addr) {
1140 ret = mrvl_parse_ip4_dip(spec, mask, flow);
1145 if (mask->hdr.next_proto_id) {
1146 MRVL_LOG(WARNING, "next proto id mask is ignored");
1147 ret = mrvl_parse_ip4_proto(spec, mask, flow);
1154 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1155 "Reached maximum number of fields in cls tbl key\n");
1160 * Parse ipv6 flow item.
1162 * @param item Pointer to the flow item.
1163 * @param flow Pointer to the flow.
1164 * @param error Pointer to the flow error.
1165 * @returns 0 on success, negative value otherwise.
1168 mrvl_parse_ip6(const struct rte_flow_item *item,
1169 struct rte_flow *flow,
1170 struct rte_flow_error *error)
1172 const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
1173 struct rte_ipv6_hdr zero;
1177 ret = mrvl_parse_init(item, (const void **)&spec,
1178 (const void **)&mask,
1179 &rte_flow_item_ipv6_mask,
1180 sizeof(struct rte_flow_item_ipv6),
1185 memset(&zero, 0, sizeof(zero));
/* Reject masks on header fields the hardware classifier cannot match. */
1187 if (mask->hdr.payload_len ||
1188 mask->hdr.hop_limits) {
1189 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1190 NULL, "Not supported by classifier\n");
/* 128-bit addresses: compare against an all-zero header to detect a match request. */
1194 if (memcmp(mask->hdr.src_addr,
1195 zero.src_addr, sizeof(mask->hdr.src_addr))) {
1196 ret = mrvl_parse_ip6_sip(spec, mask, flow);
1201 if (memcmp(mask->hdr.dst_addr,
1202 zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
1203 ret = mrvl_parse_ip6_dip(spec, mask, flow);
1208 flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
1210 ret = mrvl_parse_ip6_flow(spec, mask, flow);
1215 if (mask->hdr.proto) {
1216 MRVL_LOG(WARNING, "next header mask is ignored");
1217 ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
1224 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1225 "Reached maximum number of fields in cls tbl key\n");
1230 * Parse tcp flow item.
1232 * @param item Pointer to the flow item.
1233 * @param flow Pointer to the flow.
1234 * @param error Pointer to the flow error.
1235 * @returns 0 on success, negative value otherwise.
1238 mrvl_parse_tcp(const struct rte_flow_item *item,
1239 struct rte_flow *flow,
1240 struct rte_flow_error *error)
1242 const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1245 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1246 &rte_flow_item_ipv4_mask,
1247 sizeof(struct rte_flow_item_ipv4), error);
1251 if (mask->hdr.sent_seq ||
1252 mask->hdr.recv_ack ||
1253 mask->hdr.data_off ||
1254 mask->hdr.tcp_flags ||
1257 mask->hdr.tcp_urp) {
1258 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1259 NULL, "Not supported by classifier\n");
1263 if (mask->hdr.src_port) {
1264 MRVL_LOG(WARNING, "tcp sport mask is ignored");
1265 ret = mrvl_parse_tcp_sport(spec, mask, flow);
1270 if (mask->hdr.dst_port) {
1271 MRVL_LOG(WARNING, "tcp dport mask is ignored");
1272 ret = mrvl_parse_tcp_dport(spec, mask, flow);
1279 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1280 "Reached maximum number of fields in cls tbl key\n");
1285 * Parse udp flow item.
1287 * @param item Pointer to the flow item.
1288 * @param flow Pointer to the flow.
1289 * @param error Pointer to the flow error.
1290 * @returns 0 on success, negative value otherwise.
1293 mrvl_parse_udp(const struct rte_flow_item *item,
1294 struct rte_flow *flow,
1295 struct rte_flow_error *error)
1297 const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1300 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1301 &rte_flow_item_ipv4_mask,
1302 sizeof(struct rte_flow_item_ipv4), error);
1306 if (mask->hdr.dgram_len ||
1307 mask->hdr.dgram_cksum) {
1308 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1309 NULL, "Not supported by classifier\n");
1313 if (mask->hdr.src_port) {
1314 MRVL_LOG(WARNING, "udp sport mask is ignored");
1315 ret = mrvl_parse_udp_sport(spec, mask, flow);
1320 if (mask->hdr.dst_port) {
1321 MRVL_LOG(WARNING, "udp dport mask is ignored");
1322 ret = mrvl_parse_udp_dport(spec, mask, flow);
1329 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1330 "Reached maximum number of fields in cls tbl key\n");
 * Parse flow pattern composed of the eth item.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
	/* Single-item pattern: hand the table straight to the eth parser. */
	return mrvl_parse_eth(pattern, flow, error);
 * Parse flow pattern composed of the eth and vlan items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_vlan(const struct rte_flow_item pattern[],
			    struct rte_flow *flow,
			    struct rte_flow_error *error)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	ret = mrvl_parse_eth(item, flow, error);
	/* Advance past the eth item (skipping any VOID items). */
	item = mrvl_next_item(item + 1);
	return mrvl_parse_vlan(item, flow, error);
 * Parse flow pattern composed of the eth, vlan and ip4/ip6 items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_vlan_ip4_ip6(const struct rte_flow_item pattern[],
				    struct rte_flow *flow,
				    struct rte_flow_error *error, int ip6)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	ret = mrvl_parse_eth(item, flow, error);
	item = mrvl_next_item(item + 1);
	ret = mrvl_parse_vlan(item, flow, error);
	item = mrvl_next_item(item + 1);
	/* Third item is the L3 header; dispatch on the ip6 flag. */
	return ip6 ? mrvl_parse_ip6(item, flow, error) :
		     mrvl_parse_ip4(item, flow, error);
 * Parse flow pattern composed of the eth, vlan and ipv4 items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_vlan_ip4(const struct rte_flow_item pattern[],
				struct rte_flow *flow,
				struct rte_flow_error *error)
	return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 0);
 * Parse flow pattern composed of the eth, vlan and ipv6 items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_vlan_ip6(const struct rte_flow_item pattern[],
				struct rte_flow *flow,
				struct rte_flow_error *error)
	return mrvl_parse_pattern_eth_vlan_ip4_ip6(pattern, flow, error, 1);
 * Parse flow pattern composed of the eth and ip4/ip6 items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_ip4_ip6(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error, int ip6)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	ret = mrvl_parse_eth(item, flow, error);
	item = mrvl_next_item(item + 1);
	return ip6 ? mrvl_parse_ip6(item, flow, error) :
		     mrvl_parse_ip4(item, flow, error);
 * Parse flow pattern composed of the eth and ipv4 items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_ip4(const struct rte_flow_item pattern[],
			   struct rte_flow *flow,
			   struct rte_flow_error *error)
	return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
 * Parse flow pattern composed of the eth and ipv6 items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_ip6(const struct rte_flow_item pattern[],
			   struct rte_flow *flow,
			   struct rte_flow_error *error)
	return mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
 * Parse flow pattern composed of the eth, ip4 and tcp/udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param tcp 1 to parse tcp item, 0 to parse udp item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_ip4_tcp_udp(const struct rte_flow_item pattern[],
				   struct rte_flow *flow,
				   struct rte_flow_error *error, int tcp)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	/* Parse eth + ipv4 first, then step past both of them. */
	ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 0);
	item = mrvl_next_item(item + 1);
	item = mrvl_next_item(item + 1);
	return mrvl_parse_tcp(item, flow, error);
	return mrvl_parse_udp(item, flow, error);
 * Parse flow pattern composed of the eth, ipv4 and tcp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_ip4_tcp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error)
	return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 1);
 * Parse flow pattern composed of the eth, ipv4 and udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_ip4_udp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error)
	return mrvl_parse_pattern_eth_ip4_tcp_udp(pattern, flow, error, 0);
 * Parse flow pattern composed of the eth, ipv6 and tcp/udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param tcp 1 to parse tcp item, 0 to parse udp item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_ip6_tcp_udp(const struct rte_flow_item pattern[],
				   struct rte_flow *flow,
				   struct rte_flow_error *error, int tcp)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	/* Parse eth + ipv6 first, then step past both of them. */
	ret = mrvl_parse_pattern_eth_ip4_ip6(pattern, flow, error, 1);
	item = mrvl_next_item(item + 1);
	item = mrvl_next_item(item + 1);
	return mrvl_parse_tcp(item, flow, error);
	return mrvl_parse_udp(item, flow, error);
 * Parse flow pattern composed of the eth, ipv6 and tcp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_ip6_tcp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error)
	return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 1);
 * Parse flow pattern composed of the eth, ipv6 and udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_eth_ip6_udp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error)
	return mrvl_parse_pattern_eth_ip6_tcp_udp(pattern, flow, error, 0);
 * Parse flow pattern composed of the vlan item.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan(const struct rte_flow_item pattern[],
			struct rte_flow *flow,
			struct rte_flow_error *error)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	return mrvl_parse_vlan(item, flow, error);
 * Parse flow pattern composed of the vlan and ip4/ip6 items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan_ip4_ip6(const struct rte_flow_item pattern[],
				struct rte_flow *flow,
				struct rte_flow_error *error, int ip6)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	ret = mrvl_parse_vlan(item, flow, error);
	/* Advance past the vlan item (skipping any VOID items). */
	item = mrvl_next_item(item + 1);
	return ip6 ? mrvl_parse_ip6(item, flow, error) :
		     mrvl_parse_ip4(item, flow, error);
 * Parse flow pattern composed of the vlan and ipv4 items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan_ip4(const struct rte_flow_item pattern[],
			    struct rte_flow *flow,
			    struct rte_flow_error *error)
	return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
 * Parse flow pattern composed of the vlan, ipv4 and tcp/udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param tcp 1 to parse tcp item, 0 to parse udp item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan_ip_tcp_udp(const struct rte_flow_item pattern[],
				   struct rte_flow *flow,
				   struct rte_flow_error *error, int tcp)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	/* Parse vlan + ipv4 first, then step past both of them. */
	ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 0);
	item = mrvl_next_item(item + 1);
	item = mrvl_next_item(item + 1);
	return mrvl_parse_tcp(item, flow, error);
	return mrvl_parse_udp(item, flow, error);
 * Parse flow pattern composed of the vlan, ipv4 and tcp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan_ip_tcp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error)
	return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 1);
 * Parse flow pattern composed of the vlan, ipv4 and udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan_ip_udp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error)
	return mrvl_parse_pattern_vlan_ip_tcp_udp(pattern, flow, error, 0);
 * Parse flow pattern composed of the vlan and ipv6 items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan_ip6(const struct rte_flow_item pattern[],
			    struct rte_flow *flow,
			    struct rte_flow_error *error)
	return mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
 * Parse flow pattern composed of the vlan, ipv6 and tcp/udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param tcp 1 to parse tcp item, 0 to parse udp item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan_ip6_tcp_udp(const struct rte_flow_item pattern[],
				    struct rte_flow *flow,
				    struct rte_flow_error *error, int tcp)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	/* Parse vlan + ipv6 first, then step past both of them. */
	ret = mrvl_parse_pattern_vlan_ip4_ip6(pattern, flow, error, 1);
	item = mrvl_next_item(item + 1);
	item = mrvl_next_item(item + 1);
	return mrvl_parse_tcp(item, flow, error);
	return mrvl_parse_udp(item, flow, error);
 * Parse flow pattern composed of the vlan, ipv6 and tcp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan_ip6_tcp(const struct rte_flow_item pattern[],
				struct rte_flow *flow,
				struct rte_flow_error *error)
	return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 1);
 * Parse flow pattern composed of the vlan, ipv6 and udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_vlan_ip6_udp(const struct rte_flow_item pattern[],
				struct rte_flow *flow,
				struct rte_flow_error *error)
	return mrvl_parse_pattern_vlan_ip6_tcp_udp(pattern, flow, error, 0);
 * Parse flow pattern composed of the ip4/ip6 item.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_ip4_ip6(const struct rte_flow_item pattern[],
			   struct rte_flow *flow,
			   struct rte_flow_error *error, int ip6)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	return ip6 ? mrvl_parse_ip6(item, flow, error) :
		     mrvl_parse_ip4(item, flow, error);
 * Parse flow pattern composed of the ipv4 item.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_ip4(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
	return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 0);
 * Parse flow pattern composed of the ipv6 item.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_ip6(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
	return mrvl_parse_pattern_ip4_ip6(pattern, flow, error, 1);
 * Parse flow pattern composed of the ip4/ip6 and tcp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_ip4_ip6_tcp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error, int ip6)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
		    mrvl_parse_ip4(item, flow, error);
	/* Advance past the L3 item (skipping any VOID items). */
	item = mrvl_next_item(item + 1);
	return mrvl_parse_tcp(item, flow, error);
 * Parse flow pattern composed of the ipv4 and tcp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_ip4_tcp(const struct rte_flow_item pattern[],
			   struct rte_flow *flow,
			   struct rte_flow_error *error)
	return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 0);
 * Parse flow pattern composed of the ipv6 and tcp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_ip6_tcp(const struct rte_flow_item pattern[],
			   struct rte_flow *flow,
			   struct rte_flow_error *error)
	return mrvl_parse_pattern_ip4_ip6_tcp(pattern, flow, error, 1);
 * Parse flow pattern composed of the ipv4/ipv6 and udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @param ip6 1 to parse ip6 item, 0 to parse ip4 item.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_ip4_ip6_udp(const struct rte_flow_item pattern[],
			       struct rte_flow *flow,
			       struct rte_flow_error *error, int ip6)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	ret = ip6 ? mrvl_parse_ip6(item, flow, error) :
		    mrvl_parse_ip4(item, flow, error);
	/* Advance past the L3 item (skipping any VOID items). */
	item = mrvl_next_item(item + 1);
	return mrvl_parse_udp(item, flow, error);
 * Parse flow pattern composed of the ipv4 and udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_ip4_udp(const struct rte_flow_item pattern[],
			   struct rte_flow *flow,
			   struct rte_flow_error *error)
	return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 0);
 * Parse flow pattern composed of the ipv6 and udp items.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_ip6_udp(const struct rte_flow_item pattern[],
			   struct rte_flow *flow,
			   struct rte_flow_error *error)
	return mrvl_parse_pattern_ip4_ip6_udp(pattern, flow, error, 1);
 * Parse flow pattern composed of the tcp item.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_tcp(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	return mrvl_parse_tcp(item, flow, error);
 * Parse flow pattern composed of the udp item.
 * @param pattern Pointer to the flow pattern table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_parse_pattern_udp(const struct rte_flow_item pattern[],
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
	const struct rte_flow_item *item = mrvl_next_item(pattern);
	return mrvl_parse_udp(item, flow, error);
 * Structure used to map specific flow pattern to the pattern parse callback
 * which will iterate over each pattern item and extract relevant data.
static const struct {
	const enum rte_flow_item_type *pattern;
	int (*parse)(const struct rte_flow_item pattern[],
		struct rte_flow *flow,
		struct rte_flow_error *error);
} mrvl_patterns[] = {
	/* Entries are probed in array order by mrvl_flow_parse_pattern();
	 * the first matching pattern is used. */
	{ pattern_eth, mrvl_parse_pattern_eth },
	{ pattern_eth_vlan, mrvl_parse_pattern_eth_vlan },
	{ pattern_eth_vlan_ip, mrvl_parse_pattern_eth_vlan_ip4 },
	{ pattern_eth_vlan_ip6, mrvl_parse_pattern_eth_vlan_ip6 },
	{ pattern_eth_ip4, mrvl_parse_pattern_eth_ip4 },
	{ pattern_eth_ip4_tcp, mrvl_parse_pattern_eth_ip4_tcp },
	{ pattern_eth_ip4_udp, mrvl_parse_pattern_eth_ip4_udp },
	{ pattern_eth_ip6, mrvl_parse_pattern_eth_ip6 },
	{ pattern_eth_ip6_tcp, mrvl_parse_pattern_eth_ip6_tcp },
	{ pattern_eth_ip6_udp, mrvl_parse_pattern_eth_ip6_udp },
	{ pattern_vlan, mrvl_parse_pattern_vlan },
	{ pattern_vlan_ip, mrvl_parse_pattern_vlan_ip4 },
	{ pattern_vlan_ip_tcp, mrvl_parse_pattern_vlan_ip_tcp },
	{ pattern_vlan_ip_udp, mrvl_parse_pattern_vlan_ip_udp },
	{ pattern_vlan_ip6, mrvl_parse_pattern_vlan_ip6 },
	{ pattern_vlan_ip6_tcp, mrvl_parse_pattern_vlan_ip6_tcp },
	{ pattern_vlan_ip6_udp, mrvl_parse_pattern_vlan_ip6_udp },
	{ pattern_ip, mrvl_parse_pattern_ip4 },
	{ pattern_ip_tcp, mrvl_parse_pattern_ip4_tcp },
	{ pattern_ip_udp, mrvl_parse_pattern_ip4_udp },
	{ pattern_ip6, mrvl_parse_pattern_ip6 },
	{ pattern_ip6_tcp, mrvl_parse_pattern_ip6_tcp },
	{ pattern_ip6_udp, mrvl_parse_pattern_ip6_udp },
	{ pattern_tcp, mrvl_parse_pattern_tcp },
	{ pattern_udp, mrvl_parse_pattern_udp }
 * Check whether provided pattern matches any of the supported ones.
 * @param type_pattern Pointer to the pattern type.
 * @param item_pattern Pointer to the flow pattern.
 * @returns 1 in case of success, 0 value otherwise.
mrvl_patterns_match(const enum rte_flow_item_type *type_pattern,
		    const struct rte_flow_item *item_pattern)
	const enum rte_flow_item_type *type = type_pattern;
	const struct rte_flow_item *item = item_pattern;

	/* VOID items are transparent: skip them without consuming a type. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VOID) {

	/* Stop comparing once either sequence reaches its END marker. */
	if (*type == RTE_FLOW_ITEM_TYPE_END ||
	    item->type == RTE_FLOW_ITEM_TYPE_END)

	if (*type != item->type)

	/* Sequences match only if they also terminate at the same point
	 * (both at END, or on the same final type). */
	return *type == item->type;
 * Parse flow attribute.
 * This will check whether the provided attribute's flags are supported.
 * @param priv Unused
 * @param attr Pointer to the flow attribute.
 * @param flow Unused
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
		     const struct rte_flow_attr *attr,
		     struct rte_flow *flow __rte_unused,
		     struct rte_flow_error *error)
	/* A NULL attribute is rejected outright. */
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
			   NULL, "NULL attribute");

	/* Flow groups are not supported by this driver. */
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
			   "Groups are not supported");
	if (attr->priority) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
				   "Priorities are not supported");
	if (!attr->ingress) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
				   "Only ingress is supported");
	/* Egress rules are rejected — classification is RX-only. */
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
			   "Egress is not supported");
	if (attr->transfer) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
				   "Transfer is not supported");
 * Parse flow pattern.
 * Specific classifier rule will be created as well.
 * @param priv Unused
 * @param pattern Pointer to the flow pattern.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
			const struct rte_flow_item pattern[],
			struct rte_flow *flow,
			struct rte_flow_error *error)
	/* Probe supported patterns in table order; first match is parsed. */
	for (i = 0; i < RTE_DIM(mrvl_patterns); i++) {
		if (!mrvl_patterns_match(mrvl_patterns[i].pattern, pattern))
		ret = mrvl_patterns[i].parse(pattern, flow, error);
		/* On parse failure, release any key/mask strings the parser
		 * already attached to the rule. */
		mrvl_free_all_key_mask(&flow->rule);

	/* No supported pattern matched the request. */
	rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			   "Unsupported pattern");
 * Parse flow actions.
 * Supported actions: DROP, QUEUE and METER. At least one action must be
 * specified.
 * @param priv Pointer to the port's private data.
 * @param actions Pointer the action table.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
mrvl_flow_parse_actions(struct mrvl_priv *priv,
			const struct rte_flow_action actions[],
			struct rte_flow *flow,
			struct rte_flow_error *error)
	const struct rte_flow_action *action = actions;

	for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)

		if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
			/* DROP: matching packets are discarded. */
			flow->cos.ppio = priv->ppio;
			flow->action.type = PP2_CLS_TBL_ACT_DROP;
			flow->action.cos = &flow->cos;
		} else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
			const struct rte_flow_action_queue *q =
				(const struct rte_flow_action_queue *)

			/* NOTE(review): looks like an off-by-one — queue
			 * indices are 0-based so this probably should be
			 * `q->index >= priv->nb_rx_queues`; confirm before
			 * changing. */
			if (q->index > priv->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						"Queue index out of range");
			if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
				 * Unknown TC mapping, mapping will not have
				"Unknown TC mapping for queue %hu eth%hhu",
				q->index, priv->ppio_id);
				rte_flow_error_set(error, EFAULT,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			"Action: Assign packets to queue %d, tc:%d, q:%d",
			q->index, priv->rxq_map[q->index].tc,
			priv->rxq_map[q->index].inq);
			/* QUEUE: steer matching packets to the mapped TC. */
			flow->cos.ppio = priv->ppio;
			flow->cos.tc = priv->rxq_map[q->index].tc;
			flow->action.type = PP2_CLS_TBL_ACT_DONE;
			flow->action.cos = &flow->cos;
		} else if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
			const struct rte_flow_action_meter *meter;
			struct mrvl_mtr *mtr;

			meter = action->conf;
				return -rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						NULL, "Invalid meter\n");
			/* Look up the meter object by its id. */
			LIST_FOREACH(mtr, &priv->mtrs, next)
				if (mtr->mtr_id == meter->mtr_id)
				return -rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						"Meter id does not exist\n");
			if (!mtr->shared && mtr->refcnt)
				return -rte_flow_error_set(error, EPERM,
						RTE_FLOW_ERROR_TYPE_ACTION,
						"Meter cannot be shared\n");
			 * In case cos has already been set
			if (!flow->cos.ppio) {
				flow->cos.ppio = priv->ppio;
			/* Attach the meter's policer when it is enabled. */
			flow->action.type = PP2_CLS_TBL_ACT_DONE;
			flow->action.cos = &flow->cos;
			flow->action.plcr = mtr->enabled ? mtr->plcr : NULL;
			rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					"Action not supported");
	/* Reaching here without a specified action is an error. */
	rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			"Action not specified");
 * Parse flow attribute, pattern and actions.
 * Convenience wrapper that validates the attribute, then the pattern, then
 * the actions, stopping at the first failure.
 * @param priv Pointer to the port's private data.
 * @param attr Pointer to the flow attribute.
 * @param pattern Pointer to the flow pattern.
 * @param actions Pointer to the flow actions.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 on success, negative value otherwise.
mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow *flow,
		struct rte_flow_error *error)
	ret = mrvl_flow_parse_attr(priv, attr, flow, error);
	ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
	return mrvl_flow_parse_actions(priv, actions, flow, error);
 * Get engine type for the given flow.
 * The choice is based on the total size of the rule's key fields.
 * @param flow Pointer to the flow.
 * @returns The type of the engine.
static inline enum pp2_cls_tbl_type
mrvl_engine_type(const struct rte_flow *flow)
	/* Sum the sizes of all key fields in the rule. */
	for (i = 0; i < flow->rule.num_fields; i++)
		size += flow->rule.fields[i].size;
	 * For maskable engine type the key size must be up to 8 bytes.
	 * For keys with size bigger than 8 bytes, engine type must
	 * be set to exact match.
	return PP2_CLS_TBL_EXACT_MATCH;
	return PP2_CLS_TBL_MASKABLE;
2374 * Create classifier table.
2376 * @param dev Pointer to the device.
2377 * @param flow Pointer to the very first flow.
2378 * @returns 0 in case of success, negative value otherwise.
2381 mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
2383 struct mrvl_priv *priv = dev->data->dev_private;
2384 struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
2387 if (priv->cls_tbl) {
2388 pp2_cls_tbl_deinit(priv->cls_tbl);
2389 priv->cls_tbl = NULL;
2392 memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
2394 priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
2395 MRVL_LOG(INFO, "Setting cls search engine type to %s",
2396 priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
2397 "exact" : "maskable");
2398 priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
2399 priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
2400 priv->cls_tbl_params.default_act.cos = &first_flow->cos;
2402 if (first_flow->pattern & F_DMAC) {
2403 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2404 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
2406 key->num_fields += 1;
2409 if (first_flow->pattern & F_SMAC) {
2410 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2411 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
2413 key->num_fields += 1;
2416 if (first_flow->pattern & F_TYPE) {
2417 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
2418 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
2420 key->num_fields += 1;
2423 if (first_flow->pattern & F_VLAN_ID) {
2424 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
2425 key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
2427 key->num_fields += 1;
2430 if (first_flow->pattern & F_VLAN_PRI) {
	/*
	 * NOTE(review): tail of mrvl_create_cls_table(); this listing elides
	 * continuation lines, key_size updates and closing braces.  Each
	 * pattern bit present in the first flow appends one (proto, field)
	 * pair to the classifier key and bumps key->num_fields; wide fields
	 * also grow key->key_size.
	 */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
		key->proto_field[key->num_fields].field.vlan =
		key->num_fields += 1;

	if (first_flow->pattern & F_IP4_TOS) {
		/* IPv4 TOS field. */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
		key->proto_field[key->num_fields].field.ipv4 =
		key->num_fields += 1;

	if (first_flow->pattern & F_IP4_SIP) {
		/* IPv4 source address. */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
		key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
		key->num_fields += 1;

	if (first_flow->pattern & F_IP4_DIP) {
		/* IPv4 destination address. */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
		key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
		key->num_fields += 1;

	if (first_flow->pattern & F_IP4_PROTO) {
		/* IPv4 protocol number. */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
		key->proto_field[key->num_fields].field.ipv4 =
		key->num_fields += 1;

	if (first_flow->pattern & F_IP6_SIP) {
		/* IPv6 source address - 16 key bytes. */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
		key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
		key->key_size += 16;
		key->num_fields += 1;

	if (first_flow->pattern & F_IP6_DIP) {
		/* IPv6 destination address - 16 key bytes. */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
		key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
		key->key_size += 16;
		key->num_fields += 1;

	if (first_flow->pattern & F_IP6_FLOW) {
		/* IPv6 flow label. */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
		key->proto_field[key->num_fields].field.ipv6 =
		key->num_fields += 1;

	if (first_flow->pattern & F_IP6_NEXT_HDR) {
		/* IPv6 next header (upper-layer protocol). */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
		key->proto_field[key->num_fields].field.ipv6 =
			MV_NET_IP6_F_NEXT_HDR;
		key->num_fields += 1;

	if (first_flow->pattern & F_TCP_SPORT) {
		/* TCP source port. */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
		key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
		key->num_fields += 1;

	if (first_flow->pattern & F_TCP_DPORT) {
		/* TCP destination port. */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
		key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
		key->num_fields += 1;

	if (first_flow->pattern & F_UDP_SPORT) {
		/*
		 * BUG(review): proto is MV_NET_PROTO_UDP but the value is
		 * written through the .tcp union member using a TCP
		 * constant.  This should read
		 *     key->proto_field[key->num_fields].field.udp =
		 *             MV_NET_UDP_F_SP;
		 * (compare the F_UDP_DPORT branch below, which already uses
		 * .udp).  Works only if the MUSDK enum values coincide -
		 * confirm against mv_net.h and fix.
		 */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
		key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
		key->num_fields += 1;

	if (first_flow->pattern & F_UDP_DPORT) {
		/*
		 * BUG(review): correct union member (.udp) but a TCP
		 * constant; should be MV_NET_UDP_F_DP - confirm against
		 * mv_net.h and fix.
		 */
		key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
		key->proto_field[key->num_fields].field.udp = MV_NET_TCP_F_DP;
		key->num_fields += 1;

	/* Commit the assembled parameters to the hardware classifier and
	 * remember which pattern this table was built for (checked later by
	 * mrvl_flow_can_be_added()). */
	ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);

	priv->cls_tbl_pattern = first_flow->pattern;
2534 * Check whether new flow can be added to the table
2536 * @param priv Pointer to the port's private data.
2537 * @param flow Pointer to the new flow.
2538 * @return 1 in case flow can be added, 0 otherwise.
2541 mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
2543 return flow->pattern == priv->cls_tbl_pattern &&
2544 mrvl_engine_type(flow) == priv->cls_tbl_params.type;
/**
 * DPDK flow create callback called when flow is to be created.
 *
 * Parses the request, (re)creates the classifier table if needed and
 * installs the resulting rule in hardware.
 *
 * @param dev Pointer to the device.
 * @param attr Pointer to the flow attribute.
 * @param pattern Pointer to the flow pattern.
 * @param actions Pointer to the flow actions.
 * @param error Pointer to the flow error.
 * @returns Pointer to the created flow in case of success, NULL otherwise.
 */
static struct rte_flow *
mrvl_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
	struct mrvl_priv *priv = dev->data->dev_private;
	struct rte_flow *flow, *first;

	/* Rules can only be installed on a started port. */
	if (!dev->data->dev_started) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Port must be started first\n");

	/* Zeroed allocation on the caller's NUMA socket. */
	flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());

	/* Translate attr/pattern/actions into flow->rule and flow->action. */
	ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);

	/*
	 * Four cases here:
	 *
	 * 1. In case table does not exist - create one.
	 * 2. In case table exists, is empty and new flow cannot be added
	 *    recreate the table.
	 * 3. In case table is not empty and new flow matches table format
	 *    add it.
	 * 4. Otherwise flow cannot be added.
	 */
	first = LIST_FIRST(&priv->flows);
	if (!priv->cls_tbl) {
		ret = mrvl_create_cls_table(dev, flow);
	} else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
		ret = mrvl_create_cls_table(dev, flow);
	} else if (mrvl_flow_can_be_added(priv, flow)) {
		/* (elided in listing) flow matches existing table format */
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Pattern does not match cls table format\n");

	/* (elided in listing) bail out when table (re)creation failed */
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to create cls table\n");

	/* Install the rule into the hardware classifier table. */
	ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to add rule\n");

	/* Track the flow so destroy/flush can find it later. */
	LIST_INSERT_HEAD(&priv->flows, flow, next);
/**
 * Remove classifier rule associated with given flow.
 *
 * @param priv Pointer to the port's private data.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
		 struct rte_flow_error *error)
	/* Nothing to remove if the classifier table was never created. */
	if (!priv->cls_tbl) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Classifier table not initialized");

	/* Drop the rule from the hardware classifier table. */
	ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
	if (ret) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Failed to remove rule");

	/* Release the key/mask resources held by this rule. */
	mrvl_free_all_key_mask(&flow->rule);

	/*
	 * NOTE(review): the listing elides the enclosing guard here -
	 * presumably this runs only when the flow references a meter
	 * (flow->mtr != NULL) and drops the reference this flow held.
	 * Confirm against the full file.
	 */
	flow->mtr->refcnt--;
/**
 * DPDK flow destroy callback called when flow is to be removed.
 *
 * @param dev Pointer to the device.
 * @param flow Pointer to the flow.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
	struct mrvl_priv *priv = dev->data->dev_private;

	/* Verify the flow was actually created on this port by walking the
	 * per-port list (the loop body comparing entries against @flow is
	 * elided in this listing). */
	LIST_FOREACH(f, &priv->flows, next) {

		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Rule was not found");

	/* Unlink from the per-port flow list, then tear the rule down. */
	LIST_REMOVE(f, next);

	ret = mrvl_flow_remove(priv, flow, error);
2709 * DPDK flow callback called to verify given attribute, pattern and actions.
2711 * @param dev Pointer to the device.
2712 * @param attr Pointer to the flow attribute.
2713 * @param pattern Pointer to the flow pattern.
2714 * @param actions Pointer to the flow actions.
2715 * @param error Pointer to the flow error.
2716 * @returns 0 on success, negative value otherwise.
2719 mrvl_flow_validate(struct rte_eth_dev *dev,
2720 const struct rte_flow_attr *attr,
2721 const struct rte_flow_item pattern[],
2722 const struct rte_flow_action actions[],
2723 struct rte_flow_error *error)
2725 static struct rte_flow *flow;
2727 flow = mrvl_flow_create(dev, attr, pattern, actions, error);
2731 mrvl_flow_destroy(dev, flow, error);
/**
 * DPDK flow flush callback called when flows are to be flushed.
 *
 * Removes and releases every flow installed on the port.
 *
 * @param dev Pointer to the device.
 * @param error Pointer to the flow error.
 * @returns 0 in case of success, negative value otherwise.
 */
mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
	struct mrvl_priv *priv = dev->data->dev_private;

	/* Pop flows off the head of the list one at a time: remove the
	 * classifier rule first, then unlink the element. */
	while (!LIST_EMPTY(&priv->flows)) {
		struct rte_flow *flow = LIST_FIRST(&priv->flows);
		int ret = mrvl_flow_remove(priv, flow, error);

		LIST_REMOVE(flow, next);
2762 * DPDK flow isolate callback called to isolate port.
2764 * @param dev Pointer to the device.
2765 * @param enable Pass 0/1 to disable/enable port isolation.
2766 * @param error Pointer to the flow error.
2767 * @returns 0 in case of success, negative value otherwise.
2770 mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
2771 struct rte_flow_error *error)
2773 struct mrvl_priv *priv = dev->data->dev_private;
2775 if (dev->data->dev_started) {
2776 rte_flow_error_set(error, EBUSY,
2777 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2778 NULL, "Port must be stopped first\n");
2782 priv->isolated = enable;
/** rte_flow driver callbacks implemented by this PMD. */
const struct rte_flow_ops mrvl_flow_ops = {
	.validate = mrvl_flow_validate,
	.create = mrvl_flow_create,
	.destroy = mrvl_flow_destroy,
	.flush = mrvl_flow_flush,
	.isolate = mrvl_flow_isolate
2796 * Initialize flow resources.
2798 * @param dev Pointer to the device.
2801 mrvl_flow_init(struct rte_eth_dev *dev)
2803 struct mrvl_priv *priv = dev->data->dev_private;
2805 LIST_INIT(&priv->flows);
2809 * Cleanup flow resources.
2811 * @param dev Pointer to the device.
2814 mrvl_flow_deinit(struct rte_eth_dev *dev)
2816 struct mrvl_priv *priv = dev->data->dev_private;
2818 mrvl_flow_flush(dev, NULL);
2820 if (priv->cls_tbl) {
2821 pp2_cls_tbl_deinit(priv->cls_tbl);
2822 priv->cls_tbl = NULL;