1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Marvell International Ltd.
3 * Copyright(c) 2018 Semihalf.
8 #include <rte_flow_driver.h>
9 #include <rte_malloc.h>
12 #include <arpa/inet.h>
14 #include "mrvl_flow.h"
/** Number of rules in the classifier table. */
#define MRVL_CLS_MAX_NUM_RULES 20

/** Size of the classifier key and mask strings. */
#define MRVL_CLS_STR_SIZE_MAX 40

/* VLAN TCI sub-fields: low 12 bits carry the VID, bits 14:12 the PCP. */
#define MRVL_VLAN_ID_MASK 0x0fff
#define MRVL_VLAN_PRI_MASK 0x7000
/* DSCP occupies the top six bits of the IPv4 ToS byte. */
#define MRVL_IPV4_DSCP_MASK 0xfc
/* Match the full 32-bit IPv4 address. */
#define MRVL_IPV4_ADDR_MASK 0xffffffff
/* 20-bit IPv6 flow label (low bits of vtc_flow). */
#define MRVL_IPV6_FLOW_MASK 0x0fffff
30 * Allocate memory for classifier rule key and mask fields.
32 * @param field Pointer to the classifier rule.
33 * @returns 0 in case of success, negative value otherwise.
36 mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
38 unsigned int id = rte_socket_id();
40 field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
44 field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
58 * Free memory allocated for classifier rule key and mask fields.
60 * @param field Pointer to the classifier rule.
63 mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
66 rte_free(field->mask);
72 * Free memory allocated for all classifier rule key and mask fields.
74 * @param rule Pointer to the classifier table rule.
77 mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
81 for (i = 0; i < rule->num_fields; i++)
82 mrvl_free_key_mask(&rule->fields[i]);
87 * Initialize rte flow item parsing.
89 * @param item Pointer to the flow item.
90 * @param spec_ptr Pointer to the specific item pointer.
91 * @param mask_ptr Pointer to the specific item's mask pointer.
92 * @def_mask Pointer to the default mask.
93 * @size Size of the flow item.
94 * @error Pointer to the rte flow error.
95 * @returns 0 in case of success, negative value otherwise.
98 mrvl_parse_init(const struct rte_flow_item *item,
99 const void **spec_ptr,
100 const void **mask_ptr,
101 const void *def_mask,
103 struct rte_flow_error *error)
110 memset(zeros, 0, size);
113 rte_flow_error_set(error, EINVAL,
114 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
119 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
120 rte_flow_error_set(error, EINVAL,
121 RTE_FLOW_ERROR_TYPE_ITEM, item,
122 "Mask or last is set without spec\n");
127 * If "mask" is not set, default mask is used,
128 * but if default mask is NULL, "mask" should be set.
130 if (item->mask == NULL) {
131 if (def_mask == NULL) {
132 rte_flow_error_set(error, EINVAL,
133 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
134 "Mask should be specified\n");
138 mask = (const uint8_t *)def_mask;
140 mask = (const uint8_t *)item->mask;
143 spec = (const uint8_t *)item->spec;
144 last = (const uint8_t *)item->last;
147 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
148 NULL, "Spec should be specified\n");
153 * If field values in "last" are either 0 or equal to the corresponding
154 * values in "spec" then they are ignored.
157 !memcmp(last, zeros, size) &&
158 memcmp(last, spec, size) != 0) {
159 rte_flow_error_set(error, ENOTSUP,
160 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
161 "Ranging is not supported\n");
172 * Parse the eth flow item.
174 * This will create classifier rule that matches either destination or source
177 * @param spec Pointer to the specific flow item.
178 * @param mask Pointer to the specific flow item's mask.
179 * @param parse_dst Parse either destination or source mac address.
180 * @param flow Pointer to the flow.
181 * @return 0 in case of success, negative error value otherwise.
184 mrvl_parse_mac(const struct rte_flow_item_eth *spec,
185 const struct rte_flow_item_eth *mask,
186 int parse_dst, struct rte_flow *flow)
188 struct pp2_cls_rule_key_field *key_field;
189 const uint8_t *k, *m;
192 k = spec->dst.addr_bytes;
193 m = mask->dst.addr_bytes;
195 flow->table_key.proto_field[flow->rule.num_fields].field.eth =
198 k = spec->src.addr_bytes;
199 m = mask->src.addr_bytes;
201 flow->table_key.proto_field[flow->rule.num_fields].field.eth =
205 key_field = &flow->rule.fields[flow->rule.num_fields];
206 mrvl_alloc_key_mask(key_field);
209 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
210 "%02x:%02x:%02x:%02x:%02x:%02x",
211 k[0], k[1], k[2], k[3], k[4], k[5]);
213 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
214 "%02x:%02x:%02x:%02x:%02x:%02x",
215 m[0], m[1], m[2], m[3], m[4], m[5]);
217 flow->table_key.proto_field[flow->rule.num_fields].proto =
219 flow->table_key.key_size += key_field->size;
221 flow->rule.num_fields += 1;
/**
 * Helper for parsing the eth flow item destination mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	/* Delegate to the common MAC parser, selecting the destination. */
	return mrvl_parse_mac(spec, mask, 1, flow);
}
/**
 * Helper for parsing the eth flow item source mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_smac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	/* Delegate to the common MAC parser, selecting the source. */
	return mrvl_parse_mac(spec, mask, 0, flow);
}
259 * Parse the ether type field of the eth flow item.
261 * @param spec Pointer to the specific flow item.
262 * @param mask Pointer to the specific flow item's mask.
263 * @param flow Pointer to the flow.
264 * @return 0 in case of success, negative error value otherwise.
267 mrvl_parse_type(const struct rte_flow_item_eth *spec,
268 const struct rte_flow_item_eth *mask __rte_unused,
269 struct rte_flow *flow)
271 struct pp2_cls_rule_key_field *key_field;
274 key_field = &flow->rule.fields[flow->rule.num_fields];
275 mrvl_alloc_key_mask(key_field);
278 k = rte_be_to_cpu_16(spec->type);
279 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
281 flow->table_key.proto_field[flow->rule.num_fields].proto =
283 flow->table_key.proto_field[flow->rule.num_fields].field.eth =
285 flow->table_key.key_size += key_field->size;
287 flow->rule.num_fields += 1;
293 * Parse the vid field of the vlan rte flow item.
295 * This will create classifier rule that matches vid.
297 * @param spec Pointer to the specific flow item.
298 * @param mask Pointer to the specific flow item's mask.
299 * @param flow Pointer to the flow.
300 * @return 0 in case of success, negative error value otherwise.
303 mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
304 const struct rte_flow_item_vlan *mask __rte_unused,
305 struct rte_flow *flow)
307 struct pp2_cls_rule_key_field *key_field;
310 key_field = &flow->rule.fields[flow->rule.num_fields];
311 mrvl_alloc_key_mask(key_field);
314 k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
315 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
317 flow->table_key.proto_field[flow->rule.num_fields].proto =
319 flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
321 flow->table_key.key_size += key_field->size;
323 flow->rule.num_fields += 1;
329 * Parse the pri field of the vlan rte flow item.
331 * This will create classifier rule that matches pri.
333 * @param spec Pointer to the specific flow item.
334 * @param mask Pointer to the specific flow item's mask.
335 * @param flow Pointer to the flow.
336 * @return 0 in case of success, negative error value otherwise.
339 mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
340 const struct rte_flow_item_vlan *mask __rte_unused,
341 struct rte_flow *flow)
343 struct pp2_cls_rule_key_field *key_field;
346 key_field = &flow->rule.fields[flow->rule.num_fields];
347 mrvl_alloc_key_mask(key_field);
350 k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
351 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
353 flow->table_key.proto_field[flow->rule.num_fields].proto =
355 flow->table_key.proto_field[flow->rule.num_fields].field.vlan =
357 flow->table_key.key_size += key_field->size;
359 flow->rule.num_fields += 1;
365 * Parse the dscp field of the ipv4 rte flow item.
367 * This will create classifier rule that matches dscp field.
369 * @param spec Pointer to the specific flow item.
370 * @param mask Pointer to the specific flow item's mask.
371 * @param flow Pointer to the flow.
372 * @return 0 in case of success, negative error value otherwise.
375 mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
376 const struct rte_flow_item_ipv4 *mask,
377 struct rte_flow *flow)
379 struct pp2_cls_rule_key_field *key_field;
382 key_field = &flow->rule.fields[flow->rule.num_fields];
383 mrvl_alloc_key_mask(key_field);
386 k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
387 m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
388 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
389 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
391 flow->table_key.proto_field[flow->rule.num_fields].proto =
393 flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
395 flow->table_key.key_size += key_field->size;
397 flow->rule.num_fields += 1;
403 * Parse either source or destination ip addresses of the ipv4 flow item.
405 * This will create classifier rule that matches either destination
406 * or source ip field.
408 * @param spec Pointer to the specific flow item.
409 * @param mask Pointer to the specific flow item's mask.
410 * @param parse_dst Parse either destination or source ip address.
411 * @param flow Pointer to the flow.
412 * @return 0 in case of success, negative error value otherwise.
415 mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
416 const struct rte_flow_item_ipv4 *mask,
417 int parse_dst, struct rte_flow *flow)
419 struct pp2_cls_rule_key_field *key_field;
423 memset(&k, 0, sizeof(k));
425 k.s_addr = spec->hdr.dst_addr;
426 m = rte_be_to_cpu_32(mask->hdr.dst_addr);
428 flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
431 k.s_addr = spec->hdr.src_addr;
432 m = rte_be_to_cpu_32(mask->hdr.src_addr);
434 flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
438 key_field = &flow->rule.fields[flow->rule.num_fields];
439 mrvl_alloc_key_mask(key_field);
442 inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
443 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
445 flow->table_key.proto_field[flow->rule.num_fields].proto =
447 flow->table_key.key_size += key_field->size;
449 flow->rule.num_fields += 1;
/**
 * Helper for parsing destination ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	/* Delegate to the common IPv4 address parser (destination). */
	return mrvl_parse_ip4_addr(spec, mask, 1, flow);
}
/**
 * Helper for parsing source ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	/* Delegate to the common IPv4 address parser (source). */
	return mrvl_parse_ip4_addr(spec, mask, 0, flow);
}
487 * Parse the proto field of the ipv4 rte flow item.
489 * This will create classifier rule that matches proto field.
491 * @param spec Pointer to the specific flow item.
492 * @param mask Pointer to the specific flow item's mask.
493 * @param flow Pointer to the flow.
494 * @return 0 in case of success, negative error value otherwise.
497 mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
498 const struct rte_flow_item_ipv4 *mask __rte_unused,
499 struct rte_flow *flow)
501 struct pp2_cls_rule_key_field *key_field;
502 uint8_t k = spec->hdr.next_proto_id;
504 key_field = &flow->rule.fields[flow->rule.num_fields];
505 mrvl_alloc_key_mask(key_field);
508 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
510 flow->table_key.proto_field[flow->rule.num_fields].proto =
512 flow->table_key.proto_field[flow->rule.num_fields].field.ipv4 =
514 flow->table_key.key_size += key_field->size;
516 flow->rule.num_fields += 1;
522 * Parse either source or destination ip addresses of the ipv6 rte flow item.
524 * This will create classifier rule that matches either destination
525 * or source ip field.
527 * @param spec Pointer to the specific flow item.
528 * @param mask Pointer to the specific flow item's mask.
529 * @param parse_dst Parse either destination or source ipv6 address.
530 * @param flow Pointer to the flow.
531 * @return 0 in case of success, negative error value otherwise.
534 mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
535 const struct rte_flow_item_ipv6 *mask,
536 int parse_dst, struct rte_flow *flow)
538 struct pp2_cls_rule_key_field *key_field;
539 int size = sizeof(spec->hdr.dst_addr);
540 struct in6_addr k, m;
542 memset(&k, 0, sizeof(k));
544 memcpy(k.s6_addr, spec->hdr.dst_addr, size);
545 memcpy(m.s6_addr, mask->hdr.dst_addr, size);
547 flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
550 memcpy(k.s6_addr, spec->hdr.src_addr, size);
551 memcpy(m.s6_addr, mask->hdr.src_addr, size);
553 flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
557 key_field = &flow->rule.fields[flow->rule.num_fields];
558 mrvl_alloc_key_mask(key_field);
559 key_field->size = 16;
561 inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
562 inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
564 flow->table_key.proto_field[flow->rule.num_fields].proto =
566 flow->table_key.key_size += key_field->size;
568 flow->rule.num_fields += 1;
/**
 * Helper for parsing destination ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	/* Delegate to the common IPv6 address parser (destination). */
	return mrvl_parse_ip6_addr(spec, mask, 1, flow);
}
/**
 * Helper for parsing source ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	/* Delegate to the common IPv6 address parser (source). */
	return mrvl_parse_ip6_addr(spec, mask, 0, flow);
}
606 * Parse the flow label of the ipv6 flow item.
608 * This will create classifier rule that matches flow field.
610 * @param spec Pointer to the specific flow item.
611 * @param mask Pointer to the specific flow item's mask.
612 * @param flow Pointer to the flow.
613 * @return 0 in case of success, negative error value otherwise.
616 mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
617 const struct rte_flow_item_ipv6 *mask,
618 struct rte_flow *flow)
620 struct pp2_cls_rule_key_field *key_field;
621 uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
622 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
624 key_field = &flow->rule.fields[flow->rule.num_fields];
625 mrvl_alloc_key_mask(key_field);
628 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
629 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
631 flow->table_key.proto_field[flow->rule.num_fields].proto =
633 flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
635 flow->table_key.key_size += key_field->size;
637 flow->rule.num_fields += 1;
643 * Parse the next header of the ipv6 flow item.
645 * This will create classifier rule that matches next header field.
647 * @param spec Pointer to the specific flow item.
648 * @param mask Pointer to the specific flow item's mask.
649 * @param flow Pointer to the flow.
650 * @return 0 in case of success, negative error value otherwise.
653 mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
654 const struct rte_flow_item_ipv6 *mask __rte_unused,
655 struct rte_flow *flow)
657 struct pp2_cls_rule_key_field *key_field;
658 uint8_t k = spec->hdr.proto;
660 key_field = &flow->rule.fields[flow->rule.num_fields];
661 mrvl_alloc_key_mask(key_field);
664 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
666 flow->table_key.proto_field[flow->rule.num_fields].proto =
668 flow->table_key.proto_field[flow->rule.num_fields].field.ipv6 =
669 MV_NET_IP6_F_NEXT_HDR;
670 flow->table_key.key_size += key_field->size;
672 flow->rule.num_fields += 1;
678 * Parse destination or source port of the tcp flow item.
680 * This will create classifier rule that matches either destination or
683 * @param spec Pointer to the specific flow item.
684 * @param mask Pointer to the specific flow item's mask.
685 * @param parse_dst Parse either destination or source port.
686 * @param flow Pointer to the flow.
687 * @return 0 in case of success, negative error value otherwise.
690 mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
691 const struct rte_flow_item_tcp *mask __rte_unused,
692 int parse_dst, struct rte_flow *flow)
694 struct pp2_cls_rule_key_field *key_field;
697 key_field = &flow->rule.fields[flow->rule.num_fields];
698 mrvl_alloc_key_mask(key_field);
702 k = rte_be_to_cpu_16(spec->hdr.dst_port);
704 flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
707 k = rte_be_to_cpu_16(spec->hdr.src_port);
709 flow->table_key.proto_field[flow->rule.num_fields].field.tcp =
713 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
715 flow->table_key.proto_field[flow->rule.num_fields].proto =
717 flow->table_key.key_size += key_field->size;
719 flow->rule.num_fields += 1;
/**
 * Helper for parsing the tcp source port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	/* Delegate to the common TCP port parser (source). */
	return mrvl_parse_tcp_port(spec, mask, 0, flow);
}
/**
 * Helper for parsing the tcp destination port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	/* Delegate to the common TCP port parser (destination). */
	return mrvl_parse_tcp_port(spec, mask, 1, flow);
}
757 * Parse destination or source port of the udp flow item.
759 * This will create classifier rule that matches either destination or
762 * @param spec Pointer to the specific flow item.
763 * @param mask Pointer to the specific flow item's mask.
764 * @param parse_dst Parse either destination or source port.
765 * @param flow Pointer to the flow.
766 * @return 0 in case of success, negative error value otherwise.
769 mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
770 const struct rte_flow_item_udp *mask __rte_unused,
771 int parse_dst, struct rte_flow *flow)
773 struct pp2_cls_rule_key_field *key_field;
776 key_field = &flow->rule.fields[flow->rule.num_fields];
777 mrvl_alloc_key_mask(key_field);
781 k = rte_be_to_cpu_16(spec->hdr.dst_port);
783 flow->table_key.proto_field[flow->rule.num_fields].field.udp =
786 k = rte_be_to_cpu_16(spec->hdr.src_port);
788 flow->table_key.proto_field[flow->rule.num_fields].field.udp =
792 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
794 flow->table_key.proto_field[flow->rule.num_fields].proto =
796 flow->table_key.key_size += key_field->size;
798 flow->rule.num_fields += 1;
/**
 * Helper for parsing the udp source port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	/* Delegate to the common UDP port parser (source). */
	return mrvl_parse_udp_port(spec, mask, 0, flow);
}
/**
 * Helper for parsing the udp destination port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static int
mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	/* Delegate to the common UDP port parser (destination). */
	return mrvl_parse_udp_port(spec, mask, 1, flow);
}
836 * Parse eth flow item.
838 * @param item Pointer to the flow item.
839 * @param flow Pointer to the flow.
840 * @param error Pointer to the flow error.
841 * @returns 0 on success, negative value otherwise.
844 mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
845 struct rte_flow_error *error)
847 const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
848 struct rte_ether_addr zero;
851 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
852 &rte_flow_item_eth_mask,
853 sizeof(struct rte_flow_item_eth), error);
857 memset(&zero, 0, sizeof(zero));
859 if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
860 ret = mrvl_parse_dmac(spec, mask, flow);
865 if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
866 ret = mrvl_parse_smac(spec, mask, flow);
872 MRVL_LOG(WARNING, "eth type mask is ignored");
873 ret = mrvl_parse_type(spec, mask, flow);
880 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
881 "Reached maximum number of fields in cls tbl key\n");
886 * Parse vlan flow item.
888 * @param item Pointer to the flow item.
889 * @param flow Pointer to the flow.
890 * @param error Pointer to the flow error.
891 * @returns 0 on success, negative value otherwise.
894 mrvl_parse_vlan(const struct rte_flow_item *item,
895 struct rte_flow *flow,
896 struct rte_flow_error *error)
898 const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
902 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
903 &rte_flow_item_vlan_mask,
904 sizeof(struct rte_flow_item_vlan), error);
908 m = rte_be_to_cpu_16(mask->tci);
909 if (m & MRVL_VLAN_ID_MASK) {
910 MRVL_LOG(WARNING, "vlan id mask is ignored");
911 ret = mrvl_parse_vlan_id(spec, mask, flow);
916 if (m & MRVL_VLAN_PRI_MASK) {
917 MRVL_LOG(WARNING, "vlan pri mask is ignored");
918 ret = mrvl_parse_vlan_pri(spec, mask, flow);
923 if (mask->inner_type) {
924 struct rte_flow_item_eth spec_eth = {
925 .type = spec->inner_type,
927 struct rte_flow_item_eth mask_eth = {
928 .type = mask->inner_type,
931 /* TPID is not supported so if ETH_TYPE was selected,
932 * error is return. else, classify eth-type with the tpid value
934 for (i = 0; i < flow->rule.num_fields; i++)
935 if (flow->table_key.proto_field[i].proto ==
937 flow->table_key.proto_field[i].field.eth ==
939 rte_flow_error_set(error, ENOTSUP,
940 RTE_FLOW_ERROR_TYPE_ITEM,
942 "VLAN TPID matching is not supported");
946 MRVL_LOG(WARNING, "inner eth type mask is ignored");
947 ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
954 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
955 "Reached maximum number of fields in cls tbl key\n");
960 * Parse ipv4 flow item.
962 * @param item Pointer to the flow item.
963 * @param flow Pointer to the flow.
964 * @param error Pointer to the flow error.
965 * @returns 0 on success, negative value otherwise.
968 mrvl_parse_ip4(const struct rte_flow_item *item,
969 struct rte_flow *flow,
970 struct rte_flow_error *error)
972 const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
975 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
976 &rte_flow_item_ipv4_mask,
977 sizeof(struct rte_flow_item_ipv4), error);
981 if (mask->hdr.version_ihl ||
982 mask->hdr.total_length ||
983 mask->hdr.packet_id ||
984 mask->hdr.fragment_offset ||
985 mask->hdr.time_to_live ||
986 mask->hdr.hdr_checksum) {
987 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
988 NULL, "Not supported by classifier\n");
992 if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
993 ret = mrvl_parse_ip4_dscp(spec, mask, flow);
998 if (mask->hdr.src_addr) {
999 ret = mrvl_parse_ip4_sip(spec, mask, flow);
1004 if (mask->hdr.dst_addr) {
1005 ret = mrvl_parse_ip4_dip(spec, mask, flow);
1010 if (mask->hdr.next_proto_id) {
1011 MRVL_LOG(WARNING, "next proto id mask is ignored");
1012 ret = mrvl_parse_ip4_proto(spec, mask, flow);
1019 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1020 "Reached maximum number of fields in cls tbl key\n");
1025 * Parse ipv6 flow item.
1027 * @param item Pointer to the flow item.
1028 * @param flow Pointer to the flow.
1029 * @param error Pointer to the flow error.
1030 * @returns 0 on success, negative value otherwise.
1033 mrvl_parse_ip6(const struct rte_flow_item *item,
1034 struct rte_flow *flow,
1035 struct rte_flow_error *error)
1037 const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
1038 struct rte_ipv6_hdr zero;
1042 ret = mrvl_parse_init(item, (const void **)&spec,
1043 (const void **)&mask,
1044 &rte_flow_item_ipv6_mask,
1045 sizeof(struct rte_flow_item_ipv6),
1050 memset(&zero, 0, sizeof(zero));
1052 if (mask->hdr.payload_len ||
1053 mask->hdr.hop_limits) {
1054 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1055 NULL, "Not supported by classifier\n");
1059 if (memcmp(mask->hdr.src_addr,
1060 zero.src_addr, sizeof(mask->hdr.src_addr))) {
1061 ret = mrvl_parse_ip6_sip(spec, mask, flow);
1066 if (memcmp(mask->hdr.dst_addr,
1067 zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
1068 ret = mrvl_parse_ip6_dip(spec, mask, flow);
1073 flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
1075 ret = mrvl_parse_ip6_flow(spec, mask, flow);
1080 if (mask->hdr.proto) {
1081 MRVL_LOG(WARNING, "next header mask is ignored");
1082 ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
1089 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1090 "Reached maximum number of fields in cls tbl key\n");
1095 * Parse tcp flow item.
1097 * @param item Pointer to the flow item.
1098 * @param flow Pointer to the flow.
1099 * @param error Pointer to the flow error.
1100 * @returns 0 on success, negative value otherwise.
1103 mrvl_parse_tcp(const struct rte_flow_item *item,
1104 struct rte_flow *flow,
1105 struct rte_flow_error *error)
1107 const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1110 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1111 &rte_flow_item_tcp_mask,
1112 sizeof(struct rte_flow_item_tcp), error);
1116 if (mask->hdr.sent_seq ||
1117 mask->hdr.recv_ack ||
1118 mask->hdr.data_off ||
1119 mask->hdr.tcp_flags ||
1122 mask->hdr.tcp_urp) {
1123 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1124 NULL, "Not supported by classifier\n");
1128 if (mask->hdr.src_port) {
1129 MRVL_LOG(WARNING, "tcp sport mask is ignored");
1130 ret = mrvl_parse_tcp_sport(spec, mask, flow);
1135 if (mask->hdr.dst_port) {
1136 MRVL_LOG(WARNING, "tcp dport mask is ignored");
1137 ret = mrvl_parse_tcp_dport(spec, mask, flow);
1144 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1145 "Reached maximum number of fields in cls tbl key\n");
1150 * Parse udp flow item.
1152 * @param item Pointer to the flow item.
1153 * @param flow Pointer to the flow.
1154 * @param error Pointer to the flow error.
1155 * @returns 0 on success, negative value otherwise.
1158 mrvl_parse_udp(const struct rte_flow_item *item,
1159 struct rte_flow *flow,
1160 struct rte_flow_error *error)
1162 const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1165 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1166 &rte_flow_item_udp_mask,
1167 sizeof(struct rte_flow_item_udp), error);
1171 if (mask->hdr.dgram_len ||
1172 mask->hdr.dgram_cksum) {
1173 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1174 NULL, "Not supported by classifier\n");
1178 if (mask->hdr.src_port) {
1179 MRVL_LOG(WARNING, "udp sport mask is ignored");
1180 ret = mrvl_parse_udp_sport(spec, mask, flow);
1185 if (mask->hdr.dst_port) {
1186 MRVL_LOG(WARNING, "udp dport mask is ignored");
1187 ret = mrvl_parse_udp_dport(spec, mask, flow);
1194 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1195 "Reached maximum number of fields in cls tbl key\n");
/**
 * Structure used to map specific flow pattern to the pattern parse callback
 * which will iterate over each pattern item and extract relevant data.
 */
static const struct {
	/* rte_flow item type this entry handles. */
	const enum rte_flow_item_type pattern_type;
	/* Parser invoked for a matching item; fills the classifier rule. */
	int (*parse)(const struct rte_flow_item *pattern,
		struct rte_flow *flow,
		struct rte_flow_error *error);
} mrvl_patterns[] = {
	{ RTE_FLOW_ITEM_TYPE_ETH, mrvl_parse_eth },
	{ RTE_FLOW_ITEM_TYPE_VLAN, mrvl_parse_vlan },
	{ RTE_FLOW_ITEM_TYPE_IPV4, mrvl_parse_ip4 },
	{ RTE_FLOW_ITEM_TYPE_IPV6, mrvl_parse_ip6 },
	{ RTE_FLOW_ITEM_TYPE_TCP, mrvl_parse_tcp },
	{ RTE_FLOW_ITEM_TYPE_UDP, mrvl_parse_udp },
	/* Sentinel: lookup loops terminate at the END entry. */
	{ RTE_FLOW_ITEM_TYPE_END, NULL }
};
1219 * Parse flow attribute.
1221 * This will check whether the provided attribute's flags are supported.
1223 * @param priv Unused
1224 * @param attr Pointer to the flow attribute.
1225 * @param flow Unused
1226 * @param error Pointer to the flow error.
1227 * @returns 0 in case of success, negative value otherwise.
1230 mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
1231 const struct rte_flow_attr *attr,
1232 struct rte_flow *flow __rte_unused,
1233 struct rte_flow_error *error)
1236 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
1237 NULL, "NULL attribute");
1242 rte_flow_error_set(error, ENOTSUP,
1243 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
1244 "Groups are not supported");
1247 if (attr->priority) {
1248 rte_flow_error_set(error, ENOTSUP,
1249 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
1250 "Priorities are not supported");
1253 if (!attr->ingress) {
1254 rte_flow_error_set(error, ENOTSUP,
1255 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
1256 "Only ingress is supported");
1260 rte_flow_error_set(error, ENOTSUP,
1261 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1262 "Egress is not supported");
1265 if (attr->transfer) {
1266 rte_flow_error_set(error, ENOTSUP,
1267 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
1268 "Transfer is not supported");
1276 * Parse flow pattern.
1278 * Specific classifier rule will be created as well.
1280 * @param priv Unused
1281 * @param pattern Pointer to the flow pattern.
1282 * @param flow Pointer to the flow.
1283 * @param error Pointer to the flow error.
1284 * @returns 0 in case of success, negative value otherwise.
1287 mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
1288 const struct rte_flow_item pattern[],
1289 struct rte_flow *flow,
1290 struct rte_flow_error *error)
1295 for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1296 if (pattern[i].type == RTE_FLOW_ITEM_TYPE_VOID)
1298 for (j = 0; mrvl_patterns[j].pattern_type !=
1299 RTE_FLOW_ITEM_TYPE_END; j++) {
1300 if (mrvl_patterns[j].pattern_type != pattern[i].type)
1303 if (flow->rule.num_fields >=
1304 PP2_CLS_TBL_MAX_NUM_FIELDS) {
1305 rte_flow_error_set(error, ENOSPC,
1306 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1308 "too many pattern (max %d)");
1312 ret = mrvl_patterns[j].parse(&pattern[i], flow, error);
1314 mrvl_free_all_key_mask(&flow->rule);
1319 if (mrvl_patterns[j].pattern_type == RTE_FLOW_ITEM_TYPE_END) {
1320 rte_flow_error_set(error, ENOTSUP,
1321 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1322 "Unsupported pattern");
1327 flow->table_key.num_fields = flow->rule.num_fields;
1333 * Parse flow actions.
1335 * @param priv Pointer to the port's private data.
1336 * @param actions Pointer the action table.
1337 * @param flow Pointer to the flow.
1338 * @param error Pointer to the flow error.
1339 * @returns 0 in case of success, negative value otherwise.
1342 mrvl_flow_parse_actions(struct mrvl_priv *priv,
1343 const struct rte_flow_action actions[],
1344 struct rte_flow *flow,
1345 struct rte_flow_error *error)
1347 const struct rte_flow_action *action = actions;
1350 for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1351 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1354 if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
1355 flow->cos.ppio = priv->ppio;
1357 flow->action.type = PP2_CLS_TBL_ACT_DROP;
1358 flow->action.cos = &flow->cos;
1360 } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1361 const struct rte_flow_action_queue *q =
1362 (const struct rte_flow_action_queue *)
1365 if (q->index > priv->nb_rx_queues) {
1366 rte_flow_error_set(error, EINVAL,
1367 RTE_FLOW_ERROR_TYPE_ACTION,
1369 "Queue index out of range");
1373 if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
1375 * Unknown TC mapping, mapping will not have
1379 "Unknown TC mapping for queue %hu eth%hhu",
1380 q->index, priv->ppio_id);
1382 rte_flow_error_set(error, EFAULT,
1383 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1389 "Action: Assign packets to queue %d, tc:%d, q:%d",
1390 q->index, priv->rxq_map[q->index].tc,
1391 priv->rxq_map[q->index].inq);
1393 flow->cos.ppio = priv->ppio;
1394 flow->cos.tc = priv->rxq_map[q->index].tc;
1395 flow->action.type = PP2_CLS_TBL_ACT_DONE;
1396 flow->action.cos = &flow->cos;
1398 } else if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
1399 const struct rte_flow_action_meter *meter;
1400 struct mrvl_mtr *mtr;
1402 meter = action->conf;
1404 return -rte_flow_error_set(error, EINVAL,
1405 RTE_FLOW_ERROR_TYPE_ACTION,
1406 NULL, "Invalid meter\n");
1408 LIST_FOREACH(mtr, &priv->mtrs, next)
1409 if (mtr->mtr_id == meter->mtr_id)
1413 return -rte_flow_error_set(error, EINVAL,
1414 RTE_FLOW_ERROR_TYPE_ACTION,
1416 "Meter id does not exist\n");
1418 if (!mtr->shared && mtr->refcnt)
1419 return -rte_flow_error_set(error, EPERM,
1420 RTE_FLOW_ERROR_TYPE_ACTION,
1422 "Meter cannot be shared\n");
1425 * In case cos has already been set
1428 if (!flow->cos.ppio) {
1429 flow->cos.ppio = priv->ppio;
1433 flow->action.type = PP2_CLS_TBL_ACT_DONE;
1434 flow->action.cos = &flow->cos;
1435 flow->action.plcr = mtr->enabled ? mtr->plcr : NULL;
1440 rte_flow_error_set(error, ENOTSUP,
1441 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1442 "Action not supported");
1448 rte_flow_error_set(error, EINVAL,
1449 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1450 "Action not specified");
1458 * Parse flow attribute, pattern and actions.
1460 * @param priv Pointer to the port's private data.
1461 * @param attr Pointer to the flow attribute.
1462 * @param pattern Pointer to the flow pattern.
1463 * @param actions Pointer to the flow actions.
1464 * @param flow Pointer to the flow.
1465 * @param error Pointer to the flow error.
1466 * @returns 0 on success, negative value otherwise.
1469 mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
1470 const struct rte_flow_item pattern[],
1471 const struct rte_flow_action actions[],
1472 struct rte_flow *flow,
1473 struct rte_flow_error *error)
1477 ret = mrvl_flow_parse_attr(priv, attr, flow, error);
1481 ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
1485 return mrvl_flow_parse_actions(priv, actions, flow, error);
1489 * Get engine type for the given flow.
1491 * @param field Pointer to the flow.
1492 * @returns The type of the engine.
1494 static inline enum pp2_cls_tbl_type
1495 mrvl_engine_type(const struct rte_flow *flow)
1499 for (i = 0; i < flow->rule.num_fields; i++)
1500 size += flow->rule.fields[i].size;
1503 * For maskable engine type the key size must be up to 8 bytes.
1504 * For keys with size bigger than 8 bytes, engine type must
1505 * be set to exact match.
1508 return PP2_CLS_TBL_EXACT_MATCH;
1510 return PP2_CLS_TBL_MASKABLE;
1514 * Create classifier table.
1516 * @param dev Pointer to the device.
1517 * @param flow Pointer to the very first flow.
1518 * @returns 0 in case of success, negative value otherwise.
1521 mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
1523 struct mrvl_priv *priv = dev->data->dev_private;
1524 struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
1527 if (priv->cls_tbl) {
1528 pp2_cls_tbl_deinit(priv->cls_tbl);
1529 priv->cls_tbl = NULL;
1532 memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
1534 priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
1535 MRVL_LOG(INFO, "Setting cls search engine type to %s",
1536 priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
1537 "exact" : "maskable");
1538 priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
1539 priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
1540 priv->cls_tbl_params.default_act.cos = &first_flow->cos;
1541 memcpy(key, &first_flow->table_key, sizeof(struct pp2_cls_tbl_key));
1543 ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
1549 * Check whether new flow can be added to the table
1551 * @param priv Pointer to the port's private data.
1552 * @param flow Pointer to the new flow.
1553 * @return 1 in case flow can be added, 0 otherwise.
1556 mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
1558 int same = memcmp(&flow->table_key, &priv->cls_tbl_params.key,
1559 sizeof(struct pp2_cls_tbl_key)) == 0;
1561 return same && mrvl_engine_type(flow) == priv->cls_tbl_params.type;
1565 * DPDK flow create callback called when flow is to be created.
1567 * @param dev Pointer to the device.
1568 * @param attr Pointer to the flow attribute.
1569 * @param pattern Pointer to the flow pattern.
1570 * @param actions Pointer to the flow actions.
1571 * @param error Pointer to the flow error.
1572 * @returns Pointer to the created flow in case of success, NULL otherwise.
1574 static struct rte_flow *
1575 mrvl_flow_create(struct rte_eth_dev *dev,
1576 const struct rte_flow_attr *attr,
1577 const struct rte_flow_item pattern[],
1578 const struct rte_flow_action actions[],
1579 struct rte_flow_error *error)
1581 struct mrvl_priv *priv = dev->data->dev_private;
1582 struct rte_flow *flow, *first;
1585 if (!dev->data->dev_started) {
1586 rte_flow_error_set(error, EINVAL,
1587 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1588 "Port must be started first\n");
1592 flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
1596 ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
1603 * 1. In case table does not exist - create one.
1604 * 2. In case table exists, is empty and new flow cannot be added
1606 * 3. In case table is not empty and new flow matches table format
1608 * 4. Otherwise flow cannot be added.
1610 first = LIST_FIRST(&priv->flows);
1611 if (!priv->cls_tbl) {
1612 ret = mrvl_create_cls_table(dev, flow);
1613 } else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
1614 ret = mrvl_create_cls_table(dev, flow);
1615 } else if (mrvl_flow_can_be_added(priv, flow)) {
1618 rte_flow_error_set(error, EINVAL,
1619 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1620 "Pattern does not match cls table format\n");
1625 rte_flow_error_set(error, EINVAL,
1626 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1627 "Failed to create cls table\n");
1631 ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
1633 rte_flow_error_set(error, EINVAL,
1634 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1635 "Failed to add rule\n");
1639 LIST_INSERT_HEAD(&priv->flows, flow, next);
1648 * Remove classifier rule associated with given flow.
1650 * @param priv Pointer to the port's private data.
1651 * @param flow Pointer to the flow.
1652 * @param error Pointer to the flow error.
1653 * @returns 0 in case of success, negative value otherwise.
1656 mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
1657 struct rte_flow_error *error)
1661 if (!priv->cls_tbl) {
1662 rte_flow_error_set(error, EINVAL,
1663 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1664 "Classifier table not initialized");
1668 ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
1670 rte_flow_error_set(error, EINVAL,
1671 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1672 "Failed to remove rule");
1676 mrvl_free_all_key_mask(&flow->rule);
1679 flow->mtr->refcnt--;
1687 * DPDK flow destroy callback called when flow is to be removed.
1689 * @param dev Pointer to the device.
1690 * @param flow Pointer to the flow.
1691 * @param error Pointer to the flow error.
1692 * @returns 0 in case of success, negative value otherwise.
1695 mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1696 struct rte_flow_error *error)
1698 struct mrvl_priv *priv = dev->data->dev_private;
1702 LIST_FOREACH(f, &priv->flows, next) {
1708 rte_flow_error_set(error, EINVAL,
1709 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1710 "Rule was not found");
1714 LIST_REMOVE(f, next);
1716 ret = mrvl_flow_remove(priv, flow, error);
1726 * DPDK flow callback called to verify given attribute, pattern and actions.
1728 * @param dev Pointer to the device.
1729 * @param attr Pointer to the flow attribute.
1730 * @param pattern Pointer to the flow pattern.
1731 * @param actions Pointer to the flow actions.
1732 * @param error Pointer to the flow error.
1733 * @returns 0 on success, negative value otherwise.
1736 mrvl_flow_validate(struct rte_eth_dev *dev,
1737 const struct rte_flow_attr *attr,
1738 const struct rte_flow_item pattern[],
1739 const struct rte_flow_action actions[],
1740 struct rte_flow_error *error)
1742 static struct rte_flow *flow;
1744 flow = mrvl_flow_create(dev, attr, pattern, actions, error);
1748 mrvl_flow_destroy(dev, flow, error);
1754 * DPDK flow flush callback called when flows are to be flushed.
1756 * @param dev Pointer to the device.
1757 * @param error Pointer to the flow error.
1758 * @returns 0 in case of success, negative value otherwise.
1761 mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1763 struct mrvl_priv *priv = dev->data->dev_private;
1765 while (!LIST_EMPTY(&priv->flows)) {
1766 struct rte_flow *flow = LIST_FIRST(&priv->flows);
1767 int ret = mrvl_flow_remove(priv, flow, error);
1771 LIST_REMOVE(flow, next);
1775 if (priv->cls_tbl) {
1776 pp2_cls_tbl_deinit(priv->cls_tbl);
1777 priv->cls_tbl = NULL;
1784 * DPDK flow isolate callback called to isolate port.
1786 * @param dev Pointer to the device.
1787 * @param enable Pass 0/1 to disable/enable port isolation.
1788 * @param error Pointer to the flow error.
1789 * @returns 0 in case of success, negative value otherwise.
1792 mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
1793 struct rte_flow_error *error)
1795 struct mrvl_priv *priv = dev->data->dev_private;
1797 if (dev->data->dev_started) {
1798 rte_flow_error_set(error, EBUSY,
1799 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1800 NULL, "Port must be stopped first\n");
1804 priv->isolated = enable;
1809 const struct rte_flow_ops mrvl_flow_ops = {
1810 .validate = mrvl_flow_validate,
1811 .create = mrvl_flow_create,
1812 .destroy = mrvl_flow_destroy,
1813 .flush = mrvl_flow_flush,
1814 .isolate = mrvl_flow_isolate
1818 * Initialize flow resources.
1820 * @param dev Pointer to the device.
1823 mrvl_flow_init(struct rte_eth_dev *dev)
1825 struct mrvl_priv *priv = dev->data->dev_private;
1827 LIST_INIT(&priv->flows);
1831 * Cleanup flow resources.
1833 * @param dev Pointer to the device.
1836 mrvl_flow_deinit(struct rte_eth_dev *dev)
1838 mrvl_flow_flush(dev, NULL);