1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018 Marvell International Ltd.
3 * Copyright(c) 2018 Semihalf.
#include <errno.h>
#include <stdio.h>
#include <string.h>

#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include <arpa/inet.h>

#include "mrvl_flow.h"
17 /** Number of rules in the classifier table. */
18 #define MRVL_CLS_MAX_NUM_RULES 20
20 /** Size of the classifier key and mask strings. */
21 #define MRVL_CLS_STR_SIZE_MAX 40
23 #define MRVL_VLAN_ID_MASK 0x0fff
24 #define MRVL_VLAN_PRI_MASK 0x7000
25 #define MRVL_IPV4_DSCP_MASK 0xfc
26 #define MRVL_IPV4_ADDR_MASK 0xffffffff
27 #define MRVL_IPV6_FLOW_MASK 0x0fffff
30 * Allocate memory for classifier rule key and mask fields.
32 * @param field Pointer to the classifier rule.
33 * @returns 0 in case of success, negative value otherwise.
36 mrvl_alloc_key_mask(struct pp2_cls_rule_key_field *field)
38 unsigned int id = rte_socket_id();
40 field->key = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
44 field->mask = rte_zmalloc_socket(NULL, MRVL_CLS_STR_SIZE_MAX, 0, id);
58 * Free memory allocated for classifier rule key and mask fields.
60 * @param field Pointer to the classifier rule.
63 mrvl_free_key_mask(struct pp2_cls_rule_key_field *field)
66 rte_free(field->mask);
72 * Free memory allocated for all classifier rule key and mask fields.
74 * @param rule Pointer to the classifier table rule.
77 mrvl_free_all_key_mask(struct pp2_cls_tbl_rule *rule)
81 for (i = 0; i < rule->num_fields; i++)
82 mrvl_free_key_mask(&rule->fields[i]);
87 * Initialize rte flow item parsing.
89 * @param item Pointer to the flow item.
90 * @param spec_ptr Pointer to the specific item pointer.
91 * @param mask_ptr Pointer to the specific item's mask pointer.
92 * @def_mask Pointer to the default mask.
93 * @size Size of the flow item.
94 * @error Pointer to the rte flow error.
95 * @returns 0 in case of success, negative value otherwise.
98 mrvl_parse_init(const struct rte_flow_item *item,
99 const void **spec_ptr,
100 const void **mask_ptr,
101 const void *def_mask,
103 struct rte_flow_error *error)
110 memset(zeros, 0, size);
113 rte_flow_error_set(error, EINVAL,
114 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
119 if ((item->last != NULL || item->mask != NULL) && item->spec == NULL) {
120 rte_flow_error_set(error, EINVAL,
121 RTE_FLOW_ERROR_TYPE_ITEM, item,
122 "Mask or last is set without spec\n");
127 * If "mask" is not set, default mask is used,
128 * but if default mask is NULL, "mask" should be set.
130 if (item->mask == NULL) {
131 if (def_mask == NULL) {
132 rte_flow_error_set(error, EINVAL,
133 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
134 "Mask should be specified\n");
138 mask = (const uint8_t *)def_mask;
140 mask = (const uint8_t *)item->mask;
143 spec = (const uint8_t *)item->spec;
144 last = (const uint8_t *)item->last;
147 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
148 NULL, "Spec should be specified\n");
153 * If field values in "last" are either 0 or equal to the corresponding
154 * values in "spec" then they are ignored.
157 !memcmp(last, zeros, size) &&
158 memcmp(last, spec, size) != 0) {
159 rte_flow_error_set(error, ENOTSUP,
160 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
161 "Ranging is not supported\n");
172 * Parse the eth flow item.
174 * This will create classifier rule that matches either destination or source
177 * @param spec Pointer to the specific flow item.
178 * @param mask Pointer to the specific flow item's mask.
179 * @param parse_dst Parse either destination or source mac address.
180 * @param flow Pointer to the flow.
181 * @return 0 in case of success, negative error value otherwise.
184 mrvl_parse_mac(const struct rte_flow_item_eth *spec,
185 const struct rte_flow_item_eth *mask,
186 int parse_dst, struct rte_flow *flow)
188 struct pp2_cls_rule_key_field *key_field;
189 const uint8_t *k, *m;
192 k = spec->dst.addr_bytes;
193 m = mask->dst.addr_bytes;
195 flow->pattern |= F_DMAC;
197 k = spec->src.addr_bytes;
198 m = mask->src.addr_bytes;
200 flow->pattern |= F_SMAC;
203 key_field = &flow->rule.fields[flow->rule.num_fields];
204 mrvl_alloc_key_mask(key_field);
207 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX,
208 "%02x:%02x:%02x:%02x:%02x:%02x",
209 k[0], k[1], k[2], k[3], k[4], k[5]);
211 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX,
212 "%02x:%02x:%02x:%02x:%02x:%02x",
213 m[0], m[1], m[2], m[3], m[4], m[5]);
215 flow->rule.num_fields += 1;
/**
 * Helper for parsing the eth flow item destination mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_dmac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 1, flow);
}
/**
 * Helper for parsing the eth flow item source mac address.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_smac(const struct rte_flow_item_eth *spec,
		const struct rte_flow_item_eth *mask,
		struct rte_flow *flow)
{
	return mrvl_parse_mac(spec, mask, 0, flow);
}
253 * Parse the ether type field of the eth flow item.
255 * @param spec Pointer to the specific flow item.
256 * @param mask Pointer to the specific flow item's mask.
257 * @param flow Pointer to the flow.
258 * @return 0 in case of success, negative error value otherwise.
261 mrvl_parse_type(const struct rte_flow_item_eth *spec,
262 const struct rte_flow_item_eth *mask __rte_unused,
263 struct rte_flow *flow)
265 struct pp2_cls_rule_key_field *key_field;
268 key_field = &flow->rule.fields[flow->rule.num_fields];
269 mrvl_alloc_key_mask(key_field);
272 k = rte_be_to_cpu_16(spec->type);
273 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
275 flow->pattern |= F_TYPE;
276 flow->rule.num_fields += 1;
282 * Parse the vid field of the vlan rte flow item.
284 * This will create classifier rule that matches vid.
286 * @param spec Pointer to the specific flow item.
287 * @param mask Pointer to the specific flow item's mask.
288 * @param flow Pointer to the flow.
289 * @return 0 in case of success, negative error value otherwise.
292 mrvl_parse_vlan_id(const struct rte_flow_item_vlan *spec,
293 const struct rte_flow_item_vlan *mask __rte_unused,
294 struct rte_flow *flow)
296 struct pp2_cls_rule_key_field *key_field;
299 key_field = &flow->rule.fields[flow->rule.num_fields];
300 mrvl_alloc_key_mask(key_field);
303 k = rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_ID_MASK;
304 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
306 flow->pattern |= F_VLAN_ID;
307 flow->rule.num_fields += 1;
313 * Parse the pri field of the vlan rte flow item.
315 * This will create classifier rule that matches pri.
317 * @param spec Pointer to the specific flow item.
318 * @param mask Pointer to the specific flow item's mask.
319 * @param flow Pointer to the flow.
320 * @return 0 in case of success, negative error value otherwise.
323 mrvl_parse_vlan_pri(const struct rte_flow_item_vlan *spec,
324 const struct rte_flow_item_vlan *mask __rte_unused,
325 struct rte_flow *flow)
327 struct pp2_cls_rule_key_field *key_field;
330 key_field = &flow->rule.fields[flow->rule.num_fields];
331 mrvl_alloc_key_mask(key_field);
334 k = (rte_be_to_cpu_16(spec->tci) & MRVL_VLAN_PRI_MASK) >> 13;
335 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
337 flow->pattern |= F_VLAN_PRI;
338 flow->rule.num_fields += 1;
344 * Parse the dscp field of the ipv4 rte flow item.
346 * This will create classifier rule that matches dscp field.
348 * @param spec Pointer to the specific flow item.
349 * @param mask Pointer to the specific flow item's mask.
350 * @param flow Pointer to the flow.
351 * @return 0 in case of success, negative error value otherwise.
354 mrvl_parse_ip4_dscp(const struct rte_flow_item_ipv4 *spec,
355 const struct rte_flow_item_ipv4 *mask,
356 struct rte_flow *flow)
358 struct pp2_cls_rule_key_field *key_field;
361 key_field = &flow->rule.fields[flow->rule.num_fields];
362 mrvl_alloc_key_mask(key_field);
365 k = (spec->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
366 m = (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) >> 2;
367 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
368 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
370 flow->pattern |= F_IP4_TOS;
371 flow->rule.num_fields += 1;
377 * Parse either source or destination ip addresses of the ipv4 flow item.
379 * This will create classifier rule that matches either destination
380 * or source ip field.
382 * @param spec Pointer to the specific flow item.
383 * @param mask Pointer to the specific flow item's mask.
384 * @param parse_dst Parse either destination or source ip address.
385 * @param flow Pointer to the flow.
386 * @return 0 in case of success, negative error value otherwise.
389 mrvl_parse_ip4_addr(const struct rte_flow_item_ipv4 *spec,
390 const struct rte_flow_item_ipv4 *mask,
391 int parse_dst, struct rte_flow *flow)
393 struct pp2_cls_rule_key_field *key_field;
397 memset(&k, 0, sizeof(k));
399 k.s_addr = spec->hdr.dst_addr;
400 m = rte_be_to_cpu_32(mask->hdr.dst_addr);
402 flow->pattern |= F_IP4_DIP;
404 k.s_addr = spec->hdr.src_addr;
405 m = rte_be_to_cpu_32(mask->hdr.src_addr);
407 flow->pattern |= F_IP4_SIP;
410 key_field = &flow->rule.fields[flow->rule.num_fields];
411 mrvl_alloc_key_mask(key_field);
414 inet_ntop(AF_INET, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
415 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "0x%x", m);
417 flow->rule.num_fields += 1;
/**
 * Helper for parsing destination ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip4_dip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 1, flow);
}
/**
 * Helper for parsing source ip of the ipv4 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip4_sip(const struct rte_flow_item_ipv4 *spec,
		   const struct rte_flow_item_ipv4 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip4_addr(spec, mask, 0, flow);
}
455 * Parse the proto field of the ipv4 rte flow item.
457 * This will create classifier rule that matches proto field.
459 * @param spec Pointer to the specific flow item.
460 * @param mask Pointer to the specific flow item's mask.
461 * @param flow Pointer to the flow.
462 * @return 0 in case of success, negative error value otherwise.
465 mrvl_parse_ip4_proto(const struct rte_flow_item_ipv4 *spec,
466 const struct rte_flow_item_ipv4 *mask __rte_unused,
467 struct rte_flow *flow)
469 struct pp2_cls_rule_key_field *key_field;
470 uint8_t k = spec->hdr.next_proto_id;
472 key_field = &flow->rule.fields[flow->rule.num_fields];
473 mrvl_alloc_key_mask(key_field);
476 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
478 flow->pattern |= F_IP4_PROTO;
479 flow->rule.num_fields += 1;
485 * Parse either source or destination ip addresses of the ipv6 rte flow item.
487 * This will create classifier rule that matches either destination
488 * or source ip field.
490 * @param spec Pointer to the specific flow item.
491 * @param mask Pointer to the specific flow item's mask.
492 * @param parse_dst Parse either destination or source ipv6 address.
493 * @param flow Pointer to the flow.
494 * @return 0 in case of success, negative error value otherwise.
497 mrvl_parse_ip6_addr(const struct rte_flow_item_ipv6 *spec,
498 const struct rte_flow_item_ipv6 *mask,
499 int parse_dst, struct rte_flow *flow)
501 struct pp2_cls_rule_key_field *key_field;
502 int size = sizeof(spec->hdr.dst_addr);
503 struct in6_addr k, m;
505 memset(&k, 0, sizeof(k));
507 memcpy(k.s6_addr, spec->hdr.dst_addr, size);
508 memcpy(m.s6_addr, mask->hdr.dst_addr, size);
510 flow->pattern |= F_IP6_DIP;
512 memcpy(k.s6_addr, spec->hdr.src_addr, size);
513 memcpy(m.s6_addr, mask->hdr.src_addr, size);
515 flow->pattern |= F_IP6_SIP;
518 key_field = &flow->rule.fields[flow->rule.num_fields];
519 mrvl_alloc_key_mask(key_field);
520 key_field->size = 16;
522 inet_ntop(AF_INET6, &k, (char *)key_field->key, MRVL_CLS_STR_SIZE_MAX);
523 inet_ntop(AF_INET6, &m, (char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX);
525 flow->rule.num_fields += 1;
/**
 * Helper for parsing destination ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip6_dip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 1, flow);
}
/**
 * Helper for parsing source ip of the ipv6 flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_ip6_sip(const struct rte_flow_item_ipv6 *spec,
		   const struct rte_flow_item_ipv6 *mask,
		   struct rte_flow *flow)
{
	return mrvl_parse_ip6_addr(spec, mask, 0, flow);
}
563 * Parse the flow label of the ipv6 flow item.
565 * This will create classifier rule that matches flow field.
567 * @param spec Pointer to the specific flow item.
568 * @param mask Pointer to the specific flow item's mask.
569 * @param flow Pointer to the flow.
570 * @return 0 in case of success, negative error value otherwise.
573 mrvl_parse_ip6_flow(const struct rte_flow_item_ipv6 *spec,
574 const struct rte_flow_item_ipv6 *mask,
575 struct rte_flow *flow)
577 struct pp2_cls_rule_key_field *key_field;
578 uint32_t k = rte_be_to_cpu_32(spec->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK,
579 m = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
581 key_field = &flow->rule.fields[flow->rule.num_fields];
582 mrvl_alloc_key_mask(key_field);
585 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
586 snprintf((char *)key_field->mask, MRVL_CLS_STR_SIZE_MAX, "%u", m);
588 flow->pattern |= F_IP6_FLOW;
589 flow->rule.num_fields += 1;
595 * Parse the next header of the ipv6 flow item.
597 * This will create classifier rule that matches next header field.
599 * @param spec Pointer to the specific flow item.
600 * @param mask Pointer to the specific flow item's mask.
601 * @param flow Pointer to the flow.
602 * @return 0 in case of success, negative error value otherwise.
605 mrvl_parse_ip6_next_hdr(const struct rte_flow_item_ipv6 *spec,
606 const struct rte_flow_item_ipv6 *mask __rte_unused,
607 struct rte_flow *flow)
609 struct pp2_cls_rule_key_field *key_field;
610 uint8_t k = spec->hdr.proto;
612 key_field = &flow->rule.fields[flow->rule.num_fields];
613 mrvl_alloc_key_mask(key_field);
616 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
618 flow->pattern |= F_IP6_NEXT_HDR;
619 flow->rule.num_fields += 1;
625 * Parse destination or source port of the tcp flow item.
627 * This will create classifier rule that matches either destination or
630 * @param spec Pointer to the specific flow item.
631 * @param mask Pointer to the specific flow item's mask.
632 * @param parse_dst Parse either destination or source port.
633 * @param flow Pointer to the flow.
634 * @return 0 in case of success, negative error value otherwise.
637 mrvl_parse_tcp_port(const struct rte_flow_item_tcp *spec,
638 const struct rte_flow_item_tcp *mask __rte_unused,
639 int parse_dst, struct rte_flow *flow)
641 struct pp2_cls_rule_key_field *key_field;
644 key_field = &flow->rule.fields[flow->rule.num_fields];
645 mrvl_alloc_key_mask(key_field);
649 k = rte_be_to_cpu_16(spec->hdr.dst_port);
651 flow->pattern |= F_TCP_DPORT;
653 k = rte_be_to_cpu_16(spec->hdr.src_port);
655 flow->pattern |= F_TCP_SPORT;
658 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
660 flow->rule.num_fields += 1;
/**
 * Helper for parsing the tcp source port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_tcp_sport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 0, flow);
}
/**
 * Helper for parsing the tcp destination port of the tcp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_tcp_dport(const struct rte_flow_item_tcp *spec,
		     const struct rte_flow_item_tcp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_tcp_port(spec, mask, 1, flow);
}
698 * Parse destination or source port of the udp flow item.
700 * This will create classifier rule that matches either destination or
703 * @param spec Pointer to the specific flow item.
704 * @param mask Pointer to the specific flow item's mask.
705 * @param parse_dst Parse either destination or source port.
706 * @param flow Pointer to the flow.
707 * @return 0 in case of success, negative error value otherwise.
710 mrvl_parse_udp_port(const struct rte_flow_item_udp *spec,
711 const struct rte_flow_item_udp *mask __rte_unused,
712 int parse_dst, struct rte_flow *flow)
714 struct pp2_cls_rule_key_field *key_field;
717 key_field = &flow->rule.fields[flow->rule.num_fields];
718 mrvl_alloc_key_mask(key_field);
722 k = rte_be_to_cpu_16(spec->hdr.dst_port);
724 flow->pattern |= F_UDP_DPORT;
726 k = rte_be_to_cpu_16(spec->hdr.src_port);
728 flow->pattern |= F_UDP_SPORT;
731 snprintf((char *)key_field->key, MRVL_CLS_STR_SIZE_MAX, "%u", k);
733 flow->rule.num_fields += 1;
/**
 * Helper for parsing the udp source port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_udp_sport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 0, flow);
}
/**
 * Helper for parsing the udp destination port of the udp flow item.
 *
 * @param spec Pointer to the specific flow item.
 * @param mask Pointer to the specific flow item's mask.
 * @param flow Pointer to the flow.
 * @return 0 in case of success, negative error value otherwise.
 */
static inline int
mrvl_parse_udp_dport(const struct rte_flow_item_udp *spec,
		     const struct rte_flow_item_udp *mask,
		     struct rte_flow *flow)
{
	return mrvl_parse_udp_port(spec, mask, 1, flow);
}
771 * Parse eth flow item.
773 * @param item Pointer to the flow item.
774 * @param flow Pointer to the flow.
775 * @param error Pointer to the flow error.
776 * @returns 0 on success, negative value otherwise.
779 mrvl_parse_eth(const struct rte_flow_item *item, struct rte_flow *flow,
780 struct rte_flow_error *error)
782 const struct rte_flow_item_eth *spec = NULL, *mask = NULL;
783 struct rte_ether_addr zero;
786 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
787 &rte_flow_item_eth_mask,
788 sizeof(struct rte_flow_item_eth), error);
792 memset(&zero, 0, sizeof(zero));
794 if (memcmp(&mask->dst, &zero, sizeof(mask->dst))) {
795 ret = mrvl_parse_dmac(spec, mask, flow);
800 if (memcmp(&mask->src, &zero, sizeof(mask->src))) {
801 ret = mrvl_parse_smac(spec, mask, flow);
807 MRVL_LOG(WARNING, "eth type mask is ignored");
808 ret = mrvl_parse_type(spec, mask, flow);
815 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
816 "Reached maximum number of fields in cls tbl key\n");
821 * Parse vlan flow item.
823 * @param item Pointer to the flow item.
824 * @param flow Pointer to the flow.
825 * @param error Pointer to the flow error.
826 * @returns 0 on success, negative value otherwise.
829 mrvl_parse_vlan(const struct rte_flow_item *item,
830 struct rte_flow *flow,
831 struct rte_flow_error *error)
833 const struct rte_flow_item_vlan *spec = NULL, *mask = NULL;
837 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
838 &rte_flow_item_vlan_mask,
839 sizeof(struct rte_flow_item_vlan), error);
843 m = rte_be_to_cpu_16(mask->tci);
844 if (m & MRVL_VLAN_ID_MASK) {
845 MRVL_LOG(WARNING, "vlan id mask is ignored");
846 ret = mrvl_parse_vlan_id(spec, mask, flow);
851 if (m & MRVL_VLAN_PRI_MASK) {
852 MRVL_LOG(WARNING, "vlan pri mask is ignored");
853 ret = mrvl_parse_vlan_pri(spec, mask, flow);
858 if (flow->pattern & F_TYPE) {
859 rte_flow_error_set(error, ENOTSUP,
860 RTE_FLOW_ERROR_TYPE_ITEM, item,
861 "VLAN TPID matching is not supported");
864 if (mask->inner_type) {
865 struct rte_flow_item_eth spec_eth = {
866 .type = spec->inner_type,
868 struct rte_flow_item_eth mask_eth = {
869 .type = mask->inner_type,
872 MRVL_LOG(WARNING, "inner eth type mask is ignored");
873 ret = mrvl_parse_type(&spec_eth, &mask_eth, flow);
880 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
881 "Reached maximum number of fields in cls tbl key\n");
886 * Parse ipv4 flow item.
888 * @param item Pointer to the flow item.
889 * @param flow Pointer to the flow.
890 * @param error Pointer to the flow error.
891 * @returns 0 on success, negative value otherwise.
894 mrvl_parse_ip4(const struct rte_flow_item *item,
895 struct rte_flow *flow,
896 struct rte_flow_error *error)
898 const struct rte_flow_item_ipv4 *spec = NULL, *mask = NULL;
901 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
902 &rte_flow_item_ipv4_mask,
903 sizeof(struct rte_flow_item_ipv4), error);
907 if (mask->hdr.version_ihl ||
908 mask->hdr.total_length ||
909 mask->hdr.packet_id ||
910 mask->hdr.fragment_offset ||
911 mask->hdr.time_to_live ||
912 mask->hdr.hdr_checksum) {
913 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
914 NULL, "Not supported by classifier\n");
918 if (mask->hdr.type_of_service & MRVL_IPV4_DSCP_MASK) {
919 ret = mrvl_parse_ip4_dscp(spec, mask, flow);
924 if (mask->hdr.src_addr) {
925 ret = mrvl_parse_ip4_sip(spec, mask, flow);
930 if (mask->hdr.dst_addr) {
931 ret = mrvl_parse_ip4_dip(spec, mask, flow);
936 if (mask->hdr.next_proto_id) {
937 MRVL_LOG(WARNING, "next proto id mask is ignored");
938 ret = mrvl_parse_ip4_proto(spec, mask, flow);
945 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
946 "Reached maximum number of fields in cls tbl key\n");
951 * Parse ipv6 flow item.
953 * @param item Pointer to the flow item.
954 * @param flow Pointer to the flow.
955 * @param error Pointer to the flow error.
956 * @returns 0 on success, negative value otherwise.
959 mrvl_parse_ip6(const struct rte_flow_item *item,
960 struct rte_flow *flow,
961 struct rte_flow_error *error)
963 const struct rte_flow_item_ipv6 *spec = NULL, *mask = NULL;
964 struct rte_ipv6_hdr zero;
968 ret = mrvl_parse_init(item, (const void **)&spec,
969 (const void **)&mask,
970 &rte_flow_item_ipv6_mask,
971 sizeof(struct rte_flow_item_ipv6),
976 memset(&zero, 0, sizeof(zero));
978 if (mask->hdr.payload_len ||
979 mask->hdr.hop_limits) {
980 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
981 NULL, "Not supported by classifier\n");
985 if (memcmp(mask->hdr.src_addr,
986 zero.src_addr, sizeof(mask->hdr.src_addr))) {
987 ret = mrvl_parse_ip6_sip(spec, mask, flow);
992 if (memcmp(mask->hdr.dst_addr,
993 zero.dst_addr, sizeof(mask->hdr.dst_addr))) {
994 ret = mrvl_parse_ip6_dip(spec, mask, flow);
999 flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow) & MRVL_IPV6_FLOW_MASK;
1001 ret = mrvl_parse_ip6_flow(spec, mask, flow);
1006 if (mask->hdr.proto) {
1007 MRVL_LOG(WARNING, "next header mask is ignored");
1008 ret = mrvl_parse_ip6_next_hdr(spec, mask, flow);
1015 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1016 "Reached maximum number of fields in cls tbl key\n");
1021 * Parse tcp flow item.
1023 * @param item Pointer to the flow item.
1024 * @param flow Pointer to the flow.
1025 * @param error Pointer to the flow error.
1026 * @returns 0 on success, negative value otherwise.
1029 mrvl_parse_tcp(const struct rte_flow_item *item,
1030 struct rte_flow *flow,
1031 struct rte_flow_error *error)
1033 const struct rte_flow_item_tcp *spec = NULL, *mask = NULL;
1036 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1037 &rte_flow_item_tcp_mask,
1038 sizeof(struct rte_flow_item_tcp), error);
1042 if (mask->hdr.sent_seq ||
1043 mask->hdr.recv_ack ||
1044 mask->hdr.data_off ||
1045 mask->hdr.tcp_flags ||
1048 mask->hdr.tcp_urp) {
1049 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1050 NULL, "Not supported by classifier\n");
1054 if (mask->hdr.src_port) {
1055 MRVL_LOG(WARNING, "tcp sport mask is ignored");
1056 ret = mrvl_parse_tcp_sport(spec, mask, flow);
1061 if (mask->hdr.dst_port) {
1062 MRVL_LOG(WARNING, "tcp dport mask is ignored");
1063 ret = mrvl_parse_tcp_dport(spec, mask, flow);
1070 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1071 "Reached maximum number of fields in cls tbl key\n");
1076 * Parse udp flow item.
1078 * @param item Pointer to the flow item.
1079 * @param flow Pointer to the flow.
1080 * @param error Pointer to the flow error.
1081 * @returns 0 on success, negative value otherwise.
1084 mrvl_parse_udp(const struct rte_flow_item *item,
1085 struct rte_flow *flow,
1086 struct rte_flow_error *error)
1088 const struct rte_flow_item_udp *spec = NULL, *mask = NULL;
1091 ret = mrvl_parse_init(item, (const void **)&spec, (const void **)&mask,
1092 &rte_flow_item_udp_mask,
1093 sizeof(struct rte_flow_item_udp), error);
1097 if (mask->hdr.dgram_len ||
1098 mask->hdr.dgram_cksum) {
1099 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1100 NULL, "Not supported by classifier\n");
1104 if (mask->hdr.src_port) {
1105 MRVL_LOG(WARNING, "udp sport mask is ignored");
1106 ret = mrvl_parse_udp_sport(spec, mask, flow);
1111 if (mask->hdr.dst_port) {
1112 MRVL_LOG(WARNING, "udp dport mask is ignored");
1113 ret = mrvl_parse_udp_dport(spec, mask, flow);
1120 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1121 "Reached maximum number of fields in cls tbl key\n");
1126 * Structure used to map specific flow pattern to the pattern parse callback
1127 * which will iterate over each pattern item and extract relevant data.
1129 static const struct {
1130 const enum rte_flow_item_type pattern_type;
1131 int (*parse)(const struct rte_flow_item *pattern,
1132 struct rte_flow *flow,
1133 struct rte_flow_error *error);
1134 } mrvl_patterns[] = {
1135 { RTE_FLOW_ITEM_TYPE_ETH, mrvl_parse_eth },
1136 { RTE_FLOW_ITEM_TYPE_VLAN, mrvl_parse_vlan },
1137 { RTE_FLOW_ITEM_TYPE_IPV4, mrvl_parse_ip4 },
1138 { RTE_FLOW_ITEM_TYPE_IPV6, mrvl_parse_ip6 },
1139 { RTE_FLOW_ITEM_TYPE_TCP, mrvl_parse_tcp },
1140 { RTE_FLOW_ITEM_TYPE_UDP, mrvl_parse_udp },
1141 { RTE_FLOW_ITEM_TYPE_END, NULL }
1145 * Parse flow attribute.
1147 * This will check whether the provided attribute's flags are supported.
1149 * @param priv Unused
1150 * @param attr Pointer to the flow attribute.
1151 * @param flow Unused
1152 * @param error Pointer to the flow error.
1153 * @returns 0 in case of success, negative value otherwise.
1156 mrvl_flow_parse_attr(struct mrvl_priv *priv __rte_unused,
1157 const struct rte_flow_attr *attr,
1158 struct rte_flow *flow __rte_unused,
1159 struct rte_flow_error *error)
1162 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
1163 NULL, "NULL attribute");
1168 rte_flow_error_set(error, ENOTSUP,
1169 RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
1170 "Groups are not supported");
1173 if (attr->priority) {
1174 rte_flow_error_set(error, ENOTSUP,
1175 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
1176 "Priorities are not supported");
1179 if (!attr->ingress) {
1180 rte_flow_error_set(error, ENOTSUP,
1181 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, NULL,
1182 "Only ingress is supported");
1186 rte_flow_error_set(error, ENOTSUP,
1187 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1188 "Egress is not supported");
1191 if (attr->transfer) {
1192 rte_flow_error_set(error, ENOTSUP,
1193 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, NULL,
1194 "Transfer is not supported");
1202 * Parse flow pattern.
1204 * Specific classifier rule will be created as well.
1206 * @param priv Unused
1207 * @param pattern Pointer to the flow pattern.
1208 * @param flow Pointer to the flow.
1209 * @param error Pointer to the flow error.
1210 * @returns 0 in case of success, negative value otherwise.
1213 mrvl_flow_parse_pattern(struct mrvl_priv *priv __rte_unused,
1214 const struct rte_flow_item pattern[],
1215 struct rte_flow *flow,
1216 struct rte_flow_error *error)
1221 for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1222 if (pattern[i].type == RTE_FLOW_ITEM_TYPE_VOID)
1224 for (j = 0; mrvl_patterns[j].pattern_type !=
1225 RTE_FLOW_ITEM_TYPE_END; j++) {
1226 if (mrvl_patterns[j].pattern_type != pattern[i].type)
1229 if (flow->rule.num_fields >=
1230 PP2_CLS_TBL_MAX_NUM_FIELDS) {
1231 rte_flow_error_set(error, ENOSPC,
1232 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1234 "too many pattern (max %d)");
1238 ret = mrvl_patterns[j].parse(&pattern[i], flow, error);
1240 mrvl_free_all_key_mask(&flow->rule);
1245 if (mrvl_patterns[j].pattern_type == RTE_FLOW_ITEM_TYPE_END) {
1246 rte_flow_error_set(error, ENOTSUP,
1247 RTE_FLOW_ERROR_TYPE_ITEM, NULL,
1248 "Unsupported pattern");
1257 * Parse flow actions.
1259 * @param priv Pointer to the port's private data.
1260 * @param actions Pointer the action table.
1261 * @param flow Pointer to the flow.
1262 * @param error Pointer to the flow error.
1263 * @returns 0 in case of success, negative value otherwise.
/*
 * Translate the rte_flow action list into the MUSDK classifier action
 * stored in flow->action / flow->cos. Actions visible in this excerpt:
 * VOID (skipped), DROP, QUEUE and METER; anything else is rejected with
 * ENOTSUP, and an empty action list is rejected with EINVAL.
 * NOTE(review): several original lines are elided in this excerpt, so
 * braces/early-return control flow is not fully visible below.
 */
1266 mrvl_flow_parse_actions(struct mrvl_priv *priv,
1267 const struct rte_flow_action actions[],
1268 struct rte_flow *flow,
1269 struct rte_flow_error *error)
1271 const struct rte_flow_action *action = actions;
1274 for (; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
1275 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
/* DROP: matching packets are discarded by the classifier. */
1278 if (action->type == RTE_FLOW_ACTION_TYPE_DROP) {
1279 flow->cos.ppio = priv->ppio;
1281 flow->action.type = PP2_CLS_TBL_ACT_DROP;
1282 flow->action.cos = &flow->cos;
/* QUEUE: steer matching packets to the TC/in-queue mapped to q->index. */
1284 } else if (action->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1285 const struct rte_flow_action_queue *q =
1286 (const struct rte_flow_action_queue *)
/*
 * NOTE(review): '>' lets q->index == nb_rx_queues pass; queue indices
 * are 0-based, so '>=' looks intended — confirm against rxq_map size.
 */
1289 if (q->index > priv->nb_rx_queues) {
1290 rte_flow_error_set(error, EINVAL,
1291 RTE_FLOW_ERROR_TYPE_ACTION,
1293 "Queue index out of range");
1297 if (priv->rxq_map[q->index].tc == MRVL_UNKNOWN_TC) {
1299 * Unknown TC mapping, mapping will not have
1303 "Unknown TC mapping for queue %hu eth%hhu",
1304 q->index, priv->ppio_id);
1306 rte_flow_error_set(error, EFAULT,
1307 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1313 "Action: Assign packets to queue %d, tc:%d, q:%d",
1314 q->index, priv->rxq_map[q->index].tc,
1315 priv->rxq_map[q->index].inq);
1317 flow->cos.ppio = priv->ppio;
1318 flow->cos.tc = priv->rxq_map[q->index].tc;
1319 flow->action.type = PP2_CLS_TBL_ACT_DONE;
1320 flow->action.cos = &flow->cos;
/* METER: attach an already-created, non-busy policer to the flow. */
1322 } else if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
1323 const struct rte_flow_action_meter *meter;
1324 struct mrvl_mtr *mtr;
1326 meter = action->conf;
1328 return -rte_flow_error_set(error, EINVAL,
1329 RTE_FLOW_ERROR_TYPE_ACTION,
1330 NULL, "Invalid meter\n");
/* Look the meter up by id on the per-port meter list. */
1332 LIST_FOREACH(mtr, &priv->mtrs, next)
1333 if (mtr->mtr_id == meter->mtr_id)
1337 return -rte_flow_error_set(error, EINVAL,
1338 RTE_FLOW_ERROR_TYPE_ACTION,
1340 "Meter id does not exist\n");
/* A non-shared meter already referenced by another flow cannot be reused. */
1342 if (!mtr->shared && mtr->refcnt)
1343 return -rte_flow_error_set(error, EPERM,
1344 RTE_FLOW_ERROR_TYPE_ACTION,
1346 "Meter cannot be shared\n");
1349 * In case cos has already been set
1352 if (!flow->cos.ppio) {
1353 flow->cos.ppio = priv->ppio;
1357 flow->action.type = PP2_CLS_TBL_ACT_DONE;
/* Only wire in the policer when the meter is actually enabled. */
1358 flow->action.cos = &flow->cos;
1359 flow->action.plcr = mtr->enabled ? mtr->plcr : NULL;
1364 rte_flow_error_set(error, ENOTSUP,
1365 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1366 "Action not supported");
/* Reached (per elided flag) when no terminal action was parsed at all. */
1372 rte_flow_error_set(error, EINVAL,
1373 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1374 "Action not specified");
1382 * Parse flow attribute, pattern and actions.
1384 * @param priv Pointer to the port's private data.
1385 * @param attr Pointer to the flow attribute.
1386 * @param pattern Pointer to the flow pattern.
1387 * @param actions Pointer to the flow actions.
1388 * @param flow Pointer to the flow.
1389 * @param error Pointer to the flow error.
1390 * @returns 0 on success, negative value otherwise.
/*
 * Parse attribute, pattern and actions in sequence; the first failing
 * stage wins (early returns are on elided lines in this excerpt).
 */
1393 mrvl_flow_parse(struct mrvl_priv *priv, const struct rte_flow_attr *attr,
1394 const struct rte_flow_item pattern[],
1395 const struct rte_flow_action actions[],
1396 struct rte_flow *flow,
1397 struct rte_flow_error *error)
1401 ret = mrvl_flow_parse_attr(priv, attr, flow, error);
1405 ret = mrvl_flow_parse_pattern(priv, pattern, flow, error);
1409 return mrvl_flow_parse_actions(priv, actions, flow, error);
1413 * Get engine type for the given flow.
1415 * @param flow Pointer to the flow.
1416 * @returns The type of the engine.
1418 static inline enum pp2_cls_tbl_type
1419 mrvl_engine_type(const struct rte_flow *flow)
/* Total key size = sum of the sizes of all rule key fields. */
1423 for (i = 0; i < flow->rule.num_fields; i++)
1424 size += flow->rule.fields[i].size;
1427 * For maskable engine type the key size must be up to 8 bytes.
1428 * For keys with size bigger than 8 bytes, engine type must
1429 * be set to exact match.
1432 return PP2_CLS_TBL_EXACT_MATCH;
1434 return PP2_CLS_TBL_MASKABLE;
1438 * Create classifier table.
1440 * @param dev Pointer to the device.
1441 * @param flow Pointer to the very first flow.
1442 * @returns 0 in case of success, negative value otherwise.
/*
 * (Re)build the classifier table from the very first flow: each pattern
 * flag present in first_flow->pattern appends one proto_field to the
 * table key and bumps num_fields (key_size updates are mostly on elided
 * lines; the 16-byte IPv6 address adds are visible). Any existing table
 * is torn down first, so this implicitly invalidates previous rules.
 */
1445 mrvl_create_cls_table(struct rte_eth_dev *dev, struct rte_flow *first_flow)
1447 struct mrvl_priv *priv = dev->data->dev_private;
1448 struct pp2_cls_tbl_key *key = &priv->cls_tbl_params.key;
/* Drop the old table before building a new one with a new key layout. */
1451 if (priv->cls_tbl) {
1452 pp2_cls_tbl_deinit(priv->cls_tbl);
1453 priv->cls_tbl = NULL;
1456 memset(&priv->cls_tbl_params, 0, sizeof(priv->cls_tbl_params));
/* Engine type (exact vs maskable) is dictated by the first flow's key size. */
1458 priv->cls_tbl_params.type = mrvl_engine_type(first_flow);
1459 MRVL_LOG(INFO, "Setting cls search engine type to %s",
1460 priv->cls_tbl_params.type == PP2_CLS_TBL_EXACT_MATCH ?
1461 "exact" : "maskable");
1462 priv->cls_tbl_params.max_num_rules = MRVL_CLS_MAX_NUM_RULES;
1463 priv->cls_tbl_params.default_act.type = PP2_CLS_TBL_ACT_DONE;
1464 priv->cls_tbl_params.default_act.cos = &first_flow->cos;
/* Ethernet header fields. */
1466 if (first_flow->pattern & F_DMAC) {
1467 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
1468 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_DA;
1470 key->num_fields += 1;
1473 if (first_flow->pattern & F_SMAC) {
1474 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
1475 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_SA;
1477 key->num_fields += 1;
1480 if (first_flow->pattern & F_TYPE) {
1481 key->proto_field[key->num_fields].proto = MV_NET_PROTO_ETH;
1482 key->proto_field[key->num_fields].field.eth = MV_NET_ETH_F_TYPE;
1484 key->num_fields += 1;
/* VLAN tag fields. */
1487 if (first_flow->pattern & F_VLAN_ID) {
1488 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
1489 key->proto_field[key->num_fields].field.vlan = MV_NET_VLAN_F_ID;
1491 key->num_fields += 1;
1494 if (first_flow->pattern & F_VLAN_PRI) {
1495 key->proto_field[key->num_fields].proto = MV_NET_PROTO_VLAN;
1496 key->proto_field[key->num_fields].field.vlan =
1499 key->num_fields += 1;
/* IPv4 header fields. */
1502 if (first_flow->pattern & F_IP4_TOS) {
1503 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
1504 key->proto_field[key->num_fields].field.ipv4 =
1507 key->num_fields += 1;
1510 if (first_flow->pattern & F_IP4_SIP) {
1511 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
1512 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_SA;
1514 key->num_fields += 1;
1517 if (first_flow->pattern & F_IP4_DIP) {
1518 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
1519 key->proto_field[key->num_fields].field.ipv4 = MV_NET_IP4_F_DA;
1521 key->num_fields += 1;
1524 if (first_flow->pattern & F_IP4_PROTO) {
1525 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP4;
1526 key->proto_field[key->num_fields].field.ipv4 =
1529 key->num_fields += 1;
/* IPv6 header fields; addresses contribute 16 bytes each to the key. */
1532 if (first_flow->pattern & F_IP6_SIP) {
1533 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
1534 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_SA;
1535 key->key_size += 16;
1536 key->num_fields += 1;
1539 if (first_flow->pattern & F_IP6_DIP) {
1540 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
1541 key->proto_field[key->num_fields].field.ipv6 = MV_NET_IP6_F_DA;
1542 key->key_size += 16;
1543 key->num_fields += 1;
1546 if (first_flow->pattern & F_IP6_FLOW) {
1547 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
1548 key->proto_field[key->num_fields].field.ipv6 =
1551 key->num_fields += 1;
1554 if (first_flow->pattern & F_IP6_NEXT_HDR) {
1555 key->proto_field[key->num_fields].proto = MV_NET_PROTO_IP6;
1556 key->proto_field[key->num_fields].field.ipv6 =
1557 MV_NET_IP6_F_NEXT_HDR;
1559 key->num_fields += 1;
/* TCP/UDP port fields. */
1562 if (first_flow->pattern & F_TCP_SPORT) {
1563 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
1564 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_SP;
1566 key->num_fields += 1;
1569 if (first_flow->pattern & F_TCP_DPORT) {
1570 key->proto_field[key->num_fields].proto = MV_NET_PROTO_TCP;
1571 key->proto_field[key->num_fields].field.tcp = MV_NET_TCP_F_DP;
1573 key->num_fields += 1;
1576 if (first_flow->pattern & F_UDP_SPORT) {
1577 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
1578 key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_SP;
1580 key->num_fields += 1;
1583 if (first_flow->pattern & F_UDP_DPORT) {
1584 key->proto_field[key->num_fields].proto = MV_NET_PROTO_UDP;
1585 key->proto_field[key->num_fields].field.udp = MV_NET_UDP_F_DP;
1587 key->num_fields += 1;
/* Create the table; remember which pattern it was built for. */
1590 ret = pp2_cls_tbl_init(&priv->cls_tbl_params, &priv->cls_tbl);
1592 priv->cls_tbl_pattern = first_flow->pattern;
1598 * Check whether new flow can be added to the table.
1600 * @param priv Pointer to the port's private data.
1601 * @param flow Pointer to the new flow.
1602 * @returns 1 in case flow can be added, 0 otherwise.
/*
 * A flow fits the existing table only if it uses the exact pattern the
 * table key was built for AND would select the same engine type.
 */
1605 mrvl_flow_can_be_added(struct mrvl_priv *priv, const struct rte_flow *flow)
1607 return flow->pattern == priv->cls_tbl_pattern &&
1608 mrvl_engine_type(flow) == priv->cls_tbl_params.type;
1612 * DPDK flow create callback called when flow is to be created.
1614 * @param dev Pointer to the device.
1615 * @param attr Pointer to the flow attribute.
1616 * @param pattern Pointer to the flow pattern.
1617 * @param actions Pointer to the flow actions.
1618 * @param error Pointer to the flow error.
1619 * @returns Pointer to the created flow in case of success, NULL otherwise.
/*
 * Create a flow: requires a started port, allocates and parses the flow,
 * then either builds a fresh classifier table (cases 1-2 below) or adds
 * the rule to the existing one (case 3). Error/rollback paths (freeing
 * 'flow', returning NULL) are on elided lines in this excerpt.
 */
1621 static struct rte_flow *
1622 mrvl_flow_create(struct rte_eth_dev *dev,
1623 const struct rte_flow_attr *attr,
1624 const struct rte_flow_item pattern[],
1625 const struct rte_flow_action actions[],
1626 struct rte_flow_error *error)
1628 struct mrvl_priv *priv = dev->data->dev_private;
1629 struct rte_flow *flow, *first;
/* The classifier can only be programmed on a started port. */
1632 if (!dev->data->dev_started) {
1633 rte_flow_error_set(error, EINVAL,
1634 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1635 "Port must be started first\n");
1639 flow = rte_zmalloc_socket(NULL, sizeof(*flow), 0, rte_socket_id());
1643 ret = mrvl_flow_parse(priv, attr, pattern, actions, flow, error);
1650 * 1. In case table does not exist - create one.
1651 * 2. In case table exists, is empty and new flow cannot be added
1653 * 3. In case table is not empty and new flow matches table format
1655 * 4. Otherwise flow cannot be added.
1657 first = LIST_FIRST(&priv->flows);
1658 if (!priv->cls_tbl) {
1659 ret = mrvl_create_cls_table(dev, flow);
1660 } else if (!first && !mrvl_flow_can_be_added(priv, flow)) {
/* Empty table with the wrong key layout — rebuild it for this flow. */
1661 ret = mrvl_create_cls_table(dev, flow);
1662 } else if (mrvl_flow_can_be_added(priv, flow)) {
1665 rte_flow_error_set(error, EINVAL,
1666 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1667 "Pattern does not match cls table format\n");
1672 rte_flow_error_set(error, EINVAL,
1673 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1674 "Failed to create cls table\n");
1678 ret = pp2_cls_tbl_add_rule(priv->cls_tbl, &flow->rule, &flow->action);
1680 rte_flow_error_set(error, EINVAL,
1681 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1682 "Failed to add rule\n");
/* Success: track the flow on the per-port list. */
1686 LIST_INSERT_HEAD(&priv->flows, flow, next);
1695 * Remove classifier rule associated with given flow.
1697 * @param priv Pointer to the port's private data.
1698 * @param flow Pointer to the flow.
1699 * @param error Pointer to the flow error.
1700 * @returns 0 in case of success, negative value otherwise.
/*
 * Remove the rule from the classifier table and release the rule's
 * key/mask allocations.
 */
1703 mrvl_flow_remove(struct mrvl_priv *priv, struct rte_flow *flow,
1704 struct rte_flow_error *error)
1708 if (!priv->cls_tbl) {
1709 rte_flow_error_set(error, EINVAL,
1710 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1711 "Classifier table not initialized");
1715 ret = pp2_cls_tbl_remove_rule(priv->cls_tbl, &flow->rule);
1717 rte_flow_error_set(error, EINVAL,
1718 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1719 "Failed to remove rule");
1723 mrvl_free_all_key_mask(&flow->rule);
/* Release this flow's reference on its meter — presumably guarded by an
 * elided 'if (flow->mtr)' check; verify in the full source. */
1726 flow->mtr->refcnt--;
1734 * DPDK flow destroy callback called when flow is to be removed.
1736 * @param dev Pointer to the device.
1737 * @param flow Pointer to the flow.
1738 * @param error Pointer to the flow error.
1739 * @returns 0 in case of success, negative value otherwise.
/*
 * Look the flow up on the per-port list (so stale/foreign handles are
 * rejected with EINVAL), unlink it and remove its classifier rule.
 * The elided tail presumably frees 'flow' — verify in the full source.
 */
1742 mrvl_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1743 struct rte_flow_error *error)
1745 struct mrvl_priv *priv = dev->data->dev_private;
1749 LIST_FOREACH(f, &priv->flows, next) {
1755 rte_flow_error_set(error, EINVAL,
1756 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1757 "Rule was not found");
1761 LIST_REMOVE(f, next);
1763 ret = mrvl_flow_remove(priv, flow, error);
1773 * DPDK flow callback called to verify given attribute, pattern and actions.
1775 * @param dev Pointer to the device.
1776 * @param attr Pointer to the flow attribute.
1777 * @param pattern Pointer to the flow pattern.
1778 * @param actions Pointer to the flow actions.
1779 * @param error Pointer to the flow error.
1780 * @returns 0 on success, negative value otherwise.
1783 mrvl_flow_validate(struct rte_eth_dev *dev,
1784 const struct rte_flow_attr *attr,
1785 const struct rte_flow_item pattern[],
1786 const struct rte_flow_action actions[],
1787 struct rte_flow_error *error)
1789 static struct rte_flow *flow;
1791 flow = mrvl_flow_create(dev, attr, pattern, actions, error);
1795 mrvl_flow_destroy(dev, flow, error);
1801 * DPDK flow flush callback called when flows are to be flushed.
1803 * @param dev Pointer to the device.
1804 * @param error Pointer to the flow error.
1805 * @returns 0 in case of success, negative value otherwise.
/*
 * Remove every flow on the port, then tear down the classifier table so
 * the next flow_create rebuilds it from scratch. The elided line between
 * the remove call and LIST_REMOVE presumably logs/ignores 'ret' — the
 * loop keeps draining the list regardless; verify in the full source.
 */
1808 mrvl_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1810 struct mrvl_priv *priv = dev->data->dev_private;
1812 while (!LIST_EMPTY(&priv->flows)) {
1813 struct rte_flow *flow = LIST_FIRST(&priv->flows);
1814 int ret = mrvl_flow_remove(priv, flow, error);
1818 LIST_REMOVE(flow, next);
1822 if (priv->cls_tbl) {
1823 pp2_cls_tbl_deinit(priv->cls_tbl);
1824 priv->cls_tbl = NULL;
1831 * DPDK flow isolate callback called to isolate port.
1833 * @param dev Pointer to the device.
1834 * @param enable Pass 0/1 to disable/enable port isolation.
1835 * @param error Pointer to the flow error.
1836 * @returns 0 in case of success, negative value otherwise.
1839 mrvl_flow_isolate(struct rte_eth_dev *dev, int enable,
1840 struct rte_flow_error *error)
1842 struct mrvl_priv *priv = dev->data->dev_private;
1844 if (dev->data->dev_started) {
1845 rte_flow_error_set(error, EBUSY,
1846 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1847 NULL, "Port must be stopped first\n");
1851 priv->isolated = enable;
/* rte_flow driver callbacks exported by this PMD. */
1856 const struct rte_flow_ops mrvl_flow_ops = {
1857 .validate = mrvl_flow_validate,
1858 .create = mrvl_flow_create,
1859 .destroy = mrvl_flow_destroy,
1860 .flush = mrvl_flow_flush,
1861 .isolate = mrvl_flow_isolate
1865 * Initialize flow resources.
1867 * @param dev Pointer to the device.
1870 mrvl_flow_init(struct rte_eth_dev *dev)
1872 struct mrvl_priv *priv = dev->data->dev_private;
/* Start with an empty per-port flow list; create prepends to it. */
1874 LIST_INIT(&priv->flows);
1878 * Cleanup flow resources.
1880 * @param dev Pointer to the device.
1883 mrvl_flow_deinit(struct rte_eth_dev *dev)
/* Drop all flows and the classifier table; NULL error — no caller to
 * report to (rte_flow_error_set tolerates a NULL error pointer). */
1885 mrvl_flow_flush(dev, NULL);