1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2020 NXP
13 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
25 /* Workaround to discriminate the UDP/TCP/SCTP
26 * with next protocol of l3.
27 * MC/WRIOP are not able to identify
28 * the l4 protocol with l4 ports.
/* Workaround knob (see comment above): when set, L4 protocol is matched
 * via the IP next-protocol field because MC/WRIOP cannot classify UDP/TCP/
 * SCTP by L4 ports alone.
 */
30 int mc_l4_port_identification;
/* Debug switch for flow-control logging; presumably populated from a
 * devarg at probe time -- TODO confirm against init code.
 */
32 static char *dpaa2_flow_control_log;
/* Flow id used for QoS-table miss traffic. */
33 static int dpaa2_flow_miss_flow_id =
/* Fixed byte size of a classification table entry key. */
36 #define FIXED_ENTRY_SIZE 54
/* Which IP address family (if any) this rule matches on. */
38 enum flow_rule_ipaddr_type {
/* Tracks the key offsets of the IP SRC/DST extracts inside the QoS and
 * FS rule keys; needed because IP address extracts are kept at the tail
 * of the key and may be moved when new extracts are added.
 */
44 struct flow_rule_ipaddr {
45 enum flow_rule_ipaddr_type ipaddr_type;
/* Software flow object; one per rte_flow created on the port. */
53 LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
54 struct dpni_rule_cfg qos_rule;
55 struct dpni_rule_cfg fs_rule;
56 uint8_t qos_real_key_size;
57 uint8_t fs_real_key_size;
58 uint8_t tc_id; /** Traffic Class ID. */
59 uint8_t tc_index; /** index within this Traffic Class. */
60 enum rte_flow_action_type action;
61 /* Special for IP address to specify the offset
64 struct flow_rule_ipaddr ipaddr_rule;
65 struct dpni_fs_action_cfg action_cfg;
/* Pattern item types this PMD can classify on. */
69 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
70 RTE_FLOW_ITEM_TYPE_END,
71 RTE_FLOW_ITEM_TYPE_ETH,
72 RTE_FLOW_ITEM_TYPE_VLAN,
73 RTE_FLOW_ITEM_TYPE_IPV4,
74 RTE_FLOW_ITEM_TYPE_IPV6,
75 RTE_FLOW_ITEM_TYPE_ICMP,
76 RTE_FLOW_ITEM_TYPE_UDP,
77 RTE_FLOW_ITEM_TYPE_TCP,
78 RTE_FLOW_ITEM_TYPE_SCTP,
79 RTE_FLOW_ITEM_TYPE_GRE,
/* Actions this PMD supports: steer to queue or spread via RSS. */
83 enum rte_flow_action_type dpaa2_supported_action_type[] = {
84 RTE_FLOW_ACTION_TYPE_END,
85 RTE_FLOW_ACTION_TYPE_QUEUE,
86 RTE_FLOW_ACTION_TYPE_RSS
89 /* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6*/
/* Synthetic item type covering IPv4 and IPv6 together; chosen past the
 * end of the public enum so it cannot collide with real item types.
 */
90 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
/* Default (full-match) masks per item type. Used when the application
 * supplies no mask, and as the reference in dpaa2_flow_extract_support()
 * to reject masks touching fields the hardware cannot extract.
 */
93 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
94 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
95 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
96 .type = RTE_BE16(0xffff),
99 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
100 .tci = RTE_BE16(0xffff),
103 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
104 .hdr.src_addr = RTE_BE32(0xffffffff),
105 .hdr.dst_addr = RTE_BE32(0xffffffff),
106 .hdr.next_proto_id = 0xff,
109 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
112 "\xff\xff\xff\xff\xff\xff\xff\xff"
113 "\xff\xff\xff\xff\xff\xff\xff\xff",
115 "\xff\xff\xff\xff\xff\xff\xff\xff"
116 "\xff\xff\xff\xff\xff\xff\xff\xff",
121 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
122 .hdr.icmp_type = 0xff,
123 .hdr.icmp_code = 0xff,
126 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
128 .src_port = RTE_BE16(0xffff),
129 .dst_port = RTE_BE16(0xffff),
133 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
135 .src_port = RTE_BE16(0xffff),
136 .dst_port = RTE_BE16(0xffff),
140 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
142 .src_port = RTE_BE16(0xffff),
143 .dst_port = RTE_BE16(0xffff),
147 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
148 .protocol = RTE_BE16(0xffff),
/* Render a "<protocol>.<field>" description (e.g. "ip.src") into 'string'
 * for the debug dumps below. Does nothing unless dpaa2_flow_control_log
 * is enabled. Unrecognized fields yield "<proto>.unknown field" and an
 * unrecognized protocol yields "unknown protocol".
 */
153 static inline void dpaa2_prot_field_string(
154 enum net_prot prot, uint32_t field,
157 if (!dpaa2_flow_control_log)
160 if (prot == NET_PROT_ETH) {
161 strcpy(string, "eth");
162 if (field == NH_FLD_ETH_DA)
163 strcat(string, ".dst");
164 else if (field == NH_FLD_ETH_SA)
165 strcat(string, ".src");
166 else if (field == NH_FLD_ETH_TYPE)
167 strcat(string, ".type");
169 strcat(string, ".unknown field");
170 } else if (prot == NET_PROT_VLAN) {
171 strcpy(string, "vlan");
172 if (field == NH_FLD_VLAN_TCI)
173 strcat(string, ".tci");
175 strcat(string, ".unknown field");
176 } else if (prot == NET_PROT_IP) {
177 strcpy(string, "ip");
178 if (field == NH_FLD_IP_SRC)
179 strcat(string, ".src");
180 else if (field == NH_FLD_IP_DST)
181 strcat(string, ".dst");
182 else if (field == NH_FLD_IP_PROTO)
183 strcat(string, ".proto");
185 strcat(string, ".unknown field");
186 } else if (prot == NET_PROT_TCP) {
187 strcpy(string, "tcp");
188 if (field == NH_FLD_TCP_PORT_SRC)
189 strcat(string, ".src");
190 else if (field == NH_FLD_TCP_PORT_DST)
191 strcat(string, ".dst");
193 strcat(string, ".unknown field");
194 } else if (prot == NET_PROT_UDP) {
195 strcpy(string, "udp");
196 if (field == NH_FLD_UDP_PORT_SRC)
197 strcat(string, ".src");
198 else if (field == NH_FLD_UDP_PORT_DST)
199 strcat(string, ".dst");
201 strcat(string, ".unknown field");
202 } else if (prot == NET_PROT_ICMP) {
203 strcpy(string, "icmp");
204 if (field == NH_FLD_ICMP_TYPE)
205 strcat(string, ".type");
206 else if (field == NH_FLD_ICMP_CODE)
207 strcat(string, ".code");
209 strcat(string, ".unknown field");
210 } else if (prot == NET_PROT_SCTP) {
211 strcpy(string, "sctp");
212 if (field == NH_FLD_SCTP_PORT_SRC)
213 strcat(string, ".src");
214 else if (field == NH_FLD_SCTP_PORT_DST)
215 strcat(string, ".dst");
217 strcat(string, ".unknown field");
218 } else if (prot == NET_PROT_GRE) {
219 strcpy(string, "gre");
220 if (field == NH_FLD_GRE_TYPE)
221 strcat(string, ".type");
223 strcat(string, ".unknown field");
225 strcpy(string, "unknown protocol");
/* Debug dump: print every extract (protocol.field) configured in the
 * QoS table key layout. Gated on dpaa2_flow_control_log.
 */
229 static inline void dpaa2_flow_qos_table_extracts_log(
230 const struct dpaa2_dev_priv *priv)
235 if (!dpaa2_flow_control_log)
238 printf("Setup QoS table: number of extracts: %d\r\n",
239 priv->extract.qos_key_extract.dpkg.num_extracts);
240 for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
242 dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
243 .extracts[idx].extract.from_hdr.prot,
244 priv->extract.qos_key_extract.dpkg.extracts[idx]
245 .extract.from_hdr.field,
247 printf("%s", string);
248 if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
/* Debug dump: print every extract configured in the FS (flow steering)
 * table key layout of traffic class 'tc_id'. Gated on
 * dpaa2_flow_control_log.
 */
254 static inline void dpaa2_flow_fs_table_extracts_log(
255 const struct dpaa2_dev_priv *priv, int tc_id)
260 if (!dpaa2_flow_control_log)
263 printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
264 tc_id, priv->extract.tc_key_extract[tc_id]
266 for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
267 .dpkg.num_extracts; idx++) {
268 dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
269 .dpkg.extracts[idx].extract.from_hdr.prot,
270 priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
271 .extract.from_hdr.field,
273 printf("%s", string);
274 if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
/* Debug dump of one QoS table entry: hex key and mask bytes plus the
 * recorded IP src/dst offsets. key_iova/mask_iova hold virtual addresses
 * here (cast via size_t), not bus addresses -- note the cast below.
 */
281 static inline void dpaa2_flow_qos_entry_log(
282 const char *log_info, const struct rte_flow *flow, int qos_index)
287 if (!dpaa2_flow_control_log)
290 printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
291 log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
293 key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
294 mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
297 for (idx = 0; idx < flow->qos_real_key_size; idx++)
298 printf("%02x ", key[idx]);
300 printf("\r\nmask:\r\n");
301 for (idx = 0; idx < flow->qos_real_key_size; idx++)
302 printf("%02x ", mask[idx]);
304 printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
305 flow->ipaddr_rule.qos_ipsrc_offset,
306 flow->ipaddr_rule.qos_ipdst_offset);
/* Debug dump of one FS table entry, mirroring dpaa2_flow_qos_entry_log()
 * but for the flow-steering rule of this flow's TC.
 */
309 static inline void dpaa2_flow_fs_entry_log(
310 const char *log_info, const struct rte_flow *flow)
315 if (!dpaa2_flow_control_log)
318 printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
319 log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
321 key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
322 mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
325 for (idx = 0; idx < flow->fs_real_key_size; idx++)
326 printf("%02x ", key[idx]);
328 printf("\r\nmask:\r\n");
329 for (idx = 0; idx < flow->fs_real_key_size; idx++)
330 printf("%02x ", mask[idx]);
332 printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
333 flow->ipaddr_rule.fs_ipsrc_offset,
334 flow->ipaddr_rule.fs_ipdst_offset);
/* Record the size and key offset of extract 'index' in key_info.
 * Offset is the previous extract's offset plus its size (0 for the first
 * extract), and the total key size is grown by 'size'.
 */
337 static inline void dpaa2_flow_extract_key_set(
338 struct dpaa2_key_info *key_info, int index, uint8_t size)
340 key_info->key_size[index] = size;
342 key_info->key_offset[index] =
343 key_info->key_offset[index - 1] +
344 key_info->key_size[index - 1];
346 key_info->key_offset[index] = 0;
348 key_info->key_total_size += size;
/* Append a header-field extract to 'key_extract', maintaining the
 * invariant that IP SRC/DST extracts always sit at the TAIL of the
 * extract list (their size varies between IPv4 and IPv6, so all
 * fixed-size fields must precede them).
 *
 * - A new IP SRC/DST extract is inserted before any existing IP
 *   address extracts already at the tail (index is backed up by one
 *   per tail address present) and gets key size 0; the real offsets
 *   are tracked separately via ipv4/ipv6_{src,dst}_offset.
 * - A new non-IP-address extract pushes existing tail IP extracts one
 *   slot down and shifts their recorded offsets by 'field_size'.
 *
 * Returns non-zero when DPKG_MAX_NUM_OF_EXTRACTS would be exceeded
 * (exact return path elided in this view -- confirm against full file).
 */
351 static int dpaa2_flow_extract_add(
352 struct dpaa2_key_extract *key_extract,
354 uint32_t field, uint8_t field_size)
356 int index, ip_src = -1, ip_dst = -1;
357 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
358 struct dpaa2_key_info *key_info = &key_extract->key_info;
360 if (dpkg->num_extracts >=
361 DPKG_MAX_NUM_OF_EXTRACTS) {
362 DPAA2_PMD_WARN("Number of extracts overflows");
365 /* Before reorder, the IP SRC and IP DST are already last
368 for (index = 0; index < dpkg->num_extracts; index++) {
369 if (dpkg->extracts[index].extract.from_hdr.prot ==
371 if (dpkg->extracts[index].extract.from_hdr.field ==
375 if (dpkg->extracts[index].extract.from_hdr.field ==
/* Sanity: any found IP extracts must already be in the last two slots. */
383 RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
386 RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
388 if (prot == NET_PROT_IP &&
389 (field == NH_FLD_IP_SRC ||
390 field == NH_FLD_IP_DST)) {
391 index = dpkg->num_extracts;
393 if (ip_src >= 0 && ip_dst >= 0)
394 index = dpkg->num_extracts - 2;
395 else if (ip_src >= 0 || ip_dst >= 0)
396 index = dpkg->num_extracts - 1;
398 index = dpkg->num_extracts;
401 dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
402 dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
403 dpkg->extracts[index].extract.from_hdr.prot = prot;
404 dpkg->extracts[index].extract.from_hdr.field = field;
405 if (prot == NET_PROT_IP &&
406 (field == NH_FLD_IP_SRC ||
407 field == NH_FLD_IP_DST)) {
/* IP addresses carry size 0 in the generic key layout; their true
 * per-family sizes/offsets are kept in the ipv4/ipv6 offset fields.
 */
408 dpaa2_flow_extract_key_set(key_info, index, 0);
410 dpaa2_flow_extract_key_set(key_info, index, field_size);
413 if (prot == NET_PROT_IP) {
414 if (field == NH_FLD_IP_SRC) {
415 if (key_info->ipv4_dst_offset >= 0) {
416 key_info->ipv4_src_offset =
417 key_info->ipv4_dst_offset +
418 NH_FLD_IPV4_ADDR_SIZE;
420 key_info->ipv4_src_offset =
421 key_info->key_offset[index - 1] +
422 key_info->key_size[index - 1];
424 if (key_info->ipv6_dst_offset >= 0) {
425 key_info->ipv6_src_offset =
426 key_info->ipv6_dst_offset +
427 NH_FLD_IPV6_ADDR_SIZE;
429 key_info->ipv6_src_offset =
430 key_info->key_offset[index - 1] +
431 key_info->key_size[index - 1];
433 } else if (field == NH_FLD_IP_DST) {
434 if (key_info->ipv4_src_offset >= 0) {
435 key_info->ipv4_dst_offset =
436 key_info->ipv4_src_offset +
437 NH_FLD_IPV4_ADDR_SIZE;
439 key_info->ipv4_dst_offset =
440 key_info->key_offset[index - 1] +
441 key_info->key_size[index - 1];
443 if (key_info->ipv6_src_offset >= 0) {
444 key_info->ipv6_dst_offset =
445 key_info->ipv6_src_offset +
446 NH_FLD_IPV6_ADDR_SIZE;
448 key_info->ipv6_dst_offset =
449 key_info->key_offset[index - 1] +
450 key_info->key_size[index - 1];
455 if (index == dpkg->num_extracts) {
456 dpkg->num_extracts++;
/* A non-tail insertion displaced the IP extracts: rewrite them one
 * slot later and shift their key offsets by the inserted field size.
 */
462 dpkg->extracts[ip_src].type =
463 DPKG_EXTRACT_FROM_HDR;
464 dpkg->extracts[ip_src].extract.from_hdr.type =
466 dpkg->extracts[ip_src].extract.from_hdr.prot =
468 dpkg->extracts[ip_src].extract.from_hdr.field =
470 dpaa2_flow_extract_key_set(key_info, ip_src, 0);
471 key_info->ipv4_src_offset += field_size;
472 key_info->ipv6_src_offset += field_size;
476 dpkg->extracts[ip_dst].type =
477 DPKG_EXTRACT_FROM_HDR;
478 dpkg->extracts[ip_dst].extract.from_hdr.type =
480 dpkg->extracts[ip_dst].extract.from_hdr.prot =
482 dpkg->extracts[ip_dst].extract.from_hdr.field =
484 dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
485 key_info->ipv4_dst_offset += field_size;
486 key_info->ipv6_dst_offset += field_size;
489 dpkg->num_extracts++;
/* Configure 'key_extract' as a RAW (from-data) extraction of 'size'
 * bytes starting at offset 0, split into DPAA2_FLOW_MAX_KEY_SIZE chunks.
 * RAW extraction is exclusive: fails if header extracts already exist.
 */
494 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
497 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
498 struct dpaa2_key_info *key_info = &key_extract->key_info;
499 int last_extract_size, index;
501 if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
502 DPKG_EXTRACT_FROM_DATA) {
503 DPAA2_PMD_WARN("RAW extract cannot be combined with others");
/* Number of full-size chunks, plus one partial chunk for the remainder. */
507 last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
508 dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
509 if (last_extract_size)
510 dpkg->num_extracts++;
512 last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
514 for (index = 0; index < dpkg->num_extracts; index++) {
515 dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
516 if (index == dpkg->num_extracts - 1)
517 dpkg->extracts[index].extract.from_data.size =
520 dpkg->extracts[index].extract.from_data.size =
521 DPAA2_FLOW_MAX_KEY_SIZE;
522 dpkg->extracts[index].extract.from_data.offset =
523 DPAA2_FLOW_MAX_KEY_SIZE * index;
526 key_info->key_total_size = size;
530 /* Protocol discrimination.
531 * Discriminate IPv4/IPv6/vLan by Eth type.
532 * Discriminate UDP/TCP/ICMP by next proto of IP.
/* Add the discriminator extract for 'type': the Ethernet TYPE field for
 * ETH-level discrimination, or the IP next-protocol field for the
 * synthetic generic-IP item. Return value comes from
 * dpaa2_flow_extract_add(); unsupported types fall through (path elided).
 */
535 dpaa2_flow_proto_discrimination_extract(
536 struct dpaa2_key_extract *key_extract,
537 enum rte_flow_item_type type)
539 if (type == RTE_FLOW_ITEM_TYPE_ETH) {
540 return dpaa2_flow_extract_add(
541 key_extract, NET_PROT_ETH,
544 } else if (type == (enum rte_flow_item_type)
545 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
546 return dpaa2_flow_extract_add(
547 key_extract, NET_PROT_IP,
549 NH_FLD_IP_PROTO_SIZE);
/* Linear search of the dpkg extract list for a (protocol, field) pair.
 * Returns the extract index when found; the not-found return value is
 * elided in this view (callers treat negative as "absent" -- confirm).
 */
555 static inline int dpaa2_flow_extract_search(
556 struct dpkg_profile_cfg *dpkg,
557 enum net_prot prot, uint32_t field)
561 for (i = 0; i < dpkg->num_extracts; i++) {
562 if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
563 dpkg->extracts[i].extract.from_hdr.field == field) {
/* Resolve the byte offset of (prot, field) inside the rule key.
 * IPv4/IPv6 requests are searched as generic NET_PROT_IP extracts but
 * return the family-specific cached offsets, since IP address extracts
 * carry zero size in the generic key layout (see dpaa2_flow_extract_add).
 */
571 static inline int dpaa2_flow_extract_key_offset(
572 struct dpaa2_key_extract *key_extract,
573 enum net_prot prot, uint32_t field)
576 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
577 struct dpaa2_key_info *key_info = &key_extract->key_info;
579 if (prot == NET_PROT_IPV4 ||
580 prot == NET_PROT_IPV6)
581 i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
583 i = dpaa2_flow_extract_search(dpkg, prot, field);
586 if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
587 return key_info->ipv4_src_offset;
588 else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
589 return key_info->ipv4_dst_offset;
590 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
591 return key_info->ipv6_src_offset;
592 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
593 return key_info->ipv6_dst_offset;
595 return key_info->key_offset[i];
/* Discriminator descriptor: the item level (ETH or generic IP) plus the
 * matching value (eth_type or ip_proto; remaining members elided here).
 */
601 struct proto_discrimination {
602 enum rte_flow_item_type type;
/* Write the protocol-discrimination value/mask into both the QoS rule
 * and the FS rule of 'flow' at the offset of the discriminator extract
 * (ETH type, or IP proto for generic IP). Only ETH and generic-IP
 * discrimination is supported.
 */
610 dpaa2_flow_proto_discrimination_rule(
611 struct dpaa2_dev_priv *priv, struct rte_flow *flow,
612 struct proto_discrimination proto, int group)
622 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
624 field = NH_FLD_ETH_TYPE;
625 } else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
627 field = NH_FLD_IP_PROTO;
630 "Only Eth and IP support to discriminate next proto.");
/* QoS (TC selection) rule first... */
634 offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
637 DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
641 key_iova = flow->qos_rule.key_iova + offset;
642 mask_iova = flow->qos_rule.mask_iova + offset;
643 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
644 eth_type = proto.eth_type;
645 memcpy((void *)key_iova, (const void *)(&eth_type),
648 memcpy((void *)mask_iova, (const void *)(&eth_type),
651 ip_proto = proto.ip_proto;
652 memcpy((void *)key_iova, (const void *)(&ip_proto),
655 memcpy((void *)mask_iova, (const void *)(&ip_proto),
/* ...then the FS (flow steering) rule of this TC group. */
659 offset = dpaa2_flow_extract_key_offset(
660 &priv->extract.tc_key_extract[group],
663 DPAA2_PMD_ERR("FS prot %d field %d extract failed",
667 key_iova = flow->fs_rule.key_iova + offset;
668 mask_iova = flow->fs_rule.mask_iova + offset;
670 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
671 eth_type = proto.eth_type;
672 memcpy((void *)key_iova, (const void *)(&eth_type),
675 memcpy((void *)mask_iova, (const void *)(&eth_type),
678 ip_proto = proto.ip_proto;
679 memcpy((void *)key_iova, (const void *)(&ip_proto),
682 memcpy((void *)mask_iova, (const void *)(&ip_proto),
/* Copy 'size' bytes of key and mask for (prot, field) into 'rule' at the
 * offset resolved from 'key_extract'. Fails when the field has no extract.
 */
690 dpaa2_flow_rule_data_set(
691 struct dpaa2_key_extract *key_extract,
692 struct dpni_rule_cfg *rule,
693 enum net_prot prot, uint32_t field,
694 const void *key, const void *mask, int size)
696 int offset = dpaa2_flow_extract_key_offset(key_extract,
700 DPAA2_PMD_ERR("prot %d, field %d extract failed",
705 memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
706 memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
/* RAW variant of rule data set: copy key/mask straight into the rule
 * buffers (offset computation elided in this view).
 */
712 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
713 const void *key, const void *mask, int size)
717 memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
718 memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
/* Move the key/mask bytes of one IP address (SRC or DST) from
 * 'src_offset' to the tail offset currently assigned to that field,
 * zeroing the old location. 'ipv4' selects 4- vs 16-byte copies.
 */
724 _dpaa2_flow_rule_move_ipaddr_tail(
725 struct dpaa2_key_extract *key_extract,
726 struct dpni_rule_cfg *rule, int src_offset,
727 uint32_t field, bool ipv4)
735 char tmp[NH_FLD_IPV6_ADDR_SIZE];
737 if (field != NH_FLD_IP_SRC &&
738 field != NH_FLD_IP_DST) {
739 DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
743 prot = NET_PROT_IPV4;
745 prot = NET_PROT_IPV6;
746 dst_offset = dpaa2_flow_extract_key_offset(key_extract,
748 if (dst_offset < 0) {
749 DPAA2_PMD_ERR("Field %d reorder extract failed", field);
752 key_src = rule->key_iova + src_offset;
753 mask_src = rule->mask_iova + src_offset;
754 key_dst = rule->key_iova + dst_offset;
755 mask_dst = rule->mask_iova + dst_offset;
757 len = sizeof(rte_be32_t);
759 len = NH_FLD_IPV6_ADDR_SIZE;
/* Copy-out / clear / copy-in via tmp: source and destination ranges may
 * overlap inside the same key buffer.
 */
761 memcpy(tmp, (char *)key_src, len);
762 memset((char *)key_src, 0, len);
763 memcpy((char *)key_dst, tmp, len);
765 memcpy(tmp, (char *)mask_src, len);
766 memset((char *)mask_src, 0, len);
767 memcpy((char *)mask_dst, tmp, len);
/* Re-locate all of this flow's IP address key data (QoS src/dst and FS
 * src/dst) to the current tail offsets, then refresh the recorded
 * offsets in flow->ipaddr_rule. Called before adding a non-IP extract so
 * the "IP addresses last" key invariant keeps holding. No-op when the
 * flow matches no IP address.
 */
773 dpaa2_flow_rule_move_ipaddr_tail(
774 struct rte_flow *flow, struct dpaa2_dev_priv *priv,
780 if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
783 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
784 prot = NET_PROT_IPV4;
786 prot = NET_PROT_IPV6;
788 if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
789 ret = _dpaa2_flow_rule_move_ipaddr_tail(
790 &priv->extract.qos_key_extract,
792 flow->ipaddr_rule.qos_ipsrc_offset,
793 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
795 DPAA2_PMD_ERR("QoS src address reorder failed");
798 flow->ipaddr_rule.qos_ipsrc_offset =
799 dpaa2_flow_extract_key_offset(
800 &priv->extract.qos_key_extract,
801 prot, NH_FLD_IP_SRC);
804 if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
805 ret = _dpaa2_flow_rule_move_ipaddr_tail(
806 &priv->extract.qos_key_extract,
808 flow->ipaddr_rule.qos_ipdst_offset,
809 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
811 DPAA2_PMD_ERR("QoS dst address reorder failed");
814 flow->ipaddr_rule.qos_ipdst_offset =
815 dpaa2_flow_extract_key_offset(
816 &priv->extract.qos_key_extract,
817 prot, NH_FLD_IP_DST);
820 if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
821 ret = _dpaa2_flow_rule_move_ipaddr_tail(
822 &priv->extract.tc_key_extract[fs_group],
824 flow->ipaddr_rule.fs_ipsrc_offset,
825 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
827 DPAA2_PMD_ERR("FS src address reorder failed");
830 flow->ipaddr_rule.fs_ipsrc_offset =
831 dpaa2_flow_extract_key_offset(
832 &priv->extract.tc_key_extract[fs_group],
833 prot, NH_FLD_IP_SRC);
835 if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
836 ret = _dpaa2_flow_rule_move_ipaddr_tail(
837 &priv->extract.tc_key_extract[fs_group],
839 flow->ipaddr_rule.fs_ipdst_offset,
840 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
842 DPAA2_PMD_ERR("FS dst address reorder failed");
845 flow->ipaddr_rule.fs_ipdst_offset =
846 dpaa2_flow_extract_key_offset(
847 &priv->extract.tc_key_extract[fs_group],
848 prot, NH_FLD_IP_DST);
/* Check whether an application-supplied mask touches only fields this
 * PMD can extract for the given item type. OR-ing the user mask into the
 * supported mask must leave it unchanged; any new bit means an
 * unsupported field is being matched (memcmp differs -> reject).
 */
855 dpaa2_flow_extract_support(
856 const uint8_t *mask_src,
857 enum rte_flow_item_type type)
861 const char *mask_support = 0;
864 case RTE_FLOW_ITEM_TYPE_ETH:
865 mask_support = (const char *)&dpaa2_flow_item_eth_mask;
866 size = sizeof(struct rte_flow_item_eth);
868 case RTE_FLOW_ITEM_TYPE_VLAN:
869 mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
870 size = sizeof(struct rte_flow_item_vlan);
872 case RTE_FLOW_ITEM_TYPE_IPV4:
873 mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
874 size = sizeof(struct rte_flow_item_ipv4);
876 case RTE_FLOW_ITEM_TYPE_IPV6:
877 mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
878 size = sizeof(struct rte_flow_item_ipv6);
880 case RTE_FLOW_ITEM_TYPE_ICMP:
881 mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
882 size = sizeof(struct rte_flow_item_icmp);
884 case RTE_FLOW_ITEM_TYPE_UDP:
885 mask_support = (const char *)&dpaa2_flow_item_udp_mask;
886 size = sizeof(struct rte_flow_item_udp);
888 case RTE_FLOW_ITEM_TYPE_TCP:
889 mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
890 size = sizeof(struct rte_flow_item_tcp);
892 case RTE_FLOW_ITEM_TYPE_SCTP:
893 mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
894 size = sizeof(struct rte_flow_item_sctp);
896 case RTE_FLOW_ITEM_TYPE_GRE:
897 mask_support = (const char *)&dpaa2_flow_item_gre_mask;
898 size = sizeof(struct rte_flow_item_gre);
904 memcpy(mask, mask_support, size);
906 for (i = 0; i < size; i++)
907 mask[i] = (mask[i] | mask_src[i]);
909 if (memcmp(mask, mask_support, size))
/* Translate an ETH pattern item into QoS + FS extracts and rule data.
 * For each non-zero-masked field (src MAC, dst MAC, ether type):
 *   1. ensure the extract exists in both the QoS and the TC/FS key
 *      profiles (adding it sets the matching RECONFIGURE flag in
 *      local_cfg);
 *   2. move any IP address key bytes back to the tail (key invariant);
 *   3. write the spec/mask bytes into both rules.
 * The accumulated reconfigure flags are OR-ed into *device_configured
 * for the caller to apply. A NULL spec is skipped with a warning.
 */
916 dpaa2_configure_flow_eth(struct rte_flow *flow,
917 struct rte_eth_dev *dev,
918 const struct rte_flow_attr *attr,
919 const struct rte_flow_item *pattern,
920 const struct rte_flow_action actions[] __rte_unused,
921 struct rte_flow_error *error __rte_unused,
922 int *device_configured)
927 const struct rte_flow_item_eth *spec, *mask;
929 /* TODO: Currently upper bound of range parameter is not implemented */
930 const struct rte_flow_item_eth *last __rte_unused;
931 struct dpaa2_dev_priv *priv = dev->data->dev_private;
932 const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
936 /* Parse pattern list to get the matching parameters */
937 spec = (const struct rte_flow_item_eth *)pattern->spec;
938 last = (const struct rte_flow_item_eth *)pattern->last;
939 mask = (const struct rte_flow_item_eth *)
940 (pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
942 /* Don't care any field of eth header,
943 * only care eth protocol.
945 DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
949 /* Get traffic class index and flow id to be configured */
951 flow->tc_index = attr->priority;
953 if (dpaa2_flow_extract_support((const uint8_t *)mask,
954 RTE_FLOW_ITEM_TYPE_ETH)) {
955 DPAA2_PMD_WARN("Extract field(s) of ethernet not support.");
/* --- source MAC --- */
960 if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
961 index = dpaa2_flow_extract_search(
962 &priv->extract.qos_key_extract.dpkg,
963 NET_PROT_ETH, NH_FLD_ETH_SA);
965 ret = dpaa2_flow_extract_add(
966 &priv->extract.qos_key_extract,
967 NET_PROT_ETH, NH_FLD_ETH_SA,
970 DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
974 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
976 index = dpaa2_flow_extract_search(
977 &priv->extract.tc_key_extract[group].dpkg,
978 NET_PROT_ETH, NH_FLD_ETH_SA);
980 ret = dpaa2_flow_extract_add(
981 &priv->extract.tc_key_extract[group],
982 NET_PROT_ETH, NH_FLD_ETH_SA,
985 DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
988 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
991 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
994 "Move ipaddr before ETH_SA rule set failed");
998 ret = dpaa2_flow_rule_data_set(
999 &priv->extract.qos_key_extract,
1003 &spec->src.addr_bytes,
1004 &mask->src.addr_bytes,
1005 sizeof(struct rte_ether_addr));
1007 DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1011 ret = dpaa2_flow_rule_data_set(
1012 &priv->extract.tc_key_extract[group],
1016 &spec->src.addr_bytes,
1017 &mask->src.addr_bytes,
1018 sizeof(struct rte_ether_addr));
1020 DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
/* --- destination MAC --- */
1025 if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1026 index = dpaa2_flow_extract_search(
1027 &priv->extract.qos_key_extract.dpkg,
1028 NET_PROT_ETH, NH_FLD_ETH_DA);
1030 ret = dpaa2_flow_extract_add(
1031 &priv->extract.qos_key_extract,
1032 NET_PROT_ETH, NH_FLD_ETH_DA,
1033 RTE_ETHER_ADDR_LEN);
1035 DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1039 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1042 index = dpaa2_flow_extract_search(
1043 &priv->extract.tc_key_extract[group].dpkg,
1044 NET_PROT_ETH, NH_FLD_ETH_DA);
1046 ret = dpaa2_flow_extract_add(
1047 &priv->extract.tc_key_extract[group],
1048 NET_PROT_ETH, NH_FLD_ETH_DA,
1049 RTE_ETHER_ADDR_LEN);
1051 DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1055 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1058 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1061 "Move ipaddr before ETH DA rule set failed");
1065 ret = dpaa2_flow_rule_data_set(
1066 &priv->extract.qos_key_extract,
1070 &spec->dst.addr_bytes,
1071 &mask->dst.addr_bytes,
1072 sizeof(struct rte_ether_addr));
1074 DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1078 ret = dpaa2_flow_rule_data_set(
1079 &priv->extract.tc_key_extract[group],
1083 &spec->dst.addr_bytes,
1084 &mask->dst.addr_bytes,
1085 sizeof(struct rte_ether_addr));
1087 DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
/* --- ether type --- */
1092 if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
1093 index = dpaa2_flow_extract_search(
1094 &priv->extract.qos_key_extract.dpkg,
1095 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1097 ret = dpaa2_flow_extract_add(
1098 &priv->extract.qos_key_extract,
1099 NET_PROT_ETH, NH_FLD_ETH_TYPE,
1100 RTE_ETHER_TYPE_LEN);
1102 DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1106 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1108 index = dpaa2_flow_extract_search(
1109 &priv->extract.tc_key_extract[group].dpkg,
1110 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1112 ret = dpaa2_flow_extract_add(
1113 &priv->extract.tc_key_extract[group],
1114 NET_PROT_ETH, NH_FLD_ETH_TYPE,
1115 RTE_ETHER_TYPE_LEN);
1117 DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1121 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1124 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1127 "Move ipaddr before ETH TYPE rule set failed");
1131 ret = dpaa2_flow_rule_data_set(
1132 &priv->extract.qos_key_extract,
1138 sizeof(rte_be16_t));
1140 DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1144 ret = dpaa2_flow_rule_data_set(
1145 &priv->extract.tc_key_extract[group],
1151 sizeof(rte_be16_t));
1153 DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1158 (*device_configured) |= local_cfg;
/* Translate a VLAN pattern item. With no spec, the item only
 * discriminates VLAN presence: an ETH_TYPE extract plus a rule matching
 * ether type 0x8100 is installed. With a spec, the TCI field is
 * extracted and matched in both the QoS and FS rules. Reconfigure flags
 * accumulate into *device_configured as in dpaa2_configure_flow_eth().
 */
1164 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1165 struct rte_eth_dev *dev,
1166 const struct rte_flow_attr *attr,
1167 const struct rte_flow_item *pattern,
1168 const struct rte_flow_action actions[] __rte_unused,
1169 struct rte_flow_error *error __rte_unused,
1170 int *device_configured)
1175 const struct rte_flow_item_vlan *spec, *mask;
1177 const struct rte_flow_item_vlan *last __rte_unused;
1178 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1180 group = attr->group;
1182 /* Parse pattern list to get the matching parameters */
1183 spec = (const struct rte_flow_item_vlan *)pattern->spec;
1184 last = (const struct rte_flow_item_vlan *)pattern->last;
1185 mask = (const struct rte_flow_item_vlan *)
1186 (pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1188 /* Get traffic class index and flow id to be configured */
1189 flow->tc_id = group;
1190 flow->tc_index = attr->priority;
1193 /* Don't care any field of vlan header,
1194 * only care vlan protocol.
1196 /* Eth type is actually used for vLan classification.
1198 struct proto_discrimination proto;
1200 index = dpaa2_flow_extract_search(
1201 &priv->extract.qos_key_extract.dpkg,
1202 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1204 ret = dpaa2_flow_proto_discrimination_extract(
1205 &priv->extract.qos_key_extract,
1206 RTE_FLOW_ITEM_TYPE_ETH);
1209 "QoS Ext ETH_TYPE to discriminate vLan failed");
1213 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1216 index = dpaa2_flow_extract_search(
1217 &priv->extract.tc_key_extract[group].dpkg,
1218 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1220 ret = dpaa2_flow_proto_discrimination_extract(
1221 &priv->extract.tc_key_extract[group],
1222 RTE_FLOW_ITEM_TYPE_ETH);
1225 "FS Ext ETH_TYPE to discriminate vLan failed.");
1229 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1232 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1235 "Move ipaddr before vLan discrimination set failed");
1239 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1240 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1241 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1244 DPAA2_PMD_ERR("vLan discrimination rule set failed");
1248 (*device_configured) |= local_cfg;
1253 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1254 RTE_FLOW_ITEM_TYPE_VLAN)) {
1255 DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
/* --- TCI match: ensure extracts, keep IP at tail, set rule data --- */
1263 index = dpaa2_flow_extract_search(
1264 &priv->extract.qos_key_extract.dpkg,
1265 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1267 ret = dpaa2_flow_extract_add(
1268 &priv->extract.qos_key_extract,
1271 sizeof(rte_be16_t));
1273 DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1277 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1280 index = dpaa2_flow_extract_search(
1281 &priv->extract.tc_key_extract[group].dpkg,
1282 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1284 ret = dpaa2_flow_extract_add(
1285 &priv->extract.tc_key_extract[group],
1288 sizeof(rte_be16_t));
1290 DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1294 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1297 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1300 "Move ipaddr before VLAN TCI rule set failed");
1304 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1310 sizeof(rte_be16_t));
1312 DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1316 ret = dpaa2_flow_rule_data_set(
1317 &priv->extract.tc_key_extract[group],
1323 sizeof(rte_be16_t));
1325 DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1329 (*device_configured) |= local_cfg;
/* Install IPv4-vs-IPv6 discrimination for an IP pattern item: ensure an
 * ETH_TYPE extract exists in both the QoS and the TC/FS key profiles,
 * keep the IP-address-at-tail invariant, then write an ether-type rule
 * of 0x0800 (IPv4) or 0x86DD (IPv6) depending on the item type.
 * (Function name "discrimation" is a historical typo kept for ABI/grep
 * stability within the file.)
 */
1335 dpaa2_configure_flow_ip_discrimation(
1336 struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1337 const struct rte_flow_item *pattern,
1338 int *local_cfg, int *device_configured,
1342 struct proto_discrimination proto;
1344 index = dpaa2_flow_extract_search(
1345 &priv->extract.qos_key_extract.dpkg,
1346 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1348 ret = dpaa2_flow_proto_discrimination_extract(
1349 &priv->extract.qos_key_extract,
1350 RTE_FLOW_ITEM_TYPE_ETH);
1353 "QoS Extract ETH_TYPE to discriminate IP failed.");
1356 (*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1359 index = dpaa2_flow_extract_search(
1360 &priv->extract.tc_key_extract[group].dpkg,
1361 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1363 ret = dpaa2_flow_proto_discrimination_extract(
1364 &priv->extract.tc_key_extract[group],
1365 RTE_FLOW_ITEM_TYPE_ETH);
1368 "FS Extract ETH_TYPE to discriminate IP failed.");
1371 (*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1374 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1377 "Move ipaddr before IP discrimination set failed");
1381 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1382 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1383 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1385 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1386 ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1388 DPAA2_PMD_ERR("IP discrimination rule set failed");
1392 (*device_configured) |= (*local_cfg);
/* Translate a generic IPv4/IPv6 rte_flow pattern item into DPAA2
 * QoS/FS table configuration: set up ETH_TYPE discrimination, then
 * optionally add extracts and rule data for source address,
 * destination address and the IP next-protocol field.
 * NOTE(review): this listing omits intermediate lines (error checks,
 * braces); only the visible lines are reproduced here.
 */
1399 dpaa2_configure_flow_generic_ip(
1400 struct rte_flow *flow,
1401 struct rte_eth_dev *dev,
1402 const struct rte_flow_attr *attr,
1403 const struct rte_flow_item *pattern,
1404 const struct rte_flow_action actions[] __rte_unused,
1405 struct rte_flow_error *error __rte_unused,
1406 int *device_configured)
1411 const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1413 const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1415 const void *key, *mask;
1418 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1419 const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1422 group = attr->group;
1424 /* Parse pattern list to get the matching parameters */
1425 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1426 spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1427 mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1428 (pattern->mask ? pattern->mask :
1429 &dpaa2_flow_item_ipv4_mask)
1431 spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1432 mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1433 (pattern->mask ? pattern->mask :
1434 &dpaa2_flow_item_ipv6_mask);
1437 /* Get traffic class index and flow id to be configured */
1438 flow->tc_id = group;
1439 flow->tc_index = attr->priority;
1441 ret = dpaa2_configure_flow_ip_discrimation(priv,
1442 flow, pattern, &local_cfg,
1443 device_configured, group);
1445 DPAA2_PMD_ERR("IP discrimation failed!");
/* No spec on either family: only IP discrimination was required. */
1449 if (!spec_ipv4 && !spec_ipv6)
1453 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1454 RTE_FLOW_ITEM_TYPE_IPV4)) {
1455 DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
1462 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1463 RTE_FLOW_ITEM_TYPE_IPV6)) {
1464 DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
/* Record which address family this rule matches on, for later
 * entry-update handling of IP address extracts.
 */
1470 if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1471 mask_ipv4->hdr.dst_addr)) {
1472 flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1473 } else if (mask_ipv6 &&
1474 (memcmp((const char *)mask_ipv6->hdr.src_addr,
1475 zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1476 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1477 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1478 flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
/* Source IP address: add QoS and FS extracts if not present, fill
 * the rule key/mask and remember the key offsets.
 */
1481 if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1483 memcmp((const char *)mask_ipv6->hdr.src_addr,
1484 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1485 index = dpaa2_flow_extract_search(
1486 &priv->extract.qos_key_extract.dpkg,
1487 NET_PROT_IP, NH_FLD_IP_SRC);
1489 ret = dpaa2_flow_extract_add(
1490 &priv->extract.qos_key_extract,
1495 DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1499 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1502 index = dpaa2_flow_extract_search(
1503 &priv->extract.tc_key_extract[group].dpkg,
1504 NET_PROT_IP, NH_FLD_IP_SRC);
1506 ret = dpaa2_flow_extract_add(
1507 &priv->extract.tc_key_extract[group],
1512 DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1516 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1520 key = &spec_ipv4->hdr.src_addr;
1522 key = &spec_ipv6->hdr.src_addr[0];
1524 mask = &mask_ipv4->hdr.src_addr;
1525 size = NH_FLD_IPV4_ADDR_SIZE;
1526 prot = NET_PROT_IPV4;
1528 mask = &mask_ipv6->hdr.src_addr[0];
1529 size = NH_FLD_IPV6_ADDR_SIZE;
1530 prot = NET_PROT_IPV6;
1533 ret = dpaa2_flow_rule_data_set(
1534 &priv->extract.qos_key_extract,
1536 prot, NH_FLD_IP_SRC,
1539 DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1543 ret = dpaa2_flow_rule_data_set(
1544 &priv->extract.tc_key_extract[group],
1546 prot, NH_FLD_IP_SRC,
1549 DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1553 flow->ipaddr_rule.qos_ipsrc_offset =
1554 dpaa2_flow_extract_key_offset(
1555 &priv->extract.qos_key_extract,
1556 prot, NH_FLD_IP_SRC);
1557 flow->ipaddr_rule.fs_ipsrc_offset =
1558 dpaa2_flow_extract_key_offset(
1559 &priv->extract.tc_key_extract[group],
1560 prot, NH_FLD_IP_SRC);
/* Destination IP address: same handling as the source above. */
1563 if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1565 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1566 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1567 index = dpaa2_flow_extract_search(
1568 &priv->extract.qos_key_extract.dpkg,
1569 NET_PROT_IP, NH_FLD_IP_DST);
1572 size = NH_FLD_IPV4_ADDR_SIZE;
1574 size = NH_FLD_IPV6_ADDR_SIZE;
1575 ret = dpaa2_flow_extract_add(
1576 &priv->extract.qos_key_extract,
1581 DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1585 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1588 index = dpaa2_flow_extract_search(
1589 &priv->extract.tc_key_extract[group].dpkg,
1590 NET_PROT_IP, NH_FLD_IP_DST);
1593 size = NH_FLD_IPV4_ADDR_SIZE;
1595 size = NH_FLD_IPV6_ADDR_SIZE;
1596 ret = dpaa2_flow_extract_add(
1597 &priv->extract.tc_key_extract[group],
1602 DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1606 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1610 key = &spec_ipv4->hdr.dst_addr;
1612 key = spec_ipv6->hdr.dst_addr;
1614 mask = &mask_ipv4->hdr.dst_addr;
1615 size = NH_FLD_IPV4_ADDR_SIZE;
1616 prot = NET_PROT_IPV4;
1618 mask = &mask_ipv6->hdr.dst_addr[0];
1619 size = NH_FLD_IPV6_ADDR_SIZE;
1620 prot = NET_PROT_IPV6;
1623 ret = dpaa2_flow_rule_data_set(
1624 &priv->extract.qos_key_extract,
1626 prot, NH_FLD_IP_DST,
1629 DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1633 ret = dpaa2_flow_rule_data_set(
1634 &priv->extract.tc_key_extract[group],
1636 prot, NH_FLD_IP_DST,
1639 DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1642 flow->ipaddr_rule.qos_ipdst_offset =
1643 dpaa2_flow_extract_key_offset(
1644 &priv->extract.qos_key_extract,
1645 prot, NH_FLD_IP_DST);
1646 flow->ipaddr_rule.fs_ipdst_offset =
1647 dpaa2_flow_extract_key_offset(
1648 &priv->extract.tc_key_extract[group],
1649 prot, NH_FLD_IP_DST);
/* IP next-protocol field (IPv4 next_proto_id / IPv6 proto). */
1652 if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1653 (mask_ipv6 && mask_ipv6->hdr.proto)) {
1654 index = dpaa2_flow_extract_search(
1655 &priv->extract.qos_key_extract.dpkg,
1656 NET_PROT_IP, NH_FLD_IP_PROTO);
1658 ret = dpaa2_flow_extract_add(
1659 &priv->extract.qos_key_extract,
1662 NH_FLD_IP_PROTO_SIZE);
/* Fixed error text: this branch adds IP_PROTO, not IP_DST. */
1664 DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1668 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1671 index = dpaa2_flow_extract_search(
1672 &priv->extract.tc_key_extract[group].dpkg,
1673 NET_PROT_IP, NH_FLD_IP_PROTO);
1675 ret = dpaa2_flow_extract_add(
1676 &priv->extract.tc_key_extract[group],
1679 NH_FLD_IP_PROTO_SIZE);
/* Fixed error text: this branch adds IP_PROTO, not IP_DST. */
1681 DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1685 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1688 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1691 "Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1696 key = &spec_ipv4->hdr.next_proto_id;
1698 key = &spec_ipv6->hdr.proto;
1700 mask = &mask_ipv4->hdr.next_proto_id;
1702 mask = &mask_ipv6->hdr.proto;
1704 ret = dpaa2_flow_rule_data_set(
1705 &priv->extract.qos_key_extract,
1709 key, mask, NH_FLD_IP_PROTO_SIZE);
1711 DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1715 ret = dpaa2_flow_rule_data_set(
1716 &priv->extract.tc_key_extract[group],
1720 key, mask, NH_FLD_IP_PROTO_SIZE);
1722 DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1727 (*device_configured) |= local_cfg;
/* Translate an rte_flow ICMP pattern item into DPAA2 QoS/FS table
 * configuration: discriminate ICMP via the IP next-protocol field
 * (IPPROTO_ICMP), then optionally match icmp_type/icmp_code.
 * NOTE(review): intermediate lines of this listing are elided.
 */
1733 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1734 struct rte_eth_dev *dev,
1735 const struct rte_flow_attr *attr,
1736 const struct rte_flow_item *pattern,
1737 const struct rte_flow_action actions[] __rte_unused,
1738 struct rte_flow_error *error __rte_unused,
1739 int *device_configured)
1744 const struct rte_flow_item_icmp *spec, *mask;
1746 const struct rte_flow_item_icmp *last __rte_unused;
1747 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1749 group = attr->group;
1751 /* Parse pattern list to get the matching parameters */
1752 spec = (const struct rte_flow_item_icmp *)pattern->spec;
1753 last = (const struct rte_flow_item_icmp *)pattern->last;
1754 mask = (const struct rte_flow_item_icmp *)
1755 (pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1757 /* Get traffic class index and flow id to be configured */
1758 flow->tc_id = group;
1759 flow->tc_index = attr->priority;
1762 /* Don't care any field of ICMP header,
1763 * only care ICMP protocol.
1764 * Example: flow create 0 ingress pattern icmp /
1766 /* Next proto of Generical IP is actually used
1767 * for ICMP identification.
1769 struct proto_discrimination proto;
/* Ensure the IP_PROTO extract exists in the QoS key layout. */
1771 index = dpaa2_flow_extract_search(
1772 &priv->extract.qos_key_extract.dpkg,
1773 NET_PROT_IP, NH_FLD_IP_PROTO);
1775 ret = dpaa2_flow_proto_discrimination_extract(
1776 &priv->extract.qos_key_extract,
1777 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1780 "QoS Extract IP protocol to discriminate ICMP failed.");
1784 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same for the per-TC (FS) key layout of this group. */
1787 index = dpaa2_flow_extract_search(
1788 &priv->extract.tc_key_extract[group].dpkg,
1789 NET_PROT_IP, NH_FLD_IP_PROTO);
1791 ret = dpaa2_flow_proto_discrimination_extract(
1792 &priv->extract.tc_key_extract[group],
1793 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1796 "FS Extract IP protocol to discriminate ICMP failed.");
1800 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
/* Keep IP address extracts at the tail of the key (see the
 * entry-update logic elsewhere in this file).
 */
1803 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1806 "Move IP addr before ICMP discrimination set failed");
1810 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1811 proto.ip_proto = IPPROTO_ICMP;
1812 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1815 DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1819 (*device_configured) |= local_cfg;
/* Reject masks with fields the hardware cannot extract. */
1824 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1825 RTE_FLOW_ITEM_TYPE_ICMP)) {
1826 DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
/* Match on ICMP type when its mask byte is non-zero. */
1831 if (mask->hdr.icmp_type) {
1832 index = dpaa2_flow_extract_search(
1833 &priv->extract.qos_key_extract.dpkg,
1834 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1836 ret = dpaa2_flow_extract_add(
1837 &priv->extract.qos_key_extract,
1840 NH_FLD_ICMP_TYPE_SIZE);
1842 DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1846 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1849 index = dpaa2_flow_extract_search(
1850 &priv->extract.tc_key_extract[group].dpkg,
1851 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1853 ret = dpaa2_flow_extract_add(
1854 &priv->extract.tc_key_extract[group],
1857 NH_FLD_ICMP_TYPE_SIZE);
1859 DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1863 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1866 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1869 "Move ipaddr before ICMP TYPE set failed");
1873 ret = dpaa2_flow_rule_data_set(
1874 &priv->extract.qos_key_extract,
1878 &spec->hdr.icmp_type,
1879 &mask->hdr.icmp_type,
1880 NH_FLD_ICMP_TYPE_SIZE);
1882 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1886 ret = dpaa2_flow_rule_data_set(
1887 &priv->extract.tc_key_extract[group],
1891 &spec->hdr.icmp_type,
1892 &mask->hdr.icmp_type,
1893 NH_FLD_ICMP_TYPE_SIZE);
1895 DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
/* Match on ICMP code when its mask byte is non-zero. */
1900 if (mask->hdr.icmp_code) {
1901 index = dpaa2_flow_extract_search(
1902 &priv->extract.qos_key_extract.dpkg,
1903 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1905 ret = dpaa2_flow_extract_add(
1906 &priv->extract.qos_key_extract,
1909 NH_FLD_ICMP_CODE_SIZE);
1911 DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1915 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1918 index = dpaa2_flow_extract_search(
1919 &priv->extract.tc_key_extract[group].dpkg,
1920 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1922 ret = dpaa2_flow_extract_add(
1923 &priv->extract.tc_key_extract[group],
1926 NH_FLD_ICMP_CODE_SIZE);
1928 DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1932 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1935 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1938 "Move ipaddr after ICMP CODE set failed");
1942 ret = dpaa2_flow_rule_data_set(
1943 &priv->extract.qos_key_extract,
1947 &spec->hdr.icmp_code,
1948 &mask->hdr.icmp_code,
1949 NH_FLD_ICMP_CODE_SIZE);
1951 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1955 ret = dpaa2_flow_rule_data_set(
1956 &priv->extract.tc_key_extract[group],
1960 &spec->hdr.icmp_code,
1961 &mask->hdr.icmp_code,
1962 NH_FLD_ICMP_CODE_SIZE);
1964 DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1969 (*device_configured) |= local_cfg;
/* Translate an rte_flow UDP pattern item into DPAA2 QoS/FS table
 * configuration. If MC/WRIOP cannot identify L4 by ports (see
 * mc_l4_port_identification workaround at the top of this file),
 * UDP is discriminated via the IP next-protocol field instead.
 * NOTE(review): intermediate lines of this listing are elided.
 */
1975 dpaa2_configure_flow_udp(struct rte_flow *flow,
1976 struct rte_eth_dev *dev,
1977 const struct rte_flow_attr *attr,
1978 const struct rte_flow_item *pattern,
1979 const struct rte_flow_action actions[] __rte_unused,
1980 struct rte_flow_error *error __rte_unused,
1981 int *device_configured)
1986 const struct rte_flow_item_udp *spec, *mask;
1988 const struct rte_flow_item_udp *last __rte_unused;
1989 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1991 group = attr->group;
1993 /* Parse pattern list to get the matching parameters */
1994 spec = (const struct rte_flow_item_udp *)pattern->spec;
1995 last = (const struct rte_flow_item_udp *)pattern->last;
1996 mask = (const struct rte_flow_item_udp *)
1997 (pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
1999 /* Get traffic class index and flow id to be configured */
2000 flow->tc_id = group;
2001 flow->tc_index = attr->priority;
/* No spec, or MC cannot match L4 ports: discriminate UDP
 * by IP next-protocol (IPPROTO_UDP) only.
 */
2003 if (!spec || !mc_l4_port_identification) {
2004 struct proto_discrimination proto;
2006 index = dpaa2_flow_extract_search(
2007 &priv->extract.qos_key_extract.dpkg,
2008 NET_PROT_IP, NH_FLD_IP_PROTO);
2010 ret = dpaa2_flow_proto_discrimination_extract(
2011 &priv->extract.qos_key_extract,
2012 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2015 "QoS Extract IP protocol to discriminate UDP failed.");
2019 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2022 index = dpaa2_flow_extract_search(
2023 &priv->extract.tc_key_extract[group].dpkg,
2024 NET_PROT_IP, NH_FLD_IP_PROTO);
2026 ret = dpaa2_flow_proto_discrimination_extract(
2027 &priv->extract.tc_key_extract[group],
2028 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2031 "FS Extract IP protocol to discriminate UDP failed.");
2035 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2038 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2041 "Move IP addr before UDP discrimination set failed");
2045 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2046 proto.ip_proto = IPPROTO_UDP;
2047 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2050 DPAA2_PMD_ERR("UDP discrimination rule set failed");
2054 (*device_configured) |= local_cfg;
/* Reject masks with fields the hardware cannot extract. */
2060 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2061 RTE_FLOW_ITEM_TYPE_UDP)) {
2062 DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
/* Match on UDP source port when masked. */
2067 if (mask->hdr.src_port) {
2068 index = dpaa2_flow_extract_search(
2069 &priv->extract.qos_key_extract.dpkg,
2070 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2072 ret = dpaa2_flow_extract_add(
2073 &priv->extract.qos_key_extract,
2075 NH_FLD_UDP_PORT_SRC,
2076 NH_FLD_UDP_PORT_SIZE);
2078 DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2082 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2085 index = dpaa2_flow_extract_search(
2086 &priv->extract.tc_key_extract[group].dpkg,
2087 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2089 ret = dpaa2_flow_extract_add(
2090 &priv->extract.tc_key_extract[group],
2092 NH_FLD_UDP_PORT_SRC,
2093 NH_FLD_UDP_PORT_SIZE);
2095 DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2099 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2102 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2105 "Move ipaddr before UDP_PORT_SRC set failed");
2109 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2112 NH_FLD_UDP_PORT_SRC,
2113 &spec->hdr.src_port,
2114 &mask->hdr.src_port,
2115 NH_FLD_UDP_PORT_SIZE);
2118 "QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2122 ret = dpaa2_flow_rule_data_set(
2123 &priv->extract.tc_key_extract[group],
2126 NH_FLD_UDP_PORT_SRC,
2127 &spec->hdr.src_port,
2128 &mask->hdr.src_port,
2129 NH_FLD_UDP_PORT_SIZE);
2132 "FS NH_FLD_UDP_PORT_SRC rule data set failed");
/* Match on UDP destination port when masked. */
2137 if (mask->hdr.dst_port) {
2138 index = dpaa2_flow_extract_search(
2139 &priv->extract.qos_key_extract.dpkg,
2140 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2142 ret = dpaa2_flow_extract_add(
2143 &priv->extract.qos_key_extract,
2145 NH_FLD_UDP_PORT_DST,
2146 NH_FLD_UDP_PORT_SIZE);
2148 DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2152 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2155 index = dpaa2_flow_extract_search(
2156 &priv->extract.tc_key_extract[group].dpkg,
2157 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2159 ret = dpaa2_flow_extract_add(
2160 &priv->extract.tc_key_extract[group],
2162 NH_FLD_UDP_PORT_DST,
2163 NH_FLD_UDP_PORT_SIZE);
2165 DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2169 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2172 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2175 "Move ipaddr before UDP_PORT_DST set failed");
2179 ret = dpaa2_flow_rule_data_set(
2180 &priv->extract.qos_key_extract,
2183 NH_FLD_UDP_PORT_DST,
2184 &spec->hdr.dst_port,
2185 &mask->hdr.dst_port,
2186 NH_FLD_UDP_PORT_SIZE);
2189 "QoS NH_FLD_UDP_PORT_DST rule data set failed");
2193 ret = dpaa2_flow_rule_data_set(
2194 &priv->extract.tc_key_extract[group],
2197 NH_FLD_UDP_PORT_DST,
2198 &spec->hdr.dst_port,
2199 &mask->hdr.dst_port,
2200 NH_FLD_UDP_PORT_SIZE);
2203 "FS NH_FLD_UDP_PORT_DST rule data set failed");
2208 (*device_configured) |= local_cfg;
/* Translate an rte_flow TCP pattern item into DPAA2 QoS/FS table
 * configuration. Mirrors dpaa2_configure_flow_udp with TCP fields;
 * TCP may be discriminated via the IP next-protocol field when
 * MC/WRIOP cannot identify L4 by ports.
 * NOTE(review): intermediate lines of this listing are elided.
 */
2214 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2215 struct rte_eth_dev *dev,
2216 const struct rte_flow_attr *attr,
2217 const struct rte_flow_item *pattern,
2218 const struct rte_flow_action actions[] __rte_unused,
2219 struct rte_flow_error *error __rte_unused,
2220 int *device_configured)
2225 const struct rte_flow_item_tcp *spec, *mask;
2227 const struct rte_flow_item_tcp *last __rte_unused;
2228 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2230 group = attr->group;
2232 /* Parse pattern list to get the matching parameters */
2233 spec = (const struct rte_flow_item_tcp *)pattern->spec;
2234 last = (const struct rte_flow_item_tcp *)pattern->last;
2235 mask = (const struct rte_flow_item_tcp *)
2236 (pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2238 /* Get traffic class index and flow id to be configured */
2239 flow->tc_id = group;
2240 flow->tc_index = attr->priority;
/* No spec, or MC cannot match L4 ports: discriminate TCP
 * by IP next-protocol (IPPROTO_TCP) only.
 */
2242 if (!spec || !mc_l4_port_identification) {
2243 struct proto_discrimination proto;
2245 index = dpaa2_flow_extract_search(
2246 &priv->extract.qos_key_extract.dpkg,
2247 NET_PROT_IP, NH_FLD_IP_PROTO);
2249 ret = dpaa2_flow_proto_discrimination_extract(
2250 &priv->extract.qos_key_extract,
2251 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2254 "QoS Extract IP protocol to discriminate TCP failed.");
2258 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2261 index = dpaa2_flow_extract_search(
2262 &priv->extract.tc_key_extract[group].dpkg,
2263 NET_PROT_IP, NH_FLD_IP_PROTO);
2265 ret = dpaa2_flow_proto_discrimination_extract(
2266 &priv->extract.tc_key_extract[group],
2267 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2270 "FS Extract IP protocol to discriminate TCP failed.");
2274 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2277 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2280 "Move IP addr before TCP discrimination set failed");
2284 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2285 proto.ip_proto = IPPROTO_TCP;
2286 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2289 DPAA2_PMD_ERR("TCP discrimination rule set failed");
2293 (*device_configured) |= local_cfg;
/* Reject masks with fields the hardware cannot extract. */
2299 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2300 RTE_FLOW_ITEM_TYPE_TCP)) {
2301 DPAA2_PMD_WARN("Extract field(s) of TCP not support.");
/* Match on TCP source port when masked. */
2306 if (mask->hdr.src_port) {
2307 index = dpaa2_flow_extract_search(
2308 &priv->extract.qos_key_extract.dpkg,
2309 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2311 ret = dpaa2_flow_extract_add(
2312 &priv->extract.qos_key_extract,
2314 NH_FLD_TCP_PORT_SRC,
2315 NH_FLD_TCP_PORT_SIZE);
2317 DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2321 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2324 index = dpaa2_flow_extract_search(
2325 &priv->extract.tc_key_extract[group].dpkg,
2326 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2328 ret = dpaa2_flow_extract_add(
2329 &priv->extract.tc_key_extract[group],
2331 NH_FLD_TCP_PORT_SRC,
2332 NH_FLD_TCP_PORT_SIZE);
2334 DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2338 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2341 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2344 "Move ipaddr before TCP_PORT_SRC set failed");
2348 ret = dpaa2_flow_rule_data_set(
2349 &priv->extract.qos_key_extract,
2352 NH_FLD_TCP_PORT_SRC,
2353 &spec->hdr.src_port,
2354 &mask->hdr.src_port,
2355 NH_FLD_TCP_PORT_SIZE);
2358 "QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2362 ret = dpaa2_flow_rule_data_set(
2363 &priv->extract.tc_key_extract[group],
2366 NH_FLD_TCP_PORT_SRC,
2367 &spec->hdr.src_port,
2368 &mask->hdr.src_port,
2369 NH_FLD_TCP_PORT_SIZE);
2372 "FS NH_FLD_TCP_PORT_SRC rule data set failed");
/* Match on TCP destination port when masked. */
2377 if (mask->hdr.dst_port) {
2378 index = dpaa2_flow_extract_search(
2379 &priv->extract.qos_key_extract.dpkg,
2380 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2382 ret = dpaa2_flow_extract_add(
2383 &priv->extract.qos_key_extract,
2385 NH_FLD_TCP_PORT_DST,
2386 NH_FLD_TCP_PORT_SIZE);
2388 DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2392 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2395 index = dpaa2_flow_extract_search(
2396 &priv->extract.tc_key_extract[group].dpkg,
2397 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2399 ret = dpaa2_flow_extract_add(
2400 &priv->extract.tc_key_extract[group],
2402 NH_FLD_TCP_PORT_DST,
2403 NH_FLD_TCP_PORT_SIZE);
2405 DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2409 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2412 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2415 "Move ipaddr before TCP_PORT_DST set failed");
2419 ret = dpaa2_flow_rule_data_set(
2420 &priv->extract.qos_key_extract,
2423 NH_FLD_TCP_PORT_DST,
2424 &spec->hdr.dst_port,
2425 &mask->hdr.dst_port,
2426 NH_FLD_TCP_PORT_SIZE);
2429 "QoS NH_FLD_TCP_PORT_DST rule data set failed");
2433 ret = dpaa2_flow_rule_data_set(
2434 &priv->extract.tc_key_extract[group],
2437 NH_FLD_TCP_PORT_DST,
2438 &spec->hdr.dst_port,
2439 &mask->hdr.dst_port,
2440 NH_FLD_TCP_PORT_SIZE);
2443 "FS NH_FLD_TCP_PORT_DST rule data set failed");
2448 (*device_configured) |= local_cfg;
/* Translate an rte_flow SCTP pattern item into DPAA2 QoS/FS table
 * configuration. Mirrors the UDP/TCP handlers with SCTP fields;
 * SCTP may be discriminated via the IP next-protocol field when
 * MC/WRIOP cannot identify L4 by ports.
 * NOTE(review): intermediate lines of this listing are elided.
 */
2454 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2455 struct rte_eth_dev *dev,
2456 const struct rte_flow_attr *attr,
2457 const struct rte_flow_item *pattern,
2458 const struct rte_flow_action actions[] __rte_unused,
2459 struct rte_flow_error *error __rte_unused,
2460 int *device_configured)
2465 const struct rte_flow_item_sctp *spec, *mask;
2467 const struct rte_flow_item_sctp *last __rte_unused;
2468 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2470 group = attr->group;
2472 /* Parse pattern list to get the matching parameters */
2473 spec = (const struct rte_flow_item_sctp *)pattern->spec;
2474 last = (const struct rte_flow_item_sctp *)pattern->last;
2475 mask = (const struct rte_flow_item_sctp *)
2476 (pattern->mask ? pattern->mask :
2477 &dpaa2_flow_item_sctp_mask);
2479 /* Get traffic class index and flow id to be configured */
2480 flow->tc_id = group;
2481 flow->tc_index = attr->priority;
/* No spec, or MC cannot match L4 ports: discriminate SCTP
 * by IP next-protocol (IPPROTO_SCTP) only.
 */
2483 if (!spec || !mc_l4_port_identification) {
2484 struct proto_discrimination proto;
2486 index = dpaa2_flow_extract_search(
2487 &priv->extract.qos_key_extract.dpkg,
2488 NET_PROT_IP, NH_FLD_IP_PROTO);
2490 ret = dpaa2_flow_proto_discrimination_extract(
2491 &priv->extract.qos_key_extract,
2492 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2495 "QoS Extract IP protocol to discriminate SCTP failed.");
2499 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2502 index = dpaa2_flow_extract_search(
2503 &priv->extract.tc_key_extract[group].dpkg,
2504 NET_PROT_IP, NH_FLD_IP_PROTO);
2506 ret = dpaa2_flow_proto_discrimination_extract(
2507 &priv->extract.tc_key_extract[group],
2508 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2511 "FS Extract IP protocol to discriminate SCTP failed.");
2515 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2518 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2521 "Move ipaddr before SCTP discrimination set failed");
2525 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2526 proto.ip_proto = IPPROTO_SCTP;
2527 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2530 DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2534 (*device_configured) |= local_cfg;
/* Reject masks with fields the hardware cannot extract. */
2540 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2541 RTE_FLOW_ITEM_TYPE_SCTP)) {
2542 DPAA2_PMD_WARN("Extract field(s) of SCTP not support.");
/* Match on SCTP source port when masked. */
2547 if (mask->hdr.src_port) {
2548 index = dpaa2_flow_extract_search(
2549 &priv->extract.qos_key_extract.dpkg,
2550 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2552 ret = dpaa2_flow_extract_add(
2553 &priv->extract.qos_key_extract,
2555 NH_FLD_SCTP_PORT_SRC,
2556 NH_FLD_SCTP_PORT_SIZE);
2558 DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2562 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2565 index = dpaa2_flow_extract_search(
2566 &priv->extract.tc_key_extract[group].dpkg,
2567 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2569 ret = dpaa2_flow_extract_add(
2570 &priv->extract.tc_key_extract[group],
2572 NH_FLD_SCTP_PORT_SRC,
2573 NH_FLD_SCTP_PORT_SIZE);
2575 DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2579 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2582 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2585 "Move ipaddr before SCTP_PORT_SRC set failed");
2589 ret = dpaa2_flow_rule_data_set(
2590 &priv->extract.qos_key_extract,
2593 NH_FLD_SCTP_PORT_SRC,
2594 &spec->hdr.src_port,
2595 &mask->hdr.src_port,
2596 NH_FLD_SCTP_PORT_SIZE);
2599 "QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2603 ret = dpaa2_flow_rule_data_set(
2604 &priv->extract.tc_key_extract[group],
2607 NH_FLD_SCTP_PORT_SRC,
2608 &spec->hdr.src_port,
2609 &mask->hdr.src_port,
2610 NH_FLD_SCTP_PORT_SIZE);
2613 "FS NH_FLD_SCTP_PORT_SRC rule data set failed");
/* Match on SCTP destination port when masked. */
2618 if (mask->hdr.dst_port) {
2619 index = dpaa2_flow_extract_search(
2620 &priv->extract.qos_key_extract.dpkg,
2621 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2623 ret = dpaa2_flow_extract_add(
2624 &priv->extract.qos_key_extract,
2626 NH_FLD_SCTP_PORT_DST,
2627 NH_FLD_SCTP_PORT_SIZE);
2629 DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2633 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2636 index = dpaa2_flow_extract_search(
2637 &priv->extract.tc_key_extract[group].dpkg,
2638 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2640 ret = dpaa2_flow_extract_add(
2641 &priv->extract.tc_key_extract[group],
2643 NH_FLD_SCTP_PORT_DST,
2644 NH_FLD_SCTP_PORT_SIZE);
2646 DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2650 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2653 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2656 "Move ipaddr before SCTP_PORT_DST set failed");
2660 ret = dpaa2_flow_rule_data_set(
2661 &priv->extract.qos_key_extract,
2664 NH_FLD_SCTP_PORT_DST,
2665 &spec->hdr.dst_port,
2666 &mask->hdr.dst_port,
2667 NH_FLD_SCTP_PORT_SIZE);
2670 "QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2674 ret = dpaa2_flow_rule_data_set(
2675 &priv->extract.tc_key_extract[group],
2678 NH_FLD_SCTP_PORT_DST,
2679 &spec->hdr.dst_port,
2680 &mask->hdr.dst_port,
2681 NH_FLD_SCTP_PORT_SIZE);
2684 "FS NH_FLD_SCTP_PORT_DST rule data set failed");
2689 (*device_configured) |= local_cfg;
/* Translate an rte_flow GRE pattern item into DPAA2 QoS/FS table
 * configuration: discriminate GRE via the IP next-protocol field
 * (IPPROTO_GRE), then optionally match the GRE protocol type.
 * NOTE(review): intermediate lines of this listing are elided.
 */
2695 dpaa2_configure_flow_gre(struct rte_flow *flow,
2696 struct rte_eth_dev *dev,
2697 const struct rte_flow_attr *attr,
2698 const struct rte_flow_item *pattern,
2699 const struct rte_flow_action actions[] __rte_unused,
2700 struct rte_flow_error *error __rte_unused,
2701 int *device_configured)
2706 const struct rte_flow_item_gre *spec, *mask;
2708 const struct rte_flow_item_gre *last __rte_unused;
2709 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2711 group = attr->group;
2713 /* Parse pattern list to get the matching parameters */
2714 spec = (const struct rte_flow_item_gre *)pattern->spec;
2715 last = (const struct rte_flow_item_gre *)pattern->last;
2716 mask = (const struct rte_flow_item_gre *)
2717 (pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2719 /* Get traffic class index and flow id to be configured */
2720 flow->tc_id = group;
2721 flow->tc_index = attr->priority;
2724 struct proto_discrimination proto;
2726 index = dpaa2_flow_extract_search(
2727 &priv->extract.qos_key_extract.dpkg,
2728 NET_PROT_IP, NH_FLD_IP_PROTO);
2730 ret = dpaa2_flow_proto_discrimination_extract(
2731 &priv->extract.qos_key_extract,
2732 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2735 "QoS Extract IP protocol to discriminate GRE failed.");
2739 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2742 index = dpaa2_flow_extract_search(
2743 &priv->extract.tc_key_extract[group].dpkg,
2744 NET_PROT_IP, NH_FLD_IP_PROTO);
2746 ret = dpaa2_flow_proto_discrimination_extract(
2747 &priv->extract.tc_key_extract[group],
2748 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2751 "FS Extract IP protocol to discriminate GRE failed.");
2755 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2758 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2761 "Move IP addr before GRE discrimination set failed");
2765 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2766 proto.ip_proto = IPPROTO_GRE;
2767 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2770 DPAA2_PMD_ERR("GRE discrimination rule set failed");
2774 (*device_configured) |= local_cfg;
/* Reject masks with fields the hardware cannot extract. */
2779 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2780 RTE_FLOW_ITEM_TYPE_GRE)) {
2781 DPAA2_PMD_WARN("Extract field(s) of GRE not support.");
/* Only the GRE protocol-type field is matchable beyond
 * discrimination; nothing more to do if it is unmasked.
 */
2786 if (!mask->protocol)
2789 index = dpaa2_flow_extract_search(
2790 &priv->extract.qos_key_extract.dpkg,
2791 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2793 ret = dpaa2_flow_extract_add(
2794 &priv->extract.qos_key_extract,
2797 sizeof(rte_be16_t));
2799 DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2803 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2806 index = dpaa2_flow_extract_search(
2807 &priv->extract.tc_key_extract[group].dpkg,
2808 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2810 ret = dpaa2_flow_extract_add(
2811 &priv->extract.tc_key_extract[group],
2814 sizeof(rte_be16_t));
2816 DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2820 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2823 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2826 "Move ipaddr before GRE_TYPE set failed");
2830 ret = dpaa2_flow_rule_data_set(
2831 &priv->extract.qos_key_extract,
2837 sizeof(rte_be16_t));
2840 "QoS NH_FLD_GRE_TYPE rule data set failed");
2844 ret = dpaa2_flow_rule_data_set(
2845 &priv->extract.tc_key_extract[group],
2851 sizeof(rte_be16_t));
2854 "FS NH_FLD_GRE_TYPE rule data set failed");
2858 (*device_configured) |= local_cfg;
/* Configure a raw (byte-string) flow rule. Only non-relative
 * patterns at offset 0 without search/limit are supported; the
 * spec/mask bytes are applied to both the QoS and FS rule keys.
 * NOTE(review): intermediate lines of this listing are elided.
 */
2864 dpaa2_configure_flow_raw(struct rte_flow *flow,
2865 struct rte_eth_dev *dev,
2866 const struct rte_flow_attr *attr,
2867 const struct rte_flow_item *pattern,
2868 const struct rte_flow_action actions[] __rte_unused,
2869 struct rte_flow_error *error __rte_unused,
2870 int *device_configured)
2872 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2873 const struct rte_flow_item_raw *spec = pattern->spec;
2874 const struct rte_flow_item_raw *mask = pattern->mask;
2876 priv->extract.qos_key_extract.key_info.key_total_size;
2877 int local_cfg = 0, ret;
2880 /* Need both spec and mask */
2881 if (!spec || !mask) {
2882 DPAA2_PMD_ERR("spec or mask not present.");
2885 /* Only supports non-relative with offset 0 */
2886 if (spec->relative || spec->offset != 0 ||
2887 spec->search || spec->limit) {
2888 DPAA2_PMD_ERR("relative and non zero offset not supported.");
2891 /* Spec len and mask len should be same */
2892 if (spec->length != mask->length) {
2893 DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2897 /* Get traffic class index and flow id to be configured */
2898 group = attr->group;
2899 flow->tc_id = group;
2900 flow->tc_index = attr->priority;
/* Grow the raw extracts only when the requested length exceeds
 * the current key size (prev_key_size read above).
 */
2902 if (prev_key_size <= spec->length) {
2903 ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2906 DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2909 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2911 ret = dpaa2_flow_extract_add_raw(
2912 &priv->extract.tc_key_extract[group],
2915 DPAA2_PMD_ERR("FS Extract RAW add failed.");
2918 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
/* Copy the raw pattern bytes into both rule keys. */
2921 ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2922 mask->pattern, spec->length);
2924 DPAA2_PMD_ERR("QoS RAW rule data set failed");
2928 ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2929 mask->pattern, spec->length);
2931 DPAA2_PMD_ERR("FS RAW rule data set failed");
2935 (*device_configured) |= local_cfg;
2940 /* Existing QoS/FS entries that match on IP address(es)
2941 * must be updated after
2942 * new extract(s) are inserted ahead of the IP
2943 * address extract(s), since their key offsets shift.
2946 dpaa2_flow_entry_update(
2947 struct dpaa2_dev_priv *priv, uint8_t tc_id)
2949 struct rte_flow *curr = LIST_FIRST(&priv->flows);
2950 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2952 int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2953 int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2954 struct dpaa2_key_extract *qos_key_extract =
2955 &priv->extract.qos_key_extract;
2956 struct dpaa2_key_extract *tc_key_extract =
2957 &priv->extract.tc_key_extract[tc_id];
2958 char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2959 char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2960 char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2961 char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2962 int extend = -1, extend1, size = -1;
2966 if (curr->ipaddr_rule.ipaddr_type ==
2968 curr = LIST_NEXT(curr, next);
2972 if (curr->ipaddr_rule.ipaddr_type ==
2975 qos_key_extract->key_info.ipv4_src_offset;
2977 qos_key_extract->key_info.ipv4_dst_offset;
2979 tc_key_extract->key_info.ipv4_src_offset;
2981 tc_key_extract->key_info.ipv4_dst_offset;
2982 size = NH_FLD_IPV4_ADDR_SIZE;
2985 qos_key_extract->key_info.ipv6_src_offset;
2987 qos_key_extract->key_info.ipv6_dst_offset;
2989 tc_key_extract->key_info.ipv6_src_offset;
2991 tc_key_extract->key_info.ipv6_dst_offset;
2992 size = NH_FLD_IPV6_ADDR_SIZE;
2995 qos_index = curr->tc_id * priv->fs_entries +
2998 dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
3000 if (priv->num_rx_tc > 1) {
3001 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3002 priv->token, &curr->qos_rule);
3004 DPAA2_PMD_ERR("Qos entry remove failed.");
3011 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3012 RTE_ASSERT(qos_ipsrc_offset >=
3013 curr->ipaddr_rule.qos_ipsrc_offset);
3014 extend1 = qos_ipsrc_offset -
3015 curr->ipaddr_rule.qos_ipsrc_offset;
3017 RTE_ASSERT(extend == extend1);
3021 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3022 (size == NH_FLD_IPV6_ADDR_SIZE));
3025 (char *)(size_t)curr->qos_rule.key_iova +
3026 curr->ipaddr_rule.qos_ipsrc_offset,
3028 memset((char *)(size_t)curr->qos_rule.key_iova +
3029 curr->ipaddr_rule.qos_ipsrc_offset,
3033 (char *)(size_t)curr->qos_rule.mask_iova +
3034 curr->ipaddr_rule.qos_ipsrc_offset,
3036 memset((char *)(size_t)curr->qos_rule.mask_iova +
3037 curr->ipaddr_rule.qos_ipsrc_offset,
3040 curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3043 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3044 RTE_ASSERT(qos_ipdst_offset >=
3045 curr->ipaddr_rule.qos_ipdst_offset);
3046 extend1 = qos_ipdst_offset -
3047 curr->ipaddr_rule.qos_ipdst_offset;
3049 RTE_ASSERT(extend == extend1);
3053 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3054 (size == NH_FLD_IPV6_ADDR_SIZE));
3057 (char *)(size_t)curr->qos_rule.key_iova +
3058 curr->ipaddr_rule.qos_ipdst_offset,
3060 memset((char *)(size_t)curr->qos_rule.key_iova +
3061 curr->ipaddr_rule.qos_ipdst_offset,
3065 (char *)(size_t)curr->qos_rule.mask_iova +
3066 curr->ipaddr_rule.qos_ipdst_offset,
3068 memset((char *)(size_t)curr->qos_rule.mask_iova +
3069 curr->ipaddr_rule.qos_ipdst_offset,
3072 curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3075 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3076 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3077 (size == NH_FLD_IPV6_ADDR_SIZE));
3078 memcpy((char *)(size_t)curr->qos_rule.key_iova +
3079 curr->ipaddr_rule.qos_ipsrc_offset,
3082 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3083 curr->ipaddr_rule.qos_ipsrc_offset,
3087 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3088 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3089 (size == NH_FLD_IPV6_ADDR_SIZE));
3090 memcpy((char *)(size_t)curr->qos_rule.key_iova +
3091 curr->ipaddr_rule.qos_ipdst_offset,
3094 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3095 curr->ipaddr_rule.qos_ipdst_offset,
3101 curr->qos_real_key_size += extend;
3103 curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3105 dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
3107 if (priv->num_rx_tc > 1) {
3108 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3109 priv->token, &curr->qos_rule,
3110 curr->tc_id, qos_index,
3113 DPAA2_PMD_ERR("Qos entry update failed.");
3118 if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
3119 curr = LIST_NEXT(curr, next);
3123 dpaa2_flow_fs_entry_log("Before update", curr);
3126 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3127 priv->token, curr->tc_id, &curr->fs_rule);
3129 DPAA2_PMD_ERR("FS entry remove failed.");
3133 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3134 tc_id == curr->tc_id) {
3135 RTE_ASSERT(fs_ipsrc_offset >=
3136 curr->ipaddr_rule.fs_ipsrc_offset);
3137 extend1 = fs_ipsrc_offset -
3138 curr->ipaddr_rule.fs_ipsrc_offset;
3140 RTE_ASSERT(extend == extend1);
3145 (char *)(size_t)curr->fs_rule.key_iova +
3146 curr->ipaddr_rule.fs_ipsrc_offset,
3148 memset((char *)(size_t)curr->fs_rule.key_iova +
3149 curr->ipaddr_rule.fs_ipsrc_offset,
3153 (char *)(size_t)curr->fs_rule.mask_iova +
3154 curr->ipaddr_rule.fs_ipsrc_offset,
3156 memset((char *)(size_t)curr->fs_rule.mask_iova +
3157 curr->ipaddr_rule.fs_ipsrc_offset,
3160 curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3163 if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3164 tc_id == curr->tc_id) {
3165 RTE_ASSERT(fs_ipdst_offset >=
3166 curr->ipaddr_rule.fs_ipdst_offset);
3167 extend1 = fs_ipdst_offset -
3168 curr->ipaddr_rule.fs_ipdst_offset;
3170 RTE_ASSERT(extend == extend1);
3175 (char *)(size_t)curr->fs_rule.key_iova +
3176 curr->ipaddr_rule.fs_ipdst_offset,
3178 memset((char *)(size_t)curr->fs_rule.key_iova +
3179 curr->ipaddr_rule.fs_ipdst_offset,
3183 (char *)(size_t)curr->fs_rule.mask_iova +
3184 curr->ipaddr_rule.fs_ipdst_offset,
3186 memset((char *)(size_t)curr->fs_rule.mask_iova +
3187 curr->ipaddr_rule.fs_ipdst_offset,
3190 curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3193 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3194 memcpy((char *)(size_t)curr->fs_rule.key_iova +
3195 curr->ipaddr_rule.fs_ipsrc_offset,
3198 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3199 curr->ipaddr_rule.fs_ipsrc_offset,
3203 if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3204 memcpy((char *)(size_t)curr->fs_rule.key_iova +
3205 curr->ipaddr_rule.fs_ipdst_offset,
3208 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3209 curr->ipaddr_rule.fs_ipdst_offset,
3215 curr->fs_real_key_size += extend;
3216 curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3218 dpaa2_flow_fs_entry_log("Start update", curr);
3220 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3221 priv->token, curr->tc_id, curr->tc_index,
3222 &curr->fs_rule, &curr->action_cfg);
3224 DPAA2_PMD_ERR("FS entry update failed.");
3228 curr = LIST_NEXT(curr, next);
3235 dpaa2_flow_verify_attr(
3236 struct dpaa2_dev_priv *priv,
3237 const struct rte_flow_attr *attr)
3239 struct rte_flow *curr = LIST_FIRST(&priv->flows);
3242 if (curr->tc_id == attr->group &&
3243 curr->tc_index == attr->priority) {
3245 "Flow with group %d and priority %d already exists.",
3246 attr->group, attr->priority);
3250 curr = LIST_NEXT(curr, next);
3257 dpaa2_flow_verify_action(
3258 struct dpaa2_dev_priv *priv,
3259 const struct rte_flow_attr *attr,
3260 const struct rte_flow_action actions[])
3262 int end_of_list = 0, i, j = 0;
3263 const struct rte_flow_action_queue *dest_queue;
3264 const struct rte_flow_action_rss *rss_conf;
3265 struct dpaa2_queue *rxq;
3267 while (!end_of_list) {
3268 switch (actions[j].type) {
3269 case RTE_FLOW_ACTION_TYPE_QUEUE:
3270 dest_queue = (const struct rte_flow_action_queue *)
3272 rxq = priv->rx_vq[dest_queue->index];
3273 if (attr->group != rxq->tc_index) {
3275 "RXQ[%d] does not belong to the group %d",
3276 dest_queue->index, attr->group);
3281 case RTE_FLOW_ACTION_TYPE_RSS:
3282 rss_conf = (const struct rte_flow_action_rss *)
3284 if (rss_conf->queue_num > priv->dist_queues) {
3286 "RSS number exceeds the distrbution size");
3289 for (i = 0; i < (int)rss_conf->queue_num; i++) {
3290 if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3292 "RSS queue index exceeds the number of RXQs");
3295 rxq = priv->rx_vq[rss_conf->queue[i]];
3296 if (rxq->tc_index != attr->group) {
3298 "Queue/Group combination are not supported\n");
3304 case RTE_FLOW_ACTION_TYPE_END:
3308 DPAA2_PMD_ERR("Invalid action type");
3318 dpaa2_generic_flow_set(struct rte_flow *flow,
3319 struct rte_eth_dev *dev,
3320 const struct rte_flow_attr *attr,
3321 const struct rte_flow_item pattern[],
3322 const struct rte_flow_action actions[],
3323 struct rte_flow_error *error)
3325 const struct rte_flow_action_queue *dest_queue;
3326 const struct rte_flow_action_rss *rss_conf;
3327 int is_keycfg_configured = 0, end_of_list = 0;
3328 int ret = 0, i = 0, j = 0;
3329 struct dpni_rx_dist_cfg tc_cfg;
3330 struct dpni_qos_tbl_cfg qos_cfg;
3331 struct dpni_fs_action_cfg action;
3332 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3333 struct dpaa2_queue *rxq;
3334 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3336 struct rte_flow *curr = LIST_FIRST(&priv->flows);
3339 ret = dpaa2_flow_verify_attr(priv, attr);
3343 ret = dpaa2_flow_verify_action(priv, attr, actions);
3347 /* Parse pattern list to get the matching parameters */
3348 while (!end_of_list) {
3349 switch (pattern[i].type) {
3350 case RTE_FLOW_ITEM_TYPE_ETH:
3351 ret = dpaa2_configure_flow_eth(flow,
3352 dev, attr, &pattern[i], actions, error,
3353 &is_keycfg_configured);
3355 DPAA2_PMD_ERR("ETH flow configuration failed!");
3359 case RTE_FLOW_ITEM_TYPE_VLAN:
3360 ret = dpaa2_configure_flow_vlan(flow,
3361 dev, attr, &pattern[i], actions, error,
3362 &is_keycfg_configured);
3364 DPAA2_PMD_ERR("vLan flow configuration failed!");
3368 case RTE_FLOW_ITEM_TYPE_IPV4:
3369 case RTE_FLOW_ITEM_TYPE_IPV6:
3370 ret = dpaa2_configure_flow_generic_ip(flow,
3371 dev, attr, &pattern[i], actions, error,
3372 &is_keycfg_configured);
3374 DPAA2_PMD_ERR("IP flow configuration failed!");
3378 case RTE_FLOW_ITEM_TYPE_ICMP:
3379 ret = dpaa2_configure_flow_icmp(flow,
3380 dev, attr, &pattern[i], actions, error,
3381 &is_keycfg_configured);
3383 DPAA2_PMD_ERR("ICMP flow configuration failed!");
3387 case RTE_FLOW_ITEM_TYPE_UDP:
3388 ret = dpaa2_configure_flow_udp(flow,
3389 dev, attr, &pattern[i], actions, error,
3390 &is_keycfg_configured);
3392 DPAA2_PMD_ERR("UDP flow configuration failed!");
3396 case RTE_FLOW_ITEM_TYPE_TCP:
3397 ret = dpaa2_configure_flow_tcp(flow,
3398 dev, attr, &pattern[i], actions, error,
3399 &is_keycfg_configured);
3401 DPAA2_PMD_ERR("TCP flow configuration failed!");
3405 case RTE_FLOW_ITEM_TYPE_SCTP:
3406 ret = dpaa2_configure_flow_sctp(flow,
3407 dev, attr, &pattern[i], actions, error,
3408 &is_keycfg_configured);
3410 DPAA2_PMD_ERR("SCTP flow configuration failed!");
3414 case RTE_FLOW_ITEM_TYPE_GRE:
3415 ret = dpaa2_configure_flow_gre(flow,
3416 dev, attr, &pattern[i], actions, error,
3417 &is_keycfg_configured);
3419 DPAA2_PMD_ERR("GRE flow configuration failed!");
3423 case RTE_FLOW_ITEM_TYPE_RAW:
3424 ret = dpaa2_configure_flow_raw(flow,
3425 dev, attr, &pattern[i],
3427 &is_keycfg_configured);
3429 DPAA2_PMD_ERR("RAW flow configuration failed!");
3433 case RTE_FLOW_ITEM_TYPE_END:
3435 break; /*End of List*/
3437 DPAA2_PMD_ERR("Invalid action type");
3444 /* Let's parse action on matching traffic */
3446 while (!end_of_list) {
3447 switch (actions[j].type) {
3448 case RTE_FLOW_ACTION_TYPE_QUEUE:
3450 (const struct rte_flow_action_queue *)(actions[j].conf);
3451 rxq = priv->rx_vq[dest_queue->index];
3452 flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3453 memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3454 action.flow_id = rxq->flow_id;
3456 /* Configure FS table first*/
3457 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3458 dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
3459 if (dpkg_prepare_key_cfg(
3460 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3461 (uint8_t *)(size_t)priv->extract
3462 .tc_extract_param[flow->tc_id]) < 0) {
3464 "Unable to prepare extract parameters");
3469 sizeof(struct dpni_rx_dist_cfg));
3470 tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3471 tc_cfg.key_cfg_iova =
3472 (uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3473 tc_cfg.tc = flow->tc_id;
3474 tc_cfg.enable = false;
3475 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3476 priv->token, &tc_cfg);
3479 "TC hash cannot be disabled.(%d)",
3483 tc_cfg.enable = true;
3484 tc_cfg.fs_miss_flow_id =
3485 dpaa2_flow_miss_flow_id;
3486 ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
3487 priv->token, &tc_cfg);
3490 "TC distribution cannot be configured.(%d)",
3496 /* Configure QoS table then.*/
3497 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3498 dpaa2_flow_qos_table_extracts_log(priv);
3499 if (dpkg_prepare_key_cfg(
3500 &priv->extract.qos_key_extract.dpkg,
3501 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3503 "Unable to prepare extract parameters");
3507 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3508 qos_cfg.discard_on_miss = false;
3509 qos_cfg.default_tc = 0;
3510 qos_cfg.keep_entries = true;
3511 qos_cfg.key_cfg_iova =
3512 (size_t)priv->extract.qos_extract_param;
3513 /* QoS table is effecitive for multiple TCs.*/
3514 if (priv->num_rx_tc > 1) {
3515 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3516 priv->token, &qos_cfg);
3519 "RSS QoS table can not be configured(%d)\n",
3526 flow->qos_real_key_size = priv->extract
3527 .qos_key_extract.key_info.key_total_size;
3528 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3529 if (flow->ipaddr_rule.qos_ipdst_offset >=
3530 flow->ipaddr_rule.qos_ipsrc_offset) {
3531 flow->qos_real_key_size =
3532 flow->ipaddr_rule.qos_ipdst_offset +
3533 NH_FLD_IPV4_ADDR_SIZE;
3535 flow->qos_real_key_size =
3536 flow->ipaddr_rule.qos_ipsrc_offset +
3537 NH_FLD_IPV4_ADDR_SIZE;
3539 } else if (flow->ipaddr_rule.ipaddr_type ==
3541 if (flow->ipaddr_rule.qos_ipdst_offset >=
3542 flow->ipaddr_rule.qos_ipsrc_offset) {
3543 flow->qos_real_key_size =
3544 flow->ipaddr_rule.qos_ipdst_offset +
3545 NH_FLD_IPV6_ADDR_SIZE;
3547 flow->qos_real_key_size =
3548 flow->ipaddr_rule.qos_ipsrc_offset +
3549 NH_FLD_IPV6_ADDR_SIZE;
3553 /* QoS entry added is only effective for multiple TCs.*/
3554 if (priv->num_rx_tc > 1) {
3555 qos_index = flow->tc_id * priv->fs_entries +
3557 if (qos_index >= priv->qos_entries) {
3558 DPAA2_PMD_ERR("QoS table with %d entries full",
3562 flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3564 dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
3566 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3567 priv->token, &flow->qos_rule,
3568 flow->tc_id, qos_index,
3572 "Error in addnig entry to QoS table(%d)", ret);
3577 if (flow->tc_index >= priv->fs_entries) {
3578 DPAA2_PMD_ERR("FS table with %d entries full",
3583 flow->fs_real_key_size =
3584 priv->extract.tc_key_extract[flow->tc_id]
3585 .key_info.key_total_size;
3587 if (flow->ipaddr_rule.ipaddr_type ==
3589 if (flow->ipaddr_rule.fs_ipdst_offset >=
3590 flow->ipaddr_rule.fs_ipsrc_offset) {
3591 flow->fs_real_key_size =
3592 flow->ipaddr_rule.fs_ipdst_offset +
3593 NH_FLD_IPV4_ADDR_SIZE;
3595 flow->fs_real_key_size =
3596 flow->ipaddr_rule.fs_ipsrc_offset +
3597 NH_FLD_IPV4_ADDR_SIZE;
3599 } else if (flow->ipaddr_rule.ipaddr_type ==
3601 if (flow->ipaddr_rule.fs_ipdst_offset >=
3602 flow->ipaddr_rule.fs_ipsrc_offset) {
3603 flow->fs_real_key_size =
3604 flow->ipaddr_rule.fs_ipdst_offset +
3605 NH_FLD_IPV6_ADDR_SIZE;
3607 flow->fs_real_key_size =
3608 flow->ipaddr_rule.fs_ipsrc_offset +
3609 NH_FLD_IPV6_ADDR_SIZE;
3613 flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3615 dpaa2_flow_fs_entry_log("Start add", flow);
3617 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3618 flow->tc_id, flow->tc_index,
3619 &flow->fs_rule, &action);
3622 "Error in adding entry to FS table(%d)", ret);
3625 memcpy(&flow->action_cfg, &action,
3626 sizeof(struct dpni_fs_action_cfg));
3628 case RTE_FLOW_ACTION_TYPE_RSS:
3629 rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3631 flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3632 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3633 &priv->extract.tc_key_extract[flow->tc_id].dpkg);
3636 "unable to set flow distribution.please check queue config\n");
3640 /* Allocate DMA'ble memory to write the rules */
3641 param = (size_t)rte_malloc(NULL, 256, 64);
3643 DPAA2_PMD_ERR("Memory allocation failure\n");
3647 if (dpkg_prepare_key_cfg(
3648 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3649 (uint8_t *)param) < 0) {
3651 "Unable to prepare extract parameters");
3652 rte_free((void *)param);
3656 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
3657 tc_cfg.dist_size = rss_conf->queue_num;
3658 tc_cfg.key_cfg_iova = (size_t)param;
3659 tc_cfg.enable = true;
3660 tc_cfg.tc = flow->tc_id;
3661 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3662 priv->token, &tc_cfg);
3665 "RSS TC table cannot be configured: %d\n",
3667 rte_free((void *)param);
3671 rte_free((void *)param);
3672 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3673 if (dpkg_prepare_key_cfg(
3674 &priv->extract.qos_key_extract.dpkg,
3675 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3677 "Unable to prepare extract parameters");
3681 sizeof(struct dpni_qos_tbl_cfg));
3682 qos_cfg.discard_on_miss = true;
3683 qos_cfg.keep_entries = true;
3684 qos_cfg.key_cfg_iova =
3685 (size_t)priv->extract.qos_extract_param;
3686 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3687 priv->token, &qos_cfg);
3690 "RSS QoS dist can't be configured-%d\n",
3696 /* Add Rule into QoS table */
3697 qos_index = flow->tc_id * priv->fs_entries +
3699 if (qos_index >= priv->qos_entries) {
3700 DPAA2_PMD_ERR("QoS table with %d entries full",
3705 flow->qos_real_key_size =
3706 priv->extract.qos_key_extract.key_info.key_total_size;
3707 flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3708 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3709 &flow->qos_rule, flow->tc_id,
3713 "Error in entry addition in QoS table(%d)",
3718 case RTE_FLOW_ACTION_TYPE_END:
3722 DPAA2_PMD_ERR("Invalid action type");
3730 if (is_keycfg_configured &
3731 (DPAA2_QOS_TABLE_RECONFIGURE |
3732 DPAA2_FS_TABLE_RECONFIGURE)) {
3733 ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3735 DPAA2_PMD_ERR("Flow entry update failed.");
3740 /* New rules are inserted. */
3742 LIST_INSERT_HEAD(&priv->flows, flow, next);
3744 while (LIST_NEXT(curr, next))
3745 curr = LIST_NEXT(curr, next);
3746 LIST_INSERT_AFTER(curr, flow, next);
3753 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3754 const struct rte_flow_attr *attr)
3758 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3759 DPAA2_PMD_ERR("Priority group is out of range\n");
3762 if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3763 DPAA2_PMD_ERR("Priority within the group is out of range\n");
3766 if (unlikely(attr->egress)) {
3768 "Flow configuration is not supported on egress side\n");
3771 if (unlikely(!attr->ingress)) {
3772 DPAA2_PMD_ERR("Ingress flag must be configured\n");
3779 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3781 unsigned int i, j, is_found = 0;
3784 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3785 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3786 if (dpaa2_supported_pattern_type[i]
3787 == pattern[j].type) {
3797 /* Lets verify other combinations of given pattern rules */
3798 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3799 if (!pattern[j].spec) {
3809 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3811 unsigned int i, j, is_found = 0;
3814 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3815 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3816 if (dpaa2_supported_action_type[i] == actions[j].type) {
3826 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3827 if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3835 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3836 const struct rte_flow_attr *flow_attr,
3837 const struct rte_flow_item pattern[],
3838 const struct rte_flow_action actions[],
3839 struct rte_flow_error *error)
3841 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3842 struct dpni_attr dpni_attr;
3843 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3844 uint16_t token = priv->token;
3847 memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3848 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3851 "Failure to get dpni@%p attribute, err code %d\n",
3853 rte_flow_error_set(error, EPERM,
3854 RTE_FLOW_ERROR_TYPE_ATTR,
3855 flow_attr, "invalid");
3859 /* Verify input attributes */
3860 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3863 "Invalid attributes are given\n");
3864 rte_flow_error_set(error, EPERM,
3865 RTE_FLOW_ERROR_TYPE_ATTR,
3866 flow_attr, "invalid");
3867 goto not_valid_params;
3869 /* Verify input pattern list */
3870 ret = dpaa2_dev_verify_patterns(pattern);
3873 "Invalid pattern list is given\n");
3874 rte_flow_error_set(error, EPERM,
3875 RTE_FLOW_ERROR_TYPE_ITEM,
3876 pattern, "invalid");
3877 goto not_valid_params;
3879 /* Verify input action list */
3880 ret = dpaa2_dev_verify_actions(actions);
3883 "Invalid action list is given\n");
3884 rte_flow_error_set(error, EPERM,
3885 RTE_FLOW_ERROR_TYPE_ACTION,
3886 actions, "invalid");
3887 goto not_valid_params;
3894 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3895 const struct rte_flow_attr *attr,
3896 const struct rte_flow_item pattern[],
3897 const struct rte_flow_action actions[],
3898 struct rte_flow_error *error)
3900 struct rte_flow *flow = NULL;
3901 size_t key_iova = 0, mask_iova = 0;
3904 dpaa2_flow_control_log =
3905 getenv("DPAA2_FLOW_CONTROL_LOG");
3907 if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3908 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3910 dpaa2_flow_miss_flow_id =
3911 atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3912 if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3914 "The missed flow ID %d exceeds the max flow ID %d",
3915 dpaa2_flow_miss_flow_id,
3916 priv->dist_queues - 1);
3921 flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3923 DPAA2_PMD_ERR("Failure to allocate memory for flow");
3926 /* Allocate DMA'ble memory to write the rules */
3927 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3930 "Memory allocation failure for rule configuration\n");
3933 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3936 "Memory allocation failure for rule configuration\n");
3940 flow->qos_rule.key_iova = key_iova;
3941 flow->qos_rule.mask_iova = mask_iova;
3943 /* Allocate DMA'ble memory to write the rules */
3944 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3947 "Memory allocation failure for rule configuration\n");
3950 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3953 "Memory allocation failure for rule configuration\n");
3957 flow->fs_rule.key_iova = key_iova;
3958 flow->fs_rule.mask_iova = mask_iova;
3960 flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3961 flow->ipaddr_rule.qos_ipsrc_offset =
3962 IP_ADDRESS_OFFSET_INVALID;
3963 flow->ipaddr_rule.qos_ipdst_offset =
3964 IP_ADDRESS_OFFSET_INVALID;
3965 flow->ipaddr_rule.fs_ipsrc_offset =
3966 IP_ADDRESS_OFFSET_INVALID;
3967 flow->ipaddr_rule.fs_ipdst_offset =
3968 IP_ADDRESS_OFFSET_INVALID;
3970 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
3973 if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3974 rte_flow_error_set(error, EPERM,
3975 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3977 DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
3978 goto creation_error;
3983 rte_flow_error_set(error, EPERM,
3984 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3985 NULL, "memory alloc");
3987 rte_free((void *)flow);
3988 rte_free((void *)key_iova);
3989 rte_free((void *)mask_iova);
3995 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
3996 struct rte_flow *flow,
3997 struct rte_flow_error *error)
4000 struct dpaa2_dev_priv *priv = dev->data->dev_private;
4001 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4003 switch (flow->action) {
4004 case RTE_FLOW_ACTION_TYPE_QUEUE:
4005 if (priv->num_rx_tc > 1) {
4006 /* Remove entry from QoS table first */
4007 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4011 "Error in removing entry from QoS table(%d)", ret);
4016 /* Then remove entry from FS table */
4017 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4018 flow->tc_id, &flow->fs_rule);
4021 "Error in removing entry from FS table(%d)", ret);
4025 case RTE_FLOW_ACTION_TYPE_RSS:
4026 if (priv->num_rx_tc > 1) {
4027 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4031 "Error in entry addition in QoS table(%d)", ret);
4038 "Action type (%d) is not supported", flow->action);
4043 LIST_REMOVE(flow, next);
4044 rte_free((void *)(size_t)flow->qos_rule.key_iova);
4045 rte_free((void *)(size_t)flow->qos_rule.mask_iova);
4046 rte_free((void *)(size_t)flow->fs_rule.key_iova);
4047 rte_free((void *)(size_t)flow->fs_rule.mask_iova);
4048 /* Now free the flow */
4053 rte_flow_error_set(error, EPERM,
4054 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4060 * Destroy user-configured flow rules.
4062 * This function skips internal flow rules.
4064 * @see rte_flow_flush()
4068 dpaa2_flow_flush(struct rte_eth_dev *dev,
4069 struct rte_flow_error *error)
4071 struct dpaa2_dev_priv *priv = dev->data->dev_private;
4072 struct rte_flow *flow = LIST_FIRST(&priv->flows);
4075 struct rte_flow *next = LIST_NEXT(flow, next);
4077 dpaa2_flow_destroy(dev, flow, error);
4084 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
4085 struct rte_flow *flow __rte_unused,
4086 const struct rte_flow_action *actions __rte_unused,
4087 void *data __rte_unused,
4088 struct rte_flow_error *error __rte_unused)
4094 * Clean up all flow rules.
4096 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4097 * rules regardless of whether they are internal or user-configured.
4100 * Pointer to private structure.
4103 dpaa2_flow_clean(struct rte_eth_dev *dev)
4105 struct rte_flow *flow;
4106 struct dpaa2_dev_priv *priv = dev->data->dev_private;
4108 while ((flow = LIST_FIRST(&priv->flows)))
4109 dpaa2_flow_destroy(dev, flow, NULL);
4112 const struct rte_flow_ops dpaa2_flow_ops = {
4113 .create = dpaa2_flow_create,
4114 .validate = dpaa2_flow_validate,
4115 .destroy = dpaa2_flow_destroy,
4116 .flush = dpaa2_flow_flush,
4117 .query = dpaa2_flow_query,