1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2021 NXP
13 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
25 /* Workaround to discriminate UDP/TCP/SCTP flows by the next
26 * protocol field of the L3 header.
27 * MC/WRIOP cannot identify the L4 protocol
28 * from the L4 ports alone.
30 int mc_l4_port_identification;
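/* When this flag is zero, the UDP/TCP/SCTP handlers below fall back to
 * programming an IP next-protocol rule (see the "!spec ||
 * !mc_l4_port_identification" checks) instead of relying on the
 * firmware to classify by L4 port.
 */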
32 static char *dpaa2_flow_control_log;
33 static uint16_t dpaa2_flow_miss_flow_id =
36 #define FIXED_ENTRY_SIZE DPNI_MAX_KEY_SIZE
38 enum flow_rule_ipaddr_type {
44 struct flow_rule_ipaddr {
45 enum flow_rule_ipaddr_type ipaddr_type;
53 LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
54 struct dpni_rule_cfg qos_rule;
55 struct dpni_rule_cfg fs_rule;
56 uint8_t qos_real_key_size;
57 uint8_t fs_real_key_size;
58 uint8_t tc_id; /**< Traffic Class ID. */
59 uint8_t tc_index; /**< Index within this Traffic Class. */
60 enum rte_flow_action_type action;
61 /* Special for IP address to specify the offset
64 struct flow_rule_ipaddr ipaddr_rule;
65 struct dpni_fs_action_cfg action_cfg;
69 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
70 RTE_FLOW_ITEM_TYPE_END,
71 RTE_FLOW_ITEM_TYPE_ETH,
72 RTE_FLOW_ITEM_TYPE_VLAN,
73 RTE_FLOW_ITEM_TYPE_IPV4,
74 RTE_FLOW_ITEM_TYPE_IPV6,
75 RTE_FLOW_ITEM_TYPE_ICMP,
76 RTE_FLOW_ITEM_TYPE_UDP,
77 RTE_FLOW_ITEM_TYPE_TCP,
78 RTE_FLOW_ITEM_TYPE_SCTP,
79 RTE_FLOW_ITEM_TYPE_GRE,
83 enum rte_flow_action_type dpaa2_supported_action_type[] = {
84 RTE_FLOW_ACTION_TYPE_END,
85 RTE_FLOW_ACTION_TYPE_QUEUE,
86 RTE_FLOW_ACTION_TYPE_PHY_PORT,
87 RTE_FLOW_ACTION_TYPE_PORT_ID,
88 RTE_FLOW_ACTION_TYPE_RSS
92 enum rte_flow_action_type dpaa2_supported_fs_action_type[] = {
93 RTE_FLOW_ACTION_TYPE_QUEUE,
94 RTE_FLOW_ACTION_TYPE_PHY_PORT,
95 RTE_FLOW_ACTION_TYPE_PORT_ID
98 /* Max of enum rte_flow_item_type + 1; generic item type covering both IPv4 and IPv6 */
99 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
102 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
103 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
104 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
105 .type = RTE_BE16(0xffff),
108 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
109 .tci = RTE_BE16(0xffff),
112 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
113 .hdr.src_addr = RTE_BE32(0xffffffff),
114 .hdr.dst_addr = RTE_BE32(0xffffffff),
115 .hdr.next_proto_id = 0xff,
118 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
121 "\xff\xff\xff\xff\xff\xff\xff\xff"
122 "\xff\xff\xff\xff\xff\xff\xff\xff",
124 "\xff\xff\xff\xff\xff\xff\xff\xff"
125 "\xff\xff\xff\xff\xff\xff\xff\xff",
130 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
131 .hdr.icmp_type = 0xff,
132 .hdr.icmp_code = 0xff,
135 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
137 .src_port = RTE_BE16(0xffff),
138 .dst_port = RTE_BE16(0xffff),
142 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
144 .src_port = RTE_BE16(0xffff),
145 .dst_port = RTE_BE16(0xffff),
149 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
151 .src_port = RTE_BE16(0xffff),
152 .dst_port = RTE_BE16(0xffff),
156 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
157 .protocol = RTE_BE16(0xffff),
162 static inline void dpaa2_prot_field_string(
163 enum net_prot prot, uint32_t field,
166 if (!dpaa2_flow_control_log)
169 if (prot == NET_PROT_ETH) {
170 strcpy(string, "eth");
171 if (field == NH_FLD_ETH_DA)
172 strcat(string, ".dst");
173 else if (field == NH_FLD_ETH_SA)
174 strcat(string, ".src");
175 else if (field == NH_FLD_ETH_TYPE)
176 strcat(string, ".type");
178 strcat(string, ".unknown field");
179 } else if (prot == NET_PROT_VLAN) {
180 strcpy(string, "vlan");
181 if (field == NH_FLD_VLAN_TCI)
182 strcat(string, ".tci");
184 strcat(string, ".unknown field");
185 } else if (prot == NET_PROT_IP) {
186 strcpy(string, "ip");
187 if (field == NH_FLD_IP_SRC)
188 strcat(string, ".src");
189 else if (field == NH_FLD_IP_DST)
190 strcat(string, ".dst");
191 else if (field == NH_FLD_IP_PROTO)
192 strcat(string, ".proto");
194 strcat(string, ".unknown field");
195 } else if (prot == NET_PROT_TCP) {
196 strcpy(string, "tcp");
197 if (field == NH_FLD_TCP_PORT_SRC)
198 strcat(string, ".src");
199 else if (field == NH_FLD_TCP_PORT_DST)
200 strcat(string, ".dst");
202 strcat(string, ".unknown field");
203 } else if (prot == NET_PROT_UDP) {
204 strcpy(string, "udp");
205 if (field == NH_FLD_UDP_PORT_SRC)
206 strcat(string, ".src");
207 else if (field == NH_FLD_UDP_PORT_DST)
208 strcat(string, ".dst");
210 strcat(string, ".unknown field");
211 } else if (prot == NET_PROT_ICMP) {
212 strcpy(string, "icmp");
213 if (field == NH_FLD_ICMP_TYPE)
214 strcat(string, ".type");
215 else if (field == NH_FLD_ICMP_CODE)
216 strcat(string, ".code");
218 strcat(string, ".unknown field");
219 } else if (prot == NET_PROT_SCTP) {
220 strcpy(string, "sctp");
221 if (field == NH_FLD_SCTP_PORT_SRC)
222 strcat(string, ".src");
223 else if (field == NH_FLD_SCTP_PORT_DST)
224 strcat(string, ".dst");
226 strcat(string, ".unknown field");
227 } else if (prot == NET_PROT_GRE) {
228 strcpy(string, "gre");
229 if (field == NH_FLD_GRE_TYPE)
230 strcat(string, ".type");
232 strcat(string, ".unknown field");
234 strcpy(string, "unknown protocol");
238 static inline void dpaa2_flow_qos_table_extracts_log(
239 const struct dpaa2_dev_priv *priv)
244 if (!dpaa2_flow_control_log)
247 printf("Setup QoS table: number of extracts: %d\r\n",
248 priv->extract.qos_key_extract.dpkg.num_extracts);
249 for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
251 dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
252 .extracts[idx].extract.from_hdr.prot,
253 priv->extract.qos_key_extract.dpkg.extracts[idx]
254 .extract.from_hdr.field,
256 printf("%s", string);
257 if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
263 static inline void dpaa2_flow_fs_table_extracts_log(
264 const struct dpaa2_dev_priv *priv, int tc_id)
269 if (!dpaa2_flow_control_log)
272 printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
273 tc_id, priv->extract.tc_key_extract[tc_id]
275 for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
276 .dpkg.num_extracts; idx++) {
277 dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
278 .dpkg.extracts[idx].extract.from_hdr.prot,
279 priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
280 .extract.from_hdr.field,
282 printf("%s", string);
283 if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
290 static inline void dpaa2_flow_qos_entry_log(
291 const char *log_info, const struct rte_flow *flow, int qos_index)
296 if (!dpaa2_flow_control_log)
299 printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
300 log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
302 key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
303 mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
306 for (idx = 0; idx < flow->qos_real_key_size; idx++)
307 printf("%02x ", key[idx]);
309 printf("\r\nmask:\r\n");
310 for (idx = 0; idx < flow->qos_real_key_size; idx++)
311 printf("%02x ", mask[idx]);
313 printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
314 flow->ipaddr_rule.qos_ipsrc_offset,
315 flow->ipaddr_rule.qos_ipdst_offset);
318 static inline void dpaa2_flow_fs_entry_log(
319 const char *log_info, const struct rte_flow *flow)
324 if (!dpaa2_flow_control_log)
327 printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
328 log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
330 key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
331 mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
334 for (idx = 0; idx < flow->fs_real_key_size; idx++)
335 printf("%02x ", key[idx]);
337 printf("\r\nmask:\r\n");
338 for (idx = 0; idx < flow->fs_real_key_size; idx++)
339 printf("%02x ", mask[idx]);
341 printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
342 flow->ipaddr_rule.fs_ipsrc_offset,
343 flow->ipaddr_rule.fs_ipdst_offset);
346 static inline void dpaa2_flow_extract_key_set(
347 struct dpaa2_key_info *key_info, int index, uint8_t size)
349 key_info->key_size[index] = size;
351 key_info->key_offset[index] =
352 key_info->key_offset[index - 1] +
353 key_info->key_size[index - 1];
355 key_info->key_offset[index] = 0;
357 key_info->key_total_size += size;
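/* Usage sketch (illustrative only; mirrors the L4 handlers further
 * below): append the UDP source port to the QoS key profile:
 *
 *   ret = dpaa2_flow_extract_add(&priv->extract.qos_key_extract,
 *                                NET_PROT_UDP, NH_FLD_UDP_PORT_SRC,
 *                                NH_FLD_UDP_PORT_SIZE);
 *
 * IP SRC/DST extracts are always kept as the last entries of the
 * profile; the reordering below maintains that invariant.
 */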
360 static int dpaa2_flow_extract_add(
361 struct dpaa2_key_extract *key_extract,
363 uint32_t field, uint8_t field_size)
365 int index, ip_src = -1, ip_dst = -1;
366 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
367 struct dpaa2_key_info *key_info = &key_extract->key_info;
369 if (dpkg->num_extracts >=
370 DPKG_MAX_NUM_OF_EXTRACTS) {
371 DPAA2_PMD_WARN("Number of extracts exceeds the maximum");
374 /* Before reorder, the IP SRC and IP DST are already last
377 for (index = 0; index < dpkg->num_extracts; index++) {
378 if (dpkg->extracts[index].extract.from_hdr.prot ==
380 if (dpkg->extracts[index].extract.from_hdr.field ==
384 if (dpkg->extracts[index].extract.from_hdr.field ==
392 RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
395 RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
397 if (prot == NET_PROT_IP &&
398 (field == NH_FLD_IP_SRC ||
399 field == NH_FLD_IP_DST)) {
400 index = dpkg->num_extracts;
402 if (ip_src >= 0 && ip_dst >= 0)
403 index = dpkg->num_extracts - 2;
404 else if (ip_src >= 0 || ip_dst >= 0)
405 index = dpkg->num_extracts - 1;
407 index = dpkg->num_extracts;
410 dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
411 dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
412 dpkg->extracts[index].extract.from_hdr.prot = prot;
413 dpkg->extracts[index].extract.from_hdr.field = field;
414 if (prot == NET_PROT_IP &&
415 (field == NH_FLD_IP_SRC ||
416 field == NH_FLD_IP_DST)) {
417 dpaa2_flow_extract_key_set(key_info, index, 0);
419 dpaa2_flow_extract_key_set(key_info, index, field_size);
422 if (prot == NET_PROT_IP) {
423 if (field == NH_FLD_IP_SRC) {
424 if (key_info->ipv4_dst_offset >= 0) {
425 key_info->ipv4_src_offset =
426 key_info->ipv4_dst_offset +
427 NH_FLD_IPV4_ADDR_SIZE;
429 key_info->ipv4_src_offset =
430 key_info->key_offset[index - 1] +
431 key_info->key_size[index - 1];
433 if (key_info->ipv6_dst_offset >= 0) {
434 key_info->ipv6_src_offset =
435 key_info->ipv6_dst_offset +
436 NH_FLD_IPV6_ADDR_SIZE;
438 key_info->ipv6_src_offset =
439 key_info->key_offset[index - 1] +
440 key_info->key_size[index - 1];
442 } else if (field == NH_FLD_IP_DST) {
443 if (key_info->ipv4_src_offset >= 0) {
444 key_info->ipv4_dst_offset =
445 key_info->ipv4_src_offset +
446 NH_FLD_IPV4_ADDR_SIZE;
448 key_info->ipv4_dst_offset =
449 key_info->key_offset[index - 1] +
450 key_info->key_size[index - 1];
452 if (key_info->ipv6_src_offset >= 0) {
453 key_info->ipv6_dst_offset =
454 key_info->ipv6_src_offset +
455 NH_FLD_IPV6_ADDR_SIZE;
457 key_info->ipv6_dst_offset =
458 key_info->key_offset[index - 1] +
459 key_info->key_size[index - 1];
464 if (index == dpkg->num_extracts) {
465 dpkg->num_extracts++;
471 dpkg->extracts[ip_src].type =
472 DPKG_EXTRACT_FROM_HDR;
473 dpkg->extracts[ip_src].extract.from_hdr.type =
475 dpkg->extracts[ip_src].extract.from_hdr.prot =
477 dpkg->extracts[ip_src].extract.from_hdr.field =
479 dpaa2_flow_extract_key_set(key_info, ip_src, 0);
480 key_info->ipv4_src_offset += field_size;
481 key_info->ipv6_src_offset += field_size;
485 dpkg->extracts[ip_dst].type =
486 DPKG_EXTRACT_FROM_HDR;
487 dpkg->extracts[ip_dst].extract.from_hdr.type =
489 dpkg->extracts[ip_dst].extract.from_hdr.prot =
491 dpkg->extracts[ip_dst].extract.from_hdr.field =
493 dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
494 key_info->ipv4_dst_offset += field_size;
495 key_info->ipv6_dst_offset += field_size;
498 dpkg->num_extracts++;
503 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
506 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
507 struct dpaa2_key_info *key_info = &key_extract->key_info;
508 int last_extract_size, index;
510 if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
511 DPKG_EXTRACT_FROM_DATA) {
512 DPAA2_PMD_WARN("RAW extract cannot be combined with others");
516 last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
517 dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
518 if (last_extract_size)
519 dpkg->num_extracts++;
521 last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
523 for (index = 0; index < dpkg->num_extracts; index++) {
524 dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
525 if (index == dpkg->num_extracts - 1)
526 dpkg->extracts[index].extract.from_data.size =
529 dpkg->extracts[index].extract.from_data.size =
530 DPAA2_FLOW_MAX_KEY_SIZE;
531 dpkg->extracts[index].extract.from_data.offset =
532 DPAA2_FLOW_MAX_KEY_SIZE * index;
535 key_info->key_total_size = size;
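/* Example (illustrative, assuming DPAA2_FLOW_MAX_KEY_SIZE is the
 * per-extract limit): a raw key spanning 2.5x that size is split into
 * three DPKG_EXTRACT_FROM_DATA entries at consecutive offsets, with the
 * last entry sized to cover only the remaining bytes.
 */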
539 /* Protocol discrimination.
540 * Discriminate IPv4/IPv6/VLAN by the Ethernet type field.
541 * Discriminate UDP/TCP/ICMP by the next-protocol field of the IP header.
544 dpaa2_flow_proto_discrimination_extract(
545 struct dpaa2_key_extract *key_extract,
546 enum rte_flow_item_type type)
548 if (type == RTE_FLOW_ITEM_TYPE_ETH) {
549 return dpaa2_flow_extract_add(
550 key_extract, NET_PROT_ETH,
553 } else if (type == (enum rte_flow_item_type)
554 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
555 return dpaa2_flow_extract_add(
556 key_extract, NET_PROT_IP,
558 NH_FLD_IP_PROTO_SIZE);
564 static inline int dpaa2_flow_extract_search(
565 struct dpkg_profile_cfg *dpkg,
566 enum net_prot prot, uint32_t field)
570 for (i = 0; i < dpkg->num_extracts; i++) {
571 if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
572 dpkg->extracts[i].extract.from_hdr.field == field) {
580 static inline int dpaa2_flow_extract_key_offset(
581 struct dpaa2_key_extract *key_extract,
582 enum net_prot prot, uint32_t field)
585 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
586 struct dpaa2_key_info *key_info = &key_extract->key_info;
588 if (prot == NET_PROT_IPV4 ||
589 prot == NET_PROT_IPV6)
590 i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
592 i = dpaa2_flow_extract_search(dpkg, prot, field);
595 if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
596 return key_info->ipv4_src_offset;
597 else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
598 return key_info->ipv4_dst_offset;
599 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
600 return key_info->ipv6_src_offset;
601 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
602 return key_info->ipv6_dst_offset;
604 return key_info->key_offset[i];
610 struct proto_discrimination {
611 enum rte_flow_item_type type;
619 dpaa2_flow_proto_discrimination_rule(
620 struct dpaa2_dev_priv *priv, struct rte_flow *flow,
621 struct proto_discrimination proto, int group)
631 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
633 field = NH_FLD_ETH_TYPE;
634 } else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
636 field = NH_FLD_IP_PROTO;
639 "Only Eth and IP support to discriminate next proto.");
643 offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
646 DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
650 key_iova = flow->qos_rule.key_iova + offset;
651 mask_iova = flow->qos_rule.mask_iova + offset;
652 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
653 eth_type = proto.eth_type;
654 memcpy((void *)key_iova, (const void *)(ð_type),
657 memcpy((void *)mask_iova, (const void *)(ð_type),
660 ip_proto = proto.ip_proto;
661 memcpy((void *)key_iova, (const void *)(&ip_proto),
664 memcpy((void *)mask_iova, (const void *)(&ip_proto),
668 offset = dpaa2_flow_extract_key_offset(
669 &priv->extract.tc_key_extract[group],
672 DPAA2_PMD_ERR("FS prot %d field %d extract failed",
676 key_iova = flow->fs_rule.key_iova + offset;
677 mask_iova = flow->fs_rule.mask_iova + offset;
679 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
680 eth_type = proto.eth_type;
681 memcpy((void *)key_iova, (const void *)(ð_type),
684 memcpy((void *)mask_iova, (const void *)(ð_type),
687 ip_proto = proto.ip_proto;
688 memcpy((void *)key_iova, (const void *)(&ip_proto),
691 memcpy((void *)mask_iova, (const void *)(&ip_proto),
699 dpaa2_flow_rule_data_set(
700 struct dpaa2_key_extract *key_extract,
701 struct dpni_rule_cfg *rule,
702 enum net_prot prot, uint32_t field,
703 const void *key, const void *mask, int size)
705 int offset = dpaa2_flow_extract_key_offset(key_extract,
709 DPAA2_PMD_ERR("prot %d, field %d extract failed",
714 memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
715 memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
721 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
722 const void *key, const void *mask, int size)
726 memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
727 memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
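/* Relocate the key/mask bytes of one IP address within a rule: copy the
 * bytes from their old offset, zero the old location, and write them at
 * the offset of the extract that now sits at the tail of the profile.
 */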
733 _dpaa2_flow_rule_move_ipaddr_tail(
734 struct dpaa2_key_extract *key_extract,
735 struct dpni_rule_cfg *rule, int src_offset,
736 uint32_t field, bool ipv4)
744 char tmp[NH_FLD_IPV6_ADDR_SIZE];
746 if (field != NH_FLD_IP_SRC &&
747 field != NH_FLD_IP_DST) {
748 DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
752 prot = NET_PROT_IPV4;
754 prot = NET_PROT_IPV6;
755 dst_offset = dpaa2_flow_extract_key_offset(key_extract,
757 if (dst_offset < 0) {
758 DPAA2_PMD_ERR("Field %d reorder extract failed", field);
761 key_src = rule->key_iova + src_offset;
762 mask_src = rule->mask_iova + src_offset;
763 key_dst = rule->key_iova + dst_offset;
764 mask_dst = rule->mask_iova + dst_offset;
766 len = sizeof(rte_be32_t);
768 len = NH_FLD_IPV6_ADDR_SIZE;
770 memcpy(tmp, (char *)key_src, len);
771 memset((char *)key_src, 0, len);
772 memcpy((char *)key_dst, tmp, len);
774 memcpy(tmp, (char *)mask_src, len);
775 memset((char *)mask_src, 0, len);
776 memcpy((char *)mask_dst, tmp, len);
782 dpaa2_flow_rule_move_ipaddr_tail(
783 struct rte_flow *flow, struct dpaa2_dev_priv *priv,
789 if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
792 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
793 prot = NET_PROT_IPV4;
795 prot = NET_PROT_IPV6;
797 if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
798 ret = _dpaa2_flow_rule_move_ipaddr_tail(
799 &priv->extract.qos_key_extract,
801 flow->ipaddr_rule.qos_ipsrc_offset,
802 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
804 DPAA2_PMD_ERR("QoS src address reorder failed");
807 flow->ipaddr_rule.qos_ipsrc_offset =
808 dpaa2_flow_extract_key_offset(
809 &priv->extract.qos_key_extract,
810 prot, NH_FLD_IP_SRC);
813 if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
814 ret = _dpaa2_flow_rule_move_ipaddr_tail(
815 &priv->extract.qos_key_extract,
817 flow->ipaddr_rule.qos_ipdst_offset,
818 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
820 DPAA2_PMD_ERR("QoS dst address reorder failed");
823 flow->ipaddr_rule.qos_ipdst_offset =
824 dpaa2_flow_extract_key_offset(
825 &priv->extract.qos_key_extract,
826 prot, NH_FLD_IP_DST);
829 if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
830 ret = _dpaa2_flow_rule_move_ipaddr_tail(
831 &priv->extract.tc_key_extract[fs_group],
833 flow->ipaddr_rule.fs_ipsrc_offset,
834 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
836 DPAA2_PMD_ERR("FS src address reorder failed");
839 flow->ipaddr_rule.fs_ipsrc_offset =
840 dpaa2_flow_extract_key_offset(
841 &priv->extract.tc_key_extract[fs_group],
842 prot, NH_FLD_IP_SRC);
844 if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
845 ret = _dpaa2_flow_rule_move_ipaddr_tail(
846 &priv->extract.tc_key_extract[fs_group],
848 flow->ipaddr_rule.fs_ipdst_offset,
849 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
851 DPAA2_PMD_ERR("FS dst address reorder failed");
854 flow->ipaddr_rule.fs_ipdst_offset =
855 dpaa2_flow_extract_key_offset(
856 &priv->extract.tc_key_extract[fs_group],
857 prot, NH_FLD_IP_DST);
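/* Return zero only if every bit set in the item mask is also covered by
 * the widest mask this driver supports for that item type; any extra
 * bit makes the whole item unsupported.
 */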
864 dpaa2_flow_extract_support(
865 const uint8_t *mask_src,
866 enum rte_flow_item_type type)
870 const char *mask_support = 0;
873 case RTE_FLOW_ITEM_TYPE_ETH:
874 mask_support = (const char *)&dpaa2_flow_item_eth_mask;
875 size = sizeof(struct rte_flow_item_eth);
877 case RTE_FLOW_ITEM_TYPE_VLAN:
878 mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
879 size = sizeof(struct rte_flow_item_vlan);
881 case RTE_FLOW_ITEM_TYPE_IPV4:
882 mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
883 size = sizeof(struct rte_flow_item_ipv4);
885 case RTE_FLOW_ITEM_TYPE_IPV6:
886 mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
887 size = sizeof(struct rte_flow_item_ipv6);
889 case RTE_FLOW_ITEM_TYPE_ICMP:
890 mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
891 size = sizeof(struct rte_flow_item_icmp);
893 case RTE_FLOW_ITEM_TYPE_UDP:
894 mask_support = (const char *)&dpaa2_flow_item_udp_mask;
895 size = sizeof(struct rte_flow_item_udp);
897 case RTE_FLOW_ITEM_TYPE_TCP:
898 mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
899 size = sizeof(struct rte_flow_item_tcp);
901 case RTE_FLOW_ITEM_TYPE_SCTP:
902 mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
903 size = sizeof(struct rte_flow_item_sctp);
905 case RTE_FLOW_ITEM_TYPE_GRE:
906 mask_support = (const char *)&dpaa2_flow_item_gre_mask;
907 size = sizeof(struct rte_flow_item_gre);
913 memcpy(mask, mask_support, size);
915 for (i = 0; i < size; i++)
916 mask[i] = (mask[i] | mask_src[i]);
918 if (memcmp(mask, mask_support, size))
925 dpaa2_configure_flow_eth(struct rte_flow *flow,
926 struct rte_eth_dev *dev,
927 const struct rte_flow_attr *attr,
928 const struct rte_flow_item *pattern,
929 const struct rte_flow_action actions[] __rte_unused,
930 struct rte_flow_error *error __rte_unused,
931 int *device_configured)
936 const struct rte_flow_item_eth *spec, *mask;
938 /* TODO: Currently upper bound of range parameter is not implemented */
939 const struct rte_flow_item_eth *last __rte_unused;
940 struct dpaa2_dev_priv *priv = dev->data->dev_private;
941 const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
945 /* Parse pattern list to get the matching parameters */
946 spec = (const struct rte_flow_item_eth *)pattern->spec;
947 last = (const struct rte_flow_item_eth *)pattern->last;
948 mask = (const struct rte_flow_item_eth *)
949 (pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
951 /* No field of the Ethernet header is specified;
952 * only the Ethernet protocol itself matters.
954 DPAA2_PMD_WARN("No pattern spec for Eth flow item, skipping");
958 /* Get traffic class index and flow id to be configured */
960 flow->tc_index = attr->priority;
962 if (dpaa2_flow_extract_support((const uint8_t *)mask,
963 RTE_FLOW_ITEM_TYPE_ETH)) {
964 DPAA2_PMD_WARN("Extract field(s) of Ethernet not supported.");
969 if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
970 index = dpaa2_flow_extract_search(
971 &priv->extract.qos_key_extract.dpkg,
972 NET_PROT_ETH, NH_FLD_ETH_SA);
974 ret = dpaa2_flow_extract_add(
975 &priv->extract.qos_key_extract,
976 NET_PROT_ETH, NH_FLD_ETH_SA,
979 DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
983 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
985 index = dpaa2_flow_extract_search(
986 &priv->extract.tc_key_extract[group].dpkg,
987 NET_PROT_ETH, NH_FLD_ETH_SA);
989 ret = dpaa2_flow_extract_add(
990 &priv->extract.tc_key_extract[group],
991 NET_PROT_ETH, NH_FLD_ETH_SA,
994 DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
997 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1000 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1003 "Move ipaddr before ETH_SA rule set failed");
1007 ret = dpaa2_flow_rule_data_set(
1008 &priv->extract.qos_key_extract,
1012 &spec->src.addr_bytes,
1013 &mask->src.addr_bytes,
1014 sizeof(struct rte_ether_addr));
1016 DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1020 ret = dpaa2_flow_rule_data_set(
1021 &priv->extract.tc_key_extract[group],
1025 &spec->src.addr_bytes,
1026 &mask->src.addr_bytes,
1027 sizeof(struct rte_ether_addr));
1029 DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
1034 if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1035 index = dpaa2_flow_extract_search(
1036 &priv->extract.qos_key_extract.dpkg,
1037 NET_PROT_ETH, NH_FLD_ETH_DA);
1039 ret = dpaa2_flow_extract_add(
1040 &priv->extract.qos_key_extract,
1041 NET_PROT_ETH, NH_FLD_ETH_DA,
1042 RTE_ETHER_ADDR_LEN);
1044 DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1048 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1051 index = dpaa2_flow_extract_search(
1052 &priv->extract.tc_key_extract[group].dpkg,
1053 NET_PROT_ETH, NH_FLD_ETH_DA);
1055 ret = dpaa2_flow_extract_add(
1056 &priv->extract.tc_key_extract[group],
1057 NET_PROT_ETH, NH_FLD_ETH_DA,
1058 RTE_ETHER_ADDR_LEN);
1060 DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1064 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1067 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1070 "Move ipaddr before ETH DA rule set failed");
1074 ret = dpaa2_flow_rule_data_set(
1075 &priv->extract.qos_key_extract,
1079 &spec->dst.addr_bytes,
1080 &mask->dst.addr_bytes,
1081 sizeof(struct rte_ether_addr));
1083 DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1087 ret = dpaa2_flow_rule_data_set(
1088 &priv->extract.tc_key_extract[group],
1092 &spec->dst.addr_bytes,
1093 &mask->dst.addr_bytes,
1094 sizeof(struct rte_ether_addr));
1096 DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1101 if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
1102 index = dpaa2_flow_extract_search(
1103 &priv->extract.qos_key_extract.dpkg,
1104 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1106 ret = dpaa2_flow_extract_add(
1107 &priv->extract.qos_key_extract,
1108 NET_PROT_ETH, NH_FLD_ETH_TYPE,
1109 RTE_ETHER_TYPE_LEN);
1111 DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1115 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1117 index = dpaa2_flow_extract_search(
1118 &priv->extract.tc_key_extract[group].dpkg,
1119 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1121 ret = dpaa2_flow_extract_add(
1122 &priv->extract.tc_key_extract[group],
1123 NET_PROT_ETH, NH_FLD_ETH_TYPE,
1124 RTE_ETHER_TYPE_LEN);
1126 DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1130 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1133 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1136 "Move ipaddr before ETH TYPE rule set failed");
1140 ret = dpaa2_flow_rule_data_set(
1141 &priv->extract.qos_key_extract,
1147 sizeof(rte_be16_t));
1149 DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1153 ret = dpaa2_flow_rule_data_set(
1154 &priv->extract.tc_key_extract[group],
1160 sizeof(rte_be16_t));
1162 DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1167 (*device_configured) |= local_cfg;
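/* Example (illustrative testpmd syntax): classify on the VLAN TCI
 *   flow create 0 ingress pattern eth / vlan tci is 16 / end
 *     actions queue index 1 / end
 */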
1173 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1174 struct rte_eth_dev *dev,
1175 const struct rte_flow_attr *attr,
1176 const struct rte_flow_item *pattern,
1177 const struct rte_flow_action actions[] __rte_unused,
1178 struct rte_flow_error *error __rte_unused,
1179 int *device_configured)
1184 const struct rte_flow_item_vlan *spec, *mask;
1186 const struct rte_flow_item_vlan *last __rte_unused;
1187 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1189 group = attr->group;
1191 /* Parse pattern list to get the matching parameters */
1192 spec = (const struct rte_flow_item_vlan *)pattern->spec;
1193 last = (const struct rte_flow_item_vlan *)pattern->last;
1194 mask = (const struct rte_flow_item_vlan *)
1195 (pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1197 /* Get traffic class index and flow id to be configured */
1198 flow->tc_id = group;
1199 flow->tc_index = attr->priority;
1202 /* No field of the VLAN header is specified;
1203 * only the VLAN protocol itself matters.
1205 /* The Ethernet type field is actually used for VLAN classification.
1207 struct proto_discrimination proto;
1209 index = dpaa2_flow_extract_search(
1210 &priv->extract.qos_key_extract.dpkg,
1211 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1213 ret = dpaa2_flow_proto_discrimination_extract(
1214 &priv->extract.qos_key_extract,
1215 RTE_FLOW_ITEM_TYPE_ETH);
1218 "QoS Ext ETH_TYPE to discriminate vLan failed");
1222 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1225 index = dpaa2_flow_extract_search(
1226 &priv->extract.tc_key_extract[group].dpkg,
1227 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1229 ret = dpaa2_flow_proto_discrimination_extract(
1230 &priv->extract.tc_key_extract[group],
1231 RTE_FLOW_ITEM_TYPE_ETH);
1234 "FS Ext ETH_TYPE to discriminate vLan failed.");
1238 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1241 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1244 "Move ipaddr before vLan discrimination set failed");
1248 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1249 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1250 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1253 DPAA2_PMD_ERR("VLAN discrimination rule set failed");
1257 (*device_configured) |= local_cfg;
1262 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1263 RTE_FLOW_ITEM_TYPE_VLAN)) {
1264 DPAA2_PMD_WARN("Extract field(s) of VLAN not supported.");
1272 index = dpaa2_flow_extract_search(
1273 &priv->extract.qos_key_extract.dpkg,
1274 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1276 ret = dpaa2_flow_extract_add(
1277 &priv->extract.qos_key_extract,
1280 sizeof(rte_be16_t));
1282 DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1286 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1289 index = dpaa2_flow_extract_search(
1290 &priv->extract.tc_key_extract[group].dpkg,
1291 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1293 ret = dpaa2_flow_extract_add(
1294 &priv->extract.tc_key_extract[group],
1297 sizeof(rte_be16_t));
1299 DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1303 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1306 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1309 "Move ipaddr before VLAN TCI rule set failed");
1313 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1319 sizeof(rte_be16_t));
1321 DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1325 ret = dpaa2_flow_rule_data_set(
1326 &priv->extract.tc_key_extract[group],
1332 sizeof(rte_be16_t));
1334 DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1338 (*device_configured) |= local_cfg;
1344 dpaa2_configure_flow_ip_discrimation(
1345 struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1346 const struct rte_flow_item *pattern,
1347 int *local_cfg, int *device_configured,
1351 struct proto_discrimination proto;
1353 index = dpaa2_flow_extract_search(
1354 &priv->extract.qos_key_extract.dpkg,
1355 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1357 ret = dpaa2_flow_proto_discrimination_extract(
1358 &priv->extract.qos_key_extract,
1359 RTE_FLOW_ITEM_TYPE_ETH);
1362 "QoS Extract ETH_TYPE to discriminate IP failed.");
1365 (*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1368 index = dpaa2_flow_extract_search(
1369 &priv->extract.tc_key_extract[group].dpkg,
1370 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1372 ret = dpaa2_flow_proto_discrimination_extract(
1373 &priv->extract.tc_key_extract[group],
1374 RTE_FLOW_ITEM_TYPE_ETH);
1377 "FS Extract ETH_TYPE to discriminate IP failed.");
1380 (*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1383 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1386 "Move ipaddr before IP discrimination set failed");
1390 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1391 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1392 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1394 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1395 ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1397 DPAA2_PMD_ERR("IP discrimination rule set failed");
1401 (*device_configured) |= (*local_cfg);
1408 dpaa2_configure_flow_generic_ip(
1409 struct rte_flow *flow,
1410 struct rte_eth_dev *dev,
1411 const struct rte_flow_attr *attr,
1412 const struct rte_flow_item *pattern,
1413 const struct rte_flow_action actions[] __rte_unused,
1414 struct rte_flow_error *error __rte_unused,
1415 int *device_configured)
1420 const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1422 const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1424 const void *key, *mask;
1427 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1428 const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1431 group = attr->group;
1433 /* Parse pattern list to get the matching parameters */
1434 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1435 spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1436 mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1437 (pattern->mask ? pattern->mask :
1438 &dpaa2_flow_item_ipv4_mask);
1440 spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1441 mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1442 (pattern->mask ? pattern->mask :
1443 &dpaa2_flow_item_ipv6_mask);
1446 /* Get traffic class index and flow id to be configured */
1447 flow->tc_id = group;
1448 flow->tc_index = attr->priority;
1450 ret = dpaa2_configure_flow_ip_discrimation(priv,
1451 flow, pattern, &local_cfg,
1452 device_configured, group);
1454 DPAA2_PMD_ERR("IP discrimination failed!");
1458 if (!spec_ipv4 && !spec_ipv6)
1462 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1463 RTE_FLOW_ITEM_TYPE_IPV4)) {
1464 DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1471 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1472 RTE_FLOW_ITEM_TYPE_IPV6)) {
1473 DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1479 if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1480 mask_ipv4->hdr.dst_addr)) {
1481 flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1482 } else if (mask_ipv6 &&
1483 (memcmp((const char *)mask_ipv6->hdr.src_addr,
1484 zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1485 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1486 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1487 flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1490 if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1492 memcmp((const char *)mask_ipv6->hdr.src_addr,
1493 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1494 index = dpaa2_flow_extract_search(
1495 &priv->extract.qos_key_extract.dpkg,
1496 NET_PROT_IP, NH_FLD_IP_SRC);
1498 ret = dpaa2_flow_extract_add(
1499 &priv->extract.qos_key_extract,
1504 DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1508 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1511 index = dpaa2_flow_extract_search(
1512 &priv->extract.tc_key_extract[group].dpkg,
1513 NET_PROT_IP, NH_FLD_IP_SRC);
1515 ret = dpaa2_flow_extract_add(
1516 &priv->extract.tc_key_extract[group],
1521 DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1525 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1529 key = &spec_ipv4->hdr.src_addr;
1531 key = &spec_ipv6->hdr.src_addr[0];
1533 mask = &mask_ipv4->hdr.src_addr;
1534 size = NH_FLD_IPV4_ADDR_SIZE;
1535 prot = NET_PROT_IPV4;
1537 mask = &mask_ipv6->hdr.src_addr[0];
1538 size = NH_FLD_IPV6_ADDR_SIZE;
1539 prot = NET_PROT_IPV6;
1542 ret = dpaa2_flow_rule_data_set(
1543 &priv->extract.qos_key_extract,
1545 prot, NH_FLD_IP_SRC,
1548 DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1552 ret = dpaa2_flow_rule_data_set(
1553 &priv->extract.tc_key_extract[group],
1555 prot, NH_FLD_IP_SRC,
1558 DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1562 flow->ipaddr_rule.qos_ipsrc_offset =
1563 dpaa2_flow_extract_key_offset(
1564 &priv->extract.qos_key_extract,
1565 prot, NH_FLD_IP_SRC);
1566 flow->ipaddr_rule.fs_ipsrc_offset =
1567 dpaa2_flow_extract_key_offset(
1568 &priv->extract.tc_key_extract[group],
1569 prot, NH_FLD_IP_SRC);
1572 if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1574 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1575 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1576 index = dpaa2_flow_extract_search(
1577 &priv->extract.qos_key_extract.dpkg,
1578 NET_PROT_IP, NH_FLD_IP_DST);
1581 size = NH_FLD_IPV4_ADDR_SIZE;
1583 size = NH_FLD_IPV6_ADDR_SIZE;
1584 ret = dpaa2_flow_extract_add(
1585 &priv->extract.qos_key_extract,
1590 DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1594 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1597 index = dpaa2_flow_extract_search(
1598 &priv->extract.tc_key_extract[group].dpkg,
1599 NET_PROT_IP, NH_FLD_IP_DST);
1602 size = NH_FLD_IPV4_ADDR_SIZE;
1604 size = NH_FLD_IPV6_ADDR_SIZE;
1605 ret = dpaa2_flow_extract_add(
1606 &priv->extract.tc_key_extract[group],
1611 DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1615 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1619 key = &spec_ipv4->hdr.dst_addr;
1621 key = spec_ipv6->hdr.dst_addr;
1623 mask = &mask_ipv4->hdr.dst_addr;
1624 size = NH_FLD_IPV4_ADDR_SIZE;
1625 prot = NET_PROT_IPV4;
1627 mask = &mask_ipv6->hdr.dst_addr[0];
1628 size = NH_FLD_IPV6_ADDR_SIZE;
1629 prot = NET_PROT_IPV6;
1632 ret = dpaa2_flow_rule_data_set(
1633 &priv->extract.qos_key_extract,
1635 prot, NH_FLD_IP_DST,
1638 DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1642 ret = dpaa2_flow_rule_data_set(
1643 &priv->extract.tc_key_extract[group],
1645 prot, NH_FLD_IP_DST,
1648 DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1651 flow->ipaddr_rule.qos_ipdst_offset =
1652 dpaa2_flow_extract_key_offset(
1653 &priv->extract.qos_key_extract,
1654 prot, NH_FLD_IP_DST);
1655 flow->ipaddr_rule.fs_ipdst_offset =
1656 dpaa2_flow_extract_key_offset(
1657 &priv->extract.tc_key_extract[group],
1658 prot, NH_FLD_IP_DST);
1661 if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1662 (mask_ipv6 && mask_ipv6->hdr.proto)) {
1663 index = dpaa2_flow_extract_search(
1664 &priv->extract.qos_key_extract.dpkg,
1665 NET_PROT_IP, NH_FLD_IP_PROTO);
1667 ret = dpaa2_flow_extract_add(
1668 &priv->extract.qos_key_extract,
1671 NH_FLD_IP_PROTO_SIZE);
1673 DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1677 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1680 index = dpaa2_flow_extract_search(
1681 &priv->extract.tc_key_extract[group].dpkg,
1682 NET_PROT_IP, NH_FLD_IP_PROTO);
1684 ret = dpaa2_flow_extract_add(
1685 &priv->extract.tc_key_extract[group],
1688 NH_FLD_IP_PROTO_SIZE);
1690 DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1694 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1697 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1700 "Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1705 key = &spec_ipv4->hdr.next_proto_id;
1707 key = &spec_ipv6->hdr.proto;
1709 mask = &mask_ipv4->hdr.next_proto_id;
1711 mask = &mask_ipv6->hdr.proto;
1713 ret = dpaa2_flow_rule_data_set(
1714 &priv->extract.qos_key_extract,
1718 key, mask, NH_FLD_IP_PROTO_SIZE);
1720 DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1724 ret = dpaa2_flow_rule_data_set(
1725 &priv->extract.tc_key_extract[group],
1729 key, mask, NH_FLD_IP_PROTO_SIZE);
1731 DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1736 (*device_configured) |= local_cfg;
1742 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1743 struct rte_eth_dev *dev,
1744 const struct rte_flow_attr *attr,
1745 const struct rte_flow_item *pattern,
1746 const struct rte_flow_action actions[] __rte_unused,
1747 struct rte_flow_error *error __rte_unused,
1748 int *device_configured)
1753 const struct rte_flow_item_icmp *spec, *mask;
1755 const struct rte_flow_item_icmp *last __rte_unused;
1756 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1758 group = attr->group;
1760 /* Parse pattern list to get the matching parameters */
1761 spec = (const struct rte_flow_item_icmp *)pattern->spec;
1762 last = (const struct rte_flow_item_icmp *)pattern->last;
1763 mask = (const struct rte_flow_item_icmp *)
1764 (pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1766 /* Get traffic class index and flow id to be configured */
1767 flow->tc_id = group;
1768 flow->tc_index = attr->priority;
1771 /* No field of the ICMP header is specified;
1772 * only the ICMP protocol itself matters.
1773 * Example: flow create 0 ingress pattern icmp /
1775 /* The next-protocol field of the generic IP header is actually used
1776 * for ICMP identification.
1778 struct proto_discrimination proto;
1780 index = dpaa2_flow_extract_search(
1781 &priv->extract.qos_key_extract.dpkg,
1782 NET_PROT_IP, NH_FLD_IP_PROTO);
1784 ret = dpaa2_flow_proto_discrimination_extract(
1785 &priv->extract.qos_key_extract,
1786 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1789 "QoS Extract IP protocol to discriminate ICMP failed.");
1793 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1796 index = dpaa2_flow_extract_search(
1797 &priv->extract.tc_key_extract[group].dpkg,
1798 NET_PROT_IP, NH_FLD_IP_PROTO);
1800 ret = dpaa2_flow_proto_discrimination_extract(
1801 &priv->extract.tc_key_extract[group],
1802 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1805 "FS Extract IP protocol to discriminate ICMP failed.");
1809 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1812 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1815 "Move IP addr before ICMP discrimination set failed");
1819 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1820 proto.ip_proto = IPPROTO_ICMP;
1821 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1824 DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1828 (*device_configured) |= local_cfg;
1833 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1834 RTE_FLOW_ITEM_TYPE_ICMP)) {
1835 DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1840 if (mask->hdr.icmp_type) {
1841 index = dpaa2_flow_extract_search(
1842 &priv->extract.qos_key_extract.dpkg,
1843 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1845 ret = dpaa2_flow_extract_add(
1846 &priv->extract.qos_key_extract,
1849 NH_FLD_ICMP_TYPE_SIZE);
1851 DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1855 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1858 index = dpaa2_flow_extract_search(
1859 &priv->extract.tc_key_extract[group].dpkg,
1860 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1862 ret = dpaa2_flow_extract_add(
1863 &priv->extract.tc_key_extract[group],
1866 NH_FLD_ICMP_TYPE_SIZE);
1868 DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1872 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1875 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1878 "Move ipaddr before ICMP TYPE set failed");
1882 ret = dpaa2_flow_rule_data_set(
1883 &priv->extract.qos_key_extract,
1887 &spec->hdr.icmp_type,
1888 &mask->hdr.icmp_type,
1889 NH_FLD_ICMP_TYPE_SIZE);
1891 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1895 ret = dpaa2_flow_rule_data_set(
1896 &priv->extract.tc_key_extract[group],
1900 &spec->hdr.icmp_type,
1901 &mask->hdr.icmp_type,
1902 NH_FLD_ICMP_TYPE_SIZE);
1904 DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1909 if (mask->hdr.icmp_code) {
1910 index = dpaa2_flow_extract_search(
1911 &priv->extract.qos_key_extract.dpkg,
1912 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1914 ret = dpaa2_flow_extract_add(
1915 &priv->extract.qos_key_extract,
1918 NH_FLD_ICMP_CODE_SIZE);
1920 DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1924 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1927 index = dpaa2_flow_extract_search(
1928 &priv->extract.tc_key_extract[group].dpkg,
1929 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1931 ret = dpaa2_flow_extract_add(
1932 &priv->extract.tc_key_extract[group],
1935 NH_FLD_ICMP_CODE_SIZE);
1937 DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1941 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1944 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1947 "Move ipaddr after ICMP CODE set failed");
1951 ret = dpaa2_flow_rule_data_set(
1952 &priv->extract.qos_key_extract,
1956 &spec->hdr.icmp_code,
1957 &mask->hdr.icmp_code,
1958 NH_FLD_ICMP_CODE_SIZE);
1960 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1964 ret = dpaa2_flow_rule_data_set(
1965 &priv->extract.tc_key_extract[group],
1969 &spec->hdr.icmp_code,
1970 &mask->hdr.icmp_code,
1971 NH_FLD_ICMP_CODE_SIZE);
1973 DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1978 (*device_configured) |= local_cfg;
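/* Example (illustrative testpmd syntax): steer one UDP destination port
 *   flow create 0 ingress pattern ipv4 / udp dst is 2152 / end
 *     actions queue index 2 / end
 */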
1984 dpaa2_configure_flow_udp(struct rte_flow *flow,
1985 struct rte_eth_dev *dev,
1986 const struct rte_flow_attr *attr,
1987 const struct rte_flow_item *pattern,
1988 const struct rte_flow_action actions[] __rte_unused,
1989 struct rte_flow_error *error __rte_unused,
1990 int *device_configured)
1995 const struct rte_flow_item_udp *spec, *mask;
1997 const struct rte_flow_item_udp *last __rte_unused;
1998 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2000 group = attr->group;
2002 /* Parse pattern list to get the matching parameters */
2003 spec = (const struct rte_flow_item_udp *)pattern->spec;
2004 last = (const struct rte_flow_item_udp *)pattern->last;
2005 mask = (const struct rte_flow_item_udp *)
2006 (pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
2008 /* Get traffic class index and flow id to be configured */
2009 flow->tc_id = group;
2010 flow->tc_index = attr->priority;
2012 if (!spec || !mc_l4_port_identification) {
2013 struct proto_discrimination proto;
2015 index = dpaa2_flow_extract_search(
2016 &priv->extract.qos_key_extract.dpkg,
2017 NET_PROT_IP, NH_FLD_IP_PROTO);
2019 ret = dpaa2_flow_proto_discrimination_extract(
2020 &priv->extract.qos_key_extract,
2021 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2024 "QoS Extract IP protocol to discriminate UDP failed.");
2028 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2031 index = dpaa2_flow_extract_search(
2032 &priv->extract.tc_key_extract[group].dpkg,
2033 NET_PROT_IP, NH_FLD_IP_PROTO);
2035 ret = dpaa2_flow_proto_discrimination_extract(
2036 &priv->extract.tc_key_extract[group],
2037 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2040 "FS Extract IP protocol to discriminate UDP failed.");
2044 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2047 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2050 "Move IP addr before UDP discrimination set failed");
2054 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2055 proto.ip_proto = IPPROTO_UDP;
2056 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2059 DPAA2_PMD_ERR("UDP discrimination rule set failed");
2063 (*device_configured) |= local_cfg;
2069 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2070 RTE_FLOW_ITEM_TYPE_UDP)) {
2071 DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2076 if (mask->hdr.src_port) {
2077 index = dpaa2_flow_extract_search(
2078 &priv->extract.qos_key_extract.dpkg,
2079 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2081 ret = dpaa2_flow_extract_add(
2082 &priv->extract.qos_key_extract,
2084 NH_FLD_UDP_PORT_SRC,
2085 NH_FLD_UDP_PORT_SIZE);
2087 DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2091 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2094 index = dpaa2_flow_extract_search(
2095 &priv->extract.tc_key_extract[group].dpkg,
2096 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2098 ret = dpaa2_flow_extract_add(
2099 &priv->extract.tc_key_extract[group],
2101 NH_FLD_UDP_PORT_SRC,
2102 NH_FLD_UDP_PORT_SIZE);
2104 DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2108 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2111 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2114 "Move ipaddr before UDP_PORT_SRC set failed");
2118 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2121 NH_FLD_UDP_PORT_SRC,
2122 &spec->hdr.src_port,
2123 &mask->hdr.src_port,
2124 NH_FLD_UDP_PORT_SIZE);
2127 "QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2131 ret = dpaa2_flow_rule_data_set(
2132 &priv->extract.tc_key_extract[group],
2135 NH_FLD_UDP_PORT_SRC,
2136 &spec->hdr.src_port,
2137 &mask->hdr.src_port,
2138 NH_FLD_UDP_PORT_SIZE);
2141 "FS NH_FLD_UDP_PORT_SRC rule data set failed");
2146 if (mask->hdr.dst_port) {
2147 index = dpaa2_flow_extract_search(
2148 &priv->extract.qos_key_extract.dpkg,
2149 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2151 ret = dpaa2_flow_extract_add(
2152 &priv->extract.qos_key_extract,
2154 NH_FLD_UDP_PORT_DST,
2155 NH_FLD_UDP_PORT_SIZE);
2157 DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2161 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2164 index = dpaa2_flow_extract_search(
2165 &priv->extract.tc_key_extract[group].dpkg,
2166 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2168 ret = dpaa2_flow_extract_add(
2169 &priv->extract.tc_key_extract[group],
2171 NH_FLD_UDP_PORT_DST,
2172 NH_FLD_UDP_PORT_SIZE);
2174 DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2178 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2181 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2184 "Move ipaddr before UDP_PORT_DST set failed");
2188 ret = dpaa2_flow_rule_data_set(
2189 &priv->extract.qos_key_extract,
2192 NH_FLD_UDP_PORT_DST,
2193 &spec->hdr.dst_port,
2194 &mask->hdr.dst_port,
2195 NH_FLD_UDP_PORT_SIZE);
2198 "QoS NH_FLD_UDP_PORT_DST rule data set failed");
2202 ret = dpaa2_flow_rule_data_set(
2203 &priv->extract.tc_key_extract[group],
2206 NH_FLD_UDP_PORT_DST,
2207 &spec->hdr.dst_port,
2208 &mask->hdr.dst_port,
2209 NH_FLD_UDP_PORT_SIZE);
2212 "FS NH_FLD_UDP_PORT_DST rule data set failed");
2217 (*device_configured) |= local_cfg;
2223 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2224 struct rte_eth_dev *dev,
2225 const struct rte_flow_attr *attr,
2226 const struct rte_flow_item *pattern,
2227 const struct rte_flow_action actions[] __rte_unused,
2228 struct rte_flow_error *error __rte_unused,
2229 int *device_configured)
2234 const struct rte_flow_item_tcp *spec, *mask;
2236 const struct rte_flow_item_tcp *last __rte_unused;
2237 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2239 group = attr->group;
2241 /* Parse pattern list to get the matching parameters */
2242 spec = (const struct rte_flow_item_tcp *)pattern->spec;
2243 last = (const struct rte_flow_item_tcp *)pattern->last;
2244 mask = (const struct rte_flow_item_tcp *)
2245 (pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2247 /* Get traffic class index and flow id to be configured */
2248 flow->tc_id = group;
2249 flow->tc_index = attr->priority;
2251 if (!spec || !mc_l4_port_identification) {
2252 struct proto_discrimination proto;
2254 index = dpaa2_flow_extract_search(
2255 &priv->extract.qos_key_extract.dpkg,
2256 NET_PROT_IP, NH_FLD_IP_PROTO);
2258 ret = dpaa2_flow_proto_discrimination_extract(
2259 &priv->extract.qos_key_extract,
2260 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2263 "QoS Extract IP protocol to discriminate TCP failed.");
2267 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2270 index = dpaa2_flow_extract_search(
2271 &priv->extract.tc_key_extract[group].dpkg,
2272 NET_PROT_IP, NH_FLD_IP_PROTO);
2274 ret = dpaa2_flow_proto_discrimination_extract(
2275 &priv->extract.tc_key_extract[group],
2276 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2279 "FS Extract IP protocol to discriminate TCP failed.");
2283 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2286 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2289 "Move IP addr before TCP discrimination set failed");
2293 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2294 proto.ip_proto = IPPROTO_TCP;
2295 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2298 DPAA2_PMD_ERR("TCP discrimination rule set failed");
2302 (*device_configured) |= local_cfg;
2308 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2309 RTE_FLOW_ITEM_TYPE_TCP)) {
2310 DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2315 if (mask->hdr.src_port) {
2316 index = dpaa2_flow_extract_search(
2317 &priv->extract.qos_key_extract.dpkg,
2318 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2320 ret = dpaa2_flow_extract_add(
2321 &priv->extract.qos_key_extract,
2323 NH_FLD_TCP_PORT_SRC,
2324 NH_FLD_TCP_PORT_SIZE);
2326 DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2330 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2333 index = dpaa2_flow_extract_search(
2334 &priv->extract.tc_key_extract[group].dpkg,
2335 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2337 ret = dpaa2_flow_extract_add(
2338 &priv->extract.tc_key_extract[group],
2340 NH_FLD_TCP_PORT_SRC,
2341 NH_FLD_TCP_PORT_SIZE);
2343 DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2347 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2350 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2353 "Move ipaddr before TCP_PORT_SRC set failed");
2357 ret = dpaa2_flow_rule_data_set(
2358 &priv->extract.qos_key_extract,
2361 NH_FLD_TCP_PORT_SRC,
2362 &spec->hdr.src_port,
2363 &mask->hdr.src_port,
2364 NH_FLD_TCP_PORT_SIZE);
2367 "QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2371 ret = dpaa2_flow_rule_data_set(
2372 &priv->extract.tc_key_extract[group],
2375 NH_FLD_TCP_PORT_SRC,
2376 &spec->hdr.src_port,
2377 &mask->hdr.src_port,
2378 NH_FLD_TCP_PORT_SIZE);
2381 "FS NH_FLD_TCP_PORT_SRC rule data set failed");
2386 if (mask->hdr.dst_port) {
2387 index = dpaa2_flow_extract_search(
2388 &priv->extract.qos_key_extract.dpkg,
2389 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2391 ret = dpaa2_flow_extract_add(
2392 &priv->extract.qos_key_extract,
2394 NH_FLD_TCP_PORT_DST,
2395 NH_FLD_TCP_PORT_SIZE);
2397 DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2401 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2404 index = dpaa2_flow_extract_search(
2405 &priv->extract.tc_key_extract[group].dpkg,
2406 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2408 ret = dpaa2_flow_extract_add(
2409 &priv->extract.tc_key_extract[group],
2411 NH_FLD_TCP_PORT_DST,
2412 NH_FLD_TCP_PORT_SIZE);
2414 DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2418 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2421 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2424 "Move ipaddr before TCP_PORT_DST set failed");
2428 ret = dpaa2_flow_rule_data_set(
2429 &priv->extract.qos_key_extract,
2432 NH_FLD_TCP_PORT_DST,
2433 &spec->hdr.dst_port,
2434 &mask->hdr.dst_port,
2435 NH_FLD_TCP_PORT_SIZE);
2438 "QoS NH_FLD_TCP_PORT_DST rule data set failed");
2442 ret = dpaa2_flow_rule_data_set(
2443 &priv->extract.tc_key_extract[group],
2446 NH_FLD_TCP_PORT_DST,
2447 &spec->hdr.dst_port,
2448 &mask->hdr.dst_port,
2449 NH_FLD_TCP_PORT_SIZE);
2452 "FS NH_FLD_TCP_PORT_DST rule data set failed");
2457 (*device_configured) |= local_cfg;
2463 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2464 struct rte_eth_dev *dev,
2465 const struct rte_flow_attr *attr,
2466 const struct rte_flow_item *pattern,
2467 const struct rte_flow_action actions[] __rte_unused,
2468 struct rte_flow_error *error __rte_unused,
2469 int *device_configured)
2474 const struct rte_flow_item_sctp *spec, *mask;
2476 const struct rte_flow_item_sctp *last __rte_unused;
2477 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2479 group = attr->group;
2481 /* Parse pattern list to get the matching parameters */
2482 spec = (const struct rte_flow_item_sctp *)pattern->spec;
2483 last = (const struct rte_flow_item_sctp *)pattern->last;
2484 mask = (const struct rte_flow_item_sctp *)
2485 (pattern->mask ? pattern->mask :
2486 &dpaa2_flow_item_sctp_mask);
2488 /* Get traffic class index and flow id to be configured */
2489 flow->tc_id = group;
2490 flow->tc_index = attr->priority;
2492 if (!spec || !mc_l4_port_identification) {
2493 struct proto_discrimination proto;
2495 index = dpaa2_flow_extract_search(
2496 &priv->extract.qos_key_extract.dpkg,
2497 NET_PROT_IP, NH_FLD_IP_PROTO);
2499 ret = dpaa2_flow_proto_discrimination_extract(
2500 &priv->extract.qos_key_extract,
2501 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2504 "QoS Extract IP protocol to discriminate SCTP failed.");
2508 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2511 index = dpaa2_flow_extract_search(
2512 &priv->extract.tc_key_extract[group].dpkg,
2513 NET_PROT_IP, NH_FLD_IP_PROTO);
2515 ret = dpaa2_flow_proto_discrimination_extract(
2516 &priv->extract.tc_key_extract[group],
2517 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2520 "FS Extract IP protocol to discriminate SCTP failed.");
2524 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2527 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2530 "Move ipaddr before SCTP discrimination set failed");
2534 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2535 proto.ip_proto = IPPROTO_SCTP;
2536 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2539 DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2543 (*device_configured) |= local_cfg;
2549 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2550 RTE_FLOW_ITEM_TYPE_SCTP)) {
2551 DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2556 if (mask->hdr.src_port) {
2557 index = dpaa2_flow_extract_search(
2558 &priv->extract.qos_key_extract.dpkg,
2559 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2561 ret = dpaa2_flow_extract_add(
2562 &priv->extract.qos_key_extract,
2564 NH_FLD_SCTP_PORT_SRC,
2565 NH_FLD_SCTP_PORT_SIZE);
2567 DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2571 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2574 index = dpaa2_flow_extract_search(
2575 &priv->extract.tc_key_extract[group].dpkg,
2576 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2578 ret = dpaa2_flow_extract_add(
2579 &priv->extract.tc_key_extract[group],
2581 NH_FLD_SCTP_PORT_SRC,
2582 NH_FLD_SCTP_PORT_SIZE);
2584 DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2588 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2591 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2594 "Move ipaddr before SCTP_PORT_SRC set failed");
2598 ret = dpaa2_flow_rule_data_set(
2599 &priv->extract.qos_key_extract,
2602 NH_FLD_SCTP_PORT_SRC,
2603 &spec->hdr.src_port,
2604 &mask->hdr.src_port,
2605 NH_FLD_SCTP_PORT_SIZE);
2608 "QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2612 ret = dpaa2_flow_rule_data_set(
2613 &priv->extract.tc_key_extract[group],
2616 NH_FLD_SCTP_PORT_SRC,
2617 &spec->hdr.src_port,
2618 &mask->hdr.src_port,
2619 NH_FLD_SCTP_PORT_SIZE);
2622 "FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2627 if (mask->hdr.dst_port) {
2628 index = dpaa2_flow_extract_search(
2629 &priv->extract.qos_key_extract.dpkg,
2630 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2632 ret = dpaa2_flow_extract_add(
2633 &priv->extract.qos_key_extract,
2635 NH_FLD_SCTP_PORT_DST,
2636 NH_FLD_SCTP_PORT_SIZE);
2638 DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2642 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2645 index = dpaa2_flow_extract_search(
2646 &priv->extract.tc_key_extract[group].dpkg,
2647 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2649 ret = dpaa2_flow_extract_add(
2650 &priv->extract.tc_key_extract[group],
2652 NH_FLD_SCTP_PORT_DST,
2653 NH_FLD_SCTP_PORT_SIZE);
2655 DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2659 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2662 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2665 "Move ipaddr before SCTP_PORT_DST set failed");
2669 ret = dpaa2_flow_rule_data_set(
2670 &priv->extract.qos_key_extract,
2673 NH_FLD_SCTP_PORT_DST,
2674 &spec->hdr.dst_port,
2675 &mask->hdr.dst_port,
2676 NH_FLD_SCTP_PORT_SIZE);
2679 "QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2683 ret = dpaa2_flow_rule_data_set(
2684 &priv->extract.tc_key_extract[group],
2687 NH_FLD_SCTP_PORT_DST,
2688 &spec->hdr.dst_port,
2689 &mask->hdr.dst_port,
2690 NH_FLD_SCTP_PORT_SIZE);
2693 "FS NH_FLD_SCTP_PORT_DST rule data set failed");
2698 (*device_configured) |= local_cfg;
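/* Configure a GRE flow: without a spec, GRE traffic is discriminated
 * via the IP protocol field (IPPROTO_GRE); otherwise the GRE protocol
 * field is added to the QoS and FS key extracts and matched against
 * the supplied spec/mask.
 */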
2704 dpaa2_configure_flow_gre(struct rte_flow *flow,
2705 struct rte_eth_dev *dev,
2706 const struct rte_flow_attr *attr,
2707 const struct rte_flow_item *pattern,
2708 const struct rte_flow_action actions[] __rte_unused,
2709 struct rte_flow_error *error __rte_unused,
2710 int *device_configured)
2715 const struct rte_flow_item_gre *spec, *mask;
2717 const struct rte_flow_item_gre *last __rte_unused;
2718 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2720 group = attr->group;
2722 /* Parse pattern list to get the matching parameters */
2723 spec = (const struct rte_flow_item_gre *)pattern->spec;
2724 last = (const struct rte_flow_item_gre *)pattern->last;
2725 mask = (const struct rte_flow_item_gre *)
2726 (pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2728 /* Get traffic class index and flow id to be configured */
2729 flow->tc_id = group;
2730 flow->tc_index = attr->priority;
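/* Discriminate GRE traffic via the IP next-protocol field (IPPROTO_GRE). */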
2733 struct proto_discrimination proto;
2735 index = dpaa2_flow_extract_search(
2736 &priv->extract.qos_key_extract.dpkg,
2737 NET_PROT_IP, NH_FLD_IP_PROTO);
2739 ret = dpaa2_flow_proto_discrimination_extract(
2740 &priv->extract.qos_key_extract,
2741 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2744 "QoS Extract IP protocol to discriminate GRE failed.");
2748 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2751 index = dpaa2_flow_extract_search(
2752 &priv->extract.tc_key_extract[group].dpkg,
2753 NET_PROT_IP, NH_FLD_IP_PROTO);
2755 ret = dpaa2_flow_proto_discrimination_extract(
2756 &priv->extract.tc_key_extract[group],
2757 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2760 "FS Extract IP protocol to discriminate GRE failed.");
2764 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2767 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2770 "Move IP addr before GRE discrimination set failed");
2774 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2775 proto.ip_proto = IPPROTO_GRE;
2776 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2779 DPAA2_PMD_ERR("GRE discrimination rule set failed");
2783 (*device_configured) |= local_cfg;
2788 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2789 RTE_FLOW_ITEM_TYPE_GRE)) {
2790 DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2795 if (!mask->protocol)
2798 index = dpaa2_flow_extract_search(
2799 &priv->extract.qos_key_extract.dpkg,
2800 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2802 ret = dpaa2_flow_extract_add(
2803 &priv->extract.qos_key_extract,
2806 sizeof(rte_be16_t));
2808 DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2812 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2815 index = dpaa2_flow_extract_search(
2816 &priv->extract.tc_key_extract[group].dpkg,
2817 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2819 ret = dpaa2_flow_extract_add(
2820 &priv->extract.tc_key_extract[group],
2823 sizeof(rte_be16_t));
2825 DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2829 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2832 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2835 "Move ipaddr before GRE_TYPE set failed");
2839 ret = dpaa2_flow_rule_data_set(
2840 &priv->extract.qos_key_extract,
2846 sizeof(rte_be16_t));
2849 "QoS NH_FLD_GRE_TYPE rule data set failed");
2853 ret = dpaa2_flow_rule_data_set(
2854 &priv->extract.tc_key_extract[group],
2860 sizeof(rte_be16_t));
2863 "FS NH_FLD_GRE_TYPE rule data set failed");
2867 (*device_configured) |= local_cfg;
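/* Configure a RAW flow: match an opaque byte pattern from the start of
 * the frame. Both spec and mask are required, only non-relative matching
 * at offset 0 is supported, and the spec and mask lengths must be equal.
 */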
2873 dpaa2_configure_flow_raw(struct rte_flow *flow,
2874 struct rte_eth_dev *dev,
2875 const struct rte_flow_attr *attr,
2876 const struct rte_flow_item *pattern,
2877 const struct rte_flow_action actions[] __rte_unused,
2878 struct rte_flow_error *error __rte_unused,
2879 int *device_configured)
2881 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2882 const struct rte_flow_item_raw *spec = pattern->spec;
2883 const struct rte_flow_item_raw *mask = pattern->mask;
2885 priv->extract.qos_key_extract.key_info.key_total_size;
2886 int local_cfg = 0, ret;
2889 /* Need both spec and mask */
2890 if (!spec || !mask) {
2891 DPAA2_PMD_ERR("spec or mask not present.");
2894 /* Only supports non-relative with offset 0 */
2895 if (spec->relative || spec->offset != 0 ||
2896 spec->search || spec->limit) {
2897 DPAA2_PMD_ERR("relative and non zero offset not supported.");
2900 /* Spec len and mask len should be same */
2901 if (spec->length != mask->length) {
2902 DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2906 /* Get traffic class index and flow id to be configured */
2907 group = attr->group;
2908 flow->tc_id = group;
2909 flow->tc_index = attr->priority;
2911 if (prev_key_size <= spec->length) {
2912 ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2915 DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2918 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2920 ret = dpaa2_flow_extract_add_raw(
2921 &priv->extract.tc_key_extract[group],
2924 DPAA2_PMD_ERR("FS Extract RAW add failed.");
2927 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2930 ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2931 mask->pattern, spec->length);
2933 DPAA2_PMD_ERR("QoS RAW rule data set failed");
2937 ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2938 mask->pattern, spec->length);
2940 DPAA2_PMD_ERR("FS RAW rule data set failed");
2944 (*device_configured) |= local_cfg;
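/* Check whether an action type can be handled by the FS (exact-match)
 * table, i.e. whether it is listed in dpaa2_supported_fs_action_type.
 */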
2950 dpaa2_fs_action_supported(enum rte_flow_action_type action)
2954 for (i = 0; i < (int)(sizeof(dpaa2_supported_fs_action_type) /
2955 sizeof(enum rte_flow_action_type)); i++) {
2956 if (action == dpaa2_supported_fs_action_type[i])
2962 /* The existing QoS/FS entries that match on IP address(es)
2963 * need to be updated after new extract(s) are inserted ahead
2964 * of the IP address extract(s), because the IP address offsets
2965 * shift within the rule key.
2968 dpaa2_flow_entry_update(
2969 struct dpaa2_dev_priv *priv, uint8_t tc_id)
2971 struct rte_flow *curr = LIST_FIRST(&priv->flows);
2972 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2974 int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2975 int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2976 struct dpaa2_key_extract *qos_key_extract =
2977 &priv->extract.qos_key_extract;
2978 struct dpaa2_key_extract *tc_key_extract =
2979 &priv->extract.tc_key_extract[tc_id];
2980 char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2981 char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2982 char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2983 char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2984 int extend = -1, extend1, size = -1;
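/* Walk all installed flows, relocate the IP address key/mask bytes to
 * their new offsets in the updated key layout, and re-add the affected
 * QoS and FS entries.
 */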
2988 if (curr->ipaddr_rule.ipaddr_type ==
2990 curr = LIST_NEXT(curr, next);
2994 if (curr->ipaddr_rule.ipaddr_type ==
2997 qos_key_extract->key_info.ipv4_src_offset;
2999 qos_key_extract->key_info.ipv4_dst_offset;
3001 tc_key_extract->key_info.ipv4_src_offset;
3003 tc_key_extract->key_info.ipv4_dst_offset;
3004 size = NH_FLD_IPV4_ADDR_SIZE;
3007 qos_key_extract->key_info.ipv6_src_offset;
3009 qos_key_extract->key_info.ipv6_dst_offset;
3011 tc_key_extract->key_info.ipv6_src_offset;
3013 tc_key_extract->key_info.ipv6_dst_offset;
3014 size = NH_FLD_IPV6_ADDR_SIZE;
3017 qos_index = curr->tc_id * priv->fs_entries +
3020 dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
3022 if (priv->num_rx_tc > 1) {
3023 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3024 priv->token, &curr->qos_rule);
3026 DPAA2_PMD_ERR("Qos entry remove failed.");
3033 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3034 RTE_ASSERT(qos_ipsrc_offset >=
3035 curr->ipaddr_rule.qos_ipsrc_offset);
3036 extend1 = qos_ipsrc_offset -
3037 curr->ipaddr_rule.qos_ipsrc_offset;
3039 RTE_ASSERT(extend == extend1);
3043 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3044 (size == NH_FLD_IPV6_ADDR_SIZE));
3047 (char *)(size_t)curr->qos_rule.key_iova +
3048 curr->ipaddr_rule.qos_ipsrc_offset,
3050 memset((char *)(size_t)curr->qos_rule.key_iova +
3051 curr->ipaddr_rule.qos_ipsrc_offset,
3055 (char *)(size_t)curr->qos_rule.mask_iova +
3056 curr->ipaddr_rule.qos_ipsrc_offset,
3058 memset((char *)(size_t)curr->qos_rule.mask_iova +
3059 curr->ipaddr_rule.qos_ipsrc_offset,
3062 curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3065 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3066 RTE_ASSERT(qos_ipdst_offset >=
3067 curr->ipaddr_rule.qos_ipdst_offset);
3068 extend1 = qos_ipdst_offset -
3069 curr->ipaddr_rule.qos_ipdst_offset;
3071 RTE_ASSERT(extend == extend1);
3075 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3076 (size == NH_FLD_IPV6_ADDR_SIZE));
3079 (char *)(size_t)curr->qos_rule.key_iova +
3080 curr->ipaddr_rule.qos_ipdst_offset,
3082 memset((char *)(size_t)curr->qos_rule.key_iova +
3083 curr->ipaddr_rule.qos_ipdst_offset,
3087 (char *)(size_t)curr->qos_rule.mask_iova +
3088 curr->ipaddr_rule.qos_ipdst_offset,
3090 memset((char *)(size_t)curr->qos_rule.mask_iova +
3091 curr->ipaddr_rule.qos_ipdst_offset,
3094 curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3097 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3098 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3099 (size == NH_FLD_IPV6_ADDR_SIZE));
3100 memcpy((char *)(size_t)curr->qos_rule.key_iova +
3101 curr->ipaddr_rule.qos_ipsrc_offset,
3104 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3105 curr->ipaddr_rule.qos_ipsrc_offset,
3109 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3110 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3111 (size == NH_FLD_IPV6_ADDR_SIZE));
3112 memcpy((char *)(size_t)curr->qos_rule.key_iova +
3113 curr->ipaddr_rule.qos_ipdst_offset,
3116 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3117 curr->ipaddr_rule.qos_ipdst_offset,
3123 curr->qos_real_key_size += extend;
3125 curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3127 dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
3129 if (priv->num_rx_tc > 1) {
3130 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3131 priv->token, &curr->qos_rule,
3132 curr->tc_id, qos_index,
3135 DPAA2_PMD_ERR("Qos entry update failed.");
3140 if (!dpaa2_fs_action_supported(curr->action)) {
3141 curr = LIST_NEXT(curr, next);
3145 dpaa2_flow_fs_entry_log("Before update", curr);
3148 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3149 priv->token, curr->tc_id, &curr->fs_rule);
3151 DPAA2_PMD_ERR("FS entry remove failed.");
3155 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3156 tc_id == curr->tc_id) {
3157 RTE_ASSERT(fs_ipsrc_offset >=
3158 curr->ipaddr_rule.fs_ipsrc_offset);
3159 extend1 = fs_ipsrc_offset -
3160 curr->ipaddr_rule.fs_ipsrc_offset;
3162 RTE_ASSERT(extend == extend1);
3167 (char *)(size_t)curr->fs_rule.key_iova +
3168 curr->ipaddr_rule.fs_ipsrc_offset,
3170 memset((char *)(size_t)curr->fs_rule.key_iova +
3171 curr->ipaddr_rule.fs_ipsrc_offset,
3175 (char *)(size_t)curr->fs_rule.mask_iova +
3176 curr->ipaddr_rule.fs_ipsrc_offset,
3178 memset((char *)(size_t)curr->fs_rule.mask_iova +
3179 curr->ipaddr_rule.fs_ipsrc_offset,
3182 curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3185 if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3186 tc_id == curr->tc_id) {
3187 RTE_ASSERT(fs_ipdst_offset >=
3188 curr->ipaddr_rule.fs_ipdst_offset);
3189 extend1 = fs_ipdst_offset -
3190 curr->ipaddr_rule.fs_ipdst_offset;
3192 RTE_ASSERT(extend == extend1);
3197 (char *)(size_t)curr->fs_rule.key_iova +
3198 curr->ipaddr_rule.fs_ipdst_offset,
3200 memset((char *)(size_t)curr->fs_rule.key_iova +
3201 curr->ipaddr_rule.fs_ipdst_offset,
3205 (char *)(size_t)curr->fs_rule.mask_iova +
3206 curr->ipaddr_rule.fs_ipdst_offset,
3208 memset((char *)(size_t)curr->fs_rule.mask_iova +
3209 curr->ipaddr_rule.fs_ipdst_offset,
3212 curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3215 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3216 memcpy((char *)(size_t)curr->fs_rule.key_iova +
3217 curr->ipaddr_rule.fs_ipsrc_offset,
3220 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3221 curr->ipaddr_rule.fs_ipsrc_offset,
3225 if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3226 memcpy((char *)(size_t)curr->fs_rule.key_iova +
3227 curr->ipaddr_rule.fs_ipdst_offset,
3230 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3231 curr->ipaddr_rule.fs_ipdst_offset,
3237 curr->fs_real_key_size += extend;
3238 curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3240 dpaa2_flow_fs_entry_log("Start update", curr);
3242 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3243 priv->token, curr->tc_id, curr->tc_index,
3244 &curr->fs_rule, &curr->action_cfg);
3246 DPAA2_PMD_ERR("FS entry update failed.");
3250 curr = LIST_NEXT(curr, next);
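/* Reject a new flow whose group/priority pair collides with an already
 * installed flow, since that pair is used as the TC index and the FS
 * entry index within the TC.
 */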
3257 dpaa2_flow_verify_attr(
3258 struct dpaa2_dev_priv *priv,
3259 const struct rte_flow_attr *attr)
3261 struct rte_flow *curr = LIST_FIRST(&priv->flows);
3264 if (curr->tc_id == attr->group &&
3265 curr->tc_index == attr->priority) {
3267 "Flow with group %d and priority %d already exists.",
3268 attr->group, attr->priority);
3272 curr = LIST_NEXT(curr, next);
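/* Resolve the destination Ethernet device for PHY_PORT/PORT_ID actions.
 * Returns the current port when 'original' is set, otherwise the port
 * named by the action index; NULL if the target is not a valid DPAA2
 * device.
 */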
3278 static inline struct rte_eth_dev *
3279 dpaa2_flow_redirect_dev(struct dpaa2_dev_priv *priv,
3280 const struct rte_flow_action *action)
3282 const struct rte_flow_action_phy_port *phy_port;
3283 const struct rte_flow_action_port_id *port_id;
3285 struct rte_eth_dev *dest_dev;
3287 if (action->type == RTE_FLOW_ACTION_TYPE_PHY_PORT) {
3288 phy_port = (const struct rte_flow_action_phy_port *)
3290 if (!phy_port->original)
3291 idx = phy_port->index;
3292 } else if (action->type == RTE_FLOW_ACTION_TYPE_PORT_ID) {
3293 port_id = (const struct rte_flow_action_port_id *)
3295 if (!port_id->original)
3302 if (!rte_eth_dev_is_valid_port(idx))
3304 dest_dev = &rte_eth_devices[idx];
3306 dest_dev = priv->eth_dev;
3309 if (!dpaa2_dev_is_dpaa2(dest_dev))
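/* Validate the action list against the device state: QUEUE must target
 * a queue belonging to the flow's group, PHY_PORT/PORT_ID must redirect
 * to a valid DPAA2 port, and RSS queues must fit the distribution size
 * and stay within the flow's group.
 */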
3316 dpaa2_flow_verify_action(
3317 struct dpaa2_dev_priv *priv,
3318 const struct rte_flow_attr *attr,
3319 const struct rte_flow_action actions[])
3321 int end_of_list = 0, i, j = 0;
3322 const struct rte_flow_action_queue *dest_queue;
3323 const struct rte_flow_action_rss *rss_conf;
3324 struct dpaa2_queue *rxq;
3326 while (!end_of_list) {
3327 switch (actions[j].type) {
3328 case RTE_FLOW_ACTION_TYPE_QUEUE:
3329 dest_queue = (const struct rte_flow_action_queue *)
3331 rxq = priv->rx_vq[dest_queue->index];
3332 if (attr->group != rxq->tc_index) {
3334 "RXQ[%d] does not belong to the group %d",
3335 dest_queue->index, attr->group);
3340 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3341 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3342 if (!dpaa2_flow_redirect_dev(priv, &actions[j])) {
3343 DPAA2_PMD_ERR("Invalid port id of action");
3347 case RTE_FLOW_ACTION_TYPE_RSS:
3348 rss_conf = (const struct rte_flow_action_rss *)
3350 if (rss_conf->queue_num > priv->dist_queues) {
3352 "RSS number exceeds the distrbution size");
3355 for (i = 0; i < (int)rss_conf->queue_num; i++) {
3356 if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3358 "RSS queue index exceeds the number of RXQs");
3361 rxq = priv->rx_vq[rss_conf->queue[i]];
3362 if (rxq->tc_index != attr->group) {
3364 "Queue/Group combination are not supported\n");
3370 case RTE_FLOW_ACTION_TYPE_END:
3374 DPAA2_PMD_ERR("Invalid action type");
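/* Main flow programming routine: walk the pattern items to build the
 * QoS/FS key extracts and rule data, then walk the actions to program
 * the FS/QoS tables (or the RSS distribution), and finally update any
 * existing entries whose key layout shifted.
 */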
3384 dpaa2_generic_flow_set(struct rte_flow *flow,
3385 struct rte_eth_dev *dev,
3386 const struct rte_flow_attr *attr,
3387 const struct rte_flow_item pattern[],
3388 const struct rte_flow_action actions[],
3389 struct rte_flow_error *error)
3391 const struct rte_flow_action_queue *dest_queue;
3392 const struct rte_flow_action_rss *rss_conf;
3393 int is_keycfg_configured = 0, end_of_list = 0;
3394 int ret = 0, i = 0, j = 0;
3395 struct dpni_rx_dist_cfg tc_cfg;
3396 struct dpni_qos_tbl_cfg qos_cfg;
3397 struct dpni_fs_action_cfg action;
3398 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3399 struct dpaa2_queue *dest_q;
3400 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3402 struct rte_flow *curr = LIST_FIRST(&priv->flows);
3404 struct rte_eth_dev *dest_dev;
3405 struct dpaa2_dev_priv *dest_priv;
3407 ret = dpaa2_flow_verify_attr(priv, attr);
3411 ret = dpaa2_flow_verify_action(priv, attr, actions);
3415 /* Parse pattern list to get the matching parameters */
3416 while (!end_of_list) {
3417 switch (pattern[i].type) {
3418 case RTE_FLOW_ITEM_TYPE_ETH:
3419 ret = dpaa2_configure_flow_eth(flow,
3420 dev, attr, &pattern[i], actions, error,
3421 &is_keycfg_configured);
3423 DPAA2_PMD_ERR("ETH flow configuration failed!");
3427 case RTE_FLOW_ITEM_TYPE_VLAN:
3428 ret = dpaa2_configure_flow_vlan(flow,
3429 dev, attr, &pattern[i], actions, error,
3430 &is_keycfg_configured);
3432 DPAA2_PMD_ERR("vLan flow configuration failed!");
3436 case RTE_FLOW_ITEM_TYPE_IPV4:
3437 case RTE_FLOW_ITEM_TYPE_IPV6:
3438 ret = dpaa2_configure_flow_generic_ip(flow,
3439 dev, attr, &pattern[i], actions, error,
3440 &is_keycfg_configured);
3442 DPAA2_PMD_ERR("IP flow configuration failed!");
3446 case RTE_FLOW_ITEM_TYPE_ICMP:
3447 ret = dpaa2_configure_flow_icmp(flow,
3448 dev, attr, &pattern[i], actions, error,
3449 &is_keycfg_configured);
3451 DPAA2_PMD_ERR("ICMP flow configuration failed!");
3455 case RTE_FLOW_ITEM_TYPE_UDP:
3456 ret = dpaa2_configure_flow_udp(flow,
3457 dev, attr, &pattern[i], actions, error,
3458 &is_keycfg_configured);
3460 DPAA2_PMD_ERR("UDP flow configuration failed!");
3464 case RTE_FLOW_ITEM_TYPE_TCP:
3465 ret = dpaa2_configure_flow_tcp(flow,
3466 dev, attr, &pattern[i], actions, error,
3467 &is_keycfg_configured);
3469 DPAA2_PMD_ERR("TCP flow configuration failed!");
3473 case RTE_FLOW_ITEM_TYPE_SCTP:
3474 ret = dpaa2_configure_flow_sctp(flow,
3475 dev, attr, &pattern[i], actions, error,
3476 &is_keycfg_configured);
3478 DPAA2_PMD_ERR("SCTP flow configuration failed!");
3482 case RTE_FLOW_ITEM_TYPE_GRE:
3483 ret = dpaa2_configure_flow_gre(flow,
3484 dev, attr, &pattern[i], actions, error,
3485 &is_keycfg_configured);
3487 DPAA2_PMD_ERR("GRE flow configuration failed!");
3491 case RTE_FLOW_ITEM_TYPE_RAW:
3492 ret = dpaa2_configure_flow_raw(flow,
3493 dev, attr, &pattern[i],
3495 &is_keycfg_configured);
3497 DPAA2_PMD_ERR("RAW flow configuration failed!");
3501 case RTE_FLOW_ITEM_TYPE_END:
3503 break; /*End of List*/
3505 DPAA2_PMD_ERR("Invalid action type");
3512 /* Let's parse action on matching traffic */
3514 while (!end_of_list) {
3515 switch (actions[j].type) {
3516 case RTE_FLOW_ACTION_TYPE_QUEUE:
3517 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
3518 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3519 memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3520 flow->action = actions[j].type;
3522 if (actions[j].type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3523 dest_queue = (const struct rte_flow_action_queue *)
3525 dest_q = priv->rx_vq[dest_queue->index];
3526 action.flow_id = dest_q->flow_id;
3528 dest_dev = dpaa2_flow_redirect_dev(priv,
3531 DPAA2_PMD_ERR("Invalid destination device to redirect!");
3535 dest_priv = dest_dev->data->dev_private;
3536 dest_q = dest_priv->tx_vq[0];
3538 DPNI_FS_OPT_REDIRECT_TO_DPNI_TX;
3539 action.redirect_obj_token = dest_priv->token;
3540 action.flow_id = dest_q->flow_id;
3543 /* Configure FS table first*/
3544 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3545 dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
3546 if (dpkg_prepare_key_cfg(
3547 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3548 (uint8_t *)(size_t)priv->extract
3549 .tc_extract_param[flow->tc_id]) < 0) {
3551 "Unable to prepare extract parameters");
3556 sizeof(struct dpni_rx_dist_cfg));
3557 tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3558 tc_cfg.key_cfg_iova =
3559 (uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3560 tc_cfg.tc = flow->tc_id;
3561 tc_cfg.enable = false;
3562 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3563 priv->token, &tc_cfg);
3566 "TC hash cannot be disabled.(%d)",
3570 tc_cfg.enable = true;
3571 tc_cfg.fs_miss_flow_id = dpaa2_flow_miss_flow_id;
3572 ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
3573 priv->token, &tc_cfg);
3576 "TC distribution cannot be configured.(%d)",
3582 /* Configure QoS table then.*/
3583 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3584 dpaa2_flow_qos_table_extracts_log(priv);
3585 if (dpkg_prepare_key_cfg(
3586 &priv->extract.qos_key_extract.dpkg,
3587 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3589 "Unable to prepare extract parameters");
3593 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3594 qos_cfg.discard_on_miss = false;
3595 qos_cfg.default_tc = 0;
3596 qos_cfg.keep_entries = true;
3597 qos_cfg.key_cfg_iova =
3598 (size_t)priv->extract.qos_extract_param;
3599 /* The QoS table is only effective when multiple TCs are in use. */
3600 if (priv->num_rx_tc > 1) {
3601 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3602 priv->token, &qos_cfg);
3605 "RSS QoS table can not be configured(%d)\n",
3612 flow->qos_real_key_size = priv->extract
3613 .qos_key_extract.key_info.key_total_size;
3614 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3615 if (flow->ipaddr_rule.qos_ipdst_offset >=
3616 flow->ipaddr_rule.qos_ipsrc_offset) {
3617 flow->qos_real_key_size =
3618 flow->ipaddr_rule.qos_ipdst_offset +
3619 NH_FLD_IPV4_ADDR_SIZE;
3621 flow->qos_real_key_size =
3622 flow->ipaddr_rule.qos_ipsrc_offset +
3623 NH_FLD_IPV4_ADDR_SIZE;
3625 } else if (flow->ipaddr_rule.ipaddr_type ==
3627 if (flow->ipaddr_rule.qos_ipdst_offset >=
3628 flow->ipaddr_rule.qos_ipsrc_offset) {
3629 flow->qos_real_key_size =
3630 flow->ipaddr_rule.qos_ipdst_offset +
3631 NH_FLD_IPV6_ADDR_SIZE;
3633 flow->qos_real_key_size =
3634 flow->ipaddr_rule.qos_ipsrc_offset +
3635 NH_FLD_IPV6_ADDR_SIZE;
3639 /* A QoS entry is only effective when multiple TCs are in use. */
3640 if (priv->num_rx_tc > 1) {
3641 qos_index = flow->tc_id * priv->fs_entries +
3643 if (qos_index >= priv->qos_entries) {
3644 DPAA2_PMD_ERR("QoS table with %d entries full",
3648 flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3650 dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
3652 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3653 priv->token, &flow->qos_rule,
3654 flow->tc_id, qos_index,
3658 "Error in addnig entry to QoS table(%d)", ret);
3663 if (flow->tc_index >= priv->fs_entries) {
3664 DPAA2_PMD_ERR("FS table with %d entries full",
3669 flow->fs_real_key_size =
3670 priv->extract.tc_key_extract[flow->tc_id]
3671 .key_info.key_total_size;
3673 if (flow->ipaddr_rule.ipaddr_type ==
3675 if (flow->ipaddr_rule.fs_ipdst_offset >=
3676 flow->ipaddr_rule.fs_ipsrc_offset) {
3677 flow->fs_real_key_size =
3678 flow->ipaddr_rule.fs_ipdst_offset +
3679 NH_FLD_IPV4_ADDR_SIZE;
3681 flow->fs_real_key_size =
3682 flow->ipaddr_rule.fs_ipsrc_offset +
3683 NH_FLD_IPV4_ADDR_SIZE;
3685 } else if (flow->ipaddr_rule.ipaddr_type ==
3687 if (flow->ipaddr_rule.fs_ipdst_offset >=
3688 flow->ipaddr_rule.fs_ipsrc_offset) {
3689 flow->fs_real_key_size =
3690 flow->ipaddr_rule.fs_ipdst_offset +
3691 NH_FLD_IPV6_ADDR_SIZE;
3693 flow->fs_real_key_size =
3694 flow->ipaddr_rule.fs_ipsrc_offset +
3695 NH_FLD_IPV6_ADDR_SIZE;
3699 flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3701 dpaa2_flow_fs_entry_log("Start add", flow);
3703 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3704 flow->tc_id, flow->tc_index,
3705 &flow->fs_rule, &action);
3708 "Error in adding entry to FS table(%d)", ret);
3711 memcpy(&flow->action_cfg, &action,
3712 sizeof(struct dpni_fs_action_cfg));
3714 case RTE_FLOW_ACTION_TYPE_RSS:
3715 rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3717 flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3718 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3719 &priv->extract.tc_key_extract[flow->tc_id].dpkg);
3722 "unable to set flow distribution.please check queue config\n");
3726 /* Allocate DMA'ble memory to write the rules */
3727 param = (size_t)rte_malloc(NULL, 256, 64);
3729 DPAA2_PMD_ERR("Memory allocation failure\n");
3733 if (dpkg_prepare_key_cfg(
3734 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3735 (uint8_t *)param) < 0) {
3737 "Unable to prepare extract parameters");
3738 rte_free((void *)param);
3742 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
3743 tc_cfg.dist_size = rss_conf->queue_num;
3744 tc_cfg.key_cfg_iova = (size_t)param;
3745 tc_cfg.enable = true;
3746 tc_cfg.tc = flow->tc_id;
3747 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3748 priv->token, &tc_cfg);
3751 "RSS TC table cannot be configured: %d\n",
3753 rte_free((void *)param);
3757 rte_free((void *)param);
3758 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3759 if (dpkg_prepare_key_cfg(
3760 &priv->extract.qos_key_extract.dpkg,
3761 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3763 "Unable to prepare extract parameters");
3767 sizeof(struct dpni_qos_tbl_cfg));
3768 qos_cfg.discard_on_miss = true;
3769 qos_cfg.keep_entries = true;
3770 qos_cfg.key_cfg_iova =
3771 (size_t)priv->extract.qos_extract_param;
3772 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3773 priv->token, &qos_cfg);
3776 "RSS QoS dist can't be configured-%d\n",
3782 /* Add Rule into QoS table */
3783 qos_index = flow->tc_id * priv->fs_entries +
3785 if (qos_index >= priv->qos_entries) {
3786 DPAA2_PMD_ERR("QoS table with %d entries full",
3791 flow->qos_real_key_size =
3792 priv->extract.qos_key_extract.key_info.key_total_size;
3793 flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3794 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3795 &flow->qos_rule, flow->tc_id,
3799 "Error in entry addition in QoS table(%d)",
3804 case RTE_FLOW_ACTION_TYPE_END:
3808 DPAA2_PMD_ERR("Invalid action type");
3816 if (is_keycfg_configured &
3817 (DPAA2_QOS_TABLE_RECONFIGURE |
3818 DPAA2_FS_TABLE_RECONFIGURE)) {
3819 ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3821 DPAA2_PMD_ERR("Flow entry update failed.");
3826 /* New rules are inserted. */
3828 LIST_INSERT_HEAD(&priv->flows, flow, next);
3830 while (LIST_NEXT(curr, next))
3831 curr = LIST_NEXT(curr, next);
3832 LIST_INSERT_AFTER(curr, flow, next);
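/* Validate flow attributes against the DPNI capabilities:
 * group < number of Rx TCs, priority < FS entries, ingress only.
 */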
3839 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3840 const struct rte_flow_attr *attr)
3844 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3845 DPAA2_PMD_ERR("Priority group is out of range\n");
3848 if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3849 DPAA2_PMD_ERR("Priority within the group is out of range\n");
3852 if (unlikely(attr->egress)) {
3854 "Flow configuration is not supported on egress side\n");
3857 if (unlikely(!attr->ingress)) {
3858 DPAA2_PMD_ERR("Ingress flag must be configured\n");
3865 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3867 unsigned int i, j, is_found = 0;
3870 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3871 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3872 if (dpaa2_supported_pattern_type[i]
3873 == pattern[j].type) {
3883 /* Verify that each pattern item (other than END) carries a spec */
3884 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3885 if (!pattern[j].spec) {
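/* Validate the action list: every action type must be supported and,
 * except for DROP, every action must carry a configuration.
 */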
3895 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3897 unsigned int i, j, is_found = 0;
3900 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3901 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3902 if (dpaa2_supported_action_type[i] == actions[j].type) {
3912 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3913 if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3921 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3922 const struct rte_flow_attr *flow_attr,
3923 const struct rte_flow_item pattern[],
3924 const struct rte_flow_action actions[],
3925 struct rte_flow_error *error)
3927 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3928 struct dpni_attr dpni_attr;
3929 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3930 uint16_t token = priv->token;
3933 memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3934 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3937 "Failure to get dpni@%p attribute, err code %d\n",
3939 rte_flow_error_set(error, EPERM,
3940 RTE_FLOW_ERROR_TYPE_ATTR,
3941 flow_attr, "invalid");
3945 /* Verify input attributes */
3946 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3949 "Invalid attributes are given\n");
3950 rte_flow_error_set(error, EPERM,
3951 RTE_FLOW_ERROR_TYPE_ATTR,
3952 flow_attr, "invalid");
3953 goto not_valid_params;
3955 /* Verify input pattern list */
3956 ret = dpaa2_dev_verify_patterns(pattern);
3959 "Invalid pattern list is given\n");
3960 rte_flow_error_set(error, EPERM,
3961 RTE_FLOW_ERROR_TYPE_ITEM,
3962 pattern, "invalid");
3963 goto not_valid_params;
3965 /* Verify input action list */
3966 ret = dpaa2_dev_verify_actions(actions);
3969 "Invalid action list is given\n");
3970 rte_flow_error_set(error, EPERM,
3971 RTE_FLOW_ERROR_TYPE_ACTION,
3972 actions, "invalid");
3973 goto not_valid_params;
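/* rte_flow create callback: allocate the flow and the DMA-able key/mask
 * buffers for the QoS and FS rules, then program the hardware through
 * dpaa2_generic_flow_set(). The DPAA2_FLOW_CONTROL_LOG and
 * DPAA2_FLOW_CONTROL_MISS_FLOW environment variables enable debug
 * logging and override the FS miss flow ID, respectively.
 */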
3980 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3981 const struct rte_flow_attr *attr,
3982 const struct rte_flow_item pattern[],
3983 const struct rte_flow_action actions[],
3984 struct rte_flow_error *error)
3986 struct rte_flow *flow = NULL;
3987 size_t key_iova = 0, mask_iova = 0;
3990 dpaa2_flow_control_log =
3991 getenv("DPAA2_FLOW_CONTROL_LOG");
3993 if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3994 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3996 dpaa2_flow_miss_flow_id =
3997 atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3998 if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
4000 "The missed flow ID %d exceeds the max flow ID %d",
4001 dpaa2_flow_miss_flow_id,
4002 priv->dist_queues - 1);
4007 flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
4009 DPAA2_PMD_ERR("Failure to allocate memory for flow");
4012 /* Allocate DMA'ble memory to write the rules */
4013 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4016 "Memory allocation failure for rule configuration\n");
4019 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4022 "Memory allocation failure for rule configuration\n");
4026 flow->qos_rule.key_iova = key_iova;
4027 flow->qos_rule.mask_iova = mask_iova;
4029 /* Allocate DMA'ble memory to write the rules */
4030 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4033 "Memory allocation failure for rule configuration\n");
4036 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
4039 "Memory allocation failure for rule configuration\n");
4043 flow->fs_rule.key_iova = key_iova;
4044 flow->fs_rule.mask_iova = mask_iova;
4046 flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
4047 flow->ipaddr_rule.qos_ipsrc_offset =
4048 IP_ADDRESS_OFFSET_INVALID;
4049 flow->ipaddr_rule.qos_ipdst_offset =
4050 IP_ADDRESS_OFFSET_INVALID;
4051 flow->ipaddr_rule.fs_ipsrc_offset =
4052 IP_ADDRESS_OFFSET_INVALID;
4053 flow->ipaddr_rule.fs_ipdst_offset =
4054 IP_ADDRESS_OFFSET_INVALID;
4056 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
4059 if (error && error->type > RTE_FLOW_ERROR_TYPE_ACTION)
4060 rte_flow_error_set(error, EPERM,
4061 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4063 DPAA2_PMD_ERR("Failure to create flow, return code (%d)", ret);
4064 goto creation_error;
4069 rte_flow_error_set(error, EPERM,
4070 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4071 NULL, "memory alloc");
4073 rte_free((void *)flow);
4074 rte_free((void *)key_iova);
4075 rte_free((void *)mask_iova);
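/* rte_flow destroy callback: remove the QoS entry (when multiple TCs are
 * configured) and the FS entry for queue/redirect actions, or the QoS
 * entry for RSS, then unlink the flow and free its rule buffers.
 */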
4081 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
4082 struct rte_flow *flow,
4083 struct rte_flow_error *error)
4086 struct dpaa2_dev_priv *priv = dev->data->dev_private;
4087 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4089 switch (flow->action) {
4090 case RTE_FLOW_ACTION_TYPE_QUEUE:
4091 case RTE_FLOW_ACTION_TYPE_PHY_PORT:
4092 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4093 if (priv->num_rx_tc > 1) {
4094 /* Remove entry from QoS table first */
4095 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4099 "Error in removing entry from QoS table(%d)", ret);
4104 /* Then remove entry from FS table */
4105 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4106 flow->tc_id, &flow->fs_rule);
4109 "Error in removing entry from FS table(%d)", ret);
4113 case RTE_FLOW_ACTION_TYPE_RSS:
4114 if (priv->num_rx_tc > 1) {
4115 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4119 "Error in entry addition in QoS table(%d)", ret);
4126 "Action type (%d) is not supported", flow->action);
4131 LIST_REMOVE(flow, next);
4132 rte_free((void *)(size_t)flow->qos_rule.key_iova);
4133 rte_free((void *)(size_t)flow->qos_rule.mask_iova);
4134 rte_free((void *)(size_t)flow->fs_rule.key_iova);
4135 rte_free((void *)(size_t)flow->fs_rule.mask_iova);
4136 /* Now free the flow */
4141 rte_flow_error_set(error, EPERM,
4142 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4148 * Destroy user-configured flow rules.
4150 * This function skips internal flow rules.
4152 * @see rte_flow_flush()
4156 dpaa2_flow_flush(struct rte_eth_dev *dev,
4157 struct rte_flow_error *error)
4159 struct dpaa2_dev_priv *priv = dev->data->dev_private;
4160 struct rte_flow *flow = LIST_FIRST(&priv->flows);
4163 struct rte_flow *next = LIST_NEXT(flow, next);
4165 dpaa2_flow_destroy(dev, flow, error);
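/* rte_flow query callback: no counters or statistics are exposed by this
 * PMD, so this is currently a stub.
 */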
4172 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
4173 struct rte_flow *flow __rte_unused,
4174 const struct rte_flow_action *actions __rte_unused,
4175 void *data __rte_unused,
4176 struct rte_flow_error *error __rte_unused)
4182 * Clean up all flow rules.
4184 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4185 * rules regardless of whether they are internal or user-configured.
4188 * Pointer to Ethernet device.
4191 dpaa2_flow_clean(struct rte_eth_dev *dev)
4193 struct rte_flow *flow;
4194 struct dpaa2_dev_priv *priv = dev->data->dev_private;
4196 while ((flow = LIST_FIRST(&priv->flows)))
4197 dpaa2_flow_destroy(dev, flow, NULL);
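/* rte_flow driver operations exported by this PMD.
 *
 * Illustrative usage sketch (hypothetical application code, not part of
 * this driver): steer UDP/IPv4 traffic with destination port 4789 on
 * ethdev port 0 to Rx queue 1.
 *
 *   struct rte_flow_attr attr = { .group = 0, .priority = 0, .ingress = 1 };
 *   struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16(4789) };
 *   struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16(0xffff) };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *         .spec = &udp_spec, .mask = &udp_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *f = NULL;
 *
 *   if (!rte_flow_validate(0, &attr, pattern, actions, &err))
 *       f = rte_flow_create(0, &attr, pattern, actions, &err);
 */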
4200 const struct rte_flow_ops dpaa2_flow_ops = {
4201 .create = dpaa2_flow_create,
4202 .validate = dpaa2_flow_validate,
4203 .destroy = dpaa2_flow_destroy,
4204 .flush = dpaa2_flow_flush,
4205 .query = dpaa2_flow_query,