1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2020 NXP
13 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
25 /* Workaround to discriminate UDP/TCP/SCTP
26 * using the next-protocol field of the L3 header,
27 * because MC/WRIOP cannot identify
28 * the L4 protocol from the L4 ports alone.
30 int mc_l4_port_identification;
32 #define FIXED_ENTRY_SIZE 54
34 enum flow_rule_ipaddr_type {
40 struct flow_rule_ipaddr {
41 enum flow_rule_ipaddr_type ipaddr_type;
49 LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
50 struct dpni_rule_cfg qos_rule;
51 struct dpni_rule_cfg fs_rule;
52 uint8_t qos_real_key_size;
53 uint8_t fs_real_key_size;
54 uint8_t tc_id; /** Traffic Class ID. */
55 uint8_t tc_index; /** index within this Traffic Class. */
56 enum rte_flow_action_type action;
58 /* Special for IP address to specify the offset
61 struct flow_rule_ipaddr ipaddr_rule;
62 struct dpni_fs_action_cfg action_cfg;
66 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
67 RTE_FLOW_ITEM_TYPE_END,
68 RTE_FLOW_ITEM_TYPE_ETH,
69 RTE_FLOW_ITEM_TYPE_VLAN,
70 RTE_FLOW_ITEM_TYPE_IPV4,
71 RTE_FLOW_ITEM_TYPE_IPV6,
72 RTE_FLOW_ITEM_TYPE_ICMP,
73 RTE_FLOW_ITEM_TYPE_UDP,
74 RTE_FLOW_ITEM_TYPE_TCP,
75 RTE_FLOW_ITEM_TYPE_SCTP,
76 RTE_FLOW_ITEM_TYPE_GRE,
80 enum rte_flow_action_type dpaa2_supported_action_type[] = {
81 RTE_FLOW_ACTION_TYPE_END,
82 RTE_FLOW_ACTION_TYPE_QUEUE,
83 RTE_FLOW_ACTION_TYPE_RSS
86 /* Max of enum rte_flow_item_type + 1; stands for both IPv4 and IPv6 */
87 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
89 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
92 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
93 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
94 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
95 .type = RTE_BE16(0xffff),
98 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
99 .tci = RTE_BE16(0xffff),
102 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
103 .hdr.src_addr = RTE_BE32(0xffffffff),
104 .hdr.dst_addr = RTE_BE32(0xffffffff),
105 .hdr.next_proto_id = 0xff,
108 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
111 "\xff\xff\xff\xff\xff\xff\xff\xff"
112 "\xff\xff\xff\xff\xff\xff\xff\xff",
114 "\xff\xff\xff\xff\xff\xff\xff\xff"
115 "\xff\xff\xff\xff\xff\xff\xff\xff",
120 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
121 .hdr.icmp_type = 0xff,
122 .hdr.icmp_code = 0xff,
125 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
127 .src_port = RTE_BE16(0xffff),
128 .dst_port = RTE_BE16(0xffff),
132 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
134 .src_port = RTE_BE16(0xffff),
135 .dst_port = RTE_BE16(0xffff),
139 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
141 .src_port = RTE_BE16(0xffff),
142 .dst_port = RTE_BE16(0xffff),
146 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
147 .protocol = RTE_BE16(0xffff),
153 static inline void dpaa2_flow_extract_key_set(
154 struct dpaa2_key_info *key_info, int index, uint8_t size)
156 key_info->key_size[index] = size;
158 key_info->key_offset[index] =
159 key_info->key_offset[index - 1] +
160 key_info->key_size[index - 1];
162 key_info->key_offset[index] = 0;
164 key_info->key_total_size += size;
167 static int dpaa2_flow_extract_add(
168 struct dpaa2_key_extract *key_extract,
170 uint32_t field, uint8_t field_size)
172 int index, ip_src = -1, ip_dst = -1;
173 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
174 struct dpaa2_key_info *key_info = &key_extract->key_info;
176 if (dpkg->num_extracts >=
177 DPKG_MAX_NUM_OF_EXTRACTS) {
178 DPAA2_PMD_WARN("Number of extracts overflows");
181 /* Before reorder, the IP SRC and IP DST are already last
184 for (index = 0; index < dpkg->num_extracts; index++) {
185 if (dpkg->extracts[index].extract.from_hdr.prot ==
187 if (dpkg->extracts[index].extract.from_hdr.field ==
191 if (dpkg->extracts[index].extract.from_hdr.field ==
199 RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
202 RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
204 if (prot == NET_PROT_IP &&
205 (field == NH_FLD_IP_SRC ||
206 field == NH_FLD_IP_DST)) {
207 index = dpkg->num_extracts;
209 if (ip_src >= 0 && ip_dst >= 0)
210 index = dpkg->num_extracts - 2;
211 else if (ip_src >= 0 || ip_dst >= 0)
212 index = dpkg->num_extracts - 1;
214 index = dpkg->num_extracts;
217 dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
218 dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
219 dpkg->extracts[index].extract.from_hdr.prot = prot;
220 dpkg->extracts[index].extract.from_hdr.field = field;
221 if (prot == NET_PROT_IP &&
222 (field == NH_FLD_IP_SRC ||
223 field == NH_FLD_IP_DST)) {
224 dpaa2_flow_extract_key_set(key_info, index, 0);
226 dpaa2_flow_extract_key_set(key_info, index, field_size);
229 if (prot == NET_PROT_IP) {
230 if (field == NH_FLD_IP_SRC) {
231 if (key_info->ipv4_dst_offset >= 0) {
232 key_info->ipv4_src_offset =
233 key_info->ipv4_dst_offset +
234 NH_FLD_IPV4_ADDR_SIZE;
236 key_info->ipv4_src_offset =
237 key_info->key_offset[index - 1] +
238 key_info->key_size[index - 1];
240 if (key_info->ipv6_dst_offset >= 0) {
241 key_info->ipv6_src_offset =
242 key_info->ipv6_dst_offset +
243 NH_FLD_IPV6_ADDR_SIZE;
245 key_info->ipv6_src_offset =
246 key_info->key_offset[index - 1] +
247 key_info->key_size[index - 1];
249 } else if (field == NH_FLD_IP_DST) {
250 if (key_info->ipv4_src_offset >= 0) {
251 key_info->ipv4_dst_offset =
252 key_info->ipv4_src_offset +
253 NH_FLD_IPV4_ADDR_SIZE;
255 key_info->ipv4_dst_offset =
256 key_info->key_offset[index - 1] +
257 key_info->key_size[index - 1];
259 if (key_info->ipv6_src_offset >= 0) {
260 key_info->ipv6_dst_offset =
261 key_info->ipv6_src_offset +
262 NH_FLD_IPV6_ADDR_SIZE;
264 key_info->ipv6_dst_offset =
265 key_info->key_offset[index - 1] +
266 key_info->key_size[index - 1];
271 if (index == dpkg->num_extracts) {
272 dpkg->num_extracts++;
278 dpkg->extracts[ip_src].type =
279 DPKG_EXTRACT_FROM_HDR;
280 dpkg->extracts[ip_src].extract.from_hdr.type =
282 dpkg->extracts[ip_src].extract.from_hdr.prot =
284 dpkg->extracts[ip_src].extract.from_hdr.field =
286 dpaa2_flow_extract_key_set(key_info, ip_src, 0);
287 key_info->ipv4_src_offset += field_size;
288 key_info->ipv6_src_offset += field_size;
292 dpkg->extracts[ip_dst].type =
293 DPKG_EXTRACT_FROM_HDR;
294 dpkg->extracts[ip_dst].extract.from_hdr.type =
296 dpkg->extracts[ip_dst].extract.from_hdr.prot =
298 dpkg->extracts[ip_dst].extract.from_hdr.field =
300 dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
301 key_info->ipv4_dst_offset += field_size;
302 key_info->ipv6_dst_offset += field_size;
305 dpkg->num_extracts++;
310 /* Protocol discrimination.
311 * Discriminate IPv4/IPv6/vLan by Eth type.
312 * Discriminate UDP/TCP/ICMP by next proto of IP.
315 dpaa2_flow_proto_discrimination_extract(
316 struct dpaa2_key_extract *key_extract,
317 enum rte_flow_item_type type)
319 if (type == RTE_FLOW_ITEM_TYPE_ETH) {
320 return dpaa2_flow_extract_add(
321 key_extract, NET_PROT_ETH,
324 } else if (type == (enum rte_flow_item_type)
325 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
326 return dpaa2_flow_extract_add(
327 key_extract, NET_PROT_IP,
329 NH_FLD_IP_PROTO_SIZE);
335 static inline int dpaa2_flow_extract_search(
336 struct dpkg_profile_cfg *dpkg,
337 enum net_prot prot, uint32_t field)
341 for (i = 0; i < dpkg->num_extracts; i++) {
342 if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
343 dpkg->extracts[i].extract.from_hdr.field == field) {
351 static inline int dpaa2_flow_extract_key_offset(
352 struct dpaa2_key_extract *key_extract,
353 enum net_prot prot, uint32_t field)
356 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
357 struct dpaa2_key_info *key_info = &key_extract->key_info;
359 if (prot == NET_PROT_IPV4 ||
360 prot == NET_PROT_IPV6)
361 i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
363 i = dpaa2_flow_extract_search(dpkg, prot, field);
366 if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
367 return key_info->ipv4_src_offset;
368 else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
369 return key_info->ipv4_dst_offset;
370 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
371 return key_info->ipv6_src_offset;
372 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
373 return key_info->ipv6_dst_offset;
375 return key_info->key_offset[i];
381 struct proto_discrimination {
382 enum rte_flow_item_type type;
390 dpaa2_flow_proto_discrimination_rule(
391 struct dpaa2_dev_priv *priv, struct rte_flow *flow,
392 struct proto_discrimination proto, int group)
402 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
404 field = NH_FLD_ETH_TYPE;
405 } else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
407 field = NH_FLD_IP_PROTO;
410 "Only Eth and IP support to discriminate next proto.");
414 offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
417 DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
421 key_iova = flow->qos_rule.key_iova + offset;
422 mask_iova = flow->qos_rule.mask_iova + offset;
423 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
424 eth_type = proto.eth_type;
425 memcpy((void *)key_iova, (const void *)(ð_type),
428 memcpy((void *)mask_iova, (const void *)(ð_type),
431 ip_proto = proto.ip_proto;
432 memcpy((void *)key_iova, (const void *)(&ip_proto),
435 memcpy((void *)mask_iova, (const void *)(&ip_proto),
439 offset = dpaa2_flow_extract_key_offset(
440 &priv->extract.tc_key_extract[group],
443 DPAA2_PMD_ERR("FS prot %d field %d extract failed",
447 key_iova = flow->fs_rule.key_iova + offset;
448 mask_iova = flow->fs_rule.mask_iova + offset;
450 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
451 eth_type = proto.eth_type;
452 memcpy((void *)key_iova, (const void *)(ð_type),
455 memcpy((void *)mask_iova, (const void *)(ð_type),
458 ip_proto = proto.ip_proto;
459 memcpy((void *)key_iova, (const void *)(&ip_proto),
462 memcpy((void *)mask_iova, (const void *)(&ip_proto),
470 dpaa2_flow_rule_data_set(
471 struct dpaa2_key_extract *key_extract,
472 struct dpni_rule_cfg *rule,
473 enum net_prot prot, uint32_t field,
474 const void *key, const void *mask, int size)
476 int offset = dpaa2_flow_extract_key_offset(key_extract,
480 DPAA2_PMD_ERR("prot %d, field %d extract failed",
485 memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
486 memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
492 _dpaa2_flow_rule_move_ipaddr_tail(
493 struct dpaa2_key_extract *key_extract,
494 struct dpni_rule_cfg *rule, int src_offset,
495 uint32_t field, bool ipv4)
503 char tmp[NH_FLD_IPV6_ADDR_SIZE];
505 if (field != NH_FLD_IP_SRC &&
506 field != NH_FLD_IP_DST) {
507 DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
511 prot = NET_PROT_IPV4;
513 prot = NET_PROT_IPV6;
514 dst_offset = dpaa2_flow_extract_key_offset(key_extract,
516 if (dst_offset < 0) {
517 DPAA2_PMD_ERR("Field %d reorder extract failed", field);
520 key_src = rule->key_iova + src_offset;
521 mask_src = rule->mask_iova + src_offset;
522 key_dst = rule->key_iova + dst_offset;
523 mask_dst = rule->mask_iova + dst_offset;
525 len = sizeof(rte_be32_t);
527 len = NH_FLD_IPV6_ADDR_SIZE;
529 memcpy(tmp, (char *)key_src, len);
530 memset((char *)key_src, 0, len);
531 memcpy((char *)key_dst, tmp, len);
533 memcpy(tmp, (char *)mask_src, len);
534 memset((char *)mask_src, 0, len);
535 memcpy((char *)mask_dst, tmp, len);
541 dpaa2_flow_rule_move_ipaddr_tail(
542 struct rte_flow *flow, struct dpaa2_dev_priv *priv,
548 if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
551 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
552 prot = NET_PROT_IPV4;
554 prot = NET_PROT_IPV6;
556 if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
557 ret = _dpaa2_flow_rule_move_ipaddr_tail(
558 &priv->extract.qos_key_extract,
560 flow->ipaddr_rule.qos_ipsrc_offset,
561 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
563 DPAA2_PMD_ERR("QoS src address reorder failed");
566 flow->ipaddr_rule.qos_ipsrc_offset =
567 dpaa2_flow_extract_key_offset(
568 &priv->extract.qos_key_extract,
569 prot, NH_FLD_IP_SRC);
572 if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
573 ret = _dpaa2_flow_rule_move_ipaddr_tail(
574 &priv->extract.qos_key_extract,
576 flow->ipaddr_rule.qos_ipdst_offset,
577 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
579 DPAA2_PMD_ERR("QoS dst address reorder failed");
582 flow->ipaddr_rule.qos_ipdst_offset =
583 dpaa2_flow_extract_key_offset(
584 &priv->extract.qos_key_extract,
585 prot, NH_FLD_IP_DST);
588 if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
589 ret = _dpaa2_flow_rule_move_ipaddr_tail(
590 &priv->extract.tc_key_extract[fs_group],
592 flow->ipaddr_rule.fs_ipsrc_offset,
593 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
595 DPAA2_PMD_ERR("FS src address reorder failed");
598 flow->ipaddr_rule.fs_ipsrc_offset =
599 dpaa2_flow_extract_key_offset(
600 &priv->extract.tc_key_extract[fs_group],
601 prot, NH_FLD_IP_SRC);
603 if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
604 ret = _dpaa2_flow_rule_move_ipaddr_tail(
605 &priv->extract.tc_key_extract[fs_group],
607 flow->ipaddr_rule.fs_ipdst_offset,
608 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
610 DPAA2_PMD_ERR("FS dst address reorder failed");
613 flow->ipaddr_rule.fs_ipdst_offset =
614 dpaa2_flow_extract_key_offset(
615 &priv->extract.tc_key_extract[fs_group],
616 prot, NH_FLD_IP_DST);
623 dpaa2_flow_extract_support(
624 const uint8_t *mask_src,
625 enum rte_flow_item_type type)
629 const char *mask_support = 0;
632 case RTE_FLOW_ITEM_TYPE_ETH:
633 mask_support = (const char *)&dpaa2_flow_item_eth_mask;
634 size = sizeof(struct rte_flow_item_eth);
636 case RTE_FLOW_ITEM_TYPE_VLAN:
637 mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
638 size = sizeof(struct rte_flow_item_vlan);
640 case RTE_FLOW_ITEM_TYPE_IPV4:
641 mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
642 size = sizeof(struct rte_flow_item_ipv4);
644 case RTE_FLOW_ITEM_TYPE_IPV6:
645 mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
646 size = sizeof(struct rte_flow_item_ipv6);
648 case RTE_FLOW_ITEM_TYPE_ICMP:
649 mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
650 size = sizeof(struct rte_flow_item_icmp);
652 case RTE_FLOW_ITEM_TYPE_UDP:
653 mask_support = (const char *)&dpaa2_flow_item_udp_mask;
654 size = sizeof(struct rte_flow_item_udp);
656 case RTE_FLOW_ITEM_TYPE_TCP:
657 mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
658 size = sizeof(struct rte_flow_item_tcp);
660 case RTE_FLOW_ITEM_TYPE_SCTP:
661 mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
662 size = sizeof(struct rte_flow_item_sctp);
664 case RTE_FLOW_ITEM_TYPE_GRE:
665 mask_support = (const char *)&dpaa2_flow_item_gre_mask;
666 size = sizeof(struct rte_flow_item_gre);
672 memcpy(mask, mask_support, size);
674 for (i = 0; i < size; i++)
675 mask[i] = (mask[i] | mask_src[i]);
677 if (memcmp(mask, mask_support, size))
684 dpaa2_configure_flow_eth(struct rte_flow *flow,
685 struct rte_eth_dev *dev,
686 const struct rte_flow_attr *attr,
687 const struct rte_flow_item *pattern,
688 const struct rte_flow_action actions[] __rte_unused,
689 struct rte_flow_error *error __rte_unused,
690 int *device_configured)
695 const struct rte_flow_item_eth *spec, *mask;
697 /* TODO: Currently upper bound of range parameter is not implemented */
698 const struct rte_flow_item_eth *last __rte_unused;
699 struct dpaa2_dev_priv *priv = dev->data->dev_private;
700 const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
704 /* Parse pattern list to get the matching parameters */
705 spec = (const struct rte_flow_item_eth *)pattern->spec;
706 last = (const struct rte_flow_item_eth *)pattern->last;
707 mask = (const struct rte_flow_item_eth *)
708 (pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
710 /* Don't care any field of eth header,
711 * only care eth protocol.
713 DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
717 /* Get traffic class index and flow id to be configured */
719 flow->tc_index = attr->priority;
721 if (dpaa2_flow_extract_support((const uint8_t *)mask,
722 RTE_FLOW_ITEM_TYPE_ETH)) {
723 DPAA2_PMD_WARN("Extract field(s) of ethernet not support.");
728 if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
729 index = dpaa2_flow_extract_search(
730 &priv->extract.qos_key_extract.dpkg,
731 NET_PROT_ETH, NH_FLD_ETH_SA);
733 ret = dpaa2_flow_extract_add(
734 &priv->extract.qos_key_extract,
735 NET_PROT_ETH, NH_FLD_ETH_SA,
738 DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
742 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
744 index = dpaa2_flow_extract_search(
745 &priv->extract.tc_key_extract[group].dpkg,
746 NET_PROT_ETH, NH_FLD_ETH_SA);
748 ret = dpaa2_flow_extract_add(
749 &priv->extract.tc_key_extract[group],
750 NET_PROT_ETH, NH_FLD_ETH_SA,
753 DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
756 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
759 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
762 "Move ipaddr before ETH_SA rule set failed");
766 ret = dpaa2_flow_rule_data_set(
767 &priv->extract.qos_key_extract,
771 &spec->src.addr_bytes,
772 &mask->src.addr_bytes,
773 sizeof(struct rte_ether_addr));
775 DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
779 ret = dpaa2_flow_rule_data_set(
780 &priv->extract.tc_key_extract[group],
784 &spec->src.addr_bytes,
785 &mask->src.addr_bytes,
786 sizeof(struct rte_ether_addr));
788 DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
793 if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
794 index = dpaa2_flow_extract_search(
795 &priv->extract.qos_key_extract.dpkg,
796 NET_PROT_ETH, NH_FLD_ETH_DA);
798 ret = dpaa2_flow_extract_add(
799 &priv->extract.qos_key_extract,
800 NET_PROT_ETH, NH_FLD_ETH_DA,
803 DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
807 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
810 index = dpaa2_flow_extract_search(
811 &priv->extract.tc_key_extract[group].dpkg,
812 NET_PROT_ETH, NH_FLD_ETH_DA);
814 ret = dpaa2_flow_extract_add(
815 &priv->extract.tc_key_extract[group],
816 NET_PROT_ETH, NH_FLD_ETH_DA,
819 DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
823 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
826 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
829 "Move ipaddr before ETH DA rule set failed");
833 ret = dpaa2_flow_rule_data_set(
834 &priv->extract.qos_key_extract,
838 &spec->dst.addr_bytes,
839 &mask->dst.addr_bytes,
840 sizeof(struct rte_ether_addr));
842 DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
846 ret = dpaa2_flow_rule_data_set(
847 &priv->extract.tc_key_extract[group],
851 &spec->dst.addr_bytes,
852 &mask->dst.addr_bytes,
853 sizeof(struct rte_ether_addr));
855 DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
860 if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
861 index = dpaa2_flow_extract_search(
862 &priv->extract.qos_key_extract.dpkg,
863 NET_PROT_ETH, NH_FLD_ETH_TYPE);
865 ret = dpaa2_flow_extract_add(
866 &priv->extract.qos_key_extract,
867 NET_PROT_ETH, NH_FLD_ETH_TYPE,
870 DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
874 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
876 index = dpaa2_flow_extract_search(
877 &priv->extract.tc_key_extract[group].dpkg,
878 NET_PROT_ETH, NH_FLD_ETH_TYPE);
880 ret = dpaa2_flow_extract_add(
881 &priv->extract.tc_key_extract[group],
882 NET_PROT_ETH, NH_FLD_ETH_TYPE,
885 DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
889 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
892 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
895 "Move ipaddr before ETH TYPE rule set failed");
899 ret = dpaa2_flow_rule_data_set(
900 &priv->extract.qos_key_extract,
908 DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
912 ret = dpaa2_flow_rule_data_set(
913 &priv->extract.tc_key_extract[group],
921 DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
926 (*device_configured) |= local_cfg;
932 dpaa2_configure_flow_vlan(struct rte_flow *flow,
933 struct rte_eth_dev *dev,
934 const struct rte_flow_attr *attr,
935 const struct rte_flow_item *pattern,
936 const struct rte_flow_action actions[] __rte_unused,
937 struct rte_flow_error *error __rte_unused,
938 int *device_configured)
943 const struct rte_flow_item_vlan *spec, *mask;
945 const struct rte_flow_item_vlan *last __rte_unused;
946 struct dpaa2_dev_priv *priv = dev->data->dev_private;
950 /* Parse pattern list to get the matching parameters */
951 spec = (const struct rte_flow_item_vlan *)pattern->spec;
952 last = (const struct rte_flow_item_vlan *)pattern->last;
953 mask = (const struct rte_flow_item_vlan *)
954 (pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
956 /* Get traffic class index and flow id to be configured */
958 flow->tc_index = attr->priority;
961 /* Don't care any field of vlan header,
962 * only care vlan protocol.
964 /* Eth type is actually used for vLan classification.
966 struct proto_discrimination proto;
968 index = dpaa2_flow_extract_search(
969 &priv->extract.qos_key_extract.dpkg,
970 NET_PROT_ETH, NH_FLD_ETH_TYPE);
972 ret = dpaa2_flow_proto_discrimination_extract(
973 &priv->extract.qos_key_extract,
974 RTE_FLOW_ITEM_TYPE_ETH);
977 "QoS Ext ETH_TYPE to discriminate vLan failed");
981 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
984 index = dpaa2_flow_extract_search(
985 &priv->extract.tc_key_extract[group].dpkg,
986 NET_PROT_ETH, NH_FLD_ETH_TYPE);
988 ret = dpaa2_flow_proto_discrimination_extract(
989 &priv->extract.tc_key_extract[group],
990 RTE_FLOW_ITEM_TYPE_ETH);
993 "FS Ext ETH_TYPE to discriminate vLan failed.");
997 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1000 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1003 "Move ipaddr before vLan discrimination set failed");
1007 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1008 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1009 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1012 DPAA2_PMD_ERR("vLan discrimination rule set failed");
1016 (*device_configured) |= local_cfg;
1021 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1022 RTE_FLOW_ITEM_TYPE_VLAN)) {
1023 DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
1031 index = dpaa2_flow_extract_search(
1032 &priv->extract.qos_key_extract.dpkg,
1033 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1035 ret = dpaa2_flow_extract_add(
1036 &priv->extract.qos_key_extract,
1039 sizeof(rte_be16_t));
1041 DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1045 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1048 index = dpaa2_flow_extract_search(
1049 &priv->extract.tc_key_extract[group].dpkg,
1050 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1052 ret = dpaa2_flow_extract_add(
1053 &priv->extract.tc_key_extract[group],
1056 sizeof(rte_be16_t));
1058 DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1062 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1065 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1068 "Move ipaddr before VLAN TCI rule set failed");
1072 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1078 sizeof(rte_be16_t));
1080 DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1084 ret = dpaa2_flow_rule_data_set(
1085 &priv->extract.tc_key_extract[group],
1091 sizeof(rte_be16_t));
1093 DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1097 (*device_configured) |= local_cfg;
1103 dpaa2_configure_flow_generic_ip(
1104 struct rte_flow *flow,
1105 struct rte_eth_dev *dev,
1106 const struct rte_flow_attr *attr,
1107 const struct rte_flow_item *pattern,
1108 const struct rte_flow_action actions[] __rte_unused,
1109 struct rte_flow_error *error __rte_unused,
1110 int *device_configured)
1115 const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1117 const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1119 const void *key, *mask;
1122 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1123 const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1126 group = attr->group;
1128 /* Parse pattern list to get the matching parameters */
1129 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1130 spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1131 mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1132 (pattern->mask ? pattern->mask :
1133 &dpaa2_flow_item_ipv4_mask);
1135 spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1136 mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1137 (pattern->mask ? pattern->mask :
1138 &dpaa2_flow_item_ipv6_mask);
1141 /* Get traffic class index and flow id to be configured */
1142 flow->tc_id = group;
1143 flow->tc_index = attr->priority;
1145 if (!spec_ipv4 && !spec_ipv6) {
1146 /* Don't care any field of IP header,
1147 * only care IP protocol.
1148 * Example: flow create 0 ingress pattern ipv6 /
1150 /* Eth type is actually used for IP identification.
1152 /* TODO: Current design only supports Eth + IP,
1153 * Eth + vLan + IP needs to add.
1155 struct proto_discrimination proto;
1157 index = dpaa2_flow_extract_search(
1158 &priv->extract.qos_key_extract.dpkg,
1159 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1161 ret = dpaa2_flow_proto_discrimination_extract(
1162 &priv->extract.qos_key_extract,
1163 RTE_FLOW_ITEM_TYPE_ETH);
1166 "QoS Ext ETH_TYPE to discriminate IP failed.");
1170 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1173 index = dpaa2_flow_extract_search(
1174 &priv->extract.tc_key_extract[group].dpkg,
1175 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1177 ret = dpaa2_flow_proto_discrimination_extract(
1178 &priv->extract.tc_key_extract[group],
1179 RTE_FLOW_ITEM_TYPE_ETH);
1182 "FS Ext ETH_TYPE to discriminate IP failed");
1186 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1189 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1192 "Move ipaddr before IP discrimination set failed");
1196 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1197 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1198 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1200 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1201 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1204 DPAA2_PMD_ERR("IP discrimination rule set failed");
1208 (*device_configured) |= local_cfg;
1214 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1215 RTE_FLOW_ITEM_TYPE_IPV4)) {
1216 DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
1223 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1224 RTE_FLOW_ITEM_TYPE_IPV6)) {
1225 DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
1231 if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1232 mask_ipv4->hdr.dst_addr)) {
1233 flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1234 } else if (mask_ipv6 &&
1235 (memcmp((const char *)mask_ipv6->hdr.src_addr,
1236 zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1237 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1238 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1239 flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1242 if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1244 memcmp((const char *)mask_ipv6->hdr.src_addr,
1245 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1246 index = dpaa2_flow_extract_search(
1247 &priv->extract.qos_key_extract.dpkg,
1248 NET_PROT_IP, NH_FLD_IP_SRC);
1250 ret = dpaa2_flow_extract_add(
1251 &priv->extract.qos_key_extract,
1256 DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1260 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1263 index = dpaa2_flow_extract_search(
1264 &priv->extract.tc_key_extract[group].dpkg,
1265 NET_PROT_IP, NH_FLD_IP_SRC);
1267 ret = dpaa2_flow_extract_add(
1268 &priv->extract.tc_key_extract[group],
1273 DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1277 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1281 key = &spec_ipv4->hdr.src_addr;
1283 key = &spec_ipv6->hdr.src_addr[0];
1285 mask = &mask_ipv4->hdr.src_addr;
1286 size = NH_FLD_IPV4_ADDR_SIZE;
1287 prot = NET_PROT_IPV4;
1289 mask = &mask_ipv6->hdr.src_addr[0];
1290 size = NH_FLD_IPV6_ADDR_SIZE;
1291 prot = NET_PROT_IPV6;
1294 ret = dpaa2_flow_rule_data_set(
1295 &priv->extract.qos_key_extract,
1297 prot, NH_FLD_IP_SRC,
1300 DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1304 ret = dpaa2_flow_rule_data_set(
1305 &priv->extract.tc_key_extract[group],
1307 prot, NH_FLD_IP_SRC,
1310 DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1314 flow->ipaddr_rule.qos_ipsrc_offset =
1315 dpaa2_flow_extract_key_offset(
1316 &priv->extract.qos_key_extract,
1317 prot, NH_FLD_IP_SRC);
1318 flow->ipaddr_rule.fs_ipsrc_offset =
1319 dpaa2_flow_extract_key_offset(
1320 &priv->extract.tc_key_extract[group],
1321 prot, NH_FLD_IP_SRC);
1324 if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1326 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1327 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1328 index = dpaa2_flow_extract_search(
1329 &priv->extract.qos_key_extract.dpkg,
1330 NET_PROT_IP, NH_FLD_IP_DST);
1333 size = NH_FLD_IPV4_ADDR_SIZE;
1335 size = NH_FLD_IPV6_ADDR_SIZE;
1336 ret = dpaa2_flow_extract_add(
1337 &priv->extract.qos_key_extract,
1342 DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1346 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1349 index = dpaa2_flow_extract_search(
1350 &priv->extract.tc_key_extract[group].dpkg,
1351 NET_PROT_IP, NH_FLD_IP_DST);
1354 size = NH_FLD_IPV4_ADDR_SIZE;
1356 size = NH_FLD_IPV6_ADDR_SIZE;
1357 ret = dpaa2_flow_extract_add(
1358 &priv->extract.tc_key_extract[group],
1363 DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1367 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1371 key = &spec_ipv4->hdr.dst_addr;
1373 key = spec_ipv6->hdr.dst_addr;
1375 mask = &mask_ipv4->hdr.dst_addr;
1376 size = NH_FLD_IPV4_ADDR_SIZE;
1377 prot = NET_PROT_IPV4;
1379 mask = &mask_ipv6->hdr.dst_addr[0];
1380 size = NH_FLD_IPV6_ADDR_SIZE;
1381 prot = NET_PROT_IPV6;
1384 ret = dpaa2_flow_rule_data_set(
1385 &priv->extract.qos_key_extract,
1387 prot, NH_FLD_IP_DST,
1390 DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1394 ret = dpaa2_flow_rule_data_set(
1395 &priv->extract.tc_key_extract[group],
1397 prot, NH_FLD_IP_DST,
1400 DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1403 flow->ipaddr_rule.qos_ipdst_offset =
1404 dpaa2_flow_extract_key_offset(
1405 &priv->extract.qos_key_extract,
1406 prot, NH_FLD_IP_DST);
1407 flow->ipaddr_rule.fs_ipdst_offset =
1408 dpaa2_flow_extract_key_offset(
1409 &priv->extract.tc_key_extract[group],
1410 prot, NH_FLD_IP_DST);
1413 if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1414 (mask_ipv6 && mask_ipv6->hdr.proto)) {
1415 index = dpaa2_flow_extract_search(
1416 &priv->extract.qos_key_extract.dpkg,
1417 NET_PROT_IP, NH_FLD_IP_PROTO);
1419 ret = dpaa2_flow_extract_add(
1420 &priv->extract.qos_key_extract,
1423 NH_FLD_IP_PROTO_SIZE);
1425 DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1429 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1432 index = dpaa2_flow_extract_search(
1433 &priv->extract.tc_key_extract[group].dpkg,
1434 NET_PROT_IP, NH_FLD_IP_PROTO);
1436 ret = dpaa2_flow_extract_add(
1437 &priv->extract.tc_key_extract[group],
1440 NH_FLD_IP_PROTO_SIZE);
1442 DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1446 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1449 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1452 "Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1457 key = &spec_ipv4->hdr.next_proto_id;
1459 key = &spec_ipv6->hdr.proto;
1461 mask = &mask_ipv4->hdr.next_proto_id;
1463 mask = &mask_ipv6->hdr.proto;
1465 ret = dpaa2_flow_rule_data_set(
1466 &priv->extract.qos_key_extract,
1470 key, mask, NH_FLD_IP_PROTO_SIZE);
1472 DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1476 ret = dpaa2_flow_rule_data_set(
1477 &priv->extract.tc_key_extract[group],
1481 key, mask, NH_FLD_IP_PROTO_SIZE);
1483 DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1488 (*device_configured) |= local_cfg;
/* Program QoS/FS classification for an ICMP pattern item.
 *
 * When the item carries no usable field match, only IP-protocol
 * discrimination (IPPROTO_ICMP) is installed; otherwise icmp_type and/or
 * icmp_code extracts and rule data are added to both the QoS table and the
 * FS table of traffic class attr->group.  Sets DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE bits in *device_configured when the extract
 * layout changed and the hardware tables must be re-programmed.
 * NOTE(review): this listing is an elided excerpt (embedded source line
 * numbers are non-contiguous); error-return paths between the visible
 * statements are assumed but not shown — confirm against the full file.
 */
1494 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1495 struct rte_eth_dev *dev,
1496 const struct rte_flow_attr *attr,
1497 const struct rte_flow_item *pattern,
1498 const struct rte_flow_action actions[] __rte_unused,
1499 struct rte_flow_error *error __rte_unused,
1500 int *device_configured)
1505 const struct rte_flow_item_icmp *spec, *mask;
1507 const struct rte_flow_item_icmp *last __rte_unused;
1508 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1510 group = attr->group;
1512 /* Parse pattern list to get the matching parameters */
1513 spec = (const struct rte_flow_item_icmp *)pattern->spec;
1514 last = (const struct rte_flow_item_icmp *)pattern->last;
1515 mask = (const struct rte_flow_item_icmp *)
1516 (pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1518 /* Get traffic class index and flow id to be configured */
1519 flow->tc_id = group;
1520 flow->tc_index = attr->priority;
1523 /* Don't care any field of ICMP header,
1524 * only care ICMP protocol.
1525 * Example: flow create 0 ingress pattern icmp /
1527 /* Next proto of Generical IP is actually used
1528 * for ICMP identification.
1530 struct proto_discrimination proto;
/* Ensure the IP next-protocol field is part of the QoS key; add the
 * extract only if it is not already present.
 */
1532 index = dpaa2_flow_extract_search(
1533 &priv->extract.qos_key_extract.dpkg,
1534 NET_PROT_IP, NH_FLD_IP_PROTO);
1536 ret = dpaa2_flow_proto_discrimination_extract(
1537 &priv->extract.qos_key_extract,
1538 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1541 "QoS Extract IP protocol to discriminate ICMP failed.");
1545 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same for the per-TC FS key of this group. */
1548 index = dpaa2_flow_extract_search(
1549 &priv->extract.tc_key_extract[group].dpkg,
1550 NET_PROT_IP, NH_FLD_IP_PROTO);
1552 ret = dpaa2_flow_proto_discrimination_extract(
1553 &priv->extract.tc_key_extract[group],
1554 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1557 "FS Extract IP protocol to discriminate ICMP failed.");
1561 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
/* IP address extracts must stay at the key tail; move them after the
 * newly inserted extract before writing rule data.
 */
1564 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1567 "Move IP addr before ICMP discrimination set failed");
1571 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1572 proto.ip_proto = IPPROTO_ICMP;
1573 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1576 DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1580 (*device_configured) |= local_cfg;
/* Reject masks selecting ICMP fields the hardware cannot extract. */
1585 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1586 RTE_FLOW_ITEM_TYPE_ICMP)) {
1587 DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
/* Match on the ICMP type field when the mask selects it. */
1592 if (mask->hdr.icmp_type) {
1593 index = dpaa2_flow_extract_search(
1594 &priv->extract.qos_key_extract.dpkg,
1595 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1597 ret = dpaa2_flow_extract_add(
1598 &priv->extract.qos_key_extract,
1601 NH_FLD_ICMP_TYPE_SIZE);
1603 DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1607 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1610 index = dpaa2_flow_extract_search(
1611 &priv->extract.tc_key_extract[group].dpkg,
1612 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1614 ret = dpaa2_flow_extract_add(
1615 &priv->extract.tc_key_extract[group],
1618 NH_FLD_ICMP_TYPE_SIZE);
1620 DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1624 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1627 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1630 "Move ipaddr before ICMP TYPE set failed");
1634 ret = dpaa2_flow_rule_data_set(
1635 &priv->extract.qos_key_extract,
1639 &spec->hdr.icmp_type,
1640 &mask->hdr.icmp_type,
1641 NH_FLD_ICMP_TYPE_SIZE);
1643 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1647 ret = dpaa2_flow_rule_data_set(
1648 &priv->extract.tc_key_extract[group],
1652 &spec->hdr.icmp_type,
1653 &mask->hdr.icmp_type,
1654 NH_FLD_ICMP_TYPE_SIZE);
1656 DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
/* Match on the ICMP code field when the mask selects it. */
1661 if (mask->hdr.icmp_code) {
1662 index = dpaa2_flow_extract_search(
1663 &priv->extract.qos_key_extract.dpkg,
1664 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1666 ret = dpaa2_flow_extract_add(
1667 &priv->extract.qos_key_extract,
1670 NH_FLD_ICMP_CODE_SIZE);
1672 DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1676 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1679 index = dpaa2_flow_extract_search(
1680 &priv->extract.tc_key_extract[group].dpkg,
1681 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1683 ret = dpaa2_flow_extract_add(
1684 &priv->extract.tc_key_extract[group],
1687 NH_FLD_ICMP_CODE_SIZE);
1689 DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1693 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1696 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1699 "Move ipaddr after ICMP CODE set failed");
1703 ret = dpaa2_flow_rule_data_set(
1704 &priv->extract.qos_key_extract,
1708 &spec->hdr.icmp_code,
1709 &mask->hdr.icmp_code,
1710 NH_FLD_ICMP_CODE_SIZE);
1712 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1716 ret = dpaa2_flow_rule_data_set(
1717 &priv->extract.tc_key_extract[group],
1721 &spec->hdr.icmp_code,
1722 &mask->hdr.icmp_code,
1723 NH_FLD_ICMP_CODE_SIZE);
1725 DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1730 (*device_configured) |= local_cfg;
/* Program QoS/FS classification for a UDP pattern item.
 *
 * If there is no spec, or MC/WRIOP cannot classify L4 by ports
 * (!mc_l4_port_identification — see the workaround note at the top of the
 * file), UDP is discriminated via the IP next-protocol field (IPPROTO_UDP).
 * Otherwise src_port/dst_port extracts and rule data are added to both the
 * QoS table and the FS table of traffic class attr->group.
 * NOTE(review): elided excerpt — error-return paths between visible
 * statements are assumed; confirm against the full file.
 */
1736 dpaa2_configure_flow_udp(struct rte_flow *flow,
1737 struct rte_eth_dev *dev,
1738 const struct rte_flow_attr *attr,
1739 const struct rte_flow_item *pattern,
1740 const struct rte_flow_action actions[] __rte_unused,
1741 struct rte_flow_error *error __rte_unused,
1742 int *device_configured)
1747 const struct rte_flow_item_udp *spec, *mask;
1749 const struct rte_flow_item_udp *last __rte_unused;
1750 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1752 group = attr->group;
1754 /* Parse pattern list to get the matching parameters */
1755 spec = (const struct rte_flow_item_udp *)pattern->spec;
1756 last = (const struct rte_flow_item_udp *)pattern->last;
1757 mask = (const struct rte_flow_item_udp *)
1758 (pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
1760 /* Get traffic class index and flow id to be configured */
1761 flow->tc_id = group;
1762 flow->tc_index = attr->priority;
/* No field match requested, or ports cannot be identified by hardware:
 * fall back to IP next-protocol discrimination.
 */
1764 if (!spec || !mc_l4_port_identification) {
1765 struct proto_discrimination proto;
1767 index = dpaa2_flow_extract_search(
1768 &priv->extract.qos_key_extract.dpkg,
1769 NET_PROT_IP, NH_FLD_IP_PROTO);
1771 ret = dpaa2_flow_proto_discrimination_extract(
1772 &priv->extract.qos_key_extract,
1773 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1776 "QoS Extract IP protocol to discriminate UDP failed.");
1780 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1783 index = dpaa2_flow_extract_search(
1784 &priv->extract.tc_key_extract[group].dpkg,
1785 NET_PROT_IP, NH_FLD_IP_PROTO);
1787 ret = dpaa2_flow_proto_discrimination_extract(
1788 &priv->extract.tc_key_extract[group],
1789 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1792 "FS Extract IP protocol to discriminate UDP failed.");
1796 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1799 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1802 "Move IP addr before UDP discrimination set failed");
1806 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1807 proto.ip_proto = IPPROTO_UDP;
1808 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1811 DPAA2_PMD_ERR("UDP discrimination rule set failed");
1815 (*device_configured) |= local_cfg;
/* Reject masks selecting UDP fields the hardware cannot extract. */
1821 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1822 RTE_FLOW_ITEM_TYPE_UDP)) {
1823 DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
/* Match on the UDP source port when the mask selects it. */
1828 if (mask->hdr.src_port) {
1829 index = dpaa2_flow_extract_search(
1830 &priv->extract.qos_key_extract.dpkg,
1831 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
1833 ret = dpaa2_flow_extract_add(
1834 &priv->extract.qos_key_extract,
1836 NH_FLD_UDP_PORT_SRC,
1837 NH_FLD_UDP_PORT_SIZE);
1839 DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
1843 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1846 index = dpaa2_flow_extract_search(
1847 &priv->extract.tc_key_extract[group].dpkg,
1848 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
1850 ret = dpaa2_flow_extract_add(
1851 &priv->extract.tc_key_extract[group],
1853 NH_FLD_UDP_PORT_SRC,
1854 NH_FLD_UDP_PORT_SIZE);
1856 DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
1860 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1863 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1866 "Move ipaddr before UDP_PORT_SRC set failed");
1870 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1873 NH_FLD_UDP_PORT_SRC,
1874 &spec->hdr.src_port,
1875 &mask->hdr.src_port,
1876 NH_FLD_UDP_PORT_SIZE);
1879 "QoS NH_FLD_UDP_PORT_SRC rule data set failed");
1883 ret = dpaa2_flow_rule_data_set(
1884 &priv->extract.tc_key_extract[group],
1887 NH_FLD_UDP_PORT_SRC,
1888 &spec->hdr.src_port,
1889 &mask->hdr.src_port,
1890 NH_FLD_UDP_PORT_SIZE);
1893 "FS NH_FLD_UDP_PORT_SRC rule data set failed");
/* Match on the UDP destination port when the mask selects it. */
1898 if (mask->hdr.dst_port) {
1899 index = dpaa2_flow_extract_search(
1900 &priv->extract.qos_key_extract.dpkg,
1901 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
1903 ret = dpaa2_flow_extract_add(
1904 &priv->extract.qos_key_extract,
1906 NH_FLD_UDP_PORT_DST,
1907 NH_FLD_UDP_PORT_SIZE);
1909 DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
1913 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1916 index = dpaa2_flow_extract_search(
1917 &priv->extract.tc_key_extract[group].dpkg,
1918 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
1920 ret = dpaa2_flow_extract_add(
1921 &priv->extract.tc_key_extract[group],
1923 NH_FLD_UDP_PORT_DST,
1924 NH_FLD_UDP_PORT_SIZE);
1926 DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
1930 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1933 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1936 "Move ipaddr before UDP_PORT_DST set failed");
1940 ret = dpaa2_flow_rule_data_set(
1941 &priv->extract.qos_key_extract,
1944 NH_FLD_UDP_PORT_DST,
1945 &spec->hdr.dst_port,
1946 &mask->hdr.dst_port,
1947 NH_FLD_UDP_PORT_SIZE);
1950 "QoS NH_FLD_UDP_PORT_DST rule data set failed");
1954 ret = dpaa2_flow_rule_data_set(
1955 &priv->extract.tc_key_extract[group],
1958 NH_FLD_UDP_PORT_DST,
1959 &spec->hdr.dst_port,
1960 &mask->hdr.dst_port,
1961 NH_FLD_UDP_PORT_SIZE);
1964 "FS NH_FLD_UDP_PORT_DST rule data set failed");
1969 (*device_configured) |= local_cfg;
/* Program QoS/FS classification for a TCP pattern item.
 *
 * Mirrors dpaa2_configure_flow_udp: without a spec, or when
 * !mc_l4_port_identification, TCP is discriminated via the IP next-protocol
 * field (IPPROTO_TCP); otherwise src_port/dst_port extracts and rule data
 * are added to both the QoS table and the FS table of traffic class
 * attr->group.
 * NOTE(review): elided excerpt — error-return paths between visible
 * statements are assumed; confirm against the full file.
 */
1975 dpaa2_configure_flow_tcp(struct rte_flow *flow,
1976 struct rte_eth_dev *dev,
1977 const struct rte_flow_attr *attr,
1978 const struct rte_flow_item *pattern,
1979 const struct rte_flow_action actions[] __rte_unused,
1980 struct rte_flow_error *error __rte_unused,
1981 int *device_configured)
1986 const struct rte_flow_item_tcp *spec, *mask;
1988 const struct rte_flow_item_tcp *last __rte_unused;
1989 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1991 group = attr->group;
1993 /* Parse pattern list to get the matching parameters */
1994 spec = (const struct rte_flow_item_tcp *)pattern->spec;
1995 last = (const struct rte_flow_item_tcp *)pattern->last;
1996 mask = (const struct rte_flow_item_tcp *)
1997 (pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
1999 /* Get traffic class index and flow id to be configured */
2000 flow->tc_id = group;
2001 flow->tc_index = attr->priority;
/* No field match requested, or ports cannot be identified by hardware:
 * fall back to IP next-protocol discrimination.
 */
2003 if (!spec || !mc_l4_port_identification) {
2004 struct proto_discrimination proto;
2006 index = dpaa2_flow_extract_search(
2007 &priv->extract.qos_key_extract.dpkg,
2008 NET_PROT_IP, NH_FLD_IP_PROTO);
2010 ret = dpaa2_flow_proto_discrimination_extract(
2011 &priv->extract.qos_key_extract,
2012 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2015 "QoS Extract IP protocol to discriminate TCP failed.");
2019 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2022 index = dpaa2_flow_extract_search(
2023 &priv->extract.tc_key_extract[group].dpkg,
2024 NET_PROT_IP, NH_FLD_IP_PROTO);
2026 ret = dpaa2_flow_proto_discrimination_extract(
2027 &priv->extract.tc_key_extract[group],
2028 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2031 "FS Extract IP protocol to discriminate TCP failed.");
2035 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2038 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2041 "Move IP addr before TCP discrimination set failed");
2045 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2046 proto.ip_proto = IPPROTO_TCP;
2047 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2050 DPAA2_PMD_ERR("TCP discrimination rule set failed");
2054 (*device_configured) |= local_cfg;
/* Reject masks selecting TCP fields the hardware cannot extract. */
2060 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2061 RTE_FLOW_ITEM_TYPE_TCP)) {
2062 DPAA2_PMD_WARN("Extract field(s) of TCP not support.");
/* Match on the TCP source port when the mask selects it. */
2067 if (mask->hdr.src_port) {
2068 index = dpaa2_flow_extract_search(
2069 &priv->extract.qos_key_extract.dpkg,
2070 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2072 ret = dpaa2_flow_extract_add(
2073 &priv->extract.qos_key_extract,
2075 NH_FLD_TCP_PORT_SRC,
2076 NH_FLD_TCP_PORT_SIZE);
2078 DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2082 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2085 index = dpaa2_flow_extract_search(
2086 &priv->extract.tc_key_extract[group].dpkg,
2087 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2089 ret = dpaa2_flow_extract_add(
2090 &priv->extract.tc_key_extract[group],
2092 NH_FLD_TCP_PORT_SRC,
2093 NH_FLD_TCP_PORT_SIZE);
2095 DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2099 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2102 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2105 "Move ipaddr before TCP_PORT_SRC set failed");
2109 ret = dpaa2_flow_rule_data_set(
2110 &priv->extract.qos_key_extract,
2113 NH_FLD_TCP_PORT_SRC,
2114 &spec->hdr.src_port,
2115 &mask->hdr.src_port,
2116 NH_FLD_TCP_PORT_SIZE);
2119 "QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2123 ret = dpaa2_flow_rule_data_set(
2124 &priv->extract.tc_key_extract[group],
2127 NH_FLD_TCP_PORT_SRC,
2128 &spec->hdr.src_port,
2129 &mask->hdr.src_port,
2130 NH_FLD_TCP_PORT_SIZE);
2133 "FS NH_FLD_TCP_PORT_SRC rule data set failed");
/* Match on the TCP destination port when the mask selects it. */
2138 if (mask->hdr.dst_port) {
2139 index = dpaa2_flow_extract_search(
2140 &priv->extract.qos_key_extract.dpkg,
2141 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2143 ret = dpaa2_flow_extract_add(
2144 &priv->extract.qos_key_extract,
2146 NH_FLD_TCP_PORT_DST,
2147 NH_FLD_TCP_PORT_SIZE);
2149 DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2153 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2156 index = dpaa2_flow_extract_search(
2157 &priv->extract.tc_key_extract[group].dpkg,
2158 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2160 ret = dpaa2_flow_extract_add(
2161 &priv->extract.tc_key_extract[group],
2163 NH_FLD_TCP_PORT_DST,
2164 NH_FLD_TCP_PORT_SIZE);
2166 DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2170 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2173 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2176 "Move ipaddr before TCP_PORT_DST set failed");
2180 ret = dpaa2_flow_rule_data_set(
2181 &priv->extract.qos_key_extract,
2184 NH_FLD_TCP_PORT_DST,
2185 &spec->hdr.dst_port,
2186 &mask->hdr.dst_port,
2187 NH_FLD_TCP_PORT_SIZE);
2190 "QoS NH_FLD_TCP_PORT_DST rule data set failed");
2194 ret = dpaa2_flow_rule_data_set(
2195 &priv->extract.tc_key_extract[group],
2198 NH_FLD_TCP_PORT_DST,
2199 &spec->hdr.dst_port,
2200 &mask->hdr.dst_port,
2201 NH_FLD_TCP_PORT_SIZE);
2204 "FS NH_FLD_TCP_PORT_DST rule data set failed");
2209 (*device_configured) |= local_cfg;
/* Program QoS/FS classification for an SCTP pattern item.
 *
 * Mirrors the UDP/TCP handlers: without a spec, or when
 * !mc_l4_port_identification, SCTP is discriminated via the IP
 * next-protocol field (IPPROTO_SCTP); otherwise src_port/dst_port extracts
 * and rule data are added to both the QoS table and the FS table of traffic
 * class attr->group.
 * NOTE(review): elided excerpt — error-return paths between visible
 * statements are assumed; confirm against the full file.
 */
2215 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2216 struct rte_eth_dev *dev,
2217 const struct rte_flow_attr *attr,
2218 const struct rte_flow_item *pattern,
2219 const struct rte_flow_action actions[] __rte_unused,
2220 struct rte_flow_error *error __rte_unused,
2221 int *device_configured)
2226 const struct rte_flow_item_sctp *spec, *mask;
2228 const struct rte_flow_item_sctp *last __rte_unused;
2229 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2231 group = attr->group;
2233 /* Parse pattern list to get the matching parameters */
2234 spec = (const struct rte_flow_item_sctp *)pattern->spec;
2235 last = (const struct rte_flow_item_sctp *)pattern->last;
2236 mask = (const struct rte_flow_item_sctp *)
2237 (pattern->mask ? pattern->mask :
2238 &dpaa2_flow_item_sctp_mask);
2240 /* Get traffic class index and flow id to be configured */
2241 flow->tc_id = group;
2242 flow->tc_index = attr->priority;
/* No field match requested, or ports cannot be identified by hardware:
 * fall back to IP next-protocol discrimination.
 */
2244 if (!spec || !mc_l4_port_identification) {
2245 struct proto_discrimination proto;
2247 index = dpaa2_flow_extract_search(
2248 &priv->extract.qos_key_extract.dpkg,
2249 NET_PROT_IP, NH_FLD_IP_PROTO);
2251 ret = dpaa2_flow_proto_discrimination_extract(
2252 &priv->extract.qos_key_extract,
2253 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2256 "QoS Extract IP protocol to discriminate SCTP failed.");
2260 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2263 index = dpaa2_flow_extract_search(
2264 &priv->extract.tc_key_extract[group].dpkg,
2265 NET_PROT_IP, NH_FLD_IP_PROTO);
2267 ret = dpaa2_flow_proto_discrimination_extract(
2268 &priv->extract.tc_key_extract[group],
2269 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2272 "FS Extract IP protocol to discriminate SCTP failed.");
2276 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2279 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2282 "Move ipaddr before SCTP discrimination set failed");
2286 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2287 proto.ip_proto = IPPROTO_SCTP;
2288 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2291 DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2295 (*device_configured) |= local_cfg;
/* Reject masks selecting SCTP fields the hardware cannot extract. */
2301 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2302 RTE_FLOW_ITEM_TYPE_SCTP)) {
2303 DPAA2_PMD_WARN("Extract field(s) of SCTP not support.");
/* Match on the SCTP source port when the mask selects it. */
2308 if (mask->hdr.src_port) {
2309 index = dpaa2_flow_extract_search(
2310 &priv->extract.qos_key_extract.dpkg,
2311 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2313 ret = dpaa2_flow_extract_add(
2314 &priv->extract.qos_key_extract,
2316 NH_FLD_SCTP_PORT_SRC,
2317 NH_FLD_SCTP_PORT_SIZE);
2319 DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2323 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2326 index = dpaa2_flow_extract_search(
2327 &priv->extract.tc_key_extract[group].dpkg,
2328 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2330 ret = dpaa2_flow_extract_add(
2331 &priv->extract.tc_key_extract[group],
2333 NH_FLD_SCTP_PORT_SRC,
2334 NH_FLD_SCTP_PORT_SIZE);
2336 DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2340 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2343 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2346 "Move ipaddr before SCTP_PORT_SRC set failed");
2350 ret = dpaa2_flow_rule_data_set(
2351 &priv->extract.qos_key_extract,
2354 NH_FLD_SCTP_PORT_SRC,
2355 &spec->hdr.src_port,
2356 &mask->hdr.src_port,
2357 NH_FLD_SCTP_PORT_SIZE);
2360 "QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2364 ret = dpaa2_flow_rule_data_set(
2365 &priv->extract.tc_key_extract[group],
2368 NH_FLD_SCTP_PORT_SRC,
2369 &spec->hdr.src_port,
2370 &mask->hdr.src_port,
2371 NH_FLD_SCTP_PORT_SIZE);
2374 "FS NH_FLD_SCTP_PORT_SRC rule data set failed");
/* Match on the SCTP destination port when the mask selects it. */
2379 if (mask->hdr.dst_port) {
2380 index = dpaa2_flow_extract_search(
2381 &priv->extract.qos_key_extract.dpkg,
2382 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2384 ret = dpaa2_flow_extract_add(
2385 &priv->extract.qos_key_extract,
2387 NH_FLD_SCTP_PORT_DST,
2388 NH_FLD_SCTP_PORT_SIZE);
2390 DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2394 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2397 index = dpaa2_flow_extract_search(
2398 &priv->extract.tc_key_extract[group].dpkg,
2399 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2401 ret = dpaa2_flow_extract_add(
2402 &priv->extract.tc_key_extract[group],
2404 NH_FLD_SCTP_PORT_DST,
2405 NH_FLD_SCTP_PORT_SIZE);
2407 DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2411 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2414 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2417 "Move ipaddr before SCTP_PORT_DST set failed");
2421 ret = dpaa2_flow_rule_data_set(
2422 &priv->extract.qos_key_extract,
2425 NH_FLD_SCTP_PORT_DST,
2426 &spec->hdr.dst_port,
2427 &mask->hdr.dst_port,
2428 NH_FLD_SCTP_PORT_SIZE);
2431 "QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2435 ret = dpaa2_flow_rule_data_set(
2436 &priv->extract.tc_key_extract[group],
2439 NH_FLD_SCTP_PORT_DST,
2440 &spec->hdr.dst_port,
2441 &mask->hdr.dst_port,
2442 NH_FLD_SCTP_PORT_SIZE);
2445 "FS NH_FLD_SCTP_PORT_DST rule data set failed");
2450 (*device_configured) |= local_cfg;
/* Program QoS/FS classification for a GRE pattern item.
 *
 * GRE presence is discriminated via the IP next-protocol field
 * (IPPROTO_GRE).  Of the GRE header only the protocol (ether-type of the
 * encapsulated payload, 2 bytes) is supported as a match field; any other
 * masked field is rejected by dpaa2_flow_extract_support().
 * NOTE(review): elided excerpt — error-return paths between visible
 * statements are assumed; confirm against the full file.
 */
2456 dpaa2_configure_flow_gre(struct rte_flow *flow,
2457 struct rte_eth_dev *dev,
2458 const struct rte_flow_attr *attr,
2459 const struct rte_flow_item *pattern,
2460 const struct rte_flow_action actions[] __rte_unused,
2461 struct rte_flow_error *error __rte_unused,
2462 int *device_configured)
2467 const struct rte_flow_item_gre *spec, *mask;
2469 const struct rte_flow_item_gre *last __rte_unused;
2470 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2472 group = attr->group;
2474 /* Parse pattern list to get the matching parameters */
2475 spec = (const struct rte_flow_item_gre *)pattern->spec;
2476 last = (const struct rte_flow_item_gre *)pattern->last;
2477 mask = (const struct rte_flow_item_gre *)
2478 (pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2480 /* Get traffic class index and flow id to be configured */
2481 flow->tc_id = group;
2482 flow->tc_index = attr->priority;
2485 struct proto_discrimination proto;
2487 index = dpaa2_flow_extract_search(
2488 &priv->extract.qos_key_extract.dpkg,
2489 NET_PROT_IP, NH_FLD_IP_PROTO);
2491 ret = dpaa2_flow_proto_discrimination_extract(
2492 &priv->extract.qos_key_extract,
2493 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2496 "QoS Extract IP protocol to discriminate GRE failed.");
2500 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2503 index = dpaa2_flow_extract_search(
2504 &priv->extract.tc_key_extract[group].dpkg,
2505 NET_PROT_IP, NH_FLD_IP_PROTO);
2507 ret = dpaa2_flow_proto_discrimination_extract(
2508 &priv->extract.tc_key_extract[group],
2509 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2512 "FS Extract IP protocol to discriminate GRE failed.");
2516 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2519 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2522 "Move IP addr before GRE discrimination set failed");
2526 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2527 proto.ip_proto = IPPROTO_GRE;
2528 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2531 DPAA2_PMD_ERR("GRE discrimination rule set failed");
2535 (*device_configured) |= local_cfg;
/* Reject masks selecting GRE fields the hardware cannot extract. */
2540 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2541 RTE_FLOW_ITEM_TYPE_GRE)) {
2542 DPAA2_PMD_WARN("Extract field(s) of GRE not support.");
/* Only the GRE protocol (encapsulated ether-type) can be matched. */
2547 if (!mask->protocol)
2550 index = dpaa2_flow_extract_search(
2551 &priv->extract.qos_key_extract.dpkg,
2552 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2554 ret = dpaa2_flow_extract_add(
2555 &priv->extract.qos_key_extract,
2558 sizeof(rte_be16_t));
2560 DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2564 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2567 index = dpaa2_flow_extract_search(
2568 &priv->extract.tc_key_extract[group].dpkg,
2569 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2571 ret = dpaa2_flow_extract_add(
2572 &priv->extract.tc_key_extract[group],
2575 sizeof(rte_be16_t));
2577 DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2581 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2584 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2587 "Move ipaddr before GRE_TYPE set failed");
2591 ret = dpaa2_flow_rule_data_set(
2592 &priv->extract.qos_key_extract,
2598 sizeof(rte_be16_t));
2601 "QoS NH_FLD_GRE_TYPE rule data set failed");
2605 ret = dpaa2_flow_rule_data_set(
2606 &priv->extract.tc_key_extract[group],
2612 sizeof(rte_be16_t));
2615 "FS NH_FLD_GRE_TYPE rule data set failed");
2619 (*device_configured) |= local_cfg;
2624 /* The existing QoS/FS entry with IP address(es)
2625 * needs update after
2626 * new extract(s) are inserted before IP
2627 * address(es) extract(s).
/* Walk every installed flow and re-align its QoS rule (and, for QUEUE
 * actions, its FS rule in traffic class tc_id) with the current extract
 * layout: each entry is removed from hardware, the IP src/dst key and mask
 * bytes are memcpy'd from their old offset to the new offset recorded in
 * the key_extract info (the vacated bytes are memset-cleared), the cached
 * offsets in flow->ipaddr_rule are updated, and the entry is re-added.
 * NOTE(review): elided excerpt — loop/brace structure and error returns
 * between visible statements are assumed; confirm against the full file.
 */
2630 dpaa2_flow_entry_update(
2631 struct dpaa2_dev_priv *priv, uint8_t tc_id)
2633 struct rte_flow *curr = LIST_FIRST(&priv->flows);
2634 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2636 int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2637 int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2638 struct dpaa2_key_extract *qos_key_extract =
2639 &priv->extract.qos_key_extract;
2640 struct dpaa2_key_extract *tc_key_extract =
2641 &priv->extract.tc_key_extract[tc_id];
2642 char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2643 char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2644 char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2645 char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2646 int extend = -1, extend1, size = -1;
/* Flows with no IP-address match need no relocation — skip them. */
2650 if (curr->ipaddr_rule.ipaddr_type ==
2652 curr = LIST_NEXT(curr, next);
/* Pick the up-to-date key offsets and address size for this flow's
 * IP version (IPv4 or IPv6 branch).
 */
2656 if (curr->ipaddr_rule.ipaddr_type ==
2659 qos_key_extract->key_info.ipv4_src_offset;
2661 qos_key_extract->key_info.ipv4_dst_offset;
2663 tc_key_extract->key_info.ipv4_src_offset;
2665 tc_key_extract->key_info.ipv4_dst_offset;
2666 size = NH_FLD_IPV4_ADDR_SIZE;
2669 qos_key_extract->key_info.ipv6_src_offset;
2671 qos_key_extract->key_info.ipv6_dst_offset;
2673 tc_key_extract->key_info.ipv6_src_offset;
2675 tc_key_extract->key_info.ipv6_dst_offset;
2676 size = NH_FLD_IPV6_ADDR_SIZE;
2679 qos_index = curr->tc_id * priv->fs_entries +
/* Remove the stale QoS entry before rewriting its key layout. */
2682 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
2683 priv->token, &curr->qos_rule);
2685 DPAA2_PMD_ERR("Qos entry remove failed.");
/* Save the IP-src bytes from the old offset, clear the vacated
 * region, and record how far the field moved (extend).
 */
2691 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2692 RTE_ASSERT(qos_ipsrc_offset >=
2693 curr->ipaddr_rule.qos_ipsrc_offset);
2694 extend1 = qos_ipsrc_offset -
2695 curr->ipaddr_rule.qos_ipsrc_offset;
2697 RTE_ASSERT(extend == extend1);
2701 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2702 (size == NH_FLD_IPV6_ADDR_SIZE));
2705 (char *)(size_t)curr->qos_rule.key_iova +
2706 curr->ipaddr_rule.qos_ipsrc_offset,
2708 memset((char *)(size_t)curr->qos_rule.key_iova +
2709 curr->ipaddr_rule.qos_ipsrc_offset,
2713 (char *)(size_t)curr->qos_rule.mask_iova +
2714 curr->ipaddr_rule.qos_ipsrc_offset,
2716 memset((char *)(size_t)curr->qos_rule.mask_iova +
2717 curr->ipaddr_rule.qos_ipsrc_offset,
2720 curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
/* Same save/clear/update for the IP-dst bytes. */
2723 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2724 RTE_ASSERT(qos_ipdst_offset >=
2725 curr->ipaddr_rule.qos_ipdst_offset);
2726 extend1 = qos_ipdst_offset -
2727 curr->ipaddr_rule.qos_ipdst_offset;
2729 RTE_ASSERT(extend == extend1);
2733 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2734 (size == NH_FLD_IPV6_ADDR_SIZE));
2737 (char *)(size_t)curr->qos_rule.key_iova +
2738 curr->ipaddr_rule.qos_ipdst_offset,
2740 memset((char *)(size_t)curr->qos_rule.key_iova +
2741 curr->ipaddr_rule.qos_ipdst_offset,
2745 (char *)(size_t)curr->qos_rule.mask_iova +
2746 curr->ipaddr_rule.qos_ipdst_offset,
2748 memset((char *)(size_t)curr->qos_rule.mask_iova +
2749 curr->ipaddr_rule.qos_ipdst_offset,
2752 curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
/* Write the saved key/mask bytes back at their new offsets. */
2755 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2756 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2757 (size == NH_FLD_IPV6_ADDR_SIZE));
2758 memcpy((char *)(size_t)curr->qos_rule.key_iova +
2759 curr->ipaddr_rule.qos_ipsrc_offset,
2762 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2763 curr->ipaddr_rule.qos_ipsrc_offset,
2767 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2768 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
2769 (size == NH_FLD_IPV6_ADDR_SIZE));
2770 memcpy((char *)(size_t)curr->qos_rule.key_iova +
2771 curr->ipaddr_rule.qos_ipdst_offset,
2774 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2775 curr->ipaddr_rule.qos_ipdst_offset,
/* Grow the recorded real key size by the shift and re-install the
 * entry; key_size sent to hardware stays the fixed entry size.
 */
2781 curr->qos_real_key_size += extend;
2783 curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
2785 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
2786 priv->token, &curr->qos_rule,
2787 curr->tc_id, qos_index,
2790 DPAA2_PMD_ERR("Qos entry update failed.");
/* Only QUEUE-action flows have an FS entry to update. */
2794 if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
2795 curr = LIST_NEXT(curr, next);
2801 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
2802 priv->token, curr->tc_id, &curr->fs_rule);
2804 DPAA2_PMD_ERR("FS entry remove failed.");
/* FS relocation applies only to flows in the updated TC (tc_id). */
2808 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
2809 tc_id == curr->tc_id) {
2810 RTE_ASSERT(fs_ipsrc_offset >=
2811 curr->ipaddr_rule.fs_ipsrc_offset);
2812 extend1 = fs_ipsrc_offset -
2813 curr->ipaddr_rule.fs_ipsrc_offset;
2815 RTE_ASSERT(extend == extend1);
2820 (char *)(size_t)curr->fs_rule.key_iova +
2821 curr->ipaddr_rule.fs_ipsrc_offset,
2823 memset((char *)(size_t)curr->fs_rule.key_iova +
2824 curr->ipaddr_rule.fs_ipsrc_offset,
2828 (char *)(size_t)curr->fs_rule.mask_iova +
2829 curr->ipaddr_rule.fs_ipsrc_offset,
2831 memset((char *)(size_t)curr->fs_rule.mask_iova +
2832 curr->ipaddr_rule.fs_ipsrc_offset,
2835 curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
2838 if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
2839 tc_id == curr->tc_id) {
2840 RTE_ASSERT(fs_ipdst_offset >=
2841 curr->ipaddr_rule.fs_ipdst_offset);
2842 extend1 = fs_ipdst_offset -
2843 curr->ipaddr_rule.fs_ipdst_offset;
2845 RTE_ASSERT(extend == extend1);
2850 (char *)(size_t)curr->fs_rule.key_iova +
2851 curr->ipaddr_rule.fs_ipdst_offset,
2853 memset((char *)(size_t)curr->fs_rule.key_iova +
2854 curr->ipaddr_rule.fs_ipdst_offset,
2858 (char *)(size_t)curr->fs_rule.mask_iova +
2859 curr->ipaddr_rule.fs_ipdst_offset,
2861 memset((char *)(size_t)curr->fs_rule.mask_iova +
2862 curr->ipaddr_rule.fs_ipdst_offset,
2865 curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
/* Write the saved FS key/mask bytes back at their new offsets. */
2868 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
2869 memcpy((char *)(size_t)curr->fs_rule.key_iova +
2870 curr->ipaddr_rule.fs_ipsrc_offset,
2873 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
2874 curr->ipaddr_rule.fs_ipsrc_offset,
2878 if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
2879 memcpy((char *)(size_t)curr->fs_rule.key_iova +
2880 curr->ipaddr_rule.fs_ipdst_offset,
2883 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
2884 curr->ipaddr_rule.fs_ipdst_offset,
2890 curr->fs_real_key_size += extend;
2891 curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
2893 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
2894 priv->token, curr->tc_id, curr->tc_index,
2895 &curr->fs_rule, &curr->action_cfg);
2897 DPAA2_PMD_ERR("FS entry update failed.");
2901 curr = LIST_NEXT(curr, next);
/* Validate flow attributes against already-installed flows: reject an
 * (attr->group, attr->priority) pair that duplicates an existing flow's
 * (tc_id, tc_index), since that pair addresses a unique FS table slot.
 * NOTE(review): elided excerpt — the surrounding loop and return values
 * are not fully visible; confirm against the full file.
 */
2908 dpaa2_flow_verify_attr(
2909 struct dpaa2_dev_priv *priv,
2910 const struct rte_flow_attr *attr)
2912 struct rte_flow *curr = LIST_FIRST(&priv->flows);
2915 if (curr->tc_id == attr->group &&
2916 curr->tc_index == attr->priority) {
2918 "Flow with group %d and priority %d already exists.",
2919 attr->group, attr->priority);
2923 curr = LIST_NEXT(curr, next);
2930 dpaa2_generic_flow_set(struct rte_flow *flow,
2931 struct rte_eth_dev *dev,
2932 const struct rte_flow_attr *attr,
2933 const struct rte_flow_item pattern[],
2934 const struct rte_flow_action actions[],
2935 struct rte_flow_error *error)
2937 const struct rte_flow_action_queue *dest_queue;
2938 const struct rte_flow_action_rss *rss_conf;
2939 int is_keycfg_configured = 0, end_of_list = 0;
2940 int ret = 0, i = 0, j = 0;
2941 struct dpni_rx_tc_dist_cfg tc_cfg;
2942 struct dpni_qos_tbl_cfg qos_cfg;
2943 struct dpni_fs_action_cfg action;
2944 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2945 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2947 struct rte_flow *curr = LIST_FIRST(&priv->flows);
2950 ret = dpaa2_flow_verify_attr(priv, attr);
2954 /* Parse pattern list to get the matching parameters */
2955 while (!end_of_list) {
2956 switch (pattern[i].type) {
2957 case RTE_FLOW_ITEM_TYPE_ETH:
2958 ret = dpaa2_configure_flow_eth(flow,
2959 dev, attr, &pattern[i], actions, error,
2960 &is_keycfg_configured);
2962 DPAA2_PMD_ERR("ETH flow configuration failed!");
2966 case RTE_FLOW_ITEM_TYPE_VLAN:
2967 ret = dpaa2_configure_flow_vlan(flow,
2968 dev, attr, &pattern[i], actions, error,
2969 &is_keycfg_configured);
2971 DPAA2_PMD_ERR("vLan flow configuration failed!");
2975 case RTE_FLOW_ITEM_TYPE_IPV4:
2976 case RTE_FLOW_ITEM_TYPE_IPV6:
2977 ret = dpaa2_configure_flow_generic_ip(flow,
2978 dev, attr, &pattern[i], actions, error,
2979 &is_keycfg_configured);
2981 DPAA2_PMD_ERR("IP flow configuration failed!");
2985 case RTE_FLOW_ITEM_TYPE_ICMP:
2986 ret = dpaa2_configure_flow_icmp(flow,
2987 dev, attr, &pattern[i], actions, error,
2988 &is_keycfg_configured);
2990 DPAA2_PMD_ERR("ICMP flow configuration failed!");
2994 case RTE_FLOW_ITEM_TYPE_UDP:
2995 ret = dpaa2_configure_flow_udp(flow,
2996 dev, attr, &pattern[i], actions, error,
2997 &is_keycfg_configured);
2999 DPAA2_PMD_ERR("UDP flow configuration failed!");
3003 case RTE_FLOW_ITEM_TYPE_TCP:
3004 ret = dpaa2_configure_flow_tcp(flow,
3005 dev, attr, &pattern[i], actions, error,
3006 &is_keycfg_configured);
3008 DPAA2_PMD_ERR("TCP flow configuration failed!");
3012 case RTE_FLOW_ITEM_TYPE_SCTP:
3013 ret = dpaa2_configure_flow_sctp(flow,
3014 dev, attr, &pattern[i], actions, error,
3015 &is_keycfg_configured);
3017 DPAA2_PMD_ERR("SCTP flow configuration failed!");
3021 case RTE_FLOW_ITEM_TYPE_GRE:
3022 ret = dpaa2_configure_flow_gre(flow,
3023 dev, attr, &pattern[i], actions, error,
3024 &is_keycfg_configured);
3026 DPAA2_PMD_ERR("GRE flow configuration failed!");
3030 case RTE_FLOW_ITEM_TYPE_END:
3032 break; /*End of List*/
3034 DPAA2_PMD_ERR("Invalid action type");
3041 /* Let's parse action on matching traffic */
3043 while (!end_of_list) {
3044 switch (actions[j].type) {
3045 case RTE_FLOW_ACTION_TYPE_QUEUE:
3046 dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
3047 flow->flow_id = dest_queue->index;
3048 flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3049 memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3050 action.flow_id = flow->flow_id;
3051 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3052 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_extract.dpkg,
3053 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3055 "Unable to prepare extract parameters");
3059 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3060 qos_cfg.discard_on_miss = true;
3061 qos_cfg.keep_entries = true;
3062 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
3063 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3064 priv->token, &qos_cfg);
3067 "Distribution cannot be configured.(%d)"
3072 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3073 if (dpkg_prepare_key_cfg(
3074 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3075 (uint8_t *)(size_t)priv->extract
3076 .tc_extract_param[flow->tc_id]) < 0) {
3078 "Unable to prepare extract parameters");
3082 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3083 tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3084 tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
3085 tc_cfg.key_cfg_iova =
3086 (uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3087 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3088 tc_cfg.fs_cfg.keep_entries = true;
3089 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3091 flow->tc_id, &tc_cfg);
3094 "Distribution cannot be configured.(%d)"
3099 /* Configure QoS table first */
3101 action.flow_id = action.flow_id % priv->num_rx_tc;
3103 qos_index = flow->tc_id * priv->fs_entries +
3106 if (qos_index >= priv->qos_entries) {
3107 DPAA2_PMD_ERR("QoS table with %d entries full",
3111 flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3112 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3113 if (flow->ipaddr_rule.qos_ipdst_offset >=
3114 flow->ipaddr_rule.qos_ipsrc_offset) {
3115 flow->qos_real_key_size =
3116 flow->ipaddr_rule.qos_ipdst_offset +
3117 NH_FLD_IPV4_ADDR_SIZE;
3119 flow->qos_real_key_size =
3120 flow->ipaddr_rule.qos_ipsrc_offset +
3121 NH_FLD_IPV4_ADDR_SIZE;
3123 } else if (flow->ipaddr_rule.ipaddr_type ==
3125 if (flow->ipaddr_rule.qos_ipdst_offset >=
3126 flow->ipaddr_rule.qos_ipsrc_offset) {
3127 flow->qos_real_key_size =
3128 flow->ipaddr_rule.qos_ipdst_offset +
3129 NH_FLD_IPV6_ADDR_SIZE;
3131 flow->qos_real_key_size =
3132 flow->ipaddr_rule.qos_ipsrc_offset +
3133 NH_FLD_IPV6_ADDR_SIZE;
3137 flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3139 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3140 priv->token, &flow->qos_rule,
3141 flow->tc_id, qos_index,
3145 "Error in addnig entry to QoS table(%d)", ret);
3149 /* Then Configure FS table */
3150 if (flow->tc_index >= priv->fs_entries) {
3151 DPAA2_PMD_ERR("FS table with %d entries full",
3156 flow->fs_real_key_size =
3157 priv->extract.tc_key_extract[flow->tc_id]
3158 .key_info.key_total_size;
3160 if (flow->ipaddr_rule.ipaddr_type ==
3162 if (flow->ipaddr_rule.fs_ipdst_offset >=
3163 flow->ipaddr_rule.fs_ipsrc_offset) {
3164 flow->fs_real_key_size =
3165 flow->ipaddr_rule.fs_ipdst_offset +
3166 NH_FLD_IPV4_ADDR_SIZE;
3168 flow->fs_real_key_size =
3169 flow->ipaddr_rule.fs_ipsrc_offset +
3170 NH_FLD_IPV4_ADDR_SIZE;
3172 } else if (flow->ipaddr_rule.ipaddr_type ==
3174 if (flow->ipaddr_rule.fs_ipdst_offset >=
3175 flow->ipaddr_rule.fs_ipsrc_offset) {
3176 flow->fs_real_key_size =
3177 flow->ipaddr_rule.fs_ipdst_offset +
3178 NH_FLD_IPV6_ADDR_SIZE;
3180 flow->fs_real_key_size =
3181 flow->ipaddr_rule.fs_ipsrc_offset +
3182 NH_FLD_IPV6_ADDR_SIZE;
3186 flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3188 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3189 flow->tc_id, flow->tc_index,
3190 &flow->fs_rule, &action);
3193 "Error in adding entry to FS table(%d)", ret);
3196 memcpy(&flow->action_cfg, &action,
3197 sizeof(struct dpni_fs_action_cfg));
3199 case RTE_FLOW_ACTION_TYPE_RSS:
3200 rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3201 for (i = 0; i < (int)rss_conf->queue_num; i++) {
3202 if (rss_conf->queue[i] <
3203 (attr->group * priv->dist_queues) ||
3204 rss_conf->queue[i] >=
3205 ((attr->group + 1) * priv->dist_queues)) {
3207 "Queue/Group combination are not supported\n");
3212 flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3213 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3214 &priv->extract.tc_key_extract[flow->tc_id].dpkg);
3217 "unable to set flow distribution.please check queue config\n");
3221 /* Allocate DMA'ble memory to write the rules */
3222 param = (size_t)rte_malloc(NULL, 256, 64);
3224 DPAA2_PMD_ERR("Memory allocation failure\n");
3228 if (dpkg_prepare_key_cfg(
3229 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3230 (uint8_t *)param) < 0) {
3232 "Unable to prepare extract parameters");
3233 rte_free((void *)param);
3237 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3238 tc_cfg.dist_size = rss_conf->queue_num;
3239 tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3240 tc_cfg.key_cfg_iova = (size_t)param;
3241 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3243 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3244 priv->token, flow->tc_id,
3248 "Distribution cannot be configured: %d\n", ret);
3249 rte_free((void *)param);
3253 rte_free((void *)param);
3254 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3255 if (dpkg_prepare_key_cfg(
3256 &priv->extract.qos_key_extract.dpkg,
3257 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3259 "Unable to prepare extract parameters");
3263 sizeof(struct dpni_qos_tbl_cfg));
3264 qos_cfg.discard_on_miss = true;
3265 qos_cfg.keep_entries = true;
3266 qos_cfg.key_cfg_iova =
3267 (size_t)priv->extract.qos_extract_param;
3268 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3269 priv->token, &qos_cfg);
3272 "Distribution can't be configured %d\n",
3278 /* Add Rule into QoS table */
3279 qos_index = flow->tc_id * priv->fs_entries +
3281 if (qos_index >= priv->qos_entries) {
3282 DPAA2_PMD_ERR("QoS table with %d entries full",
3287 flow->qos_real_key_size =
3288 priv->extract.qos_key_extract.key_info.key_total_size;
3289 flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3290 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3291 &flow->qos_rule, flow->tc_id,
3295 "Error in entry addition in QoS table(%d)",
3300 case RTE_FLOW_ACTION_TYPE_END:
3304 DPAA2_PMD_ERR("Invalid action type");
3312 if (is_keycfg_configured &
3313 (DPAA2_QOS_TABLE_RECONFIGURE |
3314 DPAA2_FS_TABLE_RECONFIGURE)) {
3315 ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3317 DPAA2_PMD_ERR("Flow entry update failed.");
3322 /* New rules are inserted. */
3324 LIST_INSERT_HEAD(&priv->flows, flow, next);
3326 while (LIST_NEXT(curr, next))
3327 curr = LIST_NEXT(curr, next);
3328 LIST_INSERT_AFTER(curr, flow, next);
/* Validate rte_flow attributes against the DPNI hardware limits.
 * Checks (each visible below): group index must be a valid Rx traffic
 * class, priority must fit within the FS table, egress flows are
 * rejected, and the ingress flag is mandatory.
 * NOTE(review): return statements are outside this excerpt — presumably
 * each failed check yields a nonzero/-errno result; confirm in full file.
 */
3335 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3336 const struct rte_flow_attr *attr)
/* Group maps directly onto an Rx traffic class; reject out-of-range. */
3340 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3341 DPAA2_PMD_ERR("Priority group is out of range\n");
/* Priority selects an entry slot inside the TC's FS table. */
3344 if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3345 DPAA2_PMD_ERR("Priority within the group is out of range\n");
/* WRIOP classification only runs on the receive side. */
3348 if (unlikely(attr->egress)) {
3350 "Flow configuration is not supported on egress side\n");
3353 if (unlikely(!attr->ingress)) {
3354 DPAA2_PMD_ERR("Ingress flag must be configured\n");
/* Validate a pattern list: every item type must appear in
 * dpaa2_supported_pattern_type[], and every item must carry a spec.
 * NOTE(review): the "is_found" bookkeeping and the failure returns are
 * outside this excerpt; the visible loops establish only the two scans.
 */
3361 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3363 unsigned int i, j, is_found = 0;
/* First pass: each pattern item type must be one we support. */
3366 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3367 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3368 if (dpaa2_supported_pattern_type[i]
3369 == pattern[j].type) {
/* Second pass: a supported item without a spec cannot build a key. */
3379 /* Lets verify other combinations of given pattern rules */
3380 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3381 if (!pattern[j].spec) {
/* Validate an action list: every action type must appear in
 * dpaa2_supported_action_type[] (END/QUEUE/RSS per the table above),
 * then a second pass applies an extra per-action check whose condition
 * is only partially visible here (starts with "not DROP and ...").
 */
3391 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3393 unsigned int i, j, is_found = 0;
/* First pass: reject any action type not in the supported table. */
3396 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3397 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3398 if (dpaa2_supported_action_type[i] == actions[j].type) {
/* Second pass: non-DROP actions need a configuration — condition
 * continues past this excerpt; presumably checks actions[j].conf.
 */
3408 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3409 if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
/* rte_flow .validate callback.
 * Fetches the DPNI attributes from the MC firmware, then validates the
 * attributes, pattern list and action list in turn via the helpers
 * above.  On any failure sets a rte_flow_error (EPERM) and jumps to
 * the not_valid_params label (label itself outside this excerpt).
 */
3417 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3418 const struct rte_flow_attr *flow_attr,
3419 const struct rte_flow_item pattern[],
3420 const struct rte_flow_action actions[],
3421 struct rte_flow_error *error)
3423 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3424 struct dpni_attr dpni_attr;
3425 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3426 uint16_t token = priv->token;
/* Query the DPNI object so limits (TCs, FS entries) can be checked. */
3429 memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3430 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3433 "Failure to get dpni@%p attribute, err code %d\n",
3435 rte_flow_error_set(error, EPERM,
3436 RTE_FLOW_ERROR_TYPE_ATTR,
3437 flow_attr, "invalid");
3441 /* Verify input attributes */
3442 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3445 "Invalid attributes are given\n");
3446 rte_flow_error_set(error, EPERM,
3447 RTE_FLOW_ERROR_TYPE_ATTR,
3448 flow_attr, "invalid");
3449 goto not_valid_params;
3451 /* Verify input pattern list */
3452 ret = dpaa2_dev_verify_patterns(pattern);
3455 "Invalid pattern list is given\n");
3456 rte_flow_error_set(error, EPERM,
3457 RTE_FLOW_ERROR_TYPE_ITEM,
3458 pattern, "invalid");
3459 goto not_valid_params;
3461 /* Verify input action list */
3462 ret = dpaa2_dev_verify_actions(actions);
3465 "Invalid action list is given\n");
3466 rte_flow_error_set(error, EPERM,
3467 RTE_FLOW_ERROR_TYPE_ACTION,
3468 actions, "invalid");
3469 goto not_valid_params;
/* rte_flow .create callback.
 * Allocates the flow object plus four 256-byte DMA-able buffers
 * (key/mask for the QoS rule and key/mask for the FS rule), initializes
 * the IP-address bookkeeping to "invalid", then hands off to
 * dpaa2_generic_flow_set() to program the hardware tables.
 * Returns the flow pointer on success; on failure sets *error and
 * frees resources on the (partially visible) error path.
 */
3476 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3477 const struct rte_flow_attr *attr,
3478 const struct rte_flow_item pattern[],
3479 const struct rte_flow_action actions[],
3480 struct rte_flow_error *error)
3482 struct rte_flow *flow = NULL;
/* iovas are carried as size_t so they can be stored in dpni_rule_cfg. */
3483 size_t key_iova = 0, mask_iova = 0;
3486 flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3488 DPAA2_PMD_ERR("Failure to allocate memory for flow");
3491 /* Allocate DMA'ble memory to write the rules */
3492 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3495 "Memory allocation failure for rule configuration\n");
3498 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3501 "Memory allocation failure for rule configuration\n");
3505 flow->qos_rule.key_iova = key_iova;
3506 flow->qos_rule.mask_iova = mask_iova;
3508 /* Allocate DMA'ble memory to write the rules */
3509 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3512 "Memory allocation failure for rule configuration\n");
3515 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3518 "Memory allocation failure for rule configuration\n");
3522 flow->fs_rule.key_iova = key_iova;
3523 flow->fs_rule.mask_iova = mask_iova;
/* No IP items parsed yet: offsets are sentinel "invalid" until the
 * pattern parser records where src/dst addresses sit in the keys.
 */
3525 flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3526 flow->ipaddr_rule.qos_ipsrc_offset =
3527 IP_ADDRESS_OFFSET_INVALID;
3528 flow->ipaddr_rule.qos_ipdst_offset =
3529 IP_ADDRESS_OFFSET_INVALID;
3530 flow->ipaddr_rule.fs_ipsrc_offset =
3531 IP_ADDRESS_OFFSET_INVALID;
3532 flow->ipaddr_rule.fs_ipdst_offset =
3533 IP_ADDRESS_OFFSET_INVALID;
3535 switch (dpaa2_filter_type) {
3536 case RTE_ETH_FILTER_GENERIC:
3537 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
/* Only escalate to UNSPECIFIED if the parser didn't already set a
 * more precise error type.
 */
3540 if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3541 rte_flow_error_set(error, EPERM,
3542 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3545 "Failure to create flow, return code (%d)", ret);
3546 goto creation_error;
3550 DPAA2_PMD_ERR("Filter type (%d) not supported",
3557 rte_flow_error_set(error, EPERM,
3558 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3559 NULL, "memory alloc");
/* NOTE(review): this cleanup frees only flow and the *current*
 * key_iova/mask_iova values; the first (QoS) key/mask buffers appear
 * reachable only via flow->qos_rule here — verify in the full file
 * that they are not leaked on the FS-allocation failure path.
 */
3561 rte_free((void *)flow);
3562 rte_free((void *)key_iova);
3563 rte_free((void *)mask_iova);
/* rte_flow .destroy callback.
 * Removes the hardware table entries that dpaa2_generic_flow_set()
 * installed for this flow (QoS entry, and for QUEUE actions the FS
 * entry too), unlinks the flow from priv->flows and frees its DMA
 * key/mask buffers.
 */
3569 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
3570 struct rte_flow *flow,
3571 struct rte_flow_error *error)
3574 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3575 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3577 switch (flow->action) {
3578 case RTE_FLOW_ACTION_TYPE_QUEUE:
3579 /* Remove entry from QoS table first */
3580 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
/* NOTE(review): log text says "adding" but this path *removes* a QoS
 * entry — message is misleading; candidate fix once the full file is
 * in view (string change = behavior change, not done here).
 */
3584 "Error in adding entry to QoS table(%d)", ret);
3588 /* Then remove entry from FS table */
3589 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3590 flow->tc_id, &flow->fs_rule);
/* NOTE(review): same issue — "entry addition" on a removal path. */
3593 "Error in entry addition in FS table(%d)", ret);
3597 case RTE_FLOW_ACTION_TYPE_RSS:
3598 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3602 "Error in entry addition in QoS table(%d)", ret);
3608 "Action type (%d) is not supported", flow->action);
/* Hardware entries gone: drop from the software list and release the
 * four DMA buffers allocated in dpaa2_flow_create().
 */
3613 LIST_REMOVE(flow, next);
3614 rte_free((void *)(size_t)flow->qos_rule.key_iova);
3615 rte_free((void *)(size_t)flow->qos_rule.mask_iova);
3616 rte_free((void *)(size_t)flow->fs_rule.key_iova);
3617 rte_free((void *)(size_t)flow->fs_rule.mask_iova);
3618 /* Now free the flow */
3623 rte_flow_error_set(error, EPERM,
3624 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3630 * Destroy user-configured flow rules.
3632  * This function skips internal flow rules.
3634 * @see rte_flow_flush()
/* rte_flow .flush callback: destroy every flow on priv->flows.
 * The next pointer is captured before destroy because
 * dpaa2_flow_destroy() unlinks (LIST_REMOVE) the current node.
 */
3638 dpaa2_flow_flush(struct rte_eth_dev *dev,
3639 struct rte_flow_error *error)
3641 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3642 struct rte_flow *flow = LIST_FIRST(&priv->flows);
3645 struct rte_flow *next = LIST_NEXT(flow, next);
/* NOTE(review): dpaa2_flow_destroy()'s return value is ignored here;
 * a failed destroy is silently skipped over.
 */
3647 dpaa2_flow_destroy(dev, flow, error);
/* rte_flow .query callback — not implemented; all parameters are
 * unused (stub body outside this excerpt).
 */
3654 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
3655 struct rte_flow *flow __rte_unused,
3656 const struct rte_flow_action *actions __rte_unused,
3657 void *data __rte_unused,
3658 struct rte_flow_error *error __rte_unused)
3664 * Clean up all flow rules.
3666 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
3667 * rules regardless of whether they are internal or user-configured.
3670 * Pointer to private structure.
3673 dpaa2_flow_clean(struct rte_eth_dev *dev)
3675 struct rte_flow *flow;
3676 struct dpaa2_dev_priv *priv = dev->data->dev_private;
/* Keep popping the list head until empty; destroy() unlinks each flow,
 * so LIST_FIRST advances naturally.  error is NULL: failures are not
 * reported during teardown.
 */
3678 while ((flow = LIST_FIRST(&priv->flows)))
3679 dpaa2_flow_destroy(dev, flow, NULL);
3682 const struct rte_flow_ops dpaa2_flow_ops = {
3683 .create = dpaa2_flow_create,
3684 .validate = dpaa2_flow_validate,
3685 .destroy = dpaa2_flow_destroy,
3686 .flush = dpaa2_flow_flush,
3687 .query = dpaa2_flow_query,