1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2020 NXP
13 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
25 /* Workaround to discriminate the UDP/TCP/SCTP
26 * with next protocol of l3.
27 * MC/WRIOP are not able to identify
28 * the l4 protocol with l4 ports.
/* Non-zero enables the software L4 discrimination workaround described
 * above. NOTE(review): where this flag is set is not visible in this
 * excerpt — confirm against the rest of the driver.
 */
30 int mc_l4_port_identification;
/* Kind of IP address carried by a flow rule (enumerator lines are not
 * visible in this excerpt; presumably none / IPv4 / IPv6 — confirm
 * upstream).
 */
32 enum flow_rule_ipaddr_type {
/* Bookkeeping for the IP-address portion of a rule: its type plus (in
 * member lines elided here) the QoS/FS key offsets of IP SRC/DST used by
 * the reorder helpers below.
 */
38 struct flow_rule_ipaddr {
39 enum flow_rule_ipaddr_type ipaddr_type;
/* NOTE(review): the fields below belong to a struct (rte_flow) whose
 * opening line is missing from this excerpt — restore before compiling.
 */
47 LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
48 struct dpni_rule_cfg qos_rule;
49 struct dpni_rule_cfg fs_rule;
53 uint8_t tc_id; /** Traffic Class ID. */
55 uint8_t tc_index; /** index within this Traffic Class. */
56 enum rte_flow_action_type action;
58 /* Special for IP address to specify the offset
61 struct flow_rule_ipaddr ipaddr_rule;
62 struct dpni_fs_action_cfg action_cfg;
/* Flow pattern item types this PMD accepts; anything else is rejected at
 * parse time. (Closing brace of the array is not visible in this excerpt.)
 */
66 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
67 RTE_FLOW_ITEM_TYPE_END,
68 RTE_FLOW_ITEM_TYPE_ETH,
69 RTE_FLOW_ITEM_TYPE_VLAN,
70 RTE_FLOW_ITEM_TYPE_IPV4,
71 RTE_FLOW_ITEM_TYPE_IPV6,
72 RTE_FLOW_ITEM_TYPE_ICMP,
73 RTE_FLOW_ITEM_TYPE_UDP,
74 RTE_FLOW_ITEM_TYPE_TCP,
75 RTE_FLOW_ITEM_TYPE_SCTP,
76 RTE_FLOW_ITEM_TYPE_GRE,
/* Flow actions this PMD implements: steer to a queue or distribute via
 * RSS. (Closing brace of the array is not visible in this excerpt.)
 */
80 enum rte_flow_action_type dpaa2_supported_action_type[] = {
81 RTE_FLOW_ACTION_TYPE_END,
82 RTE_FLOW_ACTION_TYPE_QUEUE,
83 RTE_FLOW_ACTION_TYPE_RSS
86 /* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6*/
/* Internal pseudo item meaning "IPv4 or IPv6"; placed one past the last
 * public rte_flow item value so it cannot collide with real item types.
 */
87 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
/* Currently active classic filter type; NONE until a filter is installed. */
89 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
/* Default full-match masks, used when the application supplies a spec but
 * no mask, and as the "supported bits" reference in
 * dpaa2_flow_extract_support(). NOTE(review): several field names and the
 * closing braces of these initializers are missing from this excerpt —
 * restore from upstream before compiling.
 */
92 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
93 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
94 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
95 .type = RTE_BE16(0xffff),
98 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
99 .tci = RTE_BE16(0xffff),
102 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
103 .hdr.src_addr = RTE_BE32(0xffffffff),
104 .hdr.dst_addr = RTE_BE32(0xffffffff),
105 .hdr.next_proto_id = 0xff,
108 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
111 "\xff\xff\xff\xff\xff\xff\xff\xff"
112 "\xff\xff\xff\xff\xff\xff\xff\xff",
114 "\xff\xff\xff\xff\xff\xff\xff\xff"
115 "\xff\xff\xff\xff\xff\xff\xff\xff",
120 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
121 .hdr.icmp_type = 0xff,
122 .hdr.icmp_code = 0xff,
125 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
127 .src_port = RTE_BE16(0xffff),
128 .dst_port = RTE_BE16(0xffff),
132 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
134 .src_port = RTE_BE16(0xffff),
135 .dst_port = RTE_BE16(0xffff),
139 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
141 .src_port = RTE_BE16(0xffff),
142 .dst_port = RTE_BE16(0xffff),
146 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
147 .protocol = RTE_BE16(0xffff),
/* Record the size of extract 'index' in key_info, derive its byte offset
 * within the lookup key, and grow key_total_size accordingly.
 * NOTE(review): the conditional/brace lines are missing from this excerpt;
 * the visible assignments imply: offset = prev offset + prev size when
 * index > 0, else 0 — confirm against upstream before compiling.
 */
153 static inline void dpaa2_flow_extract_key_set(
154 struct dpaa2_key_info *key_info, int index, uint8_t size)
156 key_info->key_size[index] = size;
158 key_info->key_offset[index] =
159 key_info->key_offset[index - 1] +
160 key_info->key_size[index - 1];
162 key_info->key_offset[index] = 0;
164 key_info->key_total_size += size;
/* Add a from-header extract for (prot, field) to the dpkg key profile.
 * IP SRC/DST are kept as the LAST one or two extracts (their key size is
 * set to 0 here and filled in later), so non-IP extracts are inserted
 * before them and the IP extracts are pushed back to the tail.
 * NOTE(review): many brace/else/return lines are absent from this excerpt
 * (e.g. the overflow early-return after the warning, the loop bodies that
 * set ip_src/ip_dst, and the final return) — restore from upstream.
 */
167 static int dpaa2_flow_extract_add(
168 struct dpaa2_key_extract *key_extract,
170 uint32_t field, uint8_t field_size)
172 int index, ip_src = -1, ip_dst = -1;
173 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
174 struct dpaa2_key_info *key_info = &key_extract->key_info;
176 if (dpkg->num_extracts >=
177 DPKG_MAX_NUM_OF_EXTRACTS) {
178 DPAA2_PMD_WARN("Number of extracts overflows");
181 /* Before reorder, the IP SRC and IP DST are already last
/* Scan existing extracts to locate the trailing IP SRC/DST entries. */
184 for (index = 0; index < dpkg->num_extracts; index++) {
185 if (dpkg->extracts[index].extract.from_hdr.prot ==
187 if (dpkg->extracts[index].extract.from_hdr.field ==
191 if (dpkg->extracts[index].extract.from_hdr.field ==
199 RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
202 RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
/* Choose the slot: IP SRC/DST append at the tail; other fields are
 * inserted before any existing IP address extracts.
 */
204 if (prot == NET_PROT_IP &&
205 (field == NH_FLD_IP_SRC ||
206 field == NH_FLD_IP_DST)) {
207 index = dpkg->num_extracts;
209 if (ip_src >= 0 && ip_dst >= 0)
210 index = dpkg->num_extracts - 2;
211 else if (ip_src >= 0 || ip_dst >= 0)
212 index = dpkg->num_extracts - 1;
214 index = dpkg->num_extracts;
217 dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
218 dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
219 dpkg->extracts[index].extract.from_hdr.prot = prot;
220 dpkg->extracts[index].extract.from_hdr.field = field;
/* IP addresses get size 0 for now; real size depends on IPv4 vs IPv6. */
221 if (prot == NET_PROT_IP &&
222 (field == NH_FLD_IP_SRC ||
223 field == NH_FLD_IP_DST)) {
224 dpaa2_flow_extract_key_set(key_info, index, 0);
226 dpaa2_flow_extract_key_set(key_info, index, field_size);
/* Track where IPv4/IPv6 SRC/DST will live in the key; an address placed
 * after its counterpart starts at counterpart offset + address size.
 */
229 if (prot == NET_PROT_IP) {
230 if (field == NH_FLD_IP_SRC) {
231 if (key_info->ipv4_dst_offset >= 0) {
232 key_info->ipv4_src_offset =
233 key_info->ipv4_dst_offset +
234 NH_FLD_IPV4_ADDR_SIZE;
236 key_info->ipv4_src_offset =
237 key_info->key_offset[index - 1] +
238 key_info->key_size[index - 1];
240 if (key_info->ipv6_dst_offset >= 0) {
241 key_info->ipv6_src_offset =
242 key_info->ipv6_dst_offset +
243 NH_FLD_IPV6_ADDR_SIZE;
245 key_info->ipv6_src_offset =
246 key_info->key_offset[index - 1] +
247 key_info->key_size[index - 1];
249 } else if (field == NH_FLD_IP_DST) {
250 if (key_info->ipv4_src_offset >= 0) {
251 key_info->ipv4_dst_offset =
252 key_info->ipv4_src_offset +
253 NH_FLD_IPV4_ADDR_SIZE;
255 key_info->ipv4_dst_offset =
256 key_info->key_offset[index - 1] +
257 key_info->key_size[index - 1];
259 if (key_info->ipv6_src_offset >= 0) {
260 key_info->ipv6_dst_offset =
261 key_info->ipv6_src_offset +
262 NH_FLD_IPV6_ADDR_SIZE;
264 key_info->ipv6_dst_offset =
265 key_info->key_offset[index - 1] +
266 key_info->key_size[index - 1];
271 if (index == dpkg->num_extracts) {
272 dpkg->num_extracts++;
/* Inserted before the IP extracts: re-emit IP SRC/DST at the tail and
 * shift their cached offsets past the newly inserted field.
 */
278 dpkg->extracts[ip_src].type =
279 DPKG_EXTRACT_FROM_HDR;
280 dpkg->extracts[ip_src].extract.from_hdr.type =
282 dpkg->extracts[ip_src].extract.from_hdr.prot =
284 dpkg->extracts[ip_src].extract.from_hdr.field =
286 dpaa2_flow_extract_key_set(key_info, ip_src, 0);
287 key_info->ipv4_src_offset += field_size;
288 key_info->ipv6_src_offset += field_size;
292 dpkg->extracts[ip_dst].type =
293 DPKG_EXTRACT_FROM_HDR;
294 dpkg->extracts[ip_dst].extract.from_hdr.type =
296 dpkg->extracts[ip_dst].extract.from_hdr.prot =
298 dpkg->extracts[ip_dst].extract.from_hdr.field =
300 dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
301 key_info->ipv4_dst_offset += field_size;
302 key_info->ipv6_dst_offset += field_size;
305 dpkg->num_extracts++;
310 /* Protocol discrimination.
311 * Discriminate IPv4/IPv6/vLan by Eth type.
312 * Discriminate UDP/TCP/ICMP by next proto of IP.
/* Add the extract needed to discriminate the next protocol: ETH type for
 * L3/VLAN discrimination, IP next-protocol for L4 discrimination.
 * NOTE(review): the return type line, the NH_FLD_ETH_TYPE/size arguments,
 * and the unsupported-type fallthrough are missing from this excerpt.
 */
315 dpaa2_flow_proto_discrimination_extract(
316 struct dpaa2_key_extract *key_extract,
317 enum rte_flow_item_type type)
319 if (type == RTE_FLOW_ITEM_TYPE_ETH) {
320 return dpaa2_flow_extract_add(
321 key_extract, NET_PROT_ETH,
324 } else if (type == (enum rte_flow_item_type)
325 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
326 return dpaa2_flow_extract_add(
327 key_extract, NET_PROT_IP,
329 NH_FLD_IP_PROTO_SIZE);
/* Linear search of the dpkg profile for an extract matching (prot, field).
 * Returns its index; the not-found return (presumably negative) is on a
 * line missing from this excerpt — confirm upstream.
 */
335 static inline int dpaa2_flow_extract_search(
336 struct dpkg_profile_cfg *dpkg,
337 enum net_prot prot, uint32_t field)
341 for (i = 0; i < dpkg->num_extracts; i++) {
342 if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
343 dpkg->extracts[i].extract.from_hdr.field == field) {
/* Return the byte offset of (prot, field) inside the generated lookup key.
 * IPv4/IPv6 SRC/DST are special-cased: they are searched as NET_PROT_IP
 * and their offsets come from the cached ipv4_/ipv6_ fields because the
 * shared IP extract has a deferred (0) size in the profile.
 * NOTE(review): the error path for a failed search is not visible here.
 */
351 static inline int dpaa2_flow_extract_key_offset(
352 struct dpaa2_key_extract *key_extract,
353 enum net_prot prot, uint32_t field)
356 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
357 struct dpaa2_key_info *key_info = &key_extract->key_info;
359 if (prot == NET_PROT_IPV4 ||
360 prot == NET_PROT_IPV6)
361 i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
363 i = dpaa2_flow_extract_search(dpkg, prot, field);
366 if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
367 return key_info->ipv4_src_offset;
368 else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
369 return key_info->ipv4_dst_offset;
370 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
371 return key_info->ipv6_src_offset;
372 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
373 return key_info->ipv6_dst_offset;
375 return key_info->key_offset[i];
/* Descriptor for a protocol-discrimination rule: the item type plus (in
 * member lines elided from this excerpt) the eth_type / ip_proto values
 * referenced by dpaa2_flow_proto_discrimination_rule().
 */
381 struct proto_discrimination {
382 enum rte_flow_item_type type;
/* Write the discrimination value (ETH type or IP next-protocol) and a full
 * mask into both the QoS rule and the FS rule of 'flow', at the key offset
 * of the corresponding extract. NOTE(review): return-type line, local
 * declarations, prot/size arguments to the memcpys, and error returns are
 * missing from this excerpt — restore from upstream.
 */
390 dpaa2_flow_proto_discrimination_rule(
391 struct dpaa2_dev_priv *priv, struct rte_flow *flow,
392 struct proto_discrimination proto, int group)
402 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
404 field = NH_FLD_ETH_TYPE;
405 } else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
407 field = NH_FLD_IP_PROTO;
410 "Only Eth and IP support to discriminate next proto.");
/* QoS (per-port) rule first. */
414 offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
417 DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
421 key_iova = flow->qos_rule.key_iova + offset;
422 mask_iova = flow->qos_rule.mask_iova + offset;
423 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
424 eth_type = proto.eth_type;
425 memcpy((void *)key_iova, (const void *)(&eth_type),
428 memcpy((void *)mask_iova, (const void *)(&eth_type),
431 ip_proto = proto.ip_proto;
432 memcpy((void *)key_iova, (const void *)(&ip_proto),
435 memcpy((void *)mask_iova, (const void *)(&ip_proto),
/* Then the FS (per-TC) rule for this group. */
439 offset = dpaa2_flow_extract_key_offset(
440 &priv->extract.tc_key_extract[group],
443 DPAA2_PMD_ERR("FS prot %d field %d extract failed",
447 key_iova = flow->fs_rule.key_iova + offset;
448 mask_iova = flow->fs_rule.mask_iova + offset;
450 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
451 eth_type = proto.eth_type;
452 memcpy((void *)key_iova, (const void *)(&eth_type),
455 memcpy((void *)mask_iova, (const void *)(&eth_type),
458 ip_proto = proto.ip_proto;
459 memcpy((void *)key_iova, (const void *)(&ip_proto),
462 memcpy((void *)mask_iova, (const void *)(&ip_proto),
/* Copy 'size' bytes of key and mask into 'rule' at the key offset of
 * (prot, field). NOTE(review): the return-type line, the negative-offset
 * early return after the error log, and the final return are missing from
 * this excerpt.
 */
470 dpaa2_flow_rule_data_set(
471 struct dpaa2_key_extract *key_extract,
472 struct dpni_rule_cfg *rule,
473 enum net_prot prot, uint32_t field,
474 const void *key, const void *mask, int size)
476 int offset = dpaa2_flow_extract_key_offset(key_extract,
480 DPAA2_PMD_ERR("prot %d, field %d extract failed",
484 memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
485 memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
/* Relocate one IP address (SRC or DST) inside a rule's key and mask
 * buffers from 'src_offset' to the address's final tail offset, using a
 * stack bounce buffer (regions may overlap). 'ipv4' selects 4- vs 16-byte
 * copy length. NOTE(review): return-type line, local declarations, and
 * several brace/else lines are missing from this excerpt.
 */
491 _dpaa2_flow_rule_move_ipaddr_tail(
492 struct dpaa2_key_extract *key_extract,
493 struct dpni_rule_cfg *rule, int src_offset,
494 uint32_t field, bool ipv4)
502 char tmp[NH_FLD_IPV6_ADDR_SIZE];
504 if (field != NH_FLD_IP_SRC &&
505 field != NH_FLD_IP_DST) {
506 DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
510 prot = NET_PROT_IPV4;
512 prot = NET_PROT_IPV6;
513 dst_offset = dpaa2_flow_extract_key_offset(key_extract,
515 if (dst_offset < 0) {
516 DPAA2_PMD_ERR("Field %d reorder extract failed", field);
519 key_src = rule->key_iova + src_offset;
520 mask_src = rule->mask_iova + src_offset;
521 key_dst = rule->key_iova + dst_offset;
522 mask_dst = rule->mask_iova + dst_offset;
524 len = sizeof(rte_be32_t);
526 len = NH_FLD_IPV6_ADDR_SIZE;
/* Bounce through tmp so overlapping src/dst regions stay intact. */
528 memcpy(tmp, (char *)key_src, len);
529 memcpy((char *)key_dst, tmp, len);
531 memcpy(tmp, (char *)mask_src, len);
532 memcpy((char *)mask_dst, tmp, len);
/* After a non-IP extract is inserted, the IP SRC/DST key offsets change;
 * move any already-written IP address data in the flow's QoS and FS rules
 * to the new tail offsets and refresh the cached offsets. No-op when the
 * flow carries no IP address rule. NOTE(review): return-type line, the
 * qos/fs rule arguments to the helper calls, error returns, and the final
 * return are missing from this excerpt.
 */
538 dpaa2_flow_rule_move_ipaddr_tail(
539 struct rte_flow *flow, struct dpaa2_dev_priv *priv,
545 if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
548 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
549 prot = NET_PROT_IPV4;
551 prot = NET_PROT_IPV6;
/* QoS table: source then destination address. */
553 if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
554 ret = _dpaa2_flow_rule_move_ipaddr_tail(
555 &priv->extract.qos_key_extract,
557 flow->ipaddr_rule.qos_ipsrc_offset,
558 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
560 DPAA2_PMD_ERR("QoS src address reorder failed");
563 flow->ipaddr_rule.qos_ipsrc_offset =
564 dpaa2_flow_extract_key_offset(
565 &priv->extract.qos_key_extract,
566 prot, NH_FLD_IP_SRC);
569 if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
570 ret = _dpaa2_flow_rule_move_ipaddr_tail(
571 &priv->extract.qos_key_extract,
573 flow->ipaddr_rule.qos_ipdst_offset,
574 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
576 DPAA2_PMD_ERR("QoS dst address reorder failed");
579 flow->ipaddr_rule.qos_ipdst_offset =
580 dpaa2_flow_extract_key_offset(
581 &priv->extract.qos_key_extract,
582 prot, NH_FLD_IP_DST);
/* FS table of this flow's group: source then destination address. */
585 if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
586 ret = _dpaa2_flow_rule_move_ipaddr_tail(
587 &priv->extract.tc_key_extract[fs_group],
589 flow->ipaddr_rule.fs_ipsrc_offset,
590 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
592 DPAA2_PMD_ERR("FS src address reorder failed");
595 flow->ipaddr_rule.fs_ipsrc_offset =
596 dpaa2_flow_extract_key_offset(
597 &priv->extract.tc_key_extract[fs_group],
598 prot, NH_FLD_IP_SRC);
600 if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
601 ret = _dpaa2_flow_rule_move_ipaddr_tail(
602 &priv->extract.tc_key_extract[fs_group],
604 flow->ipaddr_rule.fs_ipdst_offset,
605 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
607 DPAA2_PMD_ERR("FS dst address reorder failed");
610 flow->ipaddr_rule.fs_ipdst_offset =
611 dpaa2_flow_extract_key_offset(
612 &priv->extract.tc_key_extract[fs_group],
613 prot, NH_FLD_IP_DST);
/* Check that every bit set in the application's mask is also set in this
 * PMD's supported mask for the item type: OR the two together and compare
 * with the supported mask — any extra bit makes them differ. Returns 0 on
 * success (non-zero otherwise). NOTE(review): the return-type line, the
 * declaration of the local 'mask' buffer, 'break' statements, and the
 * default case are missing from this excerpt.
 */
620 dpaa2_flow_extract_support(
621 const uint8_t *mask_src,
622 enum rte_flow_item_type type)
626 const char *mask_support = 0;
629 case RTE_FLOW_ITEM_TYPE_ETH:
630 mask_support = (const char *)&dpaa2_flow_item_eth_mask;
631 size = sizeof(struct rte_flow_item_eth);
633 case RTE_FLOW_ITEM_TYPE_VLAN:
634 mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
635 size = sizeof(struct rte_flow_item_vlan);
637 case RTE_FLOW_ITEM_TYPE_IPV4:
638 mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
639 size = sizeof(struct rte_flow_item_ipv4);
641 case RTE_FLOW_ITEM_TYPE_IPV6:
642 mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
643 size = sizeof(struct rte_flow_item_ipv6);
645 case RTE_FLOW_ITEM_TYPE_ICMP:
646 mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
647 size = sizeof(struct rte_flow_item_icmp);
649 case RTE_FLOW_ITEM_TYPE_UDP:
650 mask_support = (const char *)&dpaa2_flow_item_udp_mask;
651 size = sizeof(struct rte_flow_item_udp);
653 case RTE_FLOW_ITEM_TYPE_TCP:
654 mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
655 size = sizeof(struct rte_flow_item_tcp);
657 case RTE_FLOW_ITEM_TYPE_SCTP:
658 mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
659 size = sizeof(struct rte_flow_item_sctp);
661 case RTE_FLOW_ITEM_TYPE_GRE:
662 mask_support = (const char *)&dpaa2_flow_item_gre_mask;
663 size = sizeof(struct rte_flow_item_gre);
669 memcpy(mask, mask_support, size);
671 for (i = 0; i < size; i++)
672 mask[i] = (mask[i] | mask_src[i]);
674 if (memcmp(mask, mask_support, size))
/* Translate an ETH pattern item into QoS and FS classification rules:
 * for each non-zero sub-mask (src MAC, dst MAC, ether type) make sure the
 * matching extract exists (adding it reconfigures the table), keep IP
 * address extracts at the key tail, then write key/mask data into both
 * rules. A spec-less ETH item is skipped with a warning. NOTE(review):
 * the return-type line, group assignment, error returns, and the final
 * return are missing from this excerpt — restore from upstream.
 */
681 dpaa2_configure_flow_eth(struct rte_flow *flow,
682 struct rte_eth_dev *dev,
683 const struct rte_flow_attr *attr,
684 const struct rte_flow_item *pattern,
685 const struct rte_flow_action actions[] __rte_unused,
686 struct rte_flow_error *error __rte_unused,
687 int *device_configured)
692 const struct rte_flow_item_eth *spec, *mask;
694 /* TODO: Currently upper bound of range parameter is not implemented */
695 const struct rte_flow_item_eth *last __rte_unused;
696 struct dpaa2_dev_priv *priv = dev->data->dev_private;
697 const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
701 /* Parse pattern list to get the matching parameters */
702 spec = (const struct rte_flow_item_eth *)pattern->spec;
703 last = (const struct rte_flow_item_eth *)pattern->last;
704 mask = (const struct rte_flow_item_eth *)
705 (pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
707 /* Don't care any field of eth header,
708 * only care eth protocol.
710 DPAA2_PMD_WARN("No pattern spec for Eth flow, just skip");
714 /* Get traffic class index and flow id to be configured */
716 flow->tc_index = attr->priority;
718 if (dpaa2_flow_extract_support((const uint8_t *)mask,
719 RTE_FLOW_ITEM_TYPE_ETH)) {
720 DPAA2_PMD_WARN("Extract field(s) of ethernet not support.");
/* Source MAC address match. */
725 if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
726 index = dpaa2_flow_extract_search(
727 &priv->extract.qos_key_extract.dpkg,
728 NET_PROT_ETH, NH_FLD_ETH_SA);
730 ret = dpaa2_flow_extract_add(
731 &priv->extract.qos_key_extract,
732 NET_PROT_ETH, NH_FLD_ETH_SA,
735 DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
739 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
741 index = dpaa2_flow_extract_search(
742 &priv->extract.tc_key_extract[group].dpkg,
743 NET_PROT_ETH, NH_FLD_ETH_SA);
745 ret = dpaa2_flow_extract_add(
746 &priv->extract.tc_key_extract[group],
747 NET_PROT_ETH, NH_FLD_ETH_SA,
750 DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
753 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
756 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
759 "Move ipaddr before ETH_SA rule set failed");
763 ret = dpaa2_flow_rule_data_set(
764 &priv->extract.qos_key_extract,
768 &spec->src.addr_bytes,
769 &mask->src.addr_bytes,
770 sizeof(struct rte_ether_addr));
772 DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
776 ret = dpaa2_flow_rule_data_set(
777 &priv->extract.tc_key_extract[group],
781 &spec->src.addr_bytes,
782 &mask->src.addr_bytes,
783 sizeof(struct rte_ether_addr));
785 DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
/* Destination MAC address match. */
790 if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
791 index = dpaa2_flow_extract_search(
792 &priv->extract.qos_key_extract.dpkg,
793 NET_PROT_ETH, NH_FLD_ETH_DA);
795 ret = dpaa2_flow_extract_add(
796 &priv->extract.qos_key_extract,
797 NET_PROT_ETH, NH_FLD_ETH_DA,
800 DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
804 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
807 index = dpaa2_flow_extract_search(
808 &priv->extract.tc_key_extract[group].dpkg,
809 NET_PROT_ETH, NH_FLD_ETH_DA);
811 ret = dpaa2_flow_extract_add(
812 &priv->extract.tc_key_extract[group],
813 NET_PROT_ETH, NH_FLD_ETH_DA,
816 DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
820 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
823 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
826 "Move ipaddr before ETH DA rule set failed");
830 ret = dpaa2_flow_rule_data_set(
831 &priv->extract.qos_key_extract,
835 &spec->dst.addr_bytes,
836 &mask->dst.addr_bytes,
837 sizeof(struct rte_ether_addr));
839 DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
843 ret = dpaa2_flow_rule_data_set(
844 &priv->extract.tc_key_extract[group],
848 &spec->dst.addr_bytes,
849 &mask->dst.addr_bytes,
850 sizeof(struct rte_ether_addr));
852 DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
/* Ether-type match. */
857 if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
858 index = dpaa2_flow_extract_search(
859 &priv->extract.qos_key_extract.dpkg,
860 NET_PROT_ETH, NH_FLD_ETH_TYPE);
862 ret = dpaa2_flow_extract_add(
863 &priv->extract.qos_key_extract,
864 NET_PROT_ETH, NH_FLD_ETH_TYPE,
867 DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
871 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
873 index = dpaa2_flow_extract_search(
874 &priv->extract.tc_key_extract[group].dpkg,
875 NET_PROT_ETH, NH_FLD_ETH_TYPE);
877 ret = dpaa2_flow_extract_add(
878 &priv->extract.tc_key_extract[group],
879 NET_PROT_ETH, NH_FLD_ETH_TYPE,
882 DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
886 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
889 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
892 "Move ipaddr before ETH TYPE rule set failed");
896 ret = dpaa2_flow_rule_data_set(
897 &priv->extract.qos_key_extract,
905 DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
909 ret = dpaa2_flow_rule_data_set(
910 &priv->extract.tc_key_extract[group],
918 DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
923 (*device_configured) |= local_cfg;
/* Translate a VLAN pattern item. A spec-less VLAN item is matched by
 * ETH type == 0x8100 via a discrimination rule; otherwise the TCI field
 * is extracted and matched in both QoS and FS rules. NOTE(review): the
 * return-type line, group assignment, the spec-null test, error returns,
 * and the final return are missing from this excerpt.
 */
929 dpaa2_configure_flow_vlan(struct rte_flow *flow,
930 struct rte_eth_dev *dev,
931 const struct rte_flow_attr *attr,
932 const struct rte_flow_item *pattern,
933 const struct rte_flow_action actions[] __rte_unused,
934 struct rte_flow_error *error __rte_unused,
935 int *device_configured)
940 const struct rte_flow_item_vlan *spec, *mask;
942 const struct rte_flow_item_vlan *last __rte_unused;
943 struct dpaa2_dev_priv *priv = dev->data->dev_private;
947 /* Parse pattern list to get the matching parameters */
948 spec = (const struct rte_flow_item_vlan *)pattern->spec;
949 last = (const struct rte_flow_item_vlan *)pattern->last;
950 mask = (const struct rte_flow_item_vlan *)
951 (pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
953 /* Get traffic class index and flow id to be configured */
955 flow->tc_index = attr->priority;
958 /* Don't care any field of vlan header,
959 * only care vlan protocol.
961 /* Eth type is actually used for vLan classification.
963 struct proto_discrimination proto;
965 index = dpaa2_flow_extract_search(
966 &priv->extract.qos_key_extract.dpkg,
967 NET_PROT_ETH, NH_FLD_ETH_TYPE);
969 ret = dpaa2_flow_proto_discrimination_extract(
970 &priv->extract.qos_key_extract,
971 RTE_FLOW_ITEM_TYPE_ETH);
974 "QoS Ext ETH_TYPE to discriminate vLan failed");
978 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
981 index = dpaa2_flow_extract_search(
982 &priv->extract.tc_key_extract[group].dpkg,
983 NET_PROT_ETH, NH_FLD_ETH_TYPE);
985 ret = dpaa2_flow_proto_discrimination_extract(
986 &priv->extract.tc_key_extract[group],
987 RTE_FLOW_ITEM_TYPE_ETH);
990 "FS Ext ETH_TYPE to discriminate vLan failed.");
994 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
997 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1000 "Move ipaddr before vLan discrimination set failed");
1004 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1005 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1006 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1009 DPAA2_PMD_ERR("vLan discrimination rule set failed");
1013 (*device_configured) |= local_cfg;
1018 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1019 RTE_FLOW_ITEM_TYPE_VLAN)) {
1020 DPAA2_PMD_WARN("Extract field(s) of vlan not support.");
/* TCI match: ensure VLAN_TCI extracts exist, then write rule data. */
1028 index = dpaa2_flow_extract_search(
1029 &priv->extract.qos_key_extract.dpkg,
1030 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1032 ret = dpaa2_flow_extract_add(
1033 &priv->extract.qos_key_extract,
1036 sizeof(rte_be16_t));
1038 DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1042 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1045 index = dpaa2_flow_extract_search(
1046 &priv->extract.tc_key_extract[group].dpkg,
1047 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1049 ret = dpaa2_flow_extract_add(
1050 &priv->extract.tc_key_extract[group],
1053 sizeof(rte_be16_t));
1055 DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1059 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1062 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1065 "Move ipaddr before VLAN TCI rule set failed");
1069 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1075 sizeof(rte_be16_t));
1077 DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1081 ret = dpaa2_flow_rule_data_set(
1082 &priv->extract.tc_key_extract[group],
1088 sizeof(rte_be16_t));
1090 DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1094 (*device_configured) |= local_cfg;
/* Shared handler for IPv4 and IPv6 pattern items. A spec-less item is
 * matched by ETH type (0x0800 / 0x86DD) via a discrimination rule.
 * Otherwise SRC address, DST address, and next-protocol sub-fields are
 * extracted and written into both QoS and FS rules; IP addresses set the
 * flow's ipaddr_rule offsets so later extracts can relocate them to the
 * key tail. NOTE(review): the return-type line, several local
 * declarations, brace/else lines, error returns, and the final return are
 * missing from this excerpt — restore from upstream.
 */
1100 dpaa2_configure_flow_generic_ip(
1101 struct rte_flow *flow,
1102 struct rte_eth_dev *dev,
1103 const struct rte_flow_attr *attr,
1104 const struct rte_flow_item *pattern,
1105 const struct rte_flow_action actions[] __rte_unused,
1106 struct rte_flow_error *error __rte_unused,
1107 int *device_configured)
1112 const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1114 const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1116 const void *key, *mask;
1119 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1120 const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1123 group = attr->group;
1125 /* Parse pattern list to get the matching parameters */
1126 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1127 spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1128 mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1129 (pattern->mask ? pattern->mask :
1130 &dpaa2_flow_item_ipv4_mask);
1132 spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1133 mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1134 (pattern->mask ? pattern->mask :
1135 &dpaa2_flow_item_ipv6_mask);
1138 /* Get traffic class index and flow id to be configured */
1139 flow->tc_id = group;
1140 flow->tc_index = attr->priority;
1142 if (!spec_ipv4 && !spec_ipv6) {
1143 /* Don't care any field of IP header,
1144 * only care IP protocol.
1145 * Example: flow create 0 ingress pattern ipv6 /
1147 /* Eth type is actually used for IP identification.
1149 /* TODO: Current design only supports Eth + IP,
1150 * Eth + vLan + IP needs to add.
1152 struct proto_discrimination proto;
1154 index = dpaa2_flow_extract_search(
1155 &priv->extract.qos_key_extract.dpkg,
1156 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1158 ret = dpaa2_flow_proto_discrimination_extract(
1159 &priv->extract.qos_key_extract,
1160 RTE_FLOW_ITEM_TYPE_ETH);
1163 "QoS Ext ETH_TYPE to discriminate IP failed.");
1167 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1170 index = dpaa2_flow_extract_search(
1171 &priv->extract.tc_key_extract[group].dpkg,
1172 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1174 ret = dpaa2_flow_proto_discrimination_extract(
1175 &priv->extract.tc_key_extract[group],
1176 RTE_FLOW_ITEM_TYPE_ETH);
1179 "FS Ext ETH_TYPE to discriminate IP failed");
1183 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1186 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1189 "Move ipaddr before IP discrimination set failed");
1193 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1194 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1195 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1197 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1198 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1201 DPAA2_PMD_ERR("IP discrimination rule set failed");
1205 (*device_configured) |= local_cfg;
1211 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1212 RTE_FLOW_ITEM_TYPE_IPV4)) {
1213 DPAA2_PMD_WARN("Extract field(s) of IPv4 not support.");
1220 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1221 RTE_FLOW_ITEM_TYPE_IPV6)) {
1222 DPAA2_PMD_WARN("Extract field(s) of IPv6 not support.");
/* Remember which IP family supplies the address rule for later reorders. */
1228 if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1229 mask_ipv4->hdr.dst_addr)) {
1230 flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1231 } else if (mask_ipv6 &&
1232 (memcmp((const char *)mask_ipv6->hdr.src_addr,
1233 zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1234 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1235 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1236 flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
/* Source address match. */
1239 if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1241 memcmp((const char *)mask_ipv6->hdr.src_addr,
1242 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1243 index = dpaa2_flow_extract_search(
1244 &priv->extract.qos_key_extract.dpkg,
1245 NET_PROT_IP, NH_FLD_IP_SRC);
1247 ret = dpaa2_flow_extract_add(
1248 &priv->extract.qos_key_extract,
1253 DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1257 local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
1258 DPAA2_QOS_TABLE_IPADDR_EXTRACT);
1261 index = dpaa2_flow_extract_search(
1262 &priv->extract.tc_key_extract[group].dpkg,
1263 NET_PROT_IP, NH_FLD_IP_SRC);
1265 ret = dpaa2_flow_extract_add(
1266 &priv->extract.tc_key_extract[group],
1271 DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1275 local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
1276 DPAA2_FS_TABLE_IPADDR_EXTRACT);
1280 key = &spec_ipv4->hdr.src_addr;
1282 key = &spec_ipv6->hdr.src_addr[0];
1284 mask = &mask_ipv4->hdr.src_addr;
1285 size = NH_FLD_IPV4_ADDR_SIZE;
1286 prot = NET_PROT_IPV4;
1288 mask = &mask_ipv6->hdr.src_addr[0];
1289 size = NH_FLD_IPV6_ADDR_SIZE;
1290 prot = NET_PROT_IPV6;
1293 ret = dpaa2_flow_rule_data_set(
1294 &priv->extract.qos_key_extract,
1296 prot, NH_FLD_IP_SRC,
1299 DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1303 ret = dpaa2_flow_rule_data_set(
1304 &priv->extract.tc_key_extract[group],
1306 prot, NH_FLD_IP_SRC,
1309 DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
/* Cache current SRC offsets so later extracts can relocate the data. */
1313 flow->ipaddr_rule.qos_ipsrc_offset =
1314 dpaa2_flow_extract_key_offset(
1315 &priv->extract.qos_key_extract,
1316 prot, NH_FLD_IP_SRC);
1317 flow->ipaddr_rule.fs_ipsrc_offset =
1318 dpaa2_flow_extract_key_offset(
1319 &priv->extract.tc_key_extract[group],
1320 prot, NH_FLD_IP_SRC);
/* Destination address match. */
1323 if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1325 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1326 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1327 index = dpaa2_flow_extract_search(
1328 &priv->extract.qos_key_extract.dpkg,
1329 NET_PROT_IP, NH_FLD_IP_DST);
1332 size = NH_FLD_IPV4_ADDR_SIZE;
1334 size = NH_FLD_IPV6_ADDR_SIZE;
1335 ret = dpaa2_flow_extract_add(
1336 &priv->extract.qos_key_extract,
1341 DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1345 local_cfg |= (DPAA2_QOS_TABLE_RECONFIGURE |
1346 DPAA2_QOS_TABLE_IPADDR_EXTRACT);
1349 index = dpaa2_flow_extract_search(
1350 &priv->extract.tc_key_extract[group].dpkg,
1351 NET_PROT_IP, NH_FLD_IP_DST);
1354 size = NH_FLD_IPV4_ADDR_SIZE;
1356 size = NH_FLD_IPV6_ADDR_SIZE;
1357 ret = dpaa2_flow_extract_add(
1358 &priv->extract.tc_key_extract[group],
1363 DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1367 local_cfg |= (DPAA2_FS_TABLE_RECONFIGURE |
1368 DPAA2_FS_TABLE_IPADDR_EXTRACT);
1372 key = &spec_ipv4->hdr.dst_addr;
1374 key = spec_ipv6->hdr.dst_addr;
1376 mask = &mask_ipv4->hdr.dst_addr;
1377 size = NH_FLD_IPV4_ADDR_SIZE;
1378 prot = NET_PROT_IPV4;
1380 mask = &mask_ipv6->hdr.dst_addr[0];
1381 size = NH_FLD_IPV6_ADDR_SIZE;
1382 prot = NET_PROT_IPV6;
1385 ret = dpaa2_flow_rule_data_set(
1386 &priv->extract.qos_key_extract,
1388 prot, NH_FLD_IP_DST,
1391 DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1395 ret = dpaa2_flow_rule_data_set(
1396 &priv->extract.tc_key_extract[group],
1398 prot, NH_FLD_IP_DST,
1401 DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1404 flow->ipaddr_rule.qos_ipdst_offset =
1405 dpaa2_flow_extract_key_offset(
1406 &priv->extract.qos_key_extract,
1407 prot, NH_FLD_IP_DST);
1408 flow->ipaddr_rule.fs_ipdst_offset =
1409 dpaa2_flow_extract_key_offset(
1410 &priv->extract.tc_key_extract[group],
1411 prot, NH_FLD_IP_DST);
/* Next-protocol (L4 proto) match. */
1414 if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1415 (mask_ipv6 && mask_ipv6->hdr.proto)) {
1416 index = dpaa2_flow_extract_search(
1417 &priv->extract.qos_key_extract.dpkg,
1418 NET_PROT_IP, NH_FLD_IP_PROTO);
1420 ret = dpaa2_flow_extract_add(
1421 &priv->extract.qos_key_extract,
1424 NH_FLD_IP_PROTO_SIZE);
1426 DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1430 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1433 index = dpaa2_flow_extract_search(
1434 &priv->extract.tc_key_extract[group].dpkg,
1435 NET_PROT_IP, NH_FLD_IP_PROTO);
1437 ret = dpaa2_flow_extract_add(
1438 &priv->extract.tc_key_extract[group],
1441 NH_FLD_IP_PROTO_SIZE);
1443 DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1447 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1450 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1453 "Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1458 key = &spec_ipv4->hdr.next_proto_id;
1460 key = &spec_ipv6->hdr.proto;
1462 mask = &mask_ipv4->hdr.next_proto_id;
1464 mask = &mask_ipv6->hdr.proto;
1466 ret = dpaa2_flow_rule_data_set(
1467 &priv->extract.qos_key_extract,
1471 key, mask, NH_FLD_IP_PROTO_SIZE);
1473 DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1477 ret = dpaa2_flow_rule_data_set(
1478 &priv->extract.tc_key_extract[group],
1482 key, mask, NH_FLD_IP_PROTO_SIZE);
1484 DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1489 (*device_configured) |= local_cfg;
1495 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1496 struct rte_eth_dev *dev,
1497 const struct rte_flow_attr *attr,
1498 const struct rte_flow_item *pattern,
1499 const struct rte_flow_action actions[] __rte_unused,
1500 struct rte_flow_error *error __rte_unused,
1501 int *device_configured)
1506 const struct rte_flow_item_icmp *spec, *mask;
1508 const struct rte_flow_item_icmp *last __rte_unused;
1509 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1511 group = attr->group;
1513 /* Parse pattern list to get the matching parameters */
1514 spec = (const struct rte_flow_item_icmp *)pattern->spec;
1515 last = (const struct rte_flow_item_icmp *)pattern->last;
1516 mask = (const struct rte_flow_item_icmp *)
1517 (pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1519 /* Get traffic class index and flow id to be configured */
1520 flow->tc_id = group;
1521 flow->tc_index = attr->priority;
1524 /* Don't care any field of ICMP header,
1525 * only care ICMP protocol.
1526 * Example: flow create 0 ingress pattern icmp /
1528 /* Next proto of Generical IP is actually used
1529 * for ICMP identification.
1531 struct proto_discrimination proto;
1533 index = dpaa2_flow_extract_search(
1534 &priv->extract.qos_key_extract.dpkg,
1535 NET_PROT_IP, NH_FLD_IP_PROTO);
1537 ret = dpaa2_flow_proto_discrimination_extract(
1538 &priv->extract.qos_key_extract,
1539 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1542 "QoS Extract IP protocol to discriminate ICMP failed.");
1546 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1549 index = dpaa2_flow_extract_search(
1550 &priv->extract.tc_key_extract[group].dpkg,
1551 NET_PROT_IP, NH_FLD_IP_PROTO);
1553 ret = dpaa2_flow_proto_discrimination_extract(
1554 &priv->extract.tc_key_extract[group],
1555 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1558 "FS Extract IP protocol to discriminate ICMP failed.");
1562 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1565 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1568 "Move IP addr before ICMP discrimination set failed");
1572 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1573 proto.ip_proto = IPPROTO_ICMP;
1574 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1577 DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1581 (*device_configured) |= local_cfg;
1586 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1587 RTE_FLOW_ITEM_TYPE_ICMP)) {
1588 DPAA2_PMD_WARN("Extract field(s) of ICMP not support.");
1593 if (mask->hdr.icmp_type) {
1594 index = dpaa2_flow_extract_search(
1595 &priv->extract.qos_key_extract.dpkg,
1596 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1598 ret = dpaa2_flow_extract_add(
1599 &priv->extract.qos_key_extract,
1602 NH_FLD_ICMP_TYPE_SIZE);
1604 DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1608 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1611 index = dpaa2_flow_extract_search(
1612 &priv->extract.tc_key_extract[group].dpkg,
1613 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1615 ret = dpaa2_flow_extract_add(
1616 &priv->extract.tc_key_extract[group],
1619 NH_FLD_ICMP_TYPE_SIZE);
1621 DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1625 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1628 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1631 "Move ipaddr before ICMP TYPE set failed");
1635 ret = dpaa2_flow_rule_data_set(
1636 &priv->extract.qos_key_extract,
1640 &spec->hdr.icmp_type,
1641 &mask->hdr.icmp_type,
1642 NH_FLD_ICMP_TYPE_SIZE);
1644 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1648 ret = dpaa2_flow_rule_data_set(
1649 &priv->extract.tc_key_extract[group],
1653 &spec->hdr.icmp_type,
1654 &mask->hdr.icmp_type,
1655 NH_FLD_ICMP_TYPE_SIZE);
1657 DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1662 if (mask->hdr.icmp_code) {
1663 index = dpaa2_flow_extract_search(
1664 &priv->extract.qos_key_extract.dpkg,
1665 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1667 ret = dpaa2_flow_extract_add(
1668 &priv->extract.qos_key_extract,
1671 NH_FLD_ICMP_CODE_SIZE);
1673 DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1677 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1680 index = dpaa2_flow_extract_search(
1681 &priv->extract.tc_key_extract[group].dpkg,
1682 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1684 ret = dpaa2_flow_extract_add(
1685 &priv->extract.tc_key_extract[group],
1688 NH_FLD_ICMP_CODE_SIZE);
1690 DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1694 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1697 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1700 "Move ipaddr after ICMP CODE set failed");
1704 ret = dpaa2_flow_rule_data_set(
1705 &priv->extract.qos_key_extract,
1709 &spec->hdr.icmp_code,
1710 &mask->hdr.icmp_code,
1711 NH_FLD_ICMP_CODE_SIZE);
1713 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1717 ret = dpaa2_flow_rule_data_set(
1718 &priv->extract.tc_key_extract[group],
1722 &spec->hdr.icmp_code,
1723 &mask->hdr.icmp_code,
1724 NH_FLD_ICMP_CODE_SIZE);
1726 DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1731 (*device_configured) |= local_cfg;
/* Configure QoS/FS classification for a UDP pattern item.  When there is
 * no spec, or when MC/WRIOP cannot identify L4 by ports
 * (!mc_l4_port_identification, see the workaround note at the top of the
 * file), UDP is discriminated via the IP next-protocol field; otherwise
 * the masked src/dst ports are added to the extract keys and rule data.
 * NOTE(review): this excerpt elides some original lines (gaps in the
 * embedded numbering); code left byte-identical.
 */
1737 dpaa2_configure_flow_udp(struct rte_flow *flow,
1738 struct rte_eth_dev *dev,
1739 const struct rte_flow_attr *attr,
1740 const struct rte_flow_item *pattern,
1741 const struct rte_flow_action actions[] __rte_unused,
1742 struct rte_flow_error *error __rte_unused,
1743 int *device_configured)
1748 const struct rte_flow_item_udp *spec, *mask;
1750 const struct rte_flow_item_udp *last __rte_unused;
1751 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1753 group = attr->group;
1755 /* Parse pattern list to get the matching parameters */
1756 spec = (const struct rte_flow_item_udp *)pattern->spec;
1757 last = (const struct rte_flow_item_udp *)pattern->last;
1758 mask = (const struct rte_flow_item_udp *)
1759 (pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
1761 /* Get traffic class index and flow id to be configured */
1762 flow->tc_id = group;
1763 flow->tc_index = attr->priority;
/* Discriminate UDP by IP proto when ports cannot (or need not) be used. */
1765 if (!spec || !mc_l4_port_identification) {
1766 struct proto_discrimination proto;
1768 index = dpaa2_flow_extract_search(
1769 &priv->extract.qos_key_extract.dpkg,
1770 NET_PROT_IP, NH_FLD_IP_PROTO);
1772 ret = dpaa2_flow_proto_discrimination_extract(
1773 &priv->extract.qos_key_extract,
1774 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1777 "QoS Extract IP protocol to discriminate UDP failed.");
1781 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1784 index = dpaa2_flow_extract_search(
1785 &priv->extract.tc_key_extract[group].dpkg,
1786 NET_PROT_IP, NH_FLD_IP_PROTO);
1788 ret = dpaa2_flow_proto_discrimination_extract(
1789 &priv->extract.tc_key_extract[group],
1790 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1793 "FS Extract IP protocol to discriminate UDP failed.");
1797 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
/* Keep IP-address extracts at the tail of the key layout. */
1800 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1803 "Move IP addr before UDP discrimination set failed");
1807 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1808 proto.ip_proto = IPPROTO_UDP;
1809 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1812 DPAA2_PMD_ERR("UDP discrimination rule set failed");
1816 (*device_configured) |= local_cfg;
/* Reject masks covering UDP fields the PMD cannot extract. */
1822 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1823 RTE_FLOW_ITEM_TYPE_UDP)) {
1824 DPAA2_PMD_WARN("Extract field(s) of UDP not support.");
/* Match on UDP source port: add extract to QoS and FS keys, then
 * write spec/mask into both rules.
 */
1829 if (mask->hdr.src_port) {
1830 index = dpaa2_flow_extract_search(
1831 &priv->extract.qos_key_extract.dpkg,
1832 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
1834 ret = dpaa2_flow_extract_add(
1835 &priv->extract.qos_key_extract,
1837 NH_FLD_UDP_PORT_SRC,
1838 NH_FLD_UDP_PORT_SIZE);
1840 DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
1844 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1847 index = dpaa2_flow_extract_search(
1848 &priv->extract.tc_key_extract[group].dpkg,
1849 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
1851 ret = dpaa2_flow_extract_add(
1852 &priv->extract.tc_key_extract[group],
1854 NH_FLD_UDP_PORT_SRC,
1855 NH_FLD_UDP_PORT_SIZE);
1857 DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
1861 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1864 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1867 "Move ipaddr before UDP_PORT_SRC set failed");
1871 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1874 NH_FLD_UDP_PORT_SRC,
1875 &spec->hdr.src_port,
1876 &mask->hdr.src_port,
1877 NH_FLD_UDP_PORT_SIZE);
1880 "QoS NH_FLD_UDP_PORT_SRC rule data set failed");
1884 ret = dpaa2_flow_rule_data_set(
1885 &priv->extract.tc_key_extract[group],
1888 NH_FLD_UDP_PORT_SRC,
1889 &spec->hdr.src_port,
1890 &mask->hdr.src_port,
1891 NH_FLD_UDP_PORT_SIZE);
1894 "FS NH_FLD_UDP_PORT_SRC rule data set failed");
/* Match on UDP destination port: same sequence as source port. */
1899 if (mask->hdr.dst_port) {
1900 index = dpaa2_flow_extract_search(
1901 &priv->extract.qos_key_extract.dpkg,
1902 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
1904 ret = dpaa2_flow_extract_add(
1905 &priv->extract.qos_key_extract,
1907 NH_FLD_UDP_PORT_DST,
1908 NH_FLD_UDP_PORT_SIZE);
1910 DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
1914 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1917 index = dpaa2_flow_extract_search(
1918 &priv->extract.tc_key_extract[group].dpkg,
1919 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
1921 ret = dpaa2_flow_extract_add(
1922 &priv->extract.tc_key_extract[group],
1924 NH_FLD_UDP_PORT_DST,
1925 NH_FLD_UDP_PORT_SIZE);
1927 DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
1931 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1934 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1937 "Move ipaddr before UDP_PORT_DST set failed");
1941 ret = dpaa2_flow_rule_data_set(
1942 &priv->extract.qos_key_extract,
1945 NH_FLD_UDP_PORT_DST,
1946 &spec->hdr.dst_port,
1947 &mask->hdr.dst_port,
1948 NH_FLD_UDP_PORT_SIZE);
1951 "QoS NH_FLD_UDP_PORT_DST rule data set failed");
1955 ret = dpaa2_flow_rule_data_set(
1956 &priv->extract.tc_key_extract[group],
1959 NH_FLD_UDP_PORT_DST,
1960 &spec->hdr.dst_port,
1961 &mask->hdr.dst_port,
1962 NH_FLD_UDP_PORT_SIZE);
1965 "FS NH_FLD_UDP_PORT_DST rule data set failed");
1970 (*device_configured) |= local_cfg;
/* Configure QoS/FS classification for a TCP pattern item.  Mirrors the
 * UDP handler: with no spec or with !mc_l4_port_identification, TCP is
 * discriminated via the IP next-protocol field; otherwise the masked
 * src/dst ports are programmed into the QoS and per-TC FS keys/rules.
 * NOTE(review): this excerpt elides some original lines (gaps in the
 * embedded numbering); code left byte-identical.
 */
1976 dpaa2_configure_flow_tcp(struct rte_flow *flow,
1977 struct rte_eth_dev *dev,
1978 const struct rte_flow_attr *attr,
1979 const struct rte_flow_item *pattern,
1980 const struct rte_flow_action actions[] __rte_unused,
1981 struct rte_flow_error *error __rte_unused,
1982 int *device_configured)
1987 const struct rte_flow_item_tcp *spec, *mask;
1989 const struct rte_flow_item_tcp *last __rte_unused;
1990 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1992 group = attr->group;
1994 /* Parse pattern list to get the matching parameters */
1995 spec = (const struct rte_flow_item_tcp *)pattern->spec;
1996 last = (const struct rte_flow_item_tcp *)pattern->last;
1997 mask = (const struct rte_flow_item_tcp *)
1998 (pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2000 /* Get traffic class index and flow id to be configured */
2001 flow->tc_id = group;
2002 flow->tc_index = attr->priority;
/* Discriminate TCP by IP proto when ports cannot (or need not) be used. */
2004 if (!spec || !mc_l4_port_identification) {
2005 struct proto_discrimination proto;
2007 index = dpaa2_flow_extract_search(
2008 &priv->extract.qos_key_extract.dpkg,
2009 NET_PROT_IP, NH_FLD_IP_PROTO);
2011 ret = dpaa2_flow_proto_discrimination_extract(
2012 &priv->extract.qos_key_extract,
2013 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2016 "QoS Extract IP protocol to discriminate TCP failed.");
2020 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2023 index = dpaa2_flow_extract_search(
2024 &priv->extract.tc_key_extract[group].dpkg,
2025 NET_PROT_IP, NH_FLD_IP_PROTO);
2027 ret = dpaa2_flow_proto_discrimination_extract(
2028 &priv->extract.tc_key_extract[group],
2029 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2032 "FS Extract IP protocol to discriminate TCP failed.");
2036 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
/* Keep IP-address extracts at the tail of the key layout. */
2039 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2042 "Move IP addr before TCP discrimination set failed");
2046 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2047 proto.ip_proto = IPPROTO_TCP;
2048 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2051 DPAA2_PMD_ERR("TCP discrimination rule set failed");
2055 (*device_configured) |= local_cfg;
/* Reject masks covering TCP fields the PMD cannot extract. */
2061 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2062 RTE_FLOW_ITEM_TYPE_TCP)) {
2063 DPAA2_PMD_WARN("Extract field(s) of TCP not support.");
/* Match on TCP source port: add extract to QoS and FS keys, then
 * write spec/mask into both rules.
 */
2068 if (mask->hdr.src_port) {
2069 index = dpaa2_flow_extract_search(
2070 &priv->extract.qos_key_extract.dpkg,
2071 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2073 ret = dpaa2_flow_extract_add(
2074 &priv->extract.qos_key_extract,
2076 NH_FLD_TCP_PORT_SRC,
2077 NH_FLD_TCP_PORT_SIZE);
2079 DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2083 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2086 index = dpaa2_flow_extract_search(
2087 &priv->extract.tc_key_extract[group].dpkg,
2088 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2090 ret = dpaa2_flow_extract_add(
2091 &priv->extract.tc_key_extract[group],
2093 NH_FLD_TCP_PORT_SRC,
2094 NH_FLD_TCP_PORT_SIZE);
2096 DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2100 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2103 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2106 "Move ipaddr before TCP_PORT_SRC set failed");
2110 ret = dpaa2_flow_rule_data_set(
2111 &priv->extract.qos_key_extract,
2114 NH_FLD_TCP_PORT_SRC,
2115 &spec->hdr.src_port,
2116 &mask->hdr.src_port,
2117 NH_FLD_TCP_PORT_SIZE);
2120 "QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2124 ret = dpaa2_flow_rule_data_set(
2125 &priv->extract.tc_key_extract[group],
2128 NH_FLD_TCP_PORT_SRC,
2129 &spec->hdr.src_port,
2130 &mask->hdr.src_port,
2131 NH_FLD_TCP_PORT_SIZE);
2134 "FS NH_FLD_TCP_PORT_SRC rule data set failed");
/* Match on TCP destination port: same sequence as source port. */
2139 if (mask->hdr.dst_port) {
2140 index = dpaa2_flow_extract_search(
2141 &priv->extract.qos_key_extract.dpkg,
2142 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2144 ret = dpaa2_flow_extract_add(
2145 &priv->extract.qos_key_extract,
2147 NH_FLD_TCP_PORT_DST,
2148 NH_FLD_TCP_PORT_SIZE);
2150 DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2154 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2157 index = dpaa2_flow_extract_search(
2158 &priv->extract.tc_key_extract[group].dpkg,
2159 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2161 ret = dpaa2_flow_extract_add(
2162 &priv->extract.tc_key_extract[group],
2164 NH_FLD_TCP_PORT_DST,
2165 NH_FLD_TCP_PORT_SIZE);
2167 DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2171 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2174 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2177 "Move ipaddr before TCP_PORT_DST set failed");
2181 ret = dpaa2_flow_rule_data_set(
2182 &priv->extract.qos_key_extract,
2185 NH_FLD_TCP_PORT_DST,
2186 &spec->hdr.dst_port,
2187 &mask->hdr.dst_port,
2188 NH_FLD_TCP_PORT_SIZE);
2191 "QoS NH_FLD_TCP_PORT_DST rule data set failed");
2195 ret = dpaa2_flow_rule_data_set(
2196 &priv->extract.tc_key_extract[group],
2199 NH_FLD_TCP_PORT_DST,
2200 &spec->hdr.dst_port,
2201 &mask->hdr.dst_port,
2202 NH_FLD_TCP_PORT_SIZE);
2205 "FS NH_FLD_TCP_PORT_DST rule data set failed");
2210 (*device_configured) |= local_cfg;
/* Configure QoS/FS classification for an SCTP pattern item.  Mirrors the
 * UDP/TCP handlers: with no spec or with !mc_l4_port_identification, SCTP
 * is discriminated via the IP next-protocol field; otherwise the masked
 * src/dst ports are programmed into the QoS and per-TC FS keys/rules.
 * NOTE(review): this excerpt elides some original lines (gaps in the
 * embedded numbering); code left byte-identical.
 */
2216 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2217 struct rte_eth_dev *dev,
2218 const struct rte_flow_attr *attr,
2219 const struct rte_flow_item *pattern,
2220 const struct rte_flow_action actions[] __rte_unused,
2221 struct rte_flow_error *error __rte_unused,
2222 int *device_configured)
2227 const struct rte_flow_item_sctp *spec, *mask;
2229 const struct rte_flow_item_sctp *last __rte_unused;
2230 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2232 group = attr->group;
2234 /* Parse pattern list to get the matching parameters */
2235 spec = (const struct rte_flow_item_sctp *)pattern->spec;
2236 last = (const struct rte_flow_item_sctp *)pattern->last;
2237 mask = (const struct rte_flow_item_sctp *)
2238 (pattern->mask ? pattern->mask :
2239 &dpaa2_flow_item_sctp_mask);
2241 /* Get traffic class index and flow id to be configured */
2242 flow->tc_id = group;
2243 flow->tc_index = attr->priority;
/* Discriminate SCTP by IP proto when ports cannot (or need not) be used. */
2245 if (!spec || !mc_l4_port_identification) {
2246 struct proto_discrimination proto;
2248 index = dpaa2_flow_extract_search(
2249 &priv->extract.qos_key_extract.dpkg,
2250 NET_PROT_IP, NH_FLD_IP_PROTO);
2252 ret = dpaa2_flow_proto_discrimination_extract(
2253 &priv->extract.qos_key_extract,
2254 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2257 "QoS Extract IP protocol to discriminate SCTP failed.");
2261 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2264 index = dpaa2_flow_extract_search(
2265 &priv->extract.tc_key_extract[group].dpkg,
2266 NET_PROT_IP, NH_FLD_IP_PROTO);
2268 ret = dpaa2_flow_proto_discrimination_extract(
2269 &priv->extract.tc_key_extract[group],
2270 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2273 "FS Extract IP protocol to discriminate SCTP failed.");
2277 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
/* Keep IP-address extracts at the tail of the key layout. */
2280 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2283 "Move ipaddr before SCTP discrimination set failed");
2287 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2288 proto.ip_proto = IPPROTO_SCTP;
2289 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2292 DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2296 (*device_configured) |= local_cfg;
/* Reject masks covering SCTP fields the PMD cannot extract. */
2302 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2303 RTE_FLOW_ITEM_TYPE_SCTP)) {
2304 DPAA2_PMD_WARN("Extract field(s) of SCTP not support.");
/* Match on SCTP source port: add extract to QoS and FS keys, then
 * write spec/mask into both rules.
 */
2309 if (mask->hdr.src_port) {
2310 index = dpaa2_flow_extract_search(
2311 &priv->extract.qos_key_extract.dpkg,
2312 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2314 ret = dpaa2_flow_extract_add(
2315 &priv->extract.qos_key_extract,
2317 NH_FLD_SCTP_PORT_SRC,
2318 NH_FLD_SCTP_PORT_SIZE);
2320 DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2324 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2327 index = dpaa2_flow_extract_search(
2328 &priv->extract.tc_key_extract[group].dpkg,
2329 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2331 ret = dpaa2_flow_extract_add(
2332 &priv->extract.tc_key_extract[group],
2334 NH_FLD_SCTP_PORT_SRC,
2335 NH_FLD_SCTP_PORT_SIZE);
2337 DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2341 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2344 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2347 "Move ipaddr before SCTP_PORT_SRC set failed");
2351 ret = dpaa2_flow_rule_data_set(
2352 &priv->extract.qos_key_extract,
2355 NH_FLD_SCTP_PORT_SRC,
2356 &spec->hdr.src_port,
2357 &mask->hdr.src_port,
2358 NH_FLD_SCTP_PORT_SIZE);
2361 "QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2365 ret = dpaa2_flow_rule_data_set(
2366 &priv->extract.tc_key_extract[group],
2369 NH_FLD_SCTP_PORT_SRC,
2370 &spec->hdr.src_port,
2371 &mask->hdr.src_port,
2372 NH_FLD_SCTP_PORT_SIZE);
2375 "FS NH_FLD_SCTP_PORT_SRC rule data set failed");
/* Match on SCTP destination port: same sequence as source port. */
2380 if (mask->hdr.dst_port) {
2381 index = dpaa2_flow_extract_search(
2382 &priv->extract.qos_key_extract.dpkg,
2383 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2385 ret = dpaa2_flow_extract_add(
2386 &priv->extract.qos_key_extract,
2388 NH_FLD_SCTP_PORT_DST,
2389 NH_FLD_SCTP_PORT_SIZE);
2391 DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2395 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2398 index = dpaa2_flow_extract_search(
2399 &priv->extract.tc_key_extract[group].dpkg,
2400 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2402 ret = dpaa2_flow_extract_add(
2403 &priv->extract.tc_key_extract[group],
2405 NH_FLD_SCTP_PORT_DST,
2406 NH_FLD_SCTP_PORT_SIZE);
2408 DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2412 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2415 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2418 "Move ipaddr before SCTP_PORT_DST set failed");
2422 ret = dpaa2_flow_rule_data_set(
2423 &priv->extract.qos_key_extract,
2426 NH_FLD_SCTP_PORT_DST,
2427 &spec->hdr.dst_port,
2428 &mask->hdr.dst_port,
2429 NH_FLD_SCTP_PORT_SIZE);
2432 "QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2436 ret = dpaa2_flow_rule_data_set(
2437 &priv->extract.tc_key_extract[group],
2440 NH_FLD_SCTP_PORT_DST,
2441 &spec->hdr.dst_port,
2442 &mask->hdr.dst_port,
2443 NH_FLD_SCTP_PORT_SIZE);
2446 "FS NH_FLD_SCTP_PORT_DST rule data set failed");
2451 (*device_configured) |= local_cfg;
/* Configure QoS/FS classification for a GRE pattern item.  GRE is always
 * discriminated via the IP next-protocol field (IPPROTO_GRE); if the mask
 * additionally selects the GRE protocol-type field, that 16-bit field is
 * extracted and matched as well.
 * NOTE(review): this excerpt elides some original lines (gaps in the
 * embedded numbering); code left byte-identical.
 */
2457 dpaa2_configure_flow_gre(struct rte_flow *flow,
2458 struct rte_eth_dev *dev,
2459 const struct rte_flow_attr *attr,
2460 const struct rte_flow_item *pattern,
2461 const struct rte_flow_action actions[] __rte_unused,
2462 struct rte_flow_error *error __rte_unused,
2463 int *device_configured)
2468 const struct rte_flow_item_gre *spec, *mask;
2470 const struct rte_flow_item_gre *last __rte_unused;
2471 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2473 group = attr->group;
2475 /* Parse pattern list to get the matching parameters */
2476 spec = (const struct rte_flow_item_gre *)pattern->spec;
2477 last = (const struct rte_flow_item_gre *)pattern->last;
2478 mask = (const struct rte_flow_item_gre *)
2479 (pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2481 /* Get traffic class index and flow id to be configured */
2482 flow->tc_id = group;
2483 flow->tc_index = attr->priority;
/* Discriminate GRE by the IP next-protocol field in both QoS and FS keys. */
2486 struct proto_discrimination proto;
2488 index = dpaa2_flow_extract_search(
2489 &priv->extract.qos_key_extract.dpkg,
2490 NET_PROT_IP, NH_FLD_IP_PROTO);
2492 ret = dpaa2_flow_proto_discrimination_extract(
2493 &priv->extract.qos_key_extract,
2494 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2497 "QoS Extract IP protocol to discriminate GRE failed.");
2501 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2504 index = dpaa2_flow_extract_search(
2505 &priv->extract.tc_key_extract[group].dpkg,
2506 NET_PROT_IP, NH_FLD_IP_PROTO);
2508 ret = dpaa2_flow_proto_discrimination_extract(
2509 &priv->extract.tc_key_extract[group],
2510 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2513 "FS Extract IP protocol to discriminate GRE failed.");
2517 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
/* Keep IP-address extracts at the tail of the key layout. */
2520 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2523 "Move IP addr before GRE discrimination set failed");
2527 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2528 proto.ip_proto = IPPROTO_GRE;
2529 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2532 DPAA2_PMD_ERR("GRE discrimination rule set failed");
2536 (*device_configured) |= local_cfg;
/* Reject masks covering GRE fields the PMD cannot extract. */
2541 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2542 RTE_FLOW_ITEM_TYPE_GRE)) {
2543 DPAA2_PMD_WARN("Extract field(s) of GRE not support.");
/* Only the GRE protocol-type field is supported beyond discrimination. */
2548 if (!mask->protocol)
2551 index = dpaa2_flow_extract_search(
2552 &priv->extract.qos_key_extract.dpkg,
2553 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2555 ret = dpaa2_flow_extract_add(
2556 &priv->extract.qos_key_extract,
2559 sizeof(rte_be16_t));
2561 DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2565 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2568 index = dpaa2_flow_extract_search(
2569 &priv->extract.tc_key_extract[group].dpkg,
2570 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2572 ret = dpaa2_flow_extract_add(
2573 &priv->extract.tc_key_extract[group],
2576 sizeof(rte_be16_t));
2578 DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2582 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2585 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2588 "Move ipaddr before GRE_TYPE set failed");
2592 ret = dpaa2_flow_rule_data_set(
2593 &priv->extract.qos_key_extract,
2599 sizeof(rte_be16_t));
2602 "QoS NH_FLD_GRE_TYPE rule data set failed");
2606 ret = dpaa2_flow_rule_data_set(
2607 &priv->extract.tc_key_extract[group],
2613 sizeof(rte_be16_t));
2616 "FS NH_FLD_GRE_TYPE rule data set failed");
2620 (*device_configured) |= local_cfg;
2625 /* Existing QoS/FS entries that match on IP address(es)
2626 * must be updated whenever
2627 * new extract(s) are inserted ahead of the IP
2628 * address extract(s), since the key offsets of those addresses shift.
/* Re-program every installed flow whose key contains IP address match data
 * after the extract layout changed: for each such flow, remove the QoS
 * entry (and, for QUEUE-action flows on this TC, the FS entry), shift the
 * saved IP src/dst key and mask bytes to their new offsets, grow the rule
 * key_size by the layout extension, and re-add the entry.
 * NOTE(review): this excerpt elides some original lines (gaps in the
 * embedded numbering) — loop structure, branch conditions and several
 * memcpy source arguments are not all visible; code left byte-identical.
 */
2631 dpaa2_flow_entry_update(
2632 struct dpaa2_dev_priv *priv, uint8_t tc_id)
2634 struct rte_flow *curr = LIST_FIRST(&priv->flows);
2635 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2637 int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2638 int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2639 struct dpaa2_key_extract *qos_key_extract =
2640 &priv->extract.qos_key_extract;
2641 struct dpaa2_key_extract *tc_key_extract =
2642 &priv->extract.tc_key_extract[tc_id];
/* Staging buffers sized for the larger (IPv6) address. */
2643 char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2644 char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2645 char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2646 char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2647 int extend = -1, extend1, size;
/* Flows without an IP-address match need no relocation. */
2650 if (curr->ipaddr_rule.ipaddr_type ==
2652 curr = LIST_NEXT(curr, next);
/* Pick the new src/dst offsets and address size per IP version. */
2656 if (curr->ipaddr_rule.ipaddr_type ==
2659 qos_key_extract->key_info.ipv4_src_offset;
2661 qos_key_extract->key_info.ipv4_dst_offset;
2663 tc_key_extract->key_info.ipv4_src_offset;
2665 tc_key_extract->key_info.ipv4_dst_offset;
2666 size = NH_FLD_IPV4_ADDR_SIZE;
2669 qos_key_extract->key_info.ipv6_src_offset;
2671 qos_key_extract->key_info.ipv6_dst_offset;
2673 tc_key_extract->key_info.ipv6_src_offset;
2675 tc_key_extract->key_info.ipv6_dst_offset;
2676 size = NH_FLD_IPV6_ADDR_SIZE;
/* QoS table: remove the stale entry before rewriting its key. */
2679 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
2680 priv->token, &curr->qos_rule);
2682 DPAA2_PMD_ERR("Qos entry remove failed.");
/* Save the IP src bytes from the old offset and clear them in place;
 * offsets only ever grow (asserted), and the growth must match for
 * all relocated fields.
 */
2688 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2689 RTE_ASSERT(qos_ipsrc_offset >=
2690 curr->ipaddr_rule.qos_ipsrc_offset);
2691 extend1 = qos_ipsrc_offset -
2692 curr->ipaddr_rule.qos_ipsrc_offset;
2694 RTE_ASSERT(extend == extend1);
2699 (char *)(size_t)curr->qos_rule.key_iova +
2700 curr->ipaddr_rule.qos_ipsrc_offset,
2702 memset((char *)(size_t)curr->qos_rule.key_iova +
2703 curr->ipaddr_rule.qos_ipsrc_offset,
2707 (char *)(size_t)curr->qos_rule.mask_iova +
2708 curr->ipaddr_rule.qos_ipsrc_offset,
2710 memset((char *)(size_t)curr->qos_rule.mask_iova +
2711 curr->ipaddr_rule.qos_ipsrc_offset,
2714 curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
/* Same save-and-clear for the IP dst bytes. */
2717 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2718 RTE_ASSERT(qos_ipdst_offset >=
2719 curr->ipaddr_rule.qos_ipdst_offset);
2720 extend1 = qos_ipdst_offset -
2721 curr->ipaddr_rule.qos_ipdst_offset;
2723 RTE_ASSERT(extend == extend1);
2728 (char *)(size_t)curr->qos_rule.key_iova +
2729 curr->ipaddr_rule.qos_ipdst_offset,
2731 memset((char *)(size_t)curr->qos_rule.key_iova +
2732 curr->ipaddr_rule.qos_ipdst_offset,
2736 (char *)(size_t)curr->qos_rule.mask_iova +
2737 curr->ipaddr_rule.qos_ipdst_offset,
2739 memset((char *)(size_t)curr->qos_rule.mask_iova +
2740 curr->ipaddr_rule.qos_ipdst_offset,
2743 curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
/* Write the saved src/dst key and mask bytes back at the new offsets. */
2746 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
2747 memcpy((char *)(size_t)curr->qos_rule.key_iova +
2748 curr->ipaddr_rule.qos_ipsrc_offset,
2751 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2752 curr->ipaddr_rule.qos_ipsrc_offset,
2756 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
2757 memcpy((char *)(size_t)curr->qos_rule.key_iova +
2758 curr->ipaddr_rule.qos_ipdst_offset,
2761 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
2762 curr->ipaddr_rule.qos_ipdst_offset,
2768 curr->qos_rule.key_size += extend;
2770 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
2771 priv->token, &curr->qos_rule,
2772 curr->tc_id, curr->qos_index,
2775 DPAA2_PMD_ERR("Qos entry update failed.");
/* FS entries exist only for QUEUE-action flows; skip others. */
2779 if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
2780 curr = LIST_NEXT(curr, next);
2786 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
2787 priv->token, curr->tc_id, &curr->fs_rule);
2789 DPAA2_PMD_ERR("FS entry remove failed.");
/* Relocate FS key IP src bytes, but only for flows on this TC. */
2793 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
2794 tc_id == curr->tc_id) {
2795 RTE_ASSERT(fs_ipsrc_offset >=
2796 curr->ipaddr_rule.fs_ipsrc_offset);
2797 extend1 = fs_ipsrc_offset -
2798 curr->ipaddr_rule.fs_ipsrc_offset;
2800 RTE_ASSERT(extend == extend1);
2805 (char *)(size_t)curr->fs_rule.key_iova +
2806 curr->ipaddr_rule.fs_ipsrc_offset,
2808 memset((char *)(size_t)curr->fs_rule.key_iova +
2809 curr->ipaddr_rule.fs_ipsrc_offset,
2813 (char *)(size_t)curr->fs_rule.mask_iova +
2814 curr->ipaddr_rule.fs_ipsrc_offset,
2816 memset((char *)(size_t)curr->fs_rule.mask_iova +
2817 curr->ipaddr_rule.fs_ipsrc_offset,
2820 curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
/* Relocate FS key IP dst bytes likewise. */
2823 if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
2824 tc_id == curr->tc_id) {
2825 RTE_ASSERT(fs_ipdst_offset >=
2826 curr->ipaddr_rule.fs_ipdst_offset);
2827 extend1 = fs_ipdst_offset -
2828 curr->ipaddr_rule.fs_ipdst_offset;
2830 RTE_ASSERT(extend == extend1);
2835 (char *)(size_t)curr->fs_rule.key_iova +
2836 curr->ipaddr_rule.fs_ipdst_offset,
2838 memset((char *)(size_t)curr->fs_rule.key_iova +
2839 curr->ipaddr_rule.fs_ipdst_offset,
2843 (char *)(size_t)curr->fs_rule.mask_iova +
2844 curr->ipaddr_rule.fs_ipdst_offset,
2846 memset((char *)(size_t)curr->fs_rule.mask_iova +
2847 curr->ipaddr_rule.fs_ipdst_offset,
2850 curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
/* Write the saved FS src/dst key and mask bytes at the new offsets. */
2853 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
2854 memcpy((char *)(size_t)curr->fs_rule.key_iova +
2855 curr->ipaddr_rule.fs_ipsrc_offset,
2858 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
2859 curr->ipaddr_rule.fs_ipsrc_offset,
2863 if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
2864 memcpy((char *)(size_t)curr->fs_rule.key_iova +
2865 curr->ipaddr_rule.fs_ipdst_offset,
2868 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
2869 curr->ipaddr_rule.fs_ipdst_offset,
2875 curr->fs_rule.key_size += extend;
2877 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
2878 priv->token, curr->tc_id, curr->fs_index,
2879 &curr->fs_rule, &curr->action_cfg);
2881 DPAA2_PMD_ERR("FS entry update failed.");
2885 curr = LIST_NEXT(curr, next);
2892 dpaa2_generic_flow_set(struct rte_flow *flow,
2893 struct rte_eth_dev *dev,
2894 const struct rte_flow_attr *attr,
2895 const struct rte_flow_item pattern[],
2896 const struct rte_flow_action actions[],
2897 struct rte_flow_error *error)
2899 const struct rte_flow_action_queue *dest_queue;
2900 const struct rte_flow_action_rss *rss_conf;
2902 int is_keycfg_configured = 0, end_of_list = 0;
2903 int ret = 0, i = 0, j = 0;
2904 struct dpni_attr nic_attr;
2905 struct dpni_rx_tc_dist_cfg tc_cfg;
2906 struct dpni_qos_tbl_cfg qos_cfg;
2907 struct dpni_fs_action_cfg action;
2908 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2909 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2911 struct rte_flow *curr = LIST_FIRST(&priv->flows);
2913 /* Parse pattern list to get the matching parameters */
2914 while (!end_of_list) {
2915 switch (pattern[i].type) {
2916 case RTE_FLOW_ITEM_TYPE_ETH:
2917 ret = dpaa2_configure_flow_eth(flow,
2918 dev, attr, &pattern[i], actions, error,
2919 &is_keycfg_configured);
2921 DPAA2_PMD_ERR("ETH flow configuration failed!");
2925 case RTE_FLOW_ITEM_TYPE_VLAN:
2926 ret = dpaa2_configure_flow_vlan(flow,
2927 dev, attr, &pattern[i], actions, error,
2928 &is_keycfg_configured);
2930 DPAA2_PMD_ERR("vLan flow configuration failed!");
2934 case RTE_FLOW_ITEM_TYPE_IPV4:
2935 case RTE_FLOW_ITEM_TYPE_IPV6:
2936 ret = dpaa2_configure_flow_generic_ip(flow,
2937 dev, attr, &pattern[i], actions, error,
2938 &is_keycfg_configured);
2940 DPAA2_PMD_ERR("IP flow configuration failed!");
2944 case RTE_FLOW_ITEM_TYPE_ICMP:
2945 ret = dpaa2_configure_flow_icmp(flow,
2946 dev, attr, &pattern[i], actions, error,
2947 &is_keycfg_configured);
2949 DPAA2_PMD_ERR("ICMP flow configuration failed!");
2953 case RTE_FLOW_ITEM_TYPE_UDP:
2954 ret = dpaa2_configure_flow_udp(flow,
2955 dev, attr, &pattern[i], actions, error,
2956 &is_keycfg_configured);
2958 DPAA2_PMD_ERR("UDP flow configuration failed!");
2962 case RTE_FLOW_ITEM_TYPE_TCP:
2963 ret = dpaa2_configure_flow_tcp(flow,
2964 dev, attr, &pattern[i], actions, error,
2965 &is_keycfg_configured);
2967 DPAA2_PMD_ERR("TCP flow configuration failed!");
2971 case RTE_FLOW_ITEM_TYPE_SCTP:
2972 ret = dpaa2_configure_flow_sctp(flow,
2973 dev, attr, &pattern[i], actions, error,
2974 &is_keycfg_configured);
2976 DPAA2_PMD_ERR("SCTP flow configuration failed!");
2980 case RTE_FLOW_ITEM_TYPE_GRE:
2981 ret = dpaa2_configure_flow_gre(flow,
2982 dev, attr, &pattern[i], actions, error,
2983 &is_keycfg_configured);
2985 DPAA2_PMD_ERR("GRE flow configuration failed!");
2989 case RTE_FLOW_ITEM_TYPE_END:
2991 break; /*End of List*/
2993 DPAA2_PMD_ERR("Invalid action type");
3000 /* Let's parse action on matching traffic */
3002 while (!end_of_list) {
3003 switch (actions[j].type) {
3004 case RTE_FLOW_ACTION_TYPE_QUEUE:
3005 dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
3006 flow->flow_id = dest_queue->index;
3007 flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3008 memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3009 action.flow_id = flow->flow_id;
3010 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3011 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_extract.dpkg,
3012 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3014 "Unable to prepare extract parameters");
3018 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3019 qos_cfg.discard_on_miss = true;
3020 qos_cfg.keep_entries = true;
3021 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
3022 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3023 priv->token, &qos_cfg);
3026 "Distribution cannot be configured.(%d)"
3031 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3032 if (dpkg_prepare_key_cfg(
3033 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3034 (uint8_t *)(size_t)priv->extract
3035 .tc_extract_param[flow->tc_id]) < 0) {
3037 "Unable to prepare extract parameters");
3041 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3042 tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3043 tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
3044 tc_cfg.key_cfg_iova =
3045 (uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3046 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3047 tc_cfg.fs_cfg.keep_entries = true;
3048 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3050 flow->tc_id, &tc_cfg);
3053 "Distribution cannot be configured.(%d)"
3058 /* Configure QoS table first */
3059 memset(&nic_attr, 0, sizeof(struct dpni_attr));
3060 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
3061 priv->token, &nic_attr);
3064 "Failure to get attribute. dpni@%p err code(%d)\n",
3069 action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
3071 if (!priv->qos_index) {
3072 priv->qos_index = rte_zmalloc(0,
3073 nic_attr.qos_entries, 64);
3075 for (index = 0; index < nic_attr.qos_entries; index++) {
3076 if (!priv->qos_index[index]) {
3077 priv->qos_index[index] = 1;
3081 if (index >= nic_attr.qos_entries) {
3082 DPAA2_PMD_ERR("QoS table with %d entries full",
3083 nic_attr.qos_entries);
3086 flow->qos_rule.key_size = priv->extract
3087 .qos_key_extract.key_info.key_total_size;
3088 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3089 if (flow->ipaddr_rule.qos_ipdst_offset >=
3090 flow->ipaddr_rule.qos_ipsrc_offset) {
3091 flow->qos_rule.key_size =
3092 flow->ipaddr_rule.qos_ipdst_offset +
3093 NH_FLD_IPV4_ADDR_SIZE;
3095 flow->qos_rule.key_size =
3096 flow->ipaddr_rule.qos_ipsrc_offset +
3097 NH_FLD_IPV4_ADDR_SIZE;
3099 } else if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV6_ADDR) {
3100 if (flow->ipaddr_rule.qos_ipdst_offset >=
3101 flow->ipaddr_rule.qos_ipsrc_offset) {
3102 flow->qos_rule.key_size =
3103 flow->ipaddr_rule.qos_ipdst_offset +
3104 NH_FLD_IPV6_ADDR_SIZE;
3106 flow->qos_rule.key_size =
3107 flow->ipaddr_rule.qos_ipsrc_offset +
3108 NH_FLD_IPV6_ADDR_SIZE;
3111 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3112 priv->token, &flow->qos_rule,
3117 "Error in addnig entry to QoS table(%d)", ret);
3118 priv->qos_index[index] = 0;
3121 flow->qos_index = index;
3123 /* Then Configure FS table */
3124 if (!priv->fs_index) {
3125 priv->fs_index = rte_zmalloc(0,
3126 nic_attr.fs_entries, 64);
3128 for (index = 0; index < nic_attr.fs_entries; index++) {
3129 if (!priv->fs_index[index]) {
3130 priv->fs_index[index] = 1;
3134 if (index >= nic_attr.fs_entries) {
3135 DPAA2_PMD_ERR("FS table with %d entries full",
3136 nic_attr.fs_entries);
3139 flow->fs_rule.key_size = priv->extract
3140 .tc_key_extract[attr->group].key_info.key_total_size;
3141 if (flow->ipaddr_rule.ipaddr_type ==
3143 if (flow->ipaddr_rule.fs_ipdst_offset >=
3144 flow->ipaddr_rule.fs_ipsrc_offset) {
3145 flow->fs_rule.key_size =
3146 flow->ipaddr_rule.fs_ipdst_offset +
3147 NH_FLD_IPV4_ADDR_SIZE;
3149 flow->fs_rule.key_size =
3150 flow->ipaddr_rule.fs_ipsrc_offset +
3151 NH_FLD_IPV4_ADDR_SIZE;
3153 } else if (flow->ipaddr_rule.ipaddr_type ==
3155 if (flow->ipaddr_rule.fs_ipdst_offset >=
3156 flow->ipaddr_rule.fs_ipsrc_offset) {
3157 flow->fs_rule.key_size =
3158 flow->ipaddr_rule.fs_ipdst_offset +
3159 NH_FLD_IPV6_ADDR_SIZE;
3161 flow->fs_rule.key_size =
3162 flow->ipaddr_rule.fs_ipsrc_offset +
3163 NH_FLD_IPV6_ADDR_SIZE;
3166 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3168 &flow->fs_rule, &action);
3171 "Error in adding entry to FS table(%d)", ret);
3172 priv->fs_index[index] = 0;
3175 flow->fs_index = index;
3176 memcpy(&flow->action_cfg, &action,
3177 sizeof(struct dpni_fs_action_cfg));
3179 case RTE_FLOW_ACTION_TYPE_RSS:
3180 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
3181 priv->token, &nic_attr);
3184 "Failure to get attribute. dpni@%p err code(%d)\n",
3188 rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3189 for (i = 0; i < (int)rss_conf->queue_num; i++) {
3190 if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
3191 rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
3193 "Queue/Group combination are not supported\n");
3198 flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3199 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3200 &priv->extract.tc_key_extract[flow->tc_id].dpkg);
3203 "unable to set flow distribution.please check queue config\n");
3207 /* Allocate DMA'ble memory to write the rules */
3208 param = (size_t)rte_malloc(NULL, 256, 64);
3210 DPAA2_PMD_ERR("Memory allocation failure\n");
3214 if (dpkg_prepare_key_cfg(
3215 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3216 (uint8_t *)param) < 0) {
3218 "Unable to prepare extract parameters");
3219 rte_free((void *)param);
3223 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
3224 tc_cfg.dist_size = rss_conf->queue_num;
3225 tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
3226 tc_cfg.key_cfg_iova = (size_t)param;
3227 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
3229 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
3230 priv->token, flow->tc_id,
3234 "Distribution cannot be configured: %d\n", ret);
3235 rte_free((void *)param);
3239 rte_free((void *)param);
3240 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3241 if (dpkg_prepare_key_cfg(
3242 &priv->extract.qos_key_extract.dpkg,
3243 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3245 "Unable to prepare extract parameters");
3249 sizeof(struct dpni_qos_tbl_cfg));
3250 qos_cfg.discard_on_miss = true;
3251 qos_cfg.keep_entries = true;
3252 qos_cfg.key_cfg_iova =
3253 (size_t)priv->extract.qos_extract_param;
3254 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3255 priv->token, &qos_cfg);
3258 "Distribution can't be configured %d\n",
3264 /* Add Rule into QoS table */
3265 if (!priv->qos_index) {
3266 priv->qos_index = rte_zmalloc(0,
3267 nic_attr.qos_entries, 64);
3269 for (index = 0; index < nic_attr.qos_entries; index++) {
3270 if (!priv->qos_index[index]) {
3271 priv->qos_index[index] = 1;
3275 if (index >= nic_attr.qos_entries) {
3276 DPAA2_PMD_ERR("QoS table with %d entries full",
3277 nic_attr.qos_entries);
3280 flow->qos_rule.key_size =
3281 priv->extract.qos_key_extract.key_info.key_total_size;
3282 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3283 &flow->qos_rule, flow->tc_id,
3287 "Error in entry addition in QoS table(%d)",
3289 priv->qos_index[index] = 0;
3292 flow->qos_index = index;
3294 case RTE_FLOW_ACTION_TYPE_END:
3298 DPAA2_PMD_ERR("Invalid action type");
3306 ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3308 DPAA2_PMD_ERR("Flow entry update failed.");
3312 /* New rules are inserted. */
3314 LIST_INSERT_HEAD(&priv->flows, flow, next);
3316 while (LIST_NEXT(curr, next))
3317 curr = LIST_NEXT(curr, next);
3318 LIST_INSERT_AFTER(curr, flow, next);
/* Validate generic rte_flow attributes against this DPNI's capabilities.
 *
 * @param dpni_attr  Hardware attributes previously fetched via
 *                   dpni_get_attributes() (num_rx_tcs, fs_entries, ...).
 * @param attr       User-supplied flow attributes to validate.
 *
 * Checks performed (each failure logs and rejects the rule):
 *  - group maps to a Rx traffic class, so it must be < num_rx_tcs;
 *  - priority indexes the FS table of that TC, so it must be < fs_entries;
 *  - only ingress rules are supported: egress must be clear, ingress set.
 * NOTE(review): the error-path return statements are not visible in this
 * chunk — presumably each block returns a negative errno; verify upstream.
 */
3325 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3326 const struct rte_flow_attr *attr)
/* Group is used as the traffic-class (TC) id; bound it by the HW TC count. */
3330 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3331 DPAA2_PMD_ERR("Priority group is out of range\n");
/* Priority selects an entry slot within the TC's flow-steering table. */
3334 if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3335 DPAA2_PMD_ERR("Priority within the group is out of range\n");
/* WRIOP classification operates on Rx only; egress rules are rejected. */
3338 if (unlikely(attr->egress)) {
3340 "Flow configuration is not supported on egress side\n");
3343 if (unlikely(!attr->ingress)) {
3344 DPAA2_PMD_ERR("Ingress flag must be configured\n");
/* Validate a flow pattern list.
 *
 * @param pattern  RTE_FLOW_ITEM_TYPE_END-terminated array of match items.
 *
 * Pass 1: every item type must appear in dpaa2_supported_pattern_type[]
 * (ETH/VLAN/IPV4/IPV6/ICMP/UDP/TCP/SCTP/GRE).
 * Pass 2: every item must carry a spec — mask-only or empty items are
 * rejected, since the rule would have nothing concrete to match on.
 * NOTE(review): the is_found bookkeeping and the return statements fall in
 * elided lines; presumed to return non-zero on the first unsupported item.
 */
3351 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3353 unsigned int i, j, is_found = 0;
/* Pass 1: reject any item type the WRIOP classifier cannot parse. */
3356 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3357 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3358 if (dpaa2_supported_pattern_type[i]
3359 == pattern[j].type) {
/* Pass 2: each item must supply a spec to match against. */
3369 /* Lets verify other combinations of given pattern rules */
3370 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3371 if (!pattern[j].spec) {
/* Validate a flow action list.
 *
 * @param actions  RTE_FLOW_ACTION_TYPE_END-terminated array of actions.
 *
 * Pass 1: every action type must appear in dpaa2_supported_action_type[]
 * (QUEUE/RSS; END is in the table as well).
 * Pass 2: a further per-action check that also tolerates DROP — the
 * condition's right-hand side is elided here, so the exact constraint
 * (presumably requiring a non-NULL conf) cannot be confirmed from this view.
 */
3381 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3383 unsigned int i, j, is_found = 0;
/* Pass 1: only actions the driver implements may appear. */
3386 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3387 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3388 if (dpaa2_supported_action_type[i] == actions[j].type) {
/* Pass 2: additional validation for every non-DROP action. */
3398 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3399 if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
/* rte_flow validate() callback for the DPAA2 PMD.
 *
 * Fetches the live DPNI attributes, then runs the three independent
 * validators (attributes, pattern list, action list) in order. On any
 * failure it fills @error with EPERM and the offending object and jumps
 * to the common exit label (elided in this view) returning the negative
 * result from the failing step.
 *
 * @param dev        Ethernet device whose private data holds the MC token.
 * @param flow_attr  Flow attributes to validate.
 * @param pattern    END-terminated match-item array.
 * @param actions    END-terminated action array.
 * @param error      Filled with details on rejection; may be NULL per
 *                   rte_flow convention — rte_flow_error_set handles that.
 * @return 0 if the rule is acceptable, negative errno otherwise.
 */
3407 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3408 const struct rte_flow_attr *flow_attr,
3409 const struct rte_flow_item pattern[],
3410 const struct rte_flow_action actions[],
3411 struct rte_flow_error *error)
3413 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3414 struct dpni_attr dpni_attr;
3415 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3416 uint16_t token = priv->token;
/* Query current HW limits (TC count, table sizes) to validate against. */
3419 memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3420 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3423 "Failure to get dpni@%p attribute, err code %d\n",
3425 rte_flow_error_set(error, EPERM,
3426 RTE_FLOW_ERROR_TYPE_ATTR,
3427 flow_attr, "invalid");
3431 /* Verify input attributes */
3432 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3435 "Invalid attributes are given\n");
3436 rte_flow_error_set(error, EPERM,
3437 RTE_FLOW_ERROR_TYPE_ATTR,
3438 flow_attr, "invalid");
3439 goto not_valid_params;
3441 /* Verify input pattern list */
3442 ret = dpaa2_dev_verify_patterns(pattern);
3445 "Invalid pattern list is given\n");
3446 rte_flow_error_set(error, EPERM,
3447 RTE_FLOW_ERROR_TYPE_ITEM,
3448 pattern, "invalid");
3449 goto not_valid_params;
3451 /* Verify input action list */
3452 ret = dpaa2_dev_verify_actions(actions);
3455 "Invalid action list is given\n");
3456 rte_flow_error_set(error, EPERM,
3457 RTE_FLOW_ERROR_TYPE_ACTION,
3458 actions, "invalid");
3459 goto not_valid_params;
/* rte_flow create() callback for the DPAA2 PMD.
 *
 * Allocates the flow object plus four 256-byte DMA-able buffers — one
 * key/mask pair for the QoS (TC-selection) rule and one pair for the FS
 * (in-TC distribution) rule — initializes the IP-address bookkeeping to
 * "not present", then hands off to dpaa2_generic_flow_set() to program
 * the hardware.
 *
 * @param dev      Ethernet device.
 * @param attr     Validated flow attributes.
 * @param pattern  END-terminated match-item array.
 * @param actions  END-terminated action array.
 * @param error    Filled with EPERM details on failure.
 * @return pointer to the new flow, or NULL on allocation/programming error.
 *
 * NOTE(review): on the error path only the most recent key_iova/mask_iova
 * values are freed (the locals are reused for the FS pair); whether the
 * QoS pair leaks when the FS allocations fail depends on elided lines —
 * verify against the full source.
 */
3466 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3467 const struct rte_flow_attr *attr,
3468 const struct rte_flow_item pattern[],
3469 const struct rte_flow_action actions[],
3470 struct rte_flow_error *error)
3472 struct rte_flow *flow = NULL;
3473 size_t key_iova = 0, mask_iova = 0;
3476 flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3478 DPAA2_PMD_ERR("Failure to allocate memory for flow");
3481 /* Allocate DMA'ble memory to write the rules */
/* QoS rule key/mask: 256 bytes each, 64-byte aligned for the MC firmware. */
3482 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3485 "Memory allocation failure for rule configuration\n");
3488 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3491 "Memory allocation failure for rule configuration\n");
3495 flow->qos_rule.key_iova = key_iova;
3496 flow->qos_rule.mask_iova = mask_iova;
3498 /* Allocate DMA'ble memory to write the rules */
/* Second pair for the flow-steering (FS) rule within the selected TC. */
3499 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3502 "Memory allocation failure for rule configuration\n");
3505 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3508 "Memory allocation failure for rule configuration\n");
3512 flow->fs_rule.key_iova = key_iova;
3513 flow->fs_rule.mask_iova = mask_iova;
/* No IP address fields extracted yet; offsets marked invalid until the
 * pattern parser (dpaa2_configure_flow_*) fills them in.
 */
3515 flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3516 flow->ipaddr_rule.qos_ipsrc_offset =
3517 IP_ADDRESS_OFFSET_INVALID;
3518 flow->ipaddr_rule.qos_ipdst_offset =
3519 IP_ADDRESS_OFFSET_INVALID;
3520 flow->ipaddr_rule.fs_ipsrc_offset =
3521 IP_ADDRESS_OFFSET_INVALID;
3522 flow->ipaddr_rule.fs_ipdst_offset =
3523 IP_ADDRESS_OFFSET_INVALID;
/* Only generic rte_flow programming is implemented for now. */
3525 switch (dpaa2_filter_type) {
3526 case RTE_ETH_FILTER_GENERIC:
3527 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
/* Preserve a more specific error already set by the parser; otherwise
 * report a generic creation failure.
 */
3530 if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3531 rte_flow_error_set(error, EPERM,
3532 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3535 "Failure to create flow, return code (%d)", ret);
3536 goto creation_error;
3540 DPAA2_PMD_ERR("Filter type (%d) not supported",
3547 rte_flow_error_set(error, EPERM,
3548 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3549 NULL, "memory alloc");
/* Error cleanup: release the flow and the last-assigned DMA buffers. */
3551 rte_free((void *)flow);
3552 rte_free((void *)key_iova);
3553 rte_free((void *)mask_iova);
/* rte_flow destroy() callback for the DPAA2 PMD.
 *
 * Undoes dpaa2_generic_flow_set() for one flow: removes the HW table
 * entries matching the flow's action type, releases the QoS/FS index
 * slots in the software bitmaps, unlinks the flow from priv->flows and
 * frees its DMA rule buffers and the flow object itself.
 *
 * @param dev    Ethernet device owning the flow.
 * @param flow   Flow previously returned by dpaa2_flow_create().
 * @param error  Filled with EPERM on hardware removal failure.
 * @return 0 on success, negative on MC command failure.
 */
3559 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
3560 struct rte_flow *flow,
3561 struct rte_flow_error *error)
3564 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3565 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3567 switch (flow->action) {
3568 case RTE_FLOW_ACTION_TYPE_QUEUE:
3569 /* Remove entry from QoS table first */
3570 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
/* NOTE(review): message says "adding" but this is the removal path —
 * copy-paste error in the log string; fix in a follow-up.
 */
3574 "Error in adding entry to QoS table(%d)", ret);
/* Return the QoS table slot to the free pool. */
3577 priv->qos_index[flow->qos_index] = 0;
3579 /* Then remove entry from FS table */
3580 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3581 flow->tc_id, &flow->fs_rule);
/* NOTE(review): "entry addition" on a removal path — same string bug. */
3584 "Error in entry addition in FS table(%d)", ret);
3587 priv->fs_index[flow->fs_index] = 0;
3589 case RTE_FLOW_ACTION_TYPE_RSS:
/* RSS flows only installed a QoS entry; no FS entry to remove. */
3590 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3594 "Error in entry addition in QoS table(%d)", ret);
3597 priv->qos_index[flow->qos_index] = 0;
3601 "Action type (%d) is not supported", flow->action);
/* HW state cleared; now drop the SW bookkeeping and DMA buffers. */
3606 LIST_REMOVE(flow, next);
3607 rte_free((void *)(size_t)flow->qos_rule.key_iova);
3608 rte_free((void *)(size_t)flow->qos_rule.mask_iova);
3609 rte_free((void *)(size_t)flow->fs_rule.key_iova);
3610 rte_free((void *)(size_t)flow->fs_rule.mask_iova);
3611 /* Now free the flow */
3616 rte_flow_error_set(error, EPERM,
3617 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3623 * Destroy user-configured flow rules.
3625 * This function skips internal flows rules.
3627 * @see rte_flow_flush()
3631 dpaa2_flow_flush(struct rte_eth_dev *dev,
3632 struct rte_flow_error *error)
3634 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3635 struct rte_flow *flow = LIST_FIRST(&priv->flows);
/* Capture the successor before destroy unlinks and frees the node. */
3638 struct rte_flow *next = LIST_NEXT(flow, next);
/* NOTE(review): the destroy return value is discarded, so a failed HW
 * removal is silently skipped over — confirm whether flush should
 * propagate the first error to the caller.
 */
3640 dpaa2_flow_destroy(dev, flow, error);
/* rte_flow query() callback — not implemented for DPAA2.
 * All parameters are intentionally unused; the body (elided here)
 * presumably returns success/ENOTSUP without doing any work.
 */
3647 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
3648 struct rte_flow *flow __rte_unused,
3649 const struct rte_flow_action *actions __rte_unused,
3650 void *data __rte_unused,
3651 struct rte_flow_error *error __rte_unused)
3657 * Clean up all flow rules.
3659 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
3660 * rules regardless of whether they are internal or user-configured.
3663 * Pointer to private structure.
3666 dpaa2_flow_clean(struct rte_eth_dev *dev)
3668 struct rte_flow *flow;
3669 struct dpaa2_dev_priv *priv = dev->data->dev_private;
/* Repeatedly destroy the list head until the flow list is empty;
 * errors are ignored (error arg is NULL) since this is teardown.
 */
3671 while ((flow = LIST_FIRST(&priv->flows)))
3672 dpaa2_flow_destroy(dev, flow, NULL);
3675 const struct rte_flow_ops dpaa2_flow_ops = {
3676 .create = dpaa2_flow_create,
3677 .validate = dpaa2_flow_validate,
3678 .destroy = dpaa2_flow_destroy,
3679 .flush = dpaa2_flow_flush,
3680 .query = dpaa2_flow_query,