1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2020 NXP
13 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
25 /* Workaround to discriminate UDP/TCP/SCTP flows
26 * by the L3 next-protocol field, because
27 * MC/WRIOP cannot identify the L4 protocol
28 * from the L4 ports alone.
30 int mc_l4_port_identification;
32 static char *dpaa2_flow_control_log;
33 static int dpaa2_flow_miss_flow_id =
36 #define FIXED_ENTRY_SIZE 54
38 enum flow_rule_ipaddr_type {
44 struct flow_rule_ipaddr {
45 enum flow_rule_ipaddr_type ipaddr_type;
53 LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
54 struct dpni_rule_cfg qos_rule;
55 struct dpni_rule_cfg fs_rule;
56 uint8_t qos_real_key_size;
57 uint8_t fs_real_key_size;
58 uint8_t tc_id; /**< Traffic Class ID. */
59 uint8_t tc_index; /**< Index within this Traffic Class. */
60 enum rte_flow_action_type action;
61 /* Special case for IP addresses: record the offset
64 struct flow_rule_ipaddr ipaddr_rule;
65 struct dpni_fs_action_cfg action_cfg;
69 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
70 RTE_FLOW_ITEM_TYPE_END,
71 RTE_FLOW_ITEM_TYPE_ETH,
72 RTE_FLOW_ITEM_TYPE_VLAN,
73 RTE_FLOW_ITEM_TYPE_IPV4,
74 RTE_FLOW_ITEM_TYPE_IPV6,
75 RTE_FLOW_ITEM_TYPE_ICMP,
76 RTE_FLOW_ITEM_TYPE_UDP,
77 RTE_FLOW_ITEM_TYPE_TCP,
78 RTE_FLOW_ITEM_TYPE_SCTP,
79 RTE_FLOW_ITEM_TYPE_GRE,
83 enum rte_flow_action_type dpaa2_supported_action_type[] = {
84 RTE_FLOW_ACTION_TYPE_END,
85 RTE_FLOW_ACTION_TYPE_QUEUE,
86 RTE_FLOW_ACTION_TYPE_RSS
89 /* Max of enum rte_flow_item_type + 1, for both IPv4 and IPv6 */
90 #define DPAA2_FLOW_ITEM_TYPE_GENERIC_IP (RTE_FLOW_ITEM_TYPE_META + 1)
92 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
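/*
 * Illustrative sketch only (not part of the driver): given the pattern
 * items and actions listed above, an application could, for example,
 * steer UDP packets with destination port 0x1234 to queue 1 roughly as
 * follows (assuming the port is already configured and 'port_id' is a
 * valid DPAA2 ethdev port):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.dst_port = RTE_BE16(0x1234) };
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.dst_port = RTE_BE16(0xffff) };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * The equivalent testpmd command would be:
 *	flow create 0 ingress pattern eth / ipv4 / udp dst is 0x1234 /
 *		end actions queue index 1 / end
 */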
95 static const struct rte_flow_item_eth dpaa2_flow_item_eth_mask = {
96 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
97 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
98 .type = RTE_BE16(0xffff),
101 static const struct rte_flow_item_vlan dpaa2_flow_item_vlan_mask = {
102 .tci = RTE_BE16(0xffff),
105 static const struct rte_flow_item_ipv4 dpaa2_flow_item_ipv4_mask = {
106 .hdr.src_addr = RTE_BE32(0xffffffff),
107 .hdr.dst_addr = RTE_BE32(0xffffffff),
108 .hdr.next_proto_id = 0xff,
111 static const struct rte_flow_item_ipv6 dpaa2_flow_item_ipv6_mask = {
114 "\xff\xff\xff\xff\xff\xff\xff\xff"
115 "\xff\xff\xff\xff\xff\xff\xff\xff",
117 "\xff\xff\xff\xff\xff\xff\xff\xff"
118 "\xff\xff\xff\xff\xff\xff\xff\xff",
123 static const struct rte_flow_item_icmp dpaa2_flow_item_icmp_mask = {
124 .hdr.icmp_type = 0xff,
125 .hdr.icmp_code = 0xff,
128 static const struct rte_flow_item_udp dpaa2_flow_item_udp_mask = {
130 .src_port = RTE_BE16(0xffff),
131 .dst_port = RTE_BE16(0xffff),
135 static const struct rte_flow_item_tcp dpaa2_flow_item_tcp_mask = {
137 .src_port = RTE_BE16(0xffff),
138 .dst_port = RTE_BE16(0xffff),
142 static const struct rte_flow_item_sctp dpaa2_flow_item_sctp_mask = {
144 .src_port = RTE_BE16(0xffff),
145 .dst_port = RTE_BE16(0xffff),
149 static const struct rte_flow_item_gre dpaa2_flow_item_gre_mask = {
150 .protocol = RTE_BE16(0xffff),
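/* The helpers below are only active when the flow-control debug log is
 * enabled: they translate a (protocol, field) extract pair into a
 * readable string and dump the QoS/FS table extracts and rule entries.
 */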
155 static inline void dpaa2_prot_field_string(
156 enum net_prot prot, uint32_t field,
159 if (!dpaa2_flow_control_log)
162 if (prot == NET_PROT_ETH) {
163 strcpy(string, "eth");
164 if (field == NH_FLD_ETH_DA)
165 strcat(string, ".dst");
166 else if (field == NH_FLD_ETH_SA)
167 strcat(string, ".src");
168 else if (field == NH_FLD_ETH_TYPE)
169 strcat(string, ".type");
171 strcat(string, ".unknown field");
172 } else if (prot == NET_PROT_VLAN) {
173 strcpy(string, "vlan");
174 if (field == NH_FLD_VLAN_TCI)
175 strcat(string, ".tci");
177 strcat(string, ".unknown field");
178 } else if (prot == NET_PROT_IP) {
179 strcpy(string, "ip");
180 if (field == NH_FLD_IP_SRC)
181 strcat(string, ".src");
182 else if (field == NH_FLD_IP_DST)
183 strcat(string, ".dst");
184 else if (field == NH_FLD_IP_PROTO)
185 strcat(string, ".proto");
187 strcat(string, ".unknown field");
188 } else if (prot == NET_PROT_TCP) {
189 strcpy(string, "tcp");
190 if (field == NH_FLD_TCP_PORT_SRC)
191 strcat(string, ".src");
192 else if (field == NH_FLD_TCP_PORT_DST)
193 strcat(string, ".dst");
195 strcat(string, ".unknown field");
196 } else if (prot == NET_PROT_UDP) {
197 strcpy(string, "udp");
198 if (field == NH_FLD_UDP_PORT_SRC)
199 strcat(string, ".src");
200 else if (field == NH_FLD_UDP_PORT_DST)
201 strcat(string, ".dst");
203 strcat(string, ".unknown field");
204 } else if (prot == NET_PROT_ICMP) {
205 strcpy(string, "icmp");
206 if (field == NH_FLD_ICMP_TYPE)
207 strcat(string, ".type");
208 else if (field == NH_FLD_ICMP_CODE)
209 strcat(string, ".code");
211 strcat(string, ".unknown field");
212 } else if (prot == NET_PROT_SCTP) {
213 strcpy(string, "sctp");
214 if (field == NH_FLD_SCTP_PORT_SRC)
215 strcat(string, ".src");
216 else if (field == NH_FLD_SCTP_PORT_DST)
217 strcat(string, ".dst");
219 strcat(string, ".unknown field");
220 } else if (prot == NET_PROT_GRE) {
221 strcpy(string, "gre");
222 if (field == NH_FLD_GRE_TYPE)
223 strcat(string, ".type");
225 strcat(string, ".unknown field");
227 strcpy(string, "unknown protocol");
231 static inline void dpaa2_flow_qos_table_extracts_log(
232 const struct dpaa2_dev_priv *priv)
237 if (!dpaa2_flow_control_log)
240 printf("Setup QoS table: number of extracts: %d\r\n",
241 priv->extract.qos_key_extract.dpkg.num_extracts);
242 for (idx = 0; idx < priv->extract.qos_key_extract.dpkg.num_extracts;
244 dpaa2_prot_field_string(priv->extract.qos_key_extract.dpkg
245 .extracts[idx].extract.from_hdr.prot,
246 priv->extract.qos_key_extract.dpkg.extracts[idx]
247 .extract.from_hdr.field,
249 printf("%s", string);
250 if ((idx + 1) < priv->extract.qos_key_extract.dpkg.num_extracts)
256 static inline void dpaa2_flow_fs_table_extracts_log(
257 const struct dpaa2_dev_priv *priv, int tc_id)
262 if (!dpaa2_flow_control_log)
265 printf("Setup FS table: number of extracts of TC[%d]: %d\r\n",
266 tc_id, priv->extract.tc_key_extract[tc_id]
268 for (idx = 0; idx < priv->extract.tc_key_extract[tc_id]
269 .dpkg.num_extracts; idx++) {
270 dpaa2_prot_field_string(priv->extract.tc_key_extract[tc_id]
271 .dpkg.extracts[idx].extract.from_hdr.prot,
272 priv->extract.tc_key_extract[tc_id].dpkg.extracts[idx]
273 .extract.from_hdr.field,
275 printf("%s", string);
276 if ((idx + 1) < priv->extract.tc_key_extract[tc_id]
283 static inline void dpaa2_flow_qos_entry_log(
284 const char *log_info, const struct rte_flow *flow, int qos_index)
289 if (!dpaa2_flow_control_log)
292 printf("\r\n%s QoS entry[%d] for TC[%d], extracts size is %d\r\n",
293 log_info, qos_index, flow->tc_id, flow->qos_real_key_size);
295 key = (uint8_t *)(size_t)flow->qos_rule.key_iova;
296 mask = (uint8_t *)(size_t)flow->qos_rule.mask_iova;
299 for (idx = 0; idx < flow->qos_real_key_size; idx++)
300 printf("%02x ", key[idx]);
302 printf("\r\nmask:\r\n");
303 for (idx = 0; idx < flow->qos_real_key_size; idx++)
304 printf("%02x ", mask[idx]);
306 printf("\r\n%s QoS ipsrc: %d, ipdst: %d\r\n", log_info,
307 flow->ipaddr_rule.qos_ipsrc_offset,
308 flow->ipaddr_rule.qos_ipdst_offset);
311 static inline void dpaa2_flow_fs_entry_log(
312 const char *log_info, const struct rte_flow *flow)
317 if (!dpaa2_flow_control_log)
320 printf("\r\n%s FS/TC entry[%d] of TC[%d], extracts size is %d\r\n",
321 log_info, flow->tc_index, flow->tc_id, flow->fs_real_key_size);
323 key = (uint8_t *)(size_t)flow->fs_rule.key_iova;
324 mask = (uint8_t *)(size_t)flow->fs_rule.mask_iova;
327 for (idx = 0; idx < flow->fs_real_key_size; idx++)
328 printf("%02x ", key[idx]);
330 printf("\r\nmask:\r\n");
331 for (idx = 0; idx < flow->fs_real_key_size; idx++)
332 printf("%02x ", mask[idx]);
334 printf("\r\n%s FS ipsrc: %d, ipdst: %d\r\n", log_info,
335 flow->ipaddr_rule.fs_ipsrc_offset,
336 flow->ipaddr_rule.fs_ipdst_offset);
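/* Record the size and byte offset of the extract at 'index' within the
 * flattened key layout.
 */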
339 static inline void dpaa2_flow_extract_key_set(
340 struct dpaa2_key_info *key_info, int index, uint8_t size)
342 key_info->key_size[index] = size;
344 key_info->key_offset[index] =
345 key_info->key_offset[index - 1] +
346 key_info->key_size[index - 1];
348 key_info->key_offset[index] = 0;
350 key_info->key_total_size += size;
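/* Append a header-field extract to the key generation profile.
 * IP SRC/DST extracts are kept at the tail of the profile (their size
 * differs between IPv4 and IPv6), so a new non-IP extract is inserted
 * just before them and the IP address offsets are updated accordingly.
 */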
353 static int dpaa2_flow_extract_add(
354 struct dpaa2_key_extract *key_extract,
356 uint32_t field, uint8_t field_size)
358 int index, ip_src = -1, ip_dst = -1;
359 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
360 struct dpaa2_key_info *key_info = &key_extract->key_info;
362 if (dpkg->num_extracts >=
363 DPKG_MAX_NUM_OF_EXTRACTS) {
364 DPAA2_PMD_WARN("Number of extracts overflows");
367 /* Before the reorder, the IP SRC and IP DST extracts are already last
370 for (index = 0; index < dpkg->num_extracts; index++) {
371 if (dpkg->extracts[index].extract.from_hdr.prot ==
373 if (dpkg->extracts[index].extract.from_hdr.field ==
377 if (dpkg->extracts[index].extract.from_hdr.field ==
385 RTE_ASSERT((ip_src + 2) >= dpkg->num_extracts);
388 RTE_ASSERT((ip_dst + 2) >= dpkg->num_extracts);
390 if (prot == NET_PROT_IP &&
391 (field == NH_FLD_IP_SRC ||
392 field == NH_FLD_IP_DST)) {
393 index = dpkg->num_extracts;
395 if (ip_src >= 0 && ip_dst >= 0)
396 index = dpkg->num_extracts - 2;
397 else if (ip_src >= 0 || ip_dst >= 0)
398 index = dpkg->num_extracts - 1;
400 index = dpkg->num_extracts;
403 dpkg->extracts[index].type = DPKG_EXTRACT_FROM_HDR;
404 dpkg->extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
405 dpkg->extracts[index].extract.from_hdr.prot = prot;
406 dpkg->extracts[index].extract.from_hdr.field = field;
407 if (prot == NET_PROT_IP &&
408 (field == NH_FLD_IP_SRC ||
409 field == NH_FLD_IP_DST)) {
410 dpaa2_flow_extract_key_set(key_info, index, 0);
412 dpaa2_flow_extract_key_set(key_info, index, field_size);
415 if (prot == NET_PROT_IP) {
416 if (field == NH_FLD_IP_SRC) {
417 if (key_info->ipv4_dst_offset >= 0) {
418 key_info->ipv4_src_offset =
419 key_info->ipv4_dst_offset +
420 NH_FLD_IPV4_ADDR_SIZE;
422 key_info->ipv4_src_offset =
423 key_info->key_offset[index - 1] +
424 key_info->key_size[index - 1];
426 if (key_info->ipv6_dst_offset >= 0) {
427 key_info->ipv6_src_offset =
428 key_info->ipv6_dst_offset +
429 NH_FLD_IPV6_ADDR_SIZE;
431 key_info->ipv6_src_offset =
432 key_info->key_offset[index - 1] +
433 key_info->key_size[index - 1];
435 } else if (field == NH_FLD_IP_DST) {
436 if (key_info->ipv4_src_offset >= 0) {
437 key_info->ipv4_dst_offset =
438 key_info->ipv4_src_offset +
439 NH_FLD_IPV4_ADDR_SIZE;
441 key_info->ipv4_dst_offset =
442 key_info->key_offset[index - 1] +
443 key_info->key_size[index - 1];
445 if (key_info->ipv6_src_offset >= 0) {
446 key_info->ipv6_dst_offset =
447 key_info->ipv6_src_offset +
448 NH_FLD_IPV6_ADDR_SIZE;
450 key_info->ipv6_dst_offset =
451 key_info->key_offset[index - 1] +
452 key_info->key_size[index - 1];
457 if (index == dpkg->num_extracts) {
458 dpkg->num_extracts++;
464 dpkg->extracts[ip_src].type =
465 DPKG_EXTRACT_FROM_HDR;
466 dpkg->extracts[ip_src].extract.from_hdr.type =
468 dpkg->extracts[ip_src].extract.from_hdr.prot =
470 dpkg->extracts[ip_src].extract.from_hdr.field =
472 dpaa2_flow_extract_key_set(key_info, ip_src, 0);
473 key_info->ipv4_src_offset += field_size;
474 key_info->ipv6_src_offset += field_size;
478 dpkg->extracts[ip_dst].type =
479 DPKG_EXTRACT_FROM_HDR;
480 dpkg->extracts[ip_dst].extract.from_hdr.type =
482 dpkg->extracts[ip_dst].extract.from_hdr.prot =
484 dpkg->extracts[ip_dst].extract.from_hdr.field =
486 dpaa2_flow_extract_key_set(key_info, ip_dst, 0);
487 key_info->ipv4_dst_offset += field_size;
488 key_info->ipv6_dst_offset += field_size;
491 dpkg->num_extracts++;
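/* Build a raw (offset/size based) extract profile; raw extracts cannot
 * be combined with header-field extracts.
 */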
496 static int dpaa2_flow_extract_add_raw(struct dpaa2_key_extract *key_extract,
499 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
500 struct dpaa2_key_info *key_info = &key_extract->key_info;
501 int last_extract_size, index;
503 if (dpkg->num_extracts != 0 && dpkg->extracts[0].type !=
504 DPKG_EXTRACT_FROM_DATA) {
505 DPAA2_PMD_WARN("RAW extract cannot be combined with others");
509 last_extract_size = (size % DPAA2_FLOW_MAX_KEY_SIZE);
510 dpkg->num_extracts = (size / DPAA2_FLOW_MAX_KEY_SIZE);
511 if (last_extract_size)
512 dpkg->num_extracts++;
514 last_extract_size = DPAA2_FLOW_MAX_KEY_SIZE;
516 for (index = 0; index < dpkg->num_extracts; index++) {
517 dpkg->extracts[index].type = DPKG_EXTRACT_FROM_DATA;
518 if (index == dpkg->num_extracts - 1)
519 dpkg->extracts[index].extract.from_data.size =
522 dpkg->extracts[index].extract.from_data.size =
523 DPAA2_FLOW_MAX_KEY_SIZE;
524 dpkg->extracts[index].extract.from_data.offset =
525 DPAA2_FLOW_MAX_KEY_SIZE * index;
528 key_info->key_total_size = size;
532 /* Protocol discrimination.
533 * Discriminate IPv4/IPv6/VLAN by the Ethernet type.
534 * Discriminate UDP/TCP/ICMP by the IP next-protocol field.
537 dpaa2_flow_proto_discrimination_extract(
538 struct dpaa2_key_extract *key_extract,
539 enum rte_flow_item_type type)
541 if (type == RTE_FLOW_ITEM_TYPE_ETH) {
542 return dpaa2_flow_extract_add(
543 key_extract, NET_PROT_ETH,
546 } else if (type == (enum rte_flow_item_type)
547 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
548 return dpaa2_flow_extract_add(
549 key_extract, NET_PROT_IP,
551 NH_FLD_IP_PROTO_SIZE);
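/* Look up an extract by (prot, field) and return its index, or the byte
 * offset of its data within the rule key; IPv4/IPv6 addresses use the
 * dedicated offsets tracked in dpaa2_key_info.
 */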
557 static inline int dpaa2_flow_extract_search(
558 struct dpkg_profile_cfg *dpkg,
559 enum net_prot prot, uint32_t field)
563 for (i = 0; i < dpkg->num_extracts; i++) {
564 if (dpkg->extracts[i].extract.from_hdr.prot == prot &&
565 dpkg->extracts[i].extract.from_hdr.field == field) {
573 static inline int dpaa2_flow_extract_key_offset(
574 struct dpaa2_key_extract *key_extract,
575 enum net_prot prot, uint32_t field)
578 struct dpkg_profile_cfg *dpkg = &key_extract->dpkg;
579 struct dpaa2_key_info *key_info = &key_extract->key_info;
581 if (prot == NET_PROT_IPV4 ||
582 prot == NET_PROT_IPV6)
583 i = dpaa2_flow_extract_search(dpkg, NET_PROT_IP, field);
585 i = dpaa2_flow_extract_search(dpkg, prot, field);
588 if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_SRC)
589 return key_info->ipv4_src_offset;
590 else if (prot == NET_PROT_IPV4 && field == NH_FLD_IP_DST)
591 return key_info->ipv4_dst_offset;
592 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_SRC)
593 return key_info->ipv6_src_offset;
594 else if (prot == NET_PROT_IPV6 && field == NH_FLD_IP_DST)
595 return key_info->ipv6_dst_offset;
597 return key_info->key_offset[i];
603 struct proto_discrimination {
604 enum rte_flow_item_type type;
612 dpaa2_flow_proto_discrimination_rule(
613 struct dpaa2_dev_priv *priv, struct rte_flow *flow,
614 struct proto_discrimination proto, int group)
624 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
626 field = NH_FLD_ETH_TYPE;
627 } else if (proto.type == DPAA2_FLOW_ITEM_TYPE_GENERIC_IP) {
629 field = NH_FLD_IP_PROTO;
632 "Only Eth and IP support to discriminate next proto.");
636 offset = dpaa2_flow_extract_key_offset(&priv->extract.qos_key_extract,
639 DPAA2_PMD_ERR("QoS prot %d field %d extract failed",
643 key_iova = flow->qos_rule.key_iova + offset;
644 mask_iova = flow->qos_rule.mask_iova + offset;
645 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
646 eth_type = proto.eth_type;
647 memcpy((void *)key_iova, (const void *)(ð_type),
650 memcpy((void *)mask_iova, (const void *)(ð_type),
653 ip_proto = proto.ip_proto;
654 memcpy((void *)key_iova, (const void *)(&ip_proto),
657 memcpy((void *)mask_iova, (const void *)(&ip_proto),
661 offset = dpaa2_flow_extract_key_offset(
662 &priv->extract.tc_key_extract[group],
665 DPAA2_PMD_ERR("FS prot %d field %d extract failed",
669 key_iova = flow->fs_rule.key_iova + offset;
670 mask_iova = flow->fs_rule.mask_iova + offset;
672 if (proto.type == RTE_FLOW_ITEM_TYPE_ETH) {
673 eth_type = proto.eth_type;
674 memcpy((void *)key_iova, (const void *)(ð_type),
677 memcpy((void *)mask_iova, (const void *)(ð_type),
680 ip_proto = proto.ip_proto;
681 memcpy((void *)key_iova, (const void *)(&ip_proto),
684 memcpy((void *)mask_iova, (const void *)(&ip_proto),
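/* Copy the key and mask bytes for (prot, field) into the rule at the
 * offset resolved from the extract profile.
 */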
692 dpaa2_flow_rule_data_set(
693 struct dpaa2_key_extract *key_extract,
694 struct dpni_rule_cfg *rule,
695 enum net_prot prot, uint32_t field,
696 const void *key, const void *mask, int size)
698 int offset = dpaa2_flow_extract_key_offset(key_extract,
702 DPAA2_PMD_ERR("prot %d, field %d extract failed",
707 memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
708 memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
714 dpaa2_flow_rule_data_set_raw(struct dpni_rule_cfg *rule,
715 const void *key, const void *mask, int size)
719 memcpy((void *)(size_t)(rule->key_iova + offset), key, size);
720 memcpy((void *)(size_t)(rule->mask_iova + offset), mask, size);
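/* Move the key/mask bytes of one IP address field from its old offset
 * to the tail position resolved for IPv4 or IPv6.
 */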
726 _dpaa2_flow_rule_move_ipaddr_tail(
727 struct dpaa2_key_extract *key_extract,
728 struct dpni_rule_cfg *rule, int src_offset,
729 uint32_t field, bool ipv4)
737 char tmp[NH_FLD_IPV6_ADDR_SIZE];
739 if (field != NH_FLD_IP_SRC &&
740 field != NH_FLD_IP_DST) {
741 DPAA2_PMD_ERR("Field of IP addr reorder must be IP SRC/DST");
745 prot = NET_PROT_IPV4;
747 prot = NET_PROT_IPV6;
748 dst_offset = dpaa2_flow_extract_key_offset(key_extract,
750 if (dst_offset < 0) {
751 DPAA2_PMD_ERR("Field %d reorder extract failed", field);
754 key_src = rule->key_iova + src_offset;
755 mask_src = rule->mask_iova + src_offset;
756 key_dst = rule->key_iova + dst_offset;
757 mask_dst = rule->mask_iova + dst_offset;
759 len = sizeof(rte_be32_t);
761 len = NH_FLD_IPV6_ADDR_SIZE;
763 memcpy(tmp, (char *)key_src, len);
764 memset((char *)key_src, 0, len);
765 memcpy((char *)key_dst, tmp, len);
767 memcpy(tmp, (char *)mask_src, len);
768 memset((char *)mask_src, 0, len);
769 memcpy((char *)mask_dst, tmp, len);
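/* Re-position the IP source/destination address bytes of a flow's QoS
 * and FS rules after the extract profile has been extended, so that the
 * address data stays at the tail of the key.
 */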
775 dpaa2_flow_rule_move_ipaddr_tail(
776 struct rte_flow *flow, struct dpaa2_dev_priv *priv,
782 if (flow->ipaddr_rule.ipaddr_type == FLOW_NONE_IPADDR)
785 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR)
786 prot = NET_PROT_IPV4;
788 prot = NET_PROT_IPV6;
790 if (flow->ipaddr_rule.qos_ipsrc_offset >= 0) {
791 ret = _dpaa2_flow_rule_move_ipaddr_tail(
792 &priv->extract.qos_key_extract,
794 flow->ipaddr_rule.qos_ipsrc_offset,
795 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
797 DPAA2_PMD_ERR("QoS src address reorder failed");
800 flow->ipaddr_rule.qos_ipsrc_offset =
801 dpaa2_flow_extract_key_offset(
802 &priv->extract.qos_key_extract,
803 prot, NH_FLD_IP_SRC);
806 if (flow->ipaddr_rule.qos_ipdst_offset >= 0) {
807 ret = _dpaa2_flow_rule_move_ipaddr_tail(
808 &priv->extract.qos_key_extract,
810 flow->ipaddr_rule.qos_ipdst_offset,
811 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
813 DPAA2_PMD_ERR("QoS dst address reorder failed");
816 flow->ipaddr_rule.qos_ipdst_offset =
817 dpaa2_flow_extract_key_offset(
818 &priv->extract.qos_key_extract,
819 prot, NH_FLD_IP_DST);
822 if (flow->ipaddr_rule.fs_ipsrc_offset >= 0) {
823 ret = _dpaa2_flow_rule_move_ipaddr_tail(
824 &priv->extract.tc_key_extract[fs_group],
826 flow->ipaddr_rule.fs_ipsrc_offset,
827 NH_FLD_IP_SRC, prot == NET_PROT_IPV4);
829 DPAA2_PMD_ERR("FS src address reorder failed");
832 flow->ipaddr_rule.fs_ipsrc_offset =
833 dpaa2_flow_extract_key_offset(
834 &priv->extract.tc_key_extract[fs_group],
835 prot, NH_FLD_IP_SRC);
837 if (flow->ipaddr_rule.fs_ipdst_offset >= 0) {
838 ret = _dpaa2_flow_rule_move_ipaddr_tail(
839 &priv->extract.tc_key_extract[fs_group],
841 flow->ipaddr_rule.fs_ipdst_offset,
842 NH_FLD_IP_DST, prot == NET_PROT_IPV4);
844 DPAA2_PMD_ERR("FS dst address reorder failed");
847 flow->ipaddr_rule.fs_ipdst_offset =
848 dpaa2_flow_extract_key_offset(
849 &priv->extract.tc_key_extract[fs_group],
850 prot, NH_FLD_IP_DST);
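/* Check that every field set in the item mask is one the hardware
 * extract supports; returns non-zero when an unsupported field is
 * masked.
 */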
857 dpaa2_flow_extract_support(
858 const uint8_t *mask_src,
859 enum rte_flow_item_type type)
863 const char *mask_support = NULL;
866 case RTE_FLOW_ITEM_TYPE_ETH:
867 mask_support = (const char *)&dpaa2_flow_item_eth_mask;
868 size = sizeof(struct rte_flow_item_eth);
870 case RTE_FLOW_ITEM_TYPE_VLAN:
871 mask_support = (const char *)&dpaa2_flow_item_vlan_mask;
872 size = sizeof(struct rte_flow_item_vlan);
874 case RTE_FLOW_ITEM_TYPE_IPV4:
875 mask_support = (const char *)&dpaa2_flow_item_ipv4_mask;
876 size = sizeof(struct rte_flow_item_ipv4);
878 case RTE_FLOW_ITEM_TYPE_IPV6:
879 mask_support = (const char *)&dpaa2_flow_item_ipv6_mask;
880 size = sizeof(struct rte_flow_item_ipv6);
882 case RTE_FLOW_ITEM_TYPE_ICMP:
883 mask_support = (const char *)&dpaa2_flow_item_icmp_mask;
884 size = sizeof(struct rte_flow_item_icmp);
886 case RTE_FLOW_ITEM_TYPE_UDP:
887 mask_support = (const char *)&dpaa2_flow_item_udp_mask;
888 size = sizeof(struct rte_flow_item_udp);
890 case RTE_FLOW_ITEM_TYPE_TCP:
891 mask_support = (const char *)&dpaa2_flow_item_tcp_mask;
892 size = sizeof(struct rte_flow_item_tcp);
894 case RTE_FLOW_ITEM_TYPE_SCTP:
895 mask_support = (const char *)&dpaa2_flow_item_sctp_mask;
896 size = sizeof(struct rte_flow_item_sctp);
898 case RTE_FLOW_ITEM_TYPE_GRE:
899 mask_support = (const char *)&dpaa2_flow_item_gre_mask;
900 size = sizeof(struct rte_flow_item_gre);
906 memcpy(mask, mask_support, size);
908 for (i = 0; i < size; i++)
909 mask[i] = (mask[i] | mask_src[i]);
911 if (memcmp(mask, mask_support, size))
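/* Build the QoS/FS extracts and rule data for an ETH pattern item:
 * source MAC, destination MAC and Ethernet type are matched when their
 * mask bits are set.
 */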
918 dpaa2_configure_flow_eth(struct rte_flow *flow,
919 struct rte_eth_dev *dev,
920 const struct rte_flow_attr *attr,
921 const struct rte_flow_item *pattern,
922 const struct rte_flow_action actions[] __rte_unused,
923 struct rte_flow_error *error __rte_unused,
924 int *device_configured)
929 const struct rte_flow_item_eth *spec, *mask;
931 /* TODO: Currently upper bound of range parameter is not implemented */
932 const struct rte_flow_item_eth *last __rte_unused;
933 struct dpaa2_dev_priv *priv = dev->data->dev_private;
934 const char zero_cmp[RTE_ETHER_ADDR_LEN] = {0};
938 /* Parse pattern list to get the matching parameters */
939 spec = (const struct rte_flow_item_eth *)pattern->spec;
940 last = (const struct rte_flow_item_eth *)pattern->last;
941 mask = (const struct rte_flow_item_eth *)
942 (pattern->mask ? pattern->mask : &dpaa2_flow_item_eth_mask);
944 /* No field of the Ethernet header is of interest;
945 * only the Ethernet protocol itself matters.
947 DPAA2_PMD_WARN("No pattern spec for Eth flow, skipping");
951 /* Get traffic class index and flow id to be configured */
953 flow->tc_index = attr->priority;
955 if (dpaa2_flow_extract_support((const uint8_t *)mask,
956 RTE_FLOW_ITEM_TYPE_ETH)) {
957 DPAA2_PMD_WARN("Extract field(s) of Ethernet not supported.");
962 if (memcmp((const char *)&mask->src, zero_cmp, RTE_ETHER_ADDR_LEN)) {
963 index = dpaa2_flow_extract_search(
964 &priv->extract.qos_key_extract.dpkg,
965 NET_PROT_ETH, NH_FLD_ETH_SA);
967 ret = dpaa2_flow_extract_add(
968 &priv->extract.qos_key_extract,
969 NET_PROT_ETH, NH_FLD_ETH_SA,
972 DPAA2_PMD_ERR("QoS Extract add ETH_SA failed.");
976 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
978 index = dpaa2_flow_extract_search(
979 &priv->extract.tc_key_extract[group].dpkg,
980 NET_PROT_ETH, NH_FLD_ETH_SA);
982 ret = dpaa2_flow_extract_add(
983 &priv->extract.tc_key_extract[group],
984 NET_PROT_ETH, NH_FLD_ETH_SA,
987 DPAA2_PMD_ERR("FS Extract add ETH_SA failed.");
990 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
993 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
996 "Move ipaddr before ETH_SA rule set failed");
1000 ret = dpaa2_flow_rule_data_set(
1001 &priv->extract.qos_key_extract,
1005 &spec->src.addr_bytes,
1006 &mask->src.addr_bytes,
1007 sizeof(struct rte_ether_addr));
1009 DPAA2_PMD_ERR("QoS NH_FLD_ETH_SA rule data set failed");
1013 ret = dpaa2_flow_rule_data_set(
1014 &priv->extract.tc_key_extract[group],
1018 &spec->src.addr_bytes,
1019 &mask->src.addr_bytes,
1020 sizeof(struct rte_ether_addr));
1022 DPAA2_PMD_ERR("FS NH_FLD_ETH_SA rule data set failed");
1027 if (memcmp((const char *)&mask->dst, zero_cmp, RTE_ETHER_ADDR_LEN)) {
1028 index = dpaa2_flow_extract_search(
1029 &priv->extract.qos_key_extract.dpkg,
1030 NET_PROT_ETH, NH_FLD_ETH_DA);
1032 ret = dpaa2_flow_extract_add(
1033 &priv->extract.qos_key_extract,
1034 NET_PROT_ETH, NH_FLD_ETH_DA,
1035 RTE_ETHER_ADDR_LEN);
1037 DPAA2_PMD_ERR("QoS Extract add ETH_DA failed.");
1041 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1044 index = dpaa2_flow_extract_search(
1045 &priv->extract.tc_key_extract[group].dpkg,
1046 NET_PROT_ETH, NH_FLD_ETH_DA);
1048 ret = dpaa2_flow_extract_add(
1049 &priv->extract.tc_key_extract[group],
1050 NET_PROT_ETH, NH_FLD_ETH_DA,
1051 RTE_ETHER_ADDR_LEN);
1053 DPAA2_PMD_ERR("FS Extract add ETH_DA failed.");
1057 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1060 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1063 "Move ipaddr before ETH DA rule set failed");
1067 ret = dpaa2_flow_rule_data_set(
1068 &priv->extract.qos_key_extract,
1072 &spec->dst.addr_bytes,
1073 &mask->dst.addr_bytes,
1074 sizeof(struct rte_ether_addr));
1076 DPAA2_PMD_ERR("QoS NH_FLD_ETH_DA rule data set failed");
1080 ret = dpaa2_flow_rule_data_set(
1081 &priv->extract.tc_key_extract[group],
1085 &spec->dst.addr_bytes,
1086 &mask->dst.addr_bytes,
1087 sizeof(struct rte_ether_addr));
1089 DPAA2_PMD_ERR("FS NH_FLD_ETH_DA rule data set failed");
1094 if (memcmp((const char *)&mask->type, zero_cmp, sizeof(rte_be16_t))) {
1095 index = dpaa2_flow_extract_search(
1096 &priv->extract.qos_key_extract.dpkg,
1097 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1099 ret = dpaa2_flow_extract_add(
1100 &priv->extract.qos_key_extract,
1101 NET_PROT_ETH, NH_FLD_ETH_TYPE,
1102 RTE_ETHER_TYPE_LEN);
1104 DPAA2_PMD_ERR("QoS Extract add ETH_TYPE failed.");
1108 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1110 index = dpaa2_flow_extract_search(
1111 &priv->extract.tc_key_extract[group].dpkg,
1112 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1114 ret = dpaa2_flow_extract_add(
1115 &priv->extract.tc_key_extract[group],
1116 NET_PROT_ETH, NH_FLD_ETH_TYPE,
1117 RTE_ETHER_TYPE_LEN);
1119 DPAA2_PMD_ERR("FS Extract add ETH_TYPE failed.");
1123 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1126 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1129 "Move ipaddr before ETH TYPE rule set failed");
1133 ret = dpaa2_flow_rule_data_set(
1134 &priv->extract.qos_key_extract,
1140 sizeof(rte_be16_t));
1142 DPAA2_PMD_ERR("QoS NH_FLD_ETH_TYPE rule data set failed");
1146 ret = dpaa2_flow_rule_data_set(
1147 &priv->extract.tc_key_extract[group],
1153 sizeof(rte_be16_t));
1155 DPAA2_PMD_ERR("FS NH_FLD_ETH_TYPE rule data set failed");
1160 (*device_configured) |= local_cfg;
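/* Build the QoS/FS extracts and rule data for a VLAN pattern item.
 * Without a spec, classification falls back to the Ethernet type
 * (RTE_ETHER_TYPE_VLAN); otherwise the TCI field is matched.
 */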
1166 dpaa2_configure_flow_vlan(struct rte_flow *flow,
1167 struct rte_eth_dev *dev,
1168 const struct rte_flow_attr *attr,
1169 const struct rte_flow_item *pattern,
1170 const struct rte_flow_action actions[] __rte_unused,
1171 struct rte_flow_error *error __rte_unused,
1172 int *device_configured)
1177 const struct rte_flow_item_vlan *spec, *mask;
1179 const struct rte_flow_item_vlan *last __rte_unused;
1180 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1182 group = attr->group;
1184 /* Parse pattern list to get the matching parameters */
1185 spec = (const struct rte_flow_item_vlan *)pattern->spec;
1186 last = (const struct rte_flow_item_vlan *)pattern->last;
1187 mask = (const struct rte_flow_item_vlan *)
1188 (pattern->mask ? pattern->mask : &dpaa2_flow_item_vlan_mask);
1190 /* Get traffic class index and flow id to be configured */
1191 flow->tc_id = group;
1192 flow->tc_index = attr->priority;
1195 /* No field of the VLAN header is of interest;
1196 * only the VLAN protocol itself matters.
1198 /* The Ethernet type is actually used for VLAN classification.
1200 struct proto_discrimination proto;
1202 index = dpaa2_flow_extract_search(
1203 &priv->extract.qos_key_extract.dpkg,
1204 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1206 ret = dpaa2_flow_proto_discrimination_extract(
1207 &priv->extract.qos_key_extract,
1208 RTE_FLOW_ITEM_TYPE_ETH);
1211 "QoS Ext ETH_TYPE to discriminate vLan failed");
1215 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1218 index = dpaa2_flow_extract_search(
1219 &priv->extract.tc_key_extract[group].dpkg,
1220 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1222 ret = dpaa2_flow_proto_discrimination_extract(
1223 &priv->extract.tc_key_extract[group],
1224 RTE_FLOW_ITEM_TYPE_ETH);
1227 "FS Ext ETH_TYPE to discriminate vLan failed.");
1231 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1234 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1237 "Move ipaddr before vLan discrimination set failed");
1241 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1242 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN);
1243 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1246 DPAA2_PMD_ERR("VLAN discrimination rule set failed");
1250 (*device_configured) |= local_cfg;
1255 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1256 RTE_FLOW_ITEM_TYPE_VLAN)) {
1257 DPAA2_PMD_WARN("Extract field(s) of VLAN not supported.");
1265 index = dpaa2_flow_extract_search(
1266 &priv->extract.qos_key_extract.dpkg,
1267 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1269 ret = dpaa2_flow_extract_add(
1270 &priv->extract.qos_key_extract,
1273 sizeof(rte_be16_t));
1275 DPAA2_PMD_ERR("QoS Extract add VLAN_TCI failed.");
1279 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1282 index = dpaa2_flow_extract_search(
1283 &priv->extract.tc_key_extract[group].dpkg,
1284 NET_PROT_VLAN, NH_FLD_VLAN_TCI);
1286 ret = dpaa2_flow_extract_add(
1287 &priv->extract.tc_key_extract[group],
1290 sizeof(rte_be16_t));
1292 DPAA2_PMD_ERR("FS Extract add VLAN_TCI failed.");
1296 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1299 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1302 "Move ipaddr before VLAN TCI rule set failed");
1306 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
1312 sizeof(rte_be16_t));
1314 DPAA2_PMD_ERR("QoS NH_FLD_VLAN_TCI rule data set failed");
1318 ret = dpaa2_flow_rule_data_set(
1319 &priv->extract.tc_key_extract[group],
1325 sizeof(rte_be16_t));
1327 DPAA2_PMD_ERR("FS NH_FLD_VLAN_TCI rule data set failed");
1331 (*device_configured) |= local_cfg;
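/* Add the Ethernet-type extract and rule entry used to tell IPv4 from
 * IPv6 traffic before any IP header field is matched.
 */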
1337 dpaa2_configure_flow_ip_discrimation(
1338 struct dpaa2_dev_priv *priv, struct rte_flow *flow,
1339 const struct rte_flow_item *pattern,
1340 int *local_cfg, int *device_configured,
1344 struct proto_discrimination proto;
1346 index = dpaa2_flow_extract_search(
1347 &priv->extract.qos_key_extract.dpkg,
1348 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1350 ret = dpaa2_flow_proto_discrimination_extract(
1351 &priv->extract.qos_key_extract,
1352 RTE_FLOW_ITEM_TYPE_ETH);
1355 "QoS Extract ETH_TYPE to discriminate IP failed.");
1358 (*local_cfg) |= DPAA2_QOS_TABLE_RECONFIGURE;
1361 index = dpaa2_flow_extract_search(
1362 &priv->extract.tc_key_extract[group].dpkg,
1363 NET_PROT_ETH, NH_FLD_ETH_TYPE);
1365 ret = dpaa2_flow_proto_discrimination_extract(
1366 &priv->extract.tc_key_extract[group],
1367 RTE_FLOW_ITEM_TYPE_ETH);
1370 "FS Extract ETH_TYPE to discriminate IP failed.");
1373 (*local_cfg) |= DPAA2_FS_TABLE_RECONFIGURE;
1376 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1379 "Move ipaddr before IP discrimination set failed");
1383 proto.type = RTE_FLOW_ITEM_TYPE_ETH;
1384 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4)
1385 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4);
1387 proto.eth_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
1388 ret = dpaa2_flow_proto_discrimination_rule(priv, flow, proto, group);
1390 DPAA2_PMD_ERR("IP discrimination rule set failed");
1394 (*device_configured) |= (*local_cfg);
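/* Handle IPV4 and IPV6 pattern items: discriminate the IP version by
 * Ethernet type, then match the source address, destination address and
 * next-protocol field as requested by the mask.
 */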
1401 dpaa2_configure_flow_generic_ip(
1402 struct rte_flow *flow,
1403 struct rte_eth_dev *dev,
1404 const struct rte_flow_attr *attr,
1405 const struct rte_flow_item *pattern,
1406 const struct rte_flow_action actions[] __rte_unused,
1407 struct rte_flow_error *error __rte_unused,
1408 int *device_configured)
1413 const struct rte_flow_item_ipv4 *spec_ipv4 = 0,
1415 const struct rte_flow_item_ipv6 *spec_ipv6 = 0,
1417 const void *key, *mask;
1420 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1421 const char zero_cmp[NH_FLD_IPV6_ADDR_SIZE] = {0};
1424 group = attr->group;
1426 /* Parse pattern list to get the matching parameters */
1427 if (pattern->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1428 spec_ipv4 = (const struct rte_flow_item_ipv4 *)pattern->spec;
1429 mask_ipv4 = (const struct rte_flow_item_ipv4 *)
1430 (pattern->mask ? pattern->mask :
1431 &dpaa2_flow_item_ipv4_mask);
1433 spec_ipv6 = (const struct rte_flow_item_ipv6 *)pattern->spec;
1434 mask_ipv6 = (const struct rte_flow_item_ipv6 *)
1435 (pattern->mask ? pattern->mask :
1436 &dpaa2_flow_item_ipv6_mask);
1439 /* Get traffic class index and flow id to be configured */
1440 flow->tc_id = group;
1441 flow->tc_index = attr->priority;
1443 ret = dpaa2_configure_flow_ip_discrimation(priv,
1444 flow, pattern, &local_cfg,
1445 device_configured, group);
1447 DPAA2_PMD_ERR("IP discrimination failed!");
1451 if (!spec_ipv4 && !spec_ipv6)
1455 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv4,
1456 RTE_FLOW_ITEM_TYPE_IPV4)) {
1457 DPAA2_PMD_WARN("Extract field(s) of IPv4 not supported.");
1464 if (dpaa2_flow_extract_support((const uint8_t *)mask_ipv6,
1465 RTE_FLOW_ITEM_TYPE_IPV6)) {
1466 DPAA2_PMD_WARN("Extract field(s) of IPv6 not supported.");
1472 if (mask_ipv4 && (mask_ipv4->hdr.src_addr ||
1473 mask_ipv4->hdr.dst_addr)) {
1474 flow->ipaddr_rule.ipaddr_type = FLOW_IPV4_ADDR;
1475 } else if (mask_ipv6 &&
1476 (memcmp((const char *)mask_ipv6->hdr.src_addr,
1477 zero_cmp, NH_FLD_IPV6_ADDR_SIZE) ||
1478 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1479 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1480 flow->ipaddr_rule.ipaddr_type = FLOW_IPV6_ADDR;
1483 if ((mask_ipv4 && mask_ipv4->hdr.src_addr) ||
1485 memcmp((const char *)mask_ipv6->hdr.src_addr,
1486 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1487 index = dpaa2_flow_extract_search(
1488 &priv->extract.qos_key_extract.dpkg,
1489 NET_PROT_IP, NH_FLD_IP_SRC);
1491 ret = dpaa2_flow_extract_add(
1492 &priv->extract.qos_key_extract,
1497 DPAA2_PMD_ERR("QoS Extract add IP_SRC failed.");
1501 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1504 index = dpaa2_flow_extract_search(
1505 &priv->extract.tc_key_extract[group].dpkg,
1506 NET_PROT_IP, NH_FLD_IP_SRC);
1508 ret = dpaa2_flow_extract_add(
1509 &priv->extract.tc_key_extract[group],
1514 DPAA2_PMD_ERR("FS Extract add IP_SRC failed.");
1518 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1522 key = &spec_ipv4->hdr.src_addr;
1524 key = &spec_ipv6->hdr.src_addr[0];
1526 mask = &mask_ipv4->hdr.src_addr;
1527 size = NH_FLD_IPV4_ADDR_SIZE;
1528 prot = NET_PROT_IPV4;
1530 mask = &mask_ipv6->hdr.src_addr[0];
1531 size = NH_FLD_IPV6_ADDR_SIZE;
1532 prot = NET_PROT_IPV6;
1535 ret = dpaa2_flow_rule_data_set(
1536 &priv->extract.qos_key_extract,
1538 prot, NH_FLD_IP_SRC,
1541 DPAA2_PMD_ERR("QoS NH_FLD_IP_SRC rule data set failed");
1545 ret = dpaa2_flow_rule_data_set(
1546 &priv->extract.tc_key_extract[group],
1548 prot, NH_FLD_IP_SRC,
1551 DPAA2_PMD_ERR("FS NH_FLD_IP_SRC rule data set failed");
1555 flow->ipaddr_rule.qos_ipsrc_offset =
1556 dpaa2_flow_extract_key_offset(
1557 &priv->extract.qos_key_extract,
1558 prot, NH_FLD_IP_SRC);
1559 flow->ipaddr_rule.fs_ipsrc_offset =
1560 dpaa2_flow_extract_key_offset(
1561 &priv->extract.tc_key_extract[group],
1562 prot, NH_FLD_IP_SRC);
1565 if ((mask_ipv4 && mask_ipv4->hdr.dst_addr) ||
1567 memcmp((const char *)mask_ipv6->hdr.dst_addr,
1568 zero_cmp, NH_FLD_IPV6_ADDR_SIZE))) {
1569 index = dpaa2_flow_extract_search(
1570 &priv->extract.qos_key_extract.dpkg,
1571 NET_PROT_IP, NH_FLD_IP_DST);
1574 size = NH_FLD_IPV4_ADDR_SIZE;
1576 size = NH_FLD_IPV6_ADDR_SIZE;
1577 ret = dpaa2_flow_extract_add(
1578 &priv->extract.qos_key_extract,
1583 DPAA2_PMD_ERR("QoS Extract add IP_DST failed.");
1587 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1590 index = dpaa2_flow_extract_search(
1591 &priv->extract.tc_key_extract[group].dpkg,
1592 NET_PROT_IP, NH_FLD_IP_DST);
1595 size = NH_FLD_IPV4_ADDR_SIZE;
1597 size = NH_FLD_IPV6_ADDR_SIZE;
1598 ret = dpaa2_flow_extract_add(
1599 &priv->extract.tc_key_extract[group],
1604 DPAA2_PMD_ERR("FS Extract add IP_DST failed.");
1608 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1612 key = &spec_ipv4->hdr.dst_addr;
1614 key = spec_ipv6->hdr.dst_addr;
1616 mask = &mask_ipv4->hdr.dst_addr;
1617 size = NH_FLD_IPV4_ADDR_SIZE;
1618 prot = NET_PROT_IPV4;
1620 mask = &mask_ipv6->hdr.dst_addr[0];
1621 size = NH_FLD_IPV6_ADDR_SIZE;
1622 prot = NET_PROT_IPV6;
1625 ret = dpaa2_flow_rule_data_set(
1626 &priv->extract.qos_key_extract,
1628 prot, NH_FLD_IP_DST,
1631 DPAA2_PMD_ERR("QoS NH_FLD_IP_DST rule data set failed");
1635 ret = dpaa2_flow_rule_data_set(
1636 &priv->extract.tc_key_extract[group],
1638 prot, NH_FLD_IP_DST,
1641 DPAA2_PMD_ERR("FS NH_FLD_IP_DST rule data set failed");
1644 flow->ipaddr_rule.qos_ipdst_offset =
1645 dpaa2_flow_extract_key_offset(
1646 &priv->extract.qos_key_extract,
1647 prot, NH_FLD_IP_DST);
1648 flow->ipaddr_rule.fs_ipdst_offset =
1649 dpaa2_flow_extract_key_offset(
1650 &priv->extract.tc_key_extract[group],
1651 prot, NH_FLD_IP_DST);
1654 if ((mask_ipv4 && mask_ipv4->hdr.next_proto_id) ||
1655 (mask_ipv6 && mask_ipv6->hdr.proto)) {
1656 index = dpaa2_flow_extract_search(
1657 &priv->extract.qos_key_extract.dpkg,
1658 NET_PROT_IP, NH_FLD_IP_PROTO);
1660 ret = dpaa2_flow_extract_add(
1661 &priv->extract.qos_key_extract,
1664 NH_FLD_IP_PROTO_SIZE);
1666 DPAA2_PMD_ERR("QoS Extract add IP_PROTO failed.");
1670 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1673 index = dpaa2_flow_extract_search(
1674 &priv->extract.tc_key_extract[group].dpkg,
1675 NET_PROT_IP, NH_FLD_IP_PROTO);
1677 ret = dpaa2_flow_extract_add(
1678 &priv->extract.tc_key_extract[group],
1681 NH_FLD_IP_PROTO_SIZE);
1683 DPAA2_PMD_ERR("FS Extract add IP_PROTO failed.");
1687 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1690 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1693 "Move ipaddr after NH_FLD_IP_PROTO rule set failed");
1698 key = &spec_ipv4->hdr.next_proto_id;
1700 key = &spec_ipv6->hdr.proto;
1702 mask = &mask_ipv4->hdr.next_proto_id;
1704 mask = &mask_ipv6->hdr.proto;
1706 ret = dpaa2_flow_rule_data_set(
1707 &priv->extract.qos_key_extract,
1711 key, mask, NH_FLD_IP_PROTO_SIZE);
1713 DPAA2_PMD_ERR("QoS NH_FLD_IP_PROTO rule data set failed");
1717 ret = dpaa2_flow_rule_data_set(
1718 &priv->extract.tc_key_extract[group],
1722 key, mask, NH_FLD_IP_PROTO_SIZE);
1724 DPAA2_PMD_ERR("FS NH_FLD_IP_PROTO rule data set failed");
1729 (*device_configured) |= local_cfg;
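/* Handle an ICMP pattern item: without a spec the flow is matched purely
 * by IP next-protocol == ICMP, otherwise the ICMP type and code fields
 * are matched as well.
 */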
1735 dpaa2_configure_flow_icmp(struct rte_flow *flow,
1736 struct rte_eth_dev *dev,
1737 const struct rte_flow_attr *attr,
1738 const struct rte_flow_item *pattern,
1739 const struct rte_flow_action actions[] __rte_unused,
1740 struct rte_flow_error *error __rte_unused,
1741 int *device_configured)
1746 const struct rte_flow_item_icmp *spec, *mask;
1748 const struct rte_flow_item_icmp *last __rte_unused;
1749 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1751 group = attr->group;
1753 /* Parse pattern list to get the matching parameters */
1754 spec = (const struct rte_flow_item_icmp *)pattern->spec;
1755 last = (const struct rte_flow_item_icmp *)pattern->last;
1756 mask = (const struct rte_flow_item_icmp *)
1757 (pattern->mask ? pattern->mask : &dpaa2_flow_item_icmp_mask);
1759 /* Get traffic class index and flow id to be configured */
1760 flow->tc_id = group;
1761 flow->tc_index = attr->priority;
1764 /* No field of the ICMP header is of interest;
1765 * only the ICMP protocol itself matters.
1766 * Example: flow create 0 ingress pattern icmp /
1768 /* The next-protocol field of the generic IP header is actually used
1769 * for ICMP identification.
1771 struct proto_discrimination proto;
1773 index = dpaa2_flow_extract_search(
1774 &priv->extract.qos_key_extract.dpkg,
1775 NET_PROT_IP, NH_FLD_IP_PROTO);
1777 ret = dpaa2_flow_proto_discrimination_extract(
1778 &priv->extract.qos_key_extract,
1779 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1782 "QoS Extract IP protocol to discriminate ICMP failed.");
1786 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1789 index = dpaa2_flow_extract_search(
1790 &priv->extract.tc_key_extract[group].dpkg,
1791 NET_PROT_IP, NH_FLD_IP_PROTO);
1793 ret = dpaa2_flow_proto_discrimination_extract(
1794 &priv->extract.tc_key_extract[group],
1795 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
1798 "FS Extract IP protocol to discriminate ICMP failed.");
1802 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1805 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1808 "Move IP addr before ICMP discrimination set failed");
1812 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
1813 proto.ip_proto = IPPROTO_ICMP;
1814 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
1817 DPAA2_PMD_ERR("ICMP discrimination rule set failed");
1821 (*device_configured) |= local_cfg;
1826 if (dpaa2_flow_extract_support((const uint8_t *)mask,
1827 RTE_FLOW_ITEM_TYPE_ICMP)) {
1828 DPAA2_PMD_WARN("Extract field(s) of ICMP not supported.");
1833 if (mask->hdr.icmp_type) {
1834 index = dpaa2_flow_extract_search(
1835 &priv->extract.qos_key_extract.dpkg,
1836 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1838 ret = dpaa2_flow_extract_add(
1839 &priv->extract.qos_key_extract,
1842 NH_FLD_ICMP_TYPE_SIZE);
1844 DPAA2_PMD_ERR("QoS Extract add ICMP_TYPE failed.");
1848 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1851 index = dpaa2_flow_extract_search(
1852 &priv->extract.tc_key_extract[group].dpkg,
1853 NET_PROT_ICMP, NH_FLD_ICMP_TYPE);
1855 ret = dpaa2_flow_extract_add(
1856 &priv->extract.tc_key_extract[group],
1859 NH_FLD_ICMP_TYPE_SIZE);
1861 DPAA2_PMD_ERR("FS Extract add ICMP_TYPE failed.");
1865 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1868 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1871 "Move ipaddr before ICMP TYPE set failed");
1875 ret = dpaa2_flow_rule_data_set(
1876 &priv->extract.qos_key_extract,
1880 &spec->hdr.icmp_type,
1881 &mask->hdr.icmp_type,
1882 NH_FLD_ICMP_TYPE_SIZE);
1884 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_TYPE rule data set failed");
1888 ret = dpaa2_flow_rule_data_set(
1889 &priv->extract.tc_key_extract[group],
1893 &spec->hdr.icmp_type,
1894 &mask->hdr.icmp_type,
1895 NH_FLD_ICMP_TYPE_SIZE);
1897 DPAA2_PMD_ERR("FS NH_FLD_ICMP_TYPE rule data set failed");
1902 if (mask->hdr.icmp_code) {
1903 index = dpaa2_flow_extract_search(
1904 &priv->extract.qos_key_extract.dpkg,
1905 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1907 ret = dpaa2_flow_extract_add(
1908 &priv->extract.qos_key_extract,
1911 NH_FLD_ICMP_CODE_SIZE);
1913 DPAA2_PMD_ERR("QoS Extract add ICMP_CODE failed.");
1917 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
1920 index = dpaa2_flow_extract_search(
1921 &priv->extract.tc_key_extract[group].dpkg,
1922 NET_PROT_ICMP, NH_FLD_ICMP_CODE);
1924 ret = dpaa2_flow_extract_add(
1925 &priv->extract.tc_key_extract[group],
1928 NH_FLD_ICMP_CODE_SIZE);
1930 DPAA2_PMD_ERR("FS Extract add ICMP_CODE failed.");
1934 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
1937 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
1940 "Move ipaddr after ICMP CODE set failed");
1944 ret = dpaa2_flow_rule_data_set(
1945 &priv->extract.qos_key_extract,
1949 &spec->hdr.icmp_code,
1950 &mask->hdr.icmp_code,
1951 NH_FLD_ICMP_CODE_SIZE);
1953 DPAA2_PMD_ERR("QoS NH_FLD_ICMP_CODE rule data set failed");
1957 ret = dpaa2_flow_rule_data_set(
1958 &priv->extract.tc_key_extract[group],
1962 &spec->hdr.icmp_code,
1963 &mask->hdr.icmp_code,
1964 NH_FLD_ICMP_CODE_SIZE);
1966 DPAA2_PMD_ERR("FS NH_FLD_ICMP_CODE rule data set failed");
1971 (*device_configured) |= local_cfg;
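/* Handle a UDP pattern item: an IP next-protocol == UDP rule is added
 * when no spec is given or when MC/WRIOP cannot identify the L4 protocol
 * by ports; with a spec, the UDP source/destination ports are matched as
 * well.
 */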
1977 dpaa2_configure_flow_udp(struct rte_flow *flow,
1978 struct rte_eth_dev *dev,
1979 const struct rte_flow_attr *attr,
1980 const struct rte_flow_item *pattern,
1981 const struct rte_flow_action actions[] __rte_unused,
1982 struct rte_flow_error *error __rte_unused,
1983 int *device_configured)
1988 const struct rte_flow_item_udp *spec, *mask;
1990 const struct rte_flow_item_udp *last __rte_unused;
1991 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1993 group = attr->group;
1995 /* Parse pattern list to get the matching parameters */
1996 spec = (const struct rte_flow_item_udp *)pattern->spec;
1997 last = (const struct rte_flow_item_udp *)pattern->last;
1998 mask = (const struct rte_flow_item_udp *)
1999 (pattern->mask ? pattern->mask : &dpaa2_flow_item_udp_mask);
2001 /* Get traffic class index and flow id to be configured */
2002 flow->tc_id = group;
2003 flow->tc_index = attr->priority;
2005 if (!spec || !mc_l4_port_identification) {
2006 struct proto_discrimination proto;
2008 index = dpaa2_flow_extract_search(
2009 &priv->extract.qos_key_extract.dpkg,
2010 NET_PROT_IP, NH_FLD_IP_PROTO);
2012 ret = dpaa2_flow_proto_discrimination_extract(
2013 &priv->extract.qos_key_extract,
2014 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2017 "QoS Extract IP protocol to discriminate UDP failed.");
2021 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2024 index = dpaa2_flow_extract_search(
2025 &priv->extract.tc_key_extract[group].dpkg,
2026 NET_PROT_IP, NH_FLD_IP_PROTO);
2028 ret = dpaa2_flow_proto_discrimination_extract(
2029 &priv->extract.tc_key_extract[group],
2030 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2033 "FS Extract IP protocol to discriminate UDP failed.");
2037 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2040 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2043 "Move IP addr before UDP discrimination set failed");
2047 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2048 proto.ip_proto = IPPROTO_UDP;
2049 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2052 DPAA2_PMD_ERR("UDP discrimination rule set failed");
2056 (*device_configured) |= local_cfg;
2062 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2063 RTE_FLOW_ITEM_TYPE_UDP)) {
2064 DPAA2_PMD_WARN("Extract field(s) of UDP not supported.");
2069 if (mask->hdr.src_port) {
2070 index = dpaa2_flow_extract_search(
2071 &priv->extract.qos_key_extract.dpkg,
2072 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2074 ret = dpaa2_flow_extract_add(
2075 &priv->extract.qos_key_extract,
2077 NH_FLD_UDP_PORT_SRC,
2078 NH_FLD_UDP_PORT_SIZE);
2080 DPAA2_PMD_ERR("QoS Extract add UDP_SRC failed.");
2084 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2087 index = dpaa2_flow_extract_search(
2088 &priv->extract.tc_key_extract[group].dpkg,
2089 NET_PROT_UDP, NH_FLD_UDP_PORT_SRC);
2091 ret = dpaa2_flow_extract_add(
2092 &priv->extract.tc_key_extract[group],
2094 NH_FLD_UDP_PORT_SRC,
2095 NH_FLD_UDP_PORT_SIZE);
2097 DPAA2_PMD_ERR("FS Extract add UDP_SRC failed.");
2101 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2104 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2107 "Move ipaddr before UDP_PORT_SRC set failed");
2111 ret = dpaa2_flow_rule_data_set(&priv->extract.qos_key_extract,
2114 NH_FLD_UDP_PORT_SRC,
2115 &spec->hdr.src_port,
2116 &mask->hdr.src_port,
2117 NH_FLD_UDP_PORT_SIZE);
2120 "QoS NH_FLD_UDP_PORT_SRC rule data set failed");
2124 ret = dpaa2_flow_rule_data_set(
2125 &priv->extract.tc_key_extract[group],
2128 NH_FLD_UDP_PORT_SRC,
2129 &spec->hdr.src_port,
2130 &mask->hdr.src_port,
2131 NH_FLD_UDP_PORT_SIZE);
2134 "FS NH_FLD_UDP_PORT_SRC rule data set failed");
2139 if (mask->hdr.dst_port) {
2140 index = dpaa2_flow_extract_search(
2141 &priv->extract.qos_key_extract.dpkg,
2142 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2144 ret = dpaa2_flow_extract_add(
2145 &priv->extract.qos_key_extract,
2147 NH_FLD_UDP_PORT_DST,
2148 NH_FLD_UDP_PORT_SIZE);
2150 DPAA2_PMD_ERR("QoS Extract add UDP_DST failed.");
2154 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2157 index = dpaa2_flow_extract_search(
2158 &priv->extract.tc_key_extract[group].dpkg,
2159 NET_PROT_UDP, NH_FLD_UDP_PORT_DST);
2161 ret = dpaa2_flow_extract_add(
2162 &priv->extract.tc_key_extract[group],
2164 NH_FLD_UDP_PORT_DST,
2165 NH_FLD_UDP_PORT_SIZE);
2167 DPAA2_PMD_ERR("FS Extract add UDP_DST failed.");
2171 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2174 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2177 "Move ipaddr before UDP_PORT_DST set failed");
2181 ret = dpaa2_flow_rule_data_set(
2182 &priv->extract.qos_key_extract,
2185 NH_FLD_UDP_PORT_DST,
2186 &spec->hdr.dst_port,
2187 &mask->hdr.dst_port,
2188 NH_FLD_UDP_PORT_SIZE);
2191 "QoS NH_FLD_UDP_PORT_DST rule data set failed");
2195 ret = dpaa2_flow_rule_data_set(
2196 &priv->extract.tc_key_extract[group],
2199 NH_FLD_UDP_PORT_DST,
2200 &spec->hdr.dst_port,
2201 &mask->hdr.dst_port,
2202 NH_FLD_UDP_PORT_SIZE);
2205 "FS NH_FLD_UDP_PORT_DST rule data set failed");
2210 (*device_configured) |= local_cfg;
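/* Handle a TCP pattern item; same logic as UDP, matching the TCP
 * source/destination ports.
 */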
2216 dpaa2_configure_flow_tcp(struct rte_flow *flow,
2217 struct rte_eth_dev *dev,
2218 const struct rte_flow_attr *attr,
2219 const struct rte_flow_item *pattern,
2220 const struct rte_flow_action actions[] __rte_unused,
2221 struct rte_flow_error *error __rte_unused,
2222 int *device_configured)
2227 const struct rte_flow_item_tcp *spec, *mask;
2229 const struct rte_flow_item_tcp *last __rte_unused;
2230 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2232 group = attr->group;
2234 /* Parse pattern list to get the matching parameters */
2235 spec = (const struct rte_flow_item_tcp *)pattern->spec;
2236 last = (const struct rte_flow_item_tcp *)pattern->last;
2237 mask = (const struct rte_flow_item_tcp *)
2238 (pattern->mask ? pattern->mask : &dpaa2_flow_item_tcp_mask);
2240 /* Get traffic class index and flow id to be configured */
2241 flow->tc_id = group;
2242 flow->tc_index = attr->priority;
2244 if (!spec || !mc_l4_port_identification) {
2245 struct proto_discrimination proto;
2247 index = dpaa2_flow_extract_search(
2248 &priv->extract.qos_key_extract.dpkg,
2249 NET_PROT_IP, NH_FLD_IP_PROTO);
2251 ret = dpaa2_flow_proto_discrimination_extract(
2252 &priv->extract.qos_key_extract,
2253 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2256 "QoS Extract IP protocol to discriminate TCP failed.");
2260 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2263 index = dpaa2_flow_extract_search(
2264 &priv->extract.tc_key_extract[group].dpkg,
2265 NET_PROT_IP, NH_FLD_IP_PROTO);
2267 ret = dpaa2_flow_proto_discrimination_extract(
2268 &priv->extract.tc_key_extract[group],
2269 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2272 "FS Extract IP protocol to discriminate TCP failed.");
2276 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2279 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2282 "Move IP addr before TCP discrimination set failed");
2286 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2287 proto.ip_proto = IPPROTO_TCP;
2288 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2291 DPAA2_PMD_ERR("TCP discrimination rule set failed");
2295 (*device_configured) |= local_cfg;
2301 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2302 RTE_FLOW_ITEM_TYPE_TCP)) {
2303 DPAA2_PMD_WARN("Extract field(s) of TCP not supported.");
2308 if (mask->hdr.src_port) {
2309 index = dpaa2_flow_extract_search(
2310 &priv->extract.qos_key_extract.dpkg,
2311 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2313 ret = dpaa2_flow_extract_add(
2314 &priv->extract.qos_key_extract,
2316 NH_FLD_TCP_PORT_SRC,
2317 NH_FLD_TCP_PORT_SIZE);
2319 DPAA2_PMD_ERR("QoS Extract add TCP_SRC failed.");
2323 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2326 index = dpaa2_flow_extract_search(
2327 &priv->extract.tc_key_extract[group].dpkg,
2328 NET_PROT_TCP, NH_FLD_TCP_PORT_SRC);
2330 ret = dpaa2_flow_extract_add(
2331 &priv->extract.tc_key_extract[group],
2333 NH_FLD_TCP_PORT_SRC,
2334 NH_FLD_TCP_PORT_SIZE);
2336 DPAA2_PMD_ERR("FS Extract add TCP_SRC failed.");
2340 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2343 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2346 "Move ipaddr before TCP_PORT_SRC set failed");
2350 ret = dpaa2_flow_rule_data_set(
2351 &priv->extract.qos_key_extract,
2354 NH_FLD_TCP_PORT_SRC,
2355 &spec->hdr.src_port,
2356 &mask->hdr.src_port,
2357 NH_FLD_TCP_PORT_SIZE);
2360 "QoS NH_FLD_TCP_PORT_SRC rule data set failed");
2364 ret = dpaa2_flow_rule_data_set(
2365 &priv->extract.tc_key_extract[group],
2368 NH_FLD_TCP_PORT_SRC,
2369 &spec->hdr.src_port,
2370 &mask->hdr.src_port,
2371 NH_FLD_TCP_PORT_SIZE);
2374 "FS NH_FLD_TCP_PORT_SRC rule data set failed");
2379 if (mask->hdr.dst_port) {
2380 index = dpaa2_flow_extract_search(
2381 &priv->extract.qos_key_extract.dpkg,
2382 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2384 ret = dpaa2_flow_extract_add(
2385 &priv->extract.qos_key_extract,
2387 NH_FLD_TCP_PORT_DST,
2388 NH_FLD_TCP_PORT_SIZE);
2390 DPAA2_PMD_ERR("QoS Extract add TCP_DST failed.");
2394 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2397 index = dpaa2_flow_extract_search(
2398 &priv->extract.tc_key_extract[group].dpkg,
2399 NET_PROT_TCP, NH_FLD_TCP_PORT_DST);
2401 ret = dpaa2_flow_extract_add(
2402 &priv->extract.tc_key_extract[group],
2404 NH_FLD_TCP_PORT_DST,
2405 NH_FLD_TCP_PORT_SIZE);
2407 DPAA2_PMD_ERR("FS Extract add TCP_DST failed.");
2411 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2414 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2417 "Move ipaddr before TCP_PORT_DST set failed");
2421 ret = dpaa2_flow_rule_data_set(
2422 &priv->extract.qos_key_extract,
2425 NH_FLD_TCP_PORT_DST,
2426 &spec->hdr.dst_port,
2427 &mask->hdr.dst_port,
2428 NH_FLD_TCP_PORT_SIZE);
2431 "QoS NH_FLD_TCP_PORT_DST rule data set failed");
2435 ret = dpaa2_flow_rule_data_set(
2436 &priv->extract.tc_key_extract[group],
2439 NH_FLD_TCP_PORT_DST,
2440 &spec->hdr.dst_port,
2441 &mask->hdr.dst_port,
2442 NH_FLD_TCP_PORT_SIZE);
2445 "FS NH_FLD_TCP_PORT_DST rule data set failed");
2450 (*device_configured) |= local_cfg;
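/* Handle an SCTP pattern item; same logic as UDP, matching the SCTP
 * source/destination ports.
 */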
2456 dpaa2_configure_flow_sctp(struct rte_flow *flow,
2457 struct rte_eth_dev *dev,
2458 const struct rte_flow_attr *attr,
2459 const struct rte_flow_item *pattern,
2460 const struct rte_flow_action actions[] __rte_unused,
2461 struct rte_flow_error *error __rte_unused,
2462 int *device_configured)
2467 const struct rte_flow_item_sctp *spec, *mask;
2469 const struct rte_flow_item_sctp *last __rte_unused;
2470 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2472 group = attr->group;
2474 /* Parse pattern list to get the matching parameters */
2475 spec = (const struct rte_flow_item_sctp *)pattern->spec;
2476 last = (const struct rte_flow_item_sctp *)pattern->last;
2477 mask = (const struct rte_flow_item_sctp *)
2478 (pattern->mask ? pattern->mask :
2479 &dpaa2_flow_item_sctp_mask);
2481 /* Get traffic class index and flow id to be configured */
2482 flow->tc_id = group;
2483 flow->tc_index = attr->priority;
2485 if (!spec || !mc_l4_port_identification) {
2486 struct proto_discrimination proto;
2488 index = dpaa2_flow_extract_search(
2489 &priv->extract.qos_key_extract.dpkg,
2490 NET_PROT_IP, NH_FLD_IP_PROTO);
2492 ret = dpaa2_flow_proto_discrimination_extract(
2493 &priv->extract.qos_key_extract,
2494 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2497 "QoS Extract IP protocol to discriminate SCTP failed.");
2501 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2504 index = dpaa2_flow_extract_search(
2505 &priv->extract.tc_key_extract[group].dpkg,
2506 NET_PROT_IP, NH_FLD_IP_PROTO);
2508 ret = dpaa2_flow_proto_discrimination_extract(
2509 &priv->extract.tc_key_extract[group],
2510 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2513 "FS Extract IP protocol to discriminate SCTP failed.");
2517 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2520 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2523 "Move ipaddr before SCTP discrimination set failed");
2527 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2528 proto.ip_proto = IPPROTO_SCTP;
2529 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2532 DPAA2_PMD_ERR("SCTP discrimination rule set failed");
2536 (*device_configured) |= local_cfg;
2542 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2543 RTE_FLOW_ITEM_TYPE_SCTP)) {
2544 DPAA2_PMD_WARN("Extract field(s) of SCTP not supported.");
2549 if (mask->hdr.src_port) {
2550 index = dpaa2_flow_extract_search(
2551 &priv->extract.qos_key_extract.dpkg,
2552 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2554 ret = dpaa2_flow_extract_add(
2555 &priv->extract.qos_key_extract,
2557 NH_FLD_SCTP_PORT_SRC,
2558 NH_FLD_SCTP_PORT_SIZE);
2560 DPAA2_PMD_ERR("QoS Extract add SCTP_SRC failed.");
2564 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2567 index = dpaa2_flow_extract_search(
2568 &priv->extract.tc_key_extract[group].dpkg,
2569 NET_PROT_SCTP, NH_FLD_SCTP_PORT_SRC);
2571 ret = dpaa2_flow_extract_add(
2572 &priv->extract.tc_key_extract[group],
2574 NH_FLD_SCTP_PORT_SRC,
2575 NH_FLD_SCTP_PORT_SIZE);
2577 DPAA2_PMD_ERR("FS Extract add SCTP_SRC failed.");
2581 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2584 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2587 "Move ipaddr before SCTP_PORT_SRC set failed");
2591 ret = dpaa2_flow_rule_data_set(
2592 &priv->extract.qos_key_extract,
2595 NH_FLD_SCTP_PORT_SRC,
2596 &spec->hdr.src_port,
2597 &mask->hdr.src_port,
2598 NH_FLD_SCTP_PORT_SIZE);
2601 "QoS NH_FLD_SCTP_PORT_SRC rule data set failed");
2605 ret = dpaa2_flow_rule_data_set(
2606 &priv->extract.tc_key_extract[group],
2609 NH_FLD_SCTP_PORT_SRC,
2610 &spec->hdr.src_port,
2611 &mask->hdr.src_port,
2612 NH_FLD_SCTP_PORT_SIZE);
2615 "FS NH_FLD_SCTP_PORT_SRC rule data set failed");
2620 if (mask->hdr.dst_port) {
2621 index = dpaa2_flow_extract_search(
2622 &priv->extract.qos_key_extract.dpkg,
2623 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2625 ret = dpaa2_flow_extract_add(
2626 &priv->extract.qos_key_extract,
2628 NH_FLD_SCTP_PORT_DST,
2629 NH_FLD_SCTP_PORT_SIZE);
2631 DPAA2_PMD_ERR("QoS Extract add SCTP_DST failed.");
2635 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2638 index = dpaa2_flow_extract_search(
2639 &priv->extract.tc_key_extract[group].dpkg,
2640 NET_PROT_SCTP, NH_FLD_SCTP_PORT_DST);
2642 ret = dpaa2_flow_extract_add(
2643 &priv->extract.tc_key_extract[group],
2645 NH_FLD_SCTP_PORT_DST,
2646 NH_FLD_SCTP_PORT_SIZE);
2648 DPAA2_PMD_ERR("FS Extract add SCTP_DST failed.");
2652 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2655 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2658 "Move ipaddr before SCTP_PORT_DST set failed");
2662 ret = dpaa2_flow_rule_data_set(
2663 &priv->extract.qos_key_extract,
2666 NH_FLD_SCTP_PORT_DST,
2667 &spec->hdr.dst_port,
2668 &mask->hdr.dst_port,
2669 NH_FLD_SCTP_PORT_SIZE);
2672 "QoS NH_FLD_SCTP_PORT_DST rule data set failed");
2676 ret = dpaa2_flow_rule_data_set(
2677 &priv->extract.tc_key_extract[group],
2680 NH_FLD_SCTP_PORT_DST,
2681 &spec->hdr.dst_port,
2682 &mask->hdr.dst_port,
2683 NH_FLD_SCTP_PORT_SIZE);
2686 "FS NH_FLD_SCTP_PORT_DST rule data set failed");
2691 (*device_configured) |= local_cfg;
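/*
 * Illustrative sketch only (not part of the driver): the kind of rule an
 * application could pass to rte_flow_create() that is parsed by
 * dpaa2_configure_flow_sctp() above. Port id 0, queue index 1 and the SCTP
 * port value are arbitrary assumptions.
 *
 *   struct rte_flow_attr attr = { .group = 0, .priority = 0, .ingress = 1 };
 *   struct rte_flow_item_sctp spec = { .hdr.dst_port = RTE_BE16(2905) };
 *   struct rte_flow_item_sctp mask = { .hdr.dst_port = RTE_BE16(0xffff) };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_SCTP, .spec = &spec, .mask = &mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 *
 * With a NULL spec, or when MC/WRIOP cannot identify L4 by ports, the code
 * above falls back to discriminating SCTP via the IP next-protocol field.
 */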
2697 dpaa2_configure_flow_gre(struct rte_flow *flow,
2698 struct rte_eth_dev *dev,
2699 const struct rte_flow_attr *attr,
2700 const struct rte_flow_item *pattern,
2701 const struct rte_flow_action actions[] __rte_unused,
2702 struct rte_flow_error *error __rte_unused,
2703 int *device_configured)
2708 const struct rte_flow_item_gre *spec, *mask;
2710 const struct rte_flow_item_gre *last __rte_unused;
2711 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2713 group = attr->group;
2715 /* Parse pattern list to get the matching parameters */
2716 spec = (const struct rte_flow_item_gre *)pattern->spec;
2717 last = (const struct rte_flow_item_gre *)pattern->last;
2718 mask = (const struct rte_flow_item_gre *)
2719 (pattern->mask ? pattern->mask : &dpaa2_flow_item_gre_mask);
2721 /* Get traffic class index and flow id to be configured */
2722 flow->tc_id = group;
2723 flow->tc_index = attr->priority;
2726 struct proto_discrimination proto;
2728 index = dpaa2_flow_extract_search(
2729 &priv->extract.qos_key_extract.dpkg,
2730 NET_PROT_IP, NH_FLD_IP_PROTO);
2732 ret = dpaa2_flow_proto_discrimination_extract(
2733 &priv->extract.qos_key_extract,
2734 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2737 "QoS Extract IP protocol to discriminate GRE failed.");
2741 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2744 index = dpaa2_flow_extract_search(
2745 &priv->extract.tc_key_extract[group].dpkg,
2746 NET_PROT_IP, NH_FLD_IP_PROTO);
2748 ret = dpaa2_flow_proto_discrimination_extract(
2749 &priv->extract.tc_key_extract[group],
2750 DPAA2_FLOW_ITEM_TYPE_GENERIC_IP);
2753 "FS Extract IP protocol to discriminate GRE failed.");
2757 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2760 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2763 "Move IP addr before GRE discrimination set failed");
2767 proto.type = DPAA2_FLOW_ITEM_TYPE_GENERIC_IP;
2768 proto.ip_proto = IPPROTO_GRE;
2769 ret = dpaa2_flow_proto_discrimination_rule(priv, flow,
2772 DPAA2_PMD_ERR("GRE discrimination rule set failed");
2776 (*device_configured) |= local_cfg;
2781 if (dpaa2_flow_extract_support((const uint8_t *)mask,
2782 RTE_FLOW_ITEM_TYPE_GRE)) {
2783 DPAA2_PMD_WARN("Extract field(s) of GRE not supported.");
2788 if (!mask->protocol)
2791 index = dpaa2_flow_extract_search(
2792 &priv->extract.qos_key_extract.dpkg,
2793 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2795 ret = dpaa2_flow_extract_add(
2796 &priv->extract.qos_key_extract,
2799 sizeof(rte_be16_t));
2801 DPAA2_PMD_ERR("QoS Extract add GRE_TYPE failed.");
2805 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2808 index = dpaa2_flow_extract_search(
2809 &priv->extract.tc_key_extract[group].dpkg,
2810 NET_PROT_GRE, NH_FLD_GRE_TYPE);
2812 ret = dpaa2_flow_extract_add(
2813 &priv->extract.tc_key_extract[group],
2816 sizeof(rte_be16_t));
2818 DPAA2_PMD_ERR("FS Extract add GRE_TYPE failed.");
2822 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2825 ret = dpaa2_flow_rule_move_ipaddr_tail(flow, priv, group);
2828 "Move ipaddr before GRE_TYPE set failed");
2832 ret = dpaa2_flow_rule_data_set(
2833 &priv->extract.qos_key_extract,
2839 sizeof(rte_be16_t));
2842 "QoS NH_FLD_GRE_TYPE rule data set failed");
2846 ret = dpaa2_flow_rule_data_set(
2847 &priv->extract.tc_key_extract[group],
2853 sizeof(rte_be16_t));
2856 "FS NH_FLD_GRE_TYPE rule data set failed");
2860 (*device_configured) |= local_cfg;
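/*
 * Illustrative sketch only: matching the GRE protocol field, the single GRE
 * field accepted by the extract support check above. The protocol value
 * (0x6558, transparent Ethernet bridging) is an arbitrary assumption.
 *
 *   struct rte_flow_item_gre spec = { .protocol = RTE_BE16(0x6558) };
 *   struct rte_flow_item_gre mask = { .protocol = RTE_BE16(0xffff) };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_GRE, .spec = &spec, .mask = &mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */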
2866 dpaa2_configure_flow_raw(struct rte_flow *flow,
2867 struct rte_eth_dev *dev,
2868 const struct rte_flow_attr *attr,
2869 const struct rte_flow_item *pattern,
2870 const struct rte_flow_action actions[] __rte_unused,
2871 struct rte_flow_error *error __rte_unused,
2872 int *device_configured)
2874 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2875 const struct rte_flow_item_raw *spec = pattern->spec;
2876 const struct rte_flow_item_raw *mask = pattern->mask;
2878 priv->extract.qos_key_extract.key_info.key_total_size;
2879 int local_cfg = 0, ret;
2882 /* Need both spec and mask */
2883 if (!spec || !mask) {
2884 DPAA2_PMD_ERR("spec or mask not present.");
2887 /* Only supports non-relative with offset 0 */
2888 if (spec->relative || spec->offset != 0 ||
2889 spec->search || spec->limit) {
2890 DPAA2_PMD_ERR("Relative matching and non-zero offset are not supported.");
2893 /* Spec len and mask len should be same */
2894 if (spec->length != mask->length) {
2895 DPAA2_PMD_ERR("Spec len and mask len mismatch.");
2899 /* Get traffic class index and flow id to be configured */
2900 group = attr->group;
2901 flow->tc_id = group;
2902 flow->tc_index = attr->priority;
2904 if (prev_key_size < spec->length) {
2905 ret = dpaa2_flow_extract_add_raw(&priv->extract.qos_key_extract,
2908 DPAA2_PMD_ERR("QoS Extract RAW add failed.");
2911 local_cfg |= DPAA2_QOS_TABLE_RECONFIGURE;
2913 ret = dpaa2_flow_extract_add_raw(
2914 &priv->extract.tc_key_extract[group],
2917 DPAA2_PMD_ERR("FS Extract RAW add failed.");
2920 local_cfg |= DPAA2_FS_TABLE_RECONFIGURE;
2923 ret = dpaa2_flow_rule_data_set_raw(&flow->qos_rule, spec->pattern,
2924 mask->pattern, spec->length);
2926 DPAA2_PMD_ERR("QoS RAW rule data set failed");
2930 ret = dpaa2_flow_rule_data_set_raw(&flow->fs_rule, spec->pattern,
2931 mask->pattern, spec->length);
2933 DPAA2_PMD_ERR("FS RAW rule data set failed");
2937 (*device_configured) |= local_cfg;
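/*
 * Illustrative sketch only: a RAW item that satisfies the checks above, i.e.
 * absolute matching (relative == 0), offset 0, no search/limit, and spec and
 * mask of equal length. The 4-byte pattern is an arbitrary assumption.
 *
 *   static const uint8_t raw_bytes[] = { 0x01, 0x02, 0x03, 0x04 };
 *   static const uint8_t raw_mask_bytes[] = { 0xff, 0xff, 0xff, 0xff };
 *   struct rte_flow_item_raw spec = {
 *       .relative = 0, .search = 0, .offset = 0, .limit = 0,
 *       .length = sizeof(raw_bytes), .pattern = raw_bytes,
 *   };
 *   struct rte_flow_item_raw mask = {
 *       .relative = 0, .search = 0, .offset = 0, .limit = 0,
 *       .length = sizeof(raw_mask_bytes), .pattern = raw_mask_bytes,
 *   };
 */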
2942 /* The existing QoS/FS entries with IP address(es)
2943  * need to be updated after
2944  * new extract(s) are inserted before the IP
2945  * address extract(s), since the IP key offsets shift.
2948 dpaa2_flow_entry_update(
2949 struct dpaa2_dev_priv *priv, uint8_t tc_id)
2951 struct rte_flow *curr = LIST_FIRST(&priv->flows);
2952 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
2954 int qos_ipsrc_offset = -1, qos_ipdst_offset = -1;
2955 int fs_ipsrc_offset = -1, fs_ipdst_offset = -1;
2956 struct dpaa2_key_extract *qos_key_extract =
2957 &priv->extract.qos_key_extract;
2958 struct dpaa2_key_extract *tc_key_extract =
2959 &priv->extract.tc_key_extract[tc_id];
2960 char ipsrc_key[NH_FLD_IPV6_ADDR_SIZE];
2961 char ipdst_key[NH_FLD_IPV6_ADDR_SIZE];
2962 char ipsrc_mask[NH_FLD_IPV6_ADDR_SIZE];
2963 char ipdst_mask[NH_FLD_IPV6_ADDR_SIZE];
2964 int extend = -1, extend1, size = -1;
2968 if (curr->ipaddr_rule.ipaddr_type ==
2970 curr = LIST_NEXT(curr, next);
2974 if (curr->ipaddr_rule.ipaddr_type ==
2977 qos_key_extract->key_info.ipv4_src_offset;
2979 qos_key_extract->key_info.ipv4_dst_offset;
2981 tc_key_extract->key_info.ipv4_src_offset;
2983 tc_key_extract->key_info.ipv4_dst_offset;
2984 size = NH_FLD_IPV4_ADDR_SIZE;
2987 qos_key_extract->key_info.ipv6_src_offset;
2989 qos_key_extract->key_info.ipv6_dst_offset;
2991 tc_key_extract->key_info.ipv6_src_offset;
2993 tc_key_extract->key_info.ipv6_dst_offset;
2994 size = NH_FLD_IPV6_ADDR_SIZE;
2997 qos_index = curr->tc_id * priv->fs_entries +
3000 dpaa2_flow_qos_entry_log("Before update", curr, qos_index);
3002 if (priv->num_rx_tc > 1) {
3003 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW,
3004 priv->token, &curr->qos_rule);
3006 DPAA2_PMD_ERR("QoS entry remove failed.");
3013 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3014 RTE_ASSERT(qos_ipsrc_offset >=
3015 curr->ipaddr_rule.qos_ipsrc_offset);
3016 extend1 = qos_ipsrc_offset -
3017 curr->ipaddr_rule.qos_ipsrc_offset;
3019 RTE_ASSERT(extend == extend1);
3023 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3024 (size == NH_FLD_IPV6_ADDR_SIZE));
3027 (char *)(size_t)curr->qos_rule.key_iova +
3028 curr->ipaddr_rule.qos_ipsrc_offset,
3030 memset((char *)(size_t)curr->qos_rule.key_iova +
3031 curr->ipaddr_rule.qos_ipsrc_offset,
3035 (char *)(size_t)curr->qos_rule.mask_iova +
3036 curr->ipaddr_rule.qos_ipsrc_offset,
3038 memset((char *)(size_t)curr->qos_rule.mask_iova +
3039 curr->ipaddr_rule.qos_ipsrc_offset,
3042 curr->ipaddr_rule.qos_ipsrc_offset = qos_ipsrc_offset;
3045 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3046 RTE_ASSERT(qos_ipdst_offset >=
3047 curr->ipaddr_rule.qos_ipdst_offset);
3048 extend1 = qos_ipdst_offset -
3049 curr->ipaddr_rule.qos_ipdst_offset;
3051 RTE_ASSERT(extend == extend1);
3055 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3056 (size == NH_FLD_IPV6_ADDR_SIZE));
3059 (char *)(size_t)curr->qos_rule.key_iova +
3060 curr->ipaddr_rule.qos_ipdst_offset,
3062 memset((char *)(size_t)curr->qos_rule.key_iova +
3063 curr->ipaddr_rule.qos_ipdst_offset,
3067 (char *)(size_t)curr->qos_rule.mask_iova +
3068 curr->ipaddr_rule.qos_ipdst_offset,
3070 memset((char *)(size_t)curr->qos_rule.mask_iova +
3071 curr->ipaddr_rule.qos_ipdst_offset,
3074 curr->ipaddr_rule.qos_ipdst_offset = qos_ipdst_offset;
3077 if (curr->ipaddr_rule.qos_ipsrc_offset >= 0) {
3078 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3079 (size == NH_FLD_IPV6_ADDR_SIZE));
3080 memcpy((char *)(size_t)curr->qos_rule.key_iova +
3081 curr->ipaddr_rule.qos_ipsrc_offset,
3084 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3085 curr->ipaddr_rule.qos_ipsrc_offset,
3089 if (curr->ipaddr_rule.qos_ipdst_offset >= 0) {
3090 RTE_ASSERT((size == NH_FLD_IPV4_ADDR_SIZE) ||
3091 (size == NH_FLD_IPV6_ADDR_SIZE));
3092 memcpy((char *)(size_t)curr->qos_rule.key_iova +
3093 curr->ipaddr_rule.qos_ipdst_offset,
3096 memcpy((char *)(size_t)curr->qos_rule.mask_iova +
3097 curr->ipaddr_rule.qos_ipdst_offset,
3103 curr->qos_real_key_size += extend;
3105 curr->qos_rule.key_size = FIXED_ENTRY_SIZE;
3107 dpaa2_flow_qos_entry_log("Start update", curr, qos_index);
3109 if (priv->num_rx_tc > 1) {
3110 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3111 priv->token, &curr->qos_rule,
3112 curr->tc_id, qos_index,
3115 DPAA2_PMD_ERR("QoS entry update failed.");
3120 if (curr->action != RTE_FLOW_ACTION_TYPE_QUEUE) {
3121 curr = LIST_NEXT(curr, next);
3125 dpaa2_flow_fs_entry_log("Before update", curr);
3128 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW,
3129 priv->token, curr->tc_id, &curr->fs_rule);
3131 DPAA2_PMD_ERR("FS entry remove failed.");
3135 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0 &&
3136 tc_id == curr->tc_id) {
3137 RTE_ASSERT(fs_ipsrc_offset >=
3138 curr->ipaddr_rule.fs_ipsrc_offset);
3139 extend1 = fs_ipsrc_offset -
3140 curr->ipaddr_rule.fs_ipsrc_offset;
3142 RTE_ASSERT(extend == extend1);
3147 (char *)(size_t)curr->fs_rule.key_iova +
3148 curr->ipaddr_rule.fs_ipsrc_offset,
3150 memset((char *)(size_t)curr->fs_rule.key_iova +
3151 curr->ipaddr_rule.fs_ipsrc_offset,
3155 (char *)(size_t)curr->fs_rule.mask_iova +
3156 curr->ipaddr_rule.fs_ipsrc_offset,
3158 memset((char *)(size_t)curr->fs_rule.mask_iova +
3159 curr->ipaddr_rule.fs_ipsrc_offset,
3162 curr->ipaddr_rule.fs_ipsrc_offset = fs_ipsrc_offset;
3165 if (curr->ipaddr_rule.fs_ipdst_offset >= 0 &&
3166 tc_id == curr->tc_id) {
3167 RTE_ASSERT(fs_ipdst_offset >=
3168 curr->ipaddr_rule.fs_ipdst_offset);
3169 extend1 = fs_ipdst_offset -
3170 curr->ipaddr_rule.fs_ipdst_offset;
3172 RTE_ASSERT(extend == extend1);
3177 (char *)(size_t)curr->fs_rule.key_iova +
3178 curr->ipaddr_rule.fs_ipdst_offset,
3180 memset((char *)(size_t)curr->fs_rule.key_iova +
3181 curr->ipaddr_rule.fs_ipdst_offset,
3185 (char *)(size_t)curr->fs_rule.mask_iova +
3186 curr->ipaddr_rule.fs_ipdst_offset,
3188 memset((char *)(size_t)curr->fs_rule.mask_iova +
3189 curr->ipaddr_rule.fs_ipdst_offset,
3192 curr->ipaddr_rule.fs_ipdst_offset = fs_ipdst_offset;
3195 if (curr->ipaddr_rule.fs_ipsrc_offset >= 0) {
3196 memcpy((char *)(size_t)curr->fs_rule.key_iova +
3197 curr->ipaddr_rule.fs_ipsrc_offset,
3200 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3201 curr->ipaddr_rule.fs_ipsrc_offset,
3205 if (curr->ipaddr_rule.fs_ipdst_offset >= 0) {
3206 memcpy((char *)(size_t)curr->fs_rule.key_iova +
3207 curr->ipaddr_rule.fs_ipdst_offset,
3210 memcpy((char *)(size_t)curr->fs_rule.mask_iova +
3211 curr->ipaddr_rule.fs_ipdst_offset,
3217 curr->fs_real_key_size += extend;
3218 curr->fs_rule.key_size = FIXED_ENTRY_SIZE;
3220 dpaa2_flow_fs_entry_log("Start update", curr);
3222 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW,
3223 priv->token, curr->tc_id, curr->tc_index,
3224 &curr->fs_rule, &curr->action_cfg);
3226 DPAA2_PMD_ERR("FS entry update failed.");
3230 curr = LIST_NEXT(curr, next);
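/*
 * Worked illustration of the update performed above (sizes are assumed):
 * suppose an existing QoS key holds only
 *     [ IPv4 SRC (4 bytes) @ offset 0 ]
 * and a later rule inserts a 2-byte UDP source-port extract ahead of the IP
 * address extract, giving
 *     [ UDP SRC (2 bytes) @ offset 0 ][ IPv4 SRC (4 bytes) @ offset 2 ]
 * Every existing entry still carrying its IP address bytes at offset 0 must
 * have them moved to offset 2 ("extend" = 2) and its real key size grown by
 * the same amount, which is what dpaa2_flow_entry_update() does for both the
 * QoS and the FS rules of each flow in the list.
 */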
3237 dpaa2_flow_verify_attr(
3238 struct dpaa2_dev_priv *priv,
3239 const struct rte_flow_attr *attr)
3241 struct rte_flow *curr = LIST_FIRST(&priv->flows);
3244 if (curr->tc_id == attr->group &&
3245 curr->tc_index == attr->priority) {
3247 "Flow with group %d and priority %d already exists.",
3248 attr->group, attr->priority);
3252 curr = LIST_NEXT(curr, next);
3259 dpaa2_flow_verify_action(
3260 struct dpaa2_dev_priv *priv,
3261 const struct rte_flow_attr *attr,
3262 const struct rte_flow_action actions[])
3264 int end_of_list = 0, i, j = 0;
3265 const struct rte_flow_action_queue *dest_queue;
3266 const struct rte_flow_action_rss *rss_conf;
3267 struct dpaa2_queue *rxq;
3269 while (!end_of_list) {
3270 switch (actions[j].type) {
3271 case RTE_FLOW_ACTION_TYPE_QUEUE:
3272 dest_queue = (const struct rte_flow_action_queue *)
3274 rxq = priv->rx_vq[dest_queue->index];
3275 if (attr->group != rxq->tc_index) {
3277 "RXQ[%d] does not belong to the group %d",
3278 dest_queue->index, attr->group);
3283 case RTE_FLOW_ACTION_TYPE_RSS:
3284 rss_conf = (const struct rte_flow_action_rss *)
3286 if (rss_conf->queue_num > priv->dist_queues) {
3288 "RSS number exceeds the distrbution size");
3291 for (i = 0; i < (int)rss_conf->queue_num; i++) {
3292 if (rss_conf->queue[i] >= priv->nb_rx_queues) {
3294 "RSS queue index exceeds the number of RXQs");
3297 rxq = priv->rx_vq[rss_conf->queue[i]];
3298 if (rxq->tc_index != attr->group) {
3300 "Queue/Group combination are not supported\n");
3306 case RTE_FLOW_ACTION_TYPE_END:
3310 DPAA2_PMD_ERR("Invalid action type");
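/*
 * Sketch of action configurations that pass the checks above, assuming a
 * port whose first two RX queues belong to traffic class (group) 0 and the
 * pre-21.11 ETH_RSS_IP macro name:
 *
 *   struct rte_flow_action_queue q = { .index = 0 };
 *   uint16_t queues[2] = { 0, 1 };
 *   struct rte_flow_action_rss rss = {
 *       .types = ETH_RSS_IP,
 *       .queue_num = 2,
 *       .queue = queues,
 *   };
 *
 * A QUEUE action whose RX queue maps to a different TC than attr->group, or
 * an RSS action spanning more queues than priv->dist_queues, is rejected.
 */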
3320 dpaa2_generic_flow_set(struct rte_flow *flow,
3321 struct rte_eth_dev *dev,
3322 const struct rte_flow_attr *attr,
3323 const struct rte_flow_item pattern[],
3324 const struct rte_flow_action actions[],
3325 struct rte_flow_error *error)
3327 const struct rte_flow_action_queue *dest_queue;
3328 const struct rte_flow_action_rss *rss_conf;
3329 int is_keycfg_configured = 0, end_of_list = 0;
3330 int ret = 0, i = 0, j = 0;
3331 struct dpni_rx_dist_cfg tc_cfg;
3332 struct dpni_qos_tbl_cfg qos_cfg;
3333 struct dpni_fs_action_cfg action;
3334 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3335 struct dpaa2_queue *rxq;
3336 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3338 struct rte_flow *curr = LIST_FIRST(&priv->flows);
3341 ret = dpaa2_flow_verify_attr(priv, attr);
3345 ret = dpaa2_flow_verify_action(priv, attr, actions);
3349 /* Parse pattern list to get the matching parameters */
3350 while (!end_of_list) {
3351 switch (pattern[i].type) {
3352 case RTE_FLOW_ITEM_TYPE_ETH:
3353 ret = dpaa2_configure_flow_eth(flow,
3354 dev, attr, &pattern[i], actions, error,
3355 &is_keycfg_configured);
3357 DPAA2_PMD_ERR("ETH flow configuration failed!");
3361 case RTE_FLOW_ITEM_TYPE_VLAN:
3362 ret = dpaa2_configure_flow_vlan(flow,
3363 dev, attr, &pattern[i], actions, error,
3364 &is_keycfg_configured);
3366 DPAA2_PMD_ERR("VLAN flow configuration failed!");
3370 case RTE_FLOW_ITEM_TYPE_IPV4:
3371 case RTE_FLOW_ITEM_TYPE_IPV6:
3372 ret = dpaa2_configure_flow_generic_ip(flow,
3373 dev, attr, &pattern[i], actions, error,
3374 &is_keycfg_configured);
3376 DPAA2_PMD_ERR("IP flow configuration failed!");
3380 case RTE_FLOW_ITEM_TYPE_ICMP:
3381 ret = dpaa2_configure_flow_icmp(flow,
3382 dev, attr, &pattern[i], actions, error,
3383 &is_keycfg_configured);
3385 DPAA2_PMD_ERR("ICMP flow configuration failed!");
3389 case RTE_FLOW_ITEM_TYPE_UDP:
3390 ret = dpaa2_configure_flow_udp(flow,
3391 dev, attr, &pattern[i], actions, error,
3392 &is_keycfg_configured);
3394 DPAA2_PMD_ERR("UDP flow configuration failed!");
3398 case RTE_FLOW_ITEM_TYPE_TCP:
3399 ret = dpaa2_configure_flow_tcp(flow,
3400 dev, attr, &pattern[i], actions, error,
3401 &is_keycfg_configured);
3403 DPAA2_PMD_ERR("TCP flow configuration failed!");
3407 case RTE_FLOW_ITEM_TYPE_SCTP:
3408 ret = dpaa2_configure_flow_sctp(flow,
3409 dev, attr, &pattern[i], actions, error,
3410 &is_keycfg_configured);
3412 DPAA2_PMD_ERR("SCTP flow configuration failed!");
3416 case RTE_FLOW_ITEM_TYPE_GRE:
3417 ret = dpaa2_configure_flow_gre(flow,
3418 dev, attr, &pattern[i], actions, error,
3419 &is_keycfg_configured);
3421 DPAA2_PMD_ERR("GRE flow configuration failed!");
3425 case RTE_FLOW_ITEM_TYPE_RAW:
3426 ret = dpaa2_configure_flow_raw(flow,
3427 dev, attr, &pattern[i],
3429 &is_keycfg_configured);
3431 DPAA2_PMD_ERR("RAW flow configuration failed!");
3435 case RTE_FLOW_ITEM_TYPE_END:
3437 break; /*End of List*/
3439 DPAA2_PMD_ERR("Invalid pattern item type");
3446 /* Let's parse the actions to apply on matching traffic */
3448 while (!end_of_list) {
3449 switch (actions[j].type) {
3450 case RTE_FLOW_ACTION_TYPE_QUEUE:
3452 (const struct rte_flow_action_queue *)(actions[j].conf);
3453 rxq = priv->rx_vq[dest_queue->index];
3454 flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
3455 memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
3456 action.flow_id = rxq->flow_id;
3458 /* Configure the FS table first */
3459 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
3460 dpaa2_flow_fs_table_extracts_log(priv, flow->tc_id);
3461 if (dpkg_prepare_key_cfg(
3462 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3463 (uint8_t *)(size_t)priv->extract
3464 .tc_extract_param[flow->tc_id]) < 0) {
3466 "Unable to prepare extract parameters");
3471 sizeof(struct dpni_rx_dist_cfg));
3472 tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
3473 tc_cfg.key_cfg_iova =
3474 (uint64_t)priv->extract.tc_extract_param[flow->tc_id];
3475 tc_cfg.tc = flow->tc_id;
3476 tc_cfg.enable = false;
3477 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3478 priv->token, &tc_cfg);
3481 "TC hash cannot be disabled.(%d)",
3485 tc_cfg.enable = true;
3486 tc_cfg.fs_miss_flow_id =
3487 dpaa2_flow_miss_flow_id;
3488 ret = dpni_set_rx_fs_dist(dpni, CMD_PRI_LOW,
3489 priv->token, &tc_cfg);
3492 "TC distribution cannot be configured.(%d)",
3498 /* Then configure the QoS table. */
3499 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3500 dpaa2_flow_qos_table_extracts_log(priv);
3501 if (dpkg_prepare_key_cfg(
3502 &priv->extract.qos_key_extract.dpkg,
3503 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3505 "Unable to prepare extract parameters");
3509 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
3510 qos_cfg.discard_on_miss = false;
3511 qos_cfg.default_tc = 0;
3512 qos_cfg.keep_entries = true;
3513 qos_cfg.key_cfg_iova =
3514 (size_t)priv->extract.qos_extract_param;
3515 /* The QoS table is effective only when there are multiple TCs. */
3516 if (priv->num_rx_tc > 1) {
3517 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3518 priv->token, &qos_cfg);
3521 "RSS QoS table can not be configured(%d)\n",
3528 flow->qos_real_key_size = priv->extract
3529 .qos_key_extract.key_info.key_total_size;
3530 if (flow->ipaddr_rule.ipaddr_type == FLOW_IPV4_ADDR) {
3531 if (flow->ipaddr_rule.qos_ipdst_offset >=
3532 flow->ipaddr_rule.qos_ipsrc_offset) {
3533 flow->qos_real_key_size =
3534 flow->ipaddr_rule.qos_ipdst_offset +
3535 NH_FLD_IPV4_ADDR_SIZE;
3537 flow->qos_real_key_size =
3538 flow->ipaddr_rule.qos_ipsrc_offset +
3539 NH_FLD_IPV4_ADDR_SIZE;
3541 } else if (flow->ipaddr_rule.ipaddr_type ==
3543 if (flow->ipaddr_rule.qos_ipdst_offset >=
3544 flow->ipaddr_rule.qos_ipsrc_offset) {
3545 flow->qos_real_key_size =
3546 flow->ipaddr_rule.qos_ipdst_offset +
3547 NH_FLD_IPV6_ADDR_SIZE;
3549 flow->qos_real_key_size =
3550 flow->ipaddr_rule.qos_ipsrc_offset +
3551 NH_FLD_IPV6_ADDR_SIZE;
3555 /* Adding a QoS entry is only effective when there are multiple TCs. */
3556 if (priv->num_rx_tc > 1) {
3557 qos_index = flow->tc_id * priv->fs_entries +
3559 if (qos_index >= priv->qos_entries) {
3560 DPAA2_PMD_ERR("QoS table with %d entries full",
3564 flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3566 dpaa2_flow_qos_entry_log("Start add", flow, qos_index);
3568 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
3569 priv->token, &flow->qos_rule,
3570 flow->tc_id, qos_index,
3574 "Error in addnig entry to QoS table(%d)", ret);
3579 if (flow->tc_index >= priv->fs_entries) {
3580 DPAA2_PMD_ERR("FS table with %d entries full",
3585 flow->fs_real_key_size =
3586 priv->extract.tc_key_extract[flow->tc_id]
3587 .key_info.key_total_size;
3589 if (flow->ipaddr_rule.ipaddr_type ==
3591 if (flow->ipaddr_rule.fs_ipdst_offset >=
3592 flow->ipaddr_rule.fs_ipsrc_offset) {
3593 flow->fs_real_key_size =
3594 flow->ipaddr_rule.fs_ipdst_offset +
3595 NH_FLD_IPV4_ADDR_SIZE;
3597 flow->fs_real_key_size =
3598 flow->ipaddr_rule.fs_ipsrc_offset +
3599 NH_FLD_IPV4_ADDR_SIZE;
3601 } else if (flow->ipaddr_rule.ipaddr_type ==
3603 if (flow->ipaddr_rule.fs_ipdst_offset >=
3604 flow->ipaddr_rule.fs_ipsrc_offset) {
3605 flow->fs_real_key_size =
3606 flow->ipaddr_rule.fs_ipdst_offset +
3607 NH_FLD_IPV6_ADDR_SIZE;
3609 flow->fs_real_key_size =
3610 flow->ipaddr_rule.fs_ipsrc_offset +
3611 NH_FLD_IPV6_ADDR_SIZE;
3615 flow->fs_rule.key_size = FIXED_ENTRY_SIZE;
3617 dpaa2_flow_fs_entry_log("Start add", flow);
3619 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
3620 flow->tc_id, flow->tc_index,
3621 &flow->fs_rule, &action);
3624 "Error in adding entry to FS table(%d)", ret);
3627 memcpy(&flow->action_cfg, &action,
3628 sizeof(struct dpni_fs_action_cfg));
3630 case RTE_FLOW_ACTION_TYPE_RSS:
3631 rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
3633 flow->action = RTE_FLOW_ACTION_TYPE_RSS;
3634 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
3635 &priv->extract.tc_key_extract[flow->tc_id].dpkg);
3638 "unable to set flow distribution.please check queue config\n");
3642 /* Allocate DMA'ble memory to write the rules */
3643 param = (size_t)rte_malloc(NULL, 256, 64);
3645 DPAA2_PMD_ERR("Memory allocation failure\n");
3649 if (dpkg_prepare_key_cfg(
3650 &priv->extract.tc_key_extract[flow->tc_id].dpkg,
3651 (uint8_t *)param) < 0) {
3653 "Unable to prepare extract parameters");
3654 rte_free((void *)param);
3658 memset(&tc_cfg, 0, sizeof(struct dpni_rx_dist_cfg));
3659 tc_cfg.dist_size = rss_conf->queue_num;
3660 tc_cfg.key_cfg_iova = (size_t)param;
3661 tc_cfg.enable = true;
3662 tc_cfg.tc = flow->tc_id;
3663 ret = dpni_set_rx_hash_dist(dpni, CMD_PRI_LOW,
3664 priv->token, &tc_cfg);
3667 "RSS TC table cannot be configured: %d\n",
3669 rte_free((void *)param);
3673 rte_free((void *)param);
3674 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
3675 if (dpkg_prepare_key_cfg(
3676 &priv->extract.qos_key_extract.dpkg,
3677 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
3679 "Unable to prepare extract parameters");
3683 sizeof(struct dpni_qos_tbl_cfg));
3684 qos_cfg.discard_on_miss = true;
3685 qos_cfg.keep_entries = true;
3686 qos_cfg.key_cfg_iova =
3687 (size_t)priv->extract.qos_extract_param;
3688 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
3689 priv->token, &qos_cfg);
3692 "RSS QoS dist can't be configured-%d\n",
3698 /* Add Rule into QoS table */
3699 qos_index = flow->tc_id * priv->fs_entries +
3701 if (qos_index >= priv->qos_entries) {
3702 DPAA2_PMD_ERR("QoS table with %d entries full",
3707 flow->qos_real_key_size =
3708 priv->extract.qos_key_extract.key_info.key_total_size;
3709 flow->qos_rule.key_size = FIXED_ENTRY_SIZE;
3710 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
3711 &flow->qos_rule, flow->tc_id,
3715 "Error in entry addition in QoS table(%d)",
3720 case RTE_FLOW_ACTION_TYPE_END:
3724 DPAA2_PMD_ERR("Invalid action type");
3732 if (is_keycfg_configured &
3733 (DPAA2_QOS_TABLE_RECONFIGURE |
3734 DPAA2_FS_TABLE_RECONFIGURE)) {
3735 ret = dpaa2_flow_entry_update(priv, flow->tc_id);
3737 DPAA2_PMD_ERR("Flow entry update failed.");
3742 /* Insert the new rule: at the head if the flow list is empty, otherwise after the last existing flow. */
3744 LIST_INSERT_HEAD(&priv->flows, flow, next);
3746 while (LIST_NEXT(curr, next))
3747 curr = LIST_NEXT(curr, next);
3748 LIST_INSERT_AFTER(curr, flow, next);
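/*
 * Descriptive note on what dpaa2_generic_flow_set() has programmed at this
 * point: attr->group selects the traffic class, so the rule key is added to
 * the QoS table (only when the port has more than one TC) to steer matching
 * frames to that TC, while attr->priority selects the entry index inside
 * that TC's FS table, which finally picks the RX queue (QUEUE action) or the
 * hash distribution over the listed queues (RSS action).
 */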
3755 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
3756 const struct rte_flow_attr *attr)
3760 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
3761 DPAA2_PMD_ERR("Priority group is out of range\n");
3764 if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
3765 DPAA2_PMD_ERR("Priority within the group is out of range\n");
3768 if (unlikely(attr->egress)) {
3770 "Flow configuration is not supported on egress side\n");
3773 if (unlikely(!attr->ingress)) {
3774 DPAA2_PMD_ERR("Ingress flag must be configured\n");
3781 dpaa2_dev_verify_patterns(const struct rte_flow_item pattern[])
3783 unsigned int i, j, is_found = 0;
3786 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3787 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
3788 if (dpaa2_supported_pattern_type[i]
3789 == pattern[j].type) {
3799 /* Let's verify other combinations of the given pattern rules */
3800 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
3801 if (!pattern[j].spec) {
3811 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
3813 unsigned int i, j, is_found = 0;
3816 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3817 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
3818 if (dpaa2_supported_action_type[i] == actions[j].type) {
3828 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
3829 if (actions[j].type != RTE_FLOW_ACTION_TYPE_DROP &&
3837 int dpaa2_flow_validate(struct rte_eth_dev *dev,
3838 const struct rte_flow_attr *flow_attr,
3839 const struct rte_flow_item pattern[],
3840 const struct rte_flow_action actions[],
3841 struct rte_flow_error *error)
3843 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3844 struct dpni_attr dpni_attr;
3845 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
3846 uint16_t token = priv->token;
3849 memset(&dpni_attr, 0, sizeof(struct dpni_attr));
3850 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
3853 "Failure to get dpni@%p attribute, err code %d\n",
3855 rte_flow_error_set(error, EPERM,
3856 RTE_FLOW_ERROR_TYPE_ATTR,
3857 flow_attr, "invalid");
3861 /* Verify input attributes */
3862 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
3865 "Invalid attributes are given\n");
3866 rte_flow_error_set(error, EPERM,
3867 RTE_FLOW_ERROR_TYPE_ATTR,
3868 flow_attr, "invalid");
3869 goto not_valid_params;
3871 /* Verify input pattern list */
3872 ret = dpaa2_dev_verify_patterns(pattern);
3875 "Invalid pattern list is given\n");
3876 rte_flow_error_set(error, EPERM,
3877 RTE_FLOW_ERROR_TYPE_ITEM,
3878 pattern, "invalid");
3879 goto not_valid_params;
3881 /* Verify input action list */
3882 ret = dpaa2_dev_verify_actions(actions);
3885 "Invalid action list is given\n");
3886 rte_flow_error_set(error, EPERM,
3887 RTE_FLOW_ERROR_TYPE_ACTION,
3888 actions, "invalid");
3889 goto not_valid_params;
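/*
 * Debug/tuning knobs read by dpaa2_flow_create() below; both are optional
 * environment variables and the values shown are only examples:
 *   DPAA2_FLOW_CONTROL_LOG=1        dump extracts and QoS/FS rule entries as
 *                                   they are built;
 *   DPAA2_FLOW_CONTROL_MISS_FLOW=7  deliver frames that miss every FS entry
 *                                   to flow id 7 (must be smaller than the
 *                                   number of distribution queues).
 */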
3896 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
3897 const struct rte_flow_attr *attr,
3898 const struct rte_flow_item pattern[],
3899 const struct rte_flow_action actions[],
3900 struct rte_flow_error *error)
3902 struct rte_flow *flow = NULL;
3903 size_t key_iova = 0, mask_iova = 0;
3906 dpaa2_flow_control_log =
3907 getenv("DPAA2_FLOW_CONTROL_LOG");
3909 if (getenv("DPAA2_FLOW_CONTROL_MISS_FLOW")) {
3910 struct dpaa2_dev_priv *priv = dev->data->dev_private;
3912 dpaa2_flow_miss_flow_id =
3913 atoi(getenv("DPAA2_FLOW_CONTROL_MISS_FLOW"));
3914 if (dpaa2_flow_miss_flow_id >= priv->dist_queues) {
3916 "The missed flow ID %d exceeds the max flow ID %d",
3917 dpaa2_flow_miss_flow_id,
3918 priv->dist_queues - 1);
3923 flow = rte_zmalloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
3925 DPAA2_PMD_ERR("Failure to allocate memory for flow");
3928 /* Allocate DMA'ble memory to write the rules */
3929 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3932 "Memory allocation failure for rule configuration\n");
3935 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3938 "Memory allocation failure for rule configuration\n");
3942 flow->qos_rule.key_iova = key_iova;
3943 flow->qos_rule.mask_iova = mask_iova;
3945 /* Allocate DMA'ble memory to write the rules */
3946 key_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3949 "Memory allocation failure for rule configuration\n");
3952 mask_iova = (size_t)rte_zmalloc(NULL, 256, 64);
3955 "Memory allocation failure for rule configuration\n");
3959 flow->fs_rule.key_iova = key_iova;
3960 flow->fs_rule.mask_iova = mask_iova;
3962 flow->ipaddr_rule.ipaddr_type = FLOW_NONE_IPADDR;
3963 flow->ipaddr_rule.qos_ipsrc_offset =
3964 IP_ADDRESS_OFFSET_INVALID;
3965 flow->ipaddr_rule.qos_ipdst_offset =
3966 IP_ADDRESS_OFFSET_INVALID;
3967 flow->ipaddr_rule.fs_ipsrc_offset =
3968 IP_ADDRESS_OFFSET_INVALID;
3969 flow->ipaddr_rule.fs_ipdst_offset =
3970 IP_ADDRESS_OFFSET_INVALID;
3972 switch (dpaa2_filter_type) {
3973 case RTE_ETH_FILTER_GENERIC:
3974 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
3977 if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
3978 rte_flow_error_set(error, EPERM,
3979 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3982 "Failure to create flow, return code (%d)", ret);
3983 goto creation_error;
3987 DPAA2_PMD_ERR("Filter type (%d) not supported",
3994 rte_flow_error_set(error, EPERM,
3995 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3996 NULL, "memory alloc");
3998 rte_free((void *)flow);
3999 rte_free((void *)key_iova);
4000 rte_free((void *)mask_iova);
4006 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
4007 struct rte_flow *flow,
4008 struct rte_flow_error *error)
4011 struct dpaa2_dev_priv *priv = dev->data->dev_private;
4012 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
4014 switch (flow->action) {
4015 case RTE_FLOW_ACTION_TYPE_QUEUE:
4016 if (priv->num_rx_tc > 1) {
4017 /* Remove entry from QoS table first */
4018 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4022 "Error in removing entry from QoS table(%d)", ret);
4027 /* Then remove entry from FS table */
4028 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
4029 flow->tc_id, &flow->fs_rule);
4032 "Error in removing entry from FS table(%d)", ret);
4036 case RTE_FLOW_ACTION_TYPE_RSS:
4037 if (priv->num_rx_tc > 1) {
4038 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
4042 "Error in entry addition in QoS table(%d)", ret);
4049 "Action type (%d) is not supported", flow->action);
4054 LIST_REMOVE(flow, next);
4055 rte_free((void *)(size_t)flow->qos_rule.key_iova);
4056 rte_free((void *)(size_t)flow->qos_rule.mask_iova);
4057 rte_free((void *)(size_t)flow->fs_rule.key_iova);
4058 rte_free((void *)(size_t)flow->fs_rule.mask_iova);
4059 /* Now free the flow */
4064 rte_flow_error_set(error, EPERM,
4065 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4071 * Destroy user-configured flow rules.
4073  * This function skips internal flow rules.
4075 * @see rte_flow_flush()
4079 dpaa2_flow_flush(struct rte_eth_dev *dev,
4080 struct rte_flow_error *error)
4082 struct dpaa2_dev_priv *priv = dev->data->dev_private;
4083 struct rte_flow *flow = LIST_FIRST(&priv->flows);
4086 struct rte_flow *next = LIST_NEXT(flow, next);
4088 dpaa2_flow_destroy(dev, flow, error);
4095 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
4096 struct rte_flow *flow __rte_unused,
4097 const struct rte_flow_action *actions __rte_unused,
4098 void *data __rte_unused,
4099 struct rte_flow_error *error __rte_unused)
4105 * Clean up all flow rules.
4107 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
4108 * rules regardless of whether they are internal or user-configured.
4111  * Pointer to the Ethernet device structure.
4114 dpaa2_flow_clean(struct rte_eth_dev *dev)
4116 struct rte_flow *flow;
4117 struct dpaa2_dev_priv *priv = dev->data->dev_private;
4119 while ((flow = LIST_FIRST(&priv->flows)))
4120 dpaa2_flow_destroy(dev, flow, NULL);
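/*
 * Teardown sketch (port id 0 is an assumption): user-created rules are
 * removed through the ops registered below, e.g.
 *
 *   struct rte_flow_error err;
 *   rte_flow_flush(0, &err);
 *
 * which lands in dpaa2_flow_flush() and skips internal flows, whereas
 * dpaa2_flow_clean() above is meant for the driver itself (typically at
 * device close) and drops every remaining rule, internal or user-configured.
 */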
4123 const struct rte_flow_ops dpaa2_flow_ops = {
4124 .create = dpaa2_flow_create,
4125 .validate = dpaa2_flow_validate,
4126 .destroy = dpaa2_flow_destroy,
4127 .flush = dpaa2_flow_flush,
4128 .query = dpaa2_flow_query,