1 /* * SPDX-License-Identifier: BSD-3-Clause
13 #include <rte_ethdev.h>
15 #include <rte_eth_ctrl.h>
16 #include <rte_malloc.h>
17 #include <rte_flow_driver.h>
18 #include <rte_tailq.h>
23 #include <dpaa2_ethdev.h>
24 #include <dpaa2_pmd_logs.h>
27 struct dpni_rule_cfg rule;
32 enum rte_flow_action_type action;
36 /* Layout for rule compositions for supported patterns */
37 /* TODO: Current design only supports Ethernet + IPv4 based classification. */
38 /* So corresponding offset macros are valid only. Rest are placeholder for */
39 /* now. Once support for other network headers is added, then */
40 /* corresponding macros will be updated with correct values */
41 #define DPAA2_CLS_RULE_OFFSET_ETH 0 /*Start of buffer*/
42 #define DPAA2_CLS_RULE_OFFSET_VLAN 14 /* DPAA2_CLS_RULE_OFFSET_ETH */
43 /* + Sizeof Eth fields */
44 #define DPAA2_CLS_RULE_OFFSET_IPV4 14 /* DPAA2_CLS_RULE_OFFSET_VLAN */
45 /* + Sizeof VLAN fields */
46 #define DPAA2_CLS_RULE_OFFSET_IPV6 25 /* DPAA2_CLS_RULE_OFFSET_IPV4 */
47 /* + Sizeof IPV4 fields */
48 #define DPAA2_CLS_RULE_OFFSET_ICMP 58 /* DPAA2_CLS_RULE_OFFSET_IPV6 */
49 /* + Sizeof IPV6 fields */
50 #define DPAA2_CLS_RULE_OFFSET_UDP 60 /* DPAA2_CLS_RULE_OFFSET_ICMP */
51 /* + Sizeof ICMP fields */
52 #define DPAA2_CLS_RULE_OFFSET_TCP 64 /* DPAA2_CLS_RULE_OFFSET_UDP */
53 /* + Sizeof UDP fields */
54 #define DPAA2_CLS_RULE_OFFSET_SCTP 68 /* DPAA2_CLS_RULE_OFFSET_TCP */
55 /* + Sizeof TCP fields */
56 #define DPAA2_CLS_RULE_OFFSET_GRE 72 /* DPAA2_CLS_RULE_OFFSET_SCTP */
57 /* + Sizeof SCTP fields */
/* Flow item (pattern) types this PMD can classify on; consulted by the
 * generic flow-validation path.
 * NOTE(review): this extract of the file is lossy -- the closing "};"
 * of the array is not visible in this chunk.
 */
60 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
61 RTE_FLOW_ITEM_TYPE_END,
62 RTE_FLOW_ITEM_TYPE_ETH,
63 RTE_FLOW_ITEM_TYPE_VLAN,
64 RTE_FLOW_ITEM_TYPE_IPV4,
65 RTE_FLOW_ITEM_TYPE_IPV6,
66 RTE_FLOW_ITEM_TYPE_ICMP,
67 RTE_FLOW_ITEM_TYPE_UDP,
68 RTE_FLOW_ITEM_TYPE_TCP,
69 RTE_FLOW_ITEM_TYPE_SCTP,
70 RTE_FLOW_ITEM_TYPE_GRE,
/* Flow action types this PMD supports: steer to a queue or distribute via
 * RSS. NOTE(review): closing "};" not visible in this lossy extract. */
74 enum rte_flow_action_type dpaa2_supported_action_type[] = {
75 RTE_FLOW_ACTION_TYPE_END,
76 RTE_FLOW_ACTION_TYPE_QUEUE,
77 RTE_FLOW_ACTION_TYPE_RSS
/* Currently-selected ethdev filter type; stays RTE_ETH_FILTER_NONE until a
 * filter-ctrl operation picks one. Not visibly written in this chunk. */
80 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
/* Fallback match mask used whenever rte_flow_item::mask is NULL. It is cast
 * below to rte_flow_item_eth/vlan/ipv4/ipv6/icmp/udp/tcp masks, so the
 * caller must point it at the default mask matching the current item --
 * NOTE(review): where it is assigned is not visible here; confirm. */
81 static const void *default_mask;
/*
 * Configure classification for an RTE_FLOW_ITEM_TYPE_ETH pattern item.
 * Records the item in the QoS table slot (pattern[8]) and the per-group FS
 * table slot, appends ETH SA/DA/TYPE header extracts to the QoS and FS key
 * profiles when needed, and copies the item's spec/mask bytes into the
 * rule's key/mask areas at DPAA2_CLS_RULE_OFFSET_ETH.
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE telling the caller which tables to reprogram.
 * NOTE(review): this extract is lossy -- the "static int" line, braces,
 * declarations of group/j/index/key_iova/mask_iova, the entry_found/break
 * logic and the index++ lines between extracts are missing between the
 * numbered lines below.
 */
84 dpaa2_configure_flow_eth(struct rte_flow *flow,
85 struct rte_eth_dev *dev,
86 const struct rte_flow_attr *attr,
87 const struct rte_flow_item *pattern,
88 const struct rte_flow_action actions[] __rte_unused,
89 struct rte_flow_error *error __rte_unused)
94 int device_configured = 0, entry_found = 0;
96 const struct rte_flow_item_eth *spec, *mask;
98 /* TODO: Currently upper bound of range parameter is not implemented */
99 const struct rte_flow_item_eth *last __rte_unused;
100 struct dpaa2_dev_priv *priv = dev->data->dev_private;
104 /* DPAA2 platform has a limitation that extract parameter can not be */
105 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
106 /* TODO: pattern is an array of 9 elements where 9th pattern element */
107 /* is for QoS table and 1-8th pattern element is for FS tables. */
108 /* It can be changed to macro. */
109 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
110 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
111 DPKG_MAX_NUM_OF_EXTRACTS);
115 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
116 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
117 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type in the QoS (pattern[8]) slot if not seen yet;
 * a new type forces a QoS table reconfigure. */
121 for (j = 0; j < priv->pattern[8].item_count; j++) {
122 if (priv->pattern[8].pattern_type[j] != pattern->type) {
131 priv->pattern[8].pattern_type[j] = pattern->type;
132 priv->pattern[8].item_count++;
133 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same registration for the FS table of this flow group. */
137 for (j = 0; j < priv->pattern[group].item_count; j++) {
138 if (priv->pattern[group].pattern_type[j] != pattern->type) {
147 priv->pattern[group].pattern_type[j] = pattern->type;
148 priv->pattern[group].item_count++;
149 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
152 /* Get traffic class index and flow id to be configured */
154 flow->index = attr->priority;
/* Append ETH SA, DA and TYPE full-field extracts to the QoS key profile.
 * NOTE(review): index++ between extracts is in the missing lines --
 * otherwise the final "num_extracts = index" below would be wrong. */
156 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
157 index = priv->extract.qos_key_cfg.num_extracts;
158 priv->extract.qos_key_cfg.extracts[index].type =
159 DPKG_EXTRACT_FROM_HDR;
160 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
161 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
162 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
165 priv->extract.qos_key_cfg.extracts[index].type =
166 DPKG_EXTRACT_FROM_HDR;
167 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
168 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
169 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
172 priv->extract.qos_key_cfg.extracts[index].type =
173 DPKG_EXTRACT_FROM_HDR;
174 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
175 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
176 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
179 priv->extract.qos_key_cfg.num_extracts = index;
/* Same three extracts for this group's FS key profile. */
182 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
183 index = priv->extract.fs_key_cfg[group].num_extracts;
184 priv->extract.fs_key_cfg[group].extracts[index].type =
185 DPKG_EXTRACT_FROM_HDR;
186 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
187 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
188 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
191 priv->extract.fs_key_cfg[group].extracts[index].type =
192 DPKG_EXTRACT_FROM_HDR;
193 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
194 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
195 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
198 priv->extract.fs_key_cfg[group].extracts[index].type =
199 DPKG_EXTRACT_FROM_HDR;
200 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
201 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
202 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
205 priv->extract.fs_key_cfg[group].num_extracts = index;
208 /* Parse pattern list to get the matching parameters */
209 spec = (const struct rte_flow_item_eth *)pattern->spec;
210 last = (const struct rte_flow_item_eth *)pattern->last;
211 mask = (const struct rte_flow_item_eth *)
212 (pattern->mask ? pattern->mask : default_mask);
/* Lay out key bytes as src MAC | dst MAC | ether type, matching the
 * SA/DA/TYPE extract order above. */
215 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ETH;
216 memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
217 sizeof(struct ether_addr));
218 key_iova += sizeof(struct ether_addr);
219 memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
220 sizeof(struct ether_addr));
221 key_iova += sizeof(struct ether_addr);
222 memcpy((void *)key_iova, (const void *)(&spec->type),
/* Mirror the same layout into the mask area. */
226 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ETH;
227 memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
228 sizeof(struct ether_addr));
229 mask_iova += sizeof(struct ether_addr);
230 memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
231 sizeof(struct ether_addr));
232 mask_iova += sizeof(struct ether_addr);
233 memcpy((void *)mask_iova, (const void *)(&mask->type),
236 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ETH +
237 ((2 * sizeof(struct ether_addr)) +
238 sizeof(rte_be16_t)));
239 return device_configured;
/*
 * Configure classification for an RTE_FLOW_ITEM_TYPE_VLAN pattern item.
 * Registers the item in the QoS (pattern[8]) and per-group FS slots, adds a
 * single VLAN TCI full-field extract to each key profile when needed, and
 * copies the TCI spec/mask into the rule at DPAA2_CLS_RULE_OFFSET_VLAN.
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 * NOTE(review): lossy extract -- braces, declarations and return paths are
 * missing between the numbered lines.
 */
243 dpaa2_configure_flow_vlan(struct rte_flow *flow,
244 struct rte_eth_dev *dev,
245 const struct rte_flow_attr *attr,
246 const struct rte_flow_item *pattern,
247 const struct rte_flow_action actions[] __rte_unused,
248 struct rte_flow_error *error __rte_unused)
253 int device_configured = 0, entry_found = 0;
255 const struct rte_flow_item_vlan *spec, *mask;
257 const struct rte_flow_item_vlan *last __rte_unused;
258 struct dpaa2_dev_priv *priv = dev->data->dev_private;
262 /* DPAA2 platform has a limitation that extract parameter can not be */
263 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
264 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
265 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
266 DPKG_MAX_NUM_OF_EXTRACTS);
270 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
271 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
272 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type for the QoS table; new type => reconfigure. */
276 for (j = 0; j < priv->pattern[8].item_count; j++) {
277 if (priv->pattern[8].pattern_type[j] != pattern->type) {
286 priv->pattern[8].pattern_type[j] = pattern->type;
287 priv->pattern[8].item_count++;
288 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same registration for this group's FS table. */
292 for (j = 0; j < priv->pattern[group].item_count; j++) {
293 if (priv->pattern[group].pattern_type[j] != pattern->type) {
302 priv->pattern[group].pattern_type[j] = pattern->type;
303 priv->pattern[group].item_count++;
304 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
308 /* Get traffic class index and flow id to be configured */
310 flow->index = attr->priority;
/* Single VLAN TCI extract; num_extracts is bumped directly here (unlike
 * the multi-extract helpers which assign the final index). */
312 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
313 index = priv->extract.qos_key_cfg.num_extracts;
314 priv->extract.qos_key_cfg.extracts[index].type =
315 DPKG_EXTRACT_FROM_HDR;
316 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
317 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
318 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
319 priv->extract.qos_key_cfg.num_extracts++;
322 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
323 index = priv->extract.fs_key_cfg[group].num_extracts;
324 priv->extract.fs_key_cfg[group].extracts[index].type =
325 DPKG_EXTRACT_FROM_HDR;
326 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
327 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
328 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
329 priv->extract.fs_key_cfg[group].num_extracts++;
332 /* Parse pattern list to get the matching parameters */
333 spec = (const struct rte_flow_item_vlan *)pattern->spec;
334 last = (const struct rte_flow_item_vlan *)pattern->last;
335 mask = (const struct rte_flow_item_vlan *)
336 (pattern->mask ? pattern->mask : default_mask);
/* Copy the 16-bit TCI into key and mask at the VLAN offset. */
338 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
339 memcpy((void *)key_iova, (const void *)(&spec->tci),
342 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
343 memcpy((void *)mask_iova, (const void *)(&mask->tci),
346 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_VLAN + sizeof(rte_be16_t));
347 return device_configured;
/*
 * Configure classification for an RTE_FLOW_ITEM_TYPE_IPV4 pattern item.
 * Registers the item in the QoS/FS slots, adds IP SRC, DST and PROTO
 * full-field extracts to each key profile when needed, and copies
 * src_addr | dst_addr | next_proto_id spec/mask bytes into the rule at
 * DPAA2_CLS_RULE_OFFSET_IPV4 (matching the extract order).
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 * NOTE(review): lossy extract -- braces, declarations, index++ lines and
 * return paths are missing between the numbered lines.
 */
351 dpaa2_configure_flow_ipv4(struct rte_flow *flow,
352 struct rte_eth_dev *dev,
353 const struct rte_flow_attr *attr,
354 const struct rte_flow_item *pattern,
355 const struct rte_flow_action actions[] __rte_unused,
356 struct rte_flow_error *error __rte_unused)
361 int device_configured = 0, entry_found = 0;
363 const struct rte_flow_item_ipv4 *spec, *mask;
365 const struct rte_flow_item_ipv4 *last __rte_unused;
366 struct dpaa2_dev_priv *priv = dev->data->dev_private;
370 /* DPAA2 platform has a limitation that extract parameter can not be */
371 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
372 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
373 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
374 DPKG_MAX_NUM_OF_EXTRACTS);
378 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
379 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
380 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type for the QoS table; new type => reconfigure. */
384 for (j = 0; j < priv->pattern[8].item_count; j++) {
385 if (priv->pattern[8].pattern_type[j] != pattern->type) {
394 priv->pattern[8].pattern_type[j] = pattern->type;
395 priv->pattern[8].item_count++;
396 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same registration for this group's FS table. */
400 for (j = 0; j < priv->pattern[group].item_count; j++) {
401 if (priv->pattern[group].pattern_type[j] != pattern->type) {
410 priv->pattern[group].pattern_type[j] = pattern->type;
411 priv->pattern[group].item_count++;
412 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
415 /* Get traffic class index and flow id to be configured */
417 flow->index = attr->priority;
/* QoS key profile: IP SRC, DST, PROTO extracts (index++ in missing lines). */
419 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
420 index = priv->extract.qos_key_cfg.num_extracts;
421 priv->extract.qos_key_cfg.extracts[index].type =
422 DPKG_EXTRACT_FROM_HDR;
423 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
424 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
425 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
428 priv->extract.qos_key_cfg.extracts[index].type =
429 DPKG_EXTRACT_FROM_HDR;
430 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
431 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
432 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
435 priv->extract.qos_key_cfg.extracts[index].type =
436 DPKG_EXTRACT_FROM_HDR;
437 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
438 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
439 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
442 priv->extract.qos_key_cfg.num_extracts = index;
/* FS key profile: same three extracts for this group. */
445 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
446 index = priv->extract.fs_key_cfg[group].num_extracts;
447 priv->extract.fs_key_cfg[group].extracts[index].type =
448 DPKG_EXTRACT_FROM_HDR;
449 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
450 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
451 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
454 priv->extract.fs_key_cfg[group].extracts[index].type =
455 DPKG_EXTRACT_FROM_HDR;
456 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
457 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
458 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
461 priv->extract.fs_key_cfg[group].extracts[index].type =
462 DPKG_EXTRACT_FROM_HDR;
463 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
464 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
465 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
468 priv->extract.fs_key_cfg[group].num_extracts = index;
471 /* Parse pattern list to get the matching parameters */
472 spec = (const struct rte_flow_item_ipv4 *)pattern->spec;
473 last = (const struct rte_flow_item_ipv4 *)pattern->last;
474 mask = (const struct rte_flow_item_ipv4 *)
475 (pattern->mask ? pattern->mask : default_mask);
/* Key layout: 4B src | 4B dst | 1B proto, matching the extracts above. */
477 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
478 memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
480 key_iova += sizeof(uint32_t);
481 memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
483 key_iova += sizeof(uint32_t);
484 memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
/* Mirror the same layout into the mask area. */
487 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
488 memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
490 mask_iova += sizeof(uint32_t);
491 memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
493 mask_iova += sizeof(uint32_t);
494 memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
497 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV4 +
498 (2 * sizeof(uint32_t)) + sizeof(uint8_t));
500 return device_configured;
/*
 * Configure classification for an RTE_FLOW_ITEM_TYPE_IPV6 pattern item.
 * Registers the item in the QoS/FS slots, adds IP SRC and DST full-field
 * extracts to each key profile when needed, and copies the 16-byte
 * src/dst addresses into the rule at DPAA2_CLS_RULE_OFFSET_IPV6.
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 * FIX: the FS-profile block below used to be guarded by
 * DPAA2_QOS_TABLE_RECONFIGURE (copy-paste from the QoS block), so the FS
 * key profile was programmed on the wrong condition; every sibling
 * configure helper guards fs_key_cfg with DPAA2_FS_TABLE_RECONFIGURE.
 * NOTE(review): lossy extract -- braces, declarations, index++ lines and
 * return paths are missing between the numbered lines.
 */
504 dpaa2_configure_flow_ipv6(struct rte_flow *flow,
505 struct rte_eth_dev *dev,
506 const struct rte_flow_attr *attr,
507 const struct rte_flow_item *pattern,
508 const struct rte_flow_action actions[] __rte_unused,
509 struct rte_flow_error *error __rte_unused)
514 int device_configured = 0, entry_found = 0;
516 const struct rte_flow_item_ipv6 *spec, *mask;
518 const struct rte_flow_item_ipv6 *last __rte_unused;
519 struct dpaa2_dev_priv *priv = dev->data->dev_private;
523 /* DPAA2 platform has a limitation that extract parameter can not be */
524 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
525 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
526 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
527 DPKG_MAX_NUM_OF_EXTRACTS);
531 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
532 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
533 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type for the QoS table; new type => reconfigure. */
537 for (j = 0; j < priv->pattern[8].item_count; j++) {
538 if (priv->pattern[8].pattern_type[j] != pattern->type) {
547 priv->pattern[8].pattern_type[j] = pattern->type;
548 priv->pattern[8].item_count++;
549 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same registration for this group's FS table. */
553 for (j = 0; j < priv->pattern[group].item_count; j++) {
554 if (priv->pattern[group].pattern_type[j] != pattern->type) {
563 priv->pattern[group].pattern_type[j] = pattern->type;
564 priv->pattern[group].item_count++;
565 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
568 /* Get traffic class index and flow id to be configured */
570 flow->index = attr->priority;
/* QoS key profile: IP SRC and DST extracts (index++ in missing lines). */
572 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
573 index = priv->extract.qos_key_cfg.num_extracts;
574 priv->extract.qos_key_cfg.extracts[index].type =
575 DPKG_EXTRACT_FROM_HDR;
576 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
577 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
578 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
581 priv->extract.qos_key_cfg.extracts[index].type =
582 DPKG_EXTRACT_FROM_HDR;
583 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
584 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
585 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
588 priv->extract.qos_key_cfg.num_extracts = index;
/* FS key profile -- guarded by the FS flag (see FIX note above). */
591 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
592 index = priv->extract.fs_key_cfg[group].num_extracts;
593 priv->extract.fs_key_cfg[group].extracts[index].type =
594 DPKG_EXTRACT_FROM_HDR;
595 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
596 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
597 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
600 priv->extract.fs_key_cfg[group].extracts[index].type =
601 DPKG_EXTRACT_FROM_HDR;
602 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
603 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
604 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
607 priv->extract.fs_key_cfg[group].num_extracts = index;
610 /* Parse pattern list to get the matching parameters */
611 spec = (const struct rte_flow_item_ipv6 *)pattern->spec;
612 last = (const struct rte_flow_item_ipv6 *)pattern->last;
613 mask = (const struct rte_flow_item_ipv6 *)
614 (pattern->mask ? pattern->mask : default_mask);
/* Key layout: 16B src | 16B dst, matching the extract order above. */
616 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
617 memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
618 sizeof(spec->hdr.src_addr));
619 key_iova += sizeof(spec->hdr.src_addr);
620 memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
621 sizeof(spec->hdr.dst_addr));
623 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
624 memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
625 sizeof(mask->hdr.src_addr));
626 mask_iova += sizeof(mask->hdr.src_addr);
627 memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
628 sizeof(mask->hdr.dst_addr));
/* NOTE(review): mixes sizeof(spec->...) and sizeof(mask->...); both are
 * 16 bytes so the result is the same, but it reads inconsistently. */
630 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV6 +
631 sizeof(spec->hdr.src_addr) +
632 sizeof(mask->hdr.dst_addr));
633 return device_configured;
/*
 * Configure classification for an RTE_FLOW_ITEM_TYPE_ICMP pattern item.
 * Registers the item in the QoS/FS slots, adds ICMP TYPE and CODE
 * full-field extracts to each key profile when needed, and copies the
 * 1-byte type and code spec/mask into the rule at
 * DPAA2_CLS_RULE_OFFSET_ICMP.
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 * FIX: in the mask-copy section below, the cursor advance between
 * icmp_type and icmp_code was "key_iova += sizeof(uint8_t);" (copy-paste
 * from the key section), so mask_iova never advanced and the icmp_code
 * mask overwrote the icmp_type mask byte. Changed to advance mask_iova.
 * NOTE(review): lossy extract -- braces, declarations, index++ lines and
 * return paths are missing between the numbered lines.
 */
637 dpaa2_configure_flow_icmp(struct rte_flow *flow,
638 struct rte_eth_dev *dev,
639 const struct rte_flow_attr *attr,
640 const struct rte_flow_item *pattern,
641 const struct rte_flow_action actions[] __rte_unused,
642 struct rte_flow_error *error __rte_unused)
647 int device_configured = 0, entry_found = 0;
649 const struct rte_flow_item_icmp *spec, *mask;
651 const struct rte_flow_item_icmp *last __rte_unused;
652 struct dpaa2_dev_priv *priv = dev->data->dev_private;
656 /* DPAA2 platform has a limitation that extract parameter can not be */
657 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
658 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
659 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
660 DPKG_MAX_NUM_OF_EXTRACTS);
664 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
665 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
666 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type for the QoS table; new type => reconfigure. */
670 for (j = 0; j < priv->pattern[8].item_count; j++) {
671 if (priv->pattern[8].pattern_type[j] != pattern->type) {
680 priv->pattern[8].pattern_type[j] = pattern->type;
681 priv->pattern[8].item_count++;
682 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same registration for this group's FS table. */
686 for (j = 0; j < priv->pattern[group].item_count; j++) {
687 if (priv->pattern[group].pattern_type[j] != pattern->type) {
696 priv->pattern[group].pattern_type[j] = pattern->type;
697 priv->pattern[group].item_count++;
698 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
701 /* Get traffic class index and flow id to be configured */
703 flow->index = attr->priority;
/* QoS key profile: ICMP TYPE and CODE extracts (index++ in missing lines). */
705 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
706 index = priv->extract.qos_key_cfg.num_extracts;
707 priv->extract.qos_key_cfg.extracts[index].type =
708 DPKG_EXTRACT_FROM_HDR;
709 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
710 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
711 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
714 priv->extract.qos_key_cfg.extracts[index].type =
715 DPKG_EXTRACT_FROM_HDR;
716 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
717 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
718 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
721 priv->extract.qos_key_cfg.num_extracts = index;
/* FS key profile: same two extracts for this group. */
724 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
725 index = priv->extract.fs_key_cfg[group].num_extracts;
726 priv->extract.fs_key_cfg[group].extracts[index].type =
727 DPKG_EXTRACT_FROM_HDR;
728 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
729 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
730 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
733 priv->extract.fs_key_cfg[group].extracts[index].type =
734 DPKG_EXTRACT_FROM_HDR;
735 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
736 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
737 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
740 priv->extract.fs_key_cfg[group].num_extracts = index;
743 /* Parse pattern list to get the matching parameters */
744 spec = (const struct rte_flow_item_icmp *)pattern->spec;
745 last = (const struct rte_flow_item_icmp *)pattern->last;
746 mask = (const struct rte_flow_item_icmp *)
747 (pattern->mask ? pattern->mask : default_mask);
/* Key layout: 1B type | 1B code, matching the extract order above. */
749 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
750 memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
752 key_iova += sizeof(uint8_t);
753 memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
/* Mirror the same layout into the mask area. */
756 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
757 memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
759 mask_iova += sizeof(uint8_t); /* FIX: was "key_iova += ..." */
760 memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
763 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ICMP +
764 (2 * sizeof(uint8_t)));
766 return device_configured;
/*
 * Configure classification for an RTE_FLOW_ITEM_TYPE_UDP pattern item.
 * Registers the item in the QoS/FS slots, adds IP PROTO plus UDP SRC/DST
 * port extracts to each key profile when needed, hard-codes the IPv4
 * protocol byte in the rule key to 0x11 (IPPROTO_UDP), and copies the
 * port spec/mask into the rule at DPAA2_CLS_RULE_OFFSET_UDP.
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 * NOTE(review): lossy extract -- braces, declarations, index++ lines and
 * return paths are missing between the numbered lines.
 */
770 dpaa2_configure_flow_udp(struct rte_flow *flow,
771 struct rte_eth_dev *dev,
772 const struct rte_flow_attr *attr,
773 const struct rte_flow_item *pattern,
774 const struct rte_flow_action actions[] __rte_unused,
775 struct rte_flow_error *error __rte_unused)
780 int device_configured = 0, entry_found = 0;
782 const struct rte_flow_item_udp *spec, *mask;
784 const struct rte_flow_item_udp *last __rte_unused;
785 struct dpaa2_dev_priv *priv = dev->data->dev_private;
789 /* DPAA2 platform has a limitation that extract parameter can not be */
790 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
791 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
792 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
793 DPKG_MAX_NUM_OF_EXTRACTS);
797 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
798 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
799 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type for the QoS table; new type => reconfigure. */
803 for (j = 0; j < priv->pattern[8].item_count; j++) {
804 if (priv->pattern[8].pattern_type[j] != pattern->type) {
813 priv->pattern[8].pattern_type[j] = pattern->type;
814 priv->pattern[8].item_count++;
815 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same registration for this group's FS table. */
819 for (j = 0; j < priv->pattern[group].item_count; j++) {
820 if (priv->pattern[group].pattern_type[j] != pattern->type) {
829 priv->pattern[group].pattern_type[j] = pattern->type;
830 priv->pattern[group].item_count++;
831 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
834 /* Get traffic class index and flow id to be configured */
836 flow->index = attr->priority;
/* QoS key profile: IP PROTO, UDP SRC port, UDP DST port extracts
 * (index++ in missing lines). */
838 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
839 index = priv->extract.qos_key_cfg.num_extracts;
840 priv->extract.qos_key_cfg.extracts[index].type =
841 DPKG_EXTRACT_FROM_HDR;
842 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
843 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
844 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
847 priv->extract.qos_key_cfg.extracts[index].type =
848 DPKG_EXTRACT_FROM_HDR;
849 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
850 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
851 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
854 priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
855 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
856 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
857 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
860 priv->extract.qos_key_cfg.num_extracts = index;
/* FS key profile: same three extracts for this group. */
863 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
864 index = priv->extract.fs_key_cfg[group].num_extracts;
865 priv->extract.fs_key_cfg[group].extracts[index].type =
866 DPKG_EXTRACT_FROM_HDR;
867 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
868 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
869 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
872 priv->extract.fs_key_cfg[group].extracts[index].type =
873 DPKG_EXTRACT_FROM_HDR;
874 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
875 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
876 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
879 priv->extract.fs_key_cfg[group].extracts[index].type =
880 DPKG_EXTRACT_FROM_HDR;
881 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
882 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
883 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
886 priv->extract.fs_key_cfg[group].num_extracts = index;
889 /* Parse pattern list to get the matching parameters */
890 spec = (const struct rte_flow_item_udp *)pattern->spec;
891 last = (const struct rte_flow_item_udp *)pattern->last;
892 mask = (const struct rte_flow_item_udp *)
893 (pattern->mask ? pattern->mask : default_mask);
/* Force the IPv4 protocol byte in the key to 0x11 (IPPROTO_UDP) so the
 * rule only matches UDP. NOTE(review): the corresponding mask byte is not
 * set in the visible lines -- confirm the missing lines (or defaults)
 * make the proto byte actually participate in matching. */
895 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
896 (2 * sizeof(uint32_t));
897 memset((void *)key_iova, 0x11, sizeof(uint8_t));
/* Key layout at the UDP offset: 2B src port | 2B dst port. */
898 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_UDP;
899 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
901 key_iova += sizeof(uint16_t);
902 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
/* Mirror the same layout into the mask area. */
905 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_UDP;
906 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
908 mask_iova += sizeof(uint16_t);
909 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
912 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_UDP +
913 (2 * sizeof(uint16_t)));
915 return device_configured;
919 dpaa2_configure_flow_tcp(struct rte_flow *flow,
920 struct rte_eth_dev *dev,
921 const struct rte_flow_attr *attr,
922 const struct rte_flow_item *pattern,
923 const struct rte_flow_action actions[] __rte_unused,
924 struct rte_flow_error *error __rte_unused)
929 int device_configured = 0, entry_found = 0;
931 const struct rte_flow_item_tcp *spec, *mask;
/* NOTE(review): this is the tail of dpaa2_configure_flow_tcp(); the function
 * header and most local declarations precede this extracted chunk.
 */
933 const struct rte_flow_item_tcp *last __rte_unused;
934 struct dpaa2_dev_priv *priv = dev->data->dev_private;
938 /* DPAA2 platform has a limitation that extract parameter can not be */
939 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
940 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
941 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
942 DPKG_MAX_NUM_OF_EXTRACTS);
946 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
947 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
948 DPKG_MAX_NUM_OF_EXTRACTS);
/* Slot 8 appears to track the global QoS-table patterns; slot 'group' the
 * per-traffic-class FS-table patterns — TODO confirm against priv->pattern.
 */
952 for (j = 0; j < priv->pattern[8].item_count; j++) {
953 if (priv->pattern[8].pattern_type[j] != pattern->type) {
962 priv->pattern[8].pattern_type[j] = pattern->type;
963 priv->pattern[8].item_count++;
964 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
968 for (j = 0; j < priv->pattern[group].item_count; j++) {
969 if (priv->pattern[group].pattern_type[j] != pattern->type) {
978 priv->pattern[group].pattern_type[j] = pattern->type;
979 priv->pattern[group].item_count++;
980 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
983 /* Get traffic class index and flow id to be configured */
985 flow->index = attr->priority;
/* Append IP-proto + TCP src/dst port extracts to the QoS key config. */
987 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
988 index = priv->extract.qos_key_cfg.num_extracts;
989 priv->extract.qos_key_cfg.extracts[index].type =
990 DPKG_EXTRACT_FROM_HDR;
991 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
992 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
993 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
996 priv->extract.qos_key_cfg.extracts[index].type =
997 DPKG_EXTRACT_FROM_HDR;
998 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
999 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1000 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
1003 priv->extract.qos_key_cfg.extracts[index].type =
1004 DPKG_EXTRACT_FROM_HDR;
1005 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1006 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1007 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
1010 priv->extract.qos_key_cfg.num_extracts = index;
/* Same three extracts for the per-TC flow-steering key config. */
1013 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1014 index = priv->extract.fs_key_cfg[group].num_extracts;
1015 priv->extract.fs_key_cfg[group].extracts[index].type =
1016 DPKG_EXTRACT_FROM_HDR;
1017 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1018 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
1019 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
1022 priv->extract.fs_key_cfg[group].extracts[index].type =
1023 DPKG_EXTRACT_FROM_HDR;
1024 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1025 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1026 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
1029 priv->extract.fs_key_cfg[group].extracts[index].type =
1030 DPKG_EXTRACT_FROM_HDR;
1031 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1032 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1033 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
1036 priv->extract.fs_key_cfg[group].num_extracts = index;
1039 /* Parse pattern list to get the matching parameters */
1040 spec = (const struct rte_flow_item_tcp *)pattern->spec;
1041 last = (const struct rte_flow_item_tcp *)pattern->last;
1042 mask = (const struct rte_flow_item_tcp *)
1043 (pattern->mask ? pattern->mask : default_mask);
/* 0x06 is IPPROTO_TCP, written into the IP-protocol byte of the rule key. */
1045 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
1046 (2 * sizeof(uint32_t));
1047 memset((void *)key_iova, 0x06, sizeof(uint8_t));
1048 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_TCP;
1049 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
1051 key_iova += sizeof(uint16_t);
1052 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
1055 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_TCP;
1056 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
1058 mask_iova += sizeof(uint16_t);
1059 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
/* Key covers everything up to and including both 16-bit TCP ports. */
1062 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_TCP +
1063 (2 * sizeof(uint16_t)));
1065 return device_configured;
1069 dpaa2_configure_flow_sctp(struct rte_flow *flow,
1070 struct rte_eth_dev *dev,
1071 const struct rte_flow_attr *attr,
1072 const struct rte_flow_item *pattern,
1073 const struct rte_flow_action actions[] __rte_unused,
1074 struct rte_flow_error *error __rte_unused)
1079 int device_configured = 0, entry_found = 0;
1081 const struct rte_flow_item_sctp *spec, *mask;
1083 const struct rte_flow_item_sctp *last __rte_unused;
1084 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1086 group = attr->group;
1088 /* DPAA2 platform has a limitation that extract parameter can not be */
1089 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1090 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1091 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1092 DPKG_MAX_NUM_OF_EXTRACTS);
1096 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1097 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1098 DPKG_MAX_NUM_OF_EXTRACTS);
1102 for (j = 0; j < priv->pattern[8].item_count; j++) {
1103 if (priv->pattern[8].pattern_type[j] != pattern->type) {
1112 priv->pattern[8].pattern_type[j] = pattern->type;
1113 priv->pattern[8].item_count++;
1114 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
1118 for (j = 0; j < priv->pattern[group].item_count; j++) {
1119 if (priv->pattern[group].pattern_type[j] != pattern->type) {
1128 priv->pattern[group].pattern_type[j] = pattern->type;
1129 priv->pattern[group].item_count++;
1130 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1133 /* Get traffic class index and flow id to be configured */
1134 flow->tc_id = group;
1135 flow->index = attr->priority;
1137 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1138 index = priv->extract.qos_key_cfg.num_extracts;
1139 priv->extract.qos_key_cfg.extracts[index].type =
1140 DPKG_EXTRACT_FROM_HDR;
1141 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1142 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
1143 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
1146 priv->extract.qos_key_cfg.extracts[index].type =
1147 DPKG_EXTRACT_FROM_HDR;
1148 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1149 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1150 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1153 priv->extract.qos_key_cfg.extracts[index].type =
1154 DPKG_EXTRACT_FROM_HDR;
1155 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1156 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1157 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1160 priv->extract.qos_key_cfg.num_extracts = index;
1163 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1164 index = priv->extract.fs_key_cfg[group].num_extracts;
1165 priv->extract.fs_key_cfg[group].extracts[index].type =
1166 DPKG_EXTRACT_FROM_HDR;
1167 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1168 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
1169 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
1172 priv->extract.fs_key_cfg[group].extracts[index].type =
1173 DPKG_EXTRACT_FROM_HDR;
1174 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1175 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1176 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1179 priv->extract.fs_key_cfg[group].extracts[index].type =
1180 DPKG_EXTRACT_FROM_HDR;
1181 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1182 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1183 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1186 priv->extract.fs_key_cfg[group].num_extracts = index;
1189 /* Parse pattern list to get the matching parameters */
1190 spec = (const struct rte_flow_item_sctp *)pattern->spec;
1191 last = (const struct rte_flow_item_sctp *)pattern->last;
1192 mask = (const struct rte_flow_item_sctp *)
1193 (pattern->mask ? pattern->mask : default_mask);
1195 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
1196 (2 * sizeof(uint32_t));
1197 memset((void *)key_iova, 0x84, sizeof(uint8_t));
1198 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
1199 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
1201 key_iova += sizeof(uint16_t);
1202 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
1205 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
1206 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
1208 mask_iova += sizeof(uint16_t);
1209 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
1212 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_SCTP +
1213 (2 * sizeof(uint16_t)));
1214 return device_configured;
1218 dpaa2_configure_flow_gre(struct rte_flow *flow,
1219 struct rte_eth_dev *dev,
1220 const struct rte_flow_attr *attr,
1221 const struct rte_flow_item *pattern,
1222 const struct rte_flow_action actions[] __rte_unused,
1223 struct rte_flow_error *error __rte_unused)
1228 int device_configured = 0, entry_found = 0;
1230 const struct rte_flow_item_gre *spec, *mask;
1232 const struct rte_flow_item_gre *last __rte_unused;
1233 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1235 group = attr->group;
1237 /* DPAA2 platform has a limitation that extract parameter can not be */
1238 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1239 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1240 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1241 DPKG_MAX_NUM_OF_EXTRACTS);
1245 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1246 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1247 DPKG_MAX_NUM_OF_EXTRACTS);
1251 for (j = 0; j < priv->pattern[8].item_count; j++) {
1252 if (priv->pattern[8].pattern_type[j] != pattern->type) {
1261 priv->pattern[8].pattern_type[j] = pattern->type;
1262 priv->pattern[8].item_count++;
1263 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
1267 for (j = 0; j < priv->pattern[group].item_count; j++) {
1268 if (priv->pattern[group].pattern_type[j] != pattern->type) {
1277 priv->pattern[group].pattern_type[j] = pattern->type;
1278 priv->pattern[group].item_count++;
1279 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1282 /* Get traffic class index and flow id to be configured */
1283 flow->tc_id = group;
1284 flow->index = attr->priority;
1286 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1287 index = priv->extract.qos_key_cfg.num_extracts;
1288 priv->extract.qos_key_cfg.extracts[index].type =
1289 DPKG_EXTRACT_FROM_HDR;
1290 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1291 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1292 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1295 priv->extract.qos_key_cfg.num_extracts = index;
1298 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1299 index = priv->extract.fs_key_cfg[group].num_extracts;
1300 priv->extract.fs_key_cfg[group].extracts[index].type =
1301 DPKG_EXTRACT_FROM_HDR;
1302 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1303 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1304 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1307 priv->extract.fs_key_cfg[group].num_extracts = index;
1310 /* Parse pattern list to get the matching parameters */
1311 spec = (const struct rte_flow_item_gre *)pattern->spec;
1312 last = (const struct rte_flow_item_gre *)pattern->last;
1313 mask = (const struct rte_flow_item_gre *)
1314 (pattern->mask ? pattern->mask : default_mask);
1316 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_GRE;
1317 memcpy((void *)key_iova, (const void *)(&spec->protocol),
1318 sizeof(rte_be16_t));
1320 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_GRE;
1321 memcpy((void *)mask_iova, (const void *)(&mask->protocol),
1322 sizeof(rte_be16_t));
1324 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_GRE + sizeof(rte_be16_t));
1326 return device_configured;
/* Top-level flow programming: walks the pattern list dispatching to the
 * per-protocol configure helpers, then walks the action list programming
 * the QoS/FS tables (QUEUE) or the hash distribution (RSS).
 * NOTE(review): extraction-garbled chunk — several interior lines (braces,
 * error returns, increments of i/j) are missing from this copy.
 */
1330 dpaa2_generic_flow_set(struct rte_flow *flow,
1331 struct rte_eth_dev *dev,
1332 const struct rte_flow_attr *attr,
1333 const struct rte_flow_item pattern[],
1334 const struct rte_flow_action actions[],
1335 struct rte_flow_error *error)
1337 const struct rte_flow_action_queue *dest_queue;
1338 const struct rte_flow_action_rss *rss_conf;
1340 int is_keycfg_configured = 0, end_of_list = 0;
1341 int ret = 0, i = 0, j = 0;
1342 struct dpni_attr nic_attr;
1343 struct dpni_rx_tc_dist_cfg tc_cfg;
1344 struct dpni_qos_tbl_cfg qos_cfg;
1345 struct dpkg_profile_cfg key_cfg;
1346 struct dpni_fs_action_cfg action;
1347 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1348 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1351 /* Parse pattern list to get the matching parameters */
1352 while (!end_of_list) {
1353 switch (pattern[i].type) {
1354 case RTE_FLOW_ITEM_TYPE_ETH:
1355 is_keycfg_configured = dpaa2_configure_flow_eth(flow,
1362 case RTE_FLOW_ITEM_TYPE_VLAN:
1363 is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
1370 case RTE_FLOW_ITEM_TYPE_IPV4:
1371 is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
1378 case RTE_FLOW_ITEM_TYPE_IPV6:
1379 is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
1386 case RTE_FLOW_ITEM_TYPE_ICMP:
1387 is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
1394 case RTE_FLOW_ITEM_TYPE_UDP:
1395 is_keycfg_configured = dpaa2_configure_flow_udp(flow,
1402 case RTE_FLOW_ITEM_TYPE_TCP:
1403 is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
1410 case RTE_FLOW_ITEM_TYPE_SCTP:
1411 is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
1417 case RTE_FLOW_ITEM_TYPE_GRE:
1418 is_keycfg_configured = dpaa2_configure_flow_gre(flow,
1425 case RTE_FLOW_ITEM_TYPE_END:
1427 break; /*End of List*/
1429 DPAA2_PMD_ERR("Invalid action type");
1436 /* Let's parse action on matching traffic */
1438 while (!end_of_list) {
1439 switch (actions[j].type) {
1440 case RTE_FLOW_ACTION_TYPE_QUEUE:
1441 dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
1442 flow->flow_id = dest_queue->index;
1443 flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
1444 memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
1445 action.flow_id = flow->flow_id;
/* Re-program the QoS key layout only when a configure helper changed it. */
1446 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1447 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1448 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1450 "Unable to prepare extract parameters");
1454 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
1455 qos_cfg.discard_on_miss = true;
1456 qos_cfg.keep_entries = true;
1457 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1458 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1459 priv->token, &qos_cfg);
1462 "Distribution cannot be configured.(%d)"
1467 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1468 if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
1469 (uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
1471 "Unable to prepare extract parameters");
1475 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1476 tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
1477 tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
1478 tc_cfg.key_cfg_iova =
1479 (uint64_t)priv->extract.fs_extract_param[flow->tc_id];
1480 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1481 tc_cfg.fs_cfg.keep_entries = true;
1482 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1484 flow->tc_id, &tc_cfg);
1487 "Distribution cannot be configured.(%d)"
1492 /* Configure QoS table first */
1493 memset(&nic_attr, 0, sizeof(struct dpni_attr));
1494 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1495 priv->token, &nic_attr);
1498 "Failure to get attribute. dpni@%p err code(%d)\n",
1503 action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
1504 index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1505 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
1506 priv->token, &flow->rule,
1507 flow->tc_id, index);
1510 "Error in addnig entry to QoS table(%d)", ret);
1514 /* Then Configure FS table */
1515 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1516 flow->tc_id, flow->index,
1517 &flow->rule, &action);
1520 "Error in adding entry to FS table(%d)", ret);
1524 case RTE_FLOW_ACTION_TYPE_RSS:
1525 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1526 priv->token, &nic_attr);
1529 "Failure to get attribute. dpni@%p err code(%d)\n",
1533 rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
/* Every RSS queue must belong to the traffic class named by attr->group. */
1534 for (i = 0; i < (int)rss_conf->queue_num; i++) {
1535 if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
1536 rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
1538 "Queue/Group combination are not supported\n");
1543 flow->action = RTE_FLOW_ACTION_TYPE_RSS;
1544 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
1548 "unable to set flow distribution.please check queue config\n");
1552 /* Allocate DMA'ble memory to write the rules */
1553 param = (size_t)rte_malloc(NULL, 256, 64);
1555 DPAA2_PMD_ERR("Memory allocation failure\n");
1559 if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
1561 "Unable to prepare extract parameters");
1562 rte_free((void *)param);
1566 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1567 tc_cfg.dist_size = rss_conf->queue_num;
1568 tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
1569 tc_cfg.key_cfg_iova = (size_t)param;
1570 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1572 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1573 priv->token, flow->tc_id,
1577 "Distribution cannot be configured: %d\n", ret);
1578 rte_free((void *)param);
1582 rte_free((void *)param);
/* NOTE(review): this tests the FS flag but then re-programs the *QoS*
 * table/key config — looks like it should test
 * DPAA2_QOS_TABLE_RECONFIGURE; confirm intended behavior.
 */
1583 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1584 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1585 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1587 "Unable to prepare extract parameters");
1591 sizeof(struct dpni_qos_tbl_cfg));
1592 qos_cfg.discard_on_miss = true;
1593 qos_cfg.keep_entries = true;
1594 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1595 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1596 priv->token, &qos_cfg);
1599 "Distribution can not be configured(%d)\n",
1605 /* Add Rule into QoS table */
1606 index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1607 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1608 &flow->rule, flow->tc_id,
1612 "Error in entry addition in QoS table(%d)",
1617 case RTE_FLOW_ACTION_TYPE_END:
1621 DPAA2_PMD_ERR("Invalid action type");
1632 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
1633 const struct rte_flow_attr *attr)
1637 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
1638 DPAA2_PMD_ERR("Priority group is out of range\n");
1641 if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
1642 DPAA2_PMD_ERR("Priority within the group is out of range\n");
1645 if (unlikely(attr->egress)) {
1647 "Flow configuration is not supported on egress side\n");
1650 if (unlikely(!attr->ingress)) {
1651 DPAA2_PMD_ERR("Ingress flag must be configured\n");
1658 dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
1660 switch (pattern->type) {
1661 case RTE_FLOW_ITEM_TYPE_ETH:
1662 default_mask = (const void *)&rte_flow_item_eth_mask;
1664 case RTE_FLOW_ITEM_TYPE_VLAN:
1665 default_mask = (const void *)&rte_flow_item_vlan_mask;
1667 case RTE_FLOW_ITEM_TYPE_IPV4:
1668 default_mask = (const void *)&rte_flow_item_ipv4_mask;
1670 case RTE_FLOW_ITEM_TYPE_IPV6:
1671 default_mask = (const void *)&rte_flow_item_ipv6_mask;
1673 case RTE_FLOW_ITEM_TYPE_ICMP:
1674 default_mask = (const void *)&rte_flow_item_icmp_mask;
1676 case RTE_FLOW_ITEM_TYPE_UDP:
1677 default_mask = (const void *)&rte_flow_item_udp_mask;
1679 case RTE_FLOW_ITEM_TYPE_TCP:
1680 default_mask = (const void *)&rte_flow_item_tcp_mask;
1682 case RTE_FLOW_ITEM_TYPE_SCTP:
1683 default_mask = (const void *)&rte_flow_item_sctp_mask;
1685 case RTE_FLOW_ITEM_TYPE_GRE:
1686 default_mask = (const void *)&rte_flow_item_gre_mask;
1689 DPAA2_PMD_ERR("Invalid pattern type");
1694 dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
1695 const struct rte_flow_item pattern[])
1697 unsigned int i, j, k, is_found = 0;
1700 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1701 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
1702 if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
1712 /* Lets verify other combinations of given pattern rules */
1713 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1714 if (!pattern[j].spec) {
1718 if ((pattern[j].last) && (!pattern[j].mask))
1719 dpaa2_dev_update_default_mask(&pattern[j]);
1722 /* DPAA2 platform has a limitation that extract parameter can not be */
1723 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1724 for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1725 for (j = 0; j < MAX_TCS + 1; j++) {
1726 for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; j++) {
1727 if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type)
1730 if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS)
1738 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
1740 unsigned int i, j, is_found = 0;
1743 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1744 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
1745 if (dpaa2_supported_action_type[i] == actions[j].type) {
1755 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1756 if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
1763 int dpaa2_flow_validate(struct rte_eth_dev *dev,
1764 const struct rte_flow_attr *flow_attr,
1765 const struct rte_flow_item pattern[],
1766 const struct rte_flow_action actions[],
1767 struct rte_flow_error *error __rte_unused)
1769 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1770 struct dpni_attr dpni_attr;
1771 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1772 uint16_t token = priv->token;
1775 memset(&dpni_attr, 0, sizeof(struct dpni_attr));
1776 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
1779 "Failure to get dpni@%p attribute, err code %d\n",
1784 /* Verify input attributes */
1785 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
1788 "Invalid attributes are given\n");
1789 goto not_valid_params;
1791 /* Verify input pattern list */
1792 ret = dpaa2_dev_verify_patterns(priv, pattern);
1795 "Invalid pattern list is given\n");
1796 goto not_valid_params;
1798 /* Verify input action list */
1799 ret = dpaa2_dev_verify_actions(actions);
1802 "Invalid action list is given\n");
1803 goto not_valid_params;
1810 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
1811 const struct rte_flow_attr *attr,
1812 const struct rte_flow_item pattern[],
1813 const struct rte_flow_action actions[],
1814 struct rte_flow_error *error)
1816 struct rte_flow *flow = NULL;
1817 size_t key_iova = 0, mask_iova = 0;
1820 flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
1822 DPAA2_PMD_ERR("Failure to allocate memory for flow");
1825 /* Allocate DMA'ble memory to write the rules */
1826 key_iova = (size_t)rte_malloc(NULL, 256, 64);
1829 "Memory allocation failure for rule configration\n");
1830 goto creation_error;
1832 mask_iova = (size_t)rte_malloc(NULL, 256, 64);
1835 "Memory allocation failure for rule configration\n");
1836 goto creation_error;
1839 flow->rule.key_iova = key_iova;
1840 flow->rule.mask_iova = mask_iova;
1841 flow->rule.key_size = 0;
1843 switch (dpaa2_filter_type) {
1844 case RTE_ETH_FILTER_GENERIC:
1845 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
1849 "Failure to create flow, return code (%d)", ret);
1850 goto creation_error;
1854 DPAA2_PMD_ERR("Filter type (%d) not supported",
1863 rte_free((void *)flow);
1865 rte_free((void *)key_iova);
1867 rte_free((void *)mask_iova);
1872 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
1873 struct rte_flow *flow,
1874 struct rte_flow_error *error __rte_unused)
1877 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1878 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1880 switch (flow->action) {
1881 case RTE_FLOW_ACTION_TYPE_QUEUE:
1882 /* Remove entry from QoS table first */
1883 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1887 "Error in adding entry to QoS table(%d)", ret);
1891 /* Then remove entry from FS table */
1892 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1893 flow->tc_id, &flow->rule);
1896 "Error in entry addition in FS table(%d)", ret);
1900 case RTE_FLOW_ACTION_TYPE_RSS:
1901 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1905 "Error in entry addition in QoS table(%d)", ret);
1911 "Action type (%d) is not supported", flow->action);
1916 /* Now free the flow */
1924 dpaa2_flow_flush(struct rte_eth_dev *dev,
1925 struct rte_flow_error *error __rte_unused)
1928 struct dpni_rx_tc_dist_cfg tc_cfg;
1929 struct dpni_qos_tbl_cfg qos_cfg;
1930 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1931 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1933 /* Reset QoS table */
1934 qos_cfg.default_tc = 0;
1935 qos_cfg.discard_on_miss = false;
1936 qos_cfg.keep_entries = false;
1937 qos_cfg.key_cfg_iova = priv->extract.qos_extract_param;
1938 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW, priv->token, &qos_cfg);
1941 "QoS table is not reset to default: %d\n", ret);
1943 for (tc_id = 0; tc_id < priv->num_rx_tc; tc_id++) {
1944 /* Reset FS table */
1945 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1946 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW, priv->token,
1950 "Error (%d) in flushing entries for TC (%d)",
1957 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
1958 struct rte_flow *flow __rte_unused,
1959 const struct rte_flow_action *actions __rte_unused,
1960 void *data __rte_unused,
1961 struct rte_flow_error *error __rte_unused)
/* Ops table wiring the dpaa2 implementations into the generic rte_flow API. */
1966 const struct rte_flow_ops dpaa2_flow_ops = {
1967 .create = dpaa2_flow_create,
1968 .validate = dpaa2_flow_validate,
1969 .destroy = dpaa2_flow_destroy,
1970 .flush = dpaa2_flow_flush,
1971 .query = dpaa2_flow_query,