1 /* * SPDX-License-Identifier: BSD-3-Clause
13 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
26 LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
27 struct dpni_rule_cfg rule;
32 enum rte_flow_action_type action;
/*
 * Byte offsets of each protocol's fields inside the classification
 * key/mask buffers (flow->rule.key_iova / mask_iova).  Per the TODO
 * below, only the ETH and IPV4 offsets are currently meaningful; note
 * that VLAN and IPV4 intentionally share offset 14 for now.
 */
36 /* Layout for rule compositions for supported patterns */
37 /* TODO: Current design only supports Ethernet + IPv4 based classification. */
38 /* So corresponding offset macros are valid only. Rest are placeholder for */
39 /* now. Once support for other network headers will be added then */
40 /* corresponding macros will be updated with correct values. */
41 #define DPAA2_CLS_RULE_OFFSET_ETH 0 /* Start of buffer */
42 #define DPAA2_CLS_RULE_OFFSET_VLAN 14 /* DPAA2_CLS_RULE_OFFSET_ETH */
43 /* + Sizeof Eth fields */
44 #define DPAA2_CLS_RULE_OFFSET_IPV4 14 /* DPAA2_CLS_RULE_OFFSET_VLAN */
45 /* + Sizeof VLAN fields */
46 #define DPAA2_CLS_RULE_OFFSET_IPV6 25 /* DPAA2_CLS_RULE_OFFSET_IPV4 */
47 /* + Sizeof IPV4 fields */
48 #define DPAA2_CLS_RULE_OFFSET_ICMP 58 /* DPAA2_CLS_RULE_OFFSET_IPV6 */
49 /* + Sizeof IPV6 fields */
50 #define DPAA2_CLS_RULE_OFFSET_UDP 60 /* DPAA2_CLS_RULE_OFFSET_ICMP */
51 /* + Sizeof ICMP fields */
52 #define DPAA2_CLS_RULE_OFFSET_TCP 64 /* DPAA2_CLS_RULE_OFFSET_UDP */
53 /* + Sizeof UDP fields */
54 #define DPAA2_CLS_RULE_OFFSET_SCTP 68 /* DPAA2_CLS_RULE_OFFSET_TCP */
55 /* + Sizeof TCP fields */
56 #define DPAA2_CLS_RULE_OFFSET_GRE 72 /* DPAA2_CLS_RULE_OFFSET_SCTP */
57 /* + Sizeof SCTP fields */
/*
 * rte_flow pattern item types this PMD accepts; each type below has a
 * corresponding dpaa2_configure_flow_* handler in this file.
 */
60 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
61 RTE_FLOW_ITEM_TYPE_END,
62 RTE_FLOW_ITEM_TYPE_ETH,
63 RTE_FLOW_ITEM_TYPE_VLAN,
64 RTE_FLOW_ITEM_TYPE_IPV4,
65 RTE_FLOW_ITEM_TYPE_IPV6,
66 RTE_FLOW_ITEM_TYPE_ICMP,
67 RTE_FLOW_ITEM_TYPE_UDP,
68 RTE_FLOW_ITEM_TYPE_TCP,
69 RTE_FLOW_ITEM_TYPE_SCTP,
70 RTE_FLOW_ITEM_TYPE_GRE,
/* rte_flow action types this PMD accepts (queue steering and RSS). */
74 enum rte_flow_action_type dpaa2_supported_action_type[] = {
75 RTE_FLOW_ACTION_TYPE_END,
76 RTE_FLOW_ACTION_TYPE_QUEUE,
77 RTE_FLOW_ACTION_TYPE_RSS
/* Currently-active ethdev filter type; NONE until a filter is installed. */
80 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
/* Fallback match mask applied when a pattern item's mask is NULL;
 * cast per item type by each dpaa2_configure_flow_* handler. */
81 static const void *default_mask;
/*
 * Handle an ETH pattern item: record the item type in the QoS (slot 8)
 * and per-group FS pattern tables, program DPKG header extracts for the
 * Ethernet SA, DA and TYPE fields, and copy spec/mask into the rule
 * key/mask buffers at DPAA2_CLS_RULE_OFFSET_ETH.
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE flags telling the caller which hardware
 * tables must be re-applied.
 */
84 dpaa2_configure_flow_eth(struct rte_flow *flow,
85 struct rte_eth_dev *dev,
86 const struct rte_flow_attr *attr,
87 const struct rte_flow_item *pattern,
88 const struct rte_flow_action actions[] __rte_unused,
89 struct rte_flow_error *error __rte_unused)
94 int device_configured = 0, entry_found = 0;
96 const struct rte_flow_item_eth *spec, *mask;
98 /* TODO: Currently upper bound of range parameter is not implemented */
99 const struct rte_flow_item_eth *last __rte_unused;
100 struct dpaa2_dev_priv *priv = dev->data->dev_private;
104 /* DPAA2 platform has a limitation that extract parameter can not be */
105 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
106 /* TODO: pattern is an array of 9 elements where 9th pattern element */
107 /* is for QoS table and 1-8th pattern element is for FS tables. */
108 /* It can be changed to macro. */
109 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
110 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
111 DPKG_MAX_NUM_OF_EXTRACTS);
115 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
116 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
117 DPKG_MAX_NUM_OF_EXTRACTS);
/* Scan the QoS pattern table (slot 8) for this item type; if absent,
 * register it and mark the QoS table for reconfiguration. */
121 for (j = 0; j < priv->pattern[8].item_count; j++) {
122 if (priv->pattern[8].pattern_type[j] != pattern->type) {
131 priv->pattern[8].pattern_type[j] = pattern->type;
132 priv->pattern[8].item_count++;
133 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same scan for this traffic class group's FS pattern table. */
137 for (j = 0; j < priv->pattern[group].item_count; j++) {
138 if (priv->pattern[group].pattern_type[j] != pattern->type) {
147 priv->pattern[group].pattern_type[j] = pattern->type;
148 priv->pattern[group].item_count++;
149 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
152 /* Get traffic class index and flow id to be configured */
154 flow->index = attr->priority;
/* Program QoS key extracts: ETH source addr, dest addr, then EtherType.
 * NOTE(review): the final num_extracts assignment relies on 'index'
 * having been incremented after each extract (in lines between these);
 * verify against the full source. */
156 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
157 index = priv->extract.qos_key_cfg.num_extracts;
158 priv->extract.qos_key_cfg.extracts[index].type =
159 DPKG_EXTRACT_FROM_HDR;
160 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
161 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
162 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
165 priv->extract.qos_key_cfg.extracts[index].type =
166 DPKG_EXTRACT_FROM_HDR;
167 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
168 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
169 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
172 priv->extract.qos_key_cfg.extracts[index].type =
173 DPKG_EXTRACT_FROM_HDR;
174 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
175 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
176 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
179 priv->extract.qos_key_cfg.num_extracts = index;
/* Program the same three extracts into this group's FS key config. */
182 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
183 index = priv->extract.fs_key_cfg[group].num_extracts;
184 priv->extract.fs_key_cfg[group].extracts[index].type =
185 DPKG_EXTRACT_FROM_HDR;
186 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
187 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
188 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
191 priv->extract.fs_key_cfg[group].extracts[index].type =
192 DPKG_EXTRACT_FROM_HDR;
193 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
194 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
195 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
198 priv->extract.fs_key_cfg[group].extracts[index].type =
199 DPKG_EXTRACT_FROM_HDR;
200 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
201 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
202 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
205 priv->extract.fs_key_cfg[group].num_extracts = index;
208 /* Parse pattern list to get the matching parameters */
209 spec = (const struct rte_flow_item_eth *)pattern->spec;
210 last = (const struct rte_flow_item_eth *)pattern->last;
211 mask = (const struct rte_flow_item_eth *)
212 (pattern->mask ? pattern->mask : default_mask);
/* Key layout at OFFSET_ETH: src MAC (6B), dst MAC (6B), EtherType. */
215 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ETH;
216 memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
217 sizeof(struct rte_ether_addr));
218 key_iova += sizeof(struct rte_ether_addr);
219 memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
220 sizeof(struct rte_ether_addr));
221 key_iova += sizeof(struct rte_ether_addr);
222 memcpy((void *)key_iova, (const void *)(&spec->type),
/* Mask buffer mirrors the key layout. */
226 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ETH;
227 memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
228 sizeof(struct rte_ether_addr));
229 mask_iova += sizeof(struct rte_ether_addr);
230 memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
231 sizeof(struct rte_ether_addr));
232 mask_iova += sizeof(struct rte_ether_addr);
233 memcpy((void *)mask_iova, (const void *)(&mask->type),
236 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ETH +
237 ((2 * sizeof(struct rte_ether_addr)) +
238 sizeof(rte_be16_t)));
239 return device_configured;
/*
 * Handle a VLAN pattern item: register the item type in the QoS and FS
 * pattern tables, add a single DPKG extract for the VLAN TCI field, and
 * copy the TCI spec/mask into the rule buffers at
 * DPAA2_CLS_RULE_OFFSET_VLAN.
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE flags.
 */
243 dpaa2_configure_flow_vlan(struct rte_flow *flow,
244 struct rte_eth_dev *dev,
245 const struct rte_flow_attr *attr,
246 const struct rte_flow_item *pattern,
247 const struct rte_flow_action actions[] __rte_unused,
248 struct rte_flow_error *error __rte_unused)
253 int device_configured = 0, entry_found = 0;
255 const struct rte_flow_item_vlan *spec, *mask;
/* TODO: range ('last') matching not implemented. */
257 const struct rte_flow_item_vlan *last __rte_unused;
258 struct dpaa2_dev_priv *priv = dev->data->dev_private;
262 /* DPAA2 platform has a limitation that extract parameter can not be */
263 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
264 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
265 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
266 DPKG_MAX_NUM_OF_EXTRACTS);
270 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
271 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
272 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type in the QoS pattern table (slot 8) if new. */
276 for (j = 0; j < priv->pattern[8].item_count; j++) {
277 if (priv->pattern[8].pattern_type[j] != pattern->type) {
286 priv->pattern[8].pattern_type[j] = pattern->type;
287 priv->pattern[8].item_count++;
288 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Register this item type in the group's FS pattern table if new. */
292 for (j = 0; j < priv->pattern[group].item_count; j++) {
293 if (priv->pattern[group].pattern_type[j] != pattern->type) {
302 priv->pattern[group].pattern_type[j] = pattern->type;
303 priv->pattern[group].item_count++;
304 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
308 /* Get traffic class index and flow id to be configured */
310 flow->index = attr->priority;
/* Single QoS extract: full VLAN TCI field. */
312 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
313 index = priv->extract.qos_key_cfg.num_extracts;
314 priv->extract.qos_key_cfg.extracts[index].type =
315 DPKG_EXTRACT_FROM_HDR;
316 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
317 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
318 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
319 priv->extract.qos_key_cfg.num_extracts++;
/* Single FS extract for this group: full VLAN TCI field. */
322 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
323 index = priv->extract.fs_key_cfg[group].num_extracts;
324 priv->extract.fs_key_cfg[group].extracts[index].type =
325 DPKG_EXTRACT_FROM_HDR;
326 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
327 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
328 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
329 priv->extract.fs_key_cfg[group].num_extracts++;
332 /* Parse pattern list to get the matching parameters */
333 spec = (const struct rte_flow_item_vlan *)pattern->spec;
334 last = (const struct rte_flow_item_vlan *)pattern->last;
335 mask = (const struct rte_flow_item_vlan *)
336 (pattern->mask ? pattern->mask : default_mask);
/* Copy the 16-bit TCI spec and mask at the VLAN offset. */
338 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
339 memcpy((void *)key_iova, (const void *)(&spec->tci),
342 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
343 memcpy((void *)mask_iova, (const void *)(&mask->tci),
346 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_VLAN + sizeof(rte_be16_t));
347 return device_configured;
/*
 * Handle an IPV4 pattern item: register the item type in the QoS and FS
 * pattern tables, add DPKG extracts for IP source, destination and
 * protocol, and copy src/dst addresses plus next_proto_id into the rule
 * buffers at DPAA2_CLS_RULE_OFFSET_IPV4.
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE flags.
 */
351 dpaa2_configure_flow_ipv4(struct rte_flow *flow,
352 struct rte_eth_dev *dev,
353 const struct rte_flow_attr *attr,
354 const struct rte_flow_item *pattern,
355 const struct rte_flow_action actions[] __rte_unused,
356 struct rte_flow_error *error __rte_unused)
361 int device_configured = 0, entry_found = 0;
363 const struct rte_flow_item_ipv4 *spec, *mask;
/* TODO: range ('last') matching not implemented. */
365 const struct rte_flow_item_ipv4 *last __rte_unused;
366 struct dpaa2_dev_priv *priv = dev->data->dev_private;
370 /* DPAA2 platform has a limitation that extract parameter can not be */
371 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
372 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
373 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
374 DPKG_MAX_NUM_OF_EXTRACTS);
378 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
379 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
380 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type in the QoS pattern table (slot 8) if new. */
384 for (j = 0; j < priv->pattern[8].item_count; j++) {
385 if (priv->pattern[8].pattern_type[j] != pattern->type) {
394 priv->pattern[8].pattern_type[j] = pattern->type;
395 priv->pattern[8].item_count++;
396 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Register this item type in the group's FS pattern table if new. */
400 for (j = 0; j < priv->pattern[group].item_count; j++) {
401 if (priv->pattern[group].pattern_type[j] != pattern->type) {
410 priv->pattern[group].pattern_type[j] = pattern->type;
411 priv->pattern[group].item_count++;
412 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
415 /* Get traffic class index and flow id to be configured */
417 flow->index = attr->priority;
/* QoS extracts: IP src, IP dst, IP protocol.
 * NOTE(review): num_extracts is assigned from 'index'; relies on index
 * being incremented per extract in lines between these — verify. */
419 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
420 index = priv->extract.qos_key_cfg.num_extracts;
421 priv->extract.qos_key_cfg.extracts[index].type =
422 DPKG_EXTRACT_FROM_HDR;
423 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
424 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
425 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
428 priv->extract.qos_key_cfg.extracts[index].type =
429 DPKG_EXTRACT_FROM_HDR;
430 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
431 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
432 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
435 priv->extract.qos_key_cfg.extracts[index].type =
436 DPKG_EXTRACT_FROM_HDR;
437 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
438 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
439 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
442 priv->extract.qos_key_cfg.num_extracts = index;
/* Same three extracts for this group's FS key configuration. */
445 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
446 index = priv->extract.fs_key_cfg[group].num_extracts;
447 priv->extract.fs_key_cfg[group].extracts[index].type =
448 DPKG_EXTRACT_FROM_HDR;
449 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
450 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
451 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
454 priv->extract.fs_key_cfg[group].extracts[index].type =
455 DPKG_EXTRACT_FROM_HDR;
456 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
457 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
458 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
461 priv->extract.fs_key_cfg[group].extracts[index].type =
462 DPKG_EXTRACT_FROM_HDR;
463 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
464 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
465 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
468 priv->extract.fs_key_cfg[group].num_extracts = index;
471 /* Parse pattern list to get the matching parameters */
472 spec = (const struct rte_flow_item_ipv4 *)pattern->spec;
473 last = (const struct rte_flow_item_ipv4 *)pattern->last;
474 mask = (const struct rte_flow_item_ipv4 *)
475 (pattern->mask ? pattern->mask : default_mask);
/* Key layout at OFFSET_IPV4: src addr (4B), dst addr (4B), proto (1B). */
477 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
478 memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
480 key_iova += sizeof(uint32_t);
481 memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
483 key_iova += sizeof(uint32_t);
484 memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
/* Mask buffer mirrors the key layout. */
487 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
488 memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
490 mask_iova += sizeof(uint32_t);
491 memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
493 mask_iova += sizeof(uint32_t);
494 memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
497 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV4 +
498 (2 * sizeof(uint32_t)) + sizeof(uint8_t));
500 return device_configured;
/*
 * Handle an IPV6 pattern item: register the item type in the QoS and FS
 * pattern tables, add DPKG extracts for the IP source and destination
 * fields, and copy the 16-byte src/dst addresses into the rule buffers
 * at DPAA2_CLS_RULE_OFFSET_IPV6.
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE flags.
 *
 * FIX: the FS-table configuration block below previously tested
 * DPAA2_QOS_TABLE_RECONFIGURE (copy-paste from the QoS block), unlike
 * every sibling handler in this file, so FS extracts were keyed to the
 * wrong flag; it now tests DPAA2_FS_TABLE_RECONFIGURE.
 */
504 dpaa2_configure_flow_ipv6(struct rte_flow *flow,
505 struct rte_eth_dev *dev,
506 const struct rte_flow_attr *attr,
507 const struct rte_flow_item *pattern,
508 const struct rte_flow_action actions[] __rte_unused,
509 struct rte_flow_error *error __rte_unused)
514 int device_configured = 0, entry_found = 0;
516 const struct rte_flow_item_ipv6 *spec, *mask;
/* TODO: range ('last') matching not implemented. */
518 const struct rte_flow_item_ipv6 *last __rte_unused;
519 struct dpaa2_dev_priv *priv = dev->data->dev_private;
523 /* DPAA2 platform has a limitation that extract parameter can not be */
524 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
525 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
526 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
527 DPKG_MAX_NUM_OF_EXTRACTS);
531 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
532 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
533 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type in the QoS pattern table (slot 8) if new. */
537 for (j = 0; j < priv->pattern[8].item_count; j++) {
538 if (priv->pattern[8].pattern_type[j] != pattern->type) {
547 priv->pattern[8].pattern_type[j] = pattern->type;
548 priv->pattern[8].item_count++;
549 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Register this item type in the group's FS pattern table if new. */
553 for (j = 0; j < priv->pattern[group].item_count; j++) {
554 if (priv->pattern[group].pattern_type[j] != pattern->type) {
563 priv->pattern[group].pattern_type[j] = pattern->type;
564 priv->pattern[group].item_count++;
565 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
568 /* Get traffic class index and flow id to be configured */
570 flow->index = attr->priority;
/* QoS extracts: IP src then IP dst. */
572 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
573 index = priv->extract.qos_key_cfg.num_extracts;
574 priv->extract.qos_key_cfg.extracts[index].type =
575 DPKG_EXTRACT_FROM_HDR;
576 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
577 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
578 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
581 priv->extract.qos_key_cfg.extracts[index].type =
582 DPKG_EXTRACT_FROM_HDR;
583 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
584 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
585 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
588 priv->extract.qos_key_cfg.num_extracts = index;
/* FS extracts for this group (was wrongly gated on the QoS flag). */
591 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
592 index = priv->extract.fs_key_cfg[group].num_extracts;
593 priv->extract.fs_key_cfg[group].extracts[index].type =
594 DPKG_EXTRACT_FROM_HDR;
595 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
596 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
597 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
600 priv->extract.fs_key_cfg[group].extracts[index].type =
601 DPKG_EXTRACT_FROM_HDR;
602 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
603 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
604 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
607 priv->extract.fs_key_cfg[group].num_extracts = index;
610 /* Parse pattern list to get the matching parameters */
611 spec = (const struct rte_flow_item_ipv6 *)pattern->spec;
612 last = (const struct rte_flow_item_ipv6 *)pattern->last;
613 mask = (const struct rte_flow_item_ipv6 *)
614 (pattern->mask ? pattern->mask : default_mask);
/* Key layout at OFFSET_IPV6: src addr (16B) then dst addr (16B). */
616 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
617 memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
618 sizeof(spec->hdr.src_addr));
619 key_iova += sizeof(spec->hdr.src_addr);
620 memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
621 sizeof(spec->hdr.dst_addr));
623 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
624 memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
625 sizeof(mask->hdr.src_addr));
626 mask_iova += sizeof(mask->hdr.src_addr);
627 memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
628 sizeof(mask->hdr.dst_addr));
630 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV6 +
631 sizeof(spec->hdr.src_addr) +
632 sizeof(mask->hdr.dst_addr));
633 return device_configured;
/*
 * Handle an ICMP pattern item: register the item type in the QoS and FS
 * pattern tables, add DPKG extracts for the ICMP type and code fields,
 * and copy the type/code spec/mask bytes into the rule buffers at
 * DPAA2_CLS_RULE_OFFSET_ICMP.
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE flags.
 *
 * FIX: in the mask-copy section below, 'key_iova' was advanced instead
 * of 'mask_iova' between the icmp_type and icmp_code copies, so the
 * icmp_code mask overwrote the icmp_type mask; it now advances
 * 'mask_iova', matching the key section and every sibling handler.
 */
637 dpaa2_configure_flow_icmp(struct rte_flow *flow,
638 struct rte_eth_dev *dev,
639 const struct rte_flow_attr *attr,
640 const struct rte_flow_item *pattern,
641 const struct rte_flow_action actions[] __rte_unused,
642 struct rte_flow_error *error __rte_unused)
647 int device_configured = 0, entry_found = 0;
649 const struct rte_flow_item_icmp *spec, *mask;
/* TODO: range ('last') matching not implemented. */
651 const struct rte_flow_item_icmp *last __rte_unused;
652 struct dpaa2_dev_priv *priv = dev->data->dev_private;
656 /* DPAA2 platform has a limitation that extract parameter can not be */
657 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
658 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
659 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
660 DPKG_MAX_NUM_OF_EXTRACTS);
664 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
665 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
666 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type in the QoS pattern table (slot 8) if new. */
670 for (j = 0; j < priv->pattern[8].item_count; j++) {
671 if (priv->pattern[8].pattern_type[j] != pattern->type) {
680 priv->pattern[8].pattern_type[j] = pattern->type;
681 priv->pattern[8].item_count++;
682 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Register this item type in the group's FS pattern table if new. */
686 for (j = 0; j < priv->pattern[group].item_count; j++) {
687 if (priv->pattern[group].pattern_type[j] != pattern->type) {
696 priv->pattern[group].pattern_type[j] = pattern->type;
697 priv->pattern[group].item_count++;
698 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
701 /* Get traffic class index and flow id to be configured */
703 flow->index = attr->priority;
/* QoS extracts: ICMP type then ICMP code. */
705 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
706 index = priv->extract.qos_key_cfg.num_extracts;
707 priv->extract.qos_key_cfg.extracts[index].type =
708 DPKG_EXTRACT_FROM_HDR;
709 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
710 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
711 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
714 priv->extract.qos_key_cfg.extracts[index].type =
715 DPKG_EXTRACT_FROM_HDR;
716 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
717 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
718 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
721 priv->extract.qos_key_cfg.num_extracts = index;
/* Same two extracts for this group's FS key configuration. */
724 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
725 index = priv->extract.fs_key_cfg[group].num_extracts;
726 priv->extract.fs_key_cfg[group].extracts[index].type =
727 DPKG_EXTRACT_FROM_HDR;
728 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
729 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
730 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
733 priv->extract.fs_key_cfg[group].extracts[index].type =
734 DPKG_EXTRACT_FROM_HDR;
735 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
736 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
737 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
740 priv->extract.fs_key_cfg[group].num_extracts = index;
743 /* Parse pattern list to get the matching parameters */
744 spec = (const struct rte_flow_item_icmp *)pattern->spec;
745 last = (const struct rte_flow_item_icmp *)pattern->last;
746 mask = (const struct rte_flow_item_icmp *)
747 (pattern->mask ? pattern->mask : default_mask);
/* Key layout at OFFSET_ICMP: type (1B) then code (1B). */
749 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
750 memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
752 key_iova += sizeof(uint8_t);
753 memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
/* Mask buffer mirrors the key layout (fixed: advance mask_iova). */
756 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
757 memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
759 mask_iova += sizeof(uint8_t);
760 memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
763 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ICMP +
764 (2 * sizeof(uint8_t)));
766 return device_configured;
/*
 * Handle a UDP pattern item: register the item type in the QoS and FS
 * pattern tables, add DPKG extracts for IP protocol plus UDP source and
 * destination ports, force the IPv4 protocol byte in the key to 0x11
 * (IPPROTO_UDP), and copy the port spec/mask into the rule buffers at
 * DPAA2_CLS_RULE_OFFSET_UDP.
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE flags.
 */
770 dpaa2_configure_flow_udp(struct rte_flow *flow,
771 struct rte_eth_dev *dev,
772 const struct rte_flow_attr *attr,
773 const struct rte_flow_item *pattern,
774 const struct rte_flow_action actions[] __rte_unused,
775 struct rte_flow_error *error __rte_unused)
780 int device_configured = 0, entry_found = 0;
782 const struct rte_flow_item_udp *spec, *mask;
/* TODO: range ('last') matching not implemented. */
784 const struct rte_flow_item_udp *last __rte_unused;
785 struct dpaa2_dev_priv *priv = dev->data->dev_private;
789 /* DPAA2 platform has a limitation that extract parameter can not be */
790 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
791 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
792 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
793 DPKG_MAX_NUM_OF_EXTRACTS);
797 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
798 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
799 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this item type in the QoS pattern table (slot 8) if new. */
803 for (j = 0; j < priv->pattern[8].item_count; j++) {
804 if (priv->pattern[8].pattern_type[j] != pattern->type) {
813 priv->pattern[8].pattern_type[j] = pattern->type;
814 priv->pattern[8].item_count++;
815 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Register this item type in the group's FS pattern table if new. */
819 for (j = 0; j < priv->pattern[group].item_count; j++) {
820 if (priv->pattern[group].pattern_type[j] != pattern->type) {
829 priv->pattern[group].pattern_type[j] = pattern->type;
830 priv->pattern[group].item_count++;
831 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
834 /* Get traffic class index and flow id to be configured */
836 flow->index = attr->priority;
/* QoS extracts: IP protocol, UDP source port, UDP dest port. */
838 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
839 index = priv->extract.qos_key_cfg.num_extracts;
840 priv->extract.qos_key_cfg.extracts[index].type =
841 DPKG_EXTRACT_FROM_HDR;
842 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
843 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
844 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
847 priv->extract.qos_key_cfg.extracts[index].type =
848 DPKG_EXTRACT_FROM_HDR;
849 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
850 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
851 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
854 priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
855 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
856 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
857 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
860 priv->extract.qos_key_cfg.num_extracts = index;
/* Same three extracts for this group's FS key configuration. */
863 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
864 index = priv->extract.fs_key_cfg[group].num_extracts;
865 priv->extract.fs_key_cfg[group].extracts[index].type =
866 DPKG_EXTRACT_FROM_HDR;
867 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
868 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
869 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
872 priv->extract.fs_key_cfg[group].extracts[index].type =
873 DPKG_EXTRACT_FROM_HDR;
874 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
875 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
876 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
879 priv->extract.fs_key_cfg[group].extracts[index].type =
880 DPKG_EXTRACT_FROM_HDR;
881 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
882 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
883 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
886 priv->extract.fs_key_cfg[group].num_extracts = index;
889 /* Parse pattern list to get the matching parameters */
890 spec = (const struct rte_flow_item_udp *)pattern->spec;
891 last = (const struct rte_flow_item_udp *)pattern->last;
892 mask = (const struct rte_flow_item_udp *)
893 (pattern->mask ? pattern->mask : default_mask);
/* Force IPPROTO_UDP (0x11) into the key's IPv4 protocol byte.
 * NOTE(review): no matching 0xff write to the mask's protocol byte is
 * visible here — confirm it is set elsewhere, otherwise the forced
 * protocol byte may not participate in matching. */
895 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
896 (2 * sizeof(uint32_t));
897 memset((void *)key_iova, 0x11, sizeof(uint8_t));
/* Key layout at OFFSET_UDP: src port (2B) then dst port (2B). */
898 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_UDP;
899 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
901 key_iova += sizeof(uint16_t);
902 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
905 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_UDP;
906 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
908 mask_iova += sizeof(uint16_t);
909 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
912 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_UDP +
913 (2 * sizeof(uint16_t)));
915 return device_configured;
919 dpaa2_configure_flow_tcp(struct rte_flow *flow,
920 struct rte_eth_dev *dev,
921 const struct rte_flow_attr *attr,
922 const struct rte_flow_item *pattern,
923 const struct rte_flow_action actions[] __rte_unused,
924 struct rte_flow_error *error __rte_unused)
929 int device_configured = 0, entry_found = 0;
931 const struct rte_flow_item_tcp *spec, *mask;
933 const struct rte_flow_item_tcp *last __rte_unused;
934 struct dpaa2_dev_priv *priv = dev->data->dev_private;
938 /* DPAA2 platform has a limitation that extract parameter can not be */
939 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
940 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
941 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
942 DPKG_MAX_NUM_OF_EXTRACTS);
946 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
947 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
948 DPKG_MAX_NUM_OF_EXTRACTS);
952 for (j = 0; j < priv->pattern[8].item_count; j++) {
953 if (priv->pattern[8].pattern_type[j] != pattern->type) {
962 priv->pattern[8].pattern_type[j] = pattern->type;
963 priv->pattern[8].item_count++;
964 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
968 for (j = 0; j < priv->pattern[group].item_count; j++) {
969 if (priv->pattern[group].pattern_type[j] != pattern->type) {
978 priv->pattern[group].pattern_type[j] = pattern->type;
979 priv->pattern[group].item_count++;
980 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
983 /* Get traffic class index and flow id to be configured */
985 flow->index = attr->priority;
987 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
988 index = priv->extract.qos_key_cfg.num_extracts;
989 priv->extract.qos_key_cfg.extracts[index].type =
990 DPKG_EXTRACT_FROM_HDR;
991 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
992 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
993 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
996 priv->extract.qos_key_cfg.extracts[index].type =
997 DPKG_EXTRACT_FROM_HDR;
998 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
999 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1000 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
1003 priv->extract.qos_key_cfg.extracts[index].type =
1004 DPKG_EXTRACT_FROM_HDR;
1005 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1006 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1007 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
1010 priv->extract.qos_key_cfg.num_extracts = index;
1013 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1014 index = priv->extract.fs_key_cfg[group].num_extracts;
1015 priv->extract.fs_key_cfg[group].extracts[index].type =
1016 DPKG_EXTRACT_FROM_HDR;
1017 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1018 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
1019 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
1022 priv->extract.fs_key_cfg[group].extracts[index].type =
1023 DPKG_EXTRACT_FROM_HDR;
1024 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1025 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1026 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
1029 priv->extract.fs_key_cfg[group].extracts[index].type =
1030 DPKG_EXTRACT_FROM_HDR;
1031 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1032 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1033 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
1036 priv->extract.fs_key_cfg[group].num_extracts = index;
1039 /* Parse pattern list to get the matching parameters */
1040 spec = (const struct rte_flow_item_tcp *)pattern->spec;
1041 last = (const struct rte_flow_item_tcp *)pattern->last;
1042 mask = (const struct rte_flow_item_tcp *)
1043 (pattern->mask ? pattern->mask : default_mask);
1045 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
1046 (2 * sizeof(uint32_t));
1047 memset((void *)key_iova, 0x06, sizeof(uint8_t));
1048 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_TCP;
1049 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
1051 key_iova += sizeof(uint16_t);
1052 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
1055 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_TCP;
1056 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
1058 mask_iova += sizeof(uint16_t);
1059 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
1062 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_TCP +
1063 (2 * sizeof(uint16_t)));
1065 return device_configured;
/*
 * Build the classification rule contribution for an SCTP pattern item.
 *
 * Records the SCTP src/dst port extracts in the QoS and per-TC FS key
 * configurations (when not already present), and writes the spec/mask
 * port values into the flow's DMA-able rule key/mask buffers at the
 * fixed SCTP offset.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE indicating which tables must be
 * re-programmed by the caller.
 *
 * NOTE(review): intermediate lines of this function are elided in this
 * view (braces, returns, index increments); comments below describe only
 * the visible statements.
 */
1069 dpaa2_configure_flow_sctp(struct rte_flow *flow,
1070 struct rte_eth_dev *dev,
1071 const struct rte_flow_attr *attr,
1072 const struct rte_flow_item *pattern,
1073 const struct rte_flow_action actions[] __rte_unused,
1074 struct rte_flow_error *error __rte_unused)
1079 int device_configured = 0, entry_found = 0;
1081 const struct rte_flow_item_sctp *spec, *mask;
1083 const struct rte_flow_item_sctp *last __rte_unused;
1084 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1086 group = attr->group;
1088 /* DPAA2 platform has a limitation that extract parameter can not be */
1089 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
/* Slot [8] appears to be the TC-agnostic slot backing the QoS table
 * (presumably MAX_TCS == 8) — TODO confirm against dpaa2_ethdev.h. */
1090 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1091 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1092 DPKG_MAX_NUM_OF_EXTRACTS);
/* Same limit check for the per-group (traffic class) FS slot. */
1096 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1097 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1098 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this pattern type in the QoS slot if not already present;
 * a new registration forces a QoS table reconfigure. */
1102 for (j = 0; j < priv->pattern[8].item_count; j++) {
1103 if (priv->pattern[8].pattern_type[j] != pattern->type) {
1112 priv->pattern[8].pattern_type[j] = pattern->type;
1113 priv->pattern[8].item_count++;
1114 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Likewise for the per-group FS slot. */
1118 for (j = 0; j < priv->pattern[group].item_count; j++) {
1119 if (priv->pattern[group].pattern_type[j] != pattern->type) {
1128 priv->pattern[group].pattern_type[j] = pattern->type;
1129 priv->pattern[group].item_count++;
1130 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1133 /* Get traffic class index and flow id to be configured */
1134 flow->tc_id = group;
1135 flow->index = attr->priority;
/* Append IP-proto + SCTP src/dst port extracts to the QoS key config.
 * (The index increments between extracts are elided in this view.) */
1137 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1138 index = priv->extract.qos_key_cfg.num_extracts;
1139 priv->extract.qos_key_cfg.extracts[index].type =
1140 DPKG_EXTRACT_FROM_HDR;
1141 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1142 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
1143 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
1146 priv->extract.qos_key_cfg.extracts[index].type =
1147 DPKG_EXTRACT_FROM_HDR;
1148 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1149 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1150 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1153 priv->extract.qos_key_cfg.extracts[index].type =
1154 DPKG_EXTRACT_FROM_HDR;
1155 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1156 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1157 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1160 priv->extract.qos_key_cfg.num_extracts = index;
/* Same three extracts for the per-group FS key config. */
1163 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1164 index = priv->extract.fs_key_cfg[group].num_extracts;
1165 priv->extract.fs_key_cfg[group].extracts[index].type =
1166 DPKG_EXTRACT_FROM_HDR;
1167 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1168 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
1169 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
1172 priv->extract.fs_key_cfg[group].extracts[index].type =
1173 DPKG_EXTRACT_FROM_HDR;
1174 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1175 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1176 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1179 priv->extract.fs_key_cfg[group].extracts[index].type =
1180 DPKG_EXTRACT_FROM_HDR;
1181 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1182 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1183 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1186 priv->extract.fs_key_cfg[group].num_extracts = index;
1189 /* Parse pattern list to get the matching parameters */
1190 spec = (const struct rte_flow_item_sctp *)pattern->spec;
1191 last = (const struct rte_flow_item_sctp *)pattern->last;
/* Fall back to rte_flow's canonical default mask when none supplied. */
1192 mask = (const struct rte_flow_item_sctp *)
1193 (pattern->mask ? pattern->mask : default_mask);
/* Write IP protocol byte (0x84 == IPPROTO_SCTP, 132) into the key at
 * the IPv4 proto position, then the src/dst ports at the SCTP offset. */
1195 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
1196 (2 * sizeof(uint32_t));
1197 memset((void *)key_iova, 0x84, sizeof(uint8_t));
1198 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
1199 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
1201 key_iova += sizeof(uint16_t);
1202 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
/* Mirror the same two ports into the mask buffer. */
1205 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
1206 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
1208 mask_iova += sizeof(uint16_t);
1209 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
1212 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_SCTP +
1213 (2 * sizeof(uint16_t)));
1214 return device_configured;
/*
 * Build the classification rule contribution for a GRE pattern item.
 *
 * Registers a single GRE protocol-type extract in the QoS and per-TC FS
 * key configurations and copies the spec/mask `protocol` field into the
 * rule key/mask buffers at the GRE offset.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE for the caller.
 *
 * NOTE(review): intermediate lines are elided in this view; comments
 * describe only the visible statements.
 */
1218 dpaa2_configure_flow_gre(struct rte_flow *flow,
1219 struct rte_eth_dev *dev,
1220 const struct rte_flow_attr *attr,
1221 const struct rte_flow_item *pattern,
1222 const struct rte_flow_action actions[] __rte_unused,
1223 struct rte_flow_error *error __rte_unused)
1228 int device_configured = 0, entry_found = 0;
1230 const struct rte_flow_item_gre *spec, *mask;
1232 const struct rte_flow_item_gre *last __rte_unused;
1233 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1235 group = attr->group;
1237 /* DPAA2 platform has a limitation that extract parameter can not be */
1238 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
/* Slot [8] presumably backs the QoS table (TC-agnostic) — TODO confirm. */
1239 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1240 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1241 DPKG_MAX_NUM_OF_EXTRACTS);
1245 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1246 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1247 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register the GRE pattern type in the QoS slot if new; new entries
 * force a QoS table reconfigure. */
1251 for (j = 0; j < priv->pattern[8].item_count; j++) {
1252 if (priv->pattern[8].pattern_type[j] != pattern->type) {
1261 priv->pattern[8].pattern_type[j] = pattern->type;
1262 priv->pattern[8].item_count++;
1263 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Likewise for the per-group FS slot. */
1267 for (j = 0; j < priv->pattern[group].item_count; j++) {
1268 if (priv->pattern[group].pattern_type[j] != pattern->type) {
1277 priv->pattern[group].pattern_type[j] = pattern->type;
1278 priv->pattern[group].item_count++;
1279 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1282 /* Get traffic class index and flow id to be configured */
1283 flow->tc_id = group;
1284 flow->index = attr->priority;
/* Single extract: full GRE protocol-type field from the header. */
1286 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1287 index = priv->extract.qos_key_cfg.num_extracts;
1288 priv->extract.qos_key_cfg.extracts[index].type =
1289 DPKG_EXTRACT_FROM_HDR;
1290 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1291 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1292 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1295 priv->extract.qos_key_cfg.num_extracts = index;
/* Same single extract for the per-group FS key config. */
1298 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1299 index = priv->extract.fs_key_cfg[group].num_extracts;
1300 priv->extract.fs_key_cfg[group].extracts[index].type =
1301 DPKG_EXTRACT_FROM_HDR;
1302 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1303 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1304 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1307 priv->extract.fs_key_cfg[group].num_extracts = index;
1310 /* Parse pattern list to get the matching parameters */
1311 spec = (const struct rte_flow_item_gre *)pattern->spec;
1312 last = (const struct rte_flow_item_gre *)pattern->last;
/* Fall back to rte_flow's canonical default mask when none supplied. */
1313 mask = (const struct rte_flow_item_gre *)
1314 (pattern->mask ? pattern->mask : default_mask);
/* Copy the 16-bit big-endian protocol field into key and mask buffers. */
1316 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_GRE;
1317 memcpy((void *)key_iova, (const void *)(&spec->protocol),
1318 sizeof(rte_be16_t));
1320 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_GRE;
1321 memcpy((void *)mask_iova, (const void *)(&mask->protocol),
1322 sizeof(rte_be16_t));
1324 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_GRE + sizeof(rte_be16_t));
1326 return device_configured;
/*
 * Translate a generic rte_flow (pattern[] + actions[]) into DPNI
 * hardware configuration: per-item key extracts, QoS/FS table setup,
 * and QoS/FS rule entries, then link the flow into priv->flows.
 *
 * NOTE(review): intermediate lines (braces, returns, gotos) are elided
 * in this view; comments describe only the visible statements.
 */
1330 dpaa2_generic_flow_set(struct rte_flow *flow,
1331 struct rte_eth_dev *dev,
1332 const struct rte_flow_attr *attr,
1333 const struct rte_flow_item pattern[],
1334 const struct rte_flow_action actions[],
1335 struct rte_flow_error *error)
1337 const struct rte_flow_action_queue *dest_queue;
1338 const struct rte_flow_action_rss *rss_conf;
1340 int is_keycfg_configured = 0, end_of_list = 0;
1341 int ret = 0, i = 0, j = 0;
1342 struct dpni_attr nic_attr;
1343 struct dpni_rx_tc_dist_cfg tc_cfg;
1344 struct dpni_qos_tbl_cfg qos_cfg;
1345 struct dpkg_profile_cfg key_cfg;
1346 struct dpni_fs_action_cfg action;
1347 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1348 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1350 struct rte_flow *curr = LIST_FIRST(&priv->flows);
1352 /* Parse pattern list to get the matching parameters */
/* Dispatch each pattern item to its dpaa2_configure_flow_* helper;
 * each helper returns a bitmask of tables needing reconfiguration. */
1353 while (!end_of_list) {
1354 switch (pattern[i].type) {
1355 case RTE_FLOW_ITEM_TYPE_ETH:
1356 is_keycfg_configured = dpaa2_configure_flow_eth(flow,
1363 case RTE_FLOW_ITEM_TYPE_VLAN:
1364 is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
1371 case RTE_FLOW_ITEM_TYPE_IPV4:
1372 is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
1379 case RTE_FLOW_ITEM_TYPE_IPV6:
1380 is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
1387 case RTE_FLOW_ITEM_TYPE_ICMP:
1388 is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
1395 case RTE_FLOW_ITEM_TYPE_UDP:
1396 is_keycfg_configured = dpaa2_configure_flow_udp(flow,
1403 case RTE_FLOW_ITEM_TYPE_TCP:
1404 is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
1411 case RTE_FLOW_ITEM_TYPE_SCTP:
1412 is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
1418 case RTE_FLOW_ITEM_TYPE_GRE:
1419 is_keycfg_configured = dpaa2_configure_flow_gre(flow,
1426 case RTE_FLOW_ITEM_TYPE_END:
1428 break; /*End of List*/
1430 DPAA2_PMD_ERR("Invalid action type");
1437 /* Let's parse action on matching traffic */
1439 while (!end_of_list) {
1440 switch (actions[j].type) {
/* QUEUE action: steer matching traffic to a fixed queue via the
 * QoS table (TC selection) and FS table (flow within the TC). */
1441 case RTE_FLOW_ACTION_TYPE_QUEUE:
1442 dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
1443 flow->flow_id = dest_queue->index;
1444 flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
1445 memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
1446 action.flow_id = flow->flow_id;
/* Re-prepare and program the QoS table if this flow added new
 * extracts to the QoS key config. */
1447 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1448 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1449 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1451 "Unable to prepare extract parameters");
1455 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
1456 qos_cfg.discard_on_miss = true;
1457 qos_cfg.keep_entries = true;
1458 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1459 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1460 priv->token, &qos_cfg);
1463 "Distribution cannot be configured.(%d)"
/* Re-prepare and program the FS (exact-match) distribution for
 * this flow's traffic class if its key config changed. */
1468 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1469 if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
1470 (uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
1472 "Unable to prepare extract parameters");
1476 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1477 tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
1478 tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
1479 tc_cfg.key_cfg_iova =
1480 (uint64_t)priv->extract.fs_extract_param[flow->tc_id];
1481 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1482 tc_cfg.fs_cfg.keep_entries = true;
1483 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1485 flow->tc_id, &tc_cfg);
1488 "Distribution cannot be configured.(%d)"
1493 /* Configure QoS table first */
1494 memset(&nic_attr, 0, sizeof(struct dpni_attr));
1495 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1496 priv->token, &nic_attr);
1499 "Failure to get attribute. dpni@%p err code(%d)\n",
/* Clamp flow id to the number of RX TCs and compute the QoS
 * entry index from the flow's priority and traffic class. */
1504 action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
1505 index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1506 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
1507 priv->token, &flow->rule,
1512 "Error in addnig entry to QoS table(%d)", ret);
1516 /* Then Configure FS table */
1517 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1518 flow->tc_id, flow->index,
1519 &flow->rule, &action);
1522 "Error in adding entry to FS table(%d)", ret);
/* RSS action: hash-distribute matching traffic over the queues
 * listed in rss_conf, all of which must belong to attr->group's TC. */
1526 case RTE_FLOW_ACTION_TYPE_RSS:
1527 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1528 priv->token, &nic_attr);
1531 "Failure to get attribute. dpni@%p err code(%d)\n",
1535 rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
1536 for (i = 0; i < (int)rss_conf->queue_num; i++) {
1537 if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
1538 rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
1540 "Queue/Group combination are not supported\n");
1545 flow->action = RTE_FLOW_ACTION_TYPE_RSS;
/* Convert the rte_flow RSS type bitmask into a dpkg profile. */
1546 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
1550 "unable to set flow distribution.please check queue config\n");
1554 /* Allocate DMA'ble memory to write the rules */
/* 256 bytes, 64-byte aligned, holds the prepared key config;
 * freed on every visible exit path of this branch. */
1555 param = (size_t)rte_malloc(NULL, 256, 64);
1557 DPAA2_PMD_ERR("Memory allocation failure\n");
1561 if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
1563 "Unable to prepare extract parameters");
1564 rte_free((void *)param);
1568 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1569 tc_cfg.dist_size = rss_conf->queue_num;
1570 tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
1571 tc_cfg.key_cfg_iova = (size_t)param;
1572 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1574 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1575 priv->token, flow->tc_id,
1579 "Distribution cannot be configured: %d\n", ret);
1580 rte_free((void *)param);
1584 rte_free((void *)param);
/* NOTE(review): this branch tests DPAA2_FS_TABLE_RECONFIGURE but
 * then re-prepares and programs the *QoS* table/extract config.
 * It likely should test DPAA2_QOS_TABLE_RECONFIGURE — confirm
 * against the upstream DPDK fix before relying on this path. */
1585 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1586 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1587 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1589 "Unable to prepare extract parameters");
1593 sizeof(struct dpni_qos_tbl_cfg));
1594 qos_cfg.discard_on_miss = true;
1595 qos_cfg.keep_entries = true;
1596 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1597 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1598 priv->token, &qos_cfg);
1601 "Distribution can not be configured(%d)\n",
1607 /* Add Rule into QoS table */
1608 index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1609 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1610 &flow->rule, flow->tc_id,
1614 "Error in entry addition in QoS table(%d)",
1619 case RTE_FLOW_ACTION_TYPE_END:
1623 DPAA2_PMD_ERR("Invalid action type");
1631 /* New rules are inserted. */
/* Empty list: insert at head; otherwise walk to the tail and
 * append so flows keep creation order. */
1633 LIST_INSERT_HEAD(&priv->flows, flow, next);
1635 while (LIST_NEXT(curr, next))
1636 curr = LIST_NEXT(curr, next);
1637 LIST_INSERT_AFTER(curr, flow, next);
/*
 * Validate rte_flow attributes against the DPNI's capabilities:
 * group must be a valid RX traffic class, priority must fit within the
 * FS table, only ingress is supported (egress rejected).
 * Error-return lines are elided in this view.
 */
1644 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
1645 const struct rte_flow_attr *attr)
1649 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
1650 DPAA2_PMD_ERR("Priority group is out of range\n");
1653 if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
1654 DPAA2_PMD_ERR("Priority within the group is out of range\n");
1657 if (unlikely(attr->egress)) {
1659 "Flow configuration is not supported on egress side\n");
1662 if (unlikely(!attr->ingress)) {
1663 DPAA2_PMD_ERR("Ingress flag must be configured\n");
/*
 * Point the file-scope `default_mask` at the rte_flow canonical default
 * mask matching the given pattern item's type. Used when a pattern has
 * `last` but no explicit `mask`. Break statements are elided in this view.
 */
1670 dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
1672 switch (pattern->type) {
1673 case RTE_FLOW_ITEM_TYPE_ETH:
1674 default_mask = (const void *)&rte_flow_item_eth_mask;
1676 case RTE_FLOW_ITEM_TYPE_VLAN:
1677 default_mask = (const void *)&rte_flow_item_vlan_mask;
1679 case RTE_FLOW_ITEM_TYPE_IPV4:
1680 default_mask = (const void *)&rte_flow_item_ipv4_mask;
1682 case RTE_FLOW_ITEM_TYPE_IPV6:
1683 default_mask = (const void *)&rte_flow_item_ipv6_mask;
1685 case RTE_FLOW_ITEM_TYPE_ICMP:
1686 default_mask = (const void *)&rte_flow_item_icmp_mask;
1688 case RTE_FLOW_ITEM_TYPE_UDP:
1689 default_mask = (const void *)&rte_flow_item_udp_mask;
1691 case RTE_FLOW_ITEM_TYPE_TCP:
1692 default_mask = (const void *)&rte_flow_item_tcp_mask;
1694 case RTE_FLOW_ITEM_TYPE_SCTP:
1695 default_mask = (const void *)&rte_flow_item_sctp_mask;
1697 case RTE_FLOW_ITEM_TYPE_GRE:
1698 default_mask = (const void *)&rte_flow_item_gre_mask;
/* Unsupported item types are only logged here. */
1701 DPAA2_PMD_ERR("Invalid pattern type");
/*
 * Validate a pattern list:
 *  1) every item type must be in dpaa2_supported_pattern_type[];
 *  2) items with `last` but no `mask` get the canonical default mask;
 *  3) adding these items must not exceed DPKG_MAX_NUM_OF_EXTRACTS in
 *     any of the per-TC (plus QoS) extract slots.
 * Error-return lines are elided in this view.
 */
1706 dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
1707 const struct rte_flow_item pattern[])
1709 unsigned int i, j, k, is_found = 0;
1712 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1713 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
1714 if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
1724 /* Lets verify other combinations of given pattern rules */
1725 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1726 if (!pattern[j].spec) {
/* `last` without `mask`: fall back to the item's default mask. */
1730 if ((pattern[j].last) && (!pattern[j].mask))
1731 dpaa2_dev_update_default_mask(&pattern[j]);
1734 /* DPAA2 platform has a limitation that extract parameter can not be */
1735 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
/* Check all MAX_TCS FS slots plus the extra QoS slot (hence +1). */
1736 for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1737 for (j = 0; j < MAX_TCS + 1; j++) {
1738 for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) {
1739 if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type)
1742 if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS)
/*
 * Validate an action list: every action type must appear in
 * dpaa2_supported_action_type[], and every action except DROP must
 * carry a non-NULL conf. Error-return lines are elided in this view.
 */
1750 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
1752 unsigned int i, j, is_found = 0;
1755 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1756 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
1757 if (dpaa2_supported_action_type[i] == actions[j].type) {
1767 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1768 if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
/*
 * rte_flow .validate callback: fetch the DPNI attributes, then verify
 * the flow attributes, pattern list, and action list in turn. On any
 * failure, sets a coarse EPERM rte_flow error pointing at the offending
 * section and jumps to the (elided) not_valid_params exit.
 */
1775 int dpaa2_flow_validate(struct rte_eth_dev *dev,
1776 const struct rte_flow_attr *flow_attr,
1777 const struct rte_flow_item pattern[],
1778 const struct rte_flow_action actions[],
1779 struct rte_flow_error *error)
1781 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1782 struct dpni_attr dpni_attr;
1783 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1784 uint16_t token = priv->token;
1787 memset(&dpni_attr, 0, sizeof(struct dpni_attr));
1788 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
1791 "Failure to get dpni@%p attribute, err code %d\n",
1793 rte_flow_error_set(error, EPERM,
1794 RTE_FLOW_ERROR_TYPE_ATTR,
1795 flow_attr, "invalid");
1799 /* Verify input attributes */
1800 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
1803 "Invalid attributes are given\n");
1804 rte_flow_error_set(error, EPERM,
1805 RTE_FLOW_ERROR_TYPE_ATTR,
1806 flow_attr, "invalid");
1807 goto not_valid_params;
1809 /* Verify input pattern list */
1810 ret = dpaa2_dev_verify_patterns(priv, pattern);
1813 "Invalid pattern list is given\n");
1814 rte_flow_error_set(error, EPERM,
1815 RTE_FLOW_ERROR_TYPE_ITEM,
1816 pattern, "invalid");
1817 goto not_valid_params;
1819 /* Verify input action list */
1820 ret = dpaa2_dev_verify_actions(actions);
1823 "Invalid action list is given\n");
1824 rte_flow_error_set(error, EPERM,
1825 RTE_FLOW_ERROR_TYPE_ACTION,
1826 actions, "invalid");
1827 goto not_valid_params;
/*
 * rte_flow .create callback: allocate the flow object plus two 256-byte,
 * 64-byte-aligned DMA-able buffers for the rule key and mask, then hand
 * off to dpaa2_generic_flow_set(). On failure all three allocations are
 * freed and an EPERM rte_flow error is set. Returns the flow pointer on
 * success (return statements elided in this view).
 */
1834 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
1835 const struct rte_flow_attr *attr,
1836 const struct rte_flow_item pattern[],
1837 const struct rte_flow_action actions[],
1838 struct rte_flow_error *error)
1840 struct rte_flow *flow = NULL;
1841 size_t key_iova = 0, mask_iova = 0;
1844 flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
1846 DPAA2_PMD_ERR("Failure to allocate memory for flow");
1849 /* Allocate DMA'ble memory to write the rules */
1850 key_iova = (size_t)rte_malloc(NULL, 256, 64);
1853 "Memory allocation failure for rule configuration\n");
1856 mask_iova = (size_t)rte_malloc(NULL, 256, 64);
1859 "Memory allocation failure for rule configuration\n");
1863 flow->rule.key_iova = key_iova;
1864 flow->rule.mask_iova = mask_iova;
1865 flow->rule.key_size = 0;
/* Only the generic filter type is handled; others are rejected. */
1867 switch (dpaa2_filter_type) {
1868 case RTE_ETH_FILTER_GENERIC:
1869 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
/* Preserve any more specific error already set by the helper;
 * otherwise report a generic creation failure. */
1872 if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
1873 rte_flow_error_set(error, EPERM,
1874 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1877 "Failure to create flow, return code (%d)", ret);
1878 goto creation_error;
1882 DPAA2_PMD_ERR("Filter type (%d) not supported",
/* creation_error path: release everything allocated above.
 * (rte_free(NULL) is a no-op, so partially-allocated cases are safe.) */
1889 rte_flow_error_set(error, EPERM,
1890 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1891 NULL, "memory alloc");
1893 rte_free((void *)flow);
1894 rte_free((void *)key_iova);
1895 rte_free((void *)mask_iova);
/*
 * rte_flow .destroy callback: remove the hardware entries backing the
 * flow (QoS entry, plus FS entry for QUEUE actions), unlink it from the
 * device's flow list, and free it (free call elided in this view).
 *
 * NOTE(review): the error log strings on the remove paths say
 * "adding"/"addition" — misleading, but runtime strings cannot be
 * changed in a documentation-only pass.
 */
1901 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
1902 struct rte_flow *flow,
1903 struct rte_flow_error *error)
1906 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1907 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1909 switch (flow->action) {
1910 case RTE_FLOW_ACTION_TYPE_QUEUE:
1911 /* Remove entry from QoS table first */
1912 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1916 "Error in adding entry to QoS table(%d)", ret);
1920 /* Then remove entry from FS table */
1921 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1922 flow->tc_id, &flow->rule);
1925 "Error in entry addition in FS table(%d)", ret);
/* RSS flows only installed a QoS entry, so only that is removed. */
1929 case RTE_FLOW_ACTION_TYPE_RSS:
1930 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1934 "Error in entry addition in QoS table(%d)", ret);
1940 "Action type (%d) is not supported", flow->action);
1945 LIST_REMOVE(flow, next);
1946 /* Now free the flow */
1951 rte_flow_error_set(error, EPERM,
1952 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 * Destroy user-configured flow rules.
 *
 * This function skips internal flows rules.
 *
 * @see rte_flow_flush()
 */
/* Walks priv->flows, caching the next pointer before each destroy so
 * iteration survives LIST_REMOVE inside dpaa2_flow_destroy(). */
1966 dpaa2_flow_flush(struct rte_eth_dev *dev,
1967 struct rte_flow_error *error)
1969 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1970 struct rte_flow *flow = LIST_FIRST(&priv->flows);
1973 struct rte_flow *next = LIST_NEXT(flow, next);
1975 dpaa2_flow_destroy(dev, flow, error);
/* rte_flow .query callback — not implemented; every parameter is
 * unused (body elided in this view). */
1982 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
1983 struct rte_flow *flow __rte_unused,
1984 const struct rte_flow_action *actions __rte_unused,
1985 void *data __rte_unused,
1986 struct rte_flow_error *error __rte_unused)
 * Clean up all flow rules.
 *
 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * Pointer to private structure.
 */
/* Pops the list head until empty; destroy unlinks each flow, so the
 * loop terminates. Passes NULL for the error struct (best-effort). */
2001 dpaa2_flow_clean(struct rte_eth_dev *dev)
2003 struct rte_flow *flow;
2004 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2006 while ((flow = LIST_FIRST(&priv->flows)))
2007 dpaa2_flow_destroy(dev, flow, NULL);
2010 const struct rte_flow_ops dpaa2_flow_ops = {
2011 .create = dpaa2_flow_create,
2012 .validate = dpaa2_flow_validate,
2013 .destroy = dpaa2_flow_destroy,
2014 .flush = dpaa2_flow_flush,
2015 .query = dpaa2_flow_query,