1 /* * SPDX-License-Identifier: BSD-3-Clause
13 #include <rte_ethdev.h>
15 #include <rte_eth_ctrl.h>
16 #include <rte_malloc.h>
17 #include <rte_flow_driver.h>
18 #include <rte_tailq.h>
23 #include <dpaa2_ethdev.h>
24 #include <dpaa2_pmd_logs.h>
27 LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
28 struct dpni_rule_cfg rule;
33 enum rte_flow_action_type action;
37 /* Layout for rule compositions for supported patterns */
38 /* TODO: Current design only supports Ethernet + IPv4 based classification. */
39 /* So corresponding offset macros are valid only. Rest are placeholder for */
40 /* now. Once support for other network headers will be added then */
41 /* corresponding macros will be updated with correct values*/
/* Byte offsets of each protocol's fields inside the flat key/mask buffer. */
/* Each offset = previous offset + size of the previous protocol's fields. */
42 #define DPAA2_CLS_RULE_OFFSET_ETH 0 /*Start of buffer*/
43 #define DPAA2_CLS_RULE_OFFSET_VLAN 14 /* DPAA2_CLS_RULE_OFFSET_ETH */
44 /* + Sizeof Eth fields */
45 #define DPAA2_CLS_RULE_OFFSET_IPV4 14 /* DPAA2_CLS_RULE_OFFSET_VLAN */
46 /* + Sizeof VLAN fields */
47 #define DPAA2_CLS_RULE_OFFSET_IPV6 25 /* DPAA2_CLS_RULE_OFFSET_IPV4 */
48 /* + Sizeof IPV4 fields */
49 #define DPAA2_CLS_RULE_OFFSET_ICMP 58 /* DPAA2_CLS_RULE_OFFSET_IPV6 */
50 /* + Sizeof IPV6 fields */
51 #define DPAA2_CLS_RULE_OFFSET_UDP 60 /* DPAA2_CLS_RULE_OFFSET_ICMP */
52 /* + Sizeof ICMP fields */
53 #define DPAA2_CLS_RULE_OFFSET_TCP 64 /* DPAA2_CLS_RULE_OFFSET_UDP */
54 /* + Sizeof UDP fields */
55 #define DPAA2_CLS_RULE_OFFSET_SCTP 68 /* DPAA2_CLS_RULE_OFFSET_TCP */
56 /* + Sizeof TCP fields */
57 #define DPAA2_CLS_RULE_OFFSET_GRE 72 /* DPAA2_CLS_RULE_OFFSET_SCTP */
58 /* + Sizeof SCTP fields */
/* Flow item types this PMD can classify on; RTE_FLOW_ITEM_TYPE_END is */
/* listed as a valid entry so an empty pattern terminator is accepted. */
61 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
62 RTE_FLOW_ITEM_TYPE_END,
63 RTE_FLOW_ITEM_TYPE_ETH,
64 RTE_FLOW_ITEM_TYPE_VLAN,
65 RTE_FLOW_ITEM_TYPE_IPV4,
66 RTE_FLOW_ITEM_TYPE_IPV6,
67 RTE_FLOW_ITEM_TYPE_ICMP,
68 RTE_FLOW_ITEM_TYPE_UDP,
69 RTE_FLOW_ITEM_TYPE_TCP,
70 RTE_FLOW_ITEM_TYPE_SCTP,
71 RTE_FLOW_ITEM_TYPE_GRE,
/* Flow actions this PMD supports: direct-to-queue and RSS distribution. */
75 enum rte_flow_action_type dpaa2_supported_action_type[] = {
76 RTE_FLOW_ACTION_TYPE_END,
77 RTE_FLOW_ACTION_TYPE_QUEUE,
78 RTE_FLOW_ACTION_TYPE_RSS
/* Currently active filter type; starts as NONE until a flow is created. */
81 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
/* Per-item-type default mask used when a pattern supplies no mask. */
82 static const void *default_mask;
/*
 * Configure classification for an ETH pattern item: registers the item in
 * the QoS (pattern[8]) and FS (pattern[group]) tables, adds ETH SA/DA/TYPE
 * extracts when reconfiguration is needed, and writes the spec/mask bytes
 * into the rule's key/mask buffers at DPAA2_CLS_RULE_OFFSET_ETH.
 * Returns a bitmask of DPAA2_QOS/FS_TABLE_RECONFIGURE flags.
 */
85 dpaa2_configure_flow_eth(struct rte_flow *flow,
86 struct rte_eth_dev *dev,
87 const struct rte_flow_attr *attr,
88 const struct rte_flow_item *pattern,
89 const struct rte_flow_action actions[] __rte_unused,
90 struct rte_flow_error *error __rte_unused)
95 int device_configured = 0, entry_found = 0;
97 const struct rte_flow_item_eth *spec, *mask;
99 /* TODO: Currently upper bound of range parameter is not implemented */
100 const struct rte_flow_item_eth *last __rte_unused;
101 struct dpaa2_dev_priv *priv = dev->data->dev_private;
105 /* DPAA2 platform has a limitation that extract parameter can not be */
106 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
107 /* TODO: pattern is an array of 9 elements where 9th pattern element */
108 /* is for QoS table and 1-8th pattern element is for FS tables. */
109 /* It can be changed to macro. */
110 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
111 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
112 DPKG_MAX_NUM_OF_EXTRACTS)
116 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
117 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
118 DPKG_MAX_NUM_OF_EXTRACTS);
/* Record this item type in the QoS table if not already present. */
122 for (j = 0; j < priv->pattern[8].item_count; j++) {
123 if (priv->pattern[8].pattern_type[j] != pattern->type) {
132 priv->pattern[8].pattern_type[j] = pattern->type;
133 priv->pattern[8].item_count++;
134 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Record this item type in the FS table for the attr's group. */
138 for (j = 0; j < priv->pattern[group].item_count; j++) {
139 if (priv->pattern[group].pattern_type[j] != pattern->type) {
148 priv->pattern[group].pattern_type[j] = pattern->type;
149 priv->pattern[group].item_count++;
150 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
153 /* Get traffic class index and flow id to be configured */
155 flow->index = attr->priority;
/* Add ETH SA, DA and TYPE header extracts to the QoS key config. */
157 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
158 index = priv->extract.qos_key_cfg.num_extracts;
159 priv->extract.qos_key_cfg.extracts[index].type =
160 DPKG_EXTRACT_FROM_HDR;
161 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
162 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
163 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
166 priv->extract.qos_key_cfg.extracts[index].type =
167 DPKG_EXTRACT_FROM_HDR;
168 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
169 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
170 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
173 priv->extract.qos_key_cfg.extracts[index].type =
174 DPKG_EXTRACT_FROM_HDR;
175 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
176 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
177 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
/* index is advanced per extract on lines not shown here. */
180 priv->extract.qos_key_cfg.num_extracts = index;
/* Mirror the same three ETH extracts into the group's FS key config. */
183 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
184 index = priv->extract.fs_key_cfg[group].num_extracts;
185 priv->extract.fs_key_cfg[group].extracts[index].type =
186 DPKG_EXTRACT_FROM_HDR;
187 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
188 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
189 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
192 priv->extract.fs_key_cfg[group].extracts[index].type =
193 DPKG_EXTRACT_FROM_HDR;
194 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
195 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
196 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
199 priv->extract.fs_key_cfg[group].extracts[index].type =
200 DPKG_EXTRACT_FROM_HDR;
201 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
202 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
203 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
206 priv->extract.fs_key_cfg[group].num_extracts = index;
209 /* Parse pattern list to get the matching parameters */
210 spec = (const struct rte_flow_item_eth *)pattern->spec;
211 last = (const struct rte_flow_item_eth *)pattern->last;
212 mask = (const struct rte_flow_item_eth *)
213 (pattern->mask ? pattern->mask : default_mask);
/* Key bytes: src MAC, dst MAC, ether type — same order as the extracts. */
216 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ETH;
217 memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
218 sizeof(struct ether_addr));
219 key_iova += sizeof(struct ether_addr);
220 memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
221 sizeof(struct ether_addr));
222 key_iova += sizeof(struct ether_addr);
223 memcpy((void *)key_iova, (const void *)(&spec->type),
/* Mask bytes laid out identically to the key bytes above. */
227 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ETH;
228 memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
229 sizeof(struct ether_addr));
230 mask_iova += sizeof(struct ether_addr);
231 memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
232 sizeof(struct ether_addr));
233 mask_iova += sizeof(struct ether_addr);
234 memcpy((void *)mask_iova, (const void *)(&mask->type),
237 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ETH +
238 ((2 * sizeof(struct ether_addr)) +
239 sizeof(rte_be16_t)));
240 return device_configured;
/*
 * Configure classification for a VLAN pattern item: registers the item in
 * the QoS and FS tables, adds a VLAN TCI extract where needed, and writes
 * the TCI spec/mask into the rule buffers at DPAA2_CLS_RULE_OFFSET_VLAN.
 * Returns a bitmask of DPAA2_QOS/FS_TABLE_RECONFIGURE flags.
 */
244 dpaa2_configure_flow_vlan(struct rte_flow *flow,
245 struct rte_eth_dev *dev,
246 const struct rte_flow_attr *attr,
247 const struct rte_flow_item *pattern,
248 const struct rte_flow_action actions[] __rte_unused,
249 struct rte_flow_error *error __rte_unused)
254 int device_configured = 0, entry_found = 0;
256 const struct rte_flow_item_vlan *spec, *mask;
258 const struct rte_flow_item_vlan *last __rte_unused;
259 struct dpaa2_dev_priv *priv = dev->data->dev_private;
263 /* DPAA2 platform has a limitation that extract parameter can not be */
264 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
265 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
266 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
267 DPKG_MAX_NUM_OF_EXTRACTS);
271 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
272 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
273 DPKG_MAX_NUM_OF_EXTRACTS);
/* Record this item type in the QoS table if not already present. */
277 for (j = 0; j < priv->pattern[8].item_count; j++) {
278 if (priv->pattern[8].pattern_type[j] != pattern->type) {
287 priv->pattern[8].pattern_type[j] = pattern->type;
288 priv->pattern[8].item_count++;
289 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Record this item type in the FS table for the attr's group. */
293 for (j = 0; j < priv->pattern[group].item_count; j++) {
294 if (priv->pattern[group].pattern_type[j] != pattern->type) {
303 priv->pattern[group].pattern_type[j] = pattern->type;
304 priv->pattern[group].item_count++;
305 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
309 /* Get traffic class index and flow id to be configured */
311 flow->index = attr->priority;
/* Single VLAN TCI extract for the QoS key. */
313 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
314 index = priv->extract.qos_key_cfg.num_extracts;
315 priv->extract.qos_key_cfg.extracts[index].type =
316 DPKG_EXTRACT_FROM_HDR;
317 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
318 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
319 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
320 priv->extract.qos_key_cfg.num_extracts++;
/* Single VLAN TCI extract for the group's FS key. */
323 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
324 index = priv->extract.fs_key_cfg[group].num_extracts;
325 priv->extract.fs_key_cfg[group].extracts[index].type =
326 DPKG_EXTRACT_FROM_HDR;
327 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
328 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
329 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
330 priv->extract.fs_key_cfg[group].num_extracts++;
333 /* Parse pattern list to get the matching parameters */
334 spec = (const struct rte_flow_item_vlan *)pattern->spec;
335 last = (const struct rte_flow_item_vlan *)pattern->last;
336 mask = (const struct rte_flow_item_vlan *)
337 (pattern->mask ? pattern->mask : default_mask);
/* Copy the 16-bit TCI into the key and mask buffers. */
339 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
340 memcpy((void *)key_iova, (const void *)(&spec->tci),
343 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_VLAN;
344 memcpy((void *)mask_iova, (const void *)(&mask->tci),
347 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_VLAN + sizeof(rte_be16_t));
348 return device_configured;
/*
 * Configure classification for an IPv4 pattern item: registers the item in
 * the QoS and FS tables, adds IP SRC/DST/PROTO extracts where needed, and
 * writes src addr, dst addr and next-proto spec/mask bytes at
 * DPAA2_CLS_RULE_OFFSET_IPV4.
 * Returns a bitmask of DPAA2_QOS/FS_TABLE_RECONFIGURE flags.
 */
352 dpaa2_configure_flow_ipv4(struct rte_flow *flow,
353 struct rte_eth_dev *dev,
354 const struct rte_flow_attr *attr,
355 const struct rte_flow_item *pattern,
356 const struct rte_flow_action actions[] __rte_unused,
357 struct rte_flow_error *error __rte_unused)
362 int device_configured = 0, entry_found = 0;
364 const struct rte_flow_item_ipv4 *spec, *mask;
366 const struct rte_flow_item_ipv4 *last __rte_unused;
367 struct dpaa2_dev_priv *priv = dev->data->dev_private;
371 /* DPAA2 platform has a limitation that extract parameter can not be */
372 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
373 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
374 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
375 DPKG_MAX_NUM_OF_EXTRACTS);
379 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
380 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
381 DPKG_MAX_NUM_OF_EXTRACTS);
/* Record this item type in the QoS table if not already present. */
385 for (j = 0; j < priv->pattern[8].item_count; j++) {
386 if (priv->pattern[8].pattern_type[j] != pattern->type) {
395 priv->pattern[8].pattern_type[j] = pattern->type;
396 priv->pattern[8].item_count++;
397 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Record this item type in the FS table for the attr's group. */
401 for (j = 0; j < priv->pattern[group].item_count; j++) {
402 if (priv->pattern[group].pattern_type[j] != pattern->type) {
411 priv->pattern[group].pattern_type[j] = pattern->type;
412 priv->pattern[group].item_count++;
413 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
416 /* Get traffic class index and flow id to be configured */
418 flow->index = attr->priority;
/* IP SRC, DST and PROTO header extracts for the QoS key. */
420 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
421 index = priv->extract.qos_key_cfg.num_extracts;
422 priv->extract.qos_key_cfg.extracts[index].type =
423 DPKG_EXTRACT_FROM_HDR;
424 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
425 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
426 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
429 priv->extract.qos_key_cfg.extracts[index].type =
430 DPKG_EXTRACT_FROM_HDR;
431 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
432 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
433 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
436 priv->extract.qos_key_cfg.extracts[index].type =
437 DPKG_EXTRACT_FROM_HDR;
438 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
439 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
440 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
/* index is advanced per extract on lines not shown here. */
443 priv->extract.qos_key_cfg.num_extracts = index;
/* Mirror the same three IP extracts into the group's FS key config. */
446 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
447 index = priv->extract.fs_key_cfg[group].num_extracts;
448 priv->extract.fs_key_cfg[group].extracts[index].type =
449 DPKG_EXTRACT_FROM_HDR;
450 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
451 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
452 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
455 priv->extract.fs_key_cfg[group].extracts[index].type =
456 DPKG_EXTRACT_FROM_HDR;
457 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
458 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
459 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
462 priv->extract.fs_key_cfg[group].extracts[index].type =
463 DPKG_EXTRACT_FROM_HDR;
464 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
465 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
466 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
469 priv->extract.fs_key_cfg[group].num_extracts = index;
472 /* Parse pattern list to get the matching parameters */
473 spec = (const struct rte_flow_item_ipv4 *)pattern->spec;
474 last = (const struct rte_flow_item_ipv4 *)pattern->last;
475 mask = (const struct rte_flow_item_ipv4 *)
476 (pattern->mask ? pattern->mask : default_mask);
/* Key bytes: src addr, dst addr, next-proto — same order as extracts. */
478 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
479 memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
481 key_iova += sizeof(uint32_t);
482 memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
484 key_iova += sizeof(uint32_t);
485 memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
/* Mask bytes laid out identically to the key bytes above. */
488 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV4;
489 memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
491 mask_iova += sizeof(uint32_t);
492 memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
494 mask_iova += sizeof(uint32_t);
495 memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
498 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV4 +
499 (2 * sizeof(uint32_t)) + sizeof(uint8_t));
501 return device_configured;
/*
 * Configure classification for an IPv6 pattern item: registers the item in
 * the QoS and FS tables, adds IP SRC/DST extracts where needed, and writes
 * the 16-byte src/dst address spec/mask at DPAA2_CLS_RULE_OFFSET_IPV6.
 * Returns a bitmask of DPAA2_QOS/FS_TABLE_RECONFIGURE flags.
 */
505 dpaa2_configure_flow_ipv6(struct rte_flow *flow,
506 struct rte_eth_dev *dev,
507 const struct rte_flow_attr *attr,
508 const struct rte_flow_item *pattern,
509 const struct rte_flow_action actions[] __rte_unused,
510 struct rte_flow_error *error __rte_unused)
515 int device_configured = 0, entry_found = 0;
517 const struct rte_flow_item_ipv6 *spec, *mask;
519 const struct rte_flow_item_ipv6 *last __rte_unused;
520 struct dpaa2_dev_priv *priv = dev->data->dev_private;
524 /* DPAA2 platform has a limitation that extract parameter can not be */
525 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
526 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
527 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
528 DPKG_MAX_NUM_OF_EXTRACTS);
532 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
533 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
534 DPKG_MAX_NUM_OF_EXTRACTS);
/* Record this item type in the QoS table if not already present. */
538 for (j = 0; j < priv->pattern[8].item_count; j++) {
539 if (priv->pattern[8].pattern_type[j] != pattern->type) {
548 priv->pattern[8].pattern_type[j] = pattern->type;
549 priv->pattern[8].item_count++;
550 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Record this item type in the FS table for the attr's group. */
554 for (j = 0; j < priv->pattern[group].item_count; j++) {
555 if (priv->pattern[group].pattern_type[j] != pattern->type) {
564 priv->pattern[group].pattern_type[j] = pattern->type;
565 priv->pattern[group].item_count++;
566 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
569 /* Get traffic class index and flow id to be configured */
571 flow->index = attr->priority;
/* IP SRC and DST header extracts for the QoS key. */
573 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
574 index = priv->extract.qos_key_cfg.num_extracts;
575 priv->extract.qos_key_cfg.extracts[index].type =
576 DPKG_EXTRACT_FROM_HDR;
577 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
578 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
579 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
582 priv->extract.qos_key_cfg.extracts[index].type =
583 DPKG_EXTRACT_FROM_HDR;
584 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
585 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
586 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
589 priv->extract.qos_key_cfg.num_extracts = index;
/* BUGFIX: this section configures the FS key, so it must be gated on the
 * FS reconfigure flag, not the QoS one (copy-paste from the block above).
 */
592 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
593 index = priv->extract.fs_key_cfg[group].num_extracts;
594 priv->extract.fs_key_cfg[group].extracts[index].type =
595 DPKG_EXTRACT_FROM_HDR;
596 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
597 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
598 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
601 priv->extract.fs_key_cfg[group].extracts[index].type =
602 DPKG_EXTRACT_FROM_HDR;
603 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
604 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
605 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
608 priv->extract.fs_key_cfg[group].num_extracts = index;
611 /* Parse pattern list to get the matching parameters */
612 spec = (const struct rte_flow_item_ipv6 *)pattern->spec;
613 last = (const struct rte_flow_item_ipv6 *)pattern->last;
614 mask = (const struct rte_flow_item_ipv6 *)
615 (pattern->mask ? pattern->mask : default_mask);
/* Key bytes: 16-byte src address followed by 16-byte dst address. */
617 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
618 memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
619 sizeof(spec->hdr.src_addr));
620 key_iova += sizeof(spec->hdr.src_addr);
621 memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
622 sizeof(spec->hdr.dst_addr));
/* Mask bytes laid out identically to the key bytes above. */
624 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_IPV6;
625 memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
626 sizeof(mask->hdr.src_addr));
627 mask_iova += sizeof(mask->hdr.src_addr);
628 memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
629 sizeof(mask->hdr.dst_addr));
631 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_IPV6 +
632 sizeof(spec->hdr.src_addr) +
633 sizeof(mask->hdr.dst_addr));
634 return device_configured;
/*
 * Configure classification for an ICMP pattern item: registers the item in
 * the QoS and FS tables, adds ICMP TYPE/CODE extracts where needed, and
 * writes the type/code spec/mask bytes at DPAA2_CLS_RULE_OFFSET_ICMP.
 * Returns a bitmask of DPAA2_QOS/FS_TABLE_RECONFIGURE flags.
 */
638 dpaa2_configure_flow_icmp(struct rte_flow *flow,
639 struct rte_eth_dev *dev,
640 const struct rte_flow_attr *attr,
641 const struct rte_flow_item *pattern,
642 const struct rte_flow_action actions[] __rte_unused,
643 struct rte_flow_error *error __rte_unused)
648 int device_configured = 0, entry_found = 0;
650 const struct rte_flow_item_icmp *spec, *mask;
652 const struct rte_flow_item_icmp *last __rte_unused;
653 struct dpaa2_dev_priv *priv = dev->data->dev_private;
657 /* DPAA2 platform has a limitation that extract parameter can not be */
658 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
659 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
660 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
661 DPKG_MAX_NUM_OF_EXTRACTS);
665 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
666 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
667 DPKG_MAX_NUM_OF_EXTRACTS);
/* Record this item type in the QoS table if not already present. */
671 for (j = 0; j < priv->pattern[8].item_count; j++) {
672 if (priv->pattern[8].pattern_type[j] != pattern->type) {
681 priv->pattern[8].pattern_type[j] = pattern->type;
682 priv->pattern[8].item_count++;
683 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Record this item type in the FS table for the attr's group. */
687 for (j = 0; j < priv->pattern[group].item_count; j++) {
688 if (priv->pattern[group].pattern_type[j] != pattern->type) {
697 priv->pattern[group].pattern_type[j] = pattern->type;
698 priv->pattern[group].item_count++;
699 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
702 /* Get traffic class index and flow id to be configured */
704 flow->index = attr->priority;
/* ICMP TYPE and CODE header extracts for the QoS key. */
706 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
707 index = priv->extract.qos_key_cfg.num_extracts;
708 priv->extract.qos_key_cfg.extracts[index].type =
709 DPKG_EXTRACT_FROM_HDR;
710 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
711 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
712 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
715 priv->extract.qos_key_cfg.extracts[index].type =
716 DPKG_EXTRACT_FROM_HDR;
717 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
718 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
719 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
722 priv->extract.qos_key_cfg.num_extracts = index;
/* Mirror the TYPE/CODE extracts into the group's FS key config. */
725 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
726 index = priv->extract.fs_key_cfg[group].num_extracts;
727 priv->extract.fs_key_cfg[group].extracts[index].type =
728 DPKG_EXTRACT_FROM_HDR;
729 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
730 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
731 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
734 priv->extract.fs_key_cfg[group].extracts[index].type =
735 DPKG_EXTRACT_FROM_HDR;
736 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
737 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
738 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
741 priv->extract.fs_key_cfg[group].num_extracts = index;
744 /* Parse pattern list to get the matching parameters */
745 spec = (const struct rte_flow_item_icmp *)pattern->spec;
746 last = (const struct rte_flow_item_icmp *)pattern->last;
747 mask = (const struct rte_flow_item_icmp *)
748 (pattern->mask ? pattern->mask : default_mask);
/* Key bytes: ICMP type then ICMP code. */
750 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
751 memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
753 key_iova += sizeof(uint8_t);
754 memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
757 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_ICMP;
758 memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
/* BUGFIX: advance mask_iova, not key_iova — the original incremented the
 * key pointer here, so the code mask overwrote the type mask byte.
 */
760 mask_iova += sizeof(uint8_t);
761 memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
764 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_ICMP +
765 (2 * sizeof(uint8_t)));
767 return device_configured;
/*
 * Configure classification for a UDP pattern item: registers the item in
 * the QoS and FS tables, adds IP PROTO plus UDP SRC/DST port extracts where
 * needed, forces the key's IP-proto byte to 0x11 (IPPROTO_UDP), and writes
 * the port spec/mask at DPAA2_CLS_RULE_OFFSET_UDP.
 * Returns a bitmask of DPAA2_QOS/FS_TABLE_RECONFIGURE flags.
 */
771 dpaa2_configure_flow_udp(struct rte_flow *flow,
772 struct rte_eth_dev *dev,
773 const struct rte_flow_attr *attr,
774 const struct rte_flow_item *pattern,
775 const struct rte_flow_action actions[] __rte_unused,
776 struct rte_flow_error *error __rte_unused)
781 int device_configured = 0, entry_found = 0;
783 const struct rte_flow_item_udp *spec, *mask;
785 const struct rte_flow_item_udp *last __rte_unused;
786 struct dpaa2_dev_priv *priv = dev->data->dev_private;
790 /* DPAA2 platform has a limitation that extract parameter can not be */
791 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
792 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
793 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
794 DPKG_MAX_NUM_OF_EXTRACTS);
798 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
799 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
800 DPKG_MAX_NUM_OF_EXTRACTS);
/* Record this item type in the QoS table if not already present. */
804 for (j = 0; j < priv->pattern[8].item_count; j++) {
805 if (priv->pattern[8].pattern_type[j] != pattern->type) {
814 priv->pattern[8].pattern_type[j] = pattern->type;
815 priv->pattern[8].item_count++;
816 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Record this item type in the FS table for the attr's group. */
820 for (j = 0; j < priv->pattern[group].item_count; j++) {
821 if (priv->pattern[group].pattern_type[j] != pattern->type) {
830 priv->pattern[group].pattern_type[j] = pattern->type;
831 priv->pattern[group].item_count++;
832 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
835 /* Get traffic class index and flow id to be configured */
837 flow->index = attr->priority;
/* IP PROTO plus UDP SRC/DST port extracts for the QoS key. */
839 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
840 index = priv->extract.qos_key_cfg.num_extracts;
841 priv->extract.qos_key_cfg.extracts[index].type =
842 DPKG_EXTRACT_FROM_HDR;
843 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
844 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
845 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
848 priv->extract.qos_key_cfg.extracts[index].type =
849 DPKG_EXTRACT_FROM_HDR;
850 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
851 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
852 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
855 priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
856 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
857 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
858 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
/* index is advanced per extract on lines not shown here. */
861 priv->extract.qos_key_cfg.num_extracts = index;
/* Mirror the same three extracts into the group's FS key config. */
864 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
865 index = priv->extract.fs_key_cfg[group].num_extracts;
866 priv->extract.fs_key_cfg[group].extracts[index].type =
867 DPKG_EXTRACT_FROM_HDR;
868 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
869 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
870 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
873 priv->extract.fs_key_cfg[group].extracts[index].type =
874 DPKG_EXTRACT_FROM_HDR;
875 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
876 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
877 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
880 priv->extract.fs_key_cfg[group].extracts[index].type =
881 DPKG_EXTRACT_FROM_HDR;
882 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
883 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
884 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
887 priv->extract.fs_key_cfg[group].num_extracts = index;
890 /* Parse pattern list to get the matching parameters */
891 spec = (const struct rte_flow_item_udp *)pattern->spec;
892 last = (const struct rte_flow_item_udp *)pattern->last;
893 mask = (const struct rte_flow_item_udp *)
894 (pattern->mask ? pattern->mask : default_mask);
/* Force the IPv4 next-proto key byte to 0x11 (IPPROTO_UDP). */
/* NOTE(review): no matching mask byte is set for this proto byte in the */
/* visible code — verify the mask buffer covers it upstream. */
896 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
897 (2 * sizeof(uint32_t));
898 memset((void *)key_iova, 0x11, sizeof(uint8_t));
/* Key bytes: UDP src port then dst port. */
899 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_UDP;
900 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
902 key_iova += sizeof(uint16_t);
903 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
/* Mask bytes laid out identically to the key bytes above. */
906 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_UDP;
907 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
909 mask_iova += sizeof(uint16_t);
910 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
913 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_UDP +
914 (2 * sizeof(uint16_t)));
916 return device_configured;
920 dpaa2_configure_flow_tcp(struct rte_flow *flow,
921 struct rte_eth_dev *dev,
922 const struct rte_flow_attr *attr,
923 const struct rte_flow_item *pattern,
924 const struct rte_flow_action actions[] __rte_unused,
925 struct rte_flow_error *error __rte_unused)
930 int device_configured = 0, entry_found = 0;
932 const struct rte_flow_item_tcp *spec, *mask;
934 const struct rte_flow_item_tcp *last __rte_unused;
935 struct dpaa2_dev_priv *priv = dev->data->dev_private;
939 /* DPAA2 platform has a limitation that extract parameter can not be */
940 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
941 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
942 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
943 DPKG_MAX_NUM_OF_EXTRACTS);
947 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
948 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
949 DPKG_MAX_NUM_OF_EXTRACTS);
953 for (j = 0; j < priv->pattern[8].item_count; j++) {
954 if (priv->pattern[8].pattern_type[j] != pattern->type) {
963 priv->pattern[8].pattern_type[j] = pattern->type;
964 priv->pattern[8].item_count++;
965 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
969 for (j = 0; j < priv->pattern[group].item_count; j++) {
970 if (priv->pattern[group].pattern_type[j] != pattern->type) {
979 priv->pattern[group].pattern_type[j] = pattern->type;
980 priv->pattern[group].item_count++;
981 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
984 /* Get traffic class index and flow id to be configured */
986 flow->index = attr->priority;
988 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
989 index = priv->extract.qos_key_cfg.num_extracts;
990 priv->extract.qos_key_cfg.extracts[index].type =
991 DPKG_EXTRACT_FROM_HDR;
992 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
993 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
994 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
997 priv->extract.qos_key_cfg.extracts[index].type =
998 DPKG_EXTRACT_FROM_HDR;
999 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1000 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1001 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
1004 priv->extract.qos_key_cfg.extracts[index].type =
1005 DPKG_EXTRACT_FROM_HDR;
1006 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1007 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1008 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
1011 priv->extract.qos_key_cfg.num_extracts = index;
1014 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1015 index = priv->extract.fs_key_cfg[group].num_extracts;
1016 priv->extract.fs_key_cfg[group].extracts[index].type =
1017 DPKG_EXTRACT_FROM_HDR;
1018 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1019 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
1020 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
1023 priv->extract.fs_key_cfg[group].extracts[index].type =
1024 DPKG_EXTRACT_FROM_HDR;
1025 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1026 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1027 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
1030 priv->extract.fs_key_cfg[group].extracts[index].type =
1031 DPKG_EXTRACT_FROM_HDR;
1032 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1033 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
1034 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
1037 priv->extract.fs_key_cfg[group].num_extracts = index;
1040 /* Parse pattern list to get the matching parameters */
1041 spec = (const struct rte_flow_item_tcp *)pattern->spec;
1042 last = (const struct rte_flow_item_tcp *)pattern->last;
1043 mask = (const struct rte_flow_item_tcp *)
1044 (pattern->mask ? pattern->mask : default_mask);
1046 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
1047 (2 * sizeof(uint32_t));
1048 memset((void *)key_iova, 0x06, sizeof(uint8_t));
1049 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_TCP;
1050 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
1052 key_iova += sizeof(uint16_t);
1053 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
1056 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_TCP;
1057 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
1059 mask_iova += sizeof(uint16_t);
1060 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
1063 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_TCP +
1064 (2 * sizeof(uint16_t)));
1066 return device_configured;
/*
 * dpaa2_configure_flow_sctp() - translate an SCTP rte_flow pattern item into
 * DPAA2 key-extract configuration and rule key/mask data.
 *
 * Registers the pattern type in both the QoS slot (pattern[8] — presumably
 * the TC-independent/QoS table slot; TODO confirm against MAX_TCS usage) and
 * the per-group FS slot, appends IP-proto + SCTP src/dst port extracts to the
 * QoS and FS key configurations when reconfiguration is needed, and writes
 * the matching key/mask bytes at DPAA2_CLS_RULE_OFFSET_SCTP.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE / DPAA2_FS_TABLE_RECONFIGURE
 * telling the caller which hardware tables must be re-programmed.
 */
1070 dpaa2_configure_flow_sctp(struct rte_flow *flow,
1071 struct rte_eth_dev *dev,
1072 const struct rte_flow_attr *attr,
1073 const struct rte_flow_item *pattern,
1074 const struct rte_flow_action actions[] __rte_unused,
1075 struct rte_flow_error *error __rte_unused)
1080 int device_configured = 0, entry_found = 0;
1082 const struct rte_flow_item_sctp *spec, *mask;
1084 const struct rte_flow_item_sctp *last __rte_unused;
1085 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1087 group = attr->group;
1089 /* DPAA2 platform has a limitation that extract parameter can not be */
1090 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1091 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1092 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1093 DPKG_MAX_NUM_OF_EXTRACTS);
1097 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1098 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1099 DPKG_MAX_NUM_OF_EXTRACTS);
/* Record this pattern type in the QoS slot if not already present. */
1103 for (j = 0; j < priv->pattern[8].item_count; j++) {
1104 if (priv->pattern[8].pattern_type[j] != pattern->type) {
1113 priv->pattern[8].pattern_type[j] = pattern->type;
1114 priv->pattern[8].item_count++;
1115 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same bookkeeping for the per-group FS slot. */
1119 for (j = 0; j < priv->pattern[group].item_count; j++) {
1120 if (priv->pattern[group].pattern_type[j] != pattern->type) {
1129 priv->pattern[group].pattern_type[j] = pattern->type;
1130 priv->pattern[group].item_count++;
1131 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1134 /* Get traffic class index and flow id to be configured */
1135 flow->tc_id = group;
1136 flow->index = attr->priority;
/* Append IP-proto + SCTP src/dst port extracts to the QoS key config. */
1138 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1139 index = priv->extract.qos_key_cfg.num_extracts;
1140 priv->extract.qos_key_cfg.extracts[index].type =
1141 DPKG_EXTRACT_FROM_HDR;
1142 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1143 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
1144 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
1147 priv->extract.qos_key_cfg.extracts[index].type =
1148 DPKG_EXTRACT_FROM_HDR;
1149 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1150 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1151 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1154 priv->extract.qos_key_cfg.extracts[index].type =
1155 DPKG_EXTRACT_FROM_HDR;
1156 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1157 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1158 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1161 priv->extract.qos_key_cfg.num_extracts = index;
/* Mirror the same three extracts into the per-group FS key config. */
1164 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1165 index = priv->extract.fs_key_cfg[group].num_extracts;
1166 priv->extract.fs_key_cfg[group].extracts[index].type =
1167 DPKG_EXTRACT_FROM_HDR;
1168 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1169 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
1170 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
1173 priv->extract.fs_key_cfg[group].extracts[index].type =
1174 DPKG_EXTRACT_FROM_HDR;
1175 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1176 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1177 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1180 priv->extract.fs_key_cfg[group].extracts[index].type =
1181 DPKG_EXTRACT_FROM_HDR;
1182 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1183 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1184 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1187 priv->extract.fs_key_cfg[group].num_extracts = index;
1190 /* Parse pattern list to get the matching parameters */
1191 spec = (const struct rte_flow_item_sctp *)pattern->spec;
1192 last = (const struct rte_flow_item_sctp *)pattern->last;
1193 mask = (const struct rte_flow_item_sctp *)
1194 (pattern->mask ? pattern->mask : default_mask);
/* 0x84 is the IP protocol number for SCTP, written at the IPv4 proto slot. */
1196 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_IPV4 +
1197 (2 * sizeof(uint32_t));
1198 memset((void *)key_iova, 0x84, sizeof(uint8_t));
1199 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
1200 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
1202 key_iova += sizeof(uint16_t);
1203 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
1206 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_SCTP;
1207 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
1209 mask_iova += sizeof(uint16_t);
1210 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
/* Key covers everything up to and including the two 16-bit SCTP ports. */
1213 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_SCTP +
1214 (2 * sizeof(uint16_t)));
1215 return device_configured;
/*
 * dpaa2_configure_flow_gre() - translate a GRE rte_flow pattern item into
 * DPAA2 key-extract configuration and rule key/mask data.
 *
 * Same bookkeeping as the other dpaa2_configure_flow_*() helpers: record the
 * pattern type in the QoS slot (pattern[8]) and the per-group FS slot, append
 * a single GRE protocol-type extract to each key config when needed, and copy
 * the 16-bit GRE protocol spec/mask at DPAA2_CLS_RULE_OFFSET_GRE.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE / DPAA2_FS_TABLE_RECONFIGURE.
 */
1219 dpaa2_configure_flow_gre(struct rte_flow *flow,
1220 struct rte_eth_dev *dev,
1221 const struct rte_flow_attr *attr,
1222 const struct rte_flow_item *pattern,
1223 const struct rte_flow_action actions[] __rte_unused,
1224 struct rte_flow_error *error __rte_unused)
1229 int device_configured = 0, entry_found = 0;
1231 const struct rte_flow_item_gre *spec, *mask;
1233 const struct rte_flow_item_gre *last __rte_unused;
1234 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1236 group = attr->group;
1238 /* DPAA2 platform has a limitation that extract parameter can not be */
1239 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1240 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1241 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1242 DPKG_MAX_NUM_OF_EXTRACTS);
1246 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1247 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1248 DPKG_MAX_NUM_OF_EXTRACTS);
/* Record this pattern type in the QoS slot if not already present. */
1252 for (j = 0; j < priv->pattern[8].item_count; j++) {
1253 if (priv->pattern[8].pattern_type[j] != pattern->type) {
1262 priv->pattern[8].pattern_type[j] = pattern->type;
1263 priv->pattern[8].item_count++;
1264 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same bookkeeping for the per-group FS slot. */
1268 for (j = 0; j < priv->pattern[group].item_count; j++) {
1269 if (priv->pattern[group].pattern_type[j] != pattern->type) {
1278 priv->pattern[group].pattern_type[j] = pattern->type;
1279 priv->pattern[group].item_count++;
1280 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1283 /* Get traffic class index and flow id to be configured */
1284 flow->tc_id = group;
1285 flow->index = attr->priority;
/* GRE only needs one extract: the 16-bit protocol-type field. */
1287 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1288 index = priv->extract.qos_key_cfg.num_extracts;
1289 priv->extract.qos_key_cfg.extracts[index].type =
1290 DPKG_EXTRACT_FROM_HDR;
1291 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1292 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1293 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1296 priv->extract.qos_key_cfg.num_extracts = index;
1299 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1300 index = priv->extract.fs_key_cfg[group].num_extracts;
1301 priv->extract.fs_key_cfg[group].extracts[index].type =
1302 DPKG_EXTRACT_FROM_HDR;
1303 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1304 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1305 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1308 priv->extract.fs_key_cfg[group].num_extracts = index;
1311 /* Parse pattern list to get the matching parameters */
1312 spec = (const struct rte_flow_item_gre *)pattern->spec;
1313 last = (const struct rte_flow_item_gre *)pattern->last;
1314 mask = (const struct rte_flow_item_gre *)
1315 (pattern->mask ? pattern->mask : default_mask);
/* Write the big-endian protocol type into the rule key and mask. */
1317 key_iova = flow->rule.key_iova + DPAA2_CLS_RULE_OFFSET_GRE;
1318 memcpy((void *)key_iova, (const void *)(&spec->protocol),
1319 sizeof(rte_be16_t));
1321 mask_iova = flow->rule.mask_iova + DPAA2_CLS_RULE_OFFSET_GRE;
1322 memcpy((void *)mask_iova, (const void *)(&mask->protocol),
1323 sizeof(rte_be16_t));
1325 flow->rule.key_size = (DPAA2_CLS_RULE_OFFSET_GRE + sizeof(rte_be16_t));
1327 return device_configured;
/*
 * dpaa2_generic_flow_set() - program one rte_flow rule into the DPNI.
 *
 * Two passes:
 *   1. Walk the pattern list and dispatch each item to the matching
 *      dpaa2_configure_flow_*() helper, which fills the rule key/mask and
 *      reports (via is_keycfg_configured) whether the QoS and/or FS key
 *      configurations changed.
 *   2. Walk the action list:
 *      - QUEUE: re-program the QoS table and/or per-TC FS distribution if
 *        needed, then add the rule to the QoS table and the FS table.
 *      - RSS: build a hash dist-key from rss_conf->types, program hash
 *        distribution for the TC, and add the rule to the QoS table only.
 * Finally the flow is linked into priv->flows.
 *
 * Returns 0 on success, negative on failure (error paths visible below log
 * and bail out; exact return values are in the elided branches).
 */
1331 dpaa2_generic_flow_set(struct rte_flow *flow,
1332 struct rte_eth_dev *dev,
1333 const struct rte_flow_attr *attr,
1334 const struct rte_flow_item pattern[],
1335 const struct rte_flow_action actions[],
1336 struct rte_flow_error *error)
1338 const struct rte_flow_action_queue *dest_queue;
1339 const struct rte_flow_action_rss *rss_conf;
1341 int is_keycfg_configured = 0, end_of_list = 0;
1342 int ret = 0, i = 0, j = 0;
1343 struct dpni_attr nic_attr;
1344 struct dpni_rx_tc_dist_cfg tc_cfg;
1345 struct dpni_qos_tbl_cfg qos_cfg;
1346 struct dpkg_profile_cfg key_cfg;
1347 struct dpni_fs_action_cfg action;
1348 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1349 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1351 struct rte_flow *curr = LIST_FIRST(&priv->flows);
1353 /* Parse pattern list to get the matching parameters */
1354 while (!end_of_list) {
1355 switch (pattern[i].type) {
1356 case RTE_FLOW_ITEM_TYPE_ETH:
1357 is_keycfg_configured = dpaa2_configure_flow_eth(flow,
1364 case RTE_FLOW_ITEM_TYPE_VLAN:
1365 is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
1372 case RTE_FLOW_ITEM_TYPE_IPV4:
1373 is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
1380 case RTE_FLOW_ITEM_TYPE_IPV6:
1381 is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
1388 case RTE_FLOW_ITEM_TYPE_ICMP:
1389 is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
1396 case RTE_FLOW_ITEM_TYPE_UDP:
1397 is_keycfg_configured = dpaa2_configure_flow_udp(flow,
1404 case RTE_FLOW_ITEM_TYPE_TCP:
1405 is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
1412 case RTE_FLOW_ITEM_TYPE_SCTP:
1413 is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
1419 case RTE_FLOW_ITEM_TYPE_GRE:
1420 is_keycfg_configured = dpaa2_configure_flow_gre(flow,
1427 case RTE_FLOW_ITEM_TYPE_END:
1429 break; /*End of List*/
1431 DPAA2_PMD_ERR("Invalid action type");
1438 /* Let's parse action on matching traffic */
1440 while (!end_of_list) {
1441 switch (actions[j].type) {
1442 case RTE_FLOW_ACTION_TYPE_QUEUE:
1443 dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
1444 flow->flow_id = dest_queue->index;
1445 flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
1446 memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
1447 action.flow_id = flow->flow_id;
/* Re-program the QoS (TC-selection) table if its key layout changed. */
1448 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1449 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1450 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1452 "Unable to prepare extract parameters");
1456 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
1457 qos_cfg.discard_on_miss = true;
1458 qos_cfg.keep_entries = true;
1459 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1460 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1461 priv->token, &qos_cfg);
1464 "Distribution cannot be configured.(%d)"
/* Re-program the per-TC flow-steering distribution if needed. */
1469 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1470 if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
1471 (uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
1473 "Unable to prepare extract parameters");
1477 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1478 tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
1479 tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
1480 tc_cfg.key_cfg_iova =
1481 (uint64_t)priv->extract.fs_extract_param[flow->tc_id];
1482 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1483 tc_cfg.fs_cfg.keep_entries = true;
1484 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1486 flow->tc_id, &tc_cfg);
1489 "Distribution cannot be configured.(%d)"
1494 /* Configure QoS table first */
1495 memset(&nic_attr, 0, sizeof(struct dpni_attr));
1496 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1497 priv->token, &nic_attr);
1500 "Failure to get attribute. dpni@%p err code(%d)\n",
/* QoS entries are indexed per-TC: priority + tc_id * fs_entries. */
1505 action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
1506 index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1507 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
1508 priv->token, &flow->rule,
1509 flow->tc_id, index);
1512 "Error in addnig entry to QoS table(%d)", ret);
1516 /* Then Configure FS table */
1517 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1518 flow->tc_id, flow->index,
1519 &flow->rule, &action);
1522 "Error in adding entry to FS table(%d)", ret);
1526 case RTE_FLOW_ACTION_TYPE_RSS:
1527 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1528 priv->token, &nic_attr);
1531 "Failure to get attribute. dpni@%p err code(%d)\n",
/* All RSS queues must lie within the TC selected by attr->group. */
1535 rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
1536 for (i = 0; i < (int)rss_conf->queue_num; i++) {
1537 if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
1538 rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
1540 "Queue/Group combination are not supported\n");
1545 flow->action = RTE_FLOW_ACTION_TYPE_RSS;
1546 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
1550 "unable to set flow distribution.please check queue config\n");
1554 /* Allocate DMA'ble memory to write the rules */
1555 param = (size_t)rte_malloc(NULL, 256, 64);
1557 DPAA2_PMD_ERR("Memory allocation failure\n");
1561 if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
1563 "Unable to prepare extract parameters");
1564 rte_free((void *)param);
/* Hash distribution across the queues named in the RSS action. */
1568 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1569 tc_cfg.dist_size = rss_conf->queue_num;
1570 tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
1571 tc_cfg.key_cfg_iova = (size_t)param;
1572 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1574 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1575 priv->token, flow->tc_id,
1579 "Distribution cannot be configured: %d\n", ret);
1580 rte_free((void *)param);
1584 rte_free((void *)param);
/* NOTE(review): this branch tests DPAA2_FS_TABLE_RECONFIGURE but then
 * re-programs the QoS table — looks intentional for the RSS path since
 * the QoS table selects the TC, but confirm the flag choice. */
1585 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1586 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1587 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1589 "Unable to prepare extract parameters");
1593 sizeof(struct dpni_qos_tbl_cfg));
1594 qos_cfg.discard_on_miss = true;
1595 qos_cfg.keep_entries = true;
1596 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1597 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1598 priv->token, &qos_cfg);
1601 "Distribution can not be configured(%d)\n",
1607 /* Add Rule into QoS table */
1608 index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1609 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1610 &flow->rule, flow->tc_id,
1614 "Error in entry addition in QoS table(%d)",
1619 case RTE_FLOW_ACTION_TYPE_END:
1623 DPAA2_PMD_ERR("Invalid action type");
1631 /* New rules are inserted. */
/* Insert at head when the list is empty, else append at the tail. */
1633 LIST_INSERT_HEAD(&priv->flows, flow, next);
1635 while (LIST_NEXT(curr, next))
1636 curr = LIST_NEXT(curr, next);
1637 LIST_INSERT_AFTER(curr, flow, next);
/*
 * dpaa2_dev_verify_attr() - validate generic rte_flow attributes against the
 * DPNI capabilities.
 *
 * Rejects: group >= number of Rx traffic classes, priority >= FS table size,
 * any egress rule, and rules without the ingress flag (only ingress
 * classification is supported by this driver).
 */
1644 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
1645 const struct rte_flow_attr *attr)
/* attr->group selects the Rx TC, so it must be a valid TC index. */
1649 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
1650 DPAA2_PMD_ERR("Priority group is out of range\n");
/* attr->priority becomes the FS table entry index within the group. */
1653 if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
1654 DPAA2_PMD_ERR("Priority within the group is out of range\n");
1657 if (unlikely(attr->egress)) {
1659 "Flow configuration is not supported on egress side\n");
1662 if (unlikely(!attr->ingress)) {
1663 DPAA2_PMD_ERR("Ingress flag must be configured\n");
/*
 * dpaa2_dev_update_default_mask() - point the file-scope default_mask at the
 * rte_flow standard default mask matching the given pattern item type.
 *
 * Used when a pattern supplies 'last' but no explicit 'mask', so the item is
 * matched with the generic per-type mask from the rte_flow API.
 */
1670 dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
1672 switch (pattern->type) {
1673 case RTE_FLOW_ITEM_TYPE_ETH:
1674 default_mask = (const void *)&rte_flow_item_eth_mask;
1676 case RTE_FLOW_ITEM_TYPE_VLAN:
1677 default_mask = (const void *)&rte_flow_item_vlan_mask;
1679 case RTE_FLOW_ITEM_TYPE_IPV4:
1680 default_mask = (const void *)&rte_flow_item_ipv4_mask;
1682 case RTE_FLOW_ITEM_TYPE_IPV6:
1683 default_mask = (const void *)&rte_flow_item_ipv6_mask;
1685 case RTE_FLOW_ITEM_TYPE_ICMP:
1686 default_mask = (const void *)&rte_flow_item_icmp_mask;
1688 case RTE_FLOW_ITEM_TYPE_UDP:
1689 default_mask = (const void *)&rte_flow_item_udp_mask;
1691 case RTE_FLOW_ITEM_TYPE_TCP:
1692 default_mask = (const void *)&rte_flow_item_tcp_mask;
1694 case RTE_FLOW_ITEM_TYPE_SCTP:
1695 default_mask = (const void *)&rte_flow_item_sctp_mask;
1697 case RTE_FLOW_ITEM_TYPE_GRE:
1698 default_mask = (const void *)&rte_flow_item_gre_mask;
1701 DPAA2_PMD_ERR("Invalid pattern type");
1706 dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
1707 const struct rte_flow_item pattern[])
1709 unsigned int i, j, k, is_found = 0;
1712 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1713 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
1714 if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
1724 /* Lets verify other combinations of given pattern rules */
1725 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1726 if (!pattern[j].spec) {
1730 if ((pattern[j].last) && (!pattern[j].mask))
1731 dpaa2_dev_update_default_mask(&pattern[j]);
1734 /* DPAA2 platform has a limitation that extract parameter can not be */
1735 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1736 for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1737 for (j = 0; j < MAX_TCS + 1; j++) {
1738 for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; j++) {
1739 if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type)
1742 if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS)
/*
 * dpaa2_dev_verify_actions() - validate an action list before flow creation.
 *
 * Every action type must appear in dpaa2_supported_action_type[], and every
 * action other than DROP must carry a non-NULL conf pointer.
 */
1750 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
1752 unsigned int i, j, is_found = 0;
1755 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1756 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
1757 if (dpaa2_supported_action_type[i] == actions[j].type) {
/* Non-DROP actions need configuration data (queue index, RSS conf, ...). */
1767 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1768 if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
/*
 * dpaa2_flow_validate() - rte_flow_ops .validate callback.
 *
 * Fetches the DPNI attributes, then runs the three verification helpers in
 * order: attributes, pattern list, action list. On any failure an
 * rte_flow_error is filled with EPERM and the matching error type
 * (ATTR / ITEM / ACTION) before bailing out.
 */
1775 int dpaa2_flow_validate(struct rte_eth_dev *dev,
1776 const struct rte_flow_attr *flow_attr,
1777 const struct rte_flow_item pattern[],
1778 const struct rte_flow_action actions[],
1779 struct rte_flow_error *error)
1781 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1782 struct dpni_attr dpni_attr;
1783 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1784 uint16_t token = priv->token;
/* Attributes are needed to bound-check group/priority in verify_attr(). */
1787 memset(&dpni_attr, 0, sizeof(struct dpni_attr));
1788 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
1791 "Failure to get dpni@%p attribute, err code %d\n",
1793 rte_flow_error_set(error, EPERM,
1794 RTE_FLOW_ERROR_TYPE_ATTR,
1795 flow_attr, "invalid");
1799 /* Verify input attributes */
1800 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
1803 "Invalid attributes are given\n");
1804 rte_flow_error_set(error, EPERM,
1805 RTE_FLOW_ERROR_TYPE_ATTR,
1806 flow_attr, "invalid");
1807 goto not_valid_params;
1809 /* Verify input pattern list */
1810 ret = dpaa2_dev_verify_patterns(priv, pattern);
1813 "Invalid pattern list is given\n");
1814 rte_flow_error_set(error, EPERM,
1815 RTE_FLOW_ERROR_TYPE_ITEM,
1816 pattern, "invalid");
1817 goto not_valid_params;
1819 /* Verify input action list */
1820 ret = dpaa2_dev_verify_actions(actions);
1823 "Invalid action list is given\n");
1824 rte_flow_error_set(error, EPERM,
1825 RTE_FLOW_ERROR_TYPE_ACTION,
1826 actions, "invalid");
1827 goto not_valid_params;
/*
 * dpaa2_flow_create() - rte_flow_ops .create callback.
 *
 * Allocates the rte_flow object plus two 256-byte, 64-byte-aligned DMA-able
 * buffers for the classification rule key and mask, then dispatches to
 * dpaa2_generic_flow_set() for RTE_ETH_FILTER_GENERIC. On failure the error
 * is reported via rte_flow_error_set and all three allocations are freed.
 *
 * Returns the new flow on success, NULL on failure.
 */
1834 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
1835 const struct rte_flow_attr *attr,
1836 const struct rte_flow_item pattern[],
1837 const struct rte_flow_action actions[],
1838 struct rte_flow_error *error)
1840 struct rte_flow *flow = NULL;
1841 size_t key_iova = 0, mask_iova = 0;
1844 flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
1846 DPAA2_PMD_ERR("Failure to allocate memory for flow");
1849 /* Allocate DMA'ble memory to write the rules */
1850 key_iova = (size_t)rte_malloc(NULL, 256, 64);
1853 "Memory allocation failure for rule configration\n");
1856 mask_iova = (size_t)rte_malloc(NULL, 256, 64);
1859 "Memory allocation failure for rule configration\n");
/* rule.key_size grows as pattern items are written by the helpers. */
1863 flow->rule.key_iova = key_iova;
1864 flow->rule.mask_iova = mask_iova;
1865 flow->rule.key_size = 0;
1867 switch (dpaa2_filter_type) {
1868 case RTE_ETH_FILTER_GENERIC:
1869 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
/* Only overwrite the error if the callee did not already set one. */
1872 if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
1873 rte_flow_error_set(error, EPERM,
1874 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1877 "Failure to create flow, return code (%d)", ret);
1878 goto creation_error;
1882 DPAA2_PMD_ERR("Filter type (%d) not supported",
/* Common error path: report and release everything allocated above. */
1889 rte_flow_error_set(error, EPERM,
1890 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1891 NULL, "memory alloc");
1893 rte_free((void *)flow);
1894 rte_free((void *)key_iova);
1895 rte_free((void *)mask_iova);
1901 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
1902 struct rte_flow *flow,
1903 struct rte_flow_error *error)
1906 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1907 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1909 switch (flow->action) {
1910 case RTE_FLOW_ACTION_TYPE_QUEUE:
1911 /* Remove entry from QoS table first */
1912 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1916 "Error in adding entry to QoS table(%d)", ret);
1920 /* Then remove entry from FS table */
1921 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1922 flow->tc_id, &flow->rule);
1925 "Error in entry addition in FS table(%d)", ret);
1929 case RTE_FLOW_ACTION_TYPE_RSS:
1930 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1934 "Error in entry addition in QoS table(%d)", ret);
1940 "Action type (%d) is not supported", flow->action);
1945 LIST_REMOVE(flow, next);
1946 /* Now free the flow */
1951 rte_flow_error_set(error, EPERM,
1952 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1958 * Destroy user-configured flow rules.
1960 * This function skips internal flow rules.
1962 * @see rte_flow_flush()
/*
 * dpaa2_flow_flush() - rte_flow_ops .flush callback.
 *
 * Walks the per-device flow list, destroying each flow in turn. The next
 * pointer is captured before dpaa2_flow_destroy() frees the current node,
 * keeping the traversal safe.
 */
1966 dpaa2_flow_flush(struct rte_eth_dev *dev,
1967 struct rte_flow_error *error)
1969 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1970 struct rte_flow *flow = LIST_FIRST(&priv->flows);
/* Save the successor first: destroy() unlinks and frees 'flow'. */
1973 struct rte_flow *next = LIST_NEXT(flow, next);
1975 dpaa2_flow_destroy(dev, flow, error);
/*
 * dpaa2_flow_query() - rte_flow_ops .query callback.
 *
 * Not implemented: all parameters are unused and the function is a stub.
 */
1982 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
1983 struct rte_flow *flow __rte_unused,
1984 const struct rte_flow_action *actions __rte_unused,
1985 void *data __rte_unused,
1986 struct rte_flow_error *error __rte_unused)
1992 * Clean up all flow rules.
1994 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
1995 * rules regardless of whether they are internal or user-configured.
1998 * Pointer to private structure.
/*
 * dpaa2_flow_clean() - destroy every remaining flow on the device.
 *
 * Pops the list head repeatedly until the per-device flow list is empty;
 * each flow is torn down via dpaa2_flow_destroy() with no error reporting
 * (NULL error). Used at teardown, unlike the user-facing flush callback.
 */
2001 dpaa2_flow_clean(struct rte_eth_dev *dev)
2003 struct rte_flow *flow;
2004 struct dpaa2_dev_priv *priv = dev->data->dev_private;
2006 while ((flow = LIST_FIRST(&priv->flows)))
2007 dpaa2_flow_destroy(dev, flow, NULL);
2010 const struct rte_flow_ops dpaa2_flow_ops = {
2011 .create = dpaa2_flow_create,
2012 .validate = dpaa2_flow_validate,
2013 .destroy = dpaa2_flow_destroy,
2014 .flush = dpaa2_flow_flush,
2015 .query = dpaa2_flow_query,