1 /* * SPDX-License-Identifier: BSD-3-Clause
13 #include <rte_ethdev.h>
15 #include <rte_malloc.h>
16 #include <rte_flow_driver.h>
17 #include <rte_tailq.h>
22 #include <dpaa2_ethdev.h>
23 #include <dpaa2_pmd_logs.h>
/* NOTE(review): fragment of struct rte_flow — the opening "struct rte_flow {"
 * line is not visible in this truncated listing. */
26 LIST_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
27 struct dpni_rule_cfg rule;
/* rte_flow action attached to this rule (queue/RSS per the supported list). */
32 enum rte_flow_action_type action;
/* Pattern item types this PMD knows how to translate into DPKG key extracts. */
37 enum rte_flow_item_type dpaa2_supported_pattern_type[] = {
38 RTE_FLOW_ITEM_TYPE_END,
39 RTE_FLOW_ITEM_TYPE_ETH,
40 RTE_FLOW_ITEM_TYPE_VLAN,
41 RTE_FLOW_ITEM_TYPE_IPV4,
42 RTE_FLOW_ITEM_TYPE_IPV6,
43 RTE_FLOW_ITEM_TYPE_ICMP,
44 RTE_FLOW_ITEM_TYPE_UDP,
45 RTE_FLOW_ITEM_TYPE_TCP,
46 RTE_FLOW_ITEM_TYPE_SCTP,
47 RTE_FLOW_ITEM_TYPE_GRE,
/* Flow actions the PMD accepts: terminate, steer to a queue, or RSS. */
51 enum rte_flow_action_type dpaa2_supported_action_type[] = {
52 RTE_FLOW_ACTION_TYPE_END,
53 RTE_FLOW_ACTION_TYPE_QUEUE,
54 RTE_FLOW_ACTION_TYPE_RSS
/* Currently active filter type for the device; NONE until a flow is created. */
57 enum rte_filter_type dpaa2_filter_type = RTE_ETH_FILTER_NONE;
/* Fallback mask applied when a pattern item carries no explicit mask
 * (see the "pattern->mask ? pattern->mask : default_mask" sites below). */
58 static const void *default_mask;
/*
 * Translate an ETH pattern item into DPAA2 QoS/FS key extracts and copy the
 * item's spec/mask (src MAC, dst MAC, ether type) into the rule's key/mask
 * buffers at the current key offset.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE / DPAA2_FS_TABLE_RECONFIGURE
 * telling the caller which tables need reprogramming.
 *
 * NOTE(review): this listing is truncated (gaps in the embedded line numbers),
 * so some control flow between the visible lines is not shown here.
 */
61 dpaa2_configure_flow_eth(struct rte_flow *flow,
62 struct rte_eth_dev *dev,
63 const struct rte_flow_attr *attr,
64 const struct rte_flow_item *pattern,
65 const struct rte_flow_action actions[] __rte_unused,
66 struct rte_flow_error *error __rte_unused)
71 int device_configured = 0, entry_found = 0;
73 const struct rte_flow_item_eth *spec, *mask;
75 /* TODO: Currently upper bound of range parameter is not implemented */
76 const struct rte_flow_item_eth *last __rte_unused;
77 struct dpaa2_dev_priv *priv = dev->data->dev_private;
81 /* DPAA2 platform has a limitation that extract parameter can not be */
82 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
83 /* TODO: pattern is an array of 9 elements where 9th pattern element */
84 /* is for QoS table and 1-8th pattern element is for FS tables. */
85 /* It can be changed to macro. */
86 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
87 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
88 DPKG_MAX_NUM_OF_EXTRACTS);
92 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
93 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
94 DPKG_MAX_NUM_OF_EXTRACTS);
/* Record ETH as a QoS-table (slot 8) pattern type if not seen before. */
98 for (j = 0; j < priv->pattern[8].item_count; j++) {
99 if (priv->pattern[8].pattern_type[j] != pattern->type) {
108 priv->pattern[8].pattern_type[j] = pattern->type;
109 priv->pattern[8].item_count++;
110 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same bookkeeping for the FS table of this traffic-class group. */
114 for (j = 0; j < priv->pattern[group].item_count; j++) {
115 if (priv->pattern[group].pattern_type[j] != pattern->type) {
124 priv->pattern[group].pattern_type[j] = pattern->type;
125 priv->pattern[group].item_count++;
126 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
129 /* Get traffic class index and flow id to be configured */
131 flow->index = attr->priority;
/* Append full-field SA, DA and TYPE extracts to the QoS key config. */
133 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
134 index = priv->extract.qos_key_cfg.num_extracts;
135 priv->extract.qos_key_cfg.extracts[index].type =
136 DPKG_EXTRACT_FROM_HDR;
137 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
138 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
139 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
142 priv->extract.qos_key_cfg.extracts[index].type =
143 DPKG_EXTRACT_FROM_HDR;
144 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
145 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
146 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
149 priv->extract.qos_key_cfg.extracts[index].type =
150 DPKG_EXTRACT_FROM_HDR;
151 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
152 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
153 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
/* index is bumped by the (not shown) index++ lines between extracts. */
156 priv->extract.qos_key_cfg.num_extracts = index;
/* Append the same three extracts to this group's FS key config. */
159 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
160 index = priv->extract.fs_key_cfg[group].num_extracts;
161 priv->extract.fs_key_cfg[group].extracts[index].type =
162 DPKG_EXTRACT_FROM_HDR;
163 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
164 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
165 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_SA;
168 priv->extract.fs_key_cfg[group].extracts[index].type =
169 DPKG_EXTRACT_FROM_HDR;
170 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
171 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
172 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_DA;
175 priv->extract.fs_key_cfg[group].extracts[index].type =
176 DPKG_EXTRACT_FROM_HDR;
177 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
178 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ETH;
179 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ETH_TYPE;
182 priv->extract.fs_key_cfg[group].num_extracts = index;
185 /* Parse pattern list to get the matching parameters */
186 spec = (const struct rte_flow_item_eth *)pattern->spec;
187 last = (const struct rte_flow_item_eth *)pattern->last;
188 mask = (const struct rte_flow_item_eth *)
189 (pattern->mask ? pattern->mask : default_mask);
/* Key layout: src MAC | dst MAC | ether type, appended at key_size offset. */
192 key_iova = flow->rule.key_iova + flow->key_size;
193 memcpy((void *)key_iova, (const void *)(spec->src.addr_bytes),
194 sizeof(struct rte_ether_addr));
195 key_iova += sizeof(struct rte_ether_addr);
196 memcpy((void *)key_iova, (const void *)(spec->dst.addr_bytes),
197 sizeof(struct rte_ether_addr));
198 key_iova += sizeof(struct rte_ether_addr);
199 memcpy((void *)key_iova, (const void *)(&spec->type),
/* Mask buffer mirrors the key layout exactly. */
203 mask_iova = flow->rule.mask_iova + flow->key_size;
204 memcpy((void *)mask_iova, (const void *)(mask->src.addr_bytes),
205 sizeof(struct rte_ether_addr));
206 mask_iova += sizeof(struct rte_ether_addr);
207 memcpy((void *)mask_iova, (const void *)(mask->dst.addr_bytes),
208 sizeof(struct rte_ether_addr));
209 mask_iova += sizeof(struct rte_ether_addr);
210 memcpy((void *)mask_iova, (const void *)(&mask->type),
213 flow->key_size += ((2 * sizeof(struct rte_ether_addr)) +
216 return device_configured;
/*
 * Translate a VLAN pattern item: add a single NH_FLD_VLAN_TCI full-field
 * extract to the QoS and FS key configs and append the TCI spec/mask to the
 * rule's key/mask buffers.
 *
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 *
 * NOTE(review): listing is truncated; some lines between the visible ones
 * are not shown.
 */
220 dpaa2_configure_flow_vlan(struct rte_flow *flow,
221 struct rte_eth_dev *dev,
222 const struct rte_flow_attr *attr,
223 const struct rte_flow_item *pattern,
224 const struct rte_flow_action actions[] __rte_unused,
225 struct rte_flow_error *error __rte_unused)
230 int device_configured = 0, entry_found = 0;
232 const struct rte_flow_item_vlan *spec, *mask;
234 const struct rte_flow_item_vlan *last __rte_unused;
235 struct dpaa2_dev_priv *priv = dev->data->dev_private;
239 /* DPAA2 platform has a limitation that extract parameter can not be */
240 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
241 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
242 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
243 DPKG_MAX_NUM_OF_EXTRACTS);
247 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
248 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
249 DPKG_MAX_NUM_OF_EXTRACTS);
/* Track VLAN as a QoS-table pattern type (slot 8). */
253 for (j = 0; j < priv->pattern[8].item_count; j++) {
254 if (priv->pattern[8].pattern_type[j] != pattern->type) {
263 priv->pattern[8].pattern_type[j] = pattern->type;
264 priv->pattern[8].item_count++;
265 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Track VLAN in this group's FS table as well. */
269 for (j = 0; j < priv->pattern[group].item_count; j++) {
270 if (priv->pattern[group].pattern_type[j] != pattern->type) {
279 priv->pattern[group].pattern_type[j] = pattern->type;
280 priv->pattern[group].item_count++;
281 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
285 /* Get traffic class index and flow id to be configured */
287 flow->index = attr->priority;
/* One TCI extract; num_extracts incremented directly here. */
289 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
290 index = priv->extract.qos_key_cfg.num_extracts;
291 priv->extract.qos_key_cfg.extracts[index].type =
292 DPKG_EXTRACT_FROM_HDR;
293 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
294 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
295 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
296 priv->extract.qos_key_cfg.num_extracts++;
299 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
300 index = priv->extract.fs_key_cfg[group].num_extracts;
301 priv->extract.fs_key_cfg[group].extracts[index].type =
302 DPKG_EXTRACT_FROM_HDR;
303 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
304 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_VLAN;
305 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_VLAN_TCI;
306 priv->extract.fs_key_cfg[group].num_extracts++;
309 /* Parse pattern list to get the matching parameters */
310 spec = (const struct rte_flow_item_vlan *)pattern->spec;
311 last = (const struct rte_flow_item_vlan *)pattern->last;
312 mask = (const struct rte_flow_item_vlan *)
313 (pattern->mask ? pattern->mask : default_mask);
/* Append the 16-bit TCI to key and mask buffers. */
315 key_iova = flow->rule.key_iova + flow->key_size;
316 memcpy((void *)key_iova, (const void *)(&spec->tci),
319 mask_iova = flow->rule.mask_iova + flow->key_size;
320 memcpy((void *)mask_iova, (const void *)(&mask->tci),
323 flow->key_size += sizeof(rte_be16_t);
324 return device_configured;
/*
 * Translate an IPv4 pattern item: add NH_FLD_IP_SRC, NH_FLD_IP_DST and
 * NH_FLD_IP_PROTO full-field extracts to the QoS and FS key configs, then
 * append src addr, dst addr and next_proto_id spec/mask to the rule buffers.
 *
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 *
 * NOTE(review): listing is truncated; some lines between the visible ones
 * are not shown.
 */
328 dpaa2_configure_flow_ipv4(struct rte_flow *flow,
329 struct rte_eth_dev *dev,
330 const struct rte_flow_attr *attr,
331 const struct rte_flow_item *pattern,
332 const struct rte_flow_action actions[] __rte_unused,
333 struct rte_flow_error *error __rte_unused)
338 int device_configured = 0, entry_found = 0;
340 const struct rte_flow_item_ipv4 *spec, *mask;
342 const struct rte_flow_item_ipv4 *last __rte_unused;
343 struct dpaa2_dev_priv *priv = dev->data->dev_private;
347 /* DPAA2 platform has a limitation that extract parameter can not be */
348 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
349 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
350 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
351 DPKG_MAX_NUM_OF_EXTRACTS);
355 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
356 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
357 DPKG_MAX_NUM_OF_EXTRACTS);
/* Track IPV4 as a QoS-table pattern type (slot 8). */
361 for (j = 0; j < priv->pattern[8].item_count; j++) {
362 if (priv->pattern[8].pattern_type[j] != pattern->type) {
371 priv->pattern[8].pattern_type[j] = pattern->type;
372 priv->pattern[8].item_count++;
373 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Track IPV4 in this group's FS table as well. */
377 for (j = 0; j < priv->pattern[group].item_count; j++) {
378 if (priv->pattern[group].pattern_type[j] != pattern->type) {
387 priv->pattern[group].pattern_type[j] = pattern->type;
388 priv->pattern[group].item_count++;
389 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
392 /* Get traffic class index and flow id to be configured */
394 flow->index = attr->priority;
/* QoS key: IP SRC, DST and PROTO full-field extracts. */
396 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
397 index = priv->extract.qos_key_cfg.num_extracts;
398 priv->extract.qos_key_cfg.extracts[index].type =
399 DPKG_EXTRACT_FROM_HDR;
400 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
401 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
402 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
405 priv->extract.qos_key_cfg.extracts[index].type =
406 DPKG_EXTRACT_FROM_HDR;
407 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
408 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
409 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
412 priv->extract.qos_key_cfg.extracts[index].type =
413 DPKG_EXTRACT_FROM_HDR;
414 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
415 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
416 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
419 priv->extract.qos_key_cfg.num_extracts = index;
/* FS key: same three extracts for this group. */
422 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
423 index = priv->extract.fs_key_cfg[group].num_extracts;
424 priv->extract.fs_key_cfg[group].extracts[index].type =
425 DPKG_EXTRACT_FROM_HDR;
426 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
427 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
428 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
431 priv->extract.fs_key_cfg[group].extracts[index].type =
432 DPKG_EXTRACT_FROM_HDR;
433 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
434 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
435 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
438 priv->extract.fs_key_cfg[group].extracts[index].type =
439 DPKG_EXTRACT_FROM_HDR;
440 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
441 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
442 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_PROTO;
445 priv->extract.fs_key_cfg[group].num_extracts = index;
448 /* Parse pattern list to get the matching parameters */
449 spec = (const struct rte_flow_item_ipv4 *)pattern->spec;
450 last = (const struct rte_flow_item_ipv4 *)pattern->last;
451 mask = (const struct rte_flow_item_ipv4 *)
452 (pattern->mask ? pattern->mask : default_mask);
/* Key layout: src addr (4B) | dst addr (4B) | next proto (1B). */
454 key_iova = flow->rule.key_iova + flow->key_size;
455 memcpy((void *)key_iova, (const void *)&spec->hdr.src_addr,
457 key_iova += sizeof(uint32_t);
458 memcpy((void *)key_iova, (const void *)&spec->hdr.dst_addr,
460 key_iova += sizeof(uint32_t);
461 memcpy((void *)key_iova, (const void *)&spec->hdr.next_proto_id,
/* Mask buffer mirrors the key layout. */
464 mask_iova = flow->rule.mask_iova + flow->key_size;
465 memcpy((void *)mask_iova, (const void *)&mask->hdr.src_addr,
467 mask_iova += sizeof(uint32_t);
468 memcpy((void *)mask_iova, (const void *)&mask->hdr.dst_addr,
470 mask_iova += sizeof(uint32_t);
471 memcpy((void *)mask_iova, (const void *)&mask->hdr.next_proto_id,
474 flow->key_size += (2 * sizeof(uint32_t)) + sizeof(uint8_t);
475 return device_configured;
/*
 * Translate an IPv6 pattern item: add NH_FLD_IP_SRC and NH_FLD_IP_DST
 * full-field extracts to the QoS and FS key configs and append the 16-byte
 * src/dst address spec/mask to the rule's key/mask buffers.
 *
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 *
 * NOTE(review): listing is truncated; some lines between the visible ones
 * are not shown.
 */
479 dpaa2_configure_flow_ipv6(struct rte_flow *flow,
480 struct rte_eth_dev *dev,
481 const struct rte_flow_attr *attr,
482 const struct rte_flow_item *pattern,
483 const struct rte_flow_action actions[] __rte_unused,
484 struct rte_flow_error *error __rte_unused)
489 int device_configured = 0, entry_found = 0;
491 const struct rte_flow_item_ipv6 *spec, *mask;
493 const struct rte_flow_item_ipv6 *last __rte_unused;
494 struct dpaa2_dev_priv *priv = dev->data->dev_private;
498 /* DPAA2 platform has a limitation that extract parameter can not be */
499 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
500 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
501 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
502 DPKG_MAX_NUM_OF_EXTRACTS);
506 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
507 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
508 DPKG_MAX_NUM_OF_EXTRACTS);
/* Track IPV6 as a QoS-table pattern type (slot 8). */
512 for (j = 0; j < priv->pattern[8].item_count; j++) {
513 if (priv->pattern[8].pattern_type[j] != pattern->type) {
522 priv->pattern[8].pattern_type[j] = pattern->type;
523 priv->pattern[8].item_count++;
524 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Track IPV6 in this group's FS table as well. */
528 for (j = 0; j < priv->pattern[group].item_count; j++) {
529 if (priv->pattern[group].pattern_type[j] != pattern->type) {
538 priv->pattern[group].pattern_type[j] = pattern->type;
539 priv->pattern[group].item_count++;
540 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
543 /* Get traffic class index and flow id to be configured */
545 flow->index = attr->priority;
/* QoS key: IP SRC and DST full-field extracts. */
547 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
548 index = priv->extract.qos_key_cfg.num_extracts;
549 priv->extract.qos_key_cfg.extracts[index].type =
550 DPKG_EXTRACT_FROM_HDR;
551 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
552 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
553 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
556 priv->extract.qos_key_cfg.extracts[index].type =
557 DPKG_EXTRACT_FROM_HDR;
558 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
559 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_IP;
560 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
563 priv->extract.qos_key_cfg.num_extracts = index;
/* FIX(review): this section programs the FS key config, so it must be
 * guarded by DPAA2_FS_TABLE_RECONFIGURE (as in every sibling
 * dpaa2_configure_flow_* function). It previously re-tested the QoS flag,
 * so an FS-only reconfigure never updated fs_key_cfg[group]. */
566 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
567 index = priv->extract.fs_key_cfg[group].num_extracts;
568 priv->extract.fs_key_cfg[group].extracts[index].type =
569 DPKG_EXTRACT_FROM_HDR;
570 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
571 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
572 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_SRC;
575 priv->extract.fs_key_cfg[group].extracts[index].type =
576 DPKG_EXTRACT_FROM_HDR;
577 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
578 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_IP;
579 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_IP_DST;
582 priv->extract.fs_key_cfg[group].num_extracts = index;
585 /* Parse pattern list to get the matching parameters */
586 spec = (const struct rte_flow_item_ipv6 *)pattern->spec;
587 last = (const struct rte_flow_item_ipv6 *)pattern->last;
588 mask = (const struct rte_flow_item_ipv6 *)
589 (pattern->mask ? pattern->mask : default_mask);
/* Key layout: src addr (16B) | dst addr (16B). */
591 key_iova = flow->rule.key_iova + flow->key_size;
592 memcpy((void *)key_iova, (const void *)(spec->hdr.src_addr),
593 sizeof(spec->hdr.src_addr));
594 key_iova += sizeof(spec->hdr.src_addr);
595 memcpy((void *)key_iova, (const void *)(spec->hdr.dst_addr),
596 sizeof(spec->hdr.dst_addr));
/* Mask buffer mirrors the key layout. */
598 mask_iova = flow->rule.mask_iova + flow->key_size;
599 memcpy((void *)mask_iova, (const void *)(mask->hdr.src_addr),
600 sizeof(mask->hdr.src_addr));
601 mask_iova += sizeof(mask->hdr.src_addr);
602 memcpy((void *)mask_iova, (const void *)(mask->hdr.dst_addr),
603 sizeof(mask->hdr.dst_addr));
605 flow->key_size += sizeof(spec->hdr.src_addr) +
606 sizeof(mask->hdr.dst_addr);
607 return device_configured;
/*
 * Translate an ICMP pattern item: add NH_FLD_ICMP_TYPE and NH_FLD_ICMP_CODE
 * full-field extracts to the QoS and FS key configs and append the two
 * one-byte spec/mask fields to the rule's key/mask buffers.
 *
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 *
 * NOTE(review): listing is truncated; some lines between the visible ones
 * are not shown.
 */
611 dpaa2_configure_flow_icmp(struct rte_flow *flow,
612 struct rte_eth_dev *dev,
613 const struct rte_flow_attr *attr,
614 const struct rte_flow_item *pattern,
615 const struct rte_flow_action actions[] __rte_unused,
616 struct rte_flow_error *error __rte_unused)
621 int device_configured = 0, entry_found = 0;
623 const struct rte_flow_item_icmp *spec, *mask;
625 const struct rte_flow_item_icmp *last __rte_unused;
626 struct dpaa2_dev_priv *priv = dev->data->dev_private;
630 /* DPAA2 platform has a limitation that extract parameter can not be */
631 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
632 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
633 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
634 DPKG_MAX_NUM_OF_EXTRACTS);
638 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
639 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
640 DPKG_MAX_NUM_OF_EXTRACTS);
/* Track ICMP as a QoS-table pattern type (slot 8). */
644 for (j = 0; j < priv->pattern[8].item_count; j++) {
645 if (priv->pattern[8].pattern_type[j] != pattern->type) {
654 priv->pattern[8].pattern_type[j] = pattern->type;
655 priv->pattern[8].item_count++;
656 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Track ICMP in this group's FS table as well. */
660 for (j = 0; j < priv->pattern[group].item_count; j++) {
661 if (priv->pattern[group].pattern_type[j] != pattern->type) {
670 priv->pattern[group].pattern_type[j] = pattern->type;
671 priv->pattern[group].item_count++;
672 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
675 /* Get traffic class index and flow id to be configured */
677 flow->index = attr->priority;
/* QoS key: ICMP TYPE and CODE full-field extracts. */
679 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
680 index = priv->extract.qos_key_cfg.num_extracts;
681 priv->extract.qos_key_cfg.extracts[index].type =
682 DPKG_EXTRACT_FROM_HDR;
683 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
684 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
685 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
688 priv->extract.qos_key_cfg.extracts[index].type =
689 DPKG_EXTRACT_FROM_HDR;
690 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
691 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
692 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
695 priv->extract.qos_key_cfg.num_extracts = index;
/* FS key: same two extracts for this group. */
698 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
699 index = priv->extract.fs_key_cfg[group].num_extracts;
700 priv->extract.fs_key_cfg[group].extracts[index].type =
701 DPKG_EXTRACT_FROM_HDR;
702 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
703 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
704 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_TYPE;
707 priv->extract.fs_key_cfg[group].extracts[index].type =
708 DPKG_EXTRACT_FROM_HDR;
709 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
710 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_ICMP;
711 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_ICMP_CODE;
714 priv->extract.fs_key_cfg[group].num_extracts = index;
717 /* Parse pattern list to get the matching parameters */
718 spec = (const struct rte_flow_item_icmp *)pattern->spec;
719 last = (const struct rte_flow_item_icmp *)pattern->last;
720 mask = (const struct rte_flow_item_icmp *)
721 (pattern->mask ? pattern->mask : default_mask);
/* Key layout: icmp_type (1B) | icmp_code (1B). */
723 key_iova = flow->rule.key_iova + flow->key_size;
724 memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_type,
726 key_iova += sizeof(uint8_t);
727 memcpy((void *)key_iova, (const void *)&spec->hdr.icmp_code,
730 mask_iova = flow->rule.mask_iova + flow->key_size;
731 memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_type,
/* FIX(review): advance mask_iova here, not key_iova. The old code bumped
 * key_iova, so the icmp_code mask memcpy below overwrote the icmp_type
 * mask just written — compare the udp/tcp mask sequences. */
733 mask_iova += sizeof(uint8_t);
734 memcpy((void *)mask_iova, (const void *)&mask->hdr.icmp_code,
737 flow->key_size += 2 * sizeof(uint8_t);
739 return device_configured;
/*
 * Translate a UDP pattern item: add NH_FLD_UDP_PORT_SRC and
 * NH_FLD_UDP_PORT_DST full-field extracts to the QoS and FS key configs and
 * append the two 16-bit port spec/mask fields to the rule's key/mask buffers.
 *
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 *
 * NOTE(review): listing is truncated; some lines between the visible ones
 * are not shown.
 */
743 dpaa2_configure_flow_udp(struct rte_flow *flow,
744 struct rte_eth_dev *dev,
745 const struct rte_flow_attr *attr,
746 const struct rte_flow_item *pattern,
747 const struct rte_flow_action actions[] __rte_unused,
748 struct rte_flow_error *error __rte_unused)
753 int device_configured = 0, entry_found = 0;
755 const struct rte_flow_item_udp *spec, *mask;
757 const struct rte_flow_item_udp *last __rte_unused;
758 struct dpaa2_dev_priv *priv = dev->data->dev_private;
762 /* DPAA2 platform has a limitation that extract parameter can not be */
763 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
764 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
765 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
766 DPKG_MAX_NUM_OF_EXTRACTS);
770 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
771 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
772 DPKG_MAX_NUM_OF_EXTRACTS);
/* Track UDP as a QoS-table pattern type (slot 8). */
776 for (j = 0; j < priv->pattern[8].item_count; j++) {
777 if (priv->pattern[8].pattern_type[j] != pattern->type) {
786 priv->pattern[8].pattern_type[j] = pattern->type;
787 priv->pattern[8].item_count++;
788 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Track UDP in this group's FS table as well. */
792 for (j = 0; j < priv->pattern[group].item_count; j++) {
793 if (priv->pattern[group].pattern_type[j] != pattern->type) {
802 priv->pattern[group].pattern_type[j] = pattern->type;
803 priv->pattern[group].item_count++;
804 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
807 /* Get traffic class index and flow id to be configured */
809 flow->index = attr->priority;
/* QoS key: UDP source and destination port extracts. */
811 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
812 index = priv->extract.qos_key_cfg.num_extracts;
813 priv->extract.qos_key_cfg.extracts[index].type =
814 DPKG_EXTRACT_FROM_HDR;
815 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
816 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
817 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
820 priv->extract.qos_key_cfg.extracts[index].type = DPKG_EXTRACT_FROM_HDR;
821 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
822 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
823 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
826 priv->extract.qos_key_cfg.num_extracts = index;
/* FS key: same two extracts for this group. */
829 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
830 index = priv->extract.fs_key_cfg[group].num_extracts;
831 priv->extract.fs_key_cfg[group].extracts[index].type =
832 DPKG_EXTRACT_FROM_HDR;
833 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
834 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
835 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_SRC;
838 priv->extract.fs_key_cfg[group].extracts[index].type =
839 DPKG_EXTRACT_FROM_HDR;
840 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
841 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_UDP;
842 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_UDP_PORT_DST;
845 priv->extract.fs_key_cfg[group].num_extracts = index;
848 /* Parse pattern list to get the matching parameters */
849 spec = (const struct rte_flow_item_udp *)pattern->spec;
850 last = (const struct rte_flow_item_udp *)pattern->last;
851 mask = (const struct rte_flow_item_udp *)
852 (pattern->mask ? pattern->mask : default_mask);
/* Key layout: src port (2B) | dst port (2B). */
854 key_iova = flow->rule.key_iova + flow->key_size;
855 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
857 key_iova += sizeof(uint16_t);
858 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
/* Mask buffer mirrors the key layout. */
861 mask_iova = flow->rule.mask_iova + flow->key_size;
862 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
864 mask_iova += sizeof(uint16_t);
865 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
868 flow->key_size += (2 * sizeof(uint16_t));
870 return device_configured;
/*
 * Translate a TCP pattern item: add NH_FLD_TCP_PORT_SRC and
 * NH_FLD_TCP_PORT_DST full-field extracts to the QoS and FS key configs and
 * append the two 16-bit port spec/mask fields to the rule's key/mask buffers.
 *
 * Returns the DPAA2_*_TABLE_RECONFIGURE bitmask for the caller.
 *
 * NOTE(review): listing is truncated; some lines between the visible ones
 * are not shown.
 */
874 dpaa2_configure_flow_tcp(struct rte_flow *flow,
875 struct rte_eth_dev *dev,
876 const struct rte_flow_attr *attr,
877 const struct rte_flow_item *pattern,
878 const struct rte_flow_action actions[] __rte_unused,
879 struct rte_flow_error *error __rte_unused)
884 int device_configured = 0, entry_found = 0;
886 const struct rte_flow_item_tcp *spec, *mask;
888 const struct rte_flow_item_tcp *last __rte_unused;
889 struct dpaa2_dev_priv *priv = dev->data->dev_private;
893 /* DPAA2 platform has a limitation that extract parameter can not be */
894 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too.*/
895 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
896 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
897 DPKG_MAX_NUM_OF_EXTRACTS);
901 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
902 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
903 DPKG_MAX_NUM_OF_EXTRACTS);
/* Track TCP as a QoS-table pattern type (slot 8). */
907 for (j = 0; j < priv->pattern[8].item_count; j++) {
908 if (priv->pattern[8].pattern_type[j] != pattern->type) {
917 priv->pattern[8].pattern_type[j] = pattern->type;
918 priv->pattern[8].item_count++;
919 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Track TCP in this group's FS table as well. */
923 for (j = 0; j < priv->pattern[group].item_count; j++) {
924 if (priv->pattern[group].pattern_type[j] != pattern->type) {
933 priv->pattern[group].pattern_type[j] = pattern->type;
934 priv->pattern[group].item_count++;
935 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
938 /* Get traffic class index and flow id to be configured */
940 flow->index = attr->priority;
/* QoS key: TCP source and destination port extracts. */
942 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
943 index = priv->extract.qos_key_cfg.num_extracts;
944 priv->extract.qos_key_cfg.extracts[index].type =
945 DPKG_EXTRACT_FROM_HDR;
946 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
947 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
948 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
951 priv->extract.qos_key_cfg.extracts[index].type =
952 DPKG_EXTRACT_FROM_HDR;
953 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
954 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
955 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
958 priv->extract.qos_key_cfg.num_extracts = index;
/* FS key: same two extracts for this group. */
961 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
962 index = priv->extract.fs_key_cfg[group].num_extracts;
963 priv->extract.fs_key_cfg[group].extracts[index].type =
964 DPKG_EXTRACT_FROM_HDR;
965 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
966 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
967 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_SRC;
970 priv->extract.fs_key_cfg[group].extracts[index].type =
971 DPKG_EXTRACT_FROM_HDR;
972 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
973 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_TCP;
974 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_TCP_PORT_DST;
977 priv->extract.fs_key_cfg[group].num_extracts = index;
980 /* Parse pattern list to get the matching parameters */
981 spec = (const struct rte_flow_item_tcp *)pattern->spec;
982 last = (const struct rte_flow_item_tcp *)pattern->last;
983 mask = (const struct rte_flow_item_tcp *)
984 (pattern->mask ? pattern->mask : default_mask);
/* Key layout: src port (2B) | dst port (2B). */
986 key_iova = flow->rule.key_iova + flow->key_size;
987 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
989 key_iova += sizeof(uint16_t);
990 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
/* Mask buffer mirrors the key layout. */
993 mask_iova = flow->rule.mask_iova + flow->key_size;
994 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
996 mask_iova += sizeof(uint16_t);
997 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
1000 flow->key_size += 2 * sizeof(uint16_t);
1002 return device_configured;
/*
 * Configure QoS/FS extract profiles and rule key/mask for an SCTP pattern
 * item. Adds SCTP src/dst port extracts to the QoS table (index 8) and the
 * FS table of the flow's traffic class (attr->group) when not already
 * present, then appends the item's spec/mask ports to the rule buffers.
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE telling the caller which tables must be
 * re-programmed (0 when nothing changed).
 *
 * NOTE(review): several interior lines (error returns, loop bodies,
 * memcpy size arguments) are not visible in this excerpt.
 */
1006 dpaa2_configure_flow_sctp(struct rte_flow *flow,
1007 struct rte_eth_dev *dev,
1008 const struct rte_flow_attr *attr,
1009 const struct rte_flow_item *pattern,
1010 const struct rte_flow_action actions[] __rte_unused,
1011 struct rte_flow_error *error __rte_unused)
1016 int device_configured = 0, entry_found = 0;
1018 const struct rte_flow_item_sctp *spec, *mask;
/* Upper bound of a range match ("last") is not implemented; kept unused. */
1020 const struct rte_flow_item_sctp *last __rte_unused;
1021 struct dpaa2_dev_priv *priv = dev->data->dev_private;
/* attr->group selects the traffic class (FS table); QoS uses slot 8. */
1023 group = attr->group;
1025 /* DPAA2 platform has a limitation that extract parameter can not be */
1026 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1027 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1028 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1029 DPKG_MAX_NUM_OF_EXTRACTS);
1033 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1034 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1035 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this pattern type with the QoS table if it is new there. */
1039 for (j = 0; j < priv->pattern[8].item_count; j++) {
1040 if (priv->pattern[8].pattern_type[j] != pattern->type) {
1049 priv->pattern[8].pattern_type[j] = pattern->type;
1050 priv->pattern[8].item_count++;
1051 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same bookkeeping for the per-TC FS table. */
1055 for (j = 0; j < priv->pattern[group].item_count; j++) {
1056 if (priv->pattern[group].pattern_type[j] != pattern->type) {
1065 priv->pattern[group].pattern_type[j] = pattern->type;
1066 priv->pattern[group].item_count++;
1067 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1070 /* Get traffic class index and flow id to be configured */
1071 flow->tc_id = group;
1072 flow->index = attr->priority;
/* Append SCTP src/dst port header extracts to the QoS key profile. */
1074 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1075 index = priv->extract.qos_key_cfg.num_extracts;
1076 priv->extract.qos_key_cfg.extracts[index].type =
1077 DPKG_EXTRACT_FROM_HDR;
1078 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1079 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1080 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1083 priv->extract.qos_key_cfg.extracts[index].type =
1084 DPKG_EXTRACT_FROM_HDR;
1085 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1086 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1087 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1090 priv->extract.qos_key_cfg.num_extracts = index;
/* Mirror the same two extracts into the FS key profile of this TC. */
1093 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1094 index = priv->extract.fs_key_cfg[group].num_extracts;
1095 priv->extract.fs_key_cfg[group].extracts[index].type =
1096 DPKG_EXTRACT_FROM_HDR;
1097 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1098 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1099 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_SRC;
1102 priv->extract.fs_key_cfg[group].extracts[index].type =
1103 DPKG_EXTRACT_FROM_HDR;
1104 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1105 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_SCTP;
1106 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_SCTP_PORT_DST;
1109 priv->extract.fs_key_cfg[group].num_extracts = index;
1112 /* Parse pattern list to get the matching parameters */
1113 spec = (const struct rte_flow_item_sctp *)pattern->spec;
1114 last = (const struct rte_flow_item_sctp *)pattern->last;
/* Fall back to the module-wide default mask when the item has none. */
1115 mask = (const struct rte_flow_item_sctp *)
1116 (pattern->mask ? pattern->mask : default_mask);
/* Append src/dst port spec and mask to the DMA'ble rule buffers. */
1118 key_iova = flow->rule.key_iova + flow->key_size;
1119 memcpy((void *)key_iova, (const void *)(&spec->hdr.src_port),
1121 key_iova += sizeof(uint16_t);
1122 memcpy((void *)key_iova, (const void *)(&spec->hdr.dst_port),
1125 mask_iova = flow->rule.mask_iova + flow->key_size;
1126 memcpy((void *)mask_iova, (const void *)(&mask->hdr.src_port),
1128 mask_iova += sizeof(uint16_t);
1129 memcpy((void *)mask_iova, (const void *)(&mask->hdr.dst_port),
/* Two 16-bit ports were appended to the key. */
1132 flow->key_size += 2 * sizeof(uint16_t);
1134 return device_configured;
/*
 * Configure QoS/FS extract profiles and rule key/mask for a GRE pattern
 * item. Unlike the L4 handlers, GRE matches a single field: the GRE
 * protocol type (NH_FLD_GRE_TYPE, 16 bits).
 *
 * Returns a bitmask of DPAA2_QOS_TABLE_RECONFIGURE /
 * DPAA2_FS_TABLE_RECONFIGURE telling the caller which tables must be
 * re-programmed (0 when nothing changed).
 *
 * NOTE(review): several interior lines (error returns, loop bodies) are
 * not visible in this excerpt.
 */
1138 dpaa2_configure_flow_gre(struct rte_flow *flow,
1139 struct rte_eth_dev *dev,
1140 const struct rte_flow_attr *attr,
1141 const struct rte_flow_item *pattern,
1142 const struct rte_flow_action actions[] __rte_unused,
1143 struct rte_flow_error *error __rte_unused)
1148 int device_configured = 0, entry_found = 0;
1150 const struct rte_flow_item_gre *spec, *mask;
/* Upper bound of a range match ("last") is not implemented; kept unused. */
1152 const struct rte_flow_item_gre *last __rte_unused;
1153 struct dpaa2_dev_priv *priv = dev->data->dev_private;
/* attr->group selects the traffic class (FS table); QoS uses slot 8. */
1155 group = attr->group;
1157 /* DPAA2 platform has a limitation that extract parameter can not be */
1158 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
1159 if (priv->pattern[8].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1160 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1161 DPKG_MAX_NUM_OF_EXTRACTS);
1165 if (priv->pattern[group].item_count >= DPKG_MAX_NUM_OF_EXTRACTS) {
1166 DPAA2_PMD_ERR("Maximum limit for different pattern type = %d\n",
1167 DPKG_MAX_NUM_OF_EXTRACTS);
/* Register this pattern type with the QoS table if it is new there. */
1171 for (j = 0; j < priv->pattern[8].item_count; j++) {
1172 if (priv->pattern[8].pattern_type[j] != pattern->type) {
1181 priv->pattern[8].pattern_type[j] = pattern->type;
1182 priv->pattern[8].item_count++;
1183 device_configured |= DPAA2_QOS_TABLE_RECONFIGURE;
/* Same bookkeeping for the per-TC FS table. */
1187 for (j = 0; j < priv->pattern[group].item_count; j++) {
1188 if (priv->pattern[group].pattern_type[j] != pattern->type) {
1197 priv->pattern[group].pattern_type[j] = pattern->type;
1198 priv->pattern[group].item_count++;
1199 device_configured |= DPAA2_FS_TABLE_RECONFIGURE;
1202 /* Get traffic class index and flow id to be configured */
1203 flow->tc_id = group;
1204 flow->index = attr->priority;
/* Append a GRE protocol-type header extract to the QoS key profile. */
1206 if (device_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1207 index = priv->extract.qos_key_cfg.num_extracts;
1208 priv->extract.qos_key_cfg.extracts[index].type =
1209 DPKG_EXTRACT_FROM_HDR;
1210 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1211 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1212 priv->extract.qos_key_cfg.extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1215 priv->extract.qos_key_cfg.num_extracts = index;
/* Mirror the same extract into the FS key profile of this TC. */
1218 if (device_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1219 index = priv->extract.fs_key_cfg[group].num_extracts;
1220 priv->extract.fs_key_cfg[group].extracts[index].type =
1221 DPKG_EXTRACT_FROM_HDR;
1222 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.type = DPKG_FULL_FIELD;
1223 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.prot = NET_PROT_GRE;
1224 priv->extract.fs_key_cfg[group].extracts[index].extract.from_hdr.field = NH_FLD_GRE_TYPE;
1227 priv->extract.fs_key_cfg[group].num_extracts = index;
1230 /* Parse pattern list to get the matching parameters */
1231 spec = (const struct rte_flow_item_gre *)pattern->spec;
1232 last = (const struct rte_flow_item_gre *)pattern->last;
/* Fall back to the module-wide default mask when the item has none. */
1233 mask = (const struct rte_flow_item_gre *)
1234 (pattern->mask ? pattern->mask : default_mask);
/* Append the 16-bit protocol spec and mask to the rule buffers. */
1236 key_iova = flow->rule.key_iova + flow->key_size;
1237 memcpy((void *)key_iova, (const void *)(&spec->protocol),
1238 sizeof(rte_be16_t));
1240 mask_iova = flow->rule.mask_iova + flow->key_size;
1241 memcpy((void *)mask_iova, (const void *)(&mask->protocol),
1242 sizeof(rte_be16_t));
1244 flow->key_size += sizeof(rte_be16_t);
1246 return device_configured;
/*
 * Translate a generic rte_flow (attr + pattern + actions) into DPAA2
 * hardware state:
 *  1. Walk the pattern list, dispatching each item to its per-protocol
 *     dpaa2_configure_flow_*() handler; the handlers build the extract
 *     profiles and the rule key/mask, and report whether the QoS and/or
 *     FS tables need re-programming.
 *  2. Walk the action list: QUEUE programs the QoS + FS tables and adds
 *     the rule entries; RSS re-distributes a traffic class by hash.
 *  3. Insert the flow into the device's flow list.
 *
 * NOTE(review): several interior lines (error paths, closing braces,
 * call-argument continuations) are not visible in this excerpt.
 */
1250 dpaa2_generic_flow_set(struct rte_flow *flow,
1251 struct rte_eth_dev *dev,
1252 const struct rte_flow_attr *attr,
1253 const struct rte_flow_item pattern[],
1254 const struct rte_flow_action actions[],
1255 struct rte_flow_error *error)
1257 const struct rte_flow_action_queue *dest_queue;
1258 const struct rte_flow_action_rss *rss_conf;
1260 int is_keycfg_configured = 0, end_of_list = 0;
1261 int ret = 0, i = 0, j = 0;
1262 struct dpni_attr nic_attr;
1263 struct dpni_rx_tc_dist_cfg tc_cfg;
1264 struct dpni_qos_tbl_cfg qos_cfg;
1265 struct dpkg_profile_cfg key_cfg;
1266 struct dpni_fs_action_cfg action;
1267 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1268 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1270 struct rte_flow *curr = LIST_FIRST(&priv->flows);
1272 /* Parse pattern list to get the matching parameters */
1273 while (!end_of_list) {
1274 switch (pattern[i].type) {
1275 case RTE_FLOW_ITEM_TYPE_ETH:
1276 is_keycfg_configured = dpaa2_configure_flow_eth(flow,
1283 case RTE_FLOW_ITEM_TYPE_VLAN:
1284 is_keycfg_configured = dpaa2_configure_flow_vlan(flow,
1291 case RTE_FLOW_ITEM_TYPE_IPV4:
1292 is_keycfg_configured = dpaa2_configure_flow_ipv4(flow,
1299 case RTE_FLOW_ITEM_TYPE_IPV6:
1300 is_keycfg_configured = dpaa2_configure_flow_ipv6(flow,
1307 case RTE_FLOW_ITEM_TYPE_ICMP:
1308 is_keycfg_configured = dpaa2_configure_flow_icmp(flow,
1315 case RTE_FLOW_ITEM_TYPE_UDP:
1316 is_keycfg_configured = dpaa2_configure_flow_udp(flow,
1323 case RTE_FLOW_ITEM_TYPE_TCP:
1324 is_keycfg_configured = dpaa2_configure_flow_tcp(flow,
1331 case RTE_FLOW_ITEM_TYPE_SCTP:
1332 is_keycfg_configured = dpaa2_configure_flow_sctp(flow,
1338 case RTE_FLOW_ITEM_TYPE_GRE:
1339 is_keycfg_configured = dpaa2_configure_flow_gre(flow,
1346 case RTE_FLOW_ITEM_TYPE_END:
1348 break; /*End of List*/
1350 DPAA2_PMD_ERR("Invalid action type");
1357 /* Let's parse action on matching traffic */
1359 while (!end_of_list) {
1360 switch (actions[j].type) {
1361 case RTE_FLOW_ACTION_TYPE_QUEUE:
/* QUEUE: steer matching traffic to a single flow id (rx queue). */
1362 dest_queue = (const struct rte_flow_action_queue *)(actions[j].conf);
1363 flow->flow_id = dest_queue->index;
1364 flow->action = RTE_FLOW_ACTION_TYPE_QUEUE;
1365 memset(&action, 0, sizeof(struct dpni_fs_action_cfg));
1366 action.flow_id = flow->flow_id;
/* Re-program the QoS (TC selection) table if its key changed. */
1367 if (is_keycfg_configured & DPAA2_QOS_TABLE_RECONFIGURE) {
1368 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1369 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1371 "Unable to prepare extract parameters");
1375 memset(&qos_cfg, 0, sizeof(struct dpni_qos_tbl_cfg));
1376 qos_cfg.discard_on_miss = true;
1377 qos_cfg.keep_entries = true;
1378 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1379 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1380 priv->token, &qos_cfg);
1383 "Distribution cannot be configured.(%d)"
/* Re-program the FS (exact-match steering) table of this TC. */
1388 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1389 if (dpkg_prepare_key_cfg(&priv->extract.fs_key_cfg[flow->tc_id],
1390 (uint8_t *)(size_t)priv->extract.fs_extract_param[flow->tc_id]) < 0) {
1392 "Unable to prepare extract parameters");
1396 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1397 tc_cfg.dist_size = priv->nb_rx_queues / priv->num_rx_tc;
1398 tc_cfg.dist_mode = DPNI_DIST_MODE_FS;
1399 tc_cfg.key_cfg_iova =
1400 (uint64_t)priv->extract.fs_extract_param[flow->tc_id];
1401 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1402 tc_cfg.fs_cfg.keep_entries = true;
1403 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1405 flow->tc_id, &tc_cfg);
1408 "Distribution cannot be configured.(%d)"
1413 /* Configure QoS table first */
1414 memset(&nic_attr, 0, sizeof(struct dpni_attr));
1415 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1416 priv->token, &nic_attr);
1419 "Failure to get attribute. dpni@%p err code(%d)\n",
/* Keep the flow id within the number of RX traffic classes. */
1424 action.flow_id = action.flow_id % nic_attr.num_rx_tcs;
1425 index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1426 flow->rule.key_size = flow->key_size;
/* NOTE(review): error string below says "addnig" — typo for "adding". */
1427 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW,
1428 priv->token, &flow->rule,
1433 "Error in addnig entry to QoS table(%d)", ret);
1437 /* Then Configure FS table */
1438 ret = dpni_add_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1439 flow->tc_id, flow->index,
1440 &flow->rule, &action);
1443 "Error in adding entry to FS table(%d)", ret);
1447 case RTE_FLOW_ACTION_TYPE_RSS:
/* RSS: hash-distribute matching traffic across a queue range. */
1448 ret = dpni_get_attributes(dpni, CMD_PRI_LOW,
1449 priv->token, &nic_attr);
1452 "Failure to get attribute. dpni@%p err code(%d)\n",
/* All RSS queues must lie inside the TC (group) queue range. */
1456 rss_conf = (const struct rte_flow_action_rss *)(actions[j].conf);
1457 for (i = 0; i < (int)rss_conf->queue_num; i++) {
1458 if (rss_conf->queue[i] < (attr->group * nic_attr.num_queues) ||
1459 rss_conf->queue[i] >= ((attr->group + 1) * nic_attr.num_queues)) {
1461 "Queue/Group combination are not supported\n");
1466 flow->action = RTE_FLOW_ACTION_TYPE_RSS;
1467 ret = dpaa2_distset_to_dpkg_profile_cfg(rss_conf->types,
1471 "unable to set flow distribution.please check queue config\n");
1475 /* Allocate DMA'ble memory to write the rules */
1476 param = (size_t)rte_malloc(NULL, 256, 64);
1478 DPAA2_PMD_ERR("Memory allocation failure\n");
1482 if (dpkg_prepare_key_cfg(&key_cfg, (uint8_t *)param) < 0) {
1484 "Unable to prepare extract parameters");
1485 rte_free((void *)param);
1489 memset(&tc_cfg, 0, sizeof(struct dpni_rx_tc_dist_cfg));
1490 tc_cfg.dist_size = rss_conf->queue_num;
1491 tc_cfg.dist_mode = DPNI_DIST_MODE_HASH;
1492 tc_cfg.key_cfg_iova = (size_t)param;
1493 tc_cfg.fs_cfg.miss_action = DPNI_FS_MISS_DROP;
1495 ret = dpni_set_rx_tc_dist(dpni, CMD_PRI_LOW,
1496 priv->token, flow->tc_id,
1500 "Distribution cannot be configured: %d\n", ret);
1501 rte_free((void *)param);
/* param only carried the key cfg for the MC call; free it now. */
1505 rte_free((void *)param);
/*
 * NOTE(review): this branch tests the FS reconfigure bit but prepares
 * and programs the QoS key cfg/table — confirm against upstream whether
 * the flag or the table is the intended one here.
 */
1506 if (is_keycfg_configured & DPAA2_FS_TABLE_RECONFIGURE) {
1507 if (dpkg_prepare_key_cfg(&priv->extract.qos_key_cfg,
1508 (uint8_t *)(size_t)priv->extract.qos_extract_param) < 0) {
1510 "Unable to prepare extract parameters");
1514 sizeof(struct dpni_qos_tbl_cfg));
1515 qos_cfg.discard_on_miss = true;
1516 qos_cfg.keep_entries = true;
1517 qos_cfg.key_cfg_iova = (size_t)priv->extract.qos_extract_param;
1518 ret = dpni_set_qos_table(dpni, CMD_PRI_LOW,
1519 priv->token, &qos_cfg);
1522 "Distribution can not be configured(%d)\n",
1528 /* Add Rule into QoS table */
1529 index = flow->index + (flow->tc_id * nic_attr.fs_entries);
1530 flow->rule.key_size = flow->key_size;
1531 ret = dpni_add_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1532 &flow->rule, flow->tc_id,
1536 "Error in entry addition in QoS table(%d)",
1541 case RTE_FLOW_ACTION_TYPE_END:
1545 DPAA2_PMD_ERR("Invalid action type");
1553 /* New rules are inserted. */
/* Empty list: insert at head; otherwise append after the last node. */
1555 LIST_INSERT_HEAD(&priv->flows, flow, next);
1557 while (LIST_NEXT(curr, next))
1558 curr = LIST_NEXT(curr, next);
1559 LIST_INSERT_AFTER(curr, flow, next);
/*
 * Validate rte_flow attributes against the DPNI capabilities:
 * group must be a valid RX traffic class, priority must fit in the FS
 * table, and only ingress flows are supported (egress is rejected).
 * The error-return lines are not visible in this excerpt.
 */
1566 dpaa2_dev_verify_attr(struct dpni_attr *dpni_attr,
1567 const struct rte_flow_attr *attr)
/* Group maps to a traffic class; it must exist on this DPNI. */
1571 if (unlikely(attr->group >= dpni_attr->num_rx_tcs)) {
1572 DPAA2_PMD_ERR("Priority group is out of range\n");
/* Priority is used as the FS table entry index within the group. */
1575 if (unlikely(attr->priority >= dpni_attr->fs_entries)) {
1576 DPAA2_PMD_ERR("Priority within the group is out of range\n");
1579 if (unlikely(attr->egress)) {
1581 "Flow configuration is not supported on egress side\n");
1584 if (unlikely(!attr->ingress)) {
1585 DPAA2_PMD_ERR("Ingress flag must be configured\n");
/*
 * Point the file-scope 'default_mask' at the rte_flow default mask that
 * matches the given pattern item's type. The per-protocol configure
 * handlers use 'default_mask' whenever an item carries a "last" bound
 * but no explicit mask. The break statements between cases are not
 * visible in this excerpt.
 */
1592 dpaa2_dev_update_default_mask(const struct rte_flow_item *pattern)
1594 switch (pattern->type) {
1595 case RTE_FLOW_ITEM_TYPE_ETH:
1596 default_mask = (const void *)&rte_flow_item_eth_mask;
1598 case RTE_FLOW_ITEM_TYPE_VLAN:
1599 default_mask = (const void *)&rte_flow_item_vlan_mask;
1601 case RTE_FLOW_ITEM_TYPE_IPV4:
1602 default_mask = (const void *)&rte_flow_item_ipv4_mask;
1604 case RTE_FLOW_ITEM_TYPE_IPV6:
1605 default_mask = (const void *)&rte_flow_item_ipv6_mask;
1607 case RTE_FLOW_ITEM_TYPE_ICMP:
1608 default_mask = (const void *)&rte_flow_item_icmp_mask;
1610 case RTE_FLOW_ITEM_TYPE_UDP:
1611 default_mask = (const void *)&rte_flow_item_udp_mask;
1613 case RTE_FLOW_ITEM_TYPE_TCP:
1614 default_mask = (const void *)&rte_flow_item_tcp_mask;
1616 case RTE_FLOW_ITEM_TYPE_SCTP:
1617 default_mask = (const void *)&rte_flow_item_sctp_mask;
1619 case RTE_FLOW_ITEM_TYPE_GRE:
1620 default_mask = (const void *)&rte_flow_item_gre_mask;
1623 DPAA2_PMD_ERR("Invalid pattern type");
/*
 * Validate a pattern list: every item type must be in
 * dpaa2_supported_pattern_type, items with a "last" bound but no mask
 * get the module default mask installed, and no per-TC extract profile
 * may overflow DPKG_MAX_NUM_OF_EXTRACTS. Several interior lines
 * (is_found handling, error returns) are not visible in this excerpt.
 */
1628 dpaa2_dev_verify_patterns(struct dpaa2_dev_priv *dev_priv,
1629 const struct rte_flow_item pattern[])
1631 unsigned int i, j, k, is_found = 0;
/* Every item must be one of the supported pattern types. */
1634 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1635 for (i = 0; i < RTE_DIM(dpaa2_supported_pattern_type); i++) {
1636 if (dpaa2_supported_pattern_type[i] == pattern[j].type) {
1646 /* Lets verify other combinations of given pattern rules */
1647 for (j = 0; pattern[j].type != RTE_FLOW_ITEM_TYPE_END; j++) {
1648 if (!pattern[j].spec) {
/* "last" without a mask: fall back to the type's default mask. */
1652 if ((pattern[j].last) && (!pattern[j].mask))
1653 dpaa2_dev_update_default_mask(&pattern[j]);
1656 /* DPAA2 platform has a limitation that extract parameter can not be */
1657 /* more than DPKG_MAX_NUM_OF_EXTRACTS. Verify this limitation too. */
/* j walks all FS tables (MAX_TCS) plus the QoS table slot. */
1658 for (i = 0; pattern[i].type != RTE_FLOW_ITEM_TYPE_END; i++) {
1659 for (j = 0; j < MAX_TCS + 1; j++) {
1660 for (k = 0; k < DPKG_MAX_NUM_OF_EXTRACTS; k++) {
1661 if (dev_priv->pattern[j].pattern_type[k] == pattern[i].type)
1664 if (dev_priv->pattern[j].item_count >= DPKG_MAX_NUM_OF_EXTRACTS)
/*
 * Validate an action list: every action type must be in
 * dpaa2_supported_action_type, and every action except DROP must carry
 * a non-NULL conf. The is_found handling and error returns are not
 * visible in this excerpt.
 */
1672 dpaa2_dev_verify_actions(const struct rte_flow_action actions[])
1674 unsigned int i, j, is_found = 0;
1677 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1678 for (i = 0; i < RTE_DIM(dpaa2_supported_action_type); i++) {
1679 if (dpaa2_supported_action_type[i] == actions[j].type) {
/* Each non-DROP action needs its configuration structure. */
1689 for (j = 0; actions[j].type != RTE_FLOW_ACTION_TYPE_END; j++) {
1690 if ((actions[j].type != RTE_FLOW_ACTION_TYPE_DROP) && (!actions[j].conf))
/*
 * rte_flow_ops.validate callback: check a flow rule without creating it.
 * Fetches the DPNI attributes, then validates the attributes, pattern
 * list, and action list in turn; on any failure fills *error with EPERM
 * and bails out via the not_valid_params label (not visible here).
 */
1697 int dpaa2_flow_validate(struct rte_eth_dev *dev,
1698 const struct rte_flow_attr *flow_attr,
1699 const struct rte_flow_item pattern[],
1700 const struct rte_flow_action actions[],
1701 struct rte_flow_error *error)
1703 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1704 struct dpni_attr dpni_attr;
1705 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1706 uint16_t token = priv->token;
/* Device attributes bound the legal group/priority values. */
1709 memset(&dpni_attr, 0, sizeof(struct dpni_attr));
1710 ret = dpni_get_attributes(dpni, CMD_PRI_LOW, token, &dpni_attr);
1713 "Failure to get dpni@%p attribute, err code %d\n",
1715 rte_flow_error_set(error, EPERM,
1716 RTE_FLOW_ERROR_TYPE_ATTR,
1717 flow_attr, "invalid");
1721 /* Verify input attributes */
1722 ret = dpaa2_dev_verify_attr(&dpni_attr, flow_attr);
1725 "Invalid attributes are given\n");
1726 rte_flow_error_set(error, EPERM,
1727 RTE_FLOW_ERROR_TYPE_ATTR,
1728 flow_attr, "invalid");
1729 goto not_valid_params;
1731 /* Verify input pattern list */
1732 ret = dpaa2_dev_verify_patterns(priv, pattern);
1735 "Invalid pattern list is given\n");
1736 rte_flow_error_set(error, EPERM,
1737 RTE_FLOW_ERROR_TYPE_ITEM,
1738 pattern, "invalid");
1739 goto not_valid_params;
1741 /* Verify input action list */
1742 ret = dpaa2_dev_verify_actions(actions);
1745 "Invalid action list is given\n");
1746 rte_flow_error_set(error, EPERM,
1747 RTE_FLOW_ERROR_TYPE_ACTION,
1748 actions, "invalid");
1749 goto not_valid_params;
/*
 * rte_flow_ops.create callback. Allocates the flow object plus two
 * 256-byte, 64-byte-aligned DMA'ble buffers for the rule key and mask,
 * then hands off to dpaa2_generic_flow_set() for GENERIC filters. On
 * any failure all three allocations are released via the error labels
 * at the bottom. Ownership of the returned flow passes to the caller
 * (freed by dpaa2_flow_destroy).
 */
1756 struct rte_flow *dpaa2_flow_create(struct rte_eth_dev *dev,
1757 const struct rte_flow_attr *attr,
1758 const struct rte_flow_item pattern[],
1759 const struct rte_flow_action actions[],
1760 struct rte_flow_error *error)
1762 struct rte_flow *flow = NULL;
1763 size_t key_iova = 0, mask_iova = 0;
1766 flow = rte_malloc(NULL, sizeof(struct rte_flow), RTE_CACHE_LINE_SIZE);
1768 DPAA2_PMD_ERR("Failure to allocate memory for flow");
1771 /* Allocate DMA'ble memory to write the rules */
1772 key_iova = (size_t)rte_malloc(NULL, 256, 64);
1775 "Memory allocation failure for rule configuration\n");
1778 mask_iova = (size_t)rte_malloc(NULL, 256, 64);
1781 "Memory allocation failure for rule configuration\n");
/* The per-protocol handlers fill these buffers as the key grows. */
1785 flow->rule.key_iova = key_iova;
1786 flow->rule.mask_iova = mask_iova;
1789 switch (dpaa2_filter_type) {
1790 case RTE_ETH_FILTER_GENERIC:
1791 ret = dpaa2_generic_flow_set(flow, dev, attr, pattern,
/* Only set a generic error if the callee didn't set a specific one. */
1794 if (error->type > RTE_FLOW_ERROR_TYPE_ACTION)
1795 rte_flow_error_set(error, EPERM,
1796 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1799 "Failure to create flow, return code (%d)", ret);
1800 goto creation_error;
1804 DPAA2_PMD_ERR("Filter type (%d) not supported",
/* Error path: report and free everything allocated above. */
1811 rte_flow_error_set(error, EPERM,
1812 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1813 NULL, "memory alloc");
1815 rte_free((void *)flow);
1816 rte_free((void *)key_iova);
1817 rte_free((void *)mask_iova);
/*
 * rte_flow_ops.destroy callback. Removes the hardware entries installed
 * for the flow (QoS entry, plus the FS entry for QUEUE actions), unlinks
 * the flow from the device list, and frees it. Error-path lines are not
 * visible in this excerpt.
 */
1823 int dpaa2_flow_destroy(struct rte_eth_dev *dev,
1824 struct rte_flow *flow,
1825 struct rte_flow_error *error)
1828 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1829 struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
1831 switch (flow->action) {
1832 case RTE_FLOW_ACTION_TYPE_QUEUE:
1833 /* Remove entry from QoS table first */
/* NOTE(review): error strings below say "adding"/"addition" on the
 * removal paths — misleading log text, worth correcting upstream. */
1834 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1838 "Error in adding entry to QoS table(%d)", ret);
1842 /* Then remove entry from FS table */
1843 ret = dpni_remove_fs_entry(dpni, CMD_PRI_LOW, priv->token,
1844 flow->tc_id, &flow->rule);
1847 "Error in entry addition in FS table(%d)", ret);
1851 case RTE_FLOW_ACTION_TYPE_RSS:
/* RSS flows only installed a QoS entry; remove just that. */
1852 ret = dpni_remove_qos_entry(dpni, CMD_PRI_LOW, priv->token,
1856 "Error in entry addition in QoS table(%d)", ret);
1862 "Action type (%d) is not supported", flow->action);
/* Unlink from priv->flows, then free the flow object. */
1867 LIST_REMOVE(flow, next);
1868 /* Now free the flow */
1873 rte_flow_error_set(error, EPERM,
1874 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 * Destroy user-configured flow rules.
 *
 * This function skips internal flows rules.
 *
 * @see rte_flow_flush()
/* Walks priv->flows, saving the successor before each destroy since
 * dpaa2_flow_destroy() unlinks and frees the current node. */
1888 dpaa2_flow_flush(struct rte_eth_dev *dev,
1889 struct rte_flow_error *error)
1891 struct dpaa2_dev_priv *priv = dev->data->dev_private;
1892 struct rte_flow *flow = LIST_FIRST(&priv->flows);
/* Capture next first: destroy invalidates 'flow'. */
1895 struct rte_flow *next = LIST_NEXT(flow, next);
1897 dpaa2_flow_destroy(dev, flow, error);
/*
 * rte_flow_ops.query callback — stub; all parameters are unused.
 * The body (presumably a trivial return) is not visible in this excerpt.
 */
1904 dpaa2_flow_query(struct rte_eth_dev *dev __rte_unused,
1905 struct rte_flow *flow __rte_unused,
1906 const struct rte_flow_action *actions __rte_unused,
1907 void *data __rte_unused,
1908 struct rte_flow_error *error __rte_unused)
 * Clean up all flow rules.
 *
 * Unlike dpaa2_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * Pointer to private structure.
1923 dpaa2_flow_clean(struct rte_eth_dev *dev)
1925 struct rte_flow *flow;
1926 struct dpaa2_dev_priv *priv = dev->data->dev_private;
/* Pop-and-destroy until the list is empty; destroy unlinks each node. */
1928 while ((flow = LIST_FIRST(&priv->flows)))
1929 dpaa2_flow_destroy(dev, flow, NULL);
1932 const struct rte_flow_ops dpaa2_flow_ops = {
1933 .create = dpaa2_flow_create,
1934 .validate = dpaa2_flow_validate,
1935 .destroy = dpaa2_flow_destroy,
1936 .flush = dpaa2_flow_flush,
1937 .query = dpaa2_flow_query,