drivers/net/ice/ice_generic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_switch_filter.h"

static int ice_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static int ice_flow_destroy(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error);

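/* Generic flow ops exposed by the ice PMD through the rte_flow API */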
const struct rte_flow_ops ice_flow_ops = {
        .validate = ice_flow_validate,
        .create = ice_flow_create,
        .destroy = ice_flow_destroy,
        .flush = ice_flow_flush,
};

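/* Check flow attributes: only the ingress direction is supported;
 * egress, priority and group must not be set.
 */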
static int
ice_flow_valid_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Priority is not supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Group is not supported.");
                return -rte_errno;
        }

        return 0;
}

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
ice_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
ice_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = ice_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = ice_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
ice_match_pattern(enum rte_flow_item_type *item_array,
                const struct rte_flow_item *pattern)
{
        const struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}

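/* Strip VOID items from the pattern and match it against the supported
 * pattern list; return the input-set bitmap of the matched pattern, or 0
 * if the pattern is not supported.
 */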
static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
                struct rte_flow_error *error)
{
        uint16_t i = 0;
        uint64_t inset;
        struct rte_flow_item *items; /* used for pattern without VOID items */
        uint32_t item_num = 0; /* non-void item number */

        /* Get the number of non-void items in the pattern */
        while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
                if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
                        item_num++;
                i++;
        }
        item_num++; /* reserve one entry for the END item */

        items = rte_zmalloc("ice_pattern",
                            item_num * sizeof(struct rte_flow_item), 0);
        if (!items) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No memory for PMD internal items.");
                /* rte_errno is set to ENOMEM; 0 signals failure to the caller */
                return 0;
        }

        ice_pattern_skip_void_item(items, pattern);

        for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
                if (ice_match_pattern(ice_supported_patterns[i].items,
                                      items)) {
                        inset = ice_supported_patterns[i].sw_fields;
                        rte_free(items);
                        return inset;
                }
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           pattern, "Unsupported pattern");

        rte_free(items);
        return 0;
}

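/* Walk the pattern and translate the per-item masks into an input-set
 * bitmap (ICE_INSET_*); return 0 and set the flow error on an invalid item.
 */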
static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_icmp *icmp_mask;
        const struct rte_flow_item_icmp6 *icmp6_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
        enum rte_flow_item_type item_type;
        uint8_t  ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        uint64_t input_set = ICE_INSET_NONE;
        bool is_tunnel = false;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Range is not supported");
                        return 0;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        if (eth_spec && eth_mask) {
                                if (rte_is_broadcast_ether_addr(&eth_mask->src))
                                        input_set |= ICE_INSET_SMAC;
                                if (rte_is_broadcast_ether_addr(&eth_mask->dst))
                                        input_set |= ICE_INSET_DMAC;
                                if (eth_mask->type == RTE_BE16(0xffff))
                                        input_set |= ICE_INSET_ETHERTYPE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (!(ipv4_spec && ipv4_mask))
                                break;

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv4 mask.");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_TUN_IPV4_SRC;
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_TUN_IPV4_DST;
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                        input_set |= ICE_INSET_TUN_IPV4_TTL;
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                        input_set |= ICE_INSET_TUN_IPV4_PROTO;
                        } else {
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_IPV4_SRC;
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_IPV4_DST;
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TTL;
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_PROTO;
                                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TOS;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (!(ipv6_spec && ipv6_mask))
                                break;

                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                        input_set |= ICE_INSET_TUN_IPV6_SRC;
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                        input_set |= ICE_INSET_TUN_IPV6_DST;
                                if (ipv6_mask->hdr.proto == UINT8_MAX)
                                        input_set |= ICE_INSET_TUN_IPV6_PROTO;
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                        input_set |= ICE_INSET_TUN_IPV6_TTL;
                        } else {
                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                        input_set |= ICE_INSET_IPV6_SRC;
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                        input_set |= ICE_INSET_IPV6_DST;
                                if (ipv6_mask->hdr.proto == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_PROTO;
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_HOP_LIMIT;
                                if ((ipv6_mask->hdr.vtc_flow &
                                     rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) ==
                                    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
                                        input_set |= ICE_INSET_IPV6_TOS;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!(udp_spec && udp_mask))
                                break;

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (udp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_SRC_PORT;
                                if (udp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_DST_PORT;
                        } else {
                                if (udp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SRC_PORT;
                                if (udp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_DST_PORT;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!(tcp_spec && tcp_mask))
                                break;

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (tcp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_SRC_PORT;
                                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_DST_PORT;
                        } else {
                                if (tcp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SRC_PORT;
                                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_DST_PORT;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        if (!(sctp_spec && sctp_mask))
                                break;

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid SCTP mask");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (sctp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_SRC_PORT;
                                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_DST_PORT;
                        } else {
                                if (sctp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SRC_PORT;
                                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_DST_PORT;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP:
                        icmp_mask = item->mask;
                        /* A NULL mask only denotes the protocol */
                        if (!icmp_mask)
                                break;

                        if (icmp_mask->hdr.icmp_code ||
                            icmp_mask->hdr.icmp_cksum ||
                            icmp_mask->hdr.icmp_ident ||
                            icmp_mask->hdr.icmp_seq_nb) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ICMP mask");
                                return 0;
                        }

                        if (icmp_mask->hdr.icmp_type == UINT8_MAX)
                                input_set |= ICE_INSET_ICMP;
                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP6:
                        icmp6_mask = item->mask;
                        /* A NULL mask only denotes the protocol */
                        if (!icmp6_mask)
                                break;

                        if (icmp6_mask->code ||
                            icmp6_mask->checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ICMP6 mask");
                                return 0;
                        }

                        if (icmp6_mask->type == UINT8_MAX)
                                input_set |= ICE_INSET_ICMP6;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return 0;
                        }
                        if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
                            vxlan_mask->vni[1] == UINT8_MAX &&
                            vxlan_mask->vni[2] == UINT8_MAX)
                                input_set |= ICE_INSET_TUN_ID;
                        is_tunnel = true;

                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return 0;
                        }
                        if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
                            nvgre_mask->tni[1] == UINT8_MAX &&
                            nvgre_mask->tni[2] == UINT8_MAX)
                                input_set |= ICE_INSET_TUN_ID;
                        is_tunnel = true;

                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid pattern");
                        break;
                }
        }
        return input_set;
}

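/* Check that every field matched by the pattern is part of the input set
 * supported by the matched pattern template.
 */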
static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
                        uint64_t inset, struct rte_flow_error *error)
{
        uint64_t fields;

        /* get valid field */
        fields = ice_get_flow_field(pattern, error);
        if (!fields || fields & (~inset)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        return 0;
}

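/* Check the action list: only QUEUE (with a valid queue ID), DROP and VOID
 * actions are accepted.
 */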
static int ice_flow_valid_action(struct rte_eth_dev *dev,
                                const struct rte_flow_action *actions,
                                struct rte_flow_error *error)
{
        const struct rte_flow_action_queue *act_q;
        uint16_t queue;
        const struct rte_flow_action *action;

        for (action = actions; action->type !=
                        RTE_FLOW_ACTION_TYPE_END; action++) {
                switch (action->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        act_q = action->conf;
                        queue = act_q->index;
                        if (queue >= dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                actions, "Invalid queue ID for"
                                                " switch filter.");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                           "Invalid action.");
                        return -rte_errno;
                }
        }
        return 0;
}

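/* Validate the attributes, pattern and actions of a flow rule */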
static int
ice_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        uint64_t inset = 0;
        int ret = ICE_ERR_NOT_SUPPORTED;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        ret = ice_flow_valid_attr(attr, error);
        if (ret)
                return ret;

        inset = ice_flow_valid_pattern(pattern, error);
        if (!inset)
                return -rte_errno;

        ret = ice_flow_valid_inset(pattern, inset, error);
        if (ret)
                return ret;

        ret = ice_flow_valid_action(dev, actions, error);
        if (ret)
                return ret;

        return 0;
}

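/* Validate the rule, program it as a switch filter and add it to the
 * PF flow list.
 */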
static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_flow *flow = NULL;
        int ret;

        flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = ice_flow_validate(dev, attr, pattern, actions, error);
        if (ret < 0)
                goto free_flow;

        ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
        if (ret)
                goto free_flow;

        TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
        return flow;

free_flow:
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow.");
        rte_free(flow);
        return NULL;
}

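/* Remove the switch filter of a flow and release the flow entry */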
static int
ice_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        int ret = 0;

        ret = ice_destroy_switch_filter(pf, flow, error);

        if (!ret) {
                TAILQ_REMOVE(&pf->flow_list, flow, node);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

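/* Destroy all flows in the PF flow list */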
static int
ice_flow_flush(struct rte_eth_dev *dev,
               struct rte_flow_error *error)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_flow *p_flow;
        void *temp;
        int ret = 0;

        TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
                ret = ice_flow_destroy(dev, p_flow, error);
                if (ret) {
                        rte_flow_error_set(error, -ret,
                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                           "Failed to flush SW flows.");
                        return -rte_errno;
                }
        }

        return ret;
}