net/ice: fix unsafe tailq element removal
[dpdk.git] / drivers/net/ice/ice_generic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_switch_filter.h"

static int ice_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);
static int ice_flow_destroy(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error);

const struct rte_flow_ops ice_flow_ops = {
        .validate = ice_flow_validate,
        .create = ice_flow_create,
        .destroy = ice_flow_destroy,
        .flush = ice_flow_flush,
};
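
/*
 * Note: the driver hands &ice_flow_ops back through its ethdev filter_ctrl
 * hook (RTE_ETH_FILTER_GENERIC), so application-level rte_flow_validate()/
 * create()/destroy()/flush() calls on an ice port dispatch to the four
 * callbacks above.
 */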

static int
ice_flow_valid_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}
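
/*
 * Illustrative only: the attribute below is the one shape this validator
 * accepts -- ingress set, with egress/priority/group all zero:
 *
 *      const struct rte_flow_attr attr = { .ingress = 1 };
 */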

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
ice_find_first_item(const struct rte_flow_item *item, bool is_void)
{
        bool is_find;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (is_void)
                        is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
                else
                        is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
                if (is_find)
                        break;
                item++;
        }
        return item;
}

/* Skip all VOID items of the pattern */
static void
ice_pattern_skip_void_item(struct rte_flow_item *items,
                            const struct rte_flow_item *pattern)
{
        uint32_t cpy_count = 0;
        const struct rte_flow_item *pb = pattern, *pe = pattern;

        for (;;) {
                /* Find a non-void item first */
                pb = ice_find_first_item(pb, false);
                if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
                        pe = pb;
                        break;
                }

                /* Find a void item */
                pe = ice_find_first_item(pb + 1, true);

                cpy_count = pe - pb;
                rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

                items += cpy_count;

                if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
                        pb = pe;
                        break;
                }

                pb = pe + 1;
        }
        /* Copy the END item. */
        rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
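
/*
 * Illustrative only: a caller-supplied pattern such as
 *      ETH, VOID, IPV4, VOID, UDP, END
 * is compacted by ice_pattern_skip_void_item() into
 *      ETH, IPV4, UDP, END
 * before being compared against the supported-pattern table below.
 */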

/* Check if the pattern matches a supported item type array */
static bool
ice_match_pattern(enum rte_flow_item_type *item_array,
                const struct rte_flow_item *pattern)
{
        const struct rte_flow_item *item = pattern;

        while ((*item_array == item->type) &&
               (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
                item_array++;
                item++;
        }

        return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
                item->type == RTE_FLOW_ITEM_TYPE_END);
}
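
/*
 * Both sequences must run out together: the loop stops at the first
 * mismatch or at END, and the match only counts if both sides are
 * sitting on END, so a mere prefix of a supported pattern does not match.
 */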

static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
                struct rte_flow_error *error)
{
        uint16_t i = 0;
        uint64_t inset;
        struct rte_flow_item *items; /* used for pattern without VOID items */
        uint32_t item_num = 0; /* non-void item number */

        /* Get the non-void item number of pattern */
        while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
                if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
                        item_num++;
                i++;
        }
        /* Reserve one more slot for the END item */
        item_num++;

        items = rte_zmalloc("ice_pattern",
                            item_num * sizeof(struct rte_flow_item), 0);
        if (!items) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No memory for PMD internal items.");
                /*
                 * Callers treat a zero return as failure; returning -ENOMEM
                 * from this uint64_t function would read back as a (bogus)
                 * valid input set.
                 */
                return 0;
        }

        ice_pattern_skip_void_item(items, pattern);

        for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
                if (ice_match_pattern(ice_supported_patterns[i].items,
                                      items)) {
                        inset = ice_supported_patterns[i].sw_fields;
                        rte_free(items);
                        return inset;
                }
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           pattern, "Unsupported pattern");

        rte_free(items);
        return 0;
}
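
/*
 * Illustrative only: ice_supported_patterns is assumed to be the table of
 * { items, sw_fields } entries declared in ice_generic_flow.h, with rows
 * roughly of the form
 *      { pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4 },
 * so a successful match returns the input-set bits that pattern allows.
 */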

static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
                        struct rte_flow_error *error)
{
        const struct rte_flow_item *item = pattern;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_icmp *icmp_mask;
        const struct rte_flow_item_icmp6 *icmp6_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
        enum rte_flow_item_type item_type;
        uint8_t  ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        uint64_t input_set = ICE_INSET_NONE;
        bool is_tunnel = false;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Not support range");
                        return 0;
                }
                item_type = item->type;
                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        if (eth_spec && eth_mask) {
                                if (rte_is_broadcast_ether_addr(&eth_mask->src))
                                        input_set |= ICE_INSET_SMAC;
                                if (rte_is_broadcast_ether_addr(&eth_mask->dst))
                                        input_set |= ICE_INSET_DMAC;
                                if (eth_mask->type == RTE_BE16(0xffff))
                                        input_set |= ICE_INSET_ETHERTYPE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (!(ipv4_spec && ipv4_mask))
                                break;

                        /* Check IPv4 mask and update input set */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv4 mask.");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_TUN_IPV4_SRC;
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_TUN_IPV4_DST;
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                        input_set |= ICE_INSET_TUN_IPV4_TTL;
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                        input_set |= ICE_INSET_TUN_IPV4_PROTO;
                        } else {
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_IPV4_SRC;
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                        input_set |= ICE_INSET_IPV4_DST;
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TTL;
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_PROTO;
                                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TOS;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        if (!(ipv6_spec && ipv6_mask))
                                break;

                        if (ipv6_mask->hdr.payload_len) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                        input_set |= ICE_INSET_TUN_IPV6_SRC;
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                        input_set |= ICE_INSET_TUN_IPV6_DST;
                                if (ipv6_mask->hdr.proto == UINT8_MAX)
                                        input_set |= ICE_INSET_TUN_IPV6_PROTO;
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                        input_set |= ICE_INSET_TUN_IPV6_TTL;
                        } else {
                                if (!memcmp(ipv6_mask->hdr.src_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                                        input_set |= ICE_INSET_IPV6_SRC;
                                if (!memcmp(ipv6_mask->hdr.dst_addr,
                                            ipv6_addr_mask,
                                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                                        input_set |= ICE_INSET_IPV6_DST;
                                if (ipv6_mask->hdr.proto == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_PROTO;
                                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV6_HOP_LIMIT;
                                if ((ipv6_mask->hdr.vtc_flow &
                                     rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) ==
                                    rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
                                        input_set |= ICE_INSET_IPV6_TOS;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        if (!(udp_spec && udp_mask))
                                break;

                        /* Check UDP mask and update input set */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid UDP mask");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (udp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_SRC_PORT;
                                if (udp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_DST_PORT;
                        } else {
                                if (udp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SRC_PORT;
                                if (udp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_DST_PORT;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        if (!(tcp_spec && tcp_mask))
                                break;

                        /* Check TCP mask and update input set */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid TCP mask");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (tcp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_SRC_PORT;
                                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_DST_PORT;
                        } else {
                                if (tcp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SRC_PORT;
                                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_DST_PORT;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_SCTP:
                        sctp_spec = item->spec;
                        sctp_mask = item->mask;

                        if (!(sctp_spec && sctp_mask))
                                break;

                        /* Check SCTP mask and update input set */
                        if (sctp_mask->hdr.cksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid SCTP mask");
                                return 0;
                        }

                        if (is_tunnel) {
                                if (sctp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_SRC_PORT;
                                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_TUN_DST_PORT;
                        } else {
                                if (sctp_mask->hdr.src_port == UINT16_MAX)
                                        input_set |= ICE_INSET_SRC_PORT;
                                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                                        input_set |= ICE_INSET_DST_PORT;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP:
                        icmp_mask = item->mask;
                        /* A NULL mask matches on the protocol only */
                        if (!icmp_mask)
                                break;
                        if (icmp_mask->hdr.icmp_code ||
                            icmp_mask->hdr.icmp_cksum ||
                            icmp_mask->hdr.icmp_ident ||
                            icmp_mask->hdr.icmp_seq_nb) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ICMP mask");
                                return 0;
                        }

                        if (icmp_mask->hdr.icmp_type == UINT8_MAX)
                                input_set |= ICE_INSET_ICMP;
                        break;
                case RTE_FLOW_ITEM_TYPE_ICMP6:
                        icmp6_mask = item->mask;
                        /* A NULL mask matches on the protocol only */
                        if (!icmp6_mask)
                                break;
                        if (icmp6_mask->code ||
                            icmp6_mask->checksum) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid ICMP6 mask");
                                return 0;
                        }

                        if (icmp6_mask->type == UINT8_MAX)
                                input_set |= ICE_INSET_ICMP6;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return 0;
                        }
                        is_tunnel = 1;

                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return 0;
                        }
                        is_tunnel = 1;

                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid pattern");
                        break;
                }
        }
        return input_set;
}
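
/*
 * Items parsed after a VXLAN or NVGRE item describe the tunnel inner
 * headers and map to the ICE_INSET_TUN_* bits instead of the outer ones;
 * is_tunnel flips once the tunnel header item has been seen.
 */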

static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
                        uint64_t inset, struct rte_flow_error *error)
{
        uint64_t fields;

        /* get valid field */
        fields = ice_get_flow_field(pattern, error);
        if (!fields || fields & (~inset)) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                   pattern,
                                   "Invalid input set");
                return -rte_errno;
        }

        return 0;
}
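
/*
 * The check above is a bitwise subset test: every field the user asked to
 * match (fields) must be covered by what the matched pattern supports
 * (inset); any stray bit left in fields & ~inset rejects the flow.
 */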

static int ice_flow_valid_action(struct rte_eth_dev *dev,
                                const struct rte_flow_action *actions,
                                struct rte_flow_error *error)
{
        const struct rte_flow_action_queue *act_q;
        uint16_t queue;
        const struct rte_flow_action *action;

        for (action = actions; action->type !=
                        RTE_FLOW_ACTION_TYPE_END; action++) {
                switch (action->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        act_q = action->conf;
                        queue = act_q->index;
                        if (queue >= dev->data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                actions, "Invalid queue ID for"
                                                " switch filter.");
                                return -rte_errno;
                        }
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                           "Invalid action.");
                        return -rte_errno;
                }
        }
        return 0;
}
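
/*
 * Illustrative only: QUEUE and DROP are the two terminating actions this
 * validator accepts, e.g.
 *
 *      struct rte_flow_action_queue q = { .index = 3 };
 *      struct rte_flow_action acts[] = {
 *              { RTE_FLOW_ACTION_TYPE_QUEUE, &q },
 *              { RTE_FLOW_ACTION_TYPE_END, NULL },
 *      };
 *
 * where index 3 stands in for any queue below dev->data->nb_rx_queues.
 */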

static int
ice_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        uint64_t inset = 0;
        int ret = ICE_ERR_NOT_SUPPORTED;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        ret = ice_flow_valid_attr(attr, error);
        if (ret)
                return ret;

        inset = ice_flow_valid_pattern(pattern, error);
        if (!inset)
                return -rte_errno;

        ret = ice_flow_valid_inset(pattern, inset, error);
        if (ret)
                return ret;

        ret = ice_flow_valid_action(dev, actions, error);
        if (ret)
                return ret;

        return 0;
}
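
/*
 * Validation runs in four stages -- attributes, pattern shape, input set,
 * actions -- and each stage reports through rte_flow_error_set() before
 * returning, so error->message pinpoints the first offending piece.
 */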

static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_flow *flow = NULL;
        int ret;

        flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = ice_flow_validate(dev, attr, pattern, actions, error);
        if (ret < 0)
                goto free_flow;

        ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
        if (ret)
                goto free_flow;

        TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
        return flow;

free_flow:
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to create flow.");
        rte_free(flow);
        return NULL;
}
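
/*
 * Every flow that makes it past ice_create_switch_filter() is linked into
 * pf->flow_list; ice_flow_destroy() and ice_flow_flush() below unlink and
 * free entries from that same list.
 */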

static int
ice_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        int ret = 0;

        ret = ice_destroy_switch_filter(pf, flow, error);

        if (!ret) {
                TAILQ_REMOVE(&pf->flow_list, flow, node);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}
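
/*
 * The list entry is only unlinked and freed after the hardware filter has
 * been removed, so a failed ice_destroy_switch_filter() leaves the flow
 * intact and still owned by the caller.
 */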

static int
ice_flow_flush(struct rte_eth_dev *dev,
               struct rte_flow_error *error)
{
        struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct rte_flow *p_flow;
        void *temp;
        int ret = 0;
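
        /*
         * ice_flow_destroy() unlinks and frees p_flow, so a plain
         * TAILQ_FOREACH would chase the next pointer through freed memory.
         * TAILQ_FOREACH_SAFE (provided via rte_tailq.h) caches it in temp
         * before the body runs; this is the unsafe tailq element removal
         * the commit subject refers to.
         */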
        TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
                ret = ice_flow_destroy(dev, p_flow, error);
                if (ret) {
                        rte_flow_error_set(error, -ret,
                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                           "Failed to flush SW flows.");
                        return -rte_errno;
                }
        }

        return ret;
}