dpdk.git: drivers/net/ice/ice_generic_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "ice_switch_filter.h"

static int ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *ice_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
static int ice_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);

const struct rte_flow_ops ice_flow_ops = {
	.validate = ice_flow_validate,
	.create = ice_flow_create,
	.destroy = ice_flow_destroy,
	.flush = ice_flow_flush,
};
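
/*
 * Illustrative sketch (not part of the upstream driver): applications do not
 * call the functions in this table directly; they go through the generic
 * rte_flow API and the ethdev layer dispatches to these callbacks for ice
 * ports. Assuming port_id identifies a configured ice port with at least two
 * Rx queues, and assuming the plain eth/ipv4 pattern is listed in
 * ice_supported_patterns, a rule matching IPv4 destination 192.168.0.1
 * (0xc0a80001) and steering it to queue 1 could be validated and installed
 * like this:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(0xc0a80001),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */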

/* Check flow rule attributes: only simple ingress rules are supported */
static int
ice_flow_valid_attr(const struct rte_flow_attr *attr,
		struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only ingress is supported.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Egress is not supported.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Priority is not supported.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Groups are not supported.");
		return -rte_errno;
	}

	return 0;
}
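
/*
 * Illustrative note: ice_flow_valid_attr() only accepts plain ingress rules.
 * For example, the first attribute set below passes while the second is
 * rejected because a non-zero group is requested:
 *
 *	struct rte_flow_attr ok  = { .ingress = 1 };
 *	struct rte_flow_attr bad = { .ingress = 1, .group = 2 };
 */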

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
ice_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}

/* Skip all VOID items of the pattern */
static void
ice_pattern_skip_void_item(struct rte_flow_item *items,
			const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = ice_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = ice_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
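
/*
 * Illustrative example: for an input pattern of
 *	{ ETH, VOID, IPV4, VOID, UDP, END }
 * ice_pattern_skip_void_item() writes the compacted copy
 *	{ ETH, IPV4, UDP, END }
 * into 'items', so the comparison against the supported item arrays below
 * never has to deal with VOID placeholders.
 */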

/* Check if the pattern matches a supported item type array */
static bool
ice_match_pattern(enum rte_flow_item_type *item_array,
		const struct rte_flow_item *pattern)
{
	const struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}
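
/*
 * Illustrative example: with the compacted pattern { ETH, IPV4, UDP, END },
 * ice_match_pattern() returns true only for a supported item array listing
 * exactly the same sequence, e.g. a (hypothetical) array such as:
 *
 *	enum rte_flow_item_type pattern_eth_ipv4_udp[] = {
 *		RTE_FLOW_ITEM_TYPE_ETH,
 *		RTE_FLOW_ITEM_TYPE_IPV4,
 *		RTE_FLOW_ITEM_TYPE_UDP,
 *		RTE_FLOW_ITEM_TYPE_END,
 *	};
 *
 * A shorter or longer array does not match, because both walks must reach
 * END together.
 */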

/* Check the pattern against the supported item lists and return its input set */
static uint64_t ice_flow_valid_pattern(const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	uint16_t i = 0;
	uint64_t inset;
	struct rte_flow_item *items; /* used for pattern without VOID items */
	uint32_t item_num = 0; /* non-void item number */

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	/* Count the trailing END item as well */
	item_num++;

	items = rte_zmalloc("ice_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		/* 0 means failure here; the caller converts it to -rte_errno */
		return 0;
	}

	ice_pattern_skip_void_item(items, pattern);

	for (i = 0; i < RTE_DIM(ice_supported_patterns); i++)
		if (ice_match_pattern(ice_supported_patterns[i].items,
				      items)) {
			inset = ice_supported_patterns[i].sw_fields;
			rte_free(items);
			return inset;
		}
	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   pattern, "Unsupported pattern");

	rte_free(items);
	return 0;
}

/* Parse the pattern spec/mask and collect the input set fields it requests */
static uint64_t ice_get_flow_field(const struct rte_flow_item pattern[],
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_icmp *icmp_mask;
	const struct rte_flow_item_icmp6 *icmp6_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec, *nvgre_mask;
	enum rte_flow_item_type item_type;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	uint64_t input_set = ICE_INSET_NONE;
	bool is_tunnel = false;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Range is not supported");
			return 0;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (rte_is_broadcast_ether_addr(&eth_mask->src))
					input_set |= ICE_INSET_SMAC;
				if (rte_is_broadcast_ether_addr(&eth_mask->dst))
					input_set |= ICE_INSET_DMAC;
				if (eth_mask->type == RTE_BE16(0xffff))
					input_set |= ICE_INSET_ETHERTYPE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (!(ipv4_spec && ipv4_mask))
				break;

			/* Check IPv4 mask and update input set */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv4 mask.");
				return 0;
			}

			if (is_tunnel) {
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_TUN_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_TUN_IPV4_DST;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV4_PROTO;
			} else {
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (!(ipv6_spec && ipv6_mask))
				break;

			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid IPv6 mask");
				return 0;
			}

			if (is_tunnel) {
				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_TUN_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_TUN_IPV6_DST;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV6_PROTO;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_TUN_IPV6_TTL;
			} else {
				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_PROTO;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;
				if ((ipv6_mask->hdr.vtc_flow &
					rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
						== rte_cpu_to_be_32
						(RTE_IPV6_HDR_TC_MASK))
					input_set |= ICE_INSET_IPV6_TOS;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!(udp_spec && udp_mask))
				break;

			/* Check UDP mask and update input set */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return 0;
			}

			if (is_tunnel) {
				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!(tcp_spec && tcp_mask))
				break;

			/* Check TCP mask and update input set */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return 0;
			}

			if (is_tunnel) {
				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (!(sctp_spec && sctp_mask))
				break;

			/* Check SCTP mask and update input set */
			if (sctp_mask->hdr.cksum) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid SCTP mask");
				return 0;
			}

			if (is_tunnel) {
				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_TUN_DST_PORT;
			} else {
				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= ICE_INSET_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= ICE_INSET_DST_PORT;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			icmp_mask = item->mask;
			/* A NULL mask means the item only describes the protocol */
			if (!icmp_mask)
				break;

			if (icmp_mask->hdr.icmp_code ||
			    icmp_mask->hdr.icmp_cksum ||
			    icmp_mask->hdr.icmp_ident ||
			    icmp_mask->hdr.icmp_seq_nb) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ICMP mask");
				return 0;
			}

			if (icmp_mask->hdr.icmp_type == UINT8_MAX)
				input_set |= ICE_INSET_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			icmp6_mask = item->mask;
			/* A NULL mask means the item only describes the protocol */
			if (!icmp6_mask)
				break;

			if (icmp6_mask->code ||
			    icmp6_mask->checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ICMP6 mask");
				return 0;
			}

			if (icmp6_mask->type == UINT8_MAX)
				input_set |= ICE_INSET_ICMP6;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid VXLAN item");
				return 0;
			}
			if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
					vxlan_mask->vni[1] == UINT8_MAX &&
					vxlan_mask->vni[2] == UINT8_MAX)
				input_set |= ICE_INSET_TUN_ID;
			is_tunnel = true;

			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid NVGRE item");
				return 0;
			}
			if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
					nvgre_mask->tni[1] == UINT8_MAX &&
					nvgre_mask->tni[2] == UINT8_MAX)
				input_set |= ICE_INSET_TUN_ID;
			is_tunnel = true;

			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern");
			break;
		}
	}
	return input_set;
}
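
/*
 * Illustrative example: a fully masked outer IPv4 item such as
 *
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.src_addr = RTE_BE32(0xffffffff),
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *
 * contributes ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST to the returned input
 * set, while a partial mask (e.g. a /24 prefix) contributes nothing, since
 * only all-ones masks are recognized above.
 */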

static int ice_flow_valid_inset(const struct rte_flow_item pattern[],
			uint64_t inset, struct rte_flow_error *error)
{
	uint64_t fields;

	/* get valid field */
	fields = ice_get_flow_field(pattern, error);
	if (!fields || fields & (~inset)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	return 0;
}

static int ice_flow_valid_action(struct rte_eth_dev *dev,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error)
{
	const struct rte_flow_action_queue *act_q;
	uint16_t queue;
	const struct rte_flow_action *action;

	for (action = actions; action->type !=
			RTE_FLOW_ACTION_TYPE_END; action++) {
		switch (action->type) {
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = action->conf;
			queue = act_q->index;
			if (queue >= dev->data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						actions, "Invalid queue ID for"
						" switch filter.");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}
	return 0;
}
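
/*
 * Illustrative example: with four Rx queues configured, the action list
 * below passes ice_flow_valid_action(), whereas a queue index >= 4 or any
 * action type other than QUEUE, DROP or VOID is rejected:
 *
 *	struct rte_flow_action_queue q = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &q },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */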

static int
ice_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	uint64_t inset = 0;
	int ret = ICE_ERR_NOT_SUPPORTED;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	ret = ice_flow_valid_attr(attr, error);
	if (ret)
		return ret;

	inset = ice_flow_valid_pattern(pattern, error);
	if (!inset)
		return -rte_errno;

	ret = ice_flow_valid_inset(pattern, inset, error);
	if (ret)
		return ret;

	ret = ice_flow_valid_action(dev, actions, error);
	if (ret)
		return ret;

	return 0;
}

static struct rte_flow *
ice_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	int ret;

	flow = rte_zmalloc("ice_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = ice_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0)
		goto free_flow;

	ret = ice_create_switch_filter(pf, pattern, actions, flow, error);
	if (ret)
		goto free_flow;

	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}

static int
ice_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret = 0;

	ret = ice_destroy_switch_filter(pf, flow, error);

	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}

static int
ice_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *p_flow;
	void *temp;
	int ret = 0;

	TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
		ret = ice_flow_destroy(dev, p_flow, error);
		if (ret) {
			rte_flow_error_set(error, -ret,
					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					   "Failed to flush SW flows.");
			return -rte_errno;
		}
	}

	return ret;
}
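
/*
 * Illustrative end-to-end example (application side, not part of the
 * driver): the testpmd command below creates a rule that goes through
 * ice_flow_validate() and ice_create_switch_filter() before being linked
 * into pf->flow_list, and "flow flush 0" tears everything down again via
 * ice_flow_flush():
 *
 *	flow create 0 ingress pattern eth / ipv4 dst is 192.168.0.1 / end \
 *		actions queue index 1 / end
 *
 * The equivalent C sequence is rte_flow_validate() followed by
 * rte_flow_create() with a fully masked IPv4 destination address, a QUEUE
 * action and attr.ingress set, as sketched after the ice_flow_ops table at
 * the top of this file.
 */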