net/ice: support flow director VXLAN tunnel
[dpdk.git] / drivers / net / ice / ice_fdir_filter.c
#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

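/*
 * The IPv6 vtc_flow word packs version (bits 28-31), traffic class
 * (bits 20-27) and flow label (bits 0-19), so the TC byte sits at
 * offset 20 and is extracted with the mask below.
 */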
#define ICE_FDIR_IPV6_TC_OFFSET         20
#define ICE_IPV6_TC_MASK                (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE       128

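/*
 * Input-set bitmaps: the fields a flow director rule may match on for
 * each supported pattern. The ICE_INSET_TUN_* bits refer to the inner
 * headers of VXLAN-encapsulated packets.
 */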
#define ICE_FDIR_INSET_ETH_IPV4 (\
        ICE_INSET_DMAC | \
        ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
        ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
        ICE_FDIR_INSET_ETH_IPV4 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
        ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
        ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
        ICE_FDIR_INSET_ETH_IPV6 | \
        ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
        ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
        ICE_FDIR_INSET_VXLAN_IPV4 | \
        ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

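/*
 * Pattern/input-set lookup table for the FDIR parser: each entry maps an
 * rte_flow pattern to the input-set bits it may legally use. A possible
 * VXLAN inner-IPv4 rule enabled by this table, in testpmd flow syntax
 * (addresses are illustrative):
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan / eth /
 *        ipv4 src is 192.168.0.1 dst is 192.168.0.2 / end
 *        actions queue index 3 / end
 */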
static struct ice_pattern_match_item ice_fdir_pattern[] = {
        {pattern_eth_ipv4,             ICE_FDIR_INSET_ETH_IPV4,              ICE_INSET_NONE},
        {pattern_eth_ipv4_udp,         ICE_FDIR_INSET_ETH_IPV4_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_tcp,         ICE_FDIR_INSET_ETH_IPV4_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv4_sctp,        ICE_FDIR_INSET_ETH_IPV4_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv6,             ICE_FDIR_INSET_ETH_IPV6,              ICE_INSET_NONE},
        {pattern_eth_ipv6_udp,         ICE_FDIR_INSET_ETH_IPV6_UDP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_tcp,         ICE_FDIR_INSET_ETH_IPV6_TCP,          ICE_INSET_NONE},
        {pattern_eth_ipv6_sctp,        ICE_FDIR_INSET_ETH_IPV6_SCTP,         ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
                                       ICE_FDIR_INSET_VXLAN_IPV4,            ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_UDP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_TCP,        ICE_INSET_NONE},
        {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
                                       ICE_FDIR_INSET_VXLAN_IPV4_SCTP,       ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
        return rte_memzone_reserve_aligned(name, len, socket_id,
                                           RTE_MEMZONE_IOVA_CONTIG,
                                           ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME        "FDIR_MEMZONE"

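/*
 * Lazily allocate hw->fdir_prof, one ice_fd_hw_prof slot per filter
 * ptype; each slot caches the flow profile and entry handles created for
 * that ptype so they can be reused and torn down later.
 */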
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype, fltr_ptype;

        if (!hw->fdir_prof) {
                hw->fdir_prof = (struct ice_fd_hw_prof **)
                        ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                                   sizeof(*hw->fdir_prof));
                if (!hw->fdir_prof)
                        return -ENOMEM;
        }
        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                if (!hw->fdir_prof[ptype]) {
                        hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                                ice_malloc(hw, sizeof(**hw->fdir_prof));
                        if (!hw->fdir_prof[ptype])
                                goto fail_mem;
                }
        }
        return 0;

fail_mem:
        for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             fltr_ptype < ptype;
             fltr_ptype++)
                rte_free(hw->fdir_prof[fltr_ptype]);
        rte_free(hw->fdir_prof);
        return -ENOMEM;
}

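/*
 * Add one block of hardware counters to the container. The pool is
 * allocated with a trailing array of 'len' ice_fdir_counter entries
 * (hence the sizeof(*pool) + len * sizeof(counter) allocation) and
 * every counter is pushed onto the pool's free list.
 */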
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
        struct ice_fdir_counter_pool *pool;
        uint32_t i;
        int ret = 0;

        pool = rte_zmalloc("ice_fdir_counter_pool",
                           sizeof(*pool) +
                           sizeof(struct ice_fdir_counter) * len,
                           0);
        if (!pool) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir counter pool");
                return -ENOMEM;
        }

        TAILQ_INIT(&pool->counter_list);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

        for (i = 0; i < len; i++) {
                struct ice_fdir_counter *counter = &pool->counters[i];

                counter->hw_index = index_start + i;
                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }

        if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
                PMD_INIT_LOG(ERR, "FDIR counter pool is full");
                ret = -EINVAL;
                goto free_pool;
        }

        container->pools[container->index_free++] = pool;
        return 0;

free_pool:
        rte_free(pool);
        return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint32_t cnt_index, len;
        int ret;

        TAILQ_INIT(&container->pool_list);

        cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
        len = ICE_FDIR_COUNTERS_PER_BLOCK;

        ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
        if (ret) {
                PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
                return ret;
        }

        return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        uint8_t i;

        for (i = 0; i < container->index_free; i++)
                rte_free(container->pools[i]);

        return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                                        *container,
                               uint32_t id)
{
        struct ice_fdir_counter_pool *pool;
        struct ice_fdir_counter *counter;
        int i;

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
                        counter = &pool->counters[i];

                        if (counter->shared &&
                            counter->ref_cnt &&
                            counter->id == id)
                                return counter;
                }
        }

        return NULL;
}

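/*
 * Allocate a flow counter. For shared counters an existing counter with
 * the same ID is reused and its reference count bumped; the
 * ref_cnt + 1 == 0 test guards against reference-count wrap-around.
 */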
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_counter_pool_container *container =
                                &fdir_info->counter;
        struct ice_fdir_counter_pool *pool = NULL;
        struct ice_fdir_counter *counter_free = NULL;

        if (shared) {
                counter_free = ice_fdir_counter_shared_search(container, id);
                if (counter_free) {
                        if (counter_free->ref_cnt + 1 == 0) {
                                rte_errno = E2BIG;
                                return NULL;
                        }
                        counter_free->ref_cnt++;
                        return counter_free;
                }
        }

        TAILQ_FOREACH(pool, &container->pool_list, next) {
                counter_free = TAILQ_FIRST(&pool->counter_list);
                if (counter_free)
                        break;
                counter_free = NULL;
        }

        if (!counter_free) {
                PMD_DRV_LOG(ERR, "No free counter found");
                return NULL;
        }

        counter_free->shared = shared;
        counter_free->id = id;
        counter_free->ref_cnt = 1;
        counter_free->pool = pool;

        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

        TAILQ_REMOVE(&pool->counter_list, counter_free, next);
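        /*
         * If this pool just handed out its last free counter, rotate it
         * to the tail of the pool list so that later allocations probe
         * pools with free counters first.
         */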
        if (TAILQ_EMPTY(&pool->counter_list)) {
                TAILQ_REMOVE(&container->pool_list, pool, next);
                TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
        }

        return counter_free;
}

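/* Drop one reference; the counter returns to its pool's free list only
 * when the last reference goes away.
 */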
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
        if (!counter)
                return;

        if (--counter->ref_cnt == 0) {
                struct ice_fdir_counter_pool *pool = counter->pool;

                TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
        }
}

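/*
 * The SW filter list is a CRC hash table keyed by ice_fdir_fltr_pattern,
 * with a parallel hash_map array resolving the hash slot back to the
 * stored filter configuration.
 */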
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
        struct rte_eth_dev *dev = pf->adapter->eth_dev;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        char fdir_hash_name[RTE_HASH_NAMESIZE];
        int ret;

        struct rte_hash_parameters fdir_hash_params = {
                .name = fdir_hash_name,
                .entries = ICE_MAX_FDIR_FILTER_NUM,
                .key_len = sizeof(struct ice_fdir_fltr_pattern),
                .hash_func = rte_hash_crc,
                .hash_func_init_val = 0,
                .socket_id = rte_socket_id(),
        };

        /* Initialize hash */
        snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
                 "fdir_%s", dev->device->name);
        fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
        if (!fdir_info->hash_table) {
                PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
                return -EINVAL;
        }
        fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                          sizeof(*fdir_info->hash_map) *
                                          ICE_MAX_FDIR_FILTER_NUM,
                                          0);
        if (!fdir_info->hash_map) {
                PMD_INIT_LOG(ERR,
                             "Failed to allocate memory for fdir hash map!");
                ret = -ENOMEM;
                goto err_fdir_hash_map_alloc;
        }
        return 0;

err_fdir_hash_map_alloc:
        rte_hash_free(fdir_info->hash_table);

        return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;

        if (fdir_info->hash_map)
                rte_free(fdir_info->hash_map);
        if (fdir_info->hash_table)
                rte_hash_free(fdir_info->hash_table);
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        const struct rte_memzone *mz = NULL;
        char z_name[RTE_MEMZONE_NAMESIZE];
        struct ice_vsi *vsi;
        int err = ICE_SUCCESS;

        if ((pf->flags & ICE_FLAG_FDIR) == 0) {
                PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
                return -ENOTSUP;
        }

        PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                    " fd_fltr_best_effort = %u.",
                    hw->func_caps.fd_fltr_guar,
                    hw->func_caps.fd_fltr_best_effort);

        if (pf->fdir.fdir_vsi) {
                PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
                return ICE_SUCCESS;
        }

        /* make new FDIR VSI */
        vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
        if (!vsi) {
                PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
                return -EINVAL;
        }
        pf->fdir.fdir_vsi = vsi;

        err = ice_fdir_init_filter_list(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
                return -EINVAL;
        }

        err = ice_fdir_counter_init(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
                return -EINVAL;
        }

        /* FDIR TX queue setup */
        err = ice_fdir_setup_tx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
                goto fail_setup_tx;
        }

        /* FDIR RX queue setup */
        err = ice_fdir_setup_rx_resources(pf);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
                goto fail_setup_rx;
        }

        err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
                goto fail_mem;
        }

        err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
        if (err) {
                PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
                goto fail_mem;
        }

        /* reserve memory for the fdir programming packet */
        snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
                 ICE_FDIR_MZ_NAME,
                 eth_dev->data->port_id);
        mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
        if (!mz) {
                PMD_DRV_LOG(ERR, "Cannot init memzone for "
                            "flow director program packet.");
                err = -ENOMEM;
                goto fail_mem;
        }
        pf->fdir.prg_pkt = mz->addr;
        pf->fdir.dma_addr = mz->iova;

        err = ice_fdir_prof_alloc(hw);
        if (err) {
                PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                            "flow director profile.");
                err = -ENOMEM;
                goto fail_mem;
        }

        PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
                    vsi->base_queue);
        return ICE_SUCCESS;

fail_mem:
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
fail_setup_rx:
        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
fail_setup_tx:
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
        return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++)
                rte_free(hw->fdir_prof[ptype]);

        rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fd_hw_prof *hw_prof;
        uint64_t prof_id;
        uint16_t vsi_num;
        int i;

        if (!hw->fdir_prof || !hw->fdir_prof[ptype])
                return;

        hw_prof = hw->fdir_prof[ptype];

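        /*
         * Non-tunnel and tunnel rules use disjoint profile ID ranges:
         * prof_id is ptype for the non-tunnel profile and
         * ptype + ICE_FLTR_PTYPE_MAX for the tunnel profile.
         */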
        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
                if (hw_prof->entry_h[i][is_tunnel]) {
                        vsi_num = ice_get_hw_vsi_num(hw,
                                                     hw_prof->vsi_h[i]);
                        ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                             vsi_num, prof_id);
                        ice_flow_rem_entry(hw,
                                           hw_prof->entry_h[i][is_tunnel]);
                        hw_prof->entry_h[i][is_tunnel] = 0;
                }
        }
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
        rte_free(hw_prof->fdir_seg[is_tunnel]);
        hw_prof->fdir_seg[is_tunnel] = NULL;

        for (i = 0; i < hw_prof->cnt; i++)
                hw_prof->vsi_h[i] = 0;
        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
        enum ice_fltr_ptype ptype;

        for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
             ptype < ICE_FLTR_PTYPE_MAX;
             ptype++) {
                ice_fdir_prof_rm(pf, ptype, false);
                ice_fdir_prof_rm(pf, ptype, true);
        }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
        struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_vsi *vsi;
        int err;

        vsi = pf->fdir.fdir_vsi;
        if (!vsi)
                return;

        err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

        err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

        err = ice_fdir_counter_release(pf);
        if (err)
                PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

        ice_fdir_release_filter_list(pf);

        ice_tx_queue_release(pf->fdir.txq);
        pf->fdir.txq = NULL;
        ice_rx_queue_release(pf->fdir.rxq);
        pf->fdir.rxq = NULL;
        ice_fdir_prof_rm_all(pf);
        ice_fdir_prof_free(hw);
        ice_release_vsi(vsi);
        pf->fdir.fdir_vsi = NULL;
}

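/*
 * Create the HW flow profile for one (ptype, tunnel) combination and add
 * flow entries for both the main VSI and the FDIR control VSI, recording
 * the handles in hw_prof for later teardown.
 */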
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        enum ice_flow_dir dir = ICE_FLOW_RX;
        struct ice_flow_seg_info *ori_seg;
        struct ice_fd_hw_prof *hw_prof;
        struct ice_flow_prof *prof;
        uint64_t entry_1 = 0;
        uint64_t entry_2 = 0;
        uint16_t vsi_num;
        int ret;
        uint64_t prof_id;

        hw_prof = hw->fdir_prof[ptype];
        ori_seg = hw_prof->fdir_seg[is_tunnel];
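        /*
         * An identical segment is already committed for this ptype:
         * report -EAGAIN, which the caller treats as success. A different
         * segment may only replace the old profile once no filters of
         * this ptype remain installed.
         */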
        if (ori_seg) {
                if (!is_tunnel) {
                        if (!memcmp(ori_seg, seg, sizeof(*seg)))
                                return -EAGAIN;
                } else {
                        if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
                                return -EAGAIN;
                }

                if (pf->fdir_fltr_cnt[ptype][is_tunnel])
                        return -EINVAL;

                ice_fdir_prof_rm(pf, ptype, is_tunnel);
        }

        prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
        ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                                (is_tunnel) ? 2 : 1, NULL, 0, &prof);
        if (ret)
                return ret;
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_1);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                            ptype);
                goto err_add_prof;
        }
        ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                                 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                                 seg, NULL, 0, &entry_2);
        if (ret) {
                PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                            ptype);
                goto err_add_entry;
        }

        pf->hw_prof_cnt[ptype][is_tunnel] = 0;
        hw_prof->cnt = 0;
        hw_prof->fdir_seg[is_tunnel] = seg;
        hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
        pf->hw_prof_cnt[ptype][is_tunnel]++;
        hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
        hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
        pf->hw_prof_cnt[ptype][is_tunnel]++;

        return ret;

err_add_entry:
        vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
        ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
        ice_flow_rem_entry(hw, entry_1);
err_add_prof:
        ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

        return ret;
}

static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
        uint32_t i, j;

        struct ice_inset_map {
                uint64_t inset;
                enum ice_flow_field fld;
        };
        static const struct ice_inset_map ice_inset_map[] = {
                {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
                {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
                {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
                {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
                {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
                {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
                {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
                {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
                {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
                {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
                {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
                {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
                {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
                {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
                {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
                {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
                {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
                {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
        };

        for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
                if ((inset & ice_inset_map[i].inset) ==
                    ice_inset_map[i].inset)
                        field[j++] = ice_inset_map[i].fld;
        }
}

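/*
 * Translate an input-set bitmap into a flow segment and commit it to
 * hardware. Tunnel rules use ICE_FD_HW_SEG_MAX segments: the outer
 * segment at index 0 is left zeroed and the parsed fields land in the
 * inner segment at index 1. On success the segment's ownership passes
 * to hw_prof->fdir_seg; it is only freed here on failure. NB: no local
 * 'hw' is declared in this function; ice_malloc() is a macro whose
 * first argument is not evaluated in DPDK's osdep layer, so this
 * compiles as-is.
 */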
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
        struct ice_flow_seg_info *seg;
        struct ice_flow_seg_info *seg_tun = NULL;
        enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
        int i, ret;

        if (!input_set)
                return -EINVAL;

        seg = (struct ice_flow_seg_info *)
                ice_malloc(hw, sizeof(*seg));
        if (!seg) {
                PMD_DRV_LOG(ERR, "No memory can be allocated");
                return -ENOMEM;
        }

        for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
                field[i] = ICE_FLOW_FIELD_IDX_MAX;
        ice_fdir_input_set_parse(input_set, field);

        switch (flow) {
        case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                                  ICE_FLOW_SEG_HDR_IPV6);
                break;
        case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
                ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
                break;
        default:
                PMD_DRV_LOG(ERR, "Unsupported filter type.");
                break;
        }

        for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
                ice_flow_set_fld(seg, field[i],
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL,
                                 ICE_FLOW_FLD_OFF_INVAL, false);
        }

        if (!is_tunnel) {
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg, flow, false);
        } else {
                seg_tun = (struct ice_flow_seg_info *)
                        ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
                if (!seg_tun) {
                        PMD_DRV_LOG(ERR, "No memory can be allocated");
                        rte_free(seg);
                        return -ENOMEM;
                }
                rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
                ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                           seg_tun, flow, true);
        }

        if (!ret) {
                return ret;
        } else if (ret < 0) {
                rte_free(seg);
                if (is_tunnel)
                        rte_free(seg_tun);
                return (ret == -EAGAIN) ? 0 : ret;
        } else {
                return ret;
        }
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        int cnt;

        cnt = (add) ? 1 : -1;
        hw->fdir_active_fltr += cnt;
        if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
                PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
        else
                pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;
        int ret;

        ret = ice_fdir_setup(pf);
        if (ret)
                return ret;

        return ice_register_parser(&ice_fdir_parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
        struct ice_pf *pf = &ad->pf;

        ice_unregister_parser(&ice_fdir_parser, ad);

        ice_fdir_teardown(pf);
}

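/*
 * Program (add == true) or remove one filter in hardware: build the
 * programming descriptor, generate the matching dummy packet in the
 * pre-reserved memzone, and submit them via ice_fdir_programming().
 */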
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
        struct ice_fltr_desc desc;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
        bool is_tun;
        int ret;

        filter->input.dest_vsi = pf->main_vsi->idx;

        memset(&desc, 0, sizeof(desc));
        ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

        is_tun = filter->tunnel_type ? true : false;

        memset(pkt, 0, ICE_FDIR_PKT_LEN);
        ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
        if (ret) {
                PMD_DRV_LOG(ERR, "Generate dummy packet failed");
                return -EINVAL;
        }

        return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
        struct ice_fdir_fltr *input = &filter->input;

        memset(key, 0, sizeof(*key));

        key->flow_type = input->flow_type;
        rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
        rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
        rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
        rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

        key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                        const struct ice_fdir_fltr_pattern *key)
{
        int ret;

        ret = rte_hash_lookup(fdir_info->hash_table, key);
        if (ret < 0)
                return NULL;

        return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_add_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to insert fdir entry into hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = entry;

        return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
        struct ice_fdir_info *fdir_info = &pf->fdir;
        int ret;

        ret = rte_hash_del_key(fdir_info->hash_table, key);
        if (ret < 0) {
                PMD_DRV_LOG(ERR,
                            "Failed to delete fdir filter from hash table %d!",
                            ret);
                return ret;
        }
        fdir_info->hash_map[ret] = NULL;

        return 0;
}

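/*
 * Create one FDIR rule: reject duplicates via the SW hash table,
 * (re)configure the input-set profile, optionally allocate a counter,
 * program the filter in HW, and finally record it in the SW list.
 */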
static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_filter_conf *filter = meta;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *entry, *node;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        ice_fdir_extract_fltr_key(&key, filter);
        node = ice_fdir_entry_lookup(fdir_info, &key);
        if (node) {
                rte_flow_error_set(error, EEXIST,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Rule already exists!");
                return -rte_errno;
        }

        entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
        if (!entry) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return -rte_errno;
        }

        is_tun = filter->tunnel_type ? true : false;

        ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                        filter->input_set, is_tun);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Profile configuration failed.");
                goto free_entry;
        }

        /* alloc counter for FDIR */
        if (filter->input.cnt_ena) {
                struct rte_flow_action_count *act_count = &filter->act_count;

                filter->counter = ice_fdir_counter_alloc(pf,
                                                         act_count->shared,
                                                         act_count->id);
                if (!filter->counter) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                        "Failed to alloc FDIR counter.");
                        goto free_entry;
                }
                filter->input.cnt_index = filter->counter->hw_index;
        }

        ret = ice_fdir_add_del_filter(pf, filter, true);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Add filter rule failed.");
                goto free_counter;
        }

        rte_memcpy(entry, filter, sizeof(*entry));
        ret = ice_fdir_entry_insert(pf, entry, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Insert entry to table failed.");
                goto free_entry;
        }

        flow->rule = entry;
        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

        return 0;

free_counter:
        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

free_entry:
        rte_free(entry);
        return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_fdir_info *fdir_info = &pf->fdir;
        struct ice_fdir_filter_conf *filter, *entry;
        struct ice_fdir_fltr_pattern key;
        bool is_tun;
        int ret;

        filter = (struct ice_fdir_filter_conf *)flow->rule;

        is_tun = filter->tunnel_type ? true : false;

        if (filter->counter) {
                ice_fdir_counter_free(pf, filter->counter);
                filter->counter = NULL;
        }

        ice_fdir_extract_fltr_key(&key, filter);
        entry = ice_fdir_entry_lookup(fdir_info, &key);
        if (!entry) {
                rte_flow_error_set(error, ENOENT,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Can't find entry.");
                return -rte_errno;
        }

        ret = ice_fdir_add_del_filter(pf, filter, false);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Del filter rule failed.");
                return -rte_errno;
        }

        ret = ice_fdir_entry_del(pf, &key);
        if (ret) {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Remove entry from table failed.");
                return -rte_errno;
        }

        ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
        flow->rule = NULL;

        rte_free(filter);

        return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
                      struct rte_flow *flow,
                      struct rte_flow_query_count *flow_stats,
                      struct rte_flow_error *error)
{
        struct ice_pf *pf = &ad->pf;
        struct ice_hw *hw = ICE_PF_TO_HW(pf);
        struct ice_fdir_filter_conf *filter = flow->rule;
        struct ice_fdir_counter *counter = filter->counter;
        uint64_t hits_lo, hits_hi;

        if (!counter) {
                rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                  NULL,
                                  "FDIR counters not available");
                return -rte_errno;
        }

        /*
         * Reading the low 32-bits latches the high 32-bits into a shadow
         * register. Reading the high 32-bit returns the value in the
         * shadow register.
         */
        hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
        hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

        flow_stats->hits_set = 1;
        flow_stats->hits = hits_lo | (hits_hi << 32);
        flow_stats->bytes_set = 0;
        flow_stats->bytes = 0;

        if (flow_stats->reset) {
                /* reset statistic counter value */
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
                ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
        }

        return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
        .init = ice_fdir_init,
        .uninit = ice_fdir_uninit,
        .create = ice_fdir_create_filter,
        .destroy = ice_fdir_destroy_filter,
        .query_count = ice_fdir_query_count,
        .type = ICE_FLOW_ENGINE_FDIR,
};

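/*
 * Validate an RSS action used as a queue region: the queues must be
 * consecutive, a power of two in count, and within the device's RX
 * queue range. q_region stores log2 of the region size
 * (rte_fls_u32(n) - 1 for a power-of-two n).
 */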
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_action_rss *rss = act->conf;
        uint32_t i;

        if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                return -rte_errno;
        }

        if (rss->queue_num <= 1) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Queue region size can't be 0 or 1.");
                return -rte_errno;
        }

        /* check if queue indexes for the queue region are continuous */
        for (i = 0; i < rss->queue_num - 1; i++) {
                if (rss->queue[i + 1] != rss->queue[i] + 1) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Discontinuous queue region");
                        return -rte_errno;
                }
        }

        if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid queue region indexes.");
                return -rte_errno;
        }

        if (!(rte_is_power_of_2(rss->queue_num) &&
             (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "The region size should be any of the following values: "
                                   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
                                   "of queues does not exceed the VSI allocation.");
                return -rte_errno;
        }

        filter->input.q_index = rss->queue[0];
        filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
        filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

        return 0;
}

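/*
 * Parse the action list for a filter: exactly one fate action (queue,
 * drop, passthru or RSS queue region) is required, plus at most one
 * mark and one count action.
 */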
static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
        struct ice_pf *pf = &ad->pf;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_mark *mark_spec = NULL;
        const struct rte_flow_action_count *act_count;
        uint32_t dest_num = 0;
        uint32_t mark_num = 0;
        uint32_t counter_num = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        dest_num++;

                        act_q = actions->conf;
                        filter->input.q_index = act_q->index;
                        if (filter->input.q_index >=
                                        pf->dev_data->nb_rx_queues) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                                   actions,
                                                   "Invalid queue for FDIR.");
                                return -rte_errno;
                        }
                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
                        break;
                case RTE_FLOW_ACTION_TYPE_PASSTHRU:
                        dest_num++;

                        filter->input.dest_ctl =
                                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
                        filter->input.q_index = 0;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        dest_num++;

                        ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
                        if (ret)
                                return ret;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        mark_num++;

                        mark_spec = actions->conf;
                        filter->input.fltr_id = mark_spec->id;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        counter_num++;

                        act_count = actions->conf;
                        filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
                        rte_memcpy(&filter->act_count, act_count,
                                                sizeof(filter->act_count));

                        break;
                default:
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                   "Invalid action.");
                        return -rte_errno;
                }
        }

        if (dest_num == 0 || dest_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
                return -rte_errno;
        }

        if (mark_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
                return -rte_errno;
        }

        if (counter_num >= 2) {
                rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
                return -rte_errno;
        }

        return 0;
}

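/*
 * Parse the pattern items into an ice_fdir_fltr. tunnel_type is switched
 * to VXLAN when a VXLAN item is parsed (further down in this function),
 * after which L3/L4 items describe the inner headers and set the
 * ICE_INSET_TUN_* input-set bits.
 */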
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                       const struct rte_flow_item pattern[],
                       struct rte_flow_error *error,
                       struct ice_fdir_filter_conf *filter)
{
        const struct rte_flow_item *item = pattern;
        enum rte_flow_item_type item_type;
        enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
        enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
        const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
        uint64_t input_set = ICE_INSET_NONE;
        uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
        uint8_t ipv6_addr_mask[16] = {
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
                0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
        };
        uint32_t vtc_flow_cpu;

        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item,
                                        "Range not supported");
                        return -rte_errno;
                }
                item_type = item->type;

                switch (item_type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        if (eth_spec && eth_mask) {
                                if (!rte_is_zero_ether_addr(&eth_spec->src) ||
                                    !rte_is_zero_ether_addr(&eth_mask->src)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Source MAC not supported");
                                        return -rte_errno;
                                }

                                if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                                        rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ITEM,
                                                item,
                                                "Invalid MAC address mask");
                                        return -rte_errno;
                                }

                                input_set |= ICE_INSET_DMAC;
                                rte_memcpy(&filter->input.ext_data.dst_mac,
                                           &eth_spec->dst,
                                           RTE_ETHER_ADDR_LEN);
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        l3 = RTE_FLOW_ITEM_TYPE_IPV4;
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;

                        if (ipv4_spec && ipv4_mask) {
                                /* Check IPv4 mask and update input set */
                                if (ipv4_mask->hdr.version_ihl ||
                                    ipv4_mask->hdr.total_length ||
                                    ipv4_mask->hdr.packet_id ||
                                    ipv4_mask->hdr.fragment_offset ||
                                    ipv4_mask->hdr.hdr_checksum) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid IPv4 mask.");
                                        return -rte_errno;
                                }
                                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_IPV4_SRC :
                                                     ICE_INSET_IPV4_SRC;
                                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                                        input_set |= tunnel_type ?
                                                     ICE_INSET_TUN_IPV4_DST :
                                                     ICE_INSET_IPV4_DST;
                                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TOS;
                                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_TTL;
                                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                                        input_set |= ICE_INSET_IPV4_PROTO;

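                                /*
                                 * Note: src/dst are stored swapped on
                                 * purpose, since the base code builds the
                                 * FDIR programming packet with ip.v4.src_ip
                                 * placed at the destination-address offset
                                 * and vice versa (see
                                 * ice_fdir_get_gen_prgm_pkt()).
                                 */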
1385                                 filter->input.ip.v4.dst_ip =
1386                                         ipv4_spec->hdr.src_addr;
1387                                 filter->input.ip.v4.src_ip =
1388                                         ipv4_spec->hdr.dst_addr;
1389                                 filter->input.ip.v4.tos =
1390                                         ipv4_spec->hdr.type_of_service;
1391                                 filter->input.ip.v4.ttl =
1392                                         ipv4_spec->hdr.time_to_live;
1393                                 filter->input.ip.v4.proto =
1394                                         ipv4_spec->hdr.next_proto_id;
1395                         }
1396
1397                         flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1398                         break;
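                        /*
                         * Illustrative example (testpmd syntax, not part of
                         * this file): the following rule exercises this
                         * branch and yields input_set = ICE_INSET_IPV4_SRC |
                         * ICE_INSET_IPV4_DST with flow_type
                         * ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
                         *   flow create 0 ingress pattern eth / ipv4
                         *   src is 192.168.0.20 dst is 192.168.0.21 / end
                         *   actions queue index 1 / end
                         */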
1399                 case RTE_FLOW_ITEM_TYPE_IPV6:
1400                         l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1401                         ipv6_spec = item->spec;
1402                         ipv6_mask = item->mask;
1403
1404                         if (ipv6_spec && ipv6_mask) {
1405                                 /* Check IPv6 mask and update input set */
1406                                 if (ipv6_mask->hdr.payload_len) {
1407                                         rte_flow_error_set(error, EINVAL,
1408                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1409                                                    item,
1410                                                    "Invalid IPv6 mask");
1411                                         return -rte_errno;
1412                                 }
1413
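                                /* A field joins the input set only when its
                                 * mask is all-ones; ipv6_addr_mask (defined
                                 * earlier in this file) is the 16-byte
                                 * all-0xFF pattern, so memcmp() == 0 means a
                                 * fully-masked address.
                                 */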
1414                                 if (!memcmp(ipv6_mask->hdr.src_addr,
1415                                             ipv6_addr_mask,
1416                                             RTE_DIM(ipv6_mask->hdr.src_addr)))
1417                                         input_set |= ICE_INSET_IPV6_SRC;
1418                                 if (!memcmp(ipv6_mask->hdr.dst_addr,
1419                                             ipv6_addr_mask,
1420                                             RTE_DIM(ipv6_mask->hdr.dst_addr)))
1421                                         input_set |= ICE_INSET_IPV6_DST;
1422
1423                                 if ((ipv6_mask->hdr.vtc_flow &
1424                                      rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1425                                     == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1426                                         input_set |= ICE_INSET_IPV6_TC;
1427                                 if (ipv6_mask->hdr.proto == UINT8_MAX)
1428                                         input_set |= ICE_INSET_IPV6_NEXT_HDR;
1429                                 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1430                                         input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1431
1432                                 rte_memcpy(filter->input.ip.v6.dst_ip,
1433                                            ipv6_spec->hdr.src_addr, 16);
1434                                 rte_memcpy(filter->input.ip.v6.src_ip,
1435                                            ipv6_spec->hdr.dst_addr, 16);
1436
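                                /* vtc_flow layout (host order): version in
                                 * bits 31:28, traffic class in bits 27:20,
                                 * flow label in bits 19:0.  E.g. vtc_flow_cpu
                                 * = 0x60300000 gives
                                 * tc = (uint8_t)(0x60300000 >> 20) = 0x03.
                                 */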
1437                                 vtc_flow_cpu =
1438                                       rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1439                                 filter->input.ip.v6.tc =
1440                                         (uint8_t)(vtc_flow_cpu >>
1441                                                   ICE_FDIR_IPV6_TC_OFFSET);
1442                                 filter->input.ip.v6.proto =
1443                                         ipv6_spec->hdr.proto;
1444                                 filter->input.ip.v6.hlim =
1445                                         ipv6_spec->hdr.hop_limits;
1446                         }
1447
1448                         flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1449                         break;
1450                 case RTE_FLOW_ITEM_TYPE_TCP:
1451                         tcp_spec = item->spec;
1452                         tcp_mask = item->mask;
1453
1454                         if (tcp_spec && tcp_mask) {
1455                                 /* Check TCP mask and update input set */
1456                                 if (tcp_mask->hdr.sent_seq ||
1457                                     tcp_mask->hdr.recv_ack ||
1458                                     tcp_mask->hdr.data_off ||
1459                                     tcp_mask->hdr.tcp_flags ||
1460                                     tcp_mask->hdr.rx_win ||
1461                                     tcp_mask->hdr.cksum ||
1462                                     tcp_mask->hdr.tcp_urp) {
1463                                         rte_flow_error_set(error, EINVAL,
1464                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1465                                                    item,
1466                                                    "Invalid TCP mask");
1467                                         return -rte_errno;
1468                                 }
1469
1470                                 if (tcp_mask->hdr.src_port == UINT16_MAX)
1471                                         input_set |= tunnel_type ?
1472                                                      ICE_INSET_TUN_TCP_SRC_PORT :
1473                                                      ICE_INSET_TCP_SRC_PORT;
1474                                 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1475                                         input_set |= tunnel_type ?
1476                                                      ICE_INSET_TUN_TCP_DST_PORT :
1477                                                      ICE_INSET_TCP_DST_PORT;
1478
1479                                 /* Get filter info */
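                                /* Ports are swapped for the same reason as
                                 * the IPv4 addresses above: the base
                                 * training-packet builder reverses them.
                                 */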
1480                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1481                                         filter->input.ip.v4.dst_port =
1482                                                 tcp_spec->hdr.src_port;
1483                                         filter->input.ip.v4.src_port =
1484                                                 tcp_spec->hdr.dst_port;
1485                                         flow_type =
1486                                                 ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1487                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1488                                         filter->input.ip.v6.dst_port =
1489                                                 tcp_spec->hdr.src_port;
1490                                         filter->input.ip.v6.src_port =
1491                                                 tcp_spec->hdr.dst_port;
1492                                         flow_type =
1493                                                 ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1494                                 }
1495                         }
1496                         break;
1497                 case RTE_FLOW_ITEM_TYPE_UDP:
1498                         udp_spec = item->spec;
1499                         udp_mask = item->mask;
1500
1501                         if (udp_spec && udp_mask) {
1502                                 /* Check UDP mask and update input set */
1503                                 if (udp_mask->hdr.dgram_len ||
1504                                     udp_mask->hdr.dgram_cksum) {
1505                                         rte_flow_error_set(error, EINVAL,
1506                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1507                                                    item,
1508                                                    "Invalid UDP mask");
1509                                         return -rte_errno;
1510                                 }
1511
1512                                 if (udp_mask->hdr.src_port == UINT16_MAX)
1513                                         input_set |= tunnel_type ?
1514                                                      ICE_INSET_TUN_UDP_SRC_PORT :
1515                                                      ICE_INSET_UDP_SRC_PORT;
1516                                 if (udp_mask->hdr.dst_port == UINT16_MAX)
1517                                         input_set |= tunnel_type ?
1518                                                      ICE_INSET_TUN_UDP_DST_PORT :
1519                                                      ICE_INSET_UDP_DST_PORT;
1520
1521                                 /* Get filter info */
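                                /* Same src/dst port swap as the TCP case;
                                 * the IPv6 branch below follows the same
                                 * convention.
                                 */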
1522                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1523                                         filter->input.ip.v4.dst_port =
1524                                                 udp_spec->hdr.src_port;
1525                                         filter->input.ip.v4.src_port =
1526                                                 udp_spec->hdr.dst_port;
1527                                         flow_type =
1528                                                 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1529                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1530                                         filter->input.ip.v6.dst_port =
1531                                                 udp_spec->hdr.src_port;
1532                                         filter->input.ip.v6.src_port =
1533                                                 udp_spec->hdr.dst_port;
1534                                         flow_type =
1535                                                 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1536                                 }
1537                         }
1538                         break;
1539                 case RTE_FLOW_ITEM_TYPE_SCTP:
1540                         sctp_spec = item->spec;
1541                         sctp_mask = item->mask;
1542
1543                         if (sctp_spec && sctp_mask) {
1544                                 /* Check SCTP mask and update input set */
1545                                 if (sctp_mask->hdr.cksum) {
1546                                         rte_flow_error_set(error, EINVAL,
1547                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1548                                                    item,
1549                                                    "Invalid SCTP mask");
1550                                         return -rte_errno;
1551                                 }
1552
1553                                 if (sctp_mask->hdr.src_port == UINT16_MAX)
1554                                         input_set |= tunnel_type ?
1555                                                      ICE_INSET_TUN_SCTP_SRC_PORT :
1556                                                      ICE_INSET_SCTP_SRC_PORT;
1557                                 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1558                                         input_set |= tunnel_type ?
1559                                                      ICE_INSET_TUN_SCTP_DST_PORT :
1560                                                      ICE_INSET_SCTP_DST_PORT;
1561
1562                                 /* Get filter info */
1563                                 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1564                                         filter->input.ip.v4.dst_port =
1565                                                 sctp_spec->hdr.src_port;
1566                                         filter->input.ip.v4.src_port =
1567                                                 sctp_spec->hdr.dst_port;
1568                                         flow_type =
1569                                                 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1570                                 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1571                                         filter->input.ip.v6.dst_port =
1572                                                 sctp_spec->hdr.src_port;
1573                                         filter->input.ip.v6.src_port =
1574                                                 sctp_spec->hdr.dst_port;
1575                                         flow_type =
1576                                                 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1577                                 }
1578                         }
1579                         break;
1580                 case RTE_FLOW_ITEM_TYPE_VOID:
1581                         break;
1582                 case RTE_FLOW_ITEM_TYPE_VXLAN:
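                        /* Reset l3: any L3 item that follows the VXLAN
                         * header is the inner one and must be parsed afresh.
                         * Once tunnel_type is set below, subsequent L3/L4
                         * matches map to the ICE_INSET_TUN_* bits.
                         */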
1583                         l3 = RTE_FLOW_ITEM_TYPE_END;
1584                         vxlan_spec = item->spec;
1585                         vxlan_mask = item->mask;
1586
1587                         if (vxlan_spec || vxlan_mask) {
1588                                 rte_flow_error_set(error, EINVAL,
1589                                                    RTE_FLOW_ERROR_TYPE_ITEM,
1590                                                    item,
1591                                                    "Invalid VXLAN item: field matching not supported");
1592                                 return -rte_errno;
1593                         }
1594
1595                         tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1596                         break;
1597                 default:
1598                         rte_flow_error_set(error, EINVAL,
1599                                    RTE_FLOW_ERROR_TYPE_ITEM,
1600                                    item,
1601                                    "Invalid pattern item");
1602                         return -rte_errno;
1603                 }
1604         }
1605
1606         filter->tunnel_type = tunnel_type;
1607         filter->input.flow_type = flow_type;
1608         filter->input_set = input_set;
1609
1610         return 0;
1611 }
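
/*
 * Illustrative example (testpmd syntax, not part of this file): a VXLAN
 * inner-IPv4 rule matching pattern_eth_ipv4_udp_vxlan_ipv4_tcp.  The outer
 * eth/ipv4/udp items carry no spec, the vxlan item must stay empty (see the
 * check above), and the inner ipv4/tcp items populate the ICE_INSET_TUN_*
 * bits:
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan /
 *   ipv4 src is 10.0.0.1 dst is 10.0.0.2 / tcp src is 25 dst is 23 /
 *   end actions queue index 2 / end
 */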
1612
1613 static int
1614 ice_fdir_parse(struct ice_adapter *ad,
1615                struct ice_pattern_match_item *array,
1616                uint32_t array_len,
1617                const struct rte_flow_item pattern[],
1618                const struct rte_flow_action actions[],
1619                void **meta,
1620                struct rte_flow_error *error)
1621 {
1622         struct ice_pf *pf = &ad->pf;
1623         struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1624         struct ice_pattern_match_item *item = NULL;
1625         uint64_t input_set;
1626         int ret;
1627
1628         memset(filter, 0, sizeof(*filter));
1629         item = ice_search_pattern_match_item(pattern, array, array_len, error);
1630         if (!item)
1631                 return -rte_errno;
1632
1633         ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
1634         if (ret)
1635                 return ret;
1636         input_set = filter->input_set;
1637         if (!input_set || input_set & ~item->input_set_mask) {
1638                 rte_flow_error_set(error, EINVAL,
1639                                    RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1640                                    pattern,
1641                                    "Invalid input set");
1642                 return -rte_errno;
1643         }
1644
1645         ret = ice_fdir_parse_action(ad, actions, error, filter);
1646         if (ret)
1647                 return ret;
1648
1649         *meta = filter;
1650
1651         return 0;
1652 }
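
/*
 * Note: *meta points at the single pre-allocated pf->fdir.conf, which is
 * zeroed at the top of every parse; the engine's create callback is expected
 * to copy it out before the next rule is parsed.
 */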
1653
1654 static struct ice_flow_parser ice_fdir_parser = {
1655         .engine = &ice_fdir_engine,
1656         .array = ice_fdir_pattern,
1657         .array_len = RTE_DIM(ice_fdir_pattern),
1658         .parse_pattern_action = ice_fdir_parse,
1659         .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1660 };
1661
1662 RTE_INIT(ice_fdir_engine_register)
1663 {
1664         ice_register_flow_engine(&ice_fdir_engine);
1665 }