/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
	{pattern_eth_ipv4,	ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,	ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv6,	ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,	ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,	ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,	ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
			ICE_FDIR_INSET_VXLAN_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
			ICE_FDIR_INSET_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
			ICE_FDIR_INSET_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
			ICE_FDIR_INSET_VXLAN_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_FDIR_INSET_VXLAN_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_FDIR_INSET_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_FDIR_INSET_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
			ICE_FDIR_INSET_VXLAN_IPV4_SCTP,	ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
	{pattern_eth_ipv4,	ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,	ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv6,	ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,	ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,	ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,	ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
			ICE_FDIR_INSET_VXLAN_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
			ICE_FDIR_INSET_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
			ICE_FDIR_INSET_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
			ICE_FDIR_INSET_VXLAN_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
			ICE_FDIR_INSET_VXLAN_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
			ICE_FDIR_INSET_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
			ICE_FDIR_INSET_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
			ICE_FDIR_INSET_VXLAN_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4,
			ICE_FDIR_INSET_GTPU_IPV4,	ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

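/* Look up an existing memzone by name or, if none exists, reserve a
 * new IOVA-contiguous one; used for the FDIR programming packet so a
 * later re-setup reuses the zone instead of leaking it.
 */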
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"

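/* Allocate the per-flow-type profile table (hw->fdir_prof) and one
 * ice_fd_hw_prof slot per filter type; on failure, every slot
 * allocated so far is released again.
 */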
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype;
	     fltr_ptype++) {
		rte_free(hw->fdir_prof[fltr_ptype]);
		hw->fdir_prof[fltr_ptype] = NULL;
	}
	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;

	return -ENOMEM;
}

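/* Add one block of hardware counters to the pool container: the pool
 * covers `len` counters starting at hardware index `index_start`, all
 * initially linked on the pool's free list.
 */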
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	rte_free(pool);
	return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++) {
		rte_free(container->pools[i]);
		container->pools[i] = NULL;
	}

	TAILQ_INIT(&container->pool_list);
	container->index_free = 0;

	return 0;
}

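/* Search every pool for a shared counter already allocated with the
 * given ID, so that rules sharing a COUNT action reference a single
 * hardware counter.
 */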
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
				 *container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}

static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}

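/* Create the per-port FDIR software state: a CRC-hashed rte_hash table
 * keyed by ice_fdir_fltr_pattern plus a flat hash_map array that maps
 * a hash position back to the stored filter entry.
 */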
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);

	fdir_info->hash_map = NULL;
	fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		return -EINVAL;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/* FDIR TX queue setup */
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* Enable FDIR MSIX interrupt */
	vsi->nb_used_qps = 1;
	ice_vsi_queues_bind_intr(vsi);
	ice_vsi_enable_queues_intr(vsi);

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;
	pf->fdir.mz = mz;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_prof;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_prof:
	rte_memzone_free(pf->fdir.mz);
	pf->fdir.mz = NULL;
fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		rte_free(hw->fdir_prof[ptype]);
		hw->fdir_prof[ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	ice_vsi_disable_queues_intr(vsi);

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	if (pf->fdir.mz) {
		err = rte_memzone_free(pf->fdir.mz);
		pf->fdir.mz = NULL;
		if (err)
			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
	}
}

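/* Check a new input set against the profile currently installed for
 * this flow type: an identical input set means the profile can be
 * reused (-EEXIST), a different input set with live rules is a hard
 * conflict (-EINVAL), and an empty conflicting profile is removed.
 */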
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
			   enum ice_fltr_ptype ptype,
			   struct ice_flow_seg_info *seg,
			   bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!ori_seg)
		return 0;

	/* if no input set conflict, return -EEXIST */
	if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
	    (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
		PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
			    ptype);
		return -EEXIST;
	}

	/* a rule with a conflicting input set already exists, so give up */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
		PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
			    ptype);
		return -EINVAL;
	}

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);
	return 0;
}

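/* Try to remove the profile of a conflicting flow type; this only
 * succeeds while that profile has no rules attached to it.
 */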
static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
			       enum ice_fltr_ptype ptype,
			       bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_seg_info *seg;

	hw_prof = hw->fdir_prof[ptype];
	seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!seg)
		return true;

	/* profile exists and rule exists, fail to resolve the conflict */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
		return false;

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);

	return true;
}

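/* Check for conflicts between overlapping flow types: a specific
 * profile (e.g. IPV4_UDP) conflicts with the wider IPV4_OTHER profile
 * and vice versa, since both could match the same packet. Empty
 * conflicting profiles are removed on the fly.
 */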
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
			     enum ice_fltr_ptype ptype,
			     bool is_tunnel)
{
	enum ice_fltr_ptype cflct_ptype;

	switch (ptype) {
	/* IPv4 */
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv4 GTPU */
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv6 */
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	default:
		break;
	}

	return 0;
err:
	PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
		    ptype, cflct_ptype);
	return -EINVAL;
}

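/* Program one flow profile plus two flow entries (one for the main
 * VSI, one for the FDIR control VSI) into the HW flow director block,
 * then record the handles in the software profile table so they can be
 * removed later.
 */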
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	/* check for an input set conflict on the current profile */
	ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
	if (ret)
		return ret;

	/* check whether the profile conflicts with other profiles */
	ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
	if (ret)
		return ret;

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	hw_prof = hw->fdir_prof[ptype];
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}

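/* Translate the driver's input-set bitmap into the list of base-code
 * ice_flow_field indexes consumed by ice_flow_set_fld(); the caller
 * pre-fills the array with ICE_FLOW_FIELD_IDX_MAX as a terminator.
 */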
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint16_t i, j;
	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}

static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t input_set, bool is_tunnel)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	int i, ret;

	if (!input_set)
		return -EINVAL;

	seg = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg));
	if (!seg) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
		field[i] = ICE_FLOW_FIELD_IDX_MAX;
	ice_fdir_input_set_parse(input_set, field);

	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}

	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
		ice_flow_set_fld(seg, field[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	if (!is_tunnel) {
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg, flow, false);
	} else {
		seg_tun = (struct ice_flow_seg_info *)
			ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
		if (!seg_tun) {
			PMD_DRV_LOG(ERR, "No memory can be allocated");
			rte_free(seg);
			return -ENOMEM;
		}
		rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg_tun, flow, true);
	}

	if (!ret) {
		return ret;
	} else if (ret < 0) {
		rte_free(seg);
		if (is_tunnel)
			rte_free(seg_tun);
		return (ret == -EEXIST) ? 0 : ret;
	} else {
		return ret;
	}
}

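/* Track active FDIR rule counts: the device-wide total kept in the HW
 * struct and the per-flow-type, per-tunnel-mode counts used above for
 * profile conflict detection.
 */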
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
		parser = &ice_fdir_parser_os;
	else
		return -EINVAL;

	return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else
		parser = &ice_fdir_parser_os;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}

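/* Program one FDIR rule into hardware (add == true) or remove it:
 * build the programming descriptor and the matching dummy packet, then
 * push both through the FDIR programming queue.
 */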
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}

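/* Build the lookup key for a filter from the fields that identify it:
 * flow type, IP addresses and masks, extended data/mask, GTPU
 * data/mask and the tunnel type.
 */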
static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
		      const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set, is_tun);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		goto free_entry;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
	flow->rule = NULL;

	rte_free(entry);

	return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
		     struct rte_flow *flow,
		     struct rte_flow_query_count *flow_stats,
		     struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL,
				   "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32 bits latches the high 32 bits into a shadow
	 * register. Reading the high 32 bits returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};

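/* Illustrative usage via testpmd's flow syntax (assuming port 0 and a
 * DDP package that supports these patterns; not part of this file):
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.20 \
 *       dst is 192.168.0.21 / udp src is 32 dst is 33 / end \
 *       actions queue index 2 / mark id 3 / count / end
 *
 * The generic flow layer routes such a rule to this engine, which
 * parses it with ice_fdir_parse() registered below.
 */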
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check if queue indexes for the queue region are continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values: "
				   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
				   "of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter->input.q_index = rss->queue[0];
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

	return 0;
}

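/* Walk the action list and fill in the rule's destination (queue,
 * drop, passthru or RSS queue region), optional MARK id and optional
 * COUNT configuration; exactly one destination action is required.
 */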
static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	const struct rte_flow_action_count *act_count;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	uint32_t counter_num = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
					pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			filter->input.q_index = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			ret = ice_fdir_parse_action_qregion(pf,
						error, actions, filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			counter_num++;

			act_count = actions->conf;
			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
			rte_memcpy(&filter->act_count, act_count,
				   sizeof(filter->act_count));
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	if (counter_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many count actions");
		return -rte_errno;
	}

	return 0;
}

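/* Walk the pattern items and derive the flow type, the matched field
 * values and the input-set bitmap. Note that src/dst fields are stored
 * swapped into filter->input (e.g. v4.dst_ip takes hdr.src_addr); this
 * mirrors the layout the base code's programming-packet builder
 * appears to expect.
 */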
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Src mac not supported");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid mac addr mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_SRC :
						     ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_DST :
						     ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

				vtc_flow_cpu =
				      rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_SRC_PORT :
						     ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_DST_PORT :
						     ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_SRC_PORT :
						     ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_DST_PORT :
						     ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_SRC_PORT :
						     ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_DST_PORT :
						     ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vxlan field");
				return -rte_errno;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX)
					input_set |= ICE_INSET_GTPU_TEID;

				filter->input.gtpu_data.teid = gtp_spec->teid;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX)
					input_set |= ICE_INSET_GTPU_QFI;

				filter->input.gtpu_data.qfi =
					gtp_psc_spec->qfi;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

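/* Top-level parse entry point registered with the generic flow layer:
 * match the pattern against the supported-pattern table, parse pattern
 * and actions into pf->fdir.conf, and hand the result back via *meta.
 */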
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	if (meta)
		*meta = filter;

	return 0;
}

static struct ice_flow_parser ice_fdir_parser_os = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_os,
	.array_len = RTE_DIM(ice_fdir_pattern_os),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_comms,
	.array_len = RTE_DIM(ice_fdir_pattern_comms),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}