/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"
#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
	ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_GTPU_EH_IPV4 (\
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
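
/* Patterns supported by the OS-default DDP package: plain IPv4/IPv6
 * 5-tuples plus VXLAN-tunneled IPv4 flows. The comms package table
 * below additionally supports GTP-U patterns.
 */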
static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
	{pattern_eth_ipv4,		ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,		ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,		ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,		ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv6,		ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,		ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,		ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,		ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
				ICE_FDIR_INSET_VXLAN_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
				ICE_FDIR_INSET_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
				ICE_FDIR_INSET_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
				ICE_FDIR_INSET_VXLAN_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
				ICE_FDIR_INSET_VXLAN_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
				ICE_FDIR_INSET_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
				ICE_FDIR_INSET_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
				ICE_FDIR_INSET_VXLAN_IPV4_SCTP,	ICE_INSET_NONE},
};
static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
	{pattern_eth_ipv4,		ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,		ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,		ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,		ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv6,		ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,		ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,		ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,		ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
				ICE_FDIR_INSET_VXLAN_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
				ICE_FDIR_INSET_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
				ICE_FDIR_INSET_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
				ICE_FDIR_INSET_VXLAN_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
				ICE_FDIR_INSET_VXLAN_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
				ICE_FDIR_INSET_VXLAN_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
				ICE_FDIR_INSET_VXLAN_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
				ICE_FDIR_INSET_VXLAN_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4,	ICE_FDIR_INSET_GTPU_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh_ipv4,
				ICE_FDIR_INSET_GTPU_EH_IPV4,	ICE_INSET_NONE},
};
static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;
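
/* Look the memzone up by name first so that a repeated setup reuses an
 * existing reservation instead of failing on a duplicate name.
 */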
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"
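
/* Allocate one ice_fd_hw_prof slot per filter ptype; on failure free
 * everything allocated so far.
 */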
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype;
	     fltr_ptype++) {
		rte_free(hw->fdir_prof[fltr_ptype]);
		hw->fdir_prof[fltr_ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;

	return -ENOMEM;
}
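
/* Carve the hardware counters [index_start, index_start + len) into a
 * pool and queue every counter on the pool's free list.
 */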
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	pool->len = len;
	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	rte_free(pool);
	return ret;
}
static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}
static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++) {
		rte_free(container->pools[i]);
		container->pools[i] = NULL;
	}
	TAILQ_INIT(&container->pool_list);
	container->index_free = 0;

	return 0;
}
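
/* Find an in-use shared counter with a matching ID so that several
 * rules can reference the same hardware counter.
 */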
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
				 *container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->ref_cnt &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}
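
/* Allocate a counter: reuse a shared counter with the same ID when
 * possible, otherwise take the first free counter from the pools.
 */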
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found\n");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}
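
/* Create the rte_hash table and the index-to-entry map that back the
 * software list of flow director filters.
 */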
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}
static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);

	fdir_info->hash_map = NULL;
	fdir_info->hash_table = NULL;
}
/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		return -EINVAL;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/*Fdir tx queue setup*/
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/*Fdir rx queue setup*/
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* Enable FDIR MSIX interrupt */
	vsi->nb_used_qps = 1;
	ice_vsi_queues_bind_intr(vsi);
	ice_vsi_enable_queues_intr(vsi);

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;
	pf->fdir.mz = mz;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_prof;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_prof:
	rte_memzone_free(pf->fdir.mz);
	pf->fdir.mz = NULL;
fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		rte_free(hw->fdir_prof[ptype]);
		hw->fdir_prof[ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;
}
/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}
/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}
/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	ice_vsi_disable_queues_intr(vsi);

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	if (pf->fdir.mz) {
		err = rte_memzone_free(pf->fdir.mz);
		pf->fdir.mz = NULL;
		if (err)
			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
	}
}
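
/* Compare the new segment against the profile already programmed for
 * this ptype: an identical input set means nothing to do (-EEXIST), a
 * conflicting profile with rules attached is fatal, and an empty
 * conflicting profile can simply be removed.
 */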
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
			   enum ice_fltr_ptype ptype,
			   struct ice_flow_seg_info *seg,
			   bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!ori_seg)
		return 0;

	/* if no input set conflict, return -EEXIST */
	if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
	    (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
		PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
			    ptype);
		return -EEXIST;
	}

	/* a rule with input set conflict already exists, so give up */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
		PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
			    ptype);
		return -EINVAL;
	}

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);
	return 0;
}
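
/* Returns true if the conflicting profile could be resolved (it did not
 * exist, or it was empty and has been removed), false otherwise.
 */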
static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
			       enum ice_fltr_ptype ptype,
			       bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_seg_info *seg;

	hw_prof = hw->fdir_prof[ptype];
	seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!seg)
		return true;

	/* profile exists and rule exists, fail to resolve the conflict */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
		return false;

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);

	return true;
}
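
/* Profiles of related ptypes (e.g. IPv4 UDP/TCP/SCTP vs. IPv4 OTHER)
 * can conflict in hardware, so try to remove empty conflicting profiles
 * before creating a new one.
 */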
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
			     enum ice_fltr_ptype ptype,
			     bool is_tunnel)
{
	enum ice_fltr_ptype cflct_ptype;

	switch (ptype) {
	/* IPv4 */
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv4 GTPU */
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv6 */
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	default:
		break;
	}

	return 0;
err:
	PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
		    ptype, cflct_ptype);
	return -EINVAL;
}
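
/* Program one flow profile plus two flow entries (main VSI and FDIR
 * control VSI) into the hardware FD block and record the handles.
 */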
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint64_t prof_id;
	uint16_t vsi_num;
	int ret;

	/* check for an input set conflict on the current profile */
	ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
	if (ret)
		return ret;

	/* check whether the profile conflicts with other profiles */
	ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
	if (ret)
		return ret;

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	hw_prof = hw->fdir_prof[ptype];
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}
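
/* Translate the ICE_INSET_* bitmap into the list of ice_flow_field
 * indexes consumed by ice_flow_set_fld().
 */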
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint16_t i, j;
	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t input_set, bool is_tunnel)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	int i, ret;

	if (!input_set)
		return -EINVAL;

	seg = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg));
	if (!seg) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
		field[i] = ICE_FLOW_FIELD_IDX_MAX;
	ice_fdir_input_set_parse(input_set, field);

	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	default:
		PMD_DRV_LOG(ERR, "not supported filter type.");
		break;
	}

	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
		ice_flow_set_fld(seg, field[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	if (!is_tunnel) {
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg, flow, false);
	} else {
		seg_tun = (struct ice_flow_seg_info *)
			ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
		if (!seg_tun) {
			PMD_DRV_LOG(ERR, "No memory can be allocated");
			rte_free(seg);
			return -ENOMEM;
		}
		rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg_tun, flow, true);
	}

	if (!ret) {
		return ret;
	} else if (ret < 0) {
		rte_free(seg);
		if (is_tunnel)
			rte_free(seg_tun);
		return (ret == -EEXIST) ? 0 : ret;
	} else {
		return ret;
	}
}
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}
static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
		parser = &ice_fdir_parser_os;
	else
		return -EINVAL;

	return ice_register_parser(parser, ad);
}
static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else
		parser = &ice_fdir_parser_os;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}
static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}
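
/* Program (add == true) or remove one filter rule: build the
 * programming descriptor and the matching dummy packet, then push them
 * through the FDIR programming queue.
 */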
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}
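
/* Build the hash key used for software-list lookups from the fields
 * that uniquely identify a filter rule.
 */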
static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}
/* Check whether the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
		      const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}
/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}
/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}
static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set, is_tun);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		goto free_entry;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}
static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
	flow->rule = NULL;

	rte_free(filter);

	return 0;
}
static int
ice_fdir_query_count(struct ice_adapter *ad,
		     struct rte_flow *flow,
		     struct rte_flow_query_count *flow_stats,
		     struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL,
				   "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32 bits latches the high 32 bits into a shadow
	 * register. Reading the high 32 bits returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}
static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};
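
/* The RSS action is reused to describe a queue region: the queues must
 * be continuous and the region size a power of two no larger than
 * ICE_FDIR_MAX_QREGION_SIZE.
 */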
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check if queue indexes for queue region are continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values: "
				   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
				   "of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter->input.q_index = rss->queue[0];
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

	return 0;
}
static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	const struct rte_flow_action_count *act_count;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	uint32_t counter_num = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
					pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			filter->input.q_index = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			ret = ice_fdir_parse_action_qregion(pf,
						error, actions, filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			counter_num++;

			act_count = actions->conf;
			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
			rte_memcpy(&filter->act_count, act_count,
				   sizeof(filter->act_count));
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	if (counter_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many count actions");
		return -rte_errno;
	}

	return 0;
}
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Src mac not support");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid mac addr mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_SRC :
						     ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_DST :
						     ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

				/* Get filter info */
				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				/* Get filter info */
				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

				vtc_flow_cpu =
					rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_SRC_PORT :
						     ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_DST_PORT :
						     ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_SRC_PORT :
						     ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_DST_PORT :
						     ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_SRC_PORT :
						     ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_DST_PORT :
						     ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vxlan field");
				return -rte_errno;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX)
					input_set |= ICE_INSET_GTPU_TEID;

				filter->input.gtpu_data.teid = gtp_spec->teid;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX)
					input_set |= ICE_INSET_GTPU_QFI;

				filter->input.gtpu_data.qfi =
					gtp_psc_spec->qfi;
			}
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}
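
/* Top-level parse callback: match the pattern against the supported
 * table, then fill the filter from the pattern and actions; the parsed
 * filter is handed back through *meta for ice_fdir_create_filter().
 */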
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}
	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	if (meta)
		*meta = filter;

	return 0;
}
static struct ice_flow_parser ice_fdir_parser_os = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_os,
	.array_len = RTE_DIM(ice_fdir_pattern_os),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_comms,
	.array_len = RTE_DIM(ice_fdir_pattern_comms),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}