1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
8 #include <rte_hash_crc.h>
9 #include "base/ice_fdir.h"
10 #include "base/ice_flow.h"
11 #include "base/ice_type.h"
12 #include "ice_ethdev.h"
14 #include "ice_generic_flow.h"
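/* The IPv6 Traffic Class field occupies bits 20..27 of the vtc_flow word
 * once byte-swapped to CPU order (version:4 | TC:8 | flow label:20), hence
 * the offset and mask below.
 */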
16 #define ICE_FDIR_IPV6_TC_OFFSET 20
17 #define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
19 #define ICE_FDIR_MAX_QREGION_SIZE 128
21 #define ICE_FDIR_INSET_ETH (\
24 #define ICE_FDIR_INSET_ETH_IPV4 (\
26 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
27 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
29 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
30 ICE_FDIR_INSET_ETH_IPV4 | \
31 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
33 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
34 ICE_FDIR_INSET_ETH_IPV4 | \
35 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
37 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
38 ICE_FDIR_INSET_ETH_IPV4 | \
39 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
41 #define ICE_FDIR_INSET_ETH_IPV6 (\
43 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
44 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
46 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
47 ICE_FDIR_INSET_ETH_IPV6 | \
48 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
50 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
51 ICE_FDIR_INSET_ETH_IPV6 | \
52 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
54 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
55 ICE_FDIR_INSET_ETH_IPV6 | \
56 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
58 #define ICE_FDIR_INSET_VXLAN_IPV4 (\
59 ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)
61 #define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
62 ICE_FDIR_INSET_VXLAN_IPV4 | \
63 ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)
65 #define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
66 ICE_FDIR_INSET_VXLAN_IPV4 | \
67 ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)
69 #define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
70 ICE_FDIR_INSET_VXLAN_IPV4 | \
71 ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)
73 #define ICE_FDIR_INSET_GTPU (\
74 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)
76 #define ICE_FDIR_INSET_GTPU_EH (\
77 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
78 ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
80 static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
81 {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
82 {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
83 {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
84 {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
85 {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
86 {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
87 {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
88 {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
89 {pattern_eth_ipv4_udp_vxlan_ipv4,
90 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
91 {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
92 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
93 {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
94 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
95 {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
96 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
97 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
98 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
99 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
100 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
101 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
102 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
103 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
104 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
107 static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
108 {pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE},
109 {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
110 {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
111 {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
112 {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
113 {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
114 {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
115 {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
116 {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
117 {pattern_eth_ipv4_udp_vxlan_ipv4,
118 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
119 {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
120 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
121 {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
122 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
123 {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
124 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
125 {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
126 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
127 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
128 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
129 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
130 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
131 {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
132 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
133 {pattern_eth_ipv4_gtpu, ICE_FDIR_INSET_GTPU, ICE_INSET_NONE},
134 {pattern_eth_ipv4_gtpu_eh, ICE_FDIR_INSET_GTPU_EH, ICE_INSET_NONE},
137 static struct ice_flow_parser ice_fdir_parser_os;
138 static struct ice_flow_parser ice_fdir_parser_comms;
141 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);
143 static const struct rte_memzone *
144 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
146 const struct rte_memzone *mz;
148 mz = rte_memzone_lookup(name);
152 return rte_memzone_reserve_aligned(name, len, socket_id,
153 RTE_MEMZONE_IOVA_CONTIG,
154 ICE_RING_BASE_ALIGN);
157 #define ICE_FDIR_MZ_NAME "FDIR_MEMZONE"
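/* Allocate one ice_fd_hw_prof slot per FDIR filter packet type. If any
 * allocation fails, all slots allocated so far are freed again so the
 * caller never sees a half-initialized profile table.
 */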
160 ice_fdir_prof_alloc(struct ice_hw *hw)
162 enum ice_fltr_ptype ptype, fltr_ptype;
164 if (!hw->fdir_prof) {
165 hw->fdir_prof = (struct ice_fd_hw_prof **)
166 ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
167 sizeof(*hw->fdir_prof));
171 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
172 ptype < ICE_FLTR_PTYPE_MAX;
174 if (!hw->fdir_prof[ptype]) {
175 hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
176 ice_malloc(hw, sizeof(**hw->fdir_prof));
177 if (!hw->fdir_prof[ptype])
184 for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
187 rte_free(hw->fdir_prof[fltr_ptype]);
188 hw->fdir_prof[fltr_ptype] = NULL;
191 rte_free(hw->fdir_prof);
192 hw->fdir_prof = NULL;
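/* A counter pool wraps a contiguous block of hardware counter indexes
 * [index_start, index_start + len) into a free list that
 * ice_fdir_counter_alloc() below draws from.
 */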
198 ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
199 struct ice_fdir_counter_pool_container *container,
200 uint32_t index_start,
203 struct ice_fdir_counter_pool *pool;
207 pool = rte_zmalloc("ice_fdir_counter_pool",
209 sizeof(struct ice_fdir_counter) * len,
213 "Failed to allocate memory for fdir counter pool");
217 TAILQ_INIT(&pool->counter_list);
218 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
220 for (i = 0; i < len; i++) {
221 struct ice_fdir_counter *counter = &pool->counters[i];
223 counter->hw_index = index_start + i;
224 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
227 if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
228 PMD_INIT_LOG(ERR, "FDIR counter pool is full");
233 container->pools[container->index_free++] = pool;
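/* Carve the PF's FDIR counter block, starting at hw->fd_ctr_base, into a
 * single pool of ICE_FDIR_COUNTERS_PER_BLOCK counters.
 */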
242 ice_fdir_counter_init(struct ice_pf *pf)
244 struct ice_hw *hw = ICE_PF_TO_HW(pf);
245 struct ice_fdir_info *fdir_info = &pf->fdir;
246 struct ice_fdir_counter_pool_container *container =
248 uint32_t cnt_index, len;
251 TAILQ_INIT(&container->pool_list);
253 cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
254 len = ICE_FDIR_COUNTERS_PER_BLOCK;
256 ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
258 PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
266 ice_fdir_counter_release(struct ice_pf *pf)
268 struct ice_fdir_info *fdir_info = &pf->fdir;
269 struct ice_fdir_counter_pool_container *container =
273 for (i = 0; i < container->index_free; i++) {
274 rte_free(container->pools[i]);
275 container->pools[i] = NULL;
278 TAILQ_INIT(&container->pool_list);
279 container->index_free = 0;
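/* Look up a shared counter by user-supplied id so that several rules can
 * report through the same hardware counter.
 */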
284 static struct ice_fdir_counter *
285 ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
289 struct ice_fdir_counter_pool *pool;
290 struct ice_fdir_counter *counter;
293 TAILQ_FOREACH(pool, &container->pool_list, next) {
294 for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
295 counter = &pool->counters[i];
297 if (counter->shared &&
307 static struct ice_fdir_counter *
308 ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
310 struct ice_hw *hw = ICE_PF_TO_HW(pf);
311 struct ice_fdir_info *fdir_info = &pf->fdir;
312 struct ice_fdir_counter_pool_container *container =
314 struct ice_fdir_counter_pool *pool = NULL;
315 struct ice_fdir_counter *counter_free = NULL;
318 counter_free = ice_fdir_counter_shared_search(container, id);
320 if (counter_free->ref_cnt + 1 == 0) {
324 counter_free->ref_cnt++;
329 TAILQ_FOREACH(pool, &container->pool_list, next) {
330 counter_free = TAILQ_FIRST(&pool->counter_list);
337 PMD_DRV_LOG(ERR, "No free counter found\n");
341 counter_free->shared = shared;
342 counter_free->id = id;
343 counter_free->ref_cnt = 1;
344 counter_free->pool = pool;
346 /* reset statistic counter value */
347 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
348 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
350 TAILQ_REMOVE(&pool->counter_list, counter_free, next);
351 if (TAILQ_EMPTY(&pool->counter_list)) {
352 TAILQ_REMOVE(&container->pool_list, pool, next);
353 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
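/* Drop one reference; the counter returns to its pool's free list only
 * when the last user is gone.
 */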
360 ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
361 struct ice_fdir_counter *counter)
366 if (--counter->ref_cnt == 0) {
367 struct ice_fdir_counter_pool *pool = counter->pool;
369 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
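/* The SW filter list is an rte_hash table keyed by ice_fdir_fltr_pattern,
 * plus a flat hash_map array that maps the returned key index back to the
 * filter entry.
 */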
374 ice_fdir_init_filter_list(struct ice_pf *pf)
376 struct rte_eth_dev *dev = pf->adapter->eth_dev;
377 struct ice_fdir_info *fdir_info = &pf->fdir;
378 char fdir_hash_name[RTE_HASH_NAMESIZE];
381 struct rte_hash_parameters fdir_hash_params = {
382 .name = fdir_hash_name,
383 .entries = ICE_MAX_FDIR_FILTER_NUM,
384 .key_len = sizeof(struct ice_fdir_fltr_pattern),
385 .hash_func = rte_hash_crc,
386 .hash_func_init_val = 0,
387 .socket_id = rte_socket_id(),
388 .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
391 /* Initialize hash */
392 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
393 "fdir_%s", dev->device->name);
394 fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
395 if (!fdir_info->hash_table) {
396 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
399 fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
400 sizeof(*fdir_info->hash_map) *
401 ICE_MAX_FDIR_FILTER_NUM,
403 if (!fdir_info->hash_map) {
405 "Failed to allocate memory for fdir hash map!");
407 goto err_fdir_hash_map_alloc;
411 err_fdir_hash_map_alloc:
412 rte_hash_free(fdir_info->hash_table);
418 ice_fdir_release_filter_list(struct ice_pf *pf)
420 struct ice_fdir_info *fdir_info = &pf->fdir;
422 if (fdir_info->hash_map)
423 rte_free(fdir_info->hash_map);
424 if (fdir_info->hash_table)
425 rte_hash_free(fdir_info->hash_table);
427 fdir_info->hash_map = NULL;
428 fdir_info->hash_table = NULL;
432 * ice_fdir_setup - reserve and initialize the Flow Director resources
433 * @pf: board private structure
436 ice_fdir_setup(struct ice_pf *pf)
438 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
439 struct ice_hw *hw = ICE_PF_TO_HW(pf);
440 const struct rte_memzone *mz = NULL;
441 char z_name[RTE_MEMZONE_NAMESIZE];
443 int err = ICE_SUCCESS;
445 if ((pf->flags & ICE_FLAG_FDIR) == 0) {
446 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
450 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
451 " fd_fltr_best_effort = %u.",
452 hw->func_caps.fd_fltr_guar,
453 hw->func_caps.fd_fltr_best_effort);
455 if (pf->fdir.fdir_vsi) {
456 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
460 /* make new FDIR VSI */
461 vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
463 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
466 pf->fdir.fdir_vsi = vsi;
468 err = ice_fdir_init_filter_list(pf);
470 PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
474 err = ice_fdir_counter_init(pf);
476 PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
480 /* FDIR TX queue setup */
481 err = ice_fdir_setup_tx_resources(pf);
483 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
487 /* FDIR RX queue setup */
488 err = ice_fdir_setup_rx_resources(pf);
490 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
494 err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
496 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
500 err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
502 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
506 /* Enable FDIR MSIX interrupt */
507 vsi->nb_used_qps = 1;
508 ice_vsi_queues_bind_intr(vsi);
509 ice_vsi_enable_queues_intr(vsi);
511 /* reserve memory for the fdir programming packet */
512 snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
514 eth_dev->data->port_id);
515 mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
517 PMD_DRV_LOG(ERR, "Cannot init memzone for "
518 "flow director program packet.");
522 pf->fdir.prg_pkt = mz->addr;
523 pf->fdir.dma_addr = mz->iova;
526 err = ice_fdir_prof_alloc(hw);
528 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
529 "flow director profile.");
534 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
539 rte_memzone_free(pf->fdir.mz);
542 ice_rx_queue_release(pf->fdir.rxq);
545 ice_tx_queue_release(pf->fdir.txq);
548 ice_release_vsi(vsi);
549 pf->fdir.fdir_vsi = NULL;
554 ice_fdir_prof_free(struct ice_hw *hw)
556 enum ice_fltr_ptype ptype;
558 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
559 ptype < ICE_FLTR_PTYPE_MAX;
561 rte_free(hw->fdir_prof[ptype]);
562 hw->fdir_prof[ptype] = NULL;
565 rte_free(hw->fdir_prof);
566 hw->fdir_prof = NULL;
569 /* Remove a profile for a given filter type */
571 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
573 struct ice_hw *hw = ICE_PF_TO_HW(pf);
574 struct ice_fd_hw_prof *hw_prof;
579 if (!hw->fdir_prof || !hw->fdir_prof[ptype])
582 hw_prof = hw->fdir_prof[ptype];
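/* Tunnel profiles live in a second ID space stacked on top of the
 * non-tunnel one, so the two never collide.
 */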
584 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
585 for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
586 if (hw_prof->entry_h[i][is_tunnel]) {
587 vsi_num = ice_get_hw_vsi_num(hw,
589 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
591 ice_flow_rem_entry(hw, ICE_BLK_FD,
592 hw_prof->entry_h[i][is_tunnel]);
593 hw_prof->entry_h[i][is_tunnel] = 0;
596 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
597 rte_free(hw_prof->fdir_seg[is_tunnel]);
598 hw_prof->fdir_seg[is_tunnel] = NULL;
600 for (i = 0; i < hw_prof->cnt; i++)
601 hw_prof->vsi_h[i] = 0;
602 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
605 /* Remove all created profiles */
607 ice_fdir_prof_rm_all(struct ice_pf *pf)
609 enum ice_fltr_ptype ptype;
611 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
612 ptype < ICE_FLTR_PTYPE_MAX;
614 ice_fdir_prof_rm(pf, ptype, false);
615 ice_fdir_prof_rm(pf, ptype, true);
620 * ice_fdir_teardown - release the Flow Director resources
621 * @pf: board private structure
624 ice_fdir_teardown(struct ice_pf *pf)
626 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
627 struct ice_hw *hw = ICE_PF_TO_HW(pf);
631 vsi = pf->fdir.fdir_vsi;
635 ice_vsi_disable_queues_intr(vsi);
637 err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
639 PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
641 err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
643 PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
645 err = ice_fdir_counter_release(pf);
647 PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
649 ice_fdir_release_filter_list(pf);
651 ice_tx_queue_release(pf->fdir.txq);
653 ice_rx_queue_release(pf->fdir.rxq);
655 ice_fdir_prof_rm_all(pf);
656 ice_fdir_prof_free(hw);
657 ice_release_vsi(vsi);
658 pf->fdir.fdir_vsi = NULL;
661 err = rte_memzone_free(pf->fdir.mz);
664 PMD_DRV_LOG(ERR, "Failed to free FDIR memezone.");
669 ice_fdir_cur_prof_conflict(struct ice_pf *pf,
670 enum ice_fltr_ptype ptype,
671 struct ice_flow_seg_info *seg,
674 struct ice_hw *hw = ICE_PF_TO_HW(pf);
675 struct ice_flow_seg_info *ori_seg;
676 struct ice_fd_hw_prof *hw_prof;
678 hw_prof = hw->fdir_prof[ptype];
679 ori_seg = hw_prof->fdir_seg[is_tunnel];
681 /* profile does not exist */
685 /* if no input set conflict, return -EEXIST */
686 if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
687 (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
688 PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
693 /* a rule with an input set conflict already exists, so give up */
694 if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
695 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
700 /* it's safe to delete an empty profile */
701 ice_fdir_prof_rm(pf, ptype, is_tunnel);
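/* A profile may only be torn down while no rule references it: succeed if
 * the profile is absent or empty, fail otherwise.
 */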
706 ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
707 enum ice_fltr_ptype ptype,
710 struct ice_hw *hw = ICE_PF_TO_HW(pf);
711 struct ice_fd_hw_prof *hw_prof;
712 struct ice_flow_seg_info *seg;
714 hw_prof = hw->fdir_prof[ptype];
715 seg = hw_prof->fdir_seg[is_tunnel];
717 /* profile does not exist */
721 /* profile exists and rule exists, fail to resolve the conflict */
722 if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
725 /* it's safe to delete an empty profile */
726 ice_fdir_prof_rm(pf, ptype, is_tunnel);
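/* The FD block cannot hold an L4-specific profile and the corresponding
 * L3 "other" profile of the same flow family at the same time, so remove
 * whichever empty profile stands in the way of the one being created.
 */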
732 ice_fdir_cross_prof_conflict(struct ice_pf *pf,
733 enum ice_fltr_ptype ptype,
736 enum ice_fltr_ptype cflct_ptype;
740 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
741 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
742 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
743 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
744 if (!ice_fdir_prof_resolve_conflict
745 (pf, cflct_ptype, is_tunnel))
748 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
749 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
750 if (!ice_fdir_prof_resolve_conflict
751 (pf, cflct_ptype, is_tunnel))
753 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
754 if (!ice_fdir_prof_resolve_conflict
755 (pf, cflct_ptype, is_tunnel))
757 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
758 if (!ice_fdir_prof_resolve_conflict
759 (pf, cflct_ptype, is_tunnel))
763 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
764 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
765 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
766 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
767 if (!ice_fdir_prof_resolve_conflict
768 (pf, cflct_ptype, is_tunnel))
771 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
772 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
773 if (!ice_fdir_prof_resolve_conflict
774 (pf, cflct_ptype, is_tunnel))
776 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
777 if (!ice_fdir_prof_resolve_conflict
778 (pf, cflct_ptype, is_tunnel))
780 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
781 if (!ice_fdir_prof_resolve_conflict
782 (pf, cflct_ptype, is_tunnel))
786 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
787 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
788 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
789 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
790 if (!ice_fdir_prof_resolve_conflict
791 (pf, cflct_ptype, is_tunnel))
794 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
795 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
796 if (!ice_fdir_prof_resolve_conflict
797 (pf, cflct_ptype, is_tunnel))
799 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
800 if (!ice_fdir_prof_resolve_conflict
801 (pf, cflct_ptype, is_tunnel))
803 cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
804 if (!ice_fdir_prof_resolve_conflict
805 (pf, cflct_ptype, is_tunnel))
813 PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
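/* Install one flow profile plus two flow entries: one steering on the main
 * VSI and one on the FDIR control VSI that carries the programming packets.
 */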
819 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
820 struct ice_vsi *ctrl_vsi,
821 struct ice_flow_seg_info *seg,
822 enum ice_fltr_ptype ptype,
825 struct ice_hw *hw = ICE_PF_TO_HW(pf);
826 enum ice_flow_dir dir = ICE_FLOW_RX;
827 struct ice_fd_hw_prof *hw_prof;
828 struct ice_flow_prof *prof;
829 uint64_t entry_1 = 0;
830 uint64_t entry_2 = 0;
835 /* check if the current profile has an input set conflict. */
836 ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
840 /* check if the profile conflicts with other profiles. */
841 ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
845 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
846 ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
847 (is_tunnel) ? 2 : 1, NULL, 0, &prof);
850 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
851 vsi->idx, ICE_FLOW_PRIO_NORMAL,
852 seg, NULL, 0, &entry_1);
854 PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
858 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
859 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
860 seg, NULL, 0, &entry_2);
862 PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
867 hw_prof = hw->fdir_prof[ptype];
868 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
870 hw_prof->fdir_seg[is_tunnel] = seg;
871 hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
872 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
873 pf->hw_prof_cnt[ptype][is_tunnel]++;
874 hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
875 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
876 pf->hw_prof_cnt[ptype][is_tunnel]++;
881 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
882 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
883 ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
885 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
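/* Translate ICE_INSET_* bits into the ice_flow_field indexes understood by
 * base code; tunnel and non-tunnel insets map onto the same inner fields.
 */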
891 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
895 struct ice_inset_map {
897 enum ice_flow_field fld;
899 static const struct ice_inset_map ice_inset_map[] = {
900 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
901 {ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
902 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
903 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
904 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
905 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
906 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
907 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
908 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
909 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
910 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
911 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
912 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
913 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
914 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
915 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
916 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
917 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
918 {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
919 {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
920 {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
921 {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
922 {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
923 {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
924 {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
925 {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
926 {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
927 {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
930 for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
931 if ((inset & ice_inset_map[i].inset) ==
932 ice_inset_map[i].inset)
933 field[j++] = ice_inset_map[i].fld;
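/* Build the flow segment(s) for a filter type: a single segment for plain
 * packets, an outer/inner segment pair for tunnel packets, then hand them
 * to ice_fdir_hw_tbl_conf().
 */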
938 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
939 uint64_t input_set, enum ice_fdir_tunnel_type ttype)
941 struct ice_flow_seg_info *seg;
942 struct ice_flow_seg_info *seg_tun = NULL;
943 enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
950 seg = (struct ice_flow_seg_info *)
951 ice_malloc(hw, sizeof(*seg));
953 PMD_DRV_LOG(ERR, "No memory can be allocated");
957 for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
958 field[i] = ICE_FLOW_FIELD_IDX_MAX;
959 ice_fdir_input_set_parse(input_set, field);
962 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
963 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
964 ICE_FLOW_SEG_HDR_IPV4 |
965 ICE_FLOW_SEG_HDR_IPV_OTHER);
967 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
968 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
969 ICE_FLOW_SEG_HDR_IPV4 |
970 ICE_FLOW_SEG_HDR_IPV_OTHER);
972 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
973 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
974 ICE_FLOW_SEG_HDR_IPV4 |
975 ICE_FLOW_SEG_HDR_IPV_OTHER);
977 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
978 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
979 ICE_FLOW_SEG_HDR_IPV_OTHER);
981 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
982 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
983 ICE_FLOW_SEG_HDR_IPV6 |
984 ICE_FLOW_SEG_HDR_IPV_OTHER);
986 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
987 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
988 ICE_FLOW_SEG_HDR_IPV6 |
989 ICE_FLOW_SEG_HDR_IPV_OTHER);
991 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
992 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
993 ICE_FLOW_SEG_HDR_IPV6 |
994 ICE_FLOW_SEG_HDR_IPV_OTHER);
996 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
997 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
998 ICE_FLOW_SEG_HDR_IPV_OTHER);
1000 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
1001 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
1002 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
1003 case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
1004 if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU)
1005 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
1006 ICE_FLOW_SEG_HDR_IPV4 |
1007 ICE_FLOW_SEG_HDR_IPV_OTHER);
1008 else if (ttype == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
1009 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
1010 ICE_FLOW_SEG_HDR_GTPU_IP |
1011 ICE_FLOW_SEG_HDR_IPV4 |
1012 ICE_FLOW_SEG_HDR_IPV_OTHER);
1014 PMD_DRV_LOG(ERR, "not supported tunnel type.");
1016 case ICE_FLTR_PTYPE_NON_IP_L2:
1017 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
1020 PMD_DRV_LOG(ERR, "not supported filter type.");
1024 for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
1025 ice_flow_set_fld(seg, field[i],
1026 ICE_FLOW_FLD_OFF_INVAL,
1027 ICE_FLOW_FLD_OFF_INVAL,
1028 ICE_FLOW_FLD_OFF_INVAL, false);
1031 is_tunnel = ice_fdir_is_tunnel_profile(ttype);
1033 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1036 seg_tun = (struct ice_flow_seg_info *)
1037 ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
1039 PMD_DRV_LOG(ERR, "No memory can be allocated");
1043 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
1044 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
1045 seg_tun, flow, true);
1050 } else if (ret < 0) {
1054 return (ret == -EEXIST) ? 0 : ret;
1061 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
1062 bool is_tunnel, bool add)
1064 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1067 cnt = (add) ? 1 : -1;
1068 hw->fdir_active_fltr += cnt;
1069 if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
1070 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
1072 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
1076 ice_fdir_init(struct ice_adapter *ad)
1078 struct ice_pf *pf = &ad->pf;
1079 struct ice_flow_parser *parser;
1082 if (ad->hw.dcf_enabled)
1085 ret = ice_fdir_setup(pf);
1089 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1090 parser = &ice_fdir_parser_comms;
1091 else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
1092 parser = &ice_fdir_parser_os;
1096 return ice_register_parser(parser, ad);
1100 ice_fdir_uninit(struct ice_adapter *ad)
1102 struct ice_pf *pf = &ad->pf;
1103 struct ice_flow_parser *parser;
1105 if (ad->hw.dcf_enabled)
1108 if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
1109 parser = &ice_fdir_parser_comms;
1111 parser = &ice_fdir_parser_os;
1113 ice_unregister_parser(parser, ad);
1115 ice_fdir_teardown(pf);
1119 ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
1121 if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
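/* Program or remove one filter: build the programming descriptor, generate
 * the matching dummy packet into the reserved memzone, then push both
 * through the FDIR programming queue.
 */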
1128 ice_fdir_add_del_filter(struct ice_pf *pf,
1129 struct ice_fdir_filter_conf *filter,
1132 struct ice_fltr_desc desc;
1133 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1134 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
1138 filter->input.dest_vsi = pf->main_vsi->idx;
1140 memset(&desc, 0, sizeof(desc));
1141 filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
1142 ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
1144 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1146 memset(pkt, 0, ICE_FDIR_PKT_LEN);
1147 ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
1149 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
1153 return ice_fdir_programming(pf, &desc);
1157 ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
1158 struct ice_fdir_filter_conf *filter)
1160 struct ice_fdir_fltr *input = &filter->input;
1161 memset(key, 0, sizeof(*key));
1163 key->flow_type = input->flow_type;
1164 rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
1165 rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
1166 rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
1167 rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
1169 rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
1170 rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));
1172 key->tunnel_type = filter->tunnel_type;
1175 /* Check if the flow director filter exists */
1176 static struct ice_fdir_filter_conf *
1177 ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
1178 const struct ice_fdir_fltr_pattern *key)
1182 ret = rte_hash_lookup(fdir_info->hash_table, key);
1186 return fdir_info->hash_map[ret];
1189 /* Add a flow director entry into the SW list */
1191 ice_fdir_entry_insert(struct ice_pf *pf,
1192 struct ice_fdir_filter_conf *entry,
1193 struct ice_fdir_fltr_pattern *key)
1195 struct ice_fdir_info *fdir_info = &pf->fdir;
1198 ret = rte_hash_add_key(fdir_info->hash_table, key);
1201 "Failed to insert fdir entry to hash table %d!",
1205 fdir_info->hash_map[ret] = entry;
1210 /* Delete a flow director entry from the SW list */
1212 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
1214 struct ice_fdir_info *fdir_info = &pf->fdir;
1217 ret = rte_hash_del_key(fdir_info->hash_table, key);
1220 "Failed to delete fdir filter to hash table %d!",
1224 fdir_info->hash_map[ret] = NULL;
1230 ice_fdir_create_filter(struct ice_adapter *ad,
1231 struct rte_flow *flow,
1233 struct rte_flow_error *error)
1235 struct ice_pf *pf = &ad->pf;
1236 struct ice_fdir_filter_conf *filter = meta;
1237 struct ice_fdir_info *fdir_info = &pf->fdir;
1238 struct ice_fdir_filter_conf *entry, *node;
1239 struct ice_fdir_fltr_pattern key;
1243 ice_fdir_extract_fltr_key(&key, filter);
1244 node = ice_fdir_entry_lookup(fdir_info, &key);
1246 rte_flow_error_set(error, EEXIST,
1247 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1248 "Rule already exists!");
1252 entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
1254 rte_flow_error_set(error, ENOMEM,
1255 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1256 "Failed to allocate memory");
1260 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1262 ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
1263 filter->input_set, filter->tunnel_type);
1265 rte_flow_error_set(error, -ret,
1266 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1267 "Profile configure failed.");
1271 /* alloc counter for FDIR */
1272 if (filter->input.cnt_ena) {
1273 struct rte_flow_action_count *act_count = &filter->act_count;
1275 filter->counter = ice_fdir_counter_alloc(pf,
1278 if (!filter->counter) {
1279 rte_flow_error_set(error, EINVAL,
1280 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1281 "Failed to alloc FDIR counter.");
1284 filter->input.cnt_index = filter->counter->hw_index;
1287 ret = ice_fdir_add_del_filter(pf, filter, true);
1289 rte_flow_error_set(error, -ret,
1290 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1291 "Add filter rule failed.");
1295 rte_memcpy(entry, filter, sizeof(*entry));
1296 ret = ice_fdir_entry_insert(pf, entry, &key);
1298 rte_flow_error_set(error, -ret,
1299 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1300 "Insert entry to table failed.");
1305 ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);
1310 if (filter->counter) {
1311 ice_fdir_counter_free(pf, filter->counter);
1312 filter->counter = NULL;
1321 ice_fdir_destroy_filter(struct ice_adapter *ad,
1322 struct rte_flow *flow,
1323 struct rte_flow_error *error)
1325 struct ice_pf *pf = &ad->pf;
1326 struct ice_fdir_info *fdir_info = &pf->fdir;
1327 struct ice_fdir_filter_conf *filter, *entry;
1328 struct ice_fdir_fltr_pattern key;
1332 filter = (struct ice_fdir_filter_conf *)flow->rule;
1334 is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);
1336 if (filter->counter) {
1337 ice_fdir_counter_free(pf, filter->counter);
1338 filter->counter = NULL;
1341 ice_fdir_extract_fltr_key(&key, filter);
1342 entry = ice_fdir_entry_lookup(fdir_info, &key);
1344 rte_flow_error_set(error, ENOENT,
1345 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1346 "Can't find entry.");
1350 ret = ice_fdir_add_del_filter(pf, filter, false);
1352 rte_flow_error_set(error, -ret,
1353 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1354 "Del filter rule failed.");
1358 ret = ice_fdir_entry_del(pf, &key);
1360 rte_flow_error_set(error, -ret,
1361 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1362 "Remove entry from table failed.");
1366 ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
1375 ice_fdir_query_count(struct ice_adapter *ad,
1376 struct rte_flow *flow,
1377 struct rte_flow_query_count *flow_stats,
1378 struct rte_flow_error *error)
1380 struct ice_pf *pf = &ad->pf;
1381 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1382 struct ice_fdir_filter_conf *filter = flow->rule;
1383 struct ice_fdir_counter *counter = filter->counter;
1384 uint64_t hits_lo, hits_hi;
1387 rte_flow_error_set(error, EINVAL,
1388 RTE_FLOW_ERROR_TYPE_ACTION,
1390 "FDIR counters not available");
1395 * Reading the low 32-bits latches the high 32-bits into a shadow
1396 * register. Reading the high 32-bits returns the value in the shadow register. */
1399 hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1400 hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1402 flow_stats->hits_set = 1;
1403 flow_stats->hits = hits_lo | (hits_hi << 32);
1404 flow_stats->bytes_set = 0;
1405 flow_stats->bytes = 0;
1407 if (flow_stats->reset) {
1408 /* reset statistic counter value */
1409 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1410 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
1416 static struct ice_flow_engine ice_fdir_engine = {
1417 .init = ice_fdir_init,
1418 .uninit = ice_fdir_uninit,
1419 .create = ice_fdir_create_filter,
1420 .destroy = ice_fdir_destroy_filter,
1421 .query_count = ice_fdir_query_count,
1422 .type = ICE_FLOW_ENGINE_FDIR,
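/* A queue region is expressed through the RSS action: the queue list must
 * be contiguous, a power of two in size, and within the device Rx queue
 * range.
 */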
1426 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1427 struct rte_flow_error *error,
1428 const struct rte_flow_action *act,
1429 struct ice_fdir_filter_conf *filter)
1431 const struct rte_flow_action_rss *rss = act->conf;
1434 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1435 rte_flow_error_set(error, EINVAL,
1436 RTE_FLOW_ERROR_TYPE_ACTION, act,
1441 if (rss->queue_num <= 1) {
1442 rte_flow_error_set(error, EINVAL,
1443 RTE_FLOW_ERROR_TYPE_ACTION, act,
1444 "Queue region size can't be 0 or 1.");
1448 /* check if the queue indexes for the queue region are continuous */
1449 for (i = 0; i < rss->queue_num - 1; i++) {
1450 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1451 rte_flow_error_set(error, EINVAL,
1452 RTE_FLOW_ERROR_TYPE_ACTION, act,
1453 "Discontinuous queue region");
1458 if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1459 rte_flow_error_set(error, EINVAL,
1460 RTE_FLOW_ERROR_TYPE_ACTION, act,
1461 "Invalid queue region indexes.");
1465 if (!(rte_is_power_of_2(rss->queue_num) &&
1466 (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1467 rte_flow_error_set(error, EINVAL,
1468 RTE_FLOW_ERROR_TYPE_ACTION, act,
1469 "The region size should be any of the following values:"
1470 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
1471 "of queues do not exceed the VSI allocation.");
1475 filter->input.q_index = rss->queue[0];
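/* q_region stores log2(queue_num); rte_fls_u32() returns the 1-based
 * position of the highest set bit, hence the subtraction.
 */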
1476 filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1477 filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
1483 ice_fdir_parse_action(struct ice_adapter *ad,
1484 const struct rte_flow_action actions[],
1485 struct rte_flow_error *error,
1486 struct ice_fdir_filter_conf *filter)
1488 struct ice_pf *pf = &ad->pf;
1489 const struct rte_flow_action_queue *act_q;
1490 const struct rte_flow_action_mark *mark_spec = NULL;
1491 const struct rte_flow_action_count *act_count;
1492 uint32_t dest_num = 0;
1493 uint32_t mark_num = 0;
1494 uint32_t counter_num = 0;
1497 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1498 switch (actions->type) {
1499 case RTE_FLOW_ACTION_TYPE_VOID:
1501 case RTE_FLOW_ACTION_TYPE_QUEUE:
1504 act_q = actions->conf;
1505 filter->input.q_index = act_q->index;
1506 if (filter->input.q_index >=
1507 pf->dev_data->nb_rx_queues) {
1508 rte_flow_error_set(error, EINVAL,
1509 RTE_FLOW_ERROR_TYPE_ACTION,
1511 "Invalid queue for FDIR.");
1514 filter->input.dest_ctl =
1515 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1517 case RTE_FLOW_ACTION_TYPE_DROP:
1520 filter->input.dest_ctl =
1521 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1523 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1526 filter->input.dest_ctl =
1527 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1529 case RTE_FLOW_ACTION_TYPE_RSS:
1532 ret = ice_fdir_parse_action_qregion(pf,
1533 error, actions, filter);
1537 case RTE_FLOW_ACTION_TYPE_MARK:
1540 mark_spec = actions->conf;
1541 filter->input.fltr_id = mark_spec->id;
1542 filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
1544 case RTE_FLOW_ACTION_TYPE_COUNT:
1547 act_count = actions->conf;
1548 filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1549 rte_memcpy(&filter->act_count, act_count,
1550 sizeof(filter->act_count));
1554 rte_flow_error_set(error, EINVAL,
1555 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1561 if (dest_num >= 2) {
1562 rte_flow_error_set(error, EINVAL,
1563 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1564 "Unsupported action combination");
1568 if (mark_num >= 2) {
1569 rte_flow_error_set(error, EINVAL,
1570 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1571 "Too many mark actions");
1575 if (counter_num >= 2) {
1576 rte_flow_error_set(error, EINVAL,
1577 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1578 "Too many count actions");
1582 if (dest_num + mark_num + counter_num == 0) {
1583 rte_flow_error_set(error, EINVAL,
1584 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1589 /* set default action to PASSTHRU mode in the "mark/count only" case. */
1591 filter->input.dest_ctl =
1592 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
1598 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1599 const struct rte_flow_item pattern[],
1600 struct rte_flow_error *error,
1601 struct ice_fdir_filter_conf *filter)
1603 const struct rte_flow_item *item = pattern;
1604 enum rte_flow_item_type item_type;
1605 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1606 enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
1607 const struct rte_flow_item_eth *eth_spec, *eth_mask;
1608 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1609 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1610 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1611 const struct rte_flow_item_udp *udp_spec, *udp_mask;
1612 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1613 const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
1614 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
1615 const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
1616 uint64_t input_set = ICE_INSET_NONE;
1617 uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1618 uint8_t ipv6_addr_mask[16] = {
1619 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1620 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1622 uint32_t vtc_flow_cpu;
1624 enum rte_flow_item_type next_type;
1625 uint16_t ether_type;
1627 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1629 rte_flow_error_set(error, EINVAL,
1630 RTE_FLOW_ERROR_TYPE_ITEM,
1632 "Not support range");
1635 item_type = item->type;
1637 switch (item_type) {
1638 case RTE_FLOW_ITEM_TYPE_ETH:
1639 eth_spec = item->spec;
1640 eth_mask = item->mask;
1641 next_type = (item + 1)->type;
1643 if (next_type == RTE_FLOW_ITEM_TYPE_END &&
1644 (!eth_spec || !eth_mask)) {
1645 rte_flow_error_set(error, EINVAL,
1646 RTE_FLOW_ERROR_TYPE_ITEM,
1647 item, "NULL eth spec/mask.");
1651 if (eth_spec && eth_mask) {
1652 if (!rte_is_zero_ether_addr(ð_spec->src) ||
1653 !rte_is_zero_ether_addr(ð_mask->src)) {
1654 rte_flow_error_set(error, EINVAL,
1655 RTE_FLOW_ERROR_TYPE_ITEM,
1657 "Src mac not support");
1661 if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
1662 input_set |= ICE_INSET_DMAC;
1663 rte_memcpy(&filter->input.ext_data.dst_mac,
1665 RTE_ETHER_ADDR_LEN);
1666 } else if (eth_mask->type == RTE_BE16(0xffff)) {
1667 ether_type = rte_be_to_cpu_16(eth_spec->type);
1668 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
1669 ether_type == RTE_ETHER_TYPE_IPV6) {
1670 rte_flow_error_set(error, EINVAL,
1671 RTE_FLOW_ERROR_TYPE_ITEM,
1673 "Unsupported ether_type.");
1677 input_set |= ICE_INSET_ETHERTYPE;
1678 rte_memcpy(&filter->input.ext_data.ether_type,
1680 sizeof(eth_spec->type));
1681 flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
1683 rte_flow_error_set(error, EINVAL,
1684 RTE_FLOW_ERROR_TYPE_ITEM,
1686 "Invalid dst mac addr mask or ethertype mask");
1690 case RTE_FLOW_ITEM_TYPE_IPV4:
1691 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1692 ipv4_spec = item->spec;
1693 ipv4_mask = item->mask;
1695 if (ipv4_spec && ipv4_mask) {
1696 /* Check IPv4 mask and update input set */
1697 if (ipv4_mask->hdr.version_ihl ||
1698 ipv4_mask->hdr.total_length ||
1699 ipv4_mask->hdr.packet_id ||
1700 ipv4_mask->hdr.fragment_offset ||
1701 ipv4_mask->hdr.hdr_checksum) {
1702 rte_flow_error_set(error, EINVAL,
1703 RTE_FLOW_ERROR_TYPE_ITEM,
1705 "Invalid IPv4 mask.");
1708 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1709 input_set |= tunnel_type ?
1710 ICE_INSET_TUN_IPV4_SRC :
1712 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1713 input_set |= tunnel_type ?
1714 ICE_INSET_TUN_IPV4_DST :
1716 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1717 input_set |= ICE_INSET_IPV4_TOS;
1718 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1719 input_set |= ICE_INSET_IPV4_TTL;
1720 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1721 input_set |= ICE_INSET_IPV4_PROTO;
1723 filter->input.ip.v4.dst_ip =
1724 ipv4_spec->hdr.dst_addr;
1725 filter->input.ip.v4.src_ip =
1726 ipv4_spec->hdr.src_addr;
1727 filter->input.ip.v4.tos =
1728 ipv4_spec->hdr.type_of_service;
1729 filter->input.ip.v4.ttl =
1730 ipv4_spec->hdr.time_to_live;
1731 filter->input.ip.v4.proto =
1732 ipv4_spec->hdr.next_proto_id;
1735 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
1737 case RTE_FLOW_ITEM_TYPE_IPV6:
1738 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1739 ipv6_spec = item->spec;
1740 ipv6_mask = item->mask;
1742 if (ipv6_spec && ipv6_mask) {
1743 /* Check IPv6 mask and update input set */
1744 if (ipv6_mask->hdr.payload_len) {
1745 rte_flow_error_set(error, EINVAL,
1746 RTE_FLOW_ERROR_TYPE_ITEM,
1748 "Invalid IPv6 mask");
1752 if (!memcmp(ipv6_mask->hdr.src_addr,
1754 RTE_DIM(ipv6_mask->hdr.src_addr)))
1755 input_set |= ICE_INSET_IPV6_SRC;
1756 if (!memcmp(ipv6_mask->hdr.dst_addr,
1758 RTE_DIM(ipv6_mask->hdr.dst_addr)))
1759 input_set |= ICE_INSET_IPV6_DST;
1761 if ((ipv6_mask->hdr.vtc_flow &
1762 rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1763 == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1764 input_set |= ICE_INSET_IPV6_TC;
1765 if (ipv6_mask->hdr.proto == UINT8_MAX)
1766 input_set |= ICE_INSET_IPV6_NEXT_HDR;
1767 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1768 input_set |= ICE_INSET_IPV6_HOP_LIMIT;
1770 rte_memcpy(filter->input.ip.v6.dst_ip,
1771 ipv6_spec->hdr.dst_addr, 16);
1772 rte_memcpy(filter->input.ip.v6.src_ip,
1773 ipv6_spec->hdr.src_addr, 16);
1776 rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1777 filter->input.ip.v6.tc =
1778 (uint8_t)(vtc_flow_cpu >>
1779 ICE_FDIR_IPV6_TC_OFFSET);
1780 filter->input.ip.v6.proto =
1781 ipv6_spec->hdr.proto;
1782 filter->input.ip.v6.hlim =
1783 ipv6_spec->hdr.hop_limits;
1786 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
1788 case RTE_FLOW_ITEM_TYPE_TCP:
1789 tcp_spec = item->spec;
1790 tcp_mask = item->mask;
1792 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1793 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1794 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1795 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
1797 if (tcp_spec && tcp_mask) {
1798 /* Check TCP mask and update input set */
1799 if (tcp_mask->hdr.sent_seq ||
1800 tcp_mask->hdr.recv_ack ||
1801 tcp_mask->hdr.data_off ||
1802 tcp_mask->hdr.tcp_flags ||
1803 tcp_mask->hdr.rx_win ||
1804 tcp_mask->hdr.cksum ||
1805 tcp_mask->hdr.tcp_urp) {
1806 rte_flow_error_set(error, EINVAL,
1807 RTE_FLOW_ERROR_TYPE_ITEM,
1809 "Invalid TCP mask");
1813 if (tcp_mask->hdr.src_port == UINT16_MAX)
1814 input_set |= tunnel_type ?
1815 ICE_INSET_TUN_TCP_SRC_PORT :
1816 ICE_INSET_TCP_SRC_PORT;
1817 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1818 input_set |= tunnel_type ?
1819 ICE_INSET_TUN_TCP_DST_PORT :
1820 ICE_INSET_TCP_DST_PORT;
1822 /* Get filter info */
1823 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1824 filter->input.ip.v4.dst_port =
1825 tcp_spec->hdr.dst_port;
1826 filter->input.ip.v4.src_port =
1827 tcp_spec->hdr.src_port;
1828 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1829 filter->input.ip.v6.dst_port =
1830 tcp_spec->hdr.dst_port;
1831 filter->input.ip.v6.src_port =
1832 tcp_spec->hdr.src_port;
1836 case RTE_FLOW_ITEM_TYPE_UDP:
1837 udp_spec = item->spec;
1838 udp_mask = item->mask;
1840 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1841 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1842 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1843 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
1845 if (udp_spec && udp_mask) {
1846 /* Check UDP mask and update input set */
1847 if (udp_mask->hdr.dgram_len ||
1848 udp_mask->hdr.dgram_cksum) {
1849 rte_flow_error_set(error, EINVAL,
1850 RTE_FLOW_ERROR_TYPE_ITEM,
1852 "Invalid UDP mask");
1856 if (udp_mask->hdr.src_port == UINT16_MAX)
1857 input_set |= tunnel_type ?
1858 ICE_INSET_TUN_UDP_SRC_PORT :
1859 ICE_INSET_UDP_SRC_PORT;
1860 if (udp_mask->hdr.dst_port == UINT16_MAX)
1861 input_set |= tunnel_type ?
1862 ICE_INSET_TUN_UDP_DST_PORT :
1863 ICE_INSET_UDP_DST_PORT;
1865 /* Get filter info */
1866 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1867 filter->input.ip.v4.dst_port =
1868 udp_spec->hdr.dst_port;
1869 filter->input.ip.v4.src_port =
1870 udp_spec->hdr.src_port;
1871 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1872 filter->input.ip.v6.src_port =
1873 udp_spec->hdr.src_port;
1874 filter->input.ip.v6.dst_port =
1875 udp_spec->hdr.dst_port;
1879 case RTE_FLOW_ITEM_TYPE_SCTP:
1880 sctp_spec = item->spec;
1881 sctp_mask = item->mask;
1883 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1884 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1885 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1886 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1888 if (sctp_spec && sctp_mask) {
1889 /* Check SCTP mask and update input set */
1890 if (sctp_mask->hdr.cksum) {
1891 rte_flow_error_set(error, EINVAL,
1892 RTE_FLOW_ERROR_TYPE_ITEM,
1894 "Invalid UDP mask");
1898 if (sctp_mask->hdr.src_port == UINT16_MAX)
1899 input_set |= tunnel_type ?
1900 ICE_INSET_TUN_SCTP_SRC_PORT :
1901 ICE_INSET_SCTP_SRC_PORT;
1902 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1903 input_set |= tunnel_type ?
1904 ICE_INSET_TUN_SCTP_DST_PORT :
1905 ICE_INSET_SCTP_DST_PORT;
1907 /* Get filter info */
1908 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1909 filter->input.ip.v4.dst_port =
1910 sctp_spec->hdr.dst_port;
1911 filter->input.ip.v4.src_port =
1912 sctp_spec->hdr.src_port;
1913 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1914 filter->input.ip.v6.dst_port =
1915 sctp_spec->hdr.dst_port;
1916 filter->input.ip.v6.src_port =
1917 sctp_spec->hdr.src_port;
1921 case RTE_FLOW_ITEM_TYPE_VOID:
1923 case RTE_FLOW_ITEM_TYPE_VXLAN:
1924 l3 = RTE_FLOW_ITEM_TYPE_END;
1925 vxlan_spec = item->spec;
1926 vxlan_mask = item->mask;
1928 if (vxlan_spec || vxlan_mask) {
1929 rte_flow_error_set(error, EINVAL,
1930 RTE_FLOW_ERROR_TYPE_ITEM,
1932 "Invalid vxlan field");
1936 tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
1938 case RTE_FLOW_ITEM_TYPE_GTPU:
1939 l3 = RTE_FLOW_ITEM_TYPE_END;
1940 gtp_spec = item->spec;
1941 gtp_mask = item->mask;
1943 if (gtp_spec && gtp_mask) {
1944 if (gtp_mask->v_pt_rsv_flags ||
1945 gtp_mask->msg_type ||
1946 gtp_mask->msg_len) {
1947 rte_flow_error_set(error, EINVAL,
1948 RTE_FLOW_ERROR_TYPE_ITEM,
1950 "Invalid GTP mask");
1954 if (gtp_mask->teid == UINT32_MAX)
1955 input_set |= ICE_INSET_GTPU_TEID;
1957 filter->input.gtpu_data.teid = gtp_spec->teid;
1960 tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
1962 case RTE_FLOW_ITEM_TYPE_GTP_PSC:
1963 gtp_psc_spec = item->spec;
1964 gtp_psc_mask = item->mask;
1966 if (gtp_psc_spec && gtp_psc_mask) {
1967 if (gtp_psc_mask->qfi == UINT8_MAX)
1968 input_set |= ICE_INSET_GTPU_QFI;
1970 filter->input.gtpu_data.qfi =
1973 tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
1976 rte_flow_error_set(error, EINVAL,
1977 RTE_FLOW_ERROR_TYPE_ITEM,
1979 "Invalid pattern item.");
1984 if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU ||
1985 tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH)
1986 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
1988 filter->tunnel_type = tunnel_type;
1989 filter->input.flow_type = flow_type;
1990 filter->input_set = input_set;
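/* Top-level parser: match the pattern against the supported table, extract
 * the input set and actions into pf->fdir.conf, and reject any input set
 * bit the matched pattern does not allow.
 */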
1996 ice_fdir_parse(struct ice_adapter *ad,
1997 struct ice_pattern_match_item *array,
1999 const struct rte_flow_item pattern[],
2000 const struct rte_flow_action actions[],
2002 struct rte_flow_error *error)
2004 struct ice_pf *pf = &ad->pf;
2005 struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
2006 struct ice_pattern_match_item *item = NULL;
2010 memset(filter, 0, sizeof(*filter));
2011 item = ice_search_pattern_match_item(pattern, array, array_len, error);
2015 ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
2018 input_set = filter->input_set;
2019 if (!input_set || input_set & ~item->input_set_mask) {
2020 rte_flow_error_set(error, EINVAL,
2021 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2023 "Invalid input set");
2028 ret = ice_fdir_parse_action(ad, actions, error, filter);
2039 static struct ice_flow_parser ice_fdir_parser_os = {
2040 .engine = &ice_fdir_engine,
2041 .array = ice_fdir_pattern_os,
2042 .array_len = RTE_DIM(ice_fdir_pattern_os),
2043 .parse_pattern_action = ice_fdir_parse,
2044 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2047 static struct ice_flow_parser ice_fdir_parser_comms = {
2048 .engine = &ice_fdir_engine,
2049 .array = ice_fdir_pattern_comms,
2050 .array_len = RTE_DIM(ice_fdir_pattern_comms),
2051 .parse_pattern_action = ice_fdir_parse,
2052 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
2055 RTE_INIT(ice_fdir_engine_register)
2057 ice_register_flow_engine(&ice_fdir_engine);