/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128

#define ICE_FDIR_INSET_ETH (\
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_FDIR_INSET_ETH | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_IPV4 (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)

#define ICE_FDIR_INSET_IPV4_TCP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_UDP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_SCTP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
	ICE_FDIR_INSET_ETH | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)

#define ICE_FDIR_INSET_IPV4_GTPU (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV6_GTPU (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
	{pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
	/* The GTPU input set is duplicated in the 3rd column to align with
	 * shared code behavior; ideally the GTPU fields would appear only
	 * in the 2nd column.
	 */
	{pattern_eth_ipv4_gtpu, ICE_FDIR_INSET_IPV4_GTPU, ICE_FDIR_INSET_IPV4_GTPU, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu, ICE_FDIR_INSET_IPV6_GTPU, ICE_FDIR_INSET_IPV6_GTPU, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_INSET_NONE},
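/* Illustrative example (testpmd-style syntax, assumed for illustration,
 * not taken from this file): the pattern_eth_ipv4_udp entry above would
 * admit a rule such as
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 /
 *       udp src is 32 dst is 33 / end actions queue index 2 / end
 * as long as every matched field is covered by ICE_FDIR_INSET_ETH_IPV4_UDP.
 */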
static struct ice_flow_parser ice_fdir_parser;

ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"
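/* Allocate the per-ptype array of FDIR HW profile descriptors; if an
 * allocation fails part way, the entries allocated so far are freed
 * again by the unwind loop below.
 */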
ice_fdir_prof_alloc(struct ice_hw *hw)
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])

	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
		rte_free(hw->fdir_prof[fltr_ptype]);
		hw->fdir_prof[fltr_ptype] = NULL;

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;
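/* Add a pool of `len` counters, whose hardware indexes start at
 * `index_start`, to the container's pool list and free-counter list.
 */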
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
	struct ice_fdir_counter_pool *pool;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(struct ice_fdir_counter) * len,
			 "Failed to allocate memory for fdir counter pool");

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");

	container->pools[container->index_free++] = pool;
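/* Create the initial counter pool from the PF's FD counter base; called
 * once while setting up the FDIR resources.
 */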
ice_fdir_counter_init(struct ice_pf *pf)
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
	uint32_t cnt_index, len;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");

ice_fdir_counter_release(struct ice_pf *pf)
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =

	for (i = 0; i < container->index_free; i++) {
		rte_free(container->pools[i]);
		container->pools[i] = NULL;

	TAILQ_INIT(&container->pool_list);
	container->index_free = 0;
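/* Search all pools for a shared counter with the given counter ID, so
 * that several rules can reference the same hardware counter.
 */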
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
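/* Allocate a counter: reuse a matching shared counter when one exists
 * (bumping its reference count); otherwise take the first free counter
 * from a pool and zero its statistics registers.
 */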
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free->ref_cnt + 1 == 0) {
		counter_free->ref_cnt++;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		PMD_DRV_LOG(ERR, "No free counter found");
	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistics counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
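/* Create the hash table and hash map used to track installed FDIR
 * filters in software.
 */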
ice_fdir_init_filter_list(struct ice_pf *pf)
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");

	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
	if (!fdir_info->hash_map) {
			     "Failed to allocate memory for fdir hash map!");
		goto err_fdir_hash_map_alloc;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

ice_fdir_release_filter_list(struct ice_pf *pf)
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);

	fdir_info->hash_map = NULL;
	fdir_info->hash_table = NULL;

/**
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
ice_fdir_setup(struct ice_pf *pf)
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");

	err = ice_fdir_counter_init(pf);
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");

	/* FDIR Tx queue setup */
	err = ice_fdir_setup_tx_resources(pf);
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");

	/* FDIR Rx queue setup */
	err = ice_fdir_setup_rx_resources(pf);
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");

	/* Enable FDIR MSIX interrupt */
	vsi->nb_used_qps = 1;
	ice_vsi_queues_bind_intr(vsi);
	ice_vsi_enable_queues_intr(vsi);

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");

	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;

	err = ice_fdir_prof_alloc(hw);
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",

	rte_memzone_free(pf->fdir.mz);
	ice_rx_queue_release(pf->fdir.rxq);
	ice_tx_queue_release(pf->fdir.txq);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

ice_fdir_prof_free(struct ice_hw *hw)
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
		rte_free(hw->fdir_prof[ptype]);
		hw->fdir_prof[ptype] = NULL;

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;
/* Remove a profile for a given filter type */
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;

	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;

/* Remove all created profiles */
ice_fdir_prof_rm_all(struct ice_pf *pf)
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);

/**
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
ice_fdir_teardown(struct ice_pf *pf)
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	vsi = pf->fdir.fdir_vsi;

	ice_vsi_disable_queues_intr(vsi);

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);
	ice_tx_queue_release(pf->fdir.txq);
	ice_rx_queue_release(pf->fdir.rxq);
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	err = rte_memzone_free(pf->fdir.mz);
		PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
			   enum ice_fltr_ptype ptype,
			   struct ice_flow_seg_info *seg,
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */

	/* if no input set conflict, return -EEXIST */
	if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
	    (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
		PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
	/* a rule with a conflicting input set already exists, so give up */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
		PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);

ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
			       enum ice_fltr_ptype ptype,
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_seg_info *seg;

	hw_prof = hw->fdir_prof[ptype];
	seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	/* both the profile and a rule exist, so the conflict cannot be resolved */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);

ice_fdir_cross_prof_conflict(struct ice_pf *pf,
			     enum ice_fltr_ptype ptype,
	enum ice_fltr_ptype cflct_ptype;

	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))

	PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",

ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	/* check for an input set conflict on the current profile */
	ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
	/* check whether the profile conflicts with other profiles */
	ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",

	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",

	hw_prof = hw->fdir_prof[ptype];
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;

	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
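/* Translate the driver's input-set bitmap into the base code's flow
 * field indexes, appending each matched field to the `field` array.
 */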
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
	struct ice_inset_map {
		enum ice_flow_field fld;
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
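/* Set the protocol header flags of the flow segment according to the
 * filter's ptype.
 */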
ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
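/* Build the flow segment(s) for the given input sets and program them
 * into the HW profile table; seg_tun[0] describes the outer (or only)
 * part of the flow and seg_tun[1] the tunnel inner part.
 */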
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t inner_input_set, uint64_t outer_input_set,
			enum ice_fdir_tunnel_type ttype)
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];

	if (!(inner_input_set | outer_input_set))

	seg_tun = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
		PMD_DRV_LOG(ERR, "Failed to allocate memory");
	/* use seg_tun[1] to record tunnel inner part */
	for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
		input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;

		for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
			field[i] = ICE_FLOW_FIELD_IDX_MAX;

		ice_fdir_input_set_parse(input_set, field);

		ice_fdir_input_set_hdrs(flow, seg);

		for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
			ice_flow_set_fld(seg, field[i],
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL, false);

	is_tunnel = ice_fdir_is_tunnel_profile(ttype);

	ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
				   seg_tun, flow, is_tunnel);
	} else if (ret < 0) {
		return (ret == -EEXIST) ? 0 : ret;

ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;

ice_fdir_init(struct ice_adapter *ad)
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;

	if (ad->hw.dcf_enabled)

	ret = ice_fdir_setup(pf);

	parser = &ice_fdir_parser;

	return ice_register_parser(parser, ad);

ice_fdir_uninit(struct ice_adapter *ad)
	struct ice_flow_parser *parser;
	struct ice_pf *pf = &ad->pf;

	if (ad->hw.dcf_enabled)

	parser = &ice_fdir_parser;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);

ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)

ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");

	return ice_fdir_programming(pf, &desc);
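/* Build the lookup key that identifies a filter in the SW hash table. */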
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
/* Check whether the flow director filter exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
		      const struct ice_fdir_fltr_pattern *key)

	ret = rte_hash_lookup(fdir_info->hash_table, key);

	return fdir_info->hash_map[ret];

/* Add a flow director entry into the SW list */
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
	struct ice_fdir_info *fdir_info = &pf->fdir;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
1180 "Failed to insert fdir entry to hash table %d!",
	fdir_info->hash_map[ret] = entry;

/* Delete a flow director entry from the SW list */
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
	struct ice_fdir_info *fdir_info = &pf->fdir;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
1199 "Failed to delete fdir filter to hash table %d!",
	fdir_info->hash_map[ret] = NULL;

ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set_i, filter->input_set_o,
				      filter->tunnel_type);
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "Failed to alloc FDIR counter.");
		filter->input.cnt_index = filter->counter->hw_index;

	ret = ice_fdir_add_del_filter(pf, filter, true);
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 1);

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;

ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");

	ret = ice_fdir_add_del_filter(pf, filter, false);
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");

	ret = ice_fdir_entry_del(pf, &key);
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 0);

ice_fdir_query_count(struct ice_adapter *ad,
		     struct rte_flow *flow,
		     struct rte_flow_query_count *flow_stats,
		     struct rte_flow_error *error)
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   "FDIR counters not available");
	/* Reading the low 32 bits latches the high 32 bits into a shadow
	 * register. Reading the high 32 bits returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistics counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);

static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
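/* A queue-region action spreads matched packets over a contiguous,
 * power-of-two sized group of Rx queues. Illustrative testpmd-style
 * rule (syntax assumed for illustration, not taken from this file):
 *   flow create 0 ingress pattern eth / ipv4 / udp / end
 *       actions rss queues 4 5 6 7 end / end
 */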
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
	const struct rte_flow_action_rss *rss = act->conf;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
	/* check that the queue indexes of the queue region are contiguous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
1456 "The region size should be any of the following values:"
1457 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
1458 "of queues do not exceed the VSI allocation.");
	filter->input.q_index = rss->queue[0];
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	const struct rte_flow_action_count *act_count;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	uint32_t counter_num = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
			    pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   "Invalid queue for FDIR.");
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
		case RTE_FLOW_ACTION_TYPE_DROP:
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = ice_fdir_parse_action_qregion(pf,
					error, actions, filter);
		case RTE_FLOW_ACTION_TYPE_MARK:
			filter->mark_flag = 1;
			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			act_count = actions->conf;
			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
			rte_memcpy(&filter->act_count, act_count,
				   sizeof(filter->act_count));
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,

	if (dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");

	if (counter_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many count actions");

	if (dest_num + mark_num + counter_num == 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
	/* set the default action to PASSTHRU mode in the mark/count-only case */
		filter->input.dest_ctl =
			ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
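/* Walk the pattern items, classify the flow type, collect the inner and
 * outer input sets, and fill in the filter's input structures.
 */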
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
	uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
	uint64_t *input_set;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	uint32_t vtc_flow_cpu;
	uint16_t ether_type;
	enum rte_flow_item_type next_type;
	bool is_outer = true;
	struct ice_fdir_extra *p_ext_data;
	struct ice_fdir_v4 *p_v4 = NULL;
	struct ice_fdir_v6 *p_v6 = NULL;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
		/* To align with shared code behavior, save GTPU outer
		 * fields in the inner struct.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
		    item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
	/* This loop parses the flow pattern and distinguishes non-tunnel
	 * from tunnel flows; input_set_i is used for the inner part.
	 */
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
1639 "Not support range");
		item_type = item->type;

		input_set = (tunnel_type && !is_outer) ?
			    &input_set_i : &input_set_o;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (!(eth_spec && eth_mask))

			if (!rte_is_zero_ether_addr(&eth_mask->dst))
				*input_set |= ICE_INSET_DMAC;
			if (!rte_is_zero_ether_addr(&eth_mask->src))
				*input_set |= ICE_INSET_SMAC;

			next_type = (item + 1)->type;
			/* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
			if (eth_mask->type == RTE_BE16(0xffff) &&
			    next_type == RTE_FLOW_ITEM_TYPE_END) {
				*input_set |= ICE_INSET_ETHERTYPE;
				ether_type = rte_be_to_cpu_16(eth_spec->type);

				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
				    ether_type == RTE_ETHER_TYPE_IPV6) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   "Unsupported ether_type.");

			p_ext_data = (tunnel_type && is_outer) ?
				     &filter->input.ext_data_outer :
				     &filter->input.ext_data;
			rte_memcpy(&p_ext_data->src_mac,
				   &eth_spec->src, RTE_ETHER_ADDR_LEN);
			rte_memcpy(&p_ext_data->dst_mac,
				   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
			rte_memcpy(&p_ext_data->ether_type,
				   &eth_spec->type, sizeof(eth_spec->type));
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			p_v4 = (tunnel_type && is_outer) ?
			       &filter->input.ip_outer.v4 :
			       &filter->input.ip.v4;

			if (!(ipv4_spec && ipv4_mask))

			/* Check IPv4 mask and update input set */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid IPv4 mask.");

			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
				*input_set |= ICE_INSET_IPV4_DST;
			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
				*input_set |= ICE_INSET_IPV4_SRC;
			if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
				*input_set |= ICE_INSET_IPV4_TTL;
			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
				*input_set |= ICE_INSET_IPV4_PROTO;
			if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
				*input_set |= ICE_INSET_IPV4_TOS;

			p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
			p_v4->src_ip = ipv4_spec->hdr.src_addr;
			p_v4->ttl = ipv4_spec->hdr.time_to_live;
			p_v4->proto = ipv4_spec->hdr.next_proto_id;
			p_v4->tos = ipv4_spec->hdr.type_of_service;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			p_v6 = (tunnel_type && is_outer) ?
			       &filter->input.ip_outer.v6 :
			       &filter->input.ip.v6;

			if (!(ipv6_spec && ipv6_mask))

			/* Check IPv6 mask and update input set */
			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid IPv6 mask");

			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.src_addr)))
				*input_set |= ICE_INSET_IPV6_SRC;
			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.dst_addr)))
				*input_set |= ICE_INSET_IPV6_DST;

			if ((ipv6_mask->hdr.vtc_flow &
			     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
			    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				*input_set |= ICE_INSET_IPV6_TC;
			if (ipv6_mask->hdr.proto == UINT8_MAX)
				*input_set |= ICE_INSET_IPV6_NEXT_HDR;
			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
				*input_set |= ICE_INSET_IPV6_HOP_LIMIT;

			rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
			rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
			vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
			p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
			p_v6->proto = ipv6_spec->hdr.proto;
			p_v6->hlim = ipv6_spec->hdr.hop_limits;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!(tcp_spec && tcp_mask))

			/* Check TCP mask and update input set */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid TCP mask");

			if (tcp_mask->hdr.src_port == UINT16_MAX)
				*input_set |= ICE_INSET_TCP_SRC_PORT;
			if (tcp_mask->hdr.dst_port == UINT16_MAX)
				*input_set |= ICE_INSET_TCP_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				p_v4->dst_port = tcp_spec->hdr.dst_port;
				p_v4->src_port = tcp_spec->hdr.src_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				p_v6->dst_port = tcp_spec->hdr.dst_port;
				p_v6->src_port = tcp_spec->hdr.src_port;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!(udp_spec && udp_mask))

			/* Check UDP mask and update input set */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid UDP mask");

			if (udp_mask->hdr.src_port == UINT16_MAX)
				*input_set |= ICE_INSET_UDP_SRC_PORT;
			if (udp_mask->hdr.dst_port == UINT16_MAX)
				*input_set |= ICE_INSET_UDP_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				p_v4->dst_port = udp_spec->hdr.dst_port;
				p_v4->src_port = udp_spec->hdr.src_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				p_v6->src_port = udp_spec->hdr.src_port;
				p_v6->dst_port = udp_spec->hdr.dst_port;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (!(sctp_spec && sctp_mask))

			/* Check SCTP mask and update input set */
			if (sctp_mask->hdr.cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
1872 "Invalid UDP mask");
			if (sctp_mask->hdr.src_port == UINT16_MAX)
				*input_set |= ICE_INSET_SCTP_SRC_PORT;
			if (sctp_mask->hdr.dst_port == UINT16_MAX)
				*input_set |= ICE_INSET_SCTP_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				p_v4->dst_port = sctp_spec->hdr.dst_port;
				p_v4->src_port = sctp_spec->hdr.src_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				p_v6->dst_port = sctp_spec->hdr.dst_port;
				p_v6->src_port = sctp_spec->hdr.src_port;
		case RTE_FLOW_ITEM_TYPE_VOID:
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid vxlan field");
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (!(gtp_spec && gtp_mask))

			if (gtp_mask->v_pt_rsv_flags ||
			    gtp_mask->msg_type ||
			    gtp_mask->msg_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid GTP mask");

			if (gtp_mask->teid == UINT32_MAX)
				input_set_o |= ICE_INSET_GTPU_TEID;

			filter->input.gtpu_data.teid = gtp_spec->teid;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (!(gtp_psc_spec && gtp_psc_mask))

			if (gtp_psc_mask->qfi == UINT8_MAX)
				input_set_o |= ICE_INSET_GTPU_QFI;

			filter->input.gtpu_data.qfi =
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   "Invalid pattern item.");

	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set_o = input_set_o;
	filter->input_set_i = input_set_i;
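/* Top-level parse callback: match the pattern against the supported
 * pattern list, parse the pattern and actions, and validate the
 * collected input sets against the matched template's masks.
 */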
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       struct rte_flow_error *error)
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(ad, pattern, array, array_len,

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);

	input_set = filter->input_set_o | filter->input_set_i;
	if (!input_set || filter->input_set_o & ~item->input_set_mask_o ||
	    filter->input_set_i & ~item->input_set_mask_i) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   "Invalid input set");

	ret = ice_fdir_parse_action(ad, actions, error, filter);

static struct ice_flow_parser ice_fdir_parser = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_list,
	.array_len = RTE_DIM(ice_fdir_pattern_list),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,

RTE_INIT(ice_fdir_engine_register)
	ice_register_flow_engine(&ice_fdir_engine);