/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdio.h>
#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"
#define ICE_FDIR_IPV6_TC_OFFSET 20
#define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE 128

#define ICE_FDIR_INSET_ETH_IPV4 (\
    ICE_INSET_DMAC | \
    ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
    ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
    ICE_FDIR_INSET_ETH_IPV4 | \
    ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
    ICE_FDIR_INSET_ETH_IPV4 | \
    ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
    ICE_FDIR_INSET_ETH_IPV4 | \
    ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
    ICE_INSET_DMAC | \
    ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
    ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
    ICE_FDIR_INSET_ETH_IPV6 | \
    ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
    ICE_FDIR_INSET_ETH_IPV6 | \
    ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
    ICE_FDIR_INSET_ETH_IPV6 | \
    ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
    ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
    ICE_FDIR_INSET_VXLAN_IPV4 | \
    ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
    ICE_FDIR_INSET_VXLAN_IPV4 | \
    ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
    ICE_FDIR_INSET_VXLAN_IPV4 | \
    ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
    ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

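/*
 * Pattern tables: each entry maps a supported rte_flow pattern to the
 * input-set bits FDIR can match for it; the last field (ICE_INSET_NONE)
 * is unused meta information here.  ice_fdir_parse() rejects any rule
 * whose parsed input set is not covered by its pattern's mask.
 */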
static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
    {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
    {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
    {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
    {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
    {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
    {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
    {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_ipv4,
     ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
     ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
     ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
     ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
     ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
     ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
     ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
     ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
    {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
    {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
    {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
    {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
    {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
    {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
    {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_ipv4,
     ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_ipv4_udp,
     ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
     ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
     ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_eth_ipv4,
     ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
     ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
     ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
    {pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
     ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
    {pattern_eth_ipv4_gtpu_ipv4, ICE_FDIR_INSET_GTPU_IPV4, ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;
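/*
 * Two parsers are registered, one per DDP package flavor: the comms
 * package table additionally supports GTP-U patterns.  ice_fdir_init()
 * selects the parser that matches the active package.
 */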
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
    const struct rte_memzone *mz;

    mz = rte_memzone_lookup(name);
    if (mz)
        return mz;

    return rte_memzone_reserve_aligned(name, len, socket_id,
                                       RTE_MEMZONE_IOVA_CONTIG,
                                       ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME "FDIR_MEMZONE"
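/*
 * Allocate one ice_fd_hw_prof slot per filter ptype so each flow type
 * can carry its own HW profile.  On failure everything allocated so far
 * is freed again, leaving hw->fdir_prof NULL.
 */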
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
    enum ice_fltr_ptype ptype, fltr_ptype;

    if (!hw->fdir_prof) {
        hw->fdir_prof = (struct ice_fd_hw_prof **)
            ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
                       sizeof(*hw->fdir_prof));
        if (!hw->fdir_prof)
            return -ENOMEM;
    }
    for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
         ptype < ICE_FLTR_PTYPE_MAX;
         ptype++) {
        if (!hw->fdir_prof[ptype]) {
            hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
                ice_malloc(hw, sizeof(**hw->fdir_prof));
            if (!hw->fdir_prof[ptype])
                goto fail_mem;
        }
    }
    return 0;

fail_mem:
    for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
         fltr_ptype < ptype;
         fltr_ptype++) {
        rte_free(hw->fdir_prof[fltr_ptype]);
        hw->fdir_prof[fltr_ptype] = NULL;
    }

    rte_free(hw->fdir_prof);
    hw->fdir_prof = NULL;

    return -ENOMEM;
}

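/*
 * FDIR hit counters are taken from a fixed block of HW counters
 * (ICE_FDIR_COUNTERS_PER_BLOCK starting at hw->fd_ctr_base) and managed
 * below as pools with a free list.  A "shared" counter can be attached
 * to several rules and is looked up by its user-supplied id.
 */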
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
                          struct ice_fdir_counter_pool_container *container,
                          uint32_t index_start,
                          uint32_t len)
{
    struct ice_fdir_counter_pool *pool;
    uint32_t i;
    int ret = 0;

    pool = rte_zmalloc("ice_fdir_counter_pool",
                       sizeof(*pool) +
                       sizeof(struct ice_fdir_counter) * len,
                       0);
    if (!pool) {
        PMD_INIT_LOG(ERR,
                     "Failed to allocate memory for fdir counter pool");
        return -ENOMEM;
    }

    TAILQ_INIT(&pool->counter_list);
    TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

    for (i = 0; i < len; i++) {
        struct ice_fdir_counter *counter = &pool->counters[i];

        counter->hw_index = index_start + i;
        TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
    }

    if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
        PMD_INIT_LOG(ERR, "FDIR counter pool is full");
        ret = -EINVAL;
        goto free_pool;
    }

    container->pools[container->index_free++] = pool;
    return 0;

free_pool:
    /* unlink before freeing: the pool was already inserted above */
    TAILQ_REMOVE(&container->pool_list, pool, next);
    rte_free(pool);
    return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    struct ice_fdir_info *fdir_info = &pf->fdir;
    struct ice_fdir_counter_pool_container *container =
            &fdir_info->counter;
    uint32_t cnt_index, len;
    int ret;

    TAILQ_INIT(&container->pool_list);

    cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
    len = ICE_FDIR_COUNTERS_PER_BLOCK;

    ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
    if (ret) {
        PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
        return ret;
    }

    return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
    struct ice_fdir_info *fdir_info = &pf->fdir;
    struct ice_fdir_counter_pool_container *container =
            &fdir_info->counter;
    uint8_t i;

    for (i = 0; i < container->index_free; i++) {
        rte_free(container->pools[i]);
        container->pools[i] = NULL;
    }

    TAILQ_INIT(&container->pool_list);
    container->index_free = 0;

    return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
                               *container,
                               uint32_t id)
{
    struct ice_fdir_counter_pool *pool;
    struct ice_fdir_counter *counter;
    int i;

    TAILQ_FOREACH(pool, &container->pool_list, next) {
        for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
            counter = &pool->counters[i];

            if (counter->shared &&
                counter->id == id)
                return counter;
        }
    }

    return NULL;
}

static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    struct ice_fdir_info *fdir_info = &pf->fdir;
    struct ice_fdir_counter_pool_container *container =
            &fdir_info->counter;
    struct ice_fdir_counter_pool *pool = NULL;
    struct ice_fdir_counter *counter_free = NULL;

    if (shared) {
        counter_free = ice_fdir_counter_shared_search(container, id);
        if (counter_free) {
            /* refuse to wrap the reference count */
            if (counter_free->ref_cnt + 1 == 0) {
                rte_errno = E2BIG;
                return NULL;
            }
            counter_free->ref_cnt++;
            return counter_free;
        }
    }

    TAILQ_FOREACH(pool, &container->pool_list, next) {
        counter_free = TAILQ_FIRST(&pool->counter_list);
        if (counter_free)
            break;
        counter_free = NULL;
    }

    if (!counter_free) {
        PMD_DRV_LOG(ERR, "No free counter found\n");
        return NULL;
    }

    counter_free->shared = shared;
    counter_free->id = id;
    counter_free->ref_cnt = 1;
    counter_free->pool = pool;

    /* reset statistic counter value */
    ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
    ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

    TAILQ_REMOVE(&pool->counter_list, counter_free, next);
    if (TAILQ_EMPTY(&pool->counter_list)) {
        TAILQ_REMOVE(&container->pool_list, pool, next);
        TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
    }

    return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
                      struct ice_fdir_counter *counter)
{
    if (!counter)
        return;

    if (--counter->ref_cnt == 0) {
        struct ice_fdir_counter_pool *pool = counter->pool;

        TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
    }
}

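/*
 * Software shadow of the installed rules: an rte_hash keyed by
 * ice_fdir_fltr_pattern maps each rule to its ice_fdir_filter_conf, so
 * duplicates are caught before hardware is touched.
 */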
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
    struct rte_eth_dev *dev = pf->adapter->eth_dev;
    struct ice_fdir_info *fdir_info = &pf->fdir;
    char fdir_hash_name[RTE_HASH_NAMESIZE];
    int ret;

    struct rte_hash_parameters fdir_hash_params = {
        .name = fdir_hash_name,
        .entries = ICE_MAX_FDIR_FILTER_NUM,
        .key_len = sizeof(struct ice_fdir_fltr_pattern),
        .hash_func = rte_hash_crc,
        .hash_func_init_val = 0,
        .socket_id = rte_socket_id(),
        .extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
    };

    /* Initialize hash */
    snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
             "fdir_%s", dev->device->name);
    fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
    if (!fdir_info->hash_table) {
        PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
        return -EINVAL;
    }
    fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
                                      sizeof(*fdir_info->hash_map) *
                                      ICE_MAX_FDIR_FILTER_NUM,
                                      0);
    if (!fdir_info->hash_map) {
        PMD_INIT_LOG(ERR,
                     "Failed to allocate memory for fdir hash map!");
        ret = -ENOMEM;
        goto err_fdir_hash_map_alloc;
    }
    return 0;

err_fdir_hash_map_alloc:
    rte_hash_free(fdir_info->hash_table);

    return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
    struct ice_fdir_info *fdir_info = &pf->fdir;

    if (fdir_info->hash_map)
        rte_free(fdir_info->hash_map);
    if (fdir_info->hash_table)
        rte_hash_free(fdir_info->hash_table);

    fdir_info->hash_map = NULL;
    fdir_info->hash_table = NULL;
}

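/*
 * Note the setup order below: control VSI first, then software state
 * (filter list, counters), then the programming queues, and finally the
 * memzone holding the FDIR programming packet; the failure labels unwind
 * in reverse order.
 */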
/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
    struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    const struct rte_memzone *mz = NULL;
    char z_name[RTE_MEMZONE_NAMESIZE];
    struct ice_vsi *vsi;
    int err = ICE_SUCCESS;

    if ((pf->flags & ICE_FLAG_FDIR) == 0) {
        PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
        return -ENOTSUP;
    }

    PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
                " fd_fltr_best_effort = %u.",
                hw->func_caps.fd_fltr_guar,
                hw->func_caps.fd_fltr_best_effort);

    if (pf->fdir.fdir_vsi) {
        PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
        return ICE_SUCCESS;
    }

    /* make new FDIR VSI */
    vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
    if (!vsi) {
        PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
        return -EINVAL;
    }
    pf->fdir.fdir_vsi = vsi;

    err = ice_fdir_init_filter_list(pf);
    if (err) {
        PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
        return -EINVAL;
    }

    err = ice_fdir_counter_init(pf);
    if (err) {
        PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
        return -EINVAL;
    }

    /*Fdir tx queue setup*/
    err = ice_fdir_setup_tx_resources(pf);
    if (err) {
        PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
        goto fail_setup_tx;
    }

    /*Fdir rx queue setup*/
    err = ice_fdir_setup_rx_resources(pf);
    if (err) {
        PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
        goto fail_setup_rx;
    }

    err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
    if (err) {
        PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
        goto fail_mem;
    }

    err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
    if (err) {
        PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
        goto fail_mem;
    }

    /* reserve memory for the fdir programming packet */
    snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
             ICE_FDIR_MZ_NAME,
             eth_dev->data->port_id);
    mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
    if (!mz) {
        PMD_DRV_LOG(ERR, "Cannot init memzone for "
                    "flow director program packet.");
        err = -ENOMEM;
        goto fail_mem;
    }
    pf->fdir.prg_pkt = mz->addr;
    pf->fdir.dma_addr = mz->iova;
    pf->fdir.mz = mz;

    err = ice_fdir_prof_alloc(hw);
    if (err) {
        PMD_DRV_LOG(ERR, "Cannot allocate memory for "
                    "flow director profile.");
        err = -ENOMEM;
        goto fail_prof;
    }

    PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
                vsi->base_queue);
    return ICE_SUCCESS;

fail_prof:
    rte_memzone_free(pf->fdir.mz);
    pf->fdir.mz = NULL;
fail_mem:
    ice_rx_queue_release(pf->fdir.rxq);
    pf->fdir.rxq = NULL;
fail_setup_rx:
    ice_tx_queue_release(pf->fdir.txq);
    pf->fdir.txq = NULL;
fail_setup_tx:
    ice_release_vsi(vsi);
    pf->fdir.fdir_vsi = NULL;
    return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
    enum ice_fltr_ptype ptype;

    for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
         ptype < ICE_FLTR_PTYPE_MAX;
         ptype++) {
        rte_free(hw->fdir_prof[ptype]);
        hw->fdir_prof[ptype] = NULL;
    }

    rte_free(hw->fdir_prof);
    hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    struct ice_fd_hw_prof *hw_prof;
    uint64_t prof_id;
    uint16_t vsi_num;
    int i;

    if (!hw->fdir_prof || !hw->fdir_prof[ptype])
        return;

    hw_prof = hw->fdir_prof[ptype];

    prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
    for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
        if (hw_prof->entry_h[i][is_tunnel]) {
            vsi_num = ice_get_hw_vsi_num(hw,
                                         hw_prof->vsi_h[i]);
            ice_rem_prof_id_flow(hw, ICE_BLK_FD,
                                 vsi_num, prof_id);
            ice_flow_rem_entry(hw,
                               hw_prof->entry_h[i][is_tunnel]);
            hw_prof->entry_h[i][is_tunnel] = 0;
        }
    }
    ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
    rte_free(hw_prof->fdir_seg[is_tunnel]);
    hw_prof->fdir_seg[is_tunnel] = NULL;

    for (i = 0; i < hw_prof->cnt; i++)
        hw_prof->vsi_h[i] = 0;
    pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
    enum ice_fltr_ptype ptype;

    for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
         ptype < ICE_FLTR_PTYPE_MAX;
         ptype++) {
        ice_fdir_prof_rm(pf, ptype, false);
        ice_fdir_prof_rm(pf, ptype, true);
    }
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
    struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    struct ice_vsi *vsi;
    int err;

    vsi = pf->fdir.fdir_vsi;
    if (!vsi)
        return;

    err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
    if (err)
        PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

    err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
    if (err)
        PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

    err = ice_fdir_counter_release(pf);
    if (err)
        PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

    ice_fdir_release_filter_list(pf);

    ice_tx_queue_release(pf->fdir.txq);
    pf->fdir.txq = NULL;
    ice_rx_queue_release(pf->fdir.rxq);
    pf->fdir.rxq = NULL;
    ice_fdir_prof_rm_all(pf);
    ice_fdir_prof_free(hw);
    ice_release_vsi(vsi);
    pf->fdir.fdir_vsi = NULL;

    if (pf->fdir.mz) {
        err = rte_memzone_free(pf->fdir.mz);
        pf->fdir.mz = NULL;
        if (err)
            PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
    }
}

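/*
 * Configure the HW tables for one filter type: add a flow profile and
 * two flow entries, one for the main VSI and one for the FDIR control
 * VSI.  -EAGAIN means an identical segment is already installed (callers
 * treat this as success); a conflicting segment is only replaced while
 * no filters of that type exist.
 */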
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
                     struct ice_vsi *ctrl_vsi,
                     struct ice_flow_seg_info *seg,
                     enum ice_fltr_ptype ptype,
                     bool is_tunnel)
{
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    enum ice_flow_dir dir = ICE_FLOW_RX;
    struct ice_flow_seg_info *ori_seg;
    struct ice_fd_hw_prof *hw_prof;
    struct ice_flow_prof *prof;
    uint64_t entry_1 = 0;
    uint64_t entry_2 = 0;
    uint16_t vsi_num;
    int ret;
    uint64_t prof_id;

    hw_prof = hw->fdir_prof[ptype];
    ori_seg = hw_prof->fdir_seg[is_tunnel];
    if (ori_seg) {
        if (!is_tunnel) {
            if (!memcmp(ori_seg, seg, sizeof(*seg)))
                return -EAGAIN;
        } else {
            if (!memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))
                return -EAGAIN;
        }

        if (pf->fdir_fltr_cnt[ptype][is_tunnel])
            return -EINVAL;

        ice_fdir_prof_rm(pf, ptype, is_tunnel);
    }

    prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
    ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
                            (is_tunnel) ? 2 : 1, NULL, 0, &prof);
    if (ret)
        return ret;
    ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                             vsi->idx, ICE_FLOW_PRIO_NORMAL,
                             seg, NULL, 0, &entry_1);
    if (ret) {
        PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
                    ptype);
        goto err_add_prof;
    }
    ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
                             ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
                             seg, NULL, 0, &entry_2);
    if (ret) {
        PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
                    ptype);
        goto err_add_entry;
    }

    pf->hw_prof_cnt[ptype][is_tunnel] = 0;
    hw_prof->cnt = 0;
    hw_prof->fdir_seg[is_tunnel] = seg;
    hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
    hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
    pf->hw_prof_cnt[ptype][is_tunnel]++;
    hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
    hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
    pf->hw_prof_cnt[ptype][is_tunnel]++;

    return ret;

err_add_entry:
    vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
    ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
    ice_flow_rem_entry(hw, entry_1);
err_add_prof:
    ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

    return ret;
}

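/*
 * Translate the driver's ICE_INSET_* bitmap into the list of
 * enum ice_flow_field values consumed by ice_flow_set_fld().  Tunnel
 * (inner) bits intentionally map to the same field indexes as their
 * outer counterparts; the segment layout determines which header the
 * field is extracted from.
 */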
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
    uint16_t i, j;
    struct ice_inset_map {
        uint64_t inset;
        enum ice_flow_field fld;
    };
    static const struct ice_inset_map ice_inset_map[] = {
        {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
        {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
        {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
        {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
        {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
        {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
        {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
        {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
        {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
        {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
        {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
        {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
        {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
        {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
        {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
        {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
        {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
        {ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
        {ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
        {ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
        {ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
        {ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
        {ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
        {ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
        {ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
        {ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
        {ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
    };

    for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
        if ((inset & ice_inset_map[i].inset) ==
            ice_inset_map[i].inset)
            field[j++] = ice_inset_map[i].fld;
    }
}

static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
                        uint64_t input_set, bool is_tunnel)
{
    struct ice_flow_seg_info *seg;
    struct ice_flow_seg_info *seg_tun = NULL;
    enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
    int i, ret;

    if (!input_set)
        return -EINVAL;

    seg = (struct ice_flow_seg_info *)
        ice_malloc(hw, sizeof(*seg));
    if (!seg) {
        PMD_DRV_LOG(ERR, "No memory can be allocated");
        return -ENOMEM;
    }

    for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
        field[i] = ICE_FLOW_FIELD_IDX_MAX;
    ice_fdir_input_set_parse(input_set, field);

    switch (flow) {
    case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                          ICE_FLOW_SEG_HDR_IPV4);
        break;
    case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                          ICE_FLOW_SEG_HDR_IPV4);
        break;
    case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                          ICE_FLOW_SEG_HDR_IPV4);
        break;
    case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
        break;
    case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
                          ICE_FLOW_SEG_HDR_IPV6);
        break;
    case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
                          ICE_FLOW_SEG_HDR_IPV6);
        break;
    case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
                          ICE_FLOW_SEG_HDR_IPV6);
        break;
    case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
        break;
    case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
    case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
    case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
    case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
        ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
                          ICE_FLOW_SEG_HDR_IPV4);
        break;
    default:
        PMD_DRV_LOG(ERR, "not supported filter type.");
        break;
    }

    for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
        ice_flow_set_fld(seg, field[i],
                         ICE_FLOW_FLD_OFF_INVAL,
                         ICE_FLOW_FLD_OFF_INVAL,
                         ICE_FLOW_FLD_OFF_INVAL, false);
    }

    if (!is_tunnel) {
        ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                   seg, flow, false);
    } else {
        seg_tun = (struct ice_flow_seg_info *)
            ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
        if (!seg_tun) {
            PMD_DRV_LOG(ERR, "No memory can be allocated");
            rte_free(seg);
            return -ENOMEM;
        }
        rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
        ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
                                   seg_tun, flow, true);
    }

    if (!ret) {
        return ret;
    } else if (ret < 0) {
        rte_free(seg);
        if (is_tunnel)
            rte_free(seg_tun);
        return (ret == -EAGAIN) ? 0 : ret;
    } else {
        return ret;
    }
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
                    bool is_tunnel, bool add)
{
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    int cnt;

    cnt = (add) ? 1 : -1;
    hw->fdir_active_fltr += cnt;
    if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
        PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
    else
        pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
    struct ice_pf *pf = &ad->pf;
    struct ice_flow_parser *parser;
    int ret;

    ret = ice_fdir_setup(pf);
    if (ret)
        return ret;

    if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
        parser = &ice_fdir_parser_comms;
    else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
        parser = &ice_fdir_parser_os;
    else
        return -EINVAL;

    return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
    struct ice_pf *pf = &ad->pf;
    struct ice_flow_parser *parser;

    if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
        parser = &ice_fdir_parser_comms;
    else
        parser = &ice_fdir_parser_os;

    ice_unregister_parser(parser, ad);

    ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
    if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
        return 1;
    else
        return 0;
}

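/*
 * Program (or remove) one rule: build the programming descriptor and
 * the dummy "programming packet" describing the match fields, then push
 * both through the FDIR programming queue.  Add and delete share this
 * path; only the descriptor contents differ.
 */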
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
                        struct ice_fdir_filter_conf *filter,
                        bool add)
{
    struct ice_fltr_desc desc;
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
    bool is_tun;
    int ret;

    filter->input.dest_vsi = pf->main_vsi->idx;

    memset(&desc, 0, sizeof(desc));
    ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

    is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

    memset(pkt, 0, ICE_FDIR_PKT_LEN);
    ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
    if (ret) {
        PMD_DRV_LOG(ERR, "Generate dummy packet failed");
        return -EINVAL;
    }

    return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
                          struct ice_fdir_filter_conf *filter)
{
    struct ice_fdir_fltr *input = &filter->input;
    memset(key, 0, sizeof(*key));

    key->flow_type = input->flow_type;
    rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
    rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
    rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
    rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

    rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
    rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

    key->tunnel_type = filter->tunnel_type;
}

/* Check if there exists the flow director filter */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
                      const struct ice_fdir_fltr_pattern *key)
{
    int ret;

    ret = rte_hash_lookup(fdir_info->hash_table, key);
    if (ret < 0)
        return NULL;

    return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
                      struct ice_fdir_filter_conf *entry,
                      struct ice_fdir_fltr_pattern *key)
{
    struct ice_fdir_info *fdir_info = &pf->fdir;
    int ret;

    ret = rte_hash_add_key(fdir_info->hash_table, key);
    if (ret < 0) {
        PMD_DRV_LOG(ERR,
                    "Failed to insert fdir entry to hash table %d!",
                    ret);
        return ret;
    }
    fdir_info->hash_map[ret] = entry;

    return 0;
}

1018 /* Delete a flow director entry from the SW list */
1020 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
1022 struct ice_fdir_info *fdir_info = &pf->fdir;
1025 ret = rte_hash_del_key(fdir_info->hash_table, key);
1028 "Failed to delete fdir filter to hash table %d!",
1032 fdir_info->hash_map[ret] = NULL;
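/*
 * rte_flow create path: reject duplicates via the hash table, configure
 * the profile for the rule's input set, optionally allocate a hit
 * counter, program the rule in hardware and only then remember it in
 * the software list, so a failed HW write leaves no stale SW entry.
 */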
static int
ice_fdir_create_filter(struct ice_adapter *ad,
                       struct rte_flow *flow,
                       void *meta,
                       struct rte_flow_error *error)
{
    struct ice_pf *pf = &ad->pf;
    struct ice_fdir_filter_conf *filter = meta;
    struct ice_fdir_info *fdir_info = &pf->fdir;
    struct ice_fdir_filter_conf *entry, *node;
    struct ice_fdir_fltr_pattern key;
    bool is_tun;
    int ret;

    ice_fdir_extract_fltr_key(&key, filter);
    node = ice_fdir_entry_lookup(fdir_info, &key);
    if (node) {
        rte_flow_error_set(error, EEXIST,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Rule already exists!");
        return -rte_errno;
    }

    entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
    if (!entry) {
        rte_flow_error_set(error, ENOMEM,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Failed to allocate memory");
        return -rte_errno;
    }

    is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

    ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
                                  filter->input_set, is_tun);
    if (ret) {
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Profile configure failed.");
        goto free_entry;
    }

    /* alloc counter for FDIR */
    if (filter->input.cnt_ena) {
        struct rte_flow_action_count *act_count = &filter->act_count;

        filter->counter = ice_fdir_counter_alloc(pf,
                                                 act_count->shared,
                                                 act_count->id);
        if (!filter->counter) {
            rte_flow_error_set(error, EINVAL,
                               RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                               "Failed to alloc FDIR counter.");
            goto free_entry;
        }
        filter->input.cnt_index = filter->counter->hw_index;
    }

    ret = ice_fdir_add_del_filter(pf, filter, true);
    if (ret) {
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Add filter rule failed.");
        goto free_counter;
    }

    rte_memcpy(entry, filter, sizeof(*entry));
    ret = ice_fdir_entry_insert(pf, entry, &key);
    if (ret) {
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Insert entry to table failed.");
        goto free_entry;
    }

    flow->rule = entry;
    ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

    return 0;

free_counter:
    if (filter->counter) {
        ice_fdir_counter_free(pf, filter->counter);
        filter->counter = NULL;
    }

free_entry:
    rte_free(entry);
    return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
                        struct rte_flow *flow,
                        struct rte_flow_error *error)
{
    struct ice_pf *pf = &ad->pf;
    struct ice_fdir_info *fdir_info = &pf->fdir;
    struct ice_fdir_filter_conf *filter, *entry;
    struct ice_fdir_fltr_pattern key;
    bool is_tun;
    int ret;

    filter = (struct ice_fdir_filter_conf *)flow->rule;

    is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

    if (filter->counter) {
        ice_fdir_counter_free(pf, filter->counter);
        filter->counter = NULL;
    }

    ice_fdir_extract_fltr_key(&key, filter);
    entry = ice_fdir_entry_lookup(fdir_info, &key);
    if (!entry) {
        rte_flow_error_set(error, ENOENT,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Can't find entry.");
        return -rte_errno;
    }

    ret = ice_fdir_add_del_filter(pf, filter, false);
    if (ret) {
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Del filter rule failed.");
        return -rte_errno;
    }

    ret = ice_fdir_entry_del(pf, &key);
    if (ret) {
        rte_flow_error_set(error, -ret,
                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                           "Remove entry from table failed.");
        return -rte_errno;
    }

    ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
    flow->rule = NULL;

    rte_free(filter);

    return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
                     struct rte_flow *flow,
                     struct rte_flow_query_count *flow_stats,
                     struct rte_flow_error *error)
{
    struct ice_pf *pf = &ad->pf;
    struct ice_hw *hw = ICE_PF_TO_HW(pf);
    struct ice_fdir_filter_conf *filter = flow->rule;
    struct ice_fdir_counter *counter = filter->counter;
    uint64_t hits_lo, hits_hi;

    if (!counter) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION,
                           NULL,
                           "FDIR counters not available");
        return -rte_errno;
    }

    /*
     * Reading the low 32 bits latches the high 32 bits into a shadow
     * register. Reading the high 32 bits returns the value in the
     * shadow register.
     */
    hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
    hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

    flow_stats->hits_set = 1;
    flow_stats->hits = hits_lo | (hits_hi << 32);
    flow_stats->bytes_set = 0;
    flow_stats->bytes = 0;

    if (flow_stats->reset) {
        /* reset statistic counter value */
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
        ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
    }

    return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
    .init = ice_fdir_init,
    .uninit = ice_fdir_uninit,
    .create = ice_fdir_create_filter,
    .destroy = ice_fdir_destroy_filter,
    .query_count = ice_fdir_query_count,
    .type = ICE_FLOW_ENGINE_FDIR,
};

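/*
 * An RSS action is (re)used here to describe a "queue region": the
 * queues must be contiguous, a power of two in number (at most
 * ICE_FDIR_MAX_QREGION_SIZE) and within the port's Rx queue count,
 * because hardware encodes the region as first queue + log2(size)
 * (see the rte_fls_u32() math at the end).
 */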
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
                              struct rte_flow_error *error,
                              const struct rte_flow_action *act,
                              struct ice_fdir_filter_conf *filter)
{
    const struct rte_flow_action_rss *rss = act->conf;
    uint32_t i;

    if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                           "Invalid action.");
        return -rte_errno;
    }

    if (rss->queue_num <= 1) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                           "Queue region size can't be 0 or 1.");
        return -rte_errno;
    }

    /* check if queue index for queue region is continuous */
    for (i = 0; i < rss->queue_num - 1; i++) {
        if (rss->queue[i + 1] != rss->queue[i] + 1) {
            rte_flow_error_set(error, EINVAL,
                               RTE_FLOW_ERROR_TYPE_ACTION, act,
                               "Discontinuous queue region");
            return -rte_errno;
        }
    }

    if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                           "Invalid queue region indexes.");
        return -rte_errno;
    }

    if (!(rte_is_power_of_2(rss->queue_num) &&
          (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                           "The region size should be any of the following values:"
                           " 1, 2, 4, 8, 16, 32, 64, 128, as long as the total number"
                           " of queues does not exceed the VSI allocation.");
        return -rte_errno;
    }

    filter->input.q_index = rss->queue[0];
    filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
    filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

    return 0;
}

static int
ice_fdir_parse_action(struct ice_adapter *ad,
                      const struct rte_flow_action actions[],
                      struct rte_flow_error *error,
                      struct ice_fdir_filter_conf *filter)
{
    struct ice_pf *pf = &ad->pf;
    const struct rte_flow_action_queue *act_q;
    const struct rte_flow_action_mark *mark_spec = NULL;
    const struct rte_flow_action_count *act_count;
    uint32_t dest_num = 0;
    uint32_t mark_num = 0;
    uint32_t counter_num = 0;
    int ret;

    for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
        switch (actions->type) {
        case RTE_FLOW_ACTION_TYPE_VOID:
            break;
        case RTE_FLOW_ACTION_TYPE_QUEUE:
            dest_num++;

            act_q = actions->conf;
            filter->input.q_index = act_q->index;
            if (filter->input.q_index >=
                pf->dev_data->nb_rx_queues) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   actions,
                                   "Invalid queue for FDIR.");
                return -rte_errno;
            }
            filter->input.dest_ctl =
                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
            break;
        case RTE_FLOW_ACTION_TYPE_DROP:
            dest_num++;

            filter->input.dest_ctl =
                ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
            break;
        case RTE_FLOW_ACTION_TYPE_PASSTHRU:
            dest_num++;

            filter->input.dest_ctl =
                ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
            filter->input.q_index = 0;
            break;
        case RTE_FLOW_ACTION_TYPE_RSS:
            dest_num++;

            ret = ice_fdir_parse_action_qregion(pf,
                                                error, actions, filter);
            if (ret)
                return ret;
            break;
        case RTE_FLOW_ACTION_TYPE_MARK:
            mark_num++;

            mark_spec = actions->conf;
            filter->input.fltr_id = mark_spec->id;
            break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
            counter_num++;

            act_count = actions->conf;
            filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
            rte_memcpy(&filter->act_count, act_count,
                       sizeof(filter->act_count));
            break;
        default:
            rte_flow_error_set(error, EINVAL,
                               RTE_FLOW_ERROR_TYPE_ACTION, actions,
                               "Invalid action.");
            return -rte_errno;
        }
    }

    if (dest_num == 0 || dest_num >= 2) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Unsupported action combination");
        return -rte_errno;
    }

    if (mark_num >= 2) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many mark actions");
        return -rte_errno;
    }

    if (counter_num >= 2) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
                           "Too many count actions");
        return -rte_errno;
    }

    return 0;
}

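/*
 * Walk the pattern items, collecting the matched input-set bits and the
 * match values.  Addresses and ports are deliberately copied src/dst
 * swapped into filter->input; the base code's programming-packet
 * builder (ice_fdir_get_gen_prgm_pkt) appears to expect them in that
 * order.
 */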
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
                       const struct rte_flow_item pattern[],
                       struct rte_flow_error *error,
                       struct ice_fdir_filter_conf *filter)
{
    const struct rte_flow_item *item = pattern;
    enum rte_flow_item_type item_type;
    enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
    enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
    const struct rte_flow_item_eth *eth_spec, *eth_mask;
    const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
    const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
    const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
    const struct rte_flow_item_udp *udp_spec, *udp_mask;
    const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
    const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
    const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
    const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
    uint64_t input_set = ICE_INSET_NONE;
    uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
    uint8_t ipv6_addr_mask[16] = {
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
        0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
    };
    uint32_t vtc_flow_cpu;

    for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                               RTE_FLOW_ERROR_TYPE_ITEM,
                               item,
                               "Not support range");
            return -rte_errno;
        }
        item_type = item->type;

        switch (item_type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
            eth_spec = item->spec;
            eth_mask = item->mask;

            if (eth_spec && eth_mask) {
                if (!rte_is_zero_ether_addr(&eth_spec->src) ||
                    !rte_is_zero_ether_addr(&eth_mask->src)) {
                    rte_flow_error_set(error, EINVAL,
                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                       item,
                                       "Src mac not support");
                    return -rte_errno;
                }

                if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                    rte_flow_error_set(error, EINVAL,
                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                       item,
                                       "Invalid mac addr mask");
                    return -rte_errno;
                }

                input_set |= ICE_INSET_DMAC;
                rte_memcpy(&filter->input.ext_data.dst_mac,
                           &eth_spec->dst,
                           RTE_ETHER_ADDR_LEN);
            }
            break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
            l3 = RTE_FLOW_ITEM_TYPE_IPV4;
            ipv4_spec = item->spec;
            ipv4_mask = item->mask;

            if (ipv4_spec && ipv4_mask) {
                /* Check IPv4 mask and update input set */
                if (ipv4_mask->hdr.version_ihl ||
                    ipv4_mask->hdr.total_length ||
                    ipv4_mask->hdr.packet_id ||
                    ipv4_mask->hdr.fragment_offset ||
                    ipv4_mask->hdr.hdr_checksum) {
                    rte_flow_error_set(error, EINVAL,
                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                       item,
                                       "Invalid IPv4 mask.");
                    return -rte_errno;
                }
                if (ipv4_mask->hdr.src_addr == UINT32_MAX)
                    input_set |= tunnel_type ?
                                 ICE_INSET_TUN_IPV4_SRC :
                                 ICE_INSET_IPV4_SRC;
                if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
                    input_set |= tunnel_type ?
                                 ICE_INSET_TUN_IPV4_DST :
                                 ICE_INSET_IPV4_DST;
                if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
                    input_set |= ICE_INSET_IPV4_TOS;
                if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
                    input_set |= ICE_INSET_IPV4_TTL;
                if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
                    input_set |= ICE_INSET_IPV4_PROTO;

                filter->input.ip.v4.dst_ip =
                    ipv4_spec->hdr.src_addr;
                filter->input.ip.v4.src_ip =
                    ipv4_spec->hdr.dst_addr;
                filter->input.ip.v4.tos =
                    ipv4_spec->hdr.type_of_service;
                filter->input.ip.v4.ttl =
                    ipv4_spec->hdr.time_to_live;
                filter->input.ip.v4.proto =
                    ipv4_spec->hdr.next_proto_id;
            }

            flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
            break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
            l3 = RTE_FLOW_ITEM_TYPE_IPV6;
            ipv6_spec = item->spec;
            ipv6_mask = item->mask;

            if (ipv6_spec && ipv6_mask) {
                /* Check IPv6 mask and update input set */
                if (ipv6_mask->hdr.payload_len) {
                    rte_flow_error_set(error, EINVAL,
                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                       item,
                                       "Invalid IPv6 mask");
                    return -rte_errno;
                }

                if (!memcmp(ipv6_mask->hdr.src_addr,
                            ipv6_addr_mask,
                            RTE_DIM(ipv6_mask->hdr.src_addr)))
                    input_set |= ICE_INSET_IPV6_SRC;
                if (!memcmp(ipv6_mask->hdr.dst_addr,
                            ipv6_addr_mask,
                            RTE_DIM(ipv6_mask->hdr.dst_addr)))
                    input_set |= ICE_INSET_IPV6_DST;

                if ((ipv6_mask->hdr.vtc_flow &
                     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
                    input_set |= ICE_INSET_IPV6_TC;
                if (ipv6_mask->hdr.proto == UINT8_MAX)
                    input_set |= ICE_INSET_IPV6_NEXT_HDR;
                if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
                    input_set |= ICE_INSET_IPV6_HOP_LIMIT;

                rte_memcpy(filter->input.ip.v6.dst_ip,
                           ipv6_spec->hdr.src_addr, 16);
                rte_memcpy(filter->input.ip.v6.src_ip,
                           ipv6_spec->hdr.dst_addr, 16);

                vtc_flow_cpu =
                    rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
                filter->input.ip.v6.tc =
                    (uint8_t)(vtc_flow_cpu >>
                              ICE_FDIR_IPV6_TC_OFFSET);
                filter->input.ip.v6.proto =
                    ipv6_spec->hdr.proto;
                filter->input.ip.v6.hlim =
                    ipv6_spec->hdr.hop_limits;
            }

            flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
            break;
        case RTE_FLOW_ITEM_TYPE_TCP:
            tcp_spec = item->spec;
            tcp_mask = item->mask;

            if (tcp_spec && tcp_mask) {
                /* Check TCP mask and update input set */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.tcp_flags ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                    rte_flow_error_set(error, EINVAL,
                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                       item,
                                       "Invalid TCP mask");
                    return -rte_errno;
                }

                if (tcp_mask->hdr.src_port == UINT16_MAX)
                    input_set |= tunnel_type ?
                                 ICE_INSET_TUN_TCP_SRC_PORT :
                                 ICE_INSET_TCP_SRC_PORT;
                if (tcp_mask->hdr.dst_port == UINT16_MAX)
                    input_set |= tunnel_type ?
                                 ICE_INSET_TUN_TCP_DST_PORT :
                                 ICE_INSET_TCP_DST_PORT;

                /* Get filter info */
                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                    filter->input.ip.v4.dst_port =
                        tcp_spec->hdr.src_port;
                    filter->input.ip.v4.src_port =
                        tcp_spec->hdr.dst_port;
                    flow_type =
                        ICE_FLTR_PTYPE_NONF_IPV4_TCP;
                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                    filter->input.ip.v6.dst_port =
                        tcp_spec->hdr.src_port;
                    filter->input.ip.v6.src_port =
                        tcp_spec->hdr.dst_port;
                    flow_type =
                        ICE_FLTR_PTYPE_NONF_IPV6_TCP;
                }
            }
            break;
        case RTE_FLOW_ITEM_TYPE_UDP:
            udp_spec = item->spec;
            udp_mask = item->mask;

            if (udp_spec && udp_mask) {
                /* Check UDP mask and update input set*/
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                    rte_flow_error_set(error, EINVAL,
                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                       item,
                                       "Invalid UDP mask");
                    return -rte_errno;
                }

                if (udp_mask->hdr.src_port == UINT16_MAX)
                    input_set |= tunnel_type ?
                                 ICE_INSET_TUN_UDP_SRC_PORT :
                                 ICE_INSET_UDP_SRC_PORT;
                if (udp_mask->hdr.dst_port == UINT16_MAX)
                    input_set |= tunnel_type ?
                                 ICE_INSET_TUN_UDP_DST_PORT :
                                 ICE_INSET_UDP_DST_PORT;

                /* Get filter info */
                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                    filter->input.ip.v4.dst_port =
                        udp_spec->hdr.src_port;
                    filter->input.ip.v4.src_port =
                        udp_spec->hdr.dst_port;
                    flow_type =
                        ICE_FLTR_PTYPE_NONF_IPV4_UDP;
                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                    filter->input.ip.v6.src_port =
                        udp_spec->hdr.dst_port;
                    filter->input.ip.v6.dst_port =
                        udp_spec->hdr.src_port;
                    flow_type =
                        ICE_FLTR_PTYPE_NONF_IPV6_UDP;
                }
            }
            break;
        case RTE_FLOW_ITEM_TYPE_SCTP:
            sctp_spec = item->spec;
            sctp_mask = item->mask;

            if (sctp_spec && sctp_mask) {
                /* Check SCTP mask and update input set */
                if (sctp_mask->hdr.cksum) {
                    rte_flow_error_set(error, EINVAL,
                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                       item,
                                       "Invalid SCTP mask");
                    return -rte_errno;
                }

                if (sctp_mask->hdr.src_port == UINT16_MAX)
                    input_set |= tunnel_type ?
                                 ICE_INSET_TUN_SCTP_SRC_PORT :
                                 ICE_INSET_SCTP_SRC_PORT;
                if (sctp_mask->hdr.dst_port == UINT16_MAX)
                    input_set |= tunnel_type ?
                                 ICE_INSET_TUN_SCTP_DST_PORT :
                                 ICE_INSET_SCTP_DST_PORT;

                /* Get filter info */
                if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
                    filter->input.ip.v4.dst_port =
                        sctp_spec->hdr.src_port;
                    filter->input.ip.v4.src_port =
                        sctp_spec->hdr.dst_port;
                    flow_type =
                        ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
                } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
                    filter->input.ip.v6.dst_port =
                        sctp_spec->hdr.src_port;
                    filter->input.ip.v6.src_port =
                        sctp_spec->hdr.dst_port;
                    flow_type =
                        ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
                }
            }
            break;
        case RTE_FLOW_ITEM_TYPE_VOID:
            break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
            l3 = RTE_FLOW_ITEM_TYPE_END;
            vxlan_spec = item->spec;
            vxlan_mask = item->mask;

            if (vxlan_spec || vxlan_mask) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   item,
                                   "Invalid vxlan field");
                return -rte_errno;
            }

            tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
            break;
        case RTE_FLOW_ITEM_TYPE_GTPU:
            l3 = RTE_FLOW_ITEM_TYPE_END;
            gtp_spec = item->spec;
            gtp_mask = item->mask;

            if (gtp_spec && gtp_mask) {
                if (gtp_mask->v_pt_rsv_flags ||
                    gtp_mask->msg_type ||
                    gtp_mask->msg_len) {
                    rte_flow_error_set(error, EINVAL,
                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                       item,
                                       "Invalid GTP mask");
                    return -rte_errno;
                }

                if (gtp_mask->teid == UINT32_MAX)
                    input_set |= ICE_INSET_GTPU_TEID;

                filter->input.gtpu_data.teid = gtp_spec->teid;
            }
            break;
        case RTE_FLOW_ITEM_TYPE_GTP_PSC:
            gtp_psc_spec = item->spec;
            gtp_psc_mask = item->mask;

            if (gtp_psc_spec && gtp_psc_mask) {
                if (gtp_psc_mask->qfi == UINT8_MAX)
                    input_set |= ICE_INSET_GTPU_QFI;

                filter->input.gtpu_data.qfi =
                    gtp_psc_spec->qfi;
            }

            tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
            break;
        default:
            rte_flow_error_set(error, EINVAL,
                               RTE_FLOW_ERROR_TYPE_ITEM,
                               item,
                               "Invalid pattern item.");
            return -rte_errno;
        }
    }

    if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
        flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

    filter->tunnel_type = tunnel_type;
    filter->input.flow_type = flow_type;
    filter->input_set = input_set;

    return 0;
}

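/*
 * Top-level parse callback shared by both parsers: match the pattern
 * against the table, parse the items into a filter, verify the
 * collected input set against the table's mask, then parse the actions.
 */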
static int
ice_fdir_parse(struct ice_adapter *ad,
               struct ice_pattern_match_item *array,
               uint32_t array_len,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               void **meta,
               struct rte_flow_error *error)
{
    struct ice_pf *pf = &ad->pf;
    struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
    struct ice_pattern_match_item *item = NULL;
    uint64_t input_set;
    int ret;

    memset(filter, 0, sizeof(*filter));
    item = ice_search_pattern_match_item(pattern, array, array_len, error);
    if (!item)
        return -rte_errno;

    ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
    if (ret)
        return ret;
    input_set = filter->input_set;
    if (!input_set || input_set & ~item->input_set_mask) {
        rte_flow_error_set(error, EINVAL,
                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                           pattern,
                           "Invalid input set");
        return -rte_errno;
    }

    ret = ice_fdir_parse_action(ad, actions, error, filter);
    if (ret)
        return ret;

    if (meta)
        *meta = filter;

    return 0;
}

static struct ice_flow_parser ice_fdir_parser_os = {
    .engine = &ice_fdir_engine,
    .array = ice_fdir_pattern_os,
    .array_len = RTE_DIM(ice_fdir_pattern_os),
    .parse_pattern_action = ice_fdir_parse,
    .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
    .engine = &ice_fdir_engine,
    .array = ice_fdir_pattern_comms,
    .array_len = RTE_DIM(ice_fdir_pattern_comms),
    .parse_pattern_action = ice_fdir_parse,
    .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
    ice_register_flow_engine(&ice_fdir_engine);
}