/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128
#define ICE_FDIR_INSET_ETH (\
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_FDIR_INSET_ETH | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_DMAC | \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
	ICE_INSET_IPV6_PKID)
#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
#define ICE_FDIR_INSET_IPV4 (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_IPV4_TOS | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL)
#define ICE_FDIR_INSET_IPV4_TCP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_UDP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_SCTP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
#define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
	ICE_FDIR_INSET_ETH | ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_VXLAN_VNI)
#define ICE_FDIR_INSET_IPV4_GTPU (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV6_GTPU (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
	{pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_frag_ext, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
	/* The GTPU input set is duplicated in the 3rd column to align with
	 * shared code behavior; ideally only the GTPU fields would go in
	 * the 2nd column.
	 */
	{pattern_eth_ipv4_gtpu, ICE_FDIR_INSET_IPV4_GTPU, ICE_FDIR_INSET_IPV4_GTPU, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu, ICE_FDIR_INSET_IPV6_GTPU, ICE_FDIR_INSET_IPV6_GTPU, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_INSET_NONE},
};
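/* A minimal usage sketch (illustration only, not part of the driver): a rule
 * matching pattern_eth_ipv4_udp above could be created through testpmd
 * roughly as
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 /
 *        udp src is 32 dst is 33 / end actions queue index 2 / end
 * The port id, addresses and queue index here are made-up example values.
 */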
static struct ice_flow_parser ice_fdir_parser;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);
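/**
 * ice_memzone_reserve - look up a memzone by name, or reserve a new one
 * @name: memzone name
 * @len: length to reserve
 * @socket_id: NUMA socket id
 *
 * Returns the existing memzone when the name is already registered;
 * otherwise reserves a new IOVA-contiguous, ring-aligned zone.
 */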
static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"
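/**
 * ice_fdir_prof_alloc - allocate the FDIR HW profile table
 * @hw: pointer to the hardware structure
 *
 * Allocates hw->fdir_prof plus one ice_fd_hw_prof per filter ptype. On an
 * allocation failure the already-allocated entries are released again, so
 * the table is either fully populated or absent.
 */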
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype;
	     fltr_ptype++) {
		rte_free(hw->fdir_prof[fltr_ptype]);
		hw->fdir_prof[fltr_ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;

	return -ENOMEM;
}
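/**
 * ice_fdir_counter_pool_add - add a block of HW counters to the container
 * @pf: board private structure (unused)
 * @container: per-PF counter pool container
 * @index_start: first HW counter index covered by the pool
 * @len: number of counters in the pool
 *
 * Allocates one pool covering HW counter indices
 * [index_start, index_start + len) and links every counter into the pool's
 * free list before registering the pool in the container.
 */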
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	rte_free(pool);
	return ret;
}
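/**
 * ice_fdir_counter_init - set up the FDIR counter container
 * @pf: board private structure
 *
 * Creates a single pool of ICE_FDIR_COUNTERS_PER_BLOCK counters starting at
 * the PF's fd_ctr_base counter index.
 */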
static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
			&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}
static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
			&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++) {
		rte_free(container->pools[i]);
		container->pools[i] = NULL;
	}
	TAILQ_INIT(&container->pool_list);
	container->index_free = 0;

	return 0;
}
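/**
 * ice_fdir_counter_shared_search - find a shared counter by user id
 * @container: per-PF counter pool container
 * @id: user-assigned counter id
 *
 * Walks every pool and returns the first counter that is both marked shared
 * and carries the requested id, or NULL when none matches.
 */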
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
			       *container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}
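/**
 * ice_fdir_counter_alloc - allocate (or reuse) an FDIR counter
 * @pf: board private structure
 * @shared: non-zero to share one counter between rules with the same id
 * @id: user-assigned counter id
 *
 * A shared request first tries to bump the refcount of an existing counter
 * with the same id; otherwise the first free counter is popped from the
 * pools and its HW statistic registers are cleared.
 */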
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
			&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0)
				return NULL;
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}
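/**
 * ice_fdir_init_filter_list - create the SW filter bookkeeping
 * @pf: board private structure
 *
 * Creates a CRC-based rte_hash keyed by struct ice_fdir_fltr_pattern plus a
 * flat hash_map array that maps the returned hash slot to the filter entry.
 */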
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}
static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);

	fdir_info->hash_map = NULL;
	fdir_info->hash_table = NULL;
}
/**
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		return -EINVAL;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/* FDIR TX queue setup */
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* Enable FDIR MSIX interrupt */
	vsi->nb_used_qps = 1;
	ice_vsi_queues_bind_intr(vsi);
	ice_vsi_enable_queues_intr(vsi);

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;
	pf->fdir.mz = mz;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_prof;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_prof:
	rte_memzone_free(pf->fdir.mz);
	pf->fdir.mz = NULL;
fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}
static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		rte_free(hw->fdir_prof[ptype]);
		hw->fdir_prof[ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;
}
/* Remove a profile for a given filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}
/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}
/**
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	ice_vsi_disable_queues_intr(vsi);

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	if (pf->fdir.mz) {
		err = rte_memzone_free(pf->fdir.mz);
		pf->fdir.mz = NULL;
		if (err)
			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
	}
}
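/**
 * ice_fdir_cur_prof_conflict - check the existing profile for this ptype
 * @pf: board private structure
 * @ptype: filter ptype being configured
 * @seg: new flow segment(s) to compare against
 * @is_tunnel: tunnel profile flag
 *
 * Returns 0 when no profile exists (or an unused, conflicting one was
 * removed), -EEXIST when an identical profile can simply be reused, and
 * -EINVAL when a conflicting profile is pinned by existing rules.
 */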
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
			   enum ice_fltr_ptype ptype,
			   struct ice_flow_seg_info *seg,
			   bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!ori_seg)
		return 0;

	/* if there is no input set conflict, return -EEXIST */
	if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
	    (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
		PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
			    ptype);
		return -EEXIST;
	}

	/* a rule with a conflicting input set already exists, so give up */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
		PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
			    ptype);
		return -EINVAL;
	}

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);
	return 0;
}
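/**
 * ice_fdir_prof_resolve_conflict - try to clear a conflicting profile
 * @pf: board private structure
 * @ptype: the potentially conflicting filter ptype
 * @is_tunnel: tunnel profile flag
 *
 * Returns true when the ptype poses no problem (no profile, or an empty one
 * that could be deleted) and false when live rules keep the profile in
 * place.
 */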
static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
			       enum ice_fltr_ptype ptype,
			       bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_seg_info *seg;

	hw_prof = hw->fdir_prof[ptype];
	seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!seg)
		return true;

	/* profile exists and rules exist, the conflict cannot be resolved */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
		return false;

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);

	return true;
}
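/**
 * ice_fdir_cross_prof_conflict - resolve conflicts across related ptypes
 * @pf: board private structure
 * @ptype: filter ptype being configured
 * @is_tunnel: tunnel profile flag
 *
 * L4-specific ptypes (UDP/TCP/SCTP) conflict with the corresponding OTHER
 * ptype and vice versa, for plain IPv4/IPv6 as well as for GTPU inner
 * flows; each potential peer is run through
 * ice_fdir_prof_resolve_conflict().
 */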
static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
			     enum ice_fltr_ptype ptype,
			     bool is_tunnel)
{
	enum ice_fltr_ptype cflct_ptype;

	switch (ptype) {
	/* IPv4 */
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv4 GTPU */
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv6 */
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	default:
		break;
	}

	return 0;
err:
	PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
		    ptype, cflct_ptype);
	return -EINVAL;
}
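/**
 * ice_fdir_hw_tbl_conf - program an FDIR profile and its flow entries
 * @pf: board private structure
 * @vsi: main VSI receiving matched traffic
 * @ctrl_vsi: control VSI used for FDIR programming
 * @seg: flow segment(s) describing the input set
 * @ptype: filter ptype
 * @is_tunnel: tunnel profile flag
 *
 * After the conflict checks pass, adds a flow profile (two segments for
 * tunnels, one otherwise) and one flow entry per VSI, then records the
 * handles in hw->fdir_prof for later removal.
 */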
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	uint64_t prof_id;
	int ret;

	/* check for an input set conflict on the current profile */
	ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
	if (ret)
		return ret;

	/* check whether this profile conflicts with other profiles */
	ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
	if (ret)
		return ret;

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	hw_prof = hw->fdir_prof[ptype];
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}
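/**
 * ice_fdir_input_set_parse - translate input set bits into flow fields
 * @inset: input set bitmap collected from the pattern
 * @field: output array, pre-filled with ICE_FLOW_FIELD_IDX_MAX entries
 *
 * Each ICE_INSET_* bit that is fully present in @inset appends the matching
 * ICE_FLOW_FIELD_IDX_* value to @field.
 */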
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint16_t i, j;
	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
		{ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}
static void
ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
{
	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_FRAG_IPV4:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_FRAG);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_FRAG_IPV6:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_FRAG);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_VXLAN |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}
}
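/**
 * ice_fdir_input_set_conf - build flow segments and configure the profile
 * @pf: board private structure
 * @flow: filter ptype
 * @inner_input_set: input set of the tunnel inner part
 * @outer_input_set: input set of the non-tunnel/tunnel outer part
 * @ttype: tunnel type of the rule
 *
 * seg_tun[0] holds the outer segment and seg_tun[1] the inner one; each
 * segment gets its protocol headers and match fields set before the result
 * is handed to ice_fdir_hw_tbl_conf().
 */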
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t inner_input_set, uint64_t outer_input_set,
			enum ice_fdir_tunnel_type ttype)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	uint64_t input_set;
	bool is_tunnel;
	int k, i, ret = 0;

	if (!(inner_input_set | outer_input_set))
		return -EINVAL;

	seg_tun = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
	if (!seg_tun) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	/* use seg_tun[1] to record the tunnel inner part */
	for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
		seg = &seg_tun[k];
		input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;
		if (input_set == 0)
			continue;

		for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
			field[i] = ICE_FLOW_FIELD_IDX_MAX;

		ice_fdir_input_set_parse(input_set, field);

		ice_fdir_input_set_hdrs(flow, seg);

		for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
			ice_flow_set_fld(seg, field[i],
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL, false);
		}
	}

	is_tunnel = ice_fdir_is_tunnel_profile(ttype);

	ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
				   seg_tun, flow, is_tunnel);
	if (!ret) {
		return ret;
	} else if (ret < 0) {
		rte_free(seg_tun);
		return (ret == -EEXIST) ? 0 : ret;
	} else {
		return ret;
	}
}
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}
static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	if (ad->hw.dcf_enabled)
		return 0;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	parser = &ice_fdir_parser;

	return ice_register_parser(parser, ad);
}
static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_flow_parser *parser;
	struct ice_pf *pf = &ad->pf;

	if (ad->hw.dcf_enabled)
		return;

	parser = &ice_fdir_parser;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}
static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}
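/**
 * ice_fdir_extract_fltr_key - build the hash key for the SW filter list
 * @key: key to fill
 * @filter: filter configuration to extract from
 *
 * Copies the flow type, IP spec/mask, extra data and GTPU fields plus the
 * tunnel type into a zeroed ice_fdir_fltr_pattern so that identical rules
 * hash equally.
 */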
static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}
/* Check whether the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
		      const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}
/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}
/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}
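/**
 * ice_fdir_create_filter - create an FDIR rule
 * @ad: adapter structure
 * @flow: rte_flow handle to attach the rule to
 * @meta: parsed ice_fdir_filter_conf produced by ice_fdir_parse()
 * @error: rte_flow error reporting structure
 *
 * Rejects duplicates via the SW hash table, configures the input set
 * profile, optionally allocates a counter, programs the HW filter and
 * finally inserts the new entry into the SW list.
 */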
static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set_i, filter->input_set_o,
				      filter->tunnel_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 1);

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		goto free_entry;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}
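/**
 * ice_fdir_destroy_filter - destroy an FDIR rule
 * @ad: adapter structure
 * @flow: rte_flow handle carrying the rule
 * @error: rte_flow error reporting structure
 *
 * Releases the counter, removes the HW filter, deletes the SW list entry
 * and updates the active-filter statistics.
 */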
static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
	flow->rule = NULL;

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 0);

	rte_free(filter);

	return 0;
}
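/**
 * ice_fdir_query_count - report (and optionally reset) a rule's hit counter
 * @ad: adapter structure
 * @flow: rte_flow handle carrying the rule
 * @flow_stats: query result; only packet hits are reported, not bytes
 * @error: rte_flow error reporting structure
 */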
static int
ice_fdir_query_count(struct ice_adapter *ad,
		     struct rte_flow *flow,
		     struct rte_flow_query_count *flow_stats,
		     struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL,
				   "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32-bits latches the high 32-bits into a shadow
	 * register. Reading the high 32-bits returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}
static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};
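/* An illustrative action list (made-up values) that would be handled by
 * ice_fdir_parse_action_qregion() below: a 4-queue region starting at
 * queue 8, e.g. in testpmd
 *   ... actions rss queues 8 9 10 11 end / end
 * The queue indices must be continuous and the region size a power of two.
 */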
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check that the queue indices for the queue region are continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values: "
				   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
				   "of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter->input.q_index = rss->queue[0];
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

	return 0;
}
static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	const struct rte_flow_action_count *act_count;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	uint32_t counter_num = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
					pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			ret = ice_fdir_parse_action_qregion(pf,
							    error, actions, filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;
			filter->mark_flag = 1;
			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			counter_num++;

			act_count = actions->conf;
			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
			rte_memcpy(&filter->act_count, act_count,
				   sizeof(filter->act_count));
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	if (counter_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many count actions");
		return -rte_errno;
	}

	if (dest_num + mark_num + counter_num == 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Empty action");
		return -rte_errno;
	}

	/* set default action to PASSTHRU mode, in the "mark/count only" case */
	if (dest_num == 0)
		filter->input.dest_ctl =
			ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;

	return 0;
}
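/**
 * ice_fdir_parse_pattern - parse an rte_flow item list into an FDIR filter
 * @ad: adapter structure (unused)
 * @pattern: rte_flow item array, terminated by RTE_FLOW_ITEM_TYPE_END
 * @error: rte_flow error reporting structure
 * @filter: filter configuration to fill
 *
 * A first pass only detects the tunnel type (VXLAN/GTPU); the second pass
 * validates each item's spec/mask, accumulates the outer and inner input
 * set bitmaps and copies the match values into @filter.
 */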
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
			*ipv6_frag_last, *ipv6_frag_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
	uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
	uint64_t *input_set;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;
	uint16_t ether_type;
	enum rte_flow_item_type next_type;
	bool is_outer = true;
	struct ice_fdir_extra *p_ext_data;
	struct ice_fdir_v4 *p_v4 = NULL;
	struct ice_fdir_v6 *p_v6 = NULL;
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
		/* To align with shared code behavior, save GTPU outer
		 * fields in the inner struct.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
		    item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
			is_outer = false;
		}
	}

	/* This loop parses the flow pattern and distinguishes non-tunnel
	 * and tunnel flows. input_set_i is used for the inner part.
	 */
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_type = item->type;

		if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
				    item_type ==
				    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Range is not supported");
			return -rte_errno;
		}

		input_set = (tunnel_type && !is_outer) ?
			    &input_set_i : &input_set_o;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (!(eth_spec && eth_mask))
				break;

			if (!rte_is_zero_ether_addr(&eth_mask->dst))
				*input_set |= ICE_INSET_DMAC;
			if (!rte_is_zero_ether_addr(&eth_mask->src))
				*input_set |= ICE_INSET_SMAC;

			next_type = (item + 1)->type;
			/* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
			if (eth_mask->type == RTE_BE16(0xffff) &&
			    next_type == RTE_FLOW_ITEM_TYPE_END) {
				*input_set |= ICE_INSET_ETHERTYPE;
				ether_type = rte_be_to_cpu_16(eth_spec->type);

				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
				    ether_type == RTE_ETHER_TYPE_IPV6) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Unsupported ether_type.");
					return -rte_errno;
				}
			}

			p_ext_data = (tunnel_type && is_outer) ?
				     &filter->input.ext_data_outer :
				     &filter->input.ext_data;
			rte_memcpy(&p_ext_data->src_mac,
				   &eth_spec->src, RTE_ETHER_ADDR_LEN);
			rte_memcpy(&p_ext_data->dst_mac,
				   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
			rte_memcpy(&p_ext_data->ether_type,
				   &eth_spec->type, sizeof(eth_spec->type));
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_last = item->last;
			ipv4_mask = item->mask;
			p_v4 = (tunnel_type && is_outer) ?
			       &filter->input.ip_outer.v4 :
			       &filter->input.ip.v4;

			if (!(ipv4_spec && ipv4_mask))
				break;

			/* Check IPv4 mask and update input set */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}

			if (ipv4_last &&
			    (ipv4_last->hdr.version_ihl ||
			     ipv4_last->hdr.type_of_service ||
			     ipv4_last->hdr.time_to_live ||
			     ipv4_last->hdr.total_length ||
			     ipv4_last->hdr.next_proto_id ||
			     ipv4_last->hdr.hdr_checksum ||
			     ipv4_last->hdr.src_addr ||
			     ipv4_last->hdr.dst_addr)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 last.");
				return -rte_errno;
			}

			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
				*input_set |= ICE_INSET_IPV4_DST;
			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
				*input_set |= ICE_INSET_IPV4_SRC;
			if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
				*input_set |= ICE_INSET_IPV4_TTL;
			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
				*input_set |= ICE_INSET_IPV4_PROTO;
			if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
				*input_set |= ICE_INSET_IPV4_TOS;

			p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
			p_v4->src_ip = ipv4_spec->hdr.src_addr;
			p_v4->ttl = ipv4_spec->hdr.time_to_live;
			p_v4->proto = ipv4_spec->hdr.next_proto_id;
			p_v4->tos = ipv4_spec->hdr.type_of_service;

			/* only support "any" packet id for fragment IPv4:
			 * any packet_id:
			 * spec is 0, last is 0xffff, mask is 0xffff;
			 * fragment IPv4:
			 * spec is 0x2000, mask is 0xffff.
			 */
			if (ipv4_last && ipv4_spec->hdr.packet_id == 0 &&
			    ipv4_last->hdr.packet_id == UINT16_MAX &&
			    ipv4_mask->hdr.packet_id == UINT16_MAX &&
			    ipv4_spec->hdr.fragment_offset ==
			    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
			    ipv4_mask->hdr.fragment_offset == UINT16_MAX) {
				/* all IPv4 fragment packets have the same
				 * ethertype; if the spec is for all valid
				 * packet ids, set ethertype into the input
				 * set.
				 */
				*input_set |= ICE_INSET_ETHERTYPE;
				input_set_o |= ICE_INSET_ETHERTYPE;
			} else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 mask.");
				return -rte_errno;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			p_v6 = (tunnel_type && is_outer) ?
			       &filter->input.ip_outer.v6 :
			       &filter->input.ip.v6;

			if (!(ipv6_spec && ipv6_mask))
				break;

			/* Check IPv6 mask and update input set */
			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
				return -rte_errno;
			}

			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.src_addr)))
				*input_set |= ICE_INSET_IPV6_SRC;
			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.dst_addr)))
				*input_set |= ICE_INSET_IPV6_DST;

			if ((ipv6_mask->hdr.vtc_flow &
			     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
			    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				*input_set |= ICE_INSET_IPV6_TC;
			if (ipv6_mask->hdr.proto == UINT8_MAX)
				*input_set |= ICE_INSET_IPV6_NEXT_HDR;
			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
				*input_set |= ICE_INSET_IPV6_HOP_LIMIT;

			rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
			rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
			vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
			p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
			p_v6->proto = ipv6_spec->hdr.proto;
			p_v6->hlim = ipv6_spec->hdr.hop_limits;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
			flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
			ipv6_frag_spec = item->spec;
			ipv6_frag_last = item->last;
			ipv6_frag_mask = item->mask;

			if (!(ipv6_frag_spec && ipv6_frag_mask))
				break;

			/* only support "any" packet id for fragment IPv6:
			 * any packet_id:
			 * spec is 0, last is 0xffffffff, mask is 0xffffffff;
			 * fragment IPv6:
			 * spec is 0x1, mask is 0xffff.
			 */
			if (ipv6_frag_last && ipv6_frag_spec->hdr.id == 0 &&
			    ipv6_frag_last->hdr.id == UINT32_MAX &&
			    ipv6_frag_mask->hdr.id == UINT32_MAX &&
			    ipv6_frag_spec->hdr.frag_data ==
			    rte_cpu_to_be_16(1) &&
			    ipv6_frag_mask->hdr.frag_data == UINT16_MAX) {
				/* all IPv6 fragment packets have the same
				 * ethertype; if the spec is for all valid
				 * packet ids, set ethertype into the input
				 * set.
				 */
				*input_set |= ICE_INSET_ETHERTYPE;
				input_set_o |= ICE_INSET_ETHERTYPE;
			} else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv6 mask.");
				return -rte_errno;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!(tcp_spec && tcp_mask))
				break;

			/* Check TCP mask and update input set */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			if (tcp_mask->hdr.src_port == UINT16_MAX)
				*input_set |= ICE_INSET_TCP_SRC_PORT;
			if (tcp_mask->hdr.dst_port == UINT16_MAX)
				*input_set |= ICE_INSET_TCP_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				p_v4->dst_port = tcp_spec->hdr.dst_port;
				p_v4->src_port = tcp_spec->hdr.src_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				p_v6->dst_port = tcp_spec->hdr.dst_port;
				p_v6->src_port = tcp_spec->hdr.src_port;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!(udp_spec && udp_mask))
				break;

			/* Check UDP mask and update input set */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			if (udp_mask->hdr.src_port == UINT16_MAX)
				*input_set |= ICE_INSET_UDP_SRC_PORT;
			if (udp_mask->hdr.dst_port == UINT16_MAX)
				*input_set |= ICE_INSET_UDP_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				p_v4->dst_port = udp_spec->hdr.dst_port;
				p_v4->src_port = udp_spec->hdr.src_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				p_v6->src_port = udp_spec->hdr.src_port;
				p_v6->dst_port = udp_spec->hdr.dst_port;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (!(sctp_spec && sctp_mask))
				break;

			/* Check SCTP mask and update input set */
			if (sctp_mask->hdr.cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
				return -rte_errno;
			}

			if (sctp_mask->hdr.src_port == UINT16_MAX)
				*input_set |= ICE_INSET_SCTP_SRC_PORT;
			if (sctp_mask->hdr.dst_port == UINT16_MAX)
				*input_set |= ICE_INSET_SCTP_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				p_v4->dst_port = sctp_spec->hdr.dst_port;
				p_v4->src_port = sctp_spec->hdr.src_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				p_v6->dst_port = sctp_spec->hdr.dst_port;
				p_v6->src_port = sctp_spec->hdr.src_port;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			is_outer = false;

			if (!(vxlan_spec && vxlan_mask))
				break;

			if (vxlan_mask->hdr.vx_flags) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN field");
				return -rte_errno;
			}

			if (vxlan_mask->hdr.vx_vni)
				*input_set |= ICE_INSET_VXLAN_VNI;

			filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;

			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (!(gtp_spec && gtp_mask))
				break;

			if (gtp_mask->v_pt_rsv_flags ||
			    gtp_mask->msg_type ||
			    gtp_mask->msg_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
				return -rte_errno;
			}

			if (gtp_mask->teid == UINT32_MAX)
				input_set_o |= ICE_INSET_GTPU_TEID;

			filter->input.gtpu_data.teid = gtp_spec->teid;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (!(gtp_psc_spec && gtp_psc_mask))
				break;

			if (gtp_psc_mask->qfi == UINT8_MAX)
				input_set_o |= ICE_INSET_GTPU_QFI;

			filter->input.gtpu_data.qfi =
				gtp_psc_spec->qfi;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set_o = input_set_o;
	filter->input_set_i = input_set_i;

	return 0;
}
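/**
 * ice_fdir_parse - top-level FDIR pattern/action parser
 * @ad: adapter structure
 * @array: supported pattern list (ice_fdir_pattern_list)
 * @array_len: number of entries in @array
 * @pattern: rte_flow items
 * @actions: rte_flow actions
 * @priority: rule priority (unused)
 * @meta: on success, points to the parsed ice_fdir_filter_conf
 * @error: rte_flow error reporting structure
 *
 * Matches the pattern against the supported list, parses it, checks the
 * collected input sets against the pattern's masks (ethertype is tolerated
 * in the outer set for fragment rules) and finally parses the actions.
 */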
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       uint32_t priority __rte_unused,
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(ad, pattern, array, array_len,
					     error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		goto error;
	input_set = filter->input_set_o | filter->input_set_i;
	if (!input_set || filter->input_set_o &
	    ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
	    filter->input_set_i & ~item->input_set_mask_i) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		ret = -rte_errno;
		goto error;
	}
	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;
error:
	rte_free(item);
	return ret;
}
static struct ice_flow_parser ice_fdir_parser = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_list,
	.array_len = RTE_DIM(ice_fdir_pattern_list),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};
RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}