/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */

#include <rte_flow.h>
#include <rte_hash.h>
#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128
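
/* Input-set bitmaps: each ICE_FDIR_INSET_* macro below lists the packet
 * fields a given flow pattern is allowed to match on. They are checked
 * against the pattern table when a rule is parsed.
 */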
#define ICE_FDIR_INSET_ETH (\
	ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE)

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_FDIR_INSET_ETH | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_PKID)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_FDIR_INSET_ETH | \
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR | \
	ICE_INSET_IPV6_PKID)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_IPV4 (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_IPV4_TOS | ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL)

#define ICE_FDIR_INSET_IPV4_TCP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_UDP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_IPV4_SCTP (\
	ICE_FDIR_INSET_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_VXLAN (\
	ICE_FDIR_INSET_ETH | ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_VXLAN_VNI)

#define ICE_FDIR_INSET_IPV4_GTPU (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV4_GTPU_EH (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV6_GTPU (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_GTPU_TEID)

#define ICE_FDIR_INSET_IPV6_GTPU_EH (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_IPV4_ESP (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_ESP_SPI)

#define ICE_FDIR_INSET_IPV6_ESP (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
	ICE_INSET_ESP_SPI)

#define ICE_FDIR_INSET_IPV4_NATT_ESP (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \
	ICE_INSET_NAT_T_ESP_SPI)

#define ICE_FDIR_INSET_IPV6_NATT_ESP (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | \
	ICE_INSET_NAT_T_ESP_SPI)
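
/* Pattern table: each entry pairs a flow pattern with the outer
 * (input_set_mask_o) and inner (input_set_mask_i) input-set masks a rule
 * may use; the last column is unused here.
 */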
static struct ice_pattern_match_item ice_fdir_pattern_list[] = {
	{pattern_ethertype, ICE_FDIR_INSET_ETH, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_frag_ext, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_esp, ICE_FDIR_INSET_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_esp, ICE_FDIR_INSET_IPV4_NATT_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_esp, ICE_FDIR_INSET_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp_esp, ICE_FDIR_INSET_IPV6_NATT_ESP, ICE_INSET_NONE, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_VXLAN, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
	/* The GTPU input set is duplicated in the 3rd column to align with
	 * shared code behavior; ideally only the GTPU fields would go in the
	 * 2nd column.
	 */
	{pattern_eth_ipv4_gtpu, ICE_FDIR_INSET_IPV4_GTPU, ICE_FDIR_INSET_IPV4_GTPU, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_eh, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_FDIR_INSET_IPV4_GTPU_EH, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu, ICE_FDIR_INSET_IPV6_GTPU, ICE_FDIR_INSET_IPV6_GTPU, ICE_INSET_NONE},
	{pattern_eth_ipv6_gtpu_eh, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_FDIR_INSET_IPV6_GTPU_EH, ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser;

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type);

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	const struct rte_memzone *mz;

	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"
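
/* One ice_fd_hw_prof slot is kept per filter ptype; allocation is lazy and
 * fully rolled back on failure.
 */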
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype;
	     fltr_ptype++) {
		rte_free(hw->fdir_prof[fltr_ptype]);
		hw->fdir_prof[fltr_ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;

	return -ENOMEM;
}
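
/* FDIR hit counters are grouped into pools of hardware counter blocks;
 * free counters are kept on a per-pool TAILQ and handed out on demand.
 */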
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	rte_free(pool);
	return ret;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++) {
		rte_free(container->pools[i]);
		container->pools[i] = NULL;
	}

	TAILQ_INIT(&container->pool_list);
	container->index_free = 0;

	return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
			       *container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}

static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
				&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}
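
/* The SW filter list is an rte_hash table keyed by ice_fdir_fltr_pattern;
 * hash_map translates the slot index returned by the hash back to the
 * stored ice_fdir_filter_conf entry.
 */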
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = &rte_eth_devices[pf->dev_data->port_id];
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);

	fdir_info->hash_map = NULL;
	fdir_info->hash_table = NULL;
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		return -EINVAL;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/* FDIR TX queue setup */
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/* FDIR RX queue setup */
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* Enable FDIR MSIX interrupt */
	vsi->nb_used_qps = 1;
	ice_vsi_queues_bind_intr(vsi);
	ice_vsi_enable_queues_intr(vsi);

	/* reserve memory for the FDIR programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;
	pf->fdir.mz = mz;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_prof;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successful, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_prof:
	rte_memzone_free(pf->fdir.mz);
	pf->fdir.mz = NULL;
fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		rte_free(hw->fdir_prof[ptype]);
		hw->fdir_prof[ptype] = NULL;
	}

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw, ICE_BLK_FD,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = &rte_eth_devices[pf->dev_data->port_id];
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	ice_vsi_disable_queues_intr(vsi);

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	rte_eth_dma_zone_free(eth_dev, "fdir_tx_ring", ICE_FDIR_QUEUE_ID);
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	rte_eth_dma_zone_free(eth_dev, "fdir_rx_ring", ICE_FDIR_QUEUE_ID);
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	if (pf->fdir.mz) {
		err = rte_memzone_free(pf->fdir.mz);
		pf->fdir.mz = NULL;
		if (err)
			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
	}
}
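
/* Profile conflict handling: a ptype's HW profile can only be replaced when
 * no rule of that ptype is installed. The helpers below detect input-set
 * conflicts on the current profile and across related ptypes (e.g. IPV4_UDP
 * vs. IPV4_OTHER), deleting empty profiles where that is safe.
 */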
static int
ice_fdir_cur_prof_conflict(struct ice_pf *pf,
			   enum ice_fltr_ptype ptype,
			   struct ice_flow_seg_info *seg,
			   bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!ori_seg)
		return 0;

	/* if no input set conflict, return -EEXIST */
	if ((!is_tunnel && !memcmp(ori_seg, seg, sizeof(*seg))) ||
	    (is_tunnel && !memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))) {
		PMD_DRV_LOG(DEBUG, "Profile already exists for flow type %d.",
			    ptype);
		return -EEXIST;
	}

	/* a rule with an input set conflict already exists, so give up */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel]) {
		PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule.",
			    ptype);
		return -EINVAL;
	}

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);
	return 0;
}

static bool
ice_fdir_prof_resolve_conflict(struct ice_pf *pf,
			       enum ice_fltr_ptype ptype,
			       bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_seg_info *seg;

	hw_prof = hw->fdir_prof[ptype];
	seg = hw_prof->fdir_seg[is_tunnel];

	/* profile does not exist */
	if (!seg)
		return true;

	/* profile exists and a rule exists, fail to resolve the conflict */
	if (pf->fdir_fltr_cnt[ptype][is_tunnel] != 0)
		return false;

	/* it's safe to delete an empty profile */
	ice_fdir_prof_rm(pf, ptype, is_tunnel);

	return true;
}

static int
ice_fdir_cross_prof_conflict(struct ice_pf *pf,
			     enum ice_fltr_ptype ptype,
			     bool is_tunnel)
{
	enum ice_fltr_ptype cflct_ptype;

	switch (ptype) {
	/* IPv4 */
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv4 GTPU */
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	/* IPv6 */
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		cflct_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
		if (!ice_fdir_prof_resolve_conflict
			(pf, cflct_ptype, is_tunnel))
			goto err;
		break;
	default:
		break;
	}

	return 0;
err:
	PMD_DRV_LOG(DEBUG, "Failed to create profile for flow type %d due to conflict with existing rule of flow type %d.",
		    ptype, cflct_ptype);
	return -EINVAL;
}

static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	uint64_t prof_id;
	int ret;

	/* check if the current profile has an input set conflict */
	ret = ice_fdir_cur_prof_conflict(pf, ptype, seg, is_tunnel);
	if (ret)
		return ret;

	/* check if this profile conflicts with other profiles */
	ret = ice_fdir_cross_prof_conflict(pf, ptype, is_tunnel);
	if (ret)
		return ret;

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	hw_prof = hw->fdir_prof[ptype];
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, ICE_BLK_FD, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}
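
/* Translate ICE_INSET_* bits into the ice_flow_field indexes understood by
 * the shared flow code; the output array is terminated by
 * ICE_FLOW_FIELD_IDX_MAX entries pre-filled by the caller.
 */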
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint16_t i, j;
	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_ETHERTYPE, ICE_FLOW_FIELD_IDX_ETH_TYPE},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV4_PKID, ICE_FLOW_FIELD_IDX_IPV4_ID},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_IPV6_PKID, ICE_FLOW_FIELD_IDX_IPV6_ID},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_IP_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
		{ICE_INSET_VXLAN_VNI, ICE_FLOW_FIELD_IDX_VXLAN_VNI},
		{ICE_INSET_ESP_SPI, ICE_FLOW_FIELD_IDX_ESP_SPI},
		{ICE_INSET_NAT_T_ESP_SPI, ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}

static void
ice_fdir_input_set_hdrs(enum ice_fltr_ptype flow, struct ice_flow_seg_info *seg)
{
	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_FRAG_IPV4:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_FRAG);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_FRAG_IPV6:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_FRAG);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP:
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_GTPU_IP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NON_IP_L2:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH_NON_IP);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV4 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_NAT_T_ESP |
				  ICE_FLOW_SEG_HDR_IPV6 |
				  ICE_FLOW_SEG_HDR_IPV_OTHER);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}
}
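
/* Build the flow segment(s) for a profile: seg_tun[0] describes the
 * non-tunnel/outer part and seg_tun[1] the tunnel inner part, then program
 * them into the HW profile table for both the main and the FDIR control VSI.
 */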
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t inner_input_set, uint64_t outer_input_set,
			enum ice_fdir_tunnel_type ttype)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	uint64_t input_set;
	bool is_tunnel;
	int k, i, ret = 0;

	if (!(inner_input_set | outer_input_set))
		return -EINVAL;

	seg_tun = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg_tun) * ICE_FD_HW_SEG_MAX);
	if (!seg_tun) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	/* use seg_tun[1] to record the tunnel inner part */
	for (k = 0; k <= ICE_FD_HW_SEG_TUN; k++) {
		seg = &seg_tun[k];
		input_set = (k == ICE_FD_HW_SEG_TUN) ? inner_input_set : outer_input_set;
		if (input_set == 0)
			continue;

		for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
			field[i] = ICE_FLOW_FIELD_IDX_MAX;

		ice_fdir_input_set_parse(input_set, field);

		ice_fdir_input_set_hdrs(flow, seg);

		for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
			ice_flow_set_fld(seg, field[i],
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL,
					 ICE_FLOW_FLD_OFF_INVAL, false);
		}
	}

	is_tunnel = ice_fdir_is_tunnel_profile(ttype);

	ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
				   seg_tun, flow, is_tunnel);
	if (!ret) {
		return ret;
	} else if (ret < 0) {
		rte_free(seg_tun);
		return (ret == -EEXIST) ? 0 : ret;
	} else {
		return ret;
	}
}

static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	if (ad->hw.dcf_enabled)
		return 0;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	parser = &ice_fdir_parser;

	return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_flow_parser *parser;
	struct ice_pf *pf = &ad->pf;

	if (ad->hw.dcf_enabled)
		return;

	parser = &ice_fdir_parser;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}
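
/* Program one filter rule into HW: build the programming descriptor and the
 * matching dummy packet, then push both through the FDIR programming queue.
 */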
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	filter->input.comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW;
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}

static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}

/* Check if the flow director filter exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
		      const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}

static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set_i, filter->input_set_o,
				      filter->tunnel_type);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 1);

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		goto free_counter;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
	flow->rule = NULL;

	if (filter->mark_flag == 1)
		ice_fdir_rx_parsing_enable(ad, 0);

	rte_free(filter);

	return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
		     struct rte_flow *flow,
		     struct rte_flow_query_count *flow_stats,
		     struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL,
				   "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32 bits latches the high 32 bits into a shadow
	 * register. Reading the high 32 bits returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};
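
/* An RSS action on FDIR is interpreted as a queue region: the queue list
 * must be contiguous, a power of two in size, and within the device's Rx
 * queue range.
 */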
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check if queue indexes for the queue region are continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values:"
				   " 1, 2, 4, 8, 16, 32, 64, 128, as long as the total number"
				   " of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter->input.q_index = rss->queue[0];
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

	return 0;
}

static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	const struct rte_flow_action_count *act_count;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	uint32_t counter_num = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
			    pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			ret = ice_fdir_parse_action_qregion(pf,
							    error, actions, filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;
			filter->mark_flag = 1;
			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			counter_num++;

			act_count = actions->conf;
			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
			rte_memcpy(&filter->act_count, act_count,
				   sizeof(filter->act_count));
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	if (counter_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many count actions");
		return -rte_errno;
	}

	if (dest_num + mark_num + counter_num == 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Empty action");
		return -rte_errno;
	}

	/* set default action to PASSTHRU mode, in the "mark/count only" case */
	if (dest_num == 0)
		filter->input.dest_ctl =
			ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;

	return 0;
}

static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
					*ipv6_frag_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	const struct rte_flow_item_esp *esp_spec, *esp_mask;
	uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
	uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
	uint64_t *input_set;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;
	uint16_t ether_type;
	enum rte_flow_item_type next_type;
	bool is_outer = true;
	struct ice_fdir_extra *p_ext_data;
	struct ice_fdir_v4 *p_v4 = NULL;
	struct ice_fdir_v6 *p_v6 = NULL;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
		/* To align with shared code behavior, save GTPU outer
		 * fields in the inner struct.
		 */
		if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
		    item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
			is_outer = false;
		}
	}

	/* This loop parses the flow pattern and distinguishes non-tunnel and
	 * tunnel flows. input_set_i is used for the inner part.
	 */
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_type = item->type;

		if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
				    item_type ==
				    RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Range not supported");
			return -rte_errno;
		}

		input_set = (tunnel_type && !is_outer) ?
			    &input_set_i : &input_set_o;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (!(eth_spec && eth_mask))
				break;

			if (!rte_is_zero_ether_addr(&eth_mask->dst))
				*input_set |= ICE_INSET_DMAC;
			if (!rte_is_zero_ether_addr(&eth_mask->src))
				*input_set |= ICE_INSET_SMAC;

			next_type = (item + 1)->type;
			/* Ignore this field except for ICE_FLTR_PTYPE_NON_IP_L2 */
			if (eth_mask->type == RTE_BE16(0xffff) &&
			    next_type == RTE_FLOW_ITEM_TYPE_END) {
				*input_set |= ICE_INSET_ETHERTYPE;
				ether_type = rte_be_to_cpu_16(eth_spec->type);

				if (ether_type == RTE_ETHER_TYPE_IPV4 ||
				    ether_type == RTE_ETHER_TYPE_IPV6) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Unsupported ether_type.");
					return -rte_errno;
				}
			}

			p_ext_data = (tunnel_type && is_outer) ?
				     &filter->input.ext_data_outer :
				     &filter->input.ext_data;
			rte_memcpy(&p_ext_data->src_mac,
				   &eth_spec->src, RTE_ETHER_ADDR_LEN);
			rte_memcpy(&p_ext_data->dst_mac,
				   &eth_spec->dst, RTE_ETHER_ADDR_LEN);
			rte_memcpy(&p_ext_data->ether_type,
				   &eth_spec->type, sizeof(eth_spec->type));
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_last = item->last;
			ipv4_mask = item->mask;
			p_v4 = (tunnel_type && is_outer) ?
			       &filter->input.ip_outer.v4 :
			       &filter->input.ip.v4;

			if (!(ipv4_spec && ipv4_mask))
				break;

			/* Check IPv4 mask and update input set */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}

			if (ipv4_last &&
			    (ipv4_last->hdr.version_ihl ||
			     ipv4_last->hdr.type_of_service ||
			     ipv4_last->hdr.time_to_live ||
			     ipv4_last->hdr.total_length ||
			     ipv4_last->hdr.next_proto_id ||
			     ipv4_last->hdr.hdr_checksum ||
			     ipv4_last->hdr.src_addr ||
			     ipv4_last->hdr.dst_addr)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 last.");
				return -rte_errno;
			}

			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
				*input_set |= ICE_INSET_IPV4_DST;
			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
				*input_set |= ICE_INSET_IPV4_SRC;
			if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
				*input_set |= ICE_INSET_IPV4_TTL;
			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
				*input_set |= ICE_INSET_IPV4_PROTO;
			if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
				*input_set |= ICE_INSET_IPV4_TOS;

			p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
			p_v4->src_ip = ipv4_spec->hdr.src_addr;
			p_v4->ttl = ipv4_spec->hdr.time_to_live;
			p_v4->proto = ipv4_spec->hdr.next_proto_id;
			p_v4->tos = ipv4_spec->hdr.type_of_service;

			/* fragment IPv4 packet:
			 * spec is 0x2000, mask is 0x2000
			 */
			if (ipv4_spec->hdr.fragment_offset ==
			    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
			    ipv4_mask->hdr.fragment_offset ==
			    rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
				/* all IPv4 fragment packets have the same
				 * ethertype; if the spec and mask are valid,
				 * set ethertype into the input set.
				 */
				flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
				*input_set |= ICE_INSET_ETHERTYPE;
				input_set_o |= ICE_INSET_ETHERTYPE;
			} else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 mask.");
				return -rte_errno;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;
			p_v6 = (tunnel_type && is_outer) ?
			       &filter->input.ip_outer.v6 :
			       &filter->input.ip.v6;

			if (!(ipv6_spec && ipv6_mask))
				break;

			/* Check IPv6 mask and update input set */
			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
				return -rte_errno;
			}

			if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.src_addr)))
				*input_set |= ICE_INSET_IPV6_SRC;
			if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
				    RTE_DIM(ipv6_mask->hdr.dst_addr)))
				*input_set |= ICE_INSET_IPV6_DST;

			if ((ipv6_mask->hdr.vtc_flow &
			     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
			    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				*input_set |= ICE_INSET_IPV6_TC;
			if (ipv6_mask->hdr.proto == UINT8_MAX)
				*input_set |= ICE_INSET_IPV6_NEXT_HDR;
			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
				*input_set |= ICE_INSET_IPV6_HOP_LIMIT;

			rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
			rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
			vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
			p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
			p_v6->proto = ipv6_spec->hdr.proto;
			p_v6->hlim = ipv6_spec->hdr.hop_limits;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
			flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
			ipv6_frag_spec = item->spec;
			ipv6_frag_mask = item->mask;

			if (!(ipv6_frag_spec && ipv6_frag_mask))
				break;

			/* fragment IPv6 packet:
			 * spec is 0x1, mask is 0x1
			 */
			if (ipv6_frag_spec->hdr.frag_data ==
			    rte_cpu_to_be_16(1) &&
			    ipv6_frag_mask->hdr.frag_data ==
			    rte_cpu_to_be_16(1)) {
				/* all IPv6 fragment packets have the same
				 * ethertype; if the spec and mask are valid,
				 * set ethertype into the input set.
				 */
				*input_set |= ICE_INSET_ETHERTYPE;
				input_set_o |= ICE_INSET_ETHERTYPE;
			} else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv6 mask.");
				return -rte_errno;
			}

			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;

			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (!(tcp_spec && tcp_mask))
				break;

			/* Check TCP mask and update input set */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			if (tcp_mask->hdr.src_port == UINT16_MAX)
				*input_set |= ICE_INSET_TCP_SRC_PORT;
			if (tcp_mask->hdr.dst_port == UINT16_MAX)
				*input_set |= ICE_INSET_TCP_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				p_v4->dst_port = tcp_spec->hdr.dst_port;
				p_v4->src_port = tcp_spec->hdr.src_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				p_v6->dst_port = tcp_spec->hdr.dst_port;
				p_v6->src_port = tcp_spec->hdr.src_port;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			l4 = RTE_FLOW_ITEM_TYPE_UDP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;

			udp_spec = item->spec;
			udp_mask = item->mask;

			if (!(udp_spec && udp_mask))
				break;

			/* Check UDP mask and update input set */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			if (udp_mask->hdr.src_port == UINT16_MAX)
				*input_set |= ICE_INSET_UDP_SRC_PORT;
			if (udp_mask->hdr.dst_port == UINT16_MAX)
				*input_set |= ICE_INSET_UDP_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				p_v4->dst_port = udp_spec->hdr.dst_port;
				p_v4->src_port = udp_spec->hdr.src_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				p_v6->src_port = udp_spec->hdr.src_port;
				p_v6->dst_port = udp_spec->hdr.dst_port;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;

			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (!(sctp_spec && sctp_mask))
				break;

			/* Check SCTP mask and update input set */
			if (sctp_mask->hdr.cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
				return -rte_errno;
			}

			if (sctp_mask->hdr.src_port == UINT16_MAX)
				*input_set |= ICE_INSET_SCTP_SRC_PORT;
			if (sctp_mask->hdr.dst_port == UINT16_MAX)
				*input_set |= ICE_INSET_SCTP_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				p_v4->dst_port = sctp_spec->hdr.dst_port;
				p_v4->src_port = sctp_spec->hdr.src_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				p_v6->dst_port = sctp_spec->hdr.dst_port;
				p_v6->src_port = sctp_spec->hdr.src_port;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			is_outer = false;

			if (!(vxlan_spec && vxlan_mask))
				break;

			if (vxlan_mask->hdr.vx_flags) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vxlan field");
				return -rte_errno;
			}

			if (vxlan_mask->hdr.vx_vni)
				*input_set |= ICE_INSET_VXLAN_VNI;

			filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;

			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (!(gtp_spec && gtp_mask))
				break;

			if (gtp_mask->v_pt_rsv_flags ||
			    gtp_mask->msg_type ||
			    gtp_mask->msg_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid GTP mask");
				return -rte_errno;
			}

			if (gtp_mask->teid == UINT32_MAX)
				input_set_o |= ICE_INSET_GTPU_TEID;

			filter->input.gtpu_data.teid = gtp_spec->teid;
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (!(gtp_psc_spec && gtp_psc_mask))
				break;

			if (gtp_psc_mask->qfi == UINT8_MAX)
				input_set_o |= ICE_INSET_GTPU_QFI;

			filter->input.gtpu_data.qfi =
				gtp_psc_spec->qfi;
			break;
		case RTE_FLOW_ITEM_TYPE_ESP:
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
			    l4 == RTE_FLOW_ITEM_TYPE_UDP)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
				 l4 == RTE_FLOW_ITEM_TYPE_UDP)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				 l4 == RTE_FLOW_ITEM_TYPE_END)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
				 l4 == RTE_FLOW_ITEM_TYPE_END)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;

			esp_spec = item->spec;
			esp_mask = item->mask;

			if (!(esp_spec && esp_mask))
				break;

			if (esp_mask->hdr.spi == UINT32_MAX) {
				if (l4 == RTE_FLOW_ITEM_TYPE_UDP)
					*input_set |= ICE_INSET_NAT_T_ESP_SPI;
				else
					*input_set |= ICE_INSET_ESP_SPI;
			}

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				filter->input.ip.v4.sec_parm_idx =
					esp_spec->hdr.spi;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				filter->input.ip.v6.sec_parm_idx =
					esp_spec->hdr.spi;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
	    flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
	else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
		 flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set_o = input_set_o;
	filter->input_set_i = input_set_i;

	return 0;
}
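
/* Top-level parse entry point called by the generic flow framework: match
 * the pattern against the table, validate the collected input sets against
 * the matched entry's masks, then parse the actions.
 */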
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       uint32_t priority,
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(ad, pattern, array, array_len,
					     error);
	if (!item)
		return -rte_errno;

	if (!ad->devargs.pipe_mode_support && priority >= 1)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		goto error;
	input_set = filter->input_set_o | filter->input_set_i;
	if (!input_set || filter->input_set_o &
	    ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
	    filter->input_set_i & ~item->input_set_mask_i) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		ret = -rte_errno;
		goto error;
	}
	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;
error:
	rte_free(item);
	return ret;
}

static struct ice_flow_parser ice_fdir_parser = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_list,
	.array_len = RTE_DIM(ice_fdir_pattern_list),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}