#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h" /* FDIR queue setup/start/stop helpers used below */
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)
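/*
 * The IPv6 vtc_flow word is laid out as version (4 bits), traffic class
 * (8 bits), then flow label (20 bits), so once the word is in CPU order
 * the traffic-class field starts at bit offset 20.
 */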
#define ICE_FDIR_MAX_QREGION_SIZE	128

#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)
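/*
 * Two pattern tables are kept below: ice_fdir_pattern_os covers what the
 * OS-default DDP package can classify, while ice_fdir_pattern_comms adds
 * the GTP-U pattern that only the comms package supports. The matching
 * parser is selected at init time (see ice_fdir_init()).
 */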
static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
	{pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
	 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
	 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
	 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
	 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
	 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
	 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
	 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
	 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
	{pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
	{pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
	 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
	 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
	 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
	 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
	 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
	 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
	 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
	 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4, ICE_FDIR_INSET_GTPU_IPV4, ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	const struct rte_memzone *mz;

	/* Reuse an existing memzone so repeated setup calls stay idempotent */
	mz = rte_memzone_lookup(name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}
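/*
 * The IOVA-contiguous request matters here: the programming packet buffer
 * reserved from this memzone is handed to hardware by physical address
 * (pf->fdir.dma_addr below), so it must not be split across pages.
 */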
#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"

static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	/* Unwind only the per-ptype profiles allocated so far */
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype;
	     fltr_ptype++)
		rte_free(hw->fdir_prof[fltr_ptype]);

	rte_free(hw->fdir_prof);
	hw->fdir_prof = NULL;

	return -ENOMEM;
}

static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	/* Seed the pool's free list with its hardware counter indexes */
	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		/* Unlink before freeing to avoid a dangling list entry */
		TAILQ_REMOVE(&container->pool_list, pool, next);
		rte_free(pool);
		return -EINVAL;
	}

	container->pools[container->index_free++] = pool;

	return 0;
}

static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
		&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
		&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++)
		rte_free(container->pools[i]);

	TAILQ_INIT(&container->pool_list);
	container->index_free = 0;

	return 0;
}

static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
				       *container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}

static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
		&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		/*
		 * Rotate the drained pool to the list tail so later
		 * allocations reach pools with free counters first.
		 */
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}

static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}

static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
		/* extendable buckets keep inserts working when a bucket fills */
		.extra_flag = RTE_HASH_EXTRA_FLAGS_EXT_TABLE,
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}

	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);

	return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		return -EINVAL;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/*Fdir tx queue setup*/
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/*Fdir rx queue setup*/
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;
	pf->fdir.mz = mz;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_prof;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_prof:
	rte_memzone_free(pf->fdir.mz);
	pf->fdir.mz = NULL;
fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++)
		rte_free(hw->fdir_prof[ptype]);

	rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	/* Tunnel profiles use a second ID space offset by ICE_FLTR_PTYPE_MAX */
	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;

	if (pf->fdir.mz) {
		err = rte_memzone_free(pf->fdir.mz);
		pf->fdir.mz = NULL;
		if (err)
			PMD_DRV_LOG(ERR, "Failed to free FDIR memzone.");
	}
}

static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	uint64_t prof_id;
	int ret;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];
	if (ori_seg) {
		/* For tunnels, only the inner (second) segment can differ */
		if (!is_tunnel) {
			if (!memcmp(ori_seg, seg, sizeof(*seg)))
				return -EAGAIN;
		} else {
			if (!memcmp(&ori_seg[1], &seg[1], sizeof(*seg)))
				return -EAGAIN;
		}

		if (pf->fdir_fltr_cnt[ptype][is_tunnel])
			return -EINVAL;

		ice_fdir_prof_rm(pf, ptype, is_tunnel);
	}

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}
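/*
 * Each FDIR profile above carries two flow entries, one bound to the main
 * VSI and one to the FDIR control VSI (entry_1/entry_2); the teardown path
 * in ice_fdir_prof_rm() walks and removes the same pair.
 */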
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint16_t i, j;
	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		/*
		 * Tunnel insets map onto the same flow field IDs as the
		 * plain ones; the caller decides which protocol segment
		 * (outer or inner) they are applied to.
		 */
		{ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}

static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t input_set, bool is_tunnel)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	int i, ret;

	if (!input_set)
		return -EINVAL;

	seg = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg));
	if (!seg) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
		field[i] = ICE_FLOW_FIELD_IDX_MAX;
	ice_fdir_input_set_parse(input_set, field);

	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	default:
		PMD_DRV_LOG(ERR, "Not supported filter type.");
		break;
	}

	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
		ice_flow_set_fld(seg, field[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	if (!is_tunnel) {
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg, flow, false);
	} else {
		seg_tun = (struct ice_flow_seg_info *)
			ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
		if (!seg_tun) {
			PMD_DRV_LOG(ERR, "No memory can be allocated");
			rte_free(seg);
			return -ENOMEM;
		}
		rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg_tun, flow, true);
	}

	if (!ret) {
		return ret;
	} else if (ret < 0) {
		rte_free(seg);
		if (is_tunnel)
			rte_free(seg_tun);
		return (ret == -EAGAIN) ? 0 : ret;
	} else {
		return ret;
	}
}
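/*
 * On success the segment is stored in hw_prof->fdir_seg[] and owned by the
 * profile until ice_fdir_prof_rm() frees it; it is only released here on
 * failure. -EAGAIN from ice_fdir_hw_tbl_conf() means an identical profile
 * already exists, which is treated as success.
 */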
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
		parser = &ice_fdir_parser_os;
	else
		return -EINVAL;

	return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else
		parser = &ice_fdir_parser_os;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}

static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}
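/*
 * FDIR rules are programmed by handing hardware a descriptor plus a dummy
 * packet carrying the match fields: ice_fdir_get_prgm_desc() encodes the
 * action side (queue, drop, counter index), ice_fdir_get_gen_prgm_pkt()
 * builds the packet image in prg_pkt, and ice_fdir_programming() submits
 * the pair on the FDIR control VSI's TX queue.
 */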
static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}

/* Check whether the flow director filter already exists */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
		      const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}
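/*
 * rte_hash_add_key()/rte_hash_del_key() return the key's stable position
 * in the table on success, which is why hash_map can be a flat array of
 * ICE_MAX_FDIR_FILTER_NUM entries indexed by that return value.
 */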
static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set, is_tun);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		goto free_counter;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}

static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
	flow->rule = NULL;

	rte_free(filter);

	return 0;
}

static int
ice_fdir_query_count(struct ice_adapter *ad,
		     struct rte_flow *flow,
		     struct rte_flow_query_count *flow_stats,
		     struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL,
				   "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32-bits latches the high 32-bits into a shadow
	 * register. Reading the high 32-bits returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}

static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};
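/*
 * For illustration, a testpmd rule this engine accepts (assuming port 0)
 * could look like:
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1 \
 *     dst is 192.168.0.2 / udp src is 32 dst is 33 / end \
 *     actions queue index 3 / mark id 7 / count / end
 *
 * The pattern maps to pattern_eth_ipv4_udp with an input set of IPv4
 * src/dst plus UDP ports, and the actions become q_index = 3,
 * fltr_id = 7 and a packet counter on the programmed filter.
 */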
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check that the queue region indexes are contiguous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values: "
				   "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
				   "of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter->input.q_index = rss->queue[0];
	/* the region size is carried as a power-of-two exponent */
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

	return 0;
}
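/*
 * Example: an RSS action listing queues 4,5,6,7 passes the checks above
 * (contiguous, power of two, in range) and is encoded as q_index = 4 with
 * q_region = log2(4) = 2, so matched packets are spread over that
 * four-queue group.
 */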
static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	const struct rte_flow_action_count *act_count;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	uint32_t counter_num = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
					pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			filter->input.q_index = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			ret = ice_fdir_parse_action_qregion(pf,
							    error,
							    actions, filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			counter_num++;

			act_count = actions->conf;
			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
			rte_memcpy(&filter->act_count, act_count,
				   sizeof(filter->act_count));
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	/* exactly one destination action (queue/drop/passthru/rss) */
	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	if (counter_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many count actions");
		return -rte_errno;
	}

	return 0;
}

static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Range not supported");
			return -rte_errno;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Source MAC not supported");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid mac addr mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_SRC :
						     ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_DST :
						     ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

				/*
				 * The apparent src/dst swap below is
				 * intentional, not a typo: it matches how
				 * the base code builds the FDIR
				 * programming packet from this input.
				 */
				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

				vtc_flow_cpu =
					rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_SRC_PORT :
						     ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_DST_PORT :
						     ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_TCP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_SRC_PORT :
						     ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_DST_PORT :
						     ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_UDP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_SRC_PORT :
						     ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_DST_PORT :
						     ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vxlan field");
				return -rte_errno;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX)
					input_set |= ICE_INSET_GTPU_TEID;

				filter->input.gtpu_data.teid = gtp_spec->teid;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX)
					input_set |= ICE_INSET_GTPU_QFI;

				filter->input.gtpu_data.qfi =
					gtp_psc_spec->qfi;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}

	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	if (meta)
		*meta = filter;

	return 0;
}

static struct ice_flow_parser ice_fdir_parser_os = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_os,
	.array_len = RTE_DIM(ice_fdir_pattern_os),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_comms,
	.array_len = RTE_DIM(ice_fdir_pattern_comms),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}