#include <rte_hash_crc.h>
#include "base/ice_fdir.h"
#include "base/ice_flow.h"
#include "base/ice_type.h"
#include "ice_ethdev.h"
#include "ice_rxtx.h"
#include "ice_generic_flow.h"

#define ICE_FDIR_IPV6_TC_OFFSET		20
#define ICE_IPV6_TC_MASK		(0xFF << ICE_FDIR_IPV6_TC_OFFSET)

#define ICE_FDIR_MAX_QREGION_SIZE	128
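
/*
 * Input-set bitmaps: each ICE_FDIR_INSET_* macro below lists the packet
 * fields a FDIR profile of that type is allowed to match on. Tunnel cases
 * use the ICE_INSET_TUN_* variants so inner-header fields stay distinct
 * from the outer ones.
 */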
#define ICE_FDIR_INSET_ETH_IPV4 (\
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
	ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)

#define ICE_FDIR_INSET_ETH_IPV4_UDP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_TCP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
	ICE_FDIR_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6 (\
	ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
	ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)

#define ICE_FDIR_INSET_ETH_IPV6_UDP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_TCP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)

#define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
	ICE_FDIR_INSET_ETH_IPV6 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4 (\
	ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST)

#define ICE_FDIR_INSET_VXLAN_IPV4_TCP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_UDP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT)

#define ICE_FDIR_INSET_VXLAN_IPV4_SCTP (\
	ICE_FDIR_INSET_VXLAN_IPV4 | \
	ICE_INSET_TUN_SCTP_SRC_PORT | ICE_INSET_TUN_SCTP_DST_PORT)

#define ICE_FDIR_INSET_GTPU_IPV4 (\
	ICE_INSET_GTPU_TEID | ICE_INSET_GTPU_QFI)

#define ICE_FDIR_INSET_GTPU_IPV4_TCP (\
	ICE_FDIR_INSET_GTPU_IPV4)

#define ICE_FDIR_INSET_GTPU_IPV4_UDP (\
	ICE_FDIR_INSET_GTPU_IPV4)
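
/*
 * Pattern/input-set lookup tables, one per DDP package flavor: the OS
 * default package supports plain and VXLAN-tunneled flows, while the
 * comms package additionally supports GTP-U flows.
 */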
static struct ice_pattern_match_item ice_fdir_pattern_os[] = {
	{pattern_eth_ipv4,	ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,	ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv6,	ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,	ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,	ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,	ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
	 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
	 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
	 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
	 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
	 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
	 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
	 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
	 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
};

static struct ice_pattern_match_item ice_fdir_pattern_comms[] = {
	{pattern_eth_ipv4,	ICE_FDIR_INSET_ETH_IPV4,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_FDIR_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_FDIR_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,	ICE_FDIR_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv6,	ICE_FDIR_INSET_ETH_IPV6,	ICE_INSET_NONE},
	{pattern_eth_ipv6_udp,	ICE_FDIR_INSET_ETH_IPV6_UDP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_tcp,	ICE_FDIR_INSET_ETH_IPV6_TCP,	ICE_INSET_NONE},
	{pattern_eth_ipv6_sctp,	ICE_FDIR_INSET_ETH_IPV6_SCTP,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4,
	 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_udp,
	 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_tcp,
	 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_ipv4_sctp,
	 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
	 ICE_FDIR_INSET_VXLAN_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
	 ICE_FDIR_INSET_VXLAN_IPV4_UDP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
	 ICE_FDIR_INSET_VXLAN_IPV4_TCP, ICE_INSET_NONE},
	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_sctp,
	 ICE_FDIR_INSET_VXLAN_IPV4_SCTP, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4,	ICE_FDIR_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_tcp,
	 ICE_FDIR_INSET_GTPU_IPV4, ICE_INSET_NONE},
	{pattern_eth_ipv4_gtpu_ipv4_udp,
	 ICE_FDIR_INSET_GTPU_IPV4, ICE_INSET_NONE},
};

static struct ice_flow_parser ice_fdir_parser_os;
static struct ice_flow_parser ice_fdir_parser_comms;

static const struct rte_memzone *
ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
{
	return rte_memzone_reserve_aligned(name, len, socket_id,
					   RTE_MEMZONE_IOVA_CONTIG,
					   ICE_RING_BASE_ALIGN);
}

#define ICE_FDIR_MZ_NAME	"FDIR_MEMZONE"
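
/*
 * Allocate the per-filter-type FDIR profile array hanging off the HW
 * struct; on failure, free whatever was allocated so far.
 */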
static int
ice_fdir_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->fdir_prof) {
		hw->fdir_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->fdir_prof));
		if (!hw->fdir_prof)
			return -ENOMEM;
	}
	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		if (!hw->fdir_prof[ptype]) {
			hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->fdir_prof));
			if (!hw->fdir_prof[ptype])
				goto fail_mem;
		}
	}
	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     fltr_ptype < ptype;
	     fltr_ptype++)
		rte_free(hw->fdir_prof[fltr_ptype]);
	rte_free(hw->fdir_prof);
	return -ENOMEM;
}
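
/*
 * Carve a block of "len" HW counters starting at "index_start" into a new
 * pool and append it to the container's pool list.
 */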
static int
ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
			  struct ice_fdir_counter_pool_container *container,
			  uint32_t index_start,
			  uint32_t len)
{
	struct ice_fdir_counter_pool *pool;
	uint32_t i;
	int ret = 0;

	pool = rte_zmalloc("ice_fdir_counter_pool",
			   sizeof(*pool) +
			   sizeof(struct ice_fdir_counter) * len,
			   0);
	if (!pool) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir counter pool");
		return -ENOMEM;
	}

	TAILQ_INIT(&pool->counter_list);
	TAILQ_INSERT_TAIL(&container->pool_list, pool, next);

	for (i = 0; i < len; i++) {
		struct ice_fdir_counter *counter = &pool->counters[i];

		counter->hw_index = index_start + i;
		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}

	if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
		PMD_INIT_LOG(ERR, "FDIR counter pool is full");
		ret = -EINVAL;
		goto free_pool;
	}

	container->pools[container->index_free++] = pool;
	return 0;

free_pool:
	rte_free(pool);
	return ret;
}
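
/*
 * Set up the counter container with one pool covering the counter block
 * that starts at this function's counter base (hw->fd_ctr_base).
 */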
static int
ice_fdir_counter_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
			&fdir_info->counter;
	uint32_t cnt_index, len;
	int ret;

	TAILQ_INIT(&container->pool_list);

	cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
	len = ICE_FDIR_COUNTERS_PER_BLOCK;

	ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
	if (ret) {
		PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
		return ret;
	}

	return 0;
}

static int
ice_fdir_counter_release(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
			&fdir_info->counter;
	uint8_t i;

	for (i = 0; i < container->index_free; i++)
		rte_free(container->pools[i]);

	return 0;
}
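
/* Look up a shared counter with the given ID so rules can reuse it. */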
static struct ice_fdir_counter *
ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
				 *container,
			       uint32_t id)
{
	struct ice_fdir_counter_pool *pool;
	struct ice_fdir_counter *counter;
	int i;

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
			counter = &pool->counters[i];

			if (counter->shared &&
			    counter->id == id)
				return counter;
		}
	}

	return NULL;
}
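
/*
 * Allocate a HW counter: reuse a shared counter with a matching ID when
 * possible, otherwise take the first free counter from any pool and zero
 * its statistics registers.
 */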
static struct ice_fdir_counter *
ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_counter_pool_container *container =
			&fdir_info->counter;
	struct ice_fdir_counter_pool *pool = NULL;
	struct ice_fdir_counter *counter_free = NULL;

	if (shared) {
		counter_free = ice_fdir_counter_shared_search(container, id);
		if (counter_free) {
			if (counter_free->ref_cnt + 1 == 0) {
				rte_errno = E2BIG;
				return NULL;
			}
			counter_free->ref_cnt++;
			return counter_free;
		}
	}

	TAILQ_FOREACH(pool, &container->pool_list, next) {
		counter_free = TAILQ_FIRST(&pool->counter_list);
		if (counter_free)
			break;
		counter_free = NULL;
	}

	if (!counter_free) {
		PMD_DRV_LOG(ERR, "No free counter found");
		return NULL;
	}

	counter_free->shared = shared;
	counter_free->id = id;
	counter_free->ref_cnt = 1;
	counter_free->pool = pool;

	/* reset statistic counter value */
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
	ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);

	TAILQ_REMOVE(&pool->counter_list, counter_free, next);
	if (TAILQ_EMPTY(&pool->counter_list)) {
		TAILQ_REMOVE(&container->pool_list, pool, next);
		TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
	}

	return counter_free;
}
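
/*
 * Drop one reference; the counter returns to its pool's free list when the
 * last reference goes away.
 */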
static void
ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
		      struct ice_fdir_counter *counter)
{
	if (!counter)
		return;

	if (--counter->ref_cnt == 0) {
		struct ice_fdir_counter_pool *pool = counter->pool;

		TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
	}
}
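
/*
 * Create the software view of installed rules: a CRC-hashed table keyed by
 * ice_fdir_fltr_pattern plus an index-to-entry map for fast lookup on
 * create/destroy.
 */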
static int
ice_fdir_init_filter_list(struct ice_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	char fdir_hash_name[RTE_HASH_NAMESIZE];
	int ret;

	struct rte_hash_parameters fdir_hash_params = {
		.name = fdir_hash_name,
		.entries = ICE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(struct ice_fdir_fltr_pattern),
		.hash_func = rte_hash_crc,
		.hash_func_init_val = 0,
		.socket_id = rte_socket_id(),
	};

	/* Initialize hash */
	snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
		 "fdir_%s", dev->device->name);
	fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
	if (!fdir_info->hash_table) {
		PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
		return -EINVAL;
	}
	fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
					  sizeof(*fdir_info->hash_map) *
					  ICE_MAX_FDIR_FILTER_NUM,
					  0);
	if (!fdir_info->hash_map) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate memory for fdir hash map!");
		ret = -ENOMEM;
		goto err_fdir_hash_map_alloc;
	}
	return 0;

err_fdir_hash_map_alloc:
	rte_hash_free(fdir_info->hash_table);
	return ret;
}

static void
ice_fdir_release_filter_list(struct ice_pf *pf)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;

	if (fdir_info->hash_map)
		rte_free(fdir_info->hash_map);
	if (fdir_info->hash_table)
		rte_hash_free(fdir_info->hash_table);
}

/*
 * ice_fdir_setup - reserve and initialize the Flow Director resources
 * @pf: board private structure
 */
static int
ice_fdir_setup(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	const struct rte_memzone *mz = NULL;
	char z_name[RTE_MEMZONE_NAMESIZE];
	struct ice_vsi *vsi;
	int err = ICE_SUCCESS;

	if ((pf->flags & ICE_FLAG_FDIR) == 0) {
		PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
		return -ENOTSUP;
	}

	PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
		    " fd_fltr_best_effort = %u.",
		    hw->func_caps.fd_fltr_guar,
		    hw->func_caps.fd_fltr_best_effort);

	if (pf->fdir.fdir_vsi) {
		PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
		return ICE_SUCCESS;
	}

	/* make new FDIR VSI */
	vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
	if (!vsi) {
		PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
		return -EINVAL;
	}
	pf->fdir.fdir_vsi = vsi;

	err = ice_fdir_init_filter_list(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
		return -EINVAL;
	}

	err = ice_fdir_counter_init(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
		return -EINVAL;
	}

	/*Fdir tx queue setup*/
	err = ice_fdir_setup_tx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
		goto fail_setup_tx;
	}

	/*Fdir rx queue setup*/
	err = ice_fdir_setup_rx_resources(pf);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
		goto fail_setup_rx;
	}

	err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
		goto fail_mem;
	}

	err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
	if (err) {
		PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
		goto fail_mem;
	}

	/* reserve memory for the fdir programming packet */
	snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
		 ICE_FDIR_MZ_NAME,
		 eth_dev->data->port_id);
	mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
	if (!mz) {
		PMD_DRV_LOG(ERR, "Cannot init memzone for "
			    "flow director program packet.");
		err = -ENOMEM;
		goto fail_mem;
	}
	pf->fdir.prg_pkt = mz->addr;
	pf->fdir.dma_addr = mz->iova;

	err = ice_fdir_prof_alloc(hw);
	if (err) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for "
			    "flow director profile.");
		err = -ENOMEM;
		goto fail_mem;
	}

	PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
		    vsi->base_queue);
	return ICE_SUCCESS;

fail_mem:
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
fail_setup_rx:
	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
fail_setup_tx:
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
	return err;
}

static void
ice_fdir_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++)
		rte_free(hw->fdir_prof[ptype]);

	rte_free(hw->fdir_prof);
}

/* Remove a profile for some filter type */
static void
ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fd_hw_prof *hw_prof;
	uint64_t prof_id;
	uint16_t vsi_num;
	int i;

	if (!hw->fdir_prof || !hw->fdir_prof[ptype])
		return;

	hw_prof = hw->fdir_prof[ptype];

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
		if (hw_prof->entry_h[i][is_tunnel]) {
			vsi_num = ice_get_hw_vsi_num(hw,
						     hw_prof->vsi_h[i]);
			ice_rem_prof_id_flow(hw, ICE_BLK_FD,
					     vsi_num, prof_id);
			ice_flow_rem_entry(hw,
					   hw_prof->entry_h[i][is_tunnel]);
			hw_prof->entry_h[i][is_tunnel] = 0;
		}
	}
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
	rte_free(hw_prof->fdir_seg[is_tunnel]);
	hw_prof->fdir_seg[is_tunnel] = NULL;

	for (i = 0; i < hw_prof->cnt; i++)
		hw_prof->vsi_h[i] = 0;
	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
}

/* Remove all created profiles */
static void
ice_fdir_prof_rm_all(struct ice_pf *pf)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
	     ptype < ICE_FLTR_PTYPE_MAX;
	     ptype++) {
		ice_fdir_prof_rm(pf, ptype, false);
		ice_fdir_prof_rm(pf, ptype, true);
	}
}

/*
 * ice_fdir_teardown - release the Flow Director resources
 * @pf: board private structure
 */
static void
ice_fdir_teardown(struct ice_pf *pf)
{
	struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_vsi *vsi;
	int err;

	vsi = pf->fdir.fdir_vsi;
	if (!vsi)
		return;

	err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop TX queue.");

	err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to stop RX queue.");

	err = ice_fdir_counter_release(pf);
	if (err)
		PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");

	ice_fdir_release_filter_list(pf);

	ice_tx_queue_release(pf->fdir.txq);
	pf->fdir.txq = NULL;
	ice_rx_queue_release(pf->fdir.rxq);
	pf->fdir.rxq = NULL;
	ice_fdir_prof_rm_all(pf);
	ice_fdir_prof_free(hw);
	ice_release_vsi(vsi);
	pf->fdir.fdir_vsi = NULL;
}
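
/*
 * Program one FDIR HW profile for (ptype, is_tunnel): add the flow profile
 * plus one flow entry for the main VSI and one for the control VSI. If an
 * identical segment is already installed, report -EAGAIN; replacing a
 * profile is refused while rules of that type still exist.
 */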
static int
ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
		     struct ice_vsi *ctrl_vsi,
		     struct ice_flow_seg_info *seg,
		     enum ice_fltr_ptype ptype,
		     bool is_tunnel)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_flow_dir dir = ICE_FLOW_RX;
	struct ice_flow_seg_info *ori_seg;
	struct ice_fd_hw_prof *hw_prof;
	struct ice_flow_prof *prof;
	uint64_t entry_1 = 0;
	uint64_t entry_2 = 0;
	uint16_t vsi_num;
	int ret;
	uint64_t prof_id;

	hw_prof = hw->fdir_prof[ptype];
	ori_seg = hw_prof->fdir_seg[is_tunnel];
	if (ori_seg) {
		if (!is_tunnel) {
			if (!memcmp(ori_seg, seg, sizeof(*seg)))
				return -EAGAIN;
		} else {
			if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
				return -EAGAIN;
		}

		if (pf->fdir_fltr_cnt[ptype][is_tunnel])
			return -EINVAL;

		ice_fdir_prof_rm(pf, ptype, is_tunnel);
	}

	prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
	ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
				(is_tunnel) ? 2 : 1, NULL, 0, &prof);
	if (ret)
		return ret;
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_1);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
			    ptype);
		goto err_add_prof;
	}
	ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
				 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
				 seg, NULL, 0, &entry_2);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
			    ptype);
		goto err_add_entry;
	}

	pf->hw_prof_cnt[ptype][is_tunnel] = 0;
	hw_prof->cnt = 0;
	hw_prof->fdir_seg[is_tunnel] = seg;
	hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
	pf->hw_prof_cnt[ptype][is_tunnel]++;
	hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
	hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
	pf->hw_prof_cnt[ptype][is_tunnel]++;

	return ret;

err_add_entry:
	vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
	ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
	ice_flow_rem_entry(hw, entry_1);
err_add_prof:
	ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);

	return ret;
}
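
/*
 * Translate an input-set bitmap into the list of ice_flow_field IDs used
 * by the base code; "field" must have room for ICE_FLOW_FIELD_IDX_MAX
 * entries.
 */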
static void
ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
{
	uint16_t i, j;

	struct ice_inset_map {
		uint64_t inset;
		enum ice_flow_field fld;
	};
	static const struct ice_inset_map ice_inset_map[] = {
		{ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
		{ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
		{ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
		{ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
		{ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
		{ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
		{ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
		{ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
		{ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
		{ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_TUN_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
		{ICE_INSET_TUN_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
		{ICE_INSET_TUN_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
		{ICE_INSET_TUN_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
		{ICE_INSET_TUN_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
		{ICE_INSET_TUN_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
		{ICE_INSET_TUN_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
		{ICE_INSET_TUN_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
		{ICE_INSET_GTPU_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_TEID},
		{ICE_INSET_GTPU_QFI, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI},
	};

	for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
		if ((inset & ice_inset_map[i].inset) ==
		    ice_inset_map[i].inset)
			field[j++] = ice_inset_map[i].fld;
	}
}
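
/*
 * Build the flow segment(s) describing "input_set" for the given filter
 * type and install them via ice_fdir_hw_tbl_conf(). Tunnel profiles use a
 * two-segment array (outer + inner); only the inner segment is filled here.
 */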
static int
ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
			uint64_t input_set, bool is_tunnel)
{
	struct ice_flow_seg_info *seg;
	struct ice_flow_seg_info *seg_tun = NULL;
	enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
	int i, ret;

	if (!input_set)
		return -EINVAL;

	seg = (struct ice_flow_seg_info *)
		ice_malloc(hw, sizeof(*seg));
	if (!seg) {
		PMD_DRV_LOG(ERR, "No memory can be allocated");
		return -ENOMEM;
	}

	for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
		field[i] = ICE_FLOW_FIELD_IDX_MAX;
	ice_fdir_input_set_parse(input_set, field);

	switch (flow) {
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
				  ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_ICMP:
	case ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER:
		ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_GTPU_EH |
				  ICE_FLOW_SEG_HDR_IPV4);
		break;
	default:
		PMD_DRV_LOG(ERR, "Unsupported filter type.");
		break;
	}

	for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
		ice_flow_set_fld(seg, field[i],
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	if (!is_tunnel) {
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg, flow, false);
	} else {
		seg_tun = (struct ice_flow_seg_info *)
			ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
		if (!seg_tun) {
			PMD_DRV_LOG(ERR, "No memory can be allocated");
			rte_free(seg);
			return -ENOMEM;
		}
		rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
		ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
					   seg_tun, flow, true);
	}

	if (!ret) {
		return ret;
	} else if (ret < 0) {
		rte_free(seg);
		if (is_tunnel)
			rte_free(seg_tun);
		return (ret == -EAGAIN) ? 0 : ret;
	} else {
		return ret;
	}
}
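
/*
 * Track active-filter counts, both the device-wide total and the per-type,
 * per-tunnel breakdown used to guard profile replacement.
 */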
static void
ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
		    bool is_tunnel, bool add)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int cnt;

	cnt = (add) ? 1 : -1;
	hw->fdir_active_fltr += cnt;
	if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
		PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
	else
		pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
}

static int
ice_fdir_init(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;
	int ret;

	ret = ice_fdir_setup(pf);
	if (ret)
		return ret;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else
		parser = &ice_fdir_parser_os;

	return ice_register_parser(parser, ad);
}

static void
ice_fdir_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_flow_parser *parser;

	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
		parser = &ice_fdir_parser_comms;
	else
		parser = &ice_fdir_parser_os;

	ice_unregister_parser(parser, ad);

	ice_fdir_teardown(pf);
}

static int
ice_fdir_is_tunnel_profile(enum ice_fdir_tunnel_type tunnel_type)
{
	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN)
		return 1;
	else
		return 0;
}
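
/*
 * Program or remove one filter in HW: build the programming descriptor and
 * the matching dummy packet, then push both through the FDIR programming
 * queue.
 */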
static int
ice_fdir_add_del_filter(struct ice_pf *pf,
			struct ice_fdir_filter_conf *filter,
			bool add)
{
	struct ice_fltr_desc desc;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
	bool is_tun;
	int ret;

	filter->input.dest_vsi = pf->main_vsi->idx;

	memset(&desc, 0, sizeof(desc));
	ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	memset(pkt, 0, ICE_FDIR_PKT_LEN);
	ret = ice_fdir_get_gen_prgm_pkt(hw, &filter->input, pkt, false, is_tun);
	if (ret) {
		PMD_DRV_LOG(ERR, "Generate dummy packet failed");
		return -EINVAL;
	}

	return ice_fdir_programming(pf, &desc);
}
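
/*
 * Condense the fields that identify a rule into the fixed-size key used by
 * the software hash table.
 */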
static void
ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
			  struct ice_fdir_filter_conf *filter)
{
	struct ice_fdir_fltr *input = &filter->input;
	memset(key, 0, sizeof(*key));

	key->flow_type = input->flow_type;
	rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
	rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
	rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
	rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));

	rte_memcpy(&key->gtpu_data, &input->gtpu_data, sizeof(key->gtpu_data));
	rte_memcpy(&key->gtpu_mask, &input->gtpu_mask, sizeof(key->gtpu_mask));

	key->tunnel_type = filter->tunnel_type;
}

/* Check if there exists the flow director filter */
static struct ice_fdir_filter_conf *
ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
		      const struct ice_fdir_fltr_pattern *key)
{
	int ret;

	ret = rte_hash_lookup(fdir_info->hash_table, key);
	if (ret < 0)
		return NULL;

	return fdir_info->hash_map[ret];
}

/* Add a flow director entry into the SW list */
static int
ice_fdir_entry_insert(struct ice_pf *pf,
		      struct ice_fdir_filter_conf *entry,
		      struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_add_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to insert fdir entry to hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = entry;

	return 0;
}

/* Delete a flow director entry from the SW list */
static int
ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
{
	struct ice_fdir_info *fdir_info = &pf->fdir;
	int ret;

	ret = rte_hash_del_key(fdir_info->hash_table, key);
	if (ret < 0) {
		PMD_DRV_LOG(ERR,
			    "Failed to delete fdir filter from hash table %d!",
			    ret);
		return ret;
	}
	fdir_info->hash_map[ret] = NULL;

	return 0;
}
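
/*
 * Engine create op: reject duplicates via the SW table, make sure the
 * input-set profile exists, optionally allocate a counter, program the
 * rule in HW and finally record it in the SW table.
 */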
static int
ice_fdir_create_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       void *meta,
		       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = meta;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *entry, *node;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	ice_fdir_extract_fltr_key(&key, filter);
	node = ice_fdir_entry_lookup(fdir_info, &key);
	if (node) {
		rte_flow_error_set(error, EEXIST,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Rule already exists!");
		return -rte_errno;
	}

	entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
	if (!entry) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return -rte_errno;
	}

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
				      filter->input_set, is_tun);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Profile configure failed.");
		goto free_entry;
	}

	/* alloc counter for FDIR */
	if (filter->input.cnt_ena) {
		struct rte_flow_action_count *act_count = &filter->act_count;

		filter->counter = ice_fdir_counter_alloc(pf,
							 act_count->shared,
							 act_count->id);
		if (!filter->counter) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					   "Failed to alloc FDIR counter.");
			goto free_entry;
		}
		filter->input.cnt_index = filter->counter->hw_index;
	}

	ret = ice_fdir_add_del_filter(pf, filter, true);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Add filter rule failed.");
		goto free_counter;
	}

	rte_memcpy(entry, filter, sizeof(*entry));
	ret = ice_fdir_entry_insert(pf, entry, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Insert entry to table failed.");
		goto free_counter;
	}

	flow->rule = entry;
	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, true);

	return 0;

free_counter:
	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

free_entry:
	rte_free(entry);
	return -rte_errno;
}
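
/*
 * Engine destroy op: release the counter, remove the rule from HW and from
 * the SW table, then free it.
 */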
static int
ice_fdir_destroy_filter(struct ice_adapter *ad,
			struct rte_flow *flow,
			struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_info *fdir_info = &pf->fdir;
	struct ice_fdir_filter_conf *filter, *entry;
	struct ice_fdir_fltr_pattern key;
	bool is_tun;
	int ret;

	filter = (struct ice_fdir_filter_conf *)flow->rule;

	is_tun = ice_fdir_is_tunnel_profile(filter->tunnel_type);

	if (filter->counter) {
		ice_fdir_counter_free(pf, filter->counter);
		filter->counter = NULL;
	}

	ice_fdir_extract_fltr_key(&key, filter);
	entry = ice_fdir_entry_lookup(fdir_info, &key);
	if (!entry) {
		rte_flow_error_set(error, ENOENT,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Can't find entry.");
		return -rte_errno;
	}

	ret = ice_fdir_add_del_filter(pf, filter, false);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Del filter rule failed.");
		return -rte_errno;
	}

	ret = ice_fdir_entry_del(pf, &key);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Remove entry from table failed.");
		return -rte_errno;
	}

	ice_fdir_cnt_update(pf, filter->input.flow_type, is_tun, false);
	flow->rule = NULL;

	rte_free(filter);

	return 0;
}
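
/*
 * Engine query op: read the 64-bit packet-hit counter for a rule that was
 * created with a count action.
 */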
static int
ice_fdir_query_count(struct ice_adapter *ad,
		     struct rte_flow *flow,
		     struct rte_flow_query_count *flow_stats,
		     struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_fdir_filter_conf *filter = flow->rule;
	struct ice_fdir_counter *counter = filter->counter;
	uint64_t hits_lo, hits_hi;

	if (!counter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL,
				   "FDIR counters not available");
		return -rte_errno;
	}

	/*
	 * Reading the low 32-bits latches the high 32-bits into a shadow
	 * register. Reading the high 32-bit returns the value in the
	 * shadow register.
	 */
	hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
	hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));

	flow_stats->hits_set = 1;
	flow_stats->hits = hits_lo | (hits_hi << 32);
	flow_stats->bytes_set = 0;
	flow_stats->bytes = 0;

	if (flow_stats->reset) {
		/* reset statistic counter value */
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
		ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
	}

	return 0;
}
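
/*
 * FDIR flow engine ops, registered with the generic flow framework below.
 * As a usage sketch (assuming testpmd flow syntax; addresses and IDs are
 * illustrative), a rule this engine accepts could be:
 *   flow create 0 ingress pattern eth / ipv4 src is 2.2.2.3 dst is 2.2.2.5
 *   / udp src is 32 dst is 33 / end actions queue index 3 / mark id 1 / end
 */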
static struct ice_flow_engine ice_fdir_engine = {
	.init = ice_fdir_init,
	.uninit = ice_fdir_uninit,
	.create = ice_fdir_create_filter,
	.destroy = ice_fdir_destroy_filter,
	.query_count = ice_fdir_query_count,
	.type = ICE_FLOW_ENGINE_FDIR,
};
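
/*
 * Validate an RSS action used as a queue region: the queues must be
 * consecutive, within the device range, and the region size a power of two
 * no larger than ICE_FDIR_MAX_QREGION_SIZE.
 */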
static int
ice_fdir_parse_action_qregion(struct ice_pf *pf,
			      struct rte_flow_error *error,
			      const struct rte_flow_action *act,
			      struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_action_rss *rss = act->conf;
	uint32_t i;

	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	if (rss->queue_num <= 1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Queue region size can't be 0 or 1.");
		return -rte_errno;
	}

	/* check if queue index for queue region is continuous */
	for (i = 0; i < rss->queue_num - 1; i++) {
		if (rss->queue[i + 1] != rss->queue[i] + 1) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Discontinuous queue region");
			return -rte_errno;
		}
	}

	if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue region indexes.");
		return -rte_errno;
	}

	if (!(rte_is_power_of_2(rss->queue_num) &&
	      rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "The region size should be any of the following values:"
				   " 2, 4, 8, 16, 32, 64, 128 as long as the total number"
				   " of queues does not exceed the VSI allocation.");
		return -rte_errno;
	}

	filter->input.q_index = rss->queue[0];
	filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
	filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;

	return 0;
}
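
/*
 * Walk the action list and fill in the filter's destination (queue, drop,
 * passthru or queue region), optional mark ID and optional counter.
 * Exactly one destination action is required.
 */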
static int
ice_fdir_parse_action(struct ice_adapter *ad,
		      const struct rte_flow_action actions[],
		      struct rte_flow_error *error,
		      struct ice_fdir_filter_conf *filter)
{
	struct ice_pf *pf = &ad->pf;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec = NULL;
	const struct rte_flow_action_count *act_count;
	uint32_t dest_num = 0;
	uint32_t mark_num = 0;
	uint32_t counter_num = 0;
	int ret;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			dest_num++;

			act_q = actions->conf;
			filter->input.q_index = act_q->index;
			if (filter->input.q_index >=
					pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   actions,
						   "Invalid queue for FDIR.");
				return -rte_errno;
			}
			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		case RTE_FLOW_ACTION_TYPE_PASSTHRU:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
			filter->input.q_index = 0;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			dest_num++;

			ret = ice_fdir_parse_action_qregion(pf,
							    error, actions, filter);
			if (ret)
				return ret;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			mark_num++;

			mark_spec = actions->conf;
			filter->input.fltr_id = mark_spec->id;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			counter_num++;

			act_count = actions->conf;
			filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
			rte_memcpy(&filter->act_count, act_count,
				   sizeof(filter->act_count));
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, actions,
					   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Unsupported action combination");
		return -rte_errno;
	}

	if (mark_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many mark actions");
		return -rte_errno;
	}

	if (counter_num >= 2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Too many count actions");
		return -rte_errno;
	}

	return 0;
}
static int
ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
		       const struct rte_flow_item pattern[],
		       struct rte_flow_error *error,
		       struct ice_fdir_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
	const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
	const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
	};
	uint32_t vtc_flow_cpu;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Range not supported");
			return -rte_errno;
		}
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (!rte_is_zero_ether_addr(&eth_spec->src) ||
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Src mac not supported");
					return -rte_errno;
				}

				if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid mac addr mask");
					return -rte_errno;
				}

				input_set |= ICE_INSET_DMAC;
				rte_memcpy(&filter->input.ext_data.dst_mac,
					   &eth_spec->dst,
					   RTE_ETHER_ADDR_LEN);
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid IPv4 mask.");
					return -rte_errno;
				}
				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_SRC :
						     ICE_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_IPV4_DST :
						     ICE_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= ICE_INSET_IPV4_PROTO;

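				/*
				 * Note: the src/dst spec values are stored
				 * swapped on purpose. The base code builds
				 * the FDIR programming packet with source
				 * and destination transposed, so the two
				 * swaps cancel out and the installed rule
				 * matches the user-supplied spec. The IPv6
				 * and L4 port assignments below follow the
				 * same pattern.
				 */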
				filter->input.ip.v4.dst_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.ip.v4.src_ip =
					ipv4_spec->hdr.dst_addr;
				filter->input.ip.v4.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.ip.v4.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.ip.v4.proto =
					ipv4_spec->hdr.next_proto_id;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= ICE_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= ICE_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
					input_set |= ICE_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= ICE_INSET_IPV6_HOP_LIMIT;

				rte_memcpy(filter->input.ip.v6.dst_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.ip.v6.src_ip,
					   ipv6_spec->hdr.dst_addr, 16);

				vtc_flow_cpu =
					rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
				filter->input.ip.v6.tc =
					(uint8_t)(vtc_flow_cpu >>
						  ICE_FDIR_IPV6_TC_OFFSET);
				filter->input.ip.v6.proto =
					ipv6_spec->hdr.proto;
				filter->input.ip.v6.hlim =
					ipv6_spec->hdr.hop_limits;
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_SRC_PORT :
						     ICE_INSET_TCP_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_TCP_DST_PORT :
						     ICE_INSET_TCP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_TCP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						tcp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						tcp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_TCP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set*/
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_SRC_PORT :
						     ICE_INSET_UDP_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_UDP_DST_PORT :
						     ICE_INSET_UDP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_UDP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.src_port =
						udp_spec->hdr.dst_port;
					filter->input.ip.v6.dst_port =
						udp_spec->hdr.src_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_UDP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_SRC_PORT :
						     ICE_INSET_SCTP_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= tunnel_type ?
						     ICE_INSET_TUN_SCTP_DST_PORT :
						     ICE_INSET_SCTP_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.ip.v6.dst_port =
						sctp_spec->hdr.src_port;
					filter->input.ip.v6.src_port =
						sctp_spec->hdr.dst_port;
					flow_type =
						ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;

			if (vxlan_spec || vxlan_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vxlan field");
				return -rte_errno;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_GTPU:
			l3 = RTE_FLOW_ITEM_TYPE_END;
			gtp_spec = item->spec;
			gtp_mask = item->mask;

			if (gtp_spec && gtp_mask) {
				if (gtp_mask->v_pt_rsv_flags ||
				    gtp_mask->msg_type ||
				    gtp_mask->msg_len) {
					rte_flow_error_set(error, EINVAL,
							   RTE_FLOW_ERROR_TYPE_ITEM,
							   item,
							   "Invalid GTP mask");
					return -rte_errno;
				}

				if (gtp_mask->teid == UINT32_MAX)
					input_set |= ICE_INSET_GTPU_TEID;

				filter->input.gtpu_data.teid = gtp_spec->teid;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
			gtp_psc_spec = item->spec;
			gtp_psc_mask = item->mask;

			if (gtp_psc_spec && gtp_psc_mask) {
				if (gtp_psc_mask->qfi == UINT8_MAX)
					input_set |= ICE_INSET_GTPU_QFI;

				filter->input.gtpu_data.qfi =
					gtp_psc_spec->qfi;
			}

			tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Invalid pattern item.");
			return -rte_errno;
		}
	}

	if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU)
		flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_IPV4_OTHER;

	filter->tunnel_type = tunnel_type;
	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}
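
/*
 * Parser entry point: match the pattern against the table, parse pattern
 * and actions into pf->fdir.conf, and verify the collected input set is a
 * subset of what the matched pattern supports.
 */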
static int
ice_fdir_parse(struct ice_adapter *ad,
	       struct ice_pattern_match_item *array,
	       uint32_t array_len,
	       const struct rte_flow_item pattern[],
	       const struct rte_flow_action actions[],
	       void **meta,
	       struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(pattern, array, array_len, error);
	if (!item)
		return -rte_errno;

	ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
	if (ret)
		return ret;
	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		return -rte_errno;
	}
	ret = ice_fdir_parse_action(ad, actions, error, filter);
	if (ret)
		return ret;

	if (meta)
		*meta = filter;

	return 0;
}

static struct ice_flow_parser ice_fdir_parser_os = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_os,
	.array_len = RTE_DIM(ice_fdir_pattern_os),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

static struct ice_flow_parser ice_fdir_parser_comms = {
	.engine = &ice_fdir_engine,
	.array = ice_fdir_pattern_comms,
	.array_len = RTE_DIM(ice_fdir_pattern_comms),
	.parse_pattern_action = ice_fdir_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_fdir_engine_register)
{
	ice_register_flow_engine(&ice_fdir_engine);
}