4 #include <rte_hash_crc.h>
5 #include "base/ice_fdir.h"
6 #include "base/ice_flow.h"
7 #include "base/ice_type.h"
8 #include "ice_ethdev.h"
10 #include "ice_generic_flow.h"
/* IPv6 Traffic Class occupies bits 20..27 of the vtc_flow word. */
12 #define ICE_FDIR_IPV6_TC_OFFSET 20
13 #define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
/* Largest queue region an FDIR RSS (queue-group) action may span. */
15 #define ICE_FDIR_MAX_QREGION_SIZE 128
/* Input-set bitmaps: which header fields each FDIR flow type may match.
 * The UDP/TCP/SCTP variants extend the base L3 set with L4 ports.
 */
17 #define ICE_FDIR_INSET_ETH_IPV4 (\
19 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
20 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
22 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
23 ICE_FDIR_INSET_ETH_IPV4 | \
24 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
26 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
27 ICE_FDIR_INSET_ETH_IPV4 | \
28 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
30 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
31 ICE_FDIR_INSET_ETH_IPV4 | \
32 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
34 #define ICE_FDIR_INSET_ETH_IPV6 (\
35 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
36 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
38 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
39 ICE_FDIR_INSET_ETH_IPV6 | \
40 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
42 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
43 ICE_FDIR_INSET_ETH_IPV6 | \
44 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
46 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
47 ICE_FDIR_INSET_ETH_IPV6 | \
48 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
/* rte_flow patterns the FDIR engine accepts; each row pairs a pattern
 * with the input-set it supports (third field is ICE_INSET_NONE meta).
 * NOTE(review): the array's closing "};" is not visible in this extract.
 */
50 static struct ice_pattern_match_item ice_fdir_pattern[] = {
51 {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
52 {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
53 {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
54 {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
55 {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
56 {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
57 {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
58 {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
/* Forward declaration; registered with the generic flow layer in
 * ice_fdir_init() via ice_register_parser().
 */
61 static struct ice_flow_parser ice_fdir_parser;
/* Thin wrapper over rte_memzone_reserve_aligned() requesting an
 * IOVA-contiguous zone; the FDIR programming packet is DMA'd from it
 * (see pf->fdir.dma_addr assignment in ice_fdir_setup()).
 */
63 static const struct rte_memzone *
64 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
66 return rte_memzone_reserve_aligned(name, len, socket_id,
67 RTE_MEMZONE_IOVA_CONTIG,
/* Name stem of the memzone reserved for the programming packet. */
71 #define ICE_FDIR_MZ_NAME "FDIR_MEMZONE"
/* Allocate the per-flow-type FDIR profile table (hw->fdir_prof) plus one
 * ice_fd_hw_prof per filter ptype; on a mid-loop allocation failure the
 * already-allocated entries and the table are freed.
 * NOTE(review): loop increments and return statements are missing from
 * this extract — confirm error paths against the full source.
 */
74 ice_fdir_prof_alloc(struct ice_hw *hw)
76 enum ice_fltr_ptype ptype, fltr_ptype;
79 hw->fdir_prof = (struct ice_fd_hw_prof **)
80 ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
81 sizeof(*hw->fdir_prof));
85 for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
86 ptype < ICE_FLTR_PTYPE_MAX;
88 if (!hw->fdir_prof[ptype]) {
89 hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
90 ice_malloc(hw, sizeof(**hw->fdir_prof));
91 if (!hw->fdir_prof[ptype])
/* Unwind: free every per-ptype profile allocated so far, then the table. */
98 for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
101 rte_free(hw->fdir_prof[fltr_ptype]);
102 rte_free(hw->fdir_prof);
/* Create one counter pool of `len` HW counters starting at hw_index
 * `index_start`, link each counter onto the pool's free list, and record
 * the pool in the container (bounded by ICE_FDIR_COUNTER_MAX_POOL_SIZE).
 */
107 ice_fdir_counter_pool_add(__rte_unused struct ice_pf *pf,
108 struct ice_fdir_counter_pool_container *container,
109 uint32_t index_start,
112 struct ice_fdir_counter_pool *pool;
116 pool = rte_zmalloc("ice_fdir_counter_pool",
118 sizeof(struct ice_fdir_counter) * len,
122 "Failed to allocate memory for fdir counter pool");
126 TAILQ_INIT(&pool->counter_list);
127 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
129 for (i = 0; i < len; i++) {
130 struct ice_fdir_counter *counter = &pool->counters[i];
132 counter->hw_index = index_start + i;
133 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
/* Container's fixed pools[] array is full — cannot track another pool. */
136 if (container->index_free == ICE_FDIR_COUNTER_MAX_POOL_SIZE) {
137 PMD_INIT_LOG(ERR, "FDIR counter pool is full");
142 container->pools[container->index_free++] = pool;
/* Initialize the PF's FDIR counter container with one pool covering a
 * block of counters starting at the HW counter base (hw->fd_ctr_base).
 */
151 ice_fdir_counter_init(struct ice_pf *pf)
153 struct ice_hw *hw = ICE_PF_TO_HW(pf);
154 struct ice_fdir_info *fdir_info = &pf->fdir;
155 struct ice_fdir_counter_pool_container *container =
157 uint32_t cnt_index, len;
160 TAILQ_INIT(&container->pool_list);
162 cnt_index = ICE_FDIR_COUNTER_INDEX(hw->fd_ctr_base);
163 len = ICE_FDIR_COUNTERS_PER_BLOCK;
165 ret = ice_fdir_counter_pool_add(pf, container, cnt_index, len);
167 PMD_INIT_LOG(ERR, "Failed to add fdir pool to container");
/* Free every counter pool tracked by the PF's FDIR counter container. */
175 ice_fdir_counter_release(struct ice_pf *pf)
177 struct ice_fdir_info *fdir_info = &pf->fdir;
178 struct ice_fdir_counter_pool_container *container =
182 for (i = 0; i < container->index_free; i++)
183 rte_free(container->pools[i]);
/* Linear scan of all pools for an in-use shared counter whose id matches,
 * so rules declaring the same shared counter id reuse one HW counter
 * (see ice_fdir_counter_alloc()).
 */
188 static struct ice_fdir_counter *
189 ice_fdir_counter_shared_search(struct ice_fdir_counter_pool_container
193 struct ice_fdir_counter_pool *pool;
194 struct ice_fdir_counter *counter;
197 TAILQ_FOREACH(pool, &container->pool_list, next) {
198 for (i = 0; i < ICE_FDIR_COUNTERS_PER_BLOCK; i++) {
199 counter = &pool->counters[i];
201 if (counter->shared &&
/* Allocate an FDIR counter. For a shared counter, first try to reuse an
 * existing counter with the same id (bump ref_cnt, guarding against
 * overflow). Otherwise take the first free counter found in the pool
 * lists, initialize it, and zero its HW statistic registers.
 */
211 static struct ice_fdir_counter *
212 ice_fdir_counter_alloc(struct ice_pf *pf, uint32_t shared, uint32_t id)
214 struct ice_hw *hw = ICE_PF_TO_HW(pf);
215 struct ice_fdir_info *fdir_info = &pf->fdir;
216 struct ice_fdir_counter_pool_container *container =
218 struct ice_fdir_counter_pool *pool = NULL;
219 struct ice_fdir_counter *counter_free = NULL;
222 counter_free = ice_fdir_counter_shared_search(container, id);
/* ref_cnt would wrap to zero — refuse another reference. */
224 if (counter_free->ref_cnt + 1 == 0) {
228 counter_free->ref_cnt++;
233 TAILQ_FOREACH(pool, &container->pool_list, next) {
234 counter_free = TAILQ_FIRST(&pool->counter_list);
241 PMD_DRV_LOG(ERR, "No free counter found\n");
245 counter_free->shared = shared;
246 counter_free->id = id;
247 counter_free->ref_cnt = 1;
248 counter_free->pool = pool;
250 /* reset statistic counter value */
251 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter_free->hw_index), 0);
252 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter_free->hw_index), 0);
254 TAILQ_REMOVE(&pool->counter_list, counter_free, next);
255 if (TAILQ_EMPTY(&pool->counter_list)) {
/* Pool drained: rotate it to the container tail so pools with free
 * counters are encountered first on the next allocation.
 */
256 TAILQ_REMOVE(&container->pool_list, pool, next);
257 TAILQ_INSERT_TAIL(&container->pool_list, pool, next);
/* Drop one reference to a counter; when the last reference is released,
 * return the counter to its owning pool's free list.
 */
264 ice_fdir_counter_free(__rte_unused struct ice_pf *pf,
265 struct ice_fdir_counter *counter)
270 if (--counter->ref_cnt == 0) {
271 struct ice_fdir_counter_pool *pool = counter->pool;
273 TAILQ_INSERT_TAIL(&pool->counter_list, counter, next);
/* Create the SW shadow of installed FDIR rules: a CRC-hashed rte_hash
 * keyed on ice_fdir_fltr_pattern, plus a hash_map array translating
 * hash slot indices to filter entries. The hash table is freed if the
 * map allocation fails.
 */
278 ice_fdir_init_filter_list(struct ice_pf *pf)
280 struct rte_eth_dev *dev = pf->adapter->eth_dev;
281 struct ice_fdir_info *fdir_info = &pf->fdir;
282 char fdir_hash_name[RTE_HASH_NAMESIZE];
285 struct rte_hash_parameters fdir_hash_params = {
286 .name = fdir_hash_name,
287 .entries = ICE_MAX_FDIR_FILTER_NUM,
288 .key_len = sizeof(struct ice_fdir_fltr_pattern),
289 .hash_func = rte_hash_crc,
290 .hash_func_init_val = 0,
291 .socket_id = rte_socket_id(),
294 /* Initialize hash */
295 snprintf(fdir_hash_name, RTE_HASH_NAMESIZE,
296 "fdir_%s", dev->device->name)
297 fdir_info->hash_table = rte_hash_create(&fdir_hash_params);
298 if (!fdir_info->hash_table) {
299 PMD_INIT_LOG(ERR, "Failed to create fdir hash table!");
302 fdir_info->hash_map = rte_zmalloc("ice_fdir_hash_map",
303 sizeof(*fdir_info->hash_map) *
304 ICE_MAX_FDIR_FILTER_NUM,
306 if (!fdir_info->hash_map) {
308 "Failed to allocate memory for fdir hash map!");
310 goto err_fdir_hash_map_alloc;
/* Error path: undo the hash-table creation above. */
314 err_fdir_hash_map_alloc:
315 rte_hash_free(fdir_info->hash_table);
/* Tear down the SW rule list built by ice_fdir_init_filter_list().
 * NOTE(review): the NULL guards are redundant — rte_free(NULL) and
 * rte_hash_free(NULL) are no-ops.
 */
321 ice_fdir_release_filter_list(struct ice_pf *pf)
323 struct ice_fdir_info *fdir_info = &pf->fdir;
325 if (fdir_info->hash_map)
326 rte_free(fdir_info->hash_map);
327 if (fdir_info->hash_table)
328 rte_hash_free(fdir_info->hash_table);
332 * ice_fdir_setup - reserve and initialize the Flow Director resources
333 * @pf: board private structure
/* Sequence: verify HW FDIR capability -> create control VSI -> SW filter
 * list -> counters -> TX/RX programming queues -> DMA memzone for the
 * programming packet -> per-ptype profile table. Idempotent: returns
 * early if pf->fdir.fdir_vsi already exists.
 * NOTE(review): the error-unwind labels between steps are missing from
 * this extract; the trailing release calls below are that unwind path.
 */
336 ice_fdir_setup(struct ice_pf *pf)
338 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
339 struct ice_hw *hw = ICE_PF_TO_HW(pf);
340 const struct rte_memzone *mz = NULL;
341 char z_name[RTE_MEMZONE_NAMESIZE];
343 int err = ICE_SUCCESS;
345 if ((pf->flags & ICE_FLAG_FDIR) == 0) {
346 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
350 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
351 " fd_fltr_best_effort = %u.",
352 hw->func_caps.fd_fltr_guar,
353 hw->func_caps.fd_fltr_best_effort);
355 if (pf->fdir.fdir_vsi) {
356 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
360 /* make new FDIR VSI */
361 vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
363 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
366 pf->fdir.fdir_vsi = vsi;
368 err = ice_fdir_init_filter_list(pf);
370 PMD_DRV_LOG(ERR, "Failed to init FDIR filter list.");
374 err = ice_fdir_counter_init(pf);
376 PMD_DRV_LOG(ERR, "Failed to init FDIR counter.");
380 /*Fdir tx queue setup*/
381 err = ice_fdir_setup_tx_resources(pf);
383 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
387 /*Fdir rx queue setup*/
388 err = ice_fdir_setup_rx_resources(pf);
390 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
394 err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
396 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
400 err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
402 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
406 /* reserve memory for the fdir programming packet */
407 snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
409 eth_dev->data->port_id);
410 mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
412 PMD_DRV_LOG(ERR, "Cannot init memzone for "
413 "flow director program packet.");
417 pf->fdir.prg_pkt = mz->addr;
418 pf->fdir.dma_addr = mz->iova;
420 err = ice_fdir_prof_alloc(hw);
422 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
423 "flow director profile.");
428 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
/* Error unwind: release resources in reverse order of acquisition. */
433 ice_rx_queue_release(pf->fdir.rxq);
436 ice_tx_queue_release(pf->fdir.txq);
439 ice_release_vsi(vsi);
440 pf->fdir.fdir_vsi = NULL;
/* Free every per-ptype profile struct, then the profile table itself;
 * inverse of ice_fdir_prof_alloc().
 */
445 ice_fdir_prof_free(struct ice_hw *hw)
447 enum ice_fltr_ptype ptype;
449 for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
450 ptype < ICE_FLTR_PTYPE_MAX;
452 rte_free(hw->fdir_prof[ptype]);
454 rte_free(hw->fdir_prof);
457 /* Remove a profile for some filter type */
/* For each tracked VSI: detach the profile's flows from the VSI and
 * remove its flow entry; then remove the HW profile itself, free the
 * cached segment, and reset the bookkeeping counters. prof_id encodes
 * ptype plus a tunnel offset, mirroring ice_fdir_hw_tbl_conf().
 */
459 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
461 struct ice_hw *hw = ICE_PF_TO_HW(pf);
462 struct ice_fd_hw_prof *hw_prof;
467 if (!hw->fdir_prof || !hw->fdir_prof[ptype])
470 hw_prof = hw->fdir_prof[ptype];
472 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
473 for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
474 if (hw_prof->entry_h[i][is_tunnel]) {
475 vsi_num = ice_get_hw_vsi_num(hw,
477 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
479 ice_flow_rem_entry(hw,
480 hw_prof->entry_h[i][is_tunnel]);
481 hw_prof->entry_h[i][is_tunnel] = 0;
484 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
485 rte_free(hw_prof->fdir_seg[is_tunnel]);
486 hw_prof->fdir_seg[is_tunnel] = NULL;
488 for (i = 0; i < hw_prof->cnt; i++)
489 hw_prof->vsi_h[i] = 0;
490 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
493 /* Remove all created profiles */
/* Both the non-tunnel and tunnel variant are removed for every ptype. */
495 ice_fdir_prof_rm_all(struct ice_pf *pf)
497 enum ice_fltr_ptype ptype;
499 for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
500 ptype < ICE_FLTR_PTYPE_MAX;
502 ice_fdir_prof_rm(pf, ptype, false);
503 ice_fdir_prof_rm(pf, ptype, true);
508 * ice_fdir_teardown - release the Flow Director resources
509 * @pf: board private structure
/* Reverse of ice_fdir_setup(): stop programming queues, release counters
 * and the SW filter list, free queues, profiles, and finally the control
 * VSI. Queue-stop failures are logged but teardown continues.
 */
512 ice_fdir_teardown(struct ice_pf *pf)
514 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
515 struct ice_hw *hw = ICE_PF_TO_HW(pf);
519 vsi = pf->fdir.fdir_vsi;
523 err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
525 PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
527 err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
529 PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
531 err = ice_fdir_counter_release(pf);
533 PMD_DRV_LOG(ERR, "Failed to release FDIR counter resource.");
535 ice_fdir_release_filter_list(pf);
537 ice_tx_queue_release(pf->fdir.txq);
539 ice_rx_queue_release(pf->fdir.rxq);
541 ice_fdir_prof_rm_all(pf);
542 ice_fdir_prof_free(hw);
543 ice_release_vsi(vsi);
544 pf->fdir.fdir_vsi = NULL;
/* Install a HW flow profile for `ptype` described by `seg`, plus two flow
 * entries binding it to the main VSI and the FDIR control VSI. If an
 * identical segment is already installed the call is a no-op; changing
 * the input set while filters of this ptype exist is rejected, otherwise
 * the old profile is removed first. On success the profile takes
 * ownership of `seg`.
 * NOTE(review): returns/gotos between steps are missing from this
 * extract — the trailing removals are the error-unwind path.
 */
548 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
549 struct ice_vsi *ctrl_vsi,
550 struct ice_flow_seg_info *seg,
551 enum ice_fltr_ptype ptype,
554 struct ice_hw *hw = ICE_PF_TO_HW(pf);
555 enum ice_flow_dir dir = ICE_FLOW_RX;
556 struct ice_flow_seg_info *ori_seg;
557 struct ice_fd_hw_prof *hw_prof;
558 struct ice_flow_prof *prof;
559 uint64_t entry_1 = 0;
560 uint64_t entry_2 = 0;
565 hw_prof = hw->fdir_prof[ptype];
566 ori_seg = hw_prof->fdir_seg[is_tunnel];
/* Same segment already programmed — nothing to do. */
569 if (!memcmp(ori_seg, seg, sizeof(*seg)))
572 if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
/* Refuse to reprogram while rules of this ptype are installed. */
576 if (pf->fdir_fltr_cnt[ptype][is_tunnel])
579 ice_fdir_prof_rm(pf, ptype, is_tunnel);
/* prof_id: ptype for non-tunnel, ptype + MAX for the tunnel variant. */
582 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
583 ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
584 (is_tunnel) ? 2 : 1, NULL, 0, &prof);
587 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
588 vsi->idx, ICE_FLOW_PRIO_NORMAL,
589 seg, NULL, 0, &entry_1);
591 PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
595 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
596 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
597 seg, NULL, 0, &entry_2);
599 PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
/* Record both VSIs and entry handles in the profile bookkeeping. */
604 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
606 hw_prof->fdir_seg[is_tunnel] = seg;
607 hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
608 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
609 pf->hw_prof_cnt[ptype][is_tunnel]++;
610 hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
611 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
612 pf->hw_prof_cnt[ptype][is_tunnel]++;
/* Error unwind: remove main-VSI entry and the profile. */
617 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
618 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
619 ice_flow_rem_entry(hw, entry_1);
621 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
/* Translate an ICE_INSET_* bitmap into the ordered list of
 * ice_flow_field IDs used by the flow engine; `field` is filled from
 * index 0 (the caller pre-fills it with ICE_FLOW_FIELD_IDX_MAX as the
 * terminator — see ice_fdir_input_set_conf()).
 */
627 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
631 struct ice_inset_map {
633 enum ice_flow_field fld;
635 static const struct ice_inset_map ice_inset_map[] = {
636 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
637 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
638 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
639 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
640 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
641 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
642 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
643 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
644 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
645 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
646 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
647 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
648 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
649 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
650 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
651 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
652 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
655 for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
656 if ((inset & ice_inset_map[i].inset) ==
657 ice_inset_map[i].inset)
658 field[j++] = ice_inset_map[i].fld;
/* Build an ice_flow_seg_info for `flow` from the parsed input set and
 * program it via ice_fdir_hw_tbl_conf(); for tunnels a two-segment array
 * is built with the parsed segment placed in slot 1 (inner). An -EAGAIN
 * from the table configure ("already programmed") is treated as success.
 * NOTE(review): goto/free error paths between steps are missing from
 * this extract — confirm segment ownership on failure in the full source.
 */
663 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
664 uint64_t input_set, bool is_tunnel)
666 struct ice_flow_seg_info *seg;
667 struct ice_flow_seg_info *seg_tun = NULL;
668 enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
674 seg = (struct ice_flow_seg_info *)
675 ice_malloc(hw, sizeof(*seg));
677 PMD_DRV_LOG(ERR, "No memory can be allocated");
/* Pre-fill with the terminator value, then overwrite from index 0. */
681 for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
682 field[i] = ICE_FLOW_FIELD_IDX_MAX;
683 ice_fdir_input_set_parse(input_set, field);
/* Select the protocol-header layout matching the filter ptype. */
686 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
687 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
688 ICE_FLOW_SEG_HDR_IPV4);
690 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
691 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
692 ICE_FLOW_SEG_HDR_IPV4);
694 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
695 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
696 ICE_FLOW_SEG_HDR_IPV4);
698 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
699 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
701 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
702 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
703 ICE_FLOW_SEG_HDR_IPV6);
705 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
706 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
707 ICE_FLOW_SEG_HDR_IPV6);
709 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
710 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
711 ICE_FLOW_SEG_HDR_IPV6);
713 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
714 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
717 PMD_DRV_LOG(ERR, "not supported filter type.");
/* Mark every requested field as a full match (no partial offsets). */
721 for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
722 ice_flow_set_fld(seg, field[i],
723 ICE_FLOW_FLD_OFF_INVAL,
724 ICE_FLOW_FLD_OFF_INVAL,
725 ICE_FLOW_FLD_OFF_INVAL, false);
729 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
/* Tunnel case: two segments; the parsed segment becomes the inner one. */
732 seg_tun = (struct ice_flow_seg_info *)
733 ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
735 PMD_DRV_LOG(ERR, "No memory can be allocated");
739 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
740 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
741 seg_tun, flow, true);
746 } else if (ret < 0) {
750 return (ret == -EAGAIN) ? 0 : ret;
/* Adjust filter accounting after an add (+1) or delete (-1): the global
 * hw->fdir_active_fltr and the per-ptype/per-tunnel counter.
 * NOTE(review): the global counter is updated before the ptype validity
 * check; an invalid ptype is only logged here — confirm the missing
 * else-branch in the full source guards just the per-ptype update.
 */
757 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
758 bool is_tunnel, bool add)
760 struct ice_hw *hw = ICE_PF_TO_HW(pf);
763 cnt = (add) ? 1 : -1;
764 hw->fdir_active_fltr += cnt;
765 if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
766 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
768 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
/* Flow-engine init hook: set up FDIR resources, then register the FDIR
 * pattern parser with the generic flow layer.
 */
772 ice_fdir_init(struct ice_adapter *ad)
774 struct ice_pf *pf = &ad->pf;
777 ret = ice_fdir_setup(pf);
781 return ice_register_parser(&ice_fdir_parser, ad);
/* Flow-engine uninit hook: unregister the parser, then tear down FDIR. */
785 ice_fdir_uninit(struct ice_adapter *ad)
787 struct ice_pf *pf = &ad->pf;
789 ice_unregister_parser(&ice_fdir_parser, ad);
791 ice_fdir_teardown(pf);
/* Program (add=true) or remove one FDIR rule in HW: build the
 * programming descriptor and the dummy match packet in the shared
 * memzone buffer, then submit via ice_fdir_programming().
 */
795 ice_fdir_add_del_filter(struct ice_pf *pf,
796 struct ice_fdir_filter_conf *filter,
799 struct ice_fltr_desc desc;
800 struct ice_hw *hw = ICE_PF_TO_HW(pf);
801 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
804 filter->input.dest_vsi = pf->main_vsi->idx;
806 memset(&desc, 0, sizeof(desc));
807 ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
809 memset(pkt, 0, ICE_FDIR_PKT_LEN);
810 ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
812 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
816 return ice_fdir_programming(pf, &desc);
/* Build the hash key for the SW rule table from a filter: flow type plus
 * the IP/mask/ext_data/ext_mask match fields; key is zeroed first so
 * padding bytes hash deterministically.
 */
820 ice_fdir_extract_fltr_key(struct ice_fdir_fltr_pattern *key,
821 struct ice_fdir_filter_conf *filter)
823 struct ice_fdir_fltr *input = &filter->input;
824 memset(key, 0, sizeof(*key));
826 key->flow_type = input->flow_type;
827 rte_memcpy(&key->ip, &input->ip, sizeof(key->ip));
828 rte_memcpy(&key->mask, &input->mask, sizeof(key->mask));
829 rte_memcpy(&key->ext_data, &input->ext_data, sizeof(key->ext_data));
830 rte_memcpy(&key->ext_mask, &input->ext_mask, sizeof(key->ext_mask));
833 /* Check if there exists the flow director filter */
/* rte_hash_lookup() returns the slot index (>= 0) on hit; hash_map maps
 * that slot back to the stored filter entry.
 */
834 static struct ice_fdir_filter_conf *
835 ice_fdir_entry_lookup(struct ice_fdir_info *fdir_info,
836 const struct ice_fdir_fltr_pattern *key)
840 ret = rte_hash_lookup(fdir_info->hash_table, key);
844 return fdir_info->hash_map[ret];
847 /* Add a flow director entry into the SW list */
/* rte_hash_add_key() returns the slot index used to mirror the entry
 * pointer into hash_map.
 */
849 ice_fdir_entry_insert(struct ice_pf *pf,
850 struct ice_fdir_filter_conf *entry,
851 struct ice_fdir_fltr_pattern *key)
853 struct ice_fdir_info *fdir_info = &pf->fdir;
856 ret = rte_hash_add_key(fdir_info->hash_table, key);
859 "Failed to insert fdir entry to hash table %d!",
863 fdir_info->hash_map[ret] = entry;
868 /* Delete a flow director entry from the SW list */
/* Clears the hash_map slot returned by rte_hash_del_key(); the entry
 * itself is freed by the caller.
 */
870 ice_fdir_entry_del(struct ice_pf *pf, struct ice_fdir_fltr_pattern *key)
872 struct ice_fdir_info *fdir_info = &pf->fdir;
875 ret = rte_hash_del_key(fdir_info->hash_table, key);
878 "Failed to delete fdir filter to hash table %d!",
882 fdir_info->hash_map[ret] = NULL;
/* Flow-engine create hook. Sequence: reject duplicates via the SW hash,
 * allocate the SW entry, program the input-set profile, optionally
 * allocate a HW counter, program the rule in HW, record the entry in the
 * SW table, and bump filter accounting.
 * NOTE(review): several error gotos/returns between steps are missing
 * from this extract; the trailing counter-free is part of that unwind.
 */
888 ice_fdir_create_filter(struct ice_adapter *ad,
889 struct rte_flow *flow,
891 struct rte_flow_error *error)
893 struct ice_pf *pf = &ad->pf;
894 struct ice_fdir_filter_conf *filter = meta;
895 struct ice_fdir_info *fdir_info = &pf->fdir;
896 struct ice_fdir_filter_conf *entry, *node;
897 struct ice_fdir_fltr_pattern key;
900 ice_fdir_extract_fltr_key(&key, filter);
901 node = ice_fdir_entry_lookup(fdir_info, &key);
903 rte_flow_error_set(error, EEXIST,
904 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
905 "Rule already exists!");
909 entry = rte_zmalloc("fdir_entry", sizeof(*entry), 0);
911 rte_flow_error_set(error, ENOMEM,
912 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
913 "Failed to allocate memory");
917 ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
918 filter->input_set, false);
920 rte_flow_error_set(error, -ret,
921 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
922 "Profile configure failed.");
926 /* alloc counter for FDIR */
927 if (filter->input.cnt_ena) {
928 struct rte_flow_action_count *act_count = &filter->act_count;
930 filter->counter = ice_fdir_counter_alloc(pf,
933 if (!filter->counter) {
934 rte_flow_error_set(error, EINVAL,
935 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
936 "Failed to alloc FDIR counter.");
939 filter->input.cnt_index = filter->counter->hw_index;
942 ret = ice_fdir_add_del_filter(pf, filter, true);
944 rte_flow_error_set(error, -ret,
945 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
946 "Add filter rule failed.");
950 rte_memcpy(entry, filter, sizeof(*entry));
951 ret = ice_fdir_entry_insert(pf, entry, &key);
953 rte_flow_error_set(error, -ret,
954 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
955 "Insert entry to table failed.");
960 ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
/* Error unwind: release the counter allocated above, if any. */
965 if (filter->counter) {
966 ice_fdir_counter_free(pf, filter->counter);
967 filter->counter = NULL;
/* Flow-engine destroy hook: free the rule's counter, verify the rule is
 * present in the SW table, remove it from HW, drop it from the SW table,
 * and decrement filter accounting.
 * NOTE(review): returns/entry-free between steps are missing from this
 * extract — confirm who frees `entry` in the full source.
 */
976 ice_fdir_destroy_filter(struct ice_adapter *ad,
977 struct rte_flow *flow,
978 struct rte_flow_error *error)
980 struct ice_pf *pf = &ad->pf;
981 struct ice_fdir_info *fdir_info = &pf->fdir;
982 struct ice_fdir_filter_conf *filter, *entry;
983 struct ice_fdir_fltr_pattern key;
986 filter = (struct ice_fdir_filter_conf *)flow->rule;
988 if (filter->counter) {
989 ice_fdir_counter_free(pf, filter->counter);
990 filter->counter = NULL;
993 ice_fdir_extract_fltr_key(&key, filter);
994 entry = ice_fdir_entry_lookup(fdir_info, &key);
996 rte_flow_error_set(error, ENOENT,
997 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
998 "Can't find entry.");
1002 ret = ice_fdir_add_del_filter(pf, filter, false);
1004 rte_flow_error_set(error, -ret,
1005 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1006 "Del filter rule failed.");
1010 ret = ice_fdir_entry_del(pf, &key);
1012 rte_flow_error_set(error, -ret,
1013 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1014 "Remove entry from table failed.");
1018 ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
/* Flow-engine COUNT query hook: read the rule's 64-bit hit counter from
 * the GLSTAT_FD_CNT0 register pair and optionally reset it. Byte counts
 * are not maintained by this counter, so bytes_set stays 0.
 */
1027 ice_fdir_query_count(struct ice_adapter *ad,
1028 struct rte_flow *flow,
1029 struct rte_flow_query_count *flow_stats,
1030 struct rte_flow_error *error)
1032 struct ice_pf *pf = &ad->pf;
1033 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1034 struct ice_fdir_filter_conf *filter = flow->rule;
1035 struct ice_fdir_counter *counter = filter->counter;
1036 uint64_t hits_lo, hits_hi;
1039 rte_flow_error_set(error, EINVAL,
1040 RTE_FLOW_ERROR_TYPE_ACTION,
1042 "FDIR counters not available");
1047 * Reading the low 32-bits latches the high 32-bits into a shadow
1048 * register. Reading the high 32-bit returns the value in the
/* Read order matters: low word first (latches high), then high word. */
1051 hits_lo = ICE_READ_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index));
1052 hits_hi = ICE_READ_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index));
1054 flow_stats->hits_set = 1;
1055 flow_stats->hits = hits_lo | (hits_hi << 32);
1056 flow_stats->bytes_set = 0;
1057 flow_stats->bytes = 0;
1059 if (flow_stats->reset) {
1060 /* reset statistic counter value */
1061 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0H(counter->hw_index), 0);
1062 ICE_WRITE_REG(hw, GLSTAT_FD_CNT0L(counter->hw_index), 0);
/* FDIR engine ops table registered with the generic flow framework. */
1068 static struct ice_flow_engine ice_fdir_engine = {
1069 .init = ice_fdir_init,
1070 .uninit = ice_fdir_uninit,
1071 .create = ice_fdir_create_filter,
1072 .destroy = ice_fdir_destroy_filter,
1073 .query_count = ice_fdir_query_count,
1074 .type = ICE_FLOW_ENGINE_FDIR,
/* Validate an RSS action used as an FDIR queue region and fill the
 * filter's q_index/q_region/dest_ctl. Requirements: >1 queues,
 * contiguous queue indices, all within nb_rx_queues, and a
 * power-of-two region size up to ICE_FDIR_MAX_QREGION_SIZE.
 */
1078 ice_fdir_parse_action_qregion(struct ice_pf *pf,
1079 struct rte_flow_error *error,
1080 const struct rte_flow_action *act,
1081 struct ice_fdir_filter_conf *filter)
1083 const struct rte_flow_action_rss *rss = act->conf;
1086 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1087 rte_flow_error_set(error, EINVAL,
1088 RTE_FLOW_ERROR_TYPE_ACTION, act,
1093 if (rss->queue_num <= 1) {
1094 rte_flow_error_set(error, EINVAL,
1095 RTE_FLOW_ERROR_TYPE_ACTION, act,
1096 "Queue region size can't be 0 or 1.");
1100 /* check if queue index for queue region is continuous */
1101 for (i = 0; i < rss->queue_num - 1; i++) {
1102 if (rss->queue[i + 1] != rss->queue[i] + 1) {
1103 rte_flow_error_set(error, EINVAL,
1104 RTE_FLOW_ERROR_TYPE_ACTION, act,
1105 "Discontinuous queue region");
/* Last (largest) index bounds the whole contiguous region. */
1110 if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
1111 rte_flow_error_set(error, EINVAL,
1112 RTE_FLOW_ERROR_TYPE_ACTION, act,
1113 "Invalid queue region indexes.");
1117 if (!(rte_is_power_of_2(rss->queue_num) &&
1118 (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
1119 rte_flow_error_set(error, EINVAL,
1120 RTE_FLOW_ERROR_TYPE_ACTION, act,
1121 "The region size should be any of the following values:"
1122 "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
1123 "of queues do not exceed the VSI allocation.");
/* q_region is encoded as log2 of the region size. */
1127 filter->input.q_index = rss->queue[0];
1128 filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
1129 filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
/* Walk the rte_flow action list and fill the filter's destination
 * (QUEUE / DROP / PASSTHRU / RSS queue region), MARK id, and COUNT
 * config. Exactly one destination action is required; MARK and COUNT
 * may each appear at most once.
 * NOTE(review): counter increments (dest_num/mark_num/counter_num++)
 * and break statements are missing from this extract.
 */
1135 ice_fdir_parse_action(struct ice_adapter *ad,
1136 const struct rte_flow_action actions[],
1137 struct rte_flow_error *error,
1138 struct ice_fdir_filter_conf *filter)
1140 struct ice_pf *pf = &ad->pf;
1141 const struct rte_flow_action_queue *act_q;
1142 const struct rte_flow_action_mark *mark_spec = NULL;
1143 const struct rte_flow_action_count *act_count;
1144 uint32_t dest_num = 0;
1145 uint32_t mark_num = 0;
1146 uint32_t counter_num = 0;
1149 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1150 switch (actions->type) {
1151 case RTE_FLOW_ACTION_TYPE_VOID:
1153 case RTE_FLOW_ACTION_TYPE_QUEUE:
1156 act_q = actions->conf;
1157 filter->input.q_index = act_q->index;
1158 if (filter->input.q_index >=
1159 pf->dev_data->nb_rx_queues) {
1160 rte_flow_error_set(error, EINVAL,
1161 RTE_FLOW_ERROR_TYPE_ACTION,
1163 "Invalid queue for FDIR.");
1166 filter->input.dest_ctl =
1167 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1169 case RTE_FLOW_ACTION_TYPE_DROP:
1172 filter->input.dest_ctl =
1173 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
1175 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
/* PASSTHRU maps to queue-index dispatch with queue 0. */
1178 filter->input.dest_ctl =
1179 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
1180 filter->input.q_index = 0;
1182 case RTE_FLOW_ACTION_TYPE_RSS:
1185 ret = ice_fdir_parse_action_qregion(pf,
1186 error, actions, filter);
1190 case RTE_FLOW_ACTION_TYPE_MARK:
1193 mark_spec = actions->conf;
1194 filter->input.fltr_id = mark_spec->id;
1196 case RTE_FLOW_ACTION_TYPE_COUNT:
1199 act_count = actions->conf;
1200 filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
1201 rte_memcpy(&filter->act_count, act_count,
1202 sizeof(filter->act_count));
1206 rte_flow_error_set(error, EINVAL,
1207 RTE_FLOW_ERROR_TYPE_ACTION, actions,
/* Post-walk validation: exactly one destination, <=1 mark, <=1 count. */
1213 if (dest_num == 0 || dest_num >= 2) {
1214 rte_flow_error_set(error, EINVAL,
1215 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1216 "Unsupported action combination");
1220 if (mark_num >= 2) {
1221 rte_flow_error_set(error, EINVAL,
1222 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1223 "Too many mark actions");
1227 if (counter_num >= 2) {
1228 rte_flow_error_set(error, EINVAL,
1229 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1230 "Too many count actions")
1238 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
1239 const struct rte_flow_item pattern[],
1240 struct rte_flow_error *error,
1241 struct ice_fdir_filter_conf *filter)
1243 const struct rte_flow_item *item = pattern;
1244 enum rte_flow_item_type item_type;
1245 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
1246 const struct rte_flow_item_eth *eth_spec, *eth_mask;
1247 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
1248 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
1249 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
1250 const struct rte_flow_item_udp *udp_spec, *udp_mask;
1251 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
1252 uint64_t input_set = ICE_INSET_NONE;
1253 uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
1254 uint8_t ipv6_addr_mask[16] = {
1255 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
1256 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
1258 uint32_t vtc_flow_cpu;
1261 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1263 rte_flow_error_set(error, EINVAL,
1264 RTE_FLOW_ERROR_TYPE_ITEM,
1266 "Not support range");
1269 item_type = item->type;
1271 switch (item_type) {
1272 case RTE_FLOW_ITEM_TYPE_ETH:
1273 eth_spec = item->spec;
1274 eth_mask = item->mask;
1276 if (eth_spec && eth_mask) {
1277 if (!rte_is_zero_ether_addr(ð_spec->src) ||
1278 !rte_is_zero_ether_addr(ð_mask->src)) {
1279 rte_flow_error_set(error, EINVAL,
1280 RTE_FLOW_ERROR_TYPE_ITEM,
1282 "Src mac not support");
1286 if (!rte_is_broadcast_ether_addr(ð_mask->dst)) {
1287 rte_flow_error_set(error, EINVAL,
1288 RTE_FLOW_ERROR_TYPE_ITEM,
1290 "Invalid mac addr mask");
1294 input_set |= ICE_INSET_DMAC;
1295 rte_memcpy(&filter->input.ext_data.dst_mac,
1297 RTE_ETHER_ADDR_LEN);
1300 case RTE_FLOW_ITEM_TYPE_IPV4:
1301 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
1302 ipv4_spec = item->spec;
1303 ipv4_mask = item->mask;
1305 if (ipv4_spec && ipv4_mask) {
1306 /* Check IPv4 mask and update input set */
1307 if (ipv4_mask->hdr.version_ihl ||
1308 ipv4_mask->hdr.total_length ||
1309 ipv4_mask->hdr.packet_id ||
1310 ipv4_mask->hdr.fragment_offset ||
1311 ipv4_mask->hdr.hdr_checksum) {
1312 rte_flow_error_set(error, EINVAL,
1313 RTE_FLOW_ERROR_TYPE_ITEM,
1315 "Invalid IPv4 mask.");
1318 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
1319 input_set |= ICE_INSET_IPV4_SRC;
1320 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
1321 input_set |= ICE_INSET_IPV4_DST;
1322 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
1323 input_set |= ICE_INSET_IPV4_TOS;
1324 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
1325 input_set |= ICE_INSET_IPV4_TTL;
1326 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
1327 input_set |= ICE_INSET_IPV4_PROTO;
1329 filter->input.ip.v4.dst_ip =
1330 ipv4_spec->hdr.src_addr;
1331 filter->input.ip.v4.src_ip =
1332 ipv4_spec->hdr.dst_addr;
1333 filter->input.ip.v4.tos =
1334 ipv4_spec->hdr.type_of_service;
1335 filter->input.ip.v4.ttl =
1336 ipv4_spec->hdr.time_to_live;
1337 filter->input.ip.v4.proto =
1338 ipv4_spec->hdr.next_proto_id;
1341 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
/* RTE_FLOW_ITEM_TYPE_IPV6: validate the IPv6 mask, build the input set,
 * and copy the matched header fields into the FDIR filter. */
1343 case RTE_FLOW_ITEM_TYPE_IPV6:
1344 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
1345 ipv6_spec = item->spec;
1346 ipv6_mask = item->mask;
1348 if (ipv6_spec && ipv6_mask) {
1349 /* Check IPv6 mask and update input set */
/* payload_len cannot be matched by FDIR; reject any mask on it. */
1350 if (ipv6_mask->hdr.payload_len) {
1351 rte_flow_error_set(error, EINVAL,
1352 RTE_FLOW_ERROR_TYPE_ITEM,
1354 "Invalid IPv6 mask");
/* Addresses are 16-byte arrays: compared against an all-ones pattern
 * (full-match only) rather than checked word-by-word. */
1358 if (!memcmp(ipv6_mask->hdr.src_addr,
1360 RTE_DIM(ipv6_mask->hdr.src_addr)))
1361 input_set |= ICE_INSET_IPV6_SRC;
1362 if (!memcmp(ipv6_mask->hdr.dst_addr,
1364 RTE_DIM(ipv6_mask->hdr.dst_addr)))
1365 input_set |= ICE_INSET_IPV6_DST;
/* TC lives inside the big-endian vtc_flow word; require the whole
 * TC sub-field (ICE_IPV6_TC_MASK) to be masked. */
1367 if ((ipv6_mask->hdr.vtc_flow &
1368 rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1369 == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
1370 input_set |= ICE_INSET_IPV6_TC;
1371 if (ipv6_mask->hdr.proto == UINT8_MAX)
1372 input_set |= ICE_INSET_IPV6_NEXT_HDR;
1373 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
1374 input_set |= ICE_INSET_IPV6_HOP_LIMIT;
/* src/dst crossed on purpose, matching the IPv4 case above. */
1376 rte_memcpy(filter->input.ip.v6.dst_ip,
1377 ipv6_spec->hdr.src_addr, 16);
1378 rte_memcpy(filter->input.ip.v6.src_ip,
1379 ipv6_spec->hdr.dst_addr, 16);
/* Extract the 8-bit TC from host-order vtc_flow (offset 20). */
1382 rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
1383 filter->input.ip.v6.tc =
1384 (uint8_t)(vtc_flow_cpu >>
1385 ICE_FDIR_IPV6_TC_OFFSET);
1386 filter->input.ip.v6.proto =
1387 ipv6_spec->hdr.proto;
1388 filter->input.ip.v6.hlim =
1389 ipv6_spec->hdr.hop_limits;
/* May be refined to an L4 flow type by a subsequent TCP/UDP/SCTP item. */
1392 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
/* RTE_FLOW_ITEM_TYPE_TCP: only src/dst port are matchable; any other
 * masked TCP header field is rejected. */
1394 case RTE_FLOW_ITEM_TYPE_TCP:
1395 tcp_spec = item->spec;
1396 tcp_mask = item->mask;
1398 if (tcp_spec && tcp_mask) {
1399 /* Check TCP mask and update input set */
1400 if (tcp_mask->hdr.sent_seq ||
1401 tcp_mask->hdr.recv_ack ||
1402 tcp_mask->hdr.data_off ||
1403 tcp_mask->hdr.tcp_flags ||
1404 tcp_mask->hdr.rx_win ||
1405 tcp_mask->hdr.cksum ||
1406 tcp_mask->hdr.tcp_urp) {
1407 rte_flow_error_set(error, EINVAL,
1408 RTE_FLOW_ERROR_TYPE_ITEM,
1410 "Invalid TCP mask");
/* Ports require a full (all-ones) mask to enter the input set. */
1414 if (tcp_mask->hdr.src_port == UINT16_MAX)
1415 input_set |= ICE_INSET_TCP_SRC_PORT;
1416 if (tcp_mask->hdr.dst_port == UINT16_MAX)
1417 input_set |= ICE_INSET_TCP_DST_PORT;
1419 /* Get filter info */
/* Ports are crossed (dst<-src, src<-dst), consistent with the
 * address handling in the IPv4/IPv6 cases above. */
1420 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1421 filter->input.ip.v4.dst_port =
1422 tcp_spec->hdr.src_port;
1423 filter->input.ip.v4.src_port =
1424 tcp_spec->hdr.dst_port;
1426 ICE_FLTR_PTYPE_NONF_IPV4_TCP;
1427 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1428 filter->input.ip.v6.dst_port =
1429 tcp_spec->hdr.src_port;
1430 filter->input.ip.v6.src_port =
1431 tcp_spec->hdr.dst_port;
1433 ICE_FLTR_PTYPE_NONF_IPV6_TCP;
/* RTE_FLOW_ITEM_TYPE_UDP: only src/dst port are matchable; length and
 * checksum masks are rejected. */
1437 case RTE_FLOW_ITEM_TYPE_UDP:
1438 udp_spec = item->spec;
1439 udp_mask = item->mask;
1441 if (udp_spec && udp_mask) {
1442 /* Check UDP mask and update input set*/
1443 if (udp_mask->hdr.dgram_len ||
1444 udp_mask->hdr.dgram_cksum) {
1445 rte_flow_error_set(error, EINVAL,
1446 RTE_FLOW_ERROR_TYPE_ITEM,
1448 "Invalid UDP mask");
/* Ports require a full (all-ones) mask to enter the input set. */
1452 if (udp_mask->hdr.src_port == UINT16_MAX)
1453 input_set |= ICE_INSET_UDP_SRC_PORT;
1454 if (udp_mask->hdr.dst_port == UINT16_MAX)
1455 input_set |= ICE_INSET_UDP_DST_PORT;
1457 /* Get filter info */
1458 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1459 filter->input.ip.v4.dst_port =
1460 udp_spec->hdr.src_port;
1461 filter->input.ip.v4.src_port =
1462 udp_spec->hdr.dst_port;
1464 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
1465 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
/* BUGFIX: cross the ports (dst<-src, src<-dst) exactly like the
 * IPv4 UDP branch and the TCP/SCTP branches. The previous code
 * stored them unswapped here only, so IPv6 UDP rules matched the
 * reversed port pair. */
1466 filter->input.ip.v6.dst_port =
1467 udp_spec->hdr.src_port;
1468 filter->input.ip.v6.src_port =
1469 udp_spec->hdr.dst_port;
1471 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
/* RTE_FLOW_ITEM_TYPE_SCTP: only src/dst port are matchable; a checksum
 * mask is rejected. */
1475 case RTE_FLOW_ITEM_TYPE_SCTP:
1476 sctp_spec = item->spec;
1477 sctp_mask = item->mask;
1479 if (sctp_spec && sctp_mask) {
1480 /* Check SCTP mask and update input set */
1481 if (sctp_mask->hdr.cksum) {
1482 rte_flow_error_set(error, EINVAL,
1483 RTE_FLOW_ERROR_TYPE_ITEM,
/* BUGFIX: this is the SCTP branch — the message previously said
 * "Invalid UDP mask" (copy-paste from the UDP case). */
1485 "Invalid SCTP mask");
/* Ports require a full (all-ones) mask to enter the input set. */
1489 if (sctp_mask->hdr.src_port == UINT16_MAX)
1490 input_set |= ICE_INSET_SCTP_SRC_PORT;
1491 if (sctp_mask->hdr.dst_port == UINT16_MAX)
1492 input_set |= ICE_INSET_SCTP_DST_PORT;
1494 /* Get filter info */
/* Ports crossed (dst<-src, src<-dst), consistent with the other cases. */
1495 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1496 filter->input.ip.v4.dst_port =
1497 sctp_spec->hdr.src_port;
1498 filter->input.ip.v4.src_port =
1499 sctp_spec->hdr.dst_port;
1501 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
1502 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1503 filter->input.ip.v6.dst_port =
1504 sctp_spec->hdr.src_port;
1505 filter->input.ip.v6.src_port =
1506 sctp_spec->hdr.dst_port;
1508 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
/* VOID items are transparent placeholders; any other item type is an
 * unsupported pattern and is reported to the caller. */
1512 case RTE_FLOW_ITEM_TYPE_VOID:
1515 rte_flow_error_set(error, EINVAL,
1516 RTE_FLOW_ERROR_TYPE_ITEM,
1518 "Invalid pattern item.")
1518 "Invalid pattern item.");
/* Publish the accumulated results into the filter for the caller. */
1523 filter->input.flow_type = flow_type;
1524 filter->input_set = input_set;
/* FDIR engine entry point for rte_flow parsing: match the pattern against
 * the supported table, parse pattern and actions into pf->fdir.conf, and
 * verify the collected input set is non-empty and within the matched
 * pattern's allowed mask. Errors are reported via rte_flow_error_set. */
1530 ice_fdir_parse(struct ice_adapter *ad,
1531 struct ice_pattern_match_item *array,
1533 const struct rte_flow_item pattern[],
1534 const struct rte_flow_action actions[],
1536 struct rte_flow_error *error)
1538 struct ice_pf *pf = &ad->pf;
/* NOTE(review): the parse result lives in the PF-wide fdir.conf, not a
 * per-rule allocation — callers presumably consume it before the next
 * parse; confirm against the flow-engine framework. */
1539 struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1540 struct ice_pattern_match_item *item = NULL;
1544 memset(filter, 0, sizeof(*filter));
1545 item = ice_search_pattern_match_item(pattern, array, array_len, error);
1549 ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
/* Reject an empty input set or any field outside the pattern's mask. */
1552 input_set = filter->input_set;
1553 if (!input_set || input_set & ~item->input_set_mask) {
1554 rte_flow_error_set(error, EINVAL,
1555 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1557 "Invalid input set");
1561 ret = ice_fdir_parse_action(ad, actions, error, filter);
/* Parser descriptor registering ice_fdir_parse with the generic flow
 * framework for the distributor stage, using the supported-pattern table
 * defined at the top of this file. */
1570 static struct ice_flow_parser ice_fdir_parser = {
1571 .engine = &ice_fdir_engine,
1572 .array = ice_fdir_pattern,
1573 .array_len = RTE_DIM(ice_fdir_pattern),
1574 .parse_pattern_action = ice_fdir_parse,
1575 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Constructor: register the FDIR flow engine with the generic flow
 * framework before main() runs (DPDK RTE_INIT priority mechanism). */
1578 RTE_INIT(ice_fdir_engine_register)
1580 ice_register_flow_engine(&ice_fdir_engine);