3 #include "base/ice_fdir.h"
4 #include "base/ice_flow.h"
5 #include "base/ice_type.h"
6 #include "ice_ethdev.h"
8 #include "ice_generic_flow.h"
/* IPv6 Traffic Class: bit offset of the 8-bit TC field within the
 * (big-endian) vtc_flow word, and a mask covering exactly those bits.
 */
10 #define ICE_FDIR_IPV6_TC_OFFSET 20
11 #define ICE_IPV6_TC_MASK (0xFF << ICE_FDIR_IPV6_TC_OFFSET)
/* Supported input-set bitmaps per pattern: the base L3 fields, plus the
 * L4 source/destination port pair for the UDP/TCP/SCTP variants.
 */
13 #define ICE_FDIR_INSET_ETH_IPV4 (\
15 ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_TOS | \
16 ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_PROTO)
18 #define ICE_FDIR_INSET_ETH_IPV4_UDP (\
19 ICE_FDIR_INSET_ETH_IPV4 | \
20 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
22 #define ICE_FDIR_INSET_ETH_IPV4_TCP (\
23 ICE_FDIR_INSET_ETH_IPV4 | \
24 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
26 #define ICE_FDIR_INSET_ETH_IPV4_SCTP (\
27 ICE_FDIR_INSET_ETH_IPV4 | \
28 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
30 #define ICE_FDIR_INSET_ETH_IPV6 (\
31 ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_TC | \
32 ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_NEXT_HDR)
34 #define ICE_FDIR_INSET_ETH_IPV6_UDP (\
35 ICE_FDIR_INSET_ETH_IPV6 | \
36 ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
38 #define ICE_FDIR_INSET_ETH_IPV6_TCP (\
39 ICE_FDIR_INSET_ETH_IPV6 | \
40 ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
42 #define ICE_FDIR_INSET_ETH_IPV6_SCTP (\
43 ICE_FDIR_INSET_ETH_IPV6 | \
44 ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
/* Flow patterns handled by the FDIR engine, each paired with the full
 * input-set mask a rule on that pattern may match on (third member is
 * the meta/outer input set, unused here).
 */
46 static struct ice_pattern_match_item ice_fdir_pattern[] = {
47 {pattern_eth_ipv4, ICE_FDIR_INSET_ETH_IPV4, ICE_INSET_NONE},
48 {pattern_eth_ipv4_udp, ICE_FDIR_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
49 {pattern_eth_ipv4_tcp, ICE_FDIR_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
50 {pattern_eth_ipv4_sctp, ICE_FDIR_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
51 {pattern_eth_ipv6, ICE_FDIR_INSET_ETH_IPV6, ICE_INSET_NONE},
52 {pattern_eth_ipv6_udp, ICE_FDIR_INSET_ETH_IPV6_UDP, ICE_INSET_NONE},
53 {pattern_eth_ipv6_tcp, ICE_FDIR_INSET_ETH_IPV6_TCP, ICE_INSET_NONE},
54 {pattern_eth_ipv6_sctp, ICE_FDIR_INSET_ETH_IPV6_SCTP, ICE_INSET_NONE},
/* Forward declaration; defined at the bottom of the file and
 * registered/unregistered in ice_fdir_init()/ice_fdir_uninit().
 */
57 static struct ice_flow_parser ice_fdir_parser;
/* Reserve an IOVA-contiguous memzone; used below to hold the FDIR
 * programming (dummy) packet that is DMA'd to the device.
 */
59 static const struct rte_memzone *
60 ice_memzone_reserve(const char *name, uint32_t len, int socket_id)
62 return rte_memzone_reserve_aligned(name, len, socket_id,
63 RTE_MEMZONE_IOVA_CONTIG,
67 #define ICE_FDIR_MZ_NAME "FDIR_MEMZONE"
/* Allocate the per-filter-type FDIR profile table (hw->fdir_prof) and
 * one ice_fd_hw_prof per non-NONE filter type. On allocation failure
 * the profiles allocated so far and the table itself are freed.
 */
70 ice_fdir_prof_alloc(struct ice_hw *hw)
72 enum ice_fltr_ptype ptype, fltr_ptype;
75 hw->fdir_prof = (struct ice_fd_hw_prof **)
76 ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
77 sizeof(*hw->fdir_prof))
/* Start at the first real filter type, skipping ICE_FLTR_PTYPE_NONF_NONE. */
81 for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
82 ptype < ICE_FLTR_PTYPE_MAX;
84 if (!hw->fdir_prof[ptype]) {
85 hw->fdir_prof[ptype] = (struct ice_fd_hw_prof *)
86 ice_malloc(hw, sizeof(**hw->fdir_prof));
87 if (!hw->fdir_prof[ptype])
/* Error unwind: release every profile allocated before the failure,
 * then the table.
 */
94 for (fltr_ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
97 rte_free(hw->fdir_prof[fltr_ptype]);
98 rte_free(hw->fdir_prof);
103 * ice_fdir_setup - reserve and initialize the Flow Director resources
104 * @pf: board private structure
107 ice_fdir_setup(struct ice_pf *pf)
109 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
110 struct ice_hw *hw = ICE_PF_TO_HW(pf);
111 const struct rte_memzone *mz = NULL;
112 char z_name[RTE_MEMZONE_NAMESIZE];
114 int err = ICE_SUCCESS;
/* FDIR must be advertised by the HW function capabilities. */
116 if ((pf->flags & ICE_FLAG_FDIR) == 0) {
117 PMD_INIT_LOG(ERR, "HW doesn't support FDIR");
121 PMD_DRV_LOG(INFO, "FDIR HW Capabilities: fd_fltr_guar = %u,"
122 " fd_fltr_best_effort = %u.",
123 hw->func_caps.fd_fltr_guar,
124 hw->func_caps.fd_fltr_best_effort);
/* Idempotent: a non-NULL fdir_vsi means setup already ran. */
126 if (pf->fdir.fdir_vsi) {
127 PMD_DRV_LOG(INFO, "FDIR initialization has been done.");
131 /* make new FDIR VSI */
132 vsi = ice_setup_vsi(pf, ICE_VSI_CTRL);
134 PMD_DRV_LOG(ERR, "Couldn't create FDIR VSI.");
137 pf->fdir.fdir_vsi = vsi;
139 /*Fdir tx queue setup*/
140 err = ice_fdir_setup_tx_resources(pf);
142 PMD_DRV_LOG(ERR, "Failed to setup FDIR TX resources.");
146 /*Fdir rx queue setup*/
147 err = ice_fdir_setup_rx_resources(pf);
149 PMD_DRV_LOG(ERR, "Failed to setup FDIR RX resources.");
/* Start the dedicated programming queues on the control VSI. */
153 err = ice_fdir_tx_queue_start(eth_dev, pf->fdir.txq->queue_id);
155 PMD_DRV_LOG(ERR, "Failed to start FDIR TX queue.");
159 err = ice_fdir_rx_queue_start(eth_dev, pf->fdir.rxq->queue_id);
161 PMD_DRV_LOG(ERR, "Failed to start FDIR RX queue.");
165 /* reserve memory for the fdir programming packet */
166 snprintf(z_name, sizeof(z_name), "ICE_%s_%d",
168 eth_dev->data->port_id);
169 mz = ice_memzone_reserve(z_name, ICE_FDIR_PKT_LEN, SOCKET_ID_ANY);
171 PMD_DRV_LOG(ERR, "Cannot init memzone for "
172 "flow director program packet.");
/* Both the CPU address and the IOVA are kept: the packet is built in
 * prg_pkt and the descriptor references dma_addr.
 */
176 pf->fdir.prg_pkt = mz->addr;
177 pf->fdir.dma_addr = mz->iova;
179 err = ice_fdir_prof_alloc(hw);
181 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
182 "flow director profile.");
187 PMD_DRV_LOG(INFO, "FDIR setup successfully, with programming queue %u.",
/* Error unwind labels: release queues and the VSI in reverse order of
 * acquisition.
 */
192 ice_rx_queue_release(pf->fdir.rxq);
195 ice_tx_queue_release(pf->fdir.txq);
198 ice_release_vsi(vsi);
199 pf->fdir.fdir_vsi = NULL;
/* Free every per-ptype FDIR profile and the profile table itself;
 * inverse of ice_fdir_prof_alloc().
 */
204 ice_fdir_prof_free(struct ice_hw *hw)
206 enum ice_fltr_ptype ptype;
208 for (ptype = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
209 ptype < ICE_FLTR_PTYPE_MAX;
211 rte_free(hw->fdir_prof[ptype]);
213 rte_free(hw->fdir_prof);
216 /* Remove a profile for some filter type */
218 ice_fdir_prof_rm(struct ice_pf *pf, enum ice_fltr_ptype ptype, bool is_tunnel)
220 struct ice_hw *hw = ICE_PF_TO_HW(pf);
221 struct ice_fd_hw_prof *hw_prof;
/* Nothing to do when no profile was ever created for this ptype. */
226 if (!hw->fdir_prof || !hw->fdir_prof[ptype])
229 hw_prof = hw->fdir_prof[ptype];
/* Profile IDs are split into two ranges: non-tunnel IDs equal the
 * ptype, tunnel IDs are offset by ICE_FLTR_PTYPE_MAX.
 */
231 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
232 for (i = 0; i < pf->hw_prof_cnt[ptype][is_tunnel]; i++) {
233 if (hw_prof->entry_h[i][is_tunnel]) {
/* Detach the profile from the VSI, then drop the flow entry. */
234 vsi_num = ice_get_hw_vsi_num(hw,
236 ice_rem_prof_id_flow(hw, ICE_BLK_FD,
238 ice_flow_rem_entry(hw,
239 hw_prof->entry_h[i][is_tunnel]);
240 hw_prof->entry_h[i][is_tunnel] = 0;
243 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
244 rte_free(hw_prof->fdir_seg[is_tunnel]);
245 hw_prof->fdir_seg[is_tunnel] = NULL;
/* Reset the cached VSI handles and the software profile counter. */
247 for (i = 0; i < hw_prof->cnt; i++)
248 hw_prof->vsi_h[i] = 0;
249 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
252 /* Remove all created profiles */
254 ice_fdir_prof_rm_all(struct ice_pf *pf)
256 enum ice_fltr_ptype ptype;
/* Walk every filter type and tear down both the non-tunnel and the
 * tunnel profile variant.
 */
258 for (ptype = ICE_FLTR_PTYPE_NONF_NONE;
259 ptype < ICE_FLTR_PTYPE_MAX;
261 ice_fdir_prof_rm(pf, ptype, false);
262 ice_fdir_prof_rm(pf, ptype, true);
267 * ice_fdir_teardown - release the Flow Director resources
268 * @pf: board private structure
271 ice_fdir_teardown(struct ice_pf *pf)
273 struct rte_eth_dev *eth_dev = pf->adapter->eth_dev;
274 struct ice_hw *hw = ICE_PF_TO_HW(pf);
278 vsi = pf->fdir.fdir_vsi;
/* Stop failures are logged but teardown continues best-effort. */
282 err = ice_fdir_tx_queue_stop(eth_dev, pf->fdir.txq->queue_id);
284 PMD_DRV_LOG(ERR, "Failed to stop TX queue.");
286 err = ice_fdir_rx_queue_stop(eth_dev, pf->fdir.rxq->queue_id);
288 PMD_DRV_LOG(ERR, "Failed to stop RX queue.");
290 ice_tx_queue_release(pf->fdir.txq);
292 ice_rx_queue_release(pf->fdir.rxq);
/* Drop all HW profiles/entries before freeing the tables and VSI;
 * inverse order of ice_fdir_setup().
 */
294 ice_fdir_prof_rm_all(pf);
295 ice_fdir_prof_free(hw);
296 ice_release_vsi(vsi);
297 pf->fdir.fdir_vsi = NULL;
/* Program a flow profile for @ptype into the FD block and add one flow
 * entry for the main VSI and one for the FDIR control VSI. On success
 * the function takes ownership of @seg (stored in hw_prof->fdir_seg).
 */
301 ice_fdir_hw_tbl_conf(struct ice_pf *pf, struct ice_vsi *vsi,
302 struct ice_vsi *ctrl_vsi,
303 struct ice_flow_seg_info *seg,
304 enum ice_fltr_ptype ptype,
307 struct ice_hw *hw = ICE_PF_TO_HW(pf);
308 enum ice_flow_dir dir = ICE_FLOW_RX;
309 struct ice_flow_seg_info *ori_seg;
310 struct ice_fd_hw_prof *hw_prof;
311 struct ice_flow_prof *prof;
312 uint64_t entry_1 = 0;
313 uint64_t entry_2 = 0;
318 hw_prof = hw->fdir_prof[ptype];
319 ori_seg = hw_prof->fdir_seg[is_tunnel];
/* If an identical segment is already programmed there is nothing to
 * do (tunnel profiles compare the inner segment, seg[1]).
 */
322 if (!memcmp(ori_seg, seg, sizeof(*seg)))
325 if (!memcmp(ori_seg, &seg[1], sizeof(*seg)))
/* An existing, different profile can only be replaced while no
 * filters of this type are installed.
 */
329 if (pf->fdir_fltr_cnt[ptype][is_tunnel])
332 ice_fdir_prof_rm(pf, ptype, is_tunnel);
/* Non-tunnel profile IDs equal the ptype; tunnel IDs are offset by
 * ICE_FLTR_PTYPE_MAX (same scheme as ice_fdir_prof_rm()).
 */
335 prof_id = ptype + is_tunnel * ICE_FLTR_PTYPE_MAX;
336 ret = ice_flow_add_prof(hw, ICE_BLK_FD, dir, prof_id, seg,
337 (is_tunnel) ? 2 : 1, NULL, 0, &prof);
340 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
341 vsi->idx, ICE_FLOW_PRIO_NORMAL,
342 seg, NULL, 0, &entry_1);
344 PMD_DRV_LOG(ERR, "Failed to add main VSI flow entry for %d.",
348 ret = ice_flow_add_entry(hw, ICE_BLK_FD, prof_id, vsi->idx,
349 ctrl_vsi->idx, ICE_FLOW_PRIO_NORMAL,
350 seg, NULL, 0, &entry_2);
352 PMD_DRV_LOG(ERR, "Failed to add control VSI flow entry for %d.",
/* Record the new segment, both entry handles and both VSI handles in
 * the software profile state.
 */
357 pf->hw_prof_cnt[ptype][is_tunnel] = 0;
359 hw_prof->fdir_seg[is_tunnel] = seg;
360 hw_prof->vsi_h[hw_prof->cnt] = vsi->idx;
361 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_1;
362 pf->hw_prof_cnt[ptype][is_tunnel]++;
363 hw_prof->vsi_h[hw_prof->cnt] = ctrl_vsi->idx;
364 hw_prof->entry_h[hw_prof->cnt++][is_tunnel] = entry_2;
365 pf->hw_prof_cnt[ptype][is_tunnel]++;
/* Error unwind: remove whatever was programmed before the failure. */
370 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
371 ice_rem_prof_id_flow(hw, ICE_BLK_FD, vsi_num, prof_id);
372 ice_flow_rem_entry(hw, entry_1);
374 ice_flow_rem_prof(hw, ICE_BLK_FD, prof_id);
/* Translate an ICE_INSET_* bitmap into the corresponding list of
 * ice_flow_field indices, appended to @field in table order. The caller
 * pre-fills @field with ICE_FLOW_FIELD_IDX_MAX as the terminator.
 */
380 ice_fdir_input_set_parse(uint64_t inset, enum ice_flow_field *field)
384 struct ice_inset_map {
386 enum ice_flow_field fld;
388 static const struct ice_inset_map ice_inset_map[] = {
389 {ICE_INSET_DMAC, ICE_FLOW_FIELD_IDX_ETH_DA},
390 {ICE_INSET_IPV4_SRC, ICE_FLOW_FIELD_IDX_IPV4_SA},
391 {ICE_INSET_IPV4_DST, ICE_FLOW_FIELD_IDX_IPV4_DA},
392 {ICE_INSET_IPV4_TOS, ICE_FLOW_FIELD_IDX_IPV4_DSCP},
393 {ICE_INSET_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_TTL},
394 {ICE_INSET_IPV4_PROTO, ICE_FLOW_FIELD_IDX_IPV4_PROT},
395 {ICE_INSET_IPV6_SRC, ICE_FLOW_FIELD_IDX_IPV6_SA},
396 {ICE_INSET_IPV6_DST, ICE_FLOW_FIELD_IDX_IPV6_DA},
397 {ICE_INSET_IPV6_TC, ICE_FLOW_FIELD_IDX_IPV6_DSCP},
398 {ICE_INSET_IPV6_NEXT_HDR, ICE_FLOW_FIELD_IDX_IPV6_PROT},
399 {ICE_INSET_IPV6_HOP_LIMIT, ICE_FLOW_FIELD_IDX_IPV6_TTL},
400 {ICE_INSET_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_SRC_PORT},
401 {ICE_INSET_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT},
402 {ICE_INSET_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT},
403 {ICE_INSET_UDP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT},
404 {ICE_INSET_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT},
405 {ICE_INSET_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT},
/* Emit a field index for every inset bit fully present in @inset. */
408 for (i = 0, j = 0; i < RTE_DIM(ice_inset_map); i++) {
409 if ((inset & ice_inset_map[i].inset) ==
410 ice_inset_map[i].inset)
411 field[j++] = ice_inset_map[i].fld;
/* Build the flow segment(s) describing @input_set for filter type
 * @flow and program them into the HW table via ice_fdir_hw_tbl_conf().
 * For tunnels a two-segment array is used (outer + inner).
 */
416 ice_fdir_input_set_conf(struct ice_pf *pf, enum ice_fltr_ptype flow,
417 uint64_t input_set, bool is_tunnel)
419 struct ice_flow_seg_info *seg;
420 struct ice_flow_seg_info *seg_tun = NULL;
421 enum ice_flow_field field[ICE_FLOW_FIELD_IDX_MAX];
427 seg = (struct ice_flow_seg_info *)
428 ice_malloc(hw, sizeof(*seg));
430 PMD_DRV_LOG(ERR, "No memory can be allocated");
/* Pre-fill with the terminator value, then let the parser append the
 * selected field indices.
 */
434 for (i = 0; i < ICE_FLOW_FIELD_IDX_MAX; i++)
435 field[i] = ICE_FLOW_FIELD_IDX_MAX;
436 ice_fdir_input_set_parse(input_set, field);
/* Select the protocol headers of the segment from the filter type. */
439 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
440 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
441 ICE_FLOW_SEG_HDR_IPV4);
443 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
444 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
445 ICE_FLOW_SEG_HDR_IPV4);
447 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
448 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
449 ICE_FLOW_SEG_HDR_IPV4);
451 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
452 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
454 case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
455 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP |
456 ICE_FLOW_SEG_HDR_IPV6);
458 case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
459 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP |
460 ICE_FLOW_SEG_HDR_IPV6);
462 case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
463 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP |
464 ICE_FLOW_SEG_HDR_IPV6);
466 case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
467 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV6);
470 PMD_DRV_LOG(ERR, "not supported filter type.");
/* Register every selected field; offsets are INVAL because FDIR
 * matches whole fields, not raw byte ranges.
 */
474 for (i = 0; field[i] != ICE_FLOW_FIELD_IDX_MAX; i++) {
475 ice_flow_set_fld(seg, field[i],
476 ICE_FLOW_FLD_OFF_INVAL,
477 ICE_FLOW_FLD_OFF_INVAL,
478 ICE_FLOW_FLD_OFF_INVAL, false);
482 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
/* Tunnel case: two-segment array; the built segment becomes the inner
 * (index 1) segment.
 */
485 seg_tun = (struct ice_flow_seg_info *)
486 ice_malloc(hw, sizeof(*seg) * ICE_FD_HW_SEG_MAX);
488 PMD_DRV_LOG(ERR, "No memory can be allocated");
492 rte_memcpy(&seg_tun[1], seg, sizeof(*seg));
493 ret = ice_fdir_hw_tbl_conf(pf, pf->main_vsi, pf->fdir.fdir_vsi,
494 seg_tun, flow, true);
/* -EAGAIN means the identical profile already exists: not an error. */
499 } else if (ret < 0) {
503 return (ret == -EAGAIN) ? 0 : ret;
/* Adjust the active-filter counters after a rule add (@add true) or
 * delete (@add false).
 */
510 ice_fdir_cnt_update(struct ice_pf *pf, enum ice_fltr_ptype ptype,
511 bool is_tunnel, bool add)
513 struct ice_hw *hw = ICE_PF_TO_HW(pf);
516 cnt = (add) ? 1 : -1;
/* NOTE(review): the global HW counter is bumped before @ptype is
 * validated, so an unknown type still changes fdir_active_fltr —
 * confirm this is intended.
 */
517 hw->fdir_active_fltr += cnt;
518 if (ptype == ICE_FLTR_PTYPE_NONF_NONE || ptype >= ICE_FLTR_PTYPE_MAX)
519 PMD_DRV_LOG(ERR, "Unknown filter type %d", ptype);
521 pf->fdir_fltr_cnt[ptype][is_tunnel] += cnt;
/* Engine init hook: set up FDIR resources and register the parser so
 * rte_flow rules can be steered to this engine.
 */
525 ice_fdir_init(struct ice_adapter *ad)
527 struct ice_pf *pf = &ad->pf;
530 ret = ice_fdir_setup(pf);
534 return ice_register_parser(&ice_fdir_parser, ad);
/* Engine uninit hook: unregister the parser first, then release the
 * FDIR resources (inverse order of ice_fdir_init()).
 */
538 ice_fdir_uninit(struct ice_adapter *ad)
540 struct ice_pf *pf = &ad->pf;
542 ice_unregister_parser(&ice_fdir_parser, ad);
544 ice_fdir_teardown(pf);
/* Program (add=true) or remove (add=false) one FDIR rule: build the
 * programming descriptor and the dummy packet in the reserved memzone,
 * then push them through the FDIR programming queue.
 */
548 ice_fdir_add_del_filter(struct ice_pf *pf,
549 struct ice_fdir_filter_conf *filter,
552 struct ice_fltr_desc desc;
553 struct ice_hw *hw = ICE_PF_TO_HW(pf);
554 unsigned char *pkt = (unsigned char *)pf->fdir.prg_pkt;
/* Matched packets are delivered to the main VSI. */
557 filter->input.dest_vsi = pf->main_vsi->idx;
559 memset(&desc, 0, sizeof(desc));
560 ice_fdir_get_prgm_desc(hw, &filter->input, &desc, add);
562 memset(pkt, 0, ICE_FDIR_PKT_LEN);
563 ret = ice_fdir_get_prgm_pkt(&filter->input, pkt, false);
565 PMD_DRV_LOG(ERR, "Generate dummy packet failed");
569 return ice_fdir_programming(pf, &desc);
/* Engine create hook: configure the input set for the rule's flow
 * type, program the filter into HW, and keep a heap copy of the
 * parsed configuration attached to the rte_flow handle.
 */
573 ice_fdir_create_filter(struct ice_adapter *ad,
574 struct rte_flow *flow,
576 struct rte_flow_error *error)
578 struct ice_pf *pf = &ad->pf;
579 struct ice_fdir_filter_conf *filter = meta;
580 struct ice_fdir_filter_conf *rule;
583 rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
585 rte_flow_error_set(error, ENOMEM,
586 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
587 "Failed to allocate memory");
/* Make sure the HW profile for this flow type matches the rule's
 * input set before programming the filter itself.
 */
591 ret = ice_fdir_input_set_conf(pf, filter->input.flow_type,
592 filter->input_set, false);
594 rte_flow_error_set(error, -ret,
595 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
596 "Profile configure failed.");
600 ret = ice_fdir_add_del_filter(pf, filter, true);
602 rte_flow_error_set(error, -ret,
603 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
604 "Add filter rule failed.");
/* Persist the parsed config and bump the filter counters. */
608 rte_memcpy(rule, filter, sizeof(*rule));
610 ice_fdir_cnt_update(pf, filter->input.flow_type, false, true);
/* Engine destroy hook: remove the HW filter referenced by @flow and
 * update the filter counters.
 */
619 ice_fdir_destroy_filter(struct ice_adapter *ad,
620 struct rte_flow *flow,
621 struct rte_flow_error *error)
623 struct ice_pf *pf = &ad->pf;
624 struct ice_fdir_filter_conf *filter;
/* The rule pointer was stored by ice_fdir_create_filter(). */
627 filter = (struct ice_fdir_filter_conf *)flow->rule;
629 ret = ice_fdir_add_del_filter(pf, filter, false);
631 rte_flow_error_set(error, -ret,
632 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
633 "Del filter rule failed.");
637 ice_fdir_cnt_update(pf, filter->input.flow_type, false, false);
/* FDIR flow-engine ops registered with the generic ice flow layer. */
645 static struct ice_flow_engine ice_fdir_engine = {
646 .init = ice_fdir_init,
647 .uninit = ice_fdir_uninit,
648 .create = ice_fdir_create_filter,
649 .destroy = ice_fdir_destroy_filter,
650 .type = ICE_FLOW_ENGINE_FDIR,
/* Parse the rte_flow action list into @filter. Exactly one fate action
 * (QUEUE, DROP or PASSTHRU) and at most one MARK action are accepted.
 */
654 ice_fdir_parse_action(struct ice_adapter *ad,
655 const struct rte_flow_action actions[],
656 struct rte_flow_error *error,
657 struct ice_fdir_filter_conf *filter)
659 struct ice_pf *pf = &ad->pf;
660 const struct rte_flow_action_queue *act_q;
661 const struct rte_flow_action_mark *mark_spec = NULL;
662 uint32_t dest_num = 0;
663 uint32_t mark_num = 0;
665 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
666 switch (actions->type) {
667 case RTE_FLOW_ACTION_TYPE_VOID:
669 case RTE_FLOW_ACTION_TYPE_QUEUE:
/* QUEUE: direct matches to a validated RX queue index. */
672 act_q = actions->conf;
673 filter->input.q_index = act_q->index;
674 if (filter->input.q_index >=
675 pf->dev_data->nb_rx_queues) {
676 rte_flow_error_set(error, EINVAL,
677 RTE_FLOW_ERROR_TYPE_ACTION,
679 "Invalid queue for FDIR.");
682 filter->input.dest_ctl =
683 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
685 case RTE_FLOW_ACTION_TYPE_DROP:
688 filter->input.dest_ctl =
689 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
691 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
/* PASSTHRU: keep normal delivery; encoded as queue 0 direct. */
694 filter->input.dest_ctl =
695 ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
696 filter->input.q_index = 0;
698 case RTE_FLOW_ACTION_TYPE_MARK:
/* MARK: tag matched packets with the user-provided filter id. */
701 mark_spec = actions->conf;
702 filter->input.fltr_id = mark_spec->id;
705 rte_flow_error_set(error, EINVAL,
706 RTE_FLOW_ERROR_TYPE_ACTION, actions,
/* Post-checks: exactly one fate action, at most one mark. */
712 if (dest_num == 0 || dest_num >= 2) {
713 rte_flow_error_set(error, EINVAL,
714 RTE_FLOW_ERROR_TYPE_ACTION, actions,
715 "Unsupported action combination");
720 rte_flow_error_set(error, EINVAL,
721 RTE_FLOW_ERROR_TYPE_ACTION, actions,
722 "Too many mark actions");
/* Parse the rte_flow item list into @filter: validate each item's mask,
 * accumulate the ICE_INSET_* input-set bits, and fill the filter's
 * flow_type plus the matched header field values. Note that src/dst
 * are deliberately swapped when stored (the programming packet is
 * built from the receiver's point of view).
 *
 * Fix: `&eth_spec`/`&eth_mask` had been mangled by HTML-entity
 * mojibake into `ð_spec`/`ð_mask` (&eth -> U+00F0); restored the
 * address-of expressions.
 */
730 ice_fdir_parse_pattern(__rte_unused struct ice_adapter *ad,
731 const struct rte_flow_item pattern[],
732 struct rte_flow_error *error,
733 struct ice_fdir_filter_conf *filter)
735 const struct rte_flow_item *item = pattern;
736 enum rte_flow_item_type item_type;
737 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
738 const struct rte_flow_item_eth *eth_spec, *eth_mask;
739 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
740 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
741 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
742 const struct rte_flow_item_udp *udp_spec, *udp_mask;
743 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
744 uint64_t input_set = ICE_INSET_NONE;
745 uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
746 uint8_t ipv6_addr_mask[16] = {
747 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
748 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
750 uint32_t vtc_flow_cpu;
/* Range matching (item->last) is not supported by FDIR. */
753 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
755 rte_flow_error_set(error, EINVAL,
756 RTE_FLOW_ERROR_TYPE_ITEM,
758 "Not support range");
761 item_type = item->type;
764 case RTE_FLOW_ITEM_TYPE_ETH:
765 eth_spec = item->spec;
766 eth_mask = item->mask;
768 if (eth_spec && eth_mask) {
/* Only an exact-match destination MAC is supported. */
769 if (!rte_is_zero_ether_addr(&eth_spec->src) ||
770 !rte_is_zero_ether_addr(&eth_mask->src)) {
771 rte_flow_error_set(error, EINVAL,
772 RTE_FLOW_ERROR_TYPE_ITEM,
774 "Src mac not support");
778 if (!rte_is_broadcast_ether_addr(&eth_mask->dst)) {
779 rte_flow_error_set(error, EINVAL,
780 RTE_FLOW_ERROR_TYPE_ITEM,
782 "Invalid mac addr mask");
786 input_set |= ICE_INSET_DMAC;
787 rte_memcpy(&filter->input.ext_data.dst_mac,
792 case RTE_FLOW_ITEM_TYPE_IPV4:
793 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
794 ipv4_spec = item->spec;
795 ipv4_mask = item->mask;
797 if (ipv4_spec && ipv4_mask) {
798 /* Check IPv4 mask and update input set */
799 if (ipv4_mask->hdr.version_ihl ||
800 ipv4_mask->hdr.total_length ||
801 ipv4_mask->hdr.packet_id ||
802 ipv4_mask->hdr.fragment_offset ||
803 ipv4_mask->hdr.hdr_checksum) {
804 rte_flow_error_set(error, EINVAL,
805 RTE_FLOW_ERROR_TYPE_ITEM,
807 "Invalid IPv4 mask.");
/* Only full-field (all-ones) masks select a field. */
810 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
811 input_set |= ICE_INSET_IPV4_SRC;
812 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
813 input_set |= ICE_INSET_IPV4_DST;
814 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
815 input_set |= ICE_INSET_IPV4_TOS;
816 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
817 input_set |= ICE_INSET_IPV4_TTL;
818 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
819 input_set |= ICE_INSET_IPV4_PROTO;
/* src/dst intentionally swapped: the dummy programming
 * packet mirrors the matched flow.
 */
821 filter->input.ip.v4.dst_ip =
822 ipv4_spec->hdr.src_addr;
823 filter->input.ip.v4.src_ip =
824 ipv4_spec->hdr.dst_addr;
825 filter->input.ip.v4.tos =
826 ipv4_spec->hdr.type_of_service;
827 filter->input.ip.v4.ttl =
828 ipv4_spec->hdr.time_to_live;
829 filter->input.ip.v4.proto =
830 ipv4_spec->hdr.next_proto_id;
833 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
835 case RTE_FLOW_ITEM_TYPE_IPV6:
836 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
837 ipv6_spec = item->spec;
838 ipv6_mask = item->mask;
840 if (ipv6_spec && ipv6_mask) {
841 /* Check IPv6 mask and update input set */
842 if (ipv6_mask->hdr.payload_len) {
843 rte_flow_error_set(error, EINVAL,
844 RTE_FLOW_ERROR_TYPE_ITEM,
846 "Invalid IPv6 mask");
850 if (!memcmp(ipv6_mask->hdr.src_addr,
852 RTE_DIM(ipv6_mask->hdr.src_addr)))
853 input_set |= ICE_INSET_IPV6_SRC;
854 if (!memcmp(ipv6_mask->hdr.dst_addr,
856 RTE_DIM(ipv6_mask->hdr.dst_addr)))
857 input_set |= ICE_INSET_IPV6_DST;
/* TC is matched only when all 8 TC bits of vtc_flow are set. */
859 if ((ipv6_mask->hdr.vtc_flow &
860 rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
861 == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
862 input_set |= ICE_INSET_IPV6_TC;
863 if (ipv6_mask->hdr.proto == UINT8_MAX)
864 input_set |= ICE_INSET_IPV6_NEXT_HDR;
865 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
866 input_set |= ICE_INSET_IPV6_HOP_LIMIT;
/* src/dst swapped, same rationale as IPv4 above. */
868 rte_memcpy(filter->input.ip.v6.dst_ip,
869 ipv6_spec->hdr.src_addr, 16);
870 rte_memcpy(filter->input.ip.v6.src_ip,
871 ipv6_spec->hdr.dst_addr, 16);
874 rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
875 filter->input.ip.v6.tc =
876 (uint8_t)(vtc_flow_cpu >>
877 ICE_FDIR_IPV6_TC_OFFSET);
878 filter->input.ip.v6.proto =
879 ipv6_spec->hdr.proto;
880 filter->input.ip.v6.hlim =
881 ipv6_spec->hdr.hop_limits;
884 flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
886 case RTE_FLOW_ITEM_TYPE_TCP:
887 tcp_spec = item->spec;
888 tcp_mask = item->mask;
890 if (tcp_spec && tcp_mask) {
891 /* Check TCP mask and update input set */
892 if (tcp_mask->hdr.sent_seq ||
893 tcp_mask->hdr.recv_ack ||
894 tcp_mask->hdr.data_off ||
895 tcp_mask->hdr.tcp_flags ||
896 tcp_mask->hdr.rx_win ||
897 tcp_mask->hdr.cksum ||
898 tcp_mask->hdr.tcp_urp) {
899 rte_flow_error_set(error, EINVAL,
900 RTE_FLOW_ERROR_TYPE_ITEM,
906 if (tcp_mask->hdr.src_port == UINT16_MAX)
907 input_set |= ICE_INSET_TCP_SRC_PORT;
908 if (tcp_mask->hdr.dst_port == UINT16_MAX)
909 input_set |= ICE_INSET_TCP_DST_PORT;
911 /* Get filter info */
912 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
913 filter->input.ip.v4.dst_port =
914 tcp_spec->hdr.src_port;
915 filter->input.ip.v4.src_port =
916 tcp_spec->hdr.dst_port;
918 ICE_FLTR_PTYPE_NONF_IPV4_TCP;
919 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
920 filter->input.ip.v6.dst_port =
921 tcp_spec->hdr.src_port;
922 filter->input.ip.v6.src_port =
923 tcp_spec->hdr.dst_port;
925 ICE_FLTR_PTYPE_NONF_IPV6_TCP;
929 case RTE_FLOW_ITEM_TYPE_UDP:
930 udp_spec = item->spec;
931 udp_mask = item->mask;
933 if (udp_spec && udp_mask) {
934 /* Check UDP mask and update input set*/
935 if (udp_mask->hdr.dgram_len ||
936 udp_mask->hdr.dgram_cksum) {
937 rte_flow_error_set(error, EINVAL,
938 RTE_FLOW_ERROR_TYPE_ITEM,
944 if (udp_mask->hdr.src_port == UINT16_MAX)
945 input_set |= ICE_INSET_UDP_SRC_PORT;
946 if (udp_mask->hdr.dst_port == UINT16_MAX)
947 input_set |= ICE_INSET_UDP_DST_PORT;
949 /* Get filter info */
950 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
951 filter->input.ip.v4.dst_port =
952 udp_spec->hdr.src_port;
953 filter->input.ip.v4.src_port =
954 udp_spec->hdr.dst_port;
956 ICE_FLTR_PTYPE_NONF_IPV4_UDP;
957 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
958 filter->input.ip.v6.src_port =
959 udp_spec->hdr.src_port;
960 filter->input.ip.v6.dst_port =
961 udp_spec->hdr.dst_port;
963 ICE_FLTR_PTYPE_NONF_IPV6_UDP;
967 case RTE_FLOW_ITEM_TYPE_SCTP:
968 sctp_spec = item->spec;
969 sctp_mask = item->mask;
971 if (sctp_spec && sctp_mask) {
972 /* Check SCTP mask and update input set */
973 if (sctp_mask->hdr.cksum) {
974 rte_flow_error_set(error, EINVAL,
975 RTE_FLOW_ERROR_TYPE_ITEM,
981 if (sctp_mask->hdr.src_port == UINT16_MAX)
982 input_set |= ICE_INSET_SCTP_SRC_PORT;
983 if (sctp_mask->hdr.dst_port == UINT16_MAX)
984 input_set |= ICE_INSET_SCTP_DST_PORT;
986 /* Get filter info */
987 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
988 filter->input.ip.v4.dst_port =
989 sctp_spec->hdr.src_port;
990 filter->input.ip.v4.src_port =
991 sctp_spec->hdr.dst_port;
993 ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
994 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
995 filter->input.ip.v6.dst_port =
996 sctp_spec->hdr.src_port;
997 filter->input.ip.v6.src_port =
998 sctp_spec->hdr.dst_port;
1000 ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
1004 case RTE_FLOW_ITEM_TYPE_VOID:
1007 rte_flow_error_set(error, EINVAL,
1008 RTE_FLOW_ERROR_TYPE_ITEM,
1010 "Invalid pattern item.");
/* Hand the accumulated results back to the caller. */
1015 filter->input.flow_type = flow_type;
1016 filter->input_set = input_set;
/* Top-level parse callback: match the pattern against the supported
 * table, parse pattern and actions into pf->fdir.conf, and reject any
 * input set not covered by the matched entry's mask.
 */
1022 ice_fdir_parse(struct ice_adapter *ad,
1023 struct ice_pattern_match_item *array,
1025 const struct rte_flow_item pattern[],
1026 const struct rte_flow_action actions[],
1028 struct rte_flow_error *error)
1030 struct ice_pf *pf = &ad->pf;
1031 struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
1032 struct ice_pattern_match_item *item = NULL;
1036 memset(filter, 0, sizeof(*filter));
1037 item = ice_search_pattern_match_item(pattern, array, array_len, error);
1041 ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
/* An empty input set, or bits outside the pattern's mask, is invalid. */
1044 input_set = filter->input_set;
1045 if (!input_set || input_set & ~item->input_set_mask) {
1046 rte_flow_error_set(error, EINVAL,
1047 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1049 "Invalid input set");
1053 ret = ice_fdir_parse_action(ad, actions, error, filter);
/* Parser definition tying the FDIR pattern table and parse callback to
 * the FDIR engine at the distributor stage.
 */
1062 static struct ice_flow_parser ice_fdir_parser = {
1063 .engine = &ice_fdir_engine,
1064 .array = ice_fdir_pattern,
1065 .array_len = RTE_DIM(ice_fdir_pattern),
1066 .parse_pattern_action = ice_fdir_parse,
1067 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
/* Register the FDIR engine with the generic flow layer at load time. */
1070 RTE_INIT(ice_fdir_engine_register)
1072 ice_register_flow_engine(&ice_fdir_engine);