/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <rte_bitmap.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <ethdev_driver.h>

#include "base/ice_type.h"
#include "base/ice_acl.h"
#include "base/ice_flow.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
/* Number of slots tracked in the ACL entry bitmap. */
#define MAX_ACL_SLOTS_ID 2048

/* Input-set bits supported per ACL pattern: L2 MACs + IPv4 addresses,
 * optionally extended with the L4 port pair of the matching protocol.
 */
#define ICE_ACL_INSET_ETH_IPV4 ( \
	ICE_INSET_SMAC | ICE_INSET_DMAC | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
	ICE_ACL_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
#define ICE_ACL_INSET_ETH_IPV4_TCP ( \
	ICE_ACL_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
#define ICE_ACL_INSET_ETH_IPV4_SCTP ( \
	ICE_ACL_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)
44 static struct ice_flow_parser ice_acl_parser;
47 enum ice_fltr_ptype flow_type;
52 ice_pattern_match_item ice_acl_pattern[] = {
53 {pattern_eth_ipv4, ICE_ACL_INSET_ETH_IPV4, ICE_INSET_NONE},
54 {pattern_eth_ipv4_udp, ICE_ACL_INSET_ETH_IPV4_UDP, ICE_INSET_NONE},
55 {pattern_eth_ipv4_tcp, ICE_ACL_INSET_ETH_IPV4_TCP, ICE_INSET_NONE},
56 {pattern_eth_ipv4_sctp, ICE_ACL_INSET_ETH_IPV4_SCTP, ICE_INSET_NONE},
60 ice_acl_prof_alloc(struct ice_hw *hw)
62 enum ice_fltr_ptype ptype, fltr_ptype;
65 hw->acl_prof = (struct ice_fd_hw_prof **)
66 ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
67 sizeof(*hw->acl_prof));
72 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
73 ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
74 if (!hw->acl_prof[ptype]) {
75 hw->acl_prof[ptype] = (struct ice_fd_hw_prof *)
76 ice_malloc(hw, sizeof(**hw->acl_prof));
77 if (!hw->acl_prof[ptype])
85 for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
86 fltr_ptype < ptype; fltr_ptype++) {
87 rte_free(hw->acl_prof[fltr_ptype]);
88 hw->acl_prof[fltr_ptype] = NULL;
91 rte_free(hw->acl_prof);
98 * ice_acl_setup - Reserve and initialize the ACL resources
99 * @pf: board private structure
102 ice_acl_setup(struct ice_pf *pf)
104 struct ice_hw *hw = ICE_PF_TO_HW(pf);
105 uint32_t pf_num = hw->dev_caps.num_funcs;
106 struct ice_acl_tbl_params params;
110 memset(¶ms, 0, sizeof(params));
112 /* create for IPV4 table */
114 params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
116 params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;
118 params.depth = ICE_AQC_ACL_TCAM_DEPTH;
119 params.entry_act_pairs = 1;
120 params.concurr = false;
122 err = ice_acl_create_tbl(hw, ¶ms);
126 err = ice_acl_create_scen(hw, params.width, params.depth,
135 * ice_deinit_acl - Unroll the initialization of the ACL block
136 * @pf: ptr to PF device
138 * returns 0 on success, negative on error
140 static void ice_deinit_acl(struct ice_pf *pf)
142 struct ice_hw *hw = ICE_PF_TO_HW(pf);
144 ice_acl_destroy_tbl(hw);
146 rte_free(hw->acl_tbl);
150 rte_free(pf->acl.slots);
151 pf->acl.slots = NULL;
156 acl_add_prof_prepare(struct ice_hw *hw, struct ice_flow_seg_info *seg,
157 bool is_l4, uint16_t src_port, uint16_t dst_port)
159 uint16_t val_loc, mask_loc;
161 if (hw->dev_caps.num_funcs < 4) {
162 /* mac source address */
163 val_loc = offsetof(struct ice_fdir_fltr,
165 mask_loc = offsetof(struct ice_fdir_fltr,
167 ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
169 ICE_FLOW_FLD_OFF_INVAL, false);
171 /* mac destination address */
172 val_loc = offsetof(struct ice_fdir_fltr,
174 mask_loc = offsetof(struct ice_fdir_fltr,
176 ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
178 ICE_FLOW_FLD_OFF_INVAL, false);
181 /* IP source address */
182 val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_ip);
183 mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_ip);
184 ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc,
185 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
187 /* IP destination address */
188 val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
189 mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_ip);
190 ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, val_loc,
191 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
194 /* Layer 4 source port */
195 val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_port);
196 mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_port);
197 ice_flow_set_fld(seg, src_port, val_loc,
198 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
200 /* Layer 4 destination port */
201 val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_port);
202 mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_port);
203 ice_flow_set_fld(seg, dst_port, val_loc,
204 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
209 * ice_acl_prof_init - Initialize ACL profile
210 * @pf: ice PF structure
212 * Returns 0 on success.
215 ice_acl_prof_init(struct ice_pf *pf)
217 struct ice_hw *hw = ICE_PF_TO_HW(pf);
218 struct ice_flow_prof *prof_ipv4 = NULL;
219 struct ice_flow_prof *prof_ipv4_udp = NULL;
220 struct ice_flow_prof *prof_ipv4_tcp = NULL;
221 struct ice_flow_prof *prof_ipv4_sctp = NULL;
222 struct ice_flow_seg_info *seg;
226 seg = (struct ice_flow_seg_info *)
227 ice_malloc(hw, sizeof(*seg));
231 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
232 acl_add_prof_prepare(hw, seg, false, 0, 0);
233 ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
234 ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
235 seg, 1, NULL, 0, &prof_ipv4);
239 ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
240 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
241 acl_add_prof_prepare(hw, seg, true,
242 ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
243 ICE_FLOW_FIELD_IDX_UDP_DST_PORT);
244 ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
245 ICE_FLTR_PTYPE_NONF_IPV4_UDP,
246 seg, 1, NULL, 0, &prof_ipv4_udp);
248 goto err_add_prof_ipv4_udp;
250 ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
251 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
252 acl_add_prof_prepare(hw, seg, true,
253 ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
254 ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
255 ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
256 ICE_FLTR_PTYPE_NONF_IPV4_TCP,
257 seg, 1, NULL, 0, &prof_ipv4_tcp);
259 goto err_add_prof_ipv4_tcp;
261 ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
262 ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
263 acl_add_prof_prepare(hw, seg, true,
264 ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
265 ICE_FLOW_FIELD_IDX_SCTP_DST_PORT);
266 ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
267 ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
268 seg, 1, NULL, 0, &prof_ipv4_sctp);
270 goto err_add_prof_ipv4_sctp;
272 for (i = 0; i < pf->main_vsi->idx; i++) {
273 ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4, i);
277 ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_udp, i);
281 ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_tcp, i);
285 ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_sctp, i);
292 ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_SCTP);
293 err_add_prof_ipv4_sctp:
294 ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
295 err_add_prof_ipv4_tcp:
296 ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
297 err_add_prof_ipv4_udp:
298 ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_OTHER);
305 * ice_acl_set_input_set - Helper function to set the input set for ACL
306 * @hw: pointer to HW instance
307 * @filter: pointer to ACL info
308 * @input: filter structure
310 * Return error value or 0 on success.
313 ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
316 return ICE_ERR_BAD_PTR;
318 input->q_index = filter->input.q_index;
319 input->dest_vsi = filter->input.dest_vsi;
320 input->dest_ctl = filter->input.dest_ctl;
321 input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
322 input->flow_type = filter->input.flow_type;
324 switch (input->flow_type) {
325 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
326 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
327 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
328 input->ip.v4.dst_port = filter->input.ip.v4.dst_port;
329 input->ip.v4.src_port = filter->input.ip.v4.src_port;
330 input->ip.v4.dst_ip = filter->input.ip.v4.dst_ip;
331 input->ip.v4.src_ip = filter->input.ip.v4.src_ip;
333 input->mask.v4.dst_port = filter->input.mask.v4.dst_port;
334 input->mask.v4.src_port = filter->input.mask.v4.src_port;
335 input->mask.v4.dst_ip = filter->input.mask.v4.dst_ip;
336 input->mask.v4.src_ip = filter->input.mask.v4.src_ip;
338 ice_memcpy(&input->ext_data.src_mac,
339 &filter->input.ext_data.src_mac,
341 ICE_NONDMA_TO_NONDMA);
343 ice_memcpy(&input->ext_mask.src_mac,
344 &filter->input.ext_mask.src_mac,
346 ICE_NONDMA_TO_NONDMA);
348 ice_memcpy(&input->ext_data.dst_mac,
349 &filter->input.ext_data.dst_mac,
351 ICE_NONDMA_TO_NONDMA);
352 ice_memcpy(&input->ext_mask.dst_mac,
353 &filter->input.ext_mask.dst_mac,
355 ICE_NONDMA_TO_NONDMA);
358 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
359 ice_memcpy(&input->ip.v4, &filter->input.ip.v4,
360 sizeof(struct ice_fdir_v4),
361 ICE_NONDMA_TO_NONDMA);
362 ice_memcpy(&input->mask.v4, &filter->input.mask.v4,
363 sizeof(struct ice_fdir_v4),
364 ICE_NONDMA_TO_NONDMA);
366 ice_memcpy(&input->ext_data.src_mac,
367 &filter->input.ext_data.src_mac,
369 ICE_NONDMA_TO_NONDMA);
370 ice_memcpy(&input->ext_mask.src_mac,
371 &filter->input.ext_mask.src_mac,
373 ICE_NONDMA_TO_NONDMA);
375 ice_memcpy(&input->ext_data.dst_mac,
376 &filter->input.ext_data.dst_mac,
378 ICE_NONDMA_TO_NONDMA);
379 ice_memcpy(&input->ext_mask.dst_mac,
380 &filter->input.ext_mask.dst_mac,
382 ICE_NONDMA_TO_NONDMA);
393 ice_acl_alloc_slot_id(struct rte_bitmap *slots, uint32_t *slot_id)
399 __rte_bitmap_scan_init(slots);
400 if (!rte_bitmap_scan(slots, &pos, &slab))
405 rte_bitmap_clear(slots, pos);
412 ice_acl_hw_set_conf(struct ice_pf *pf, struct ice_fdir_fltr *input,
413 struct ice_flow_action *acts, struct acl_rule *rule,
414 enum ice_fltr_ptype flow_type, int32_t entry_idx)
416 struct ice_hw *hw = ICE_PF_TO_HW(pf);
417 enum ice_block blk = ICE_BLK_ACL;
418 uint64_t entry_id, hw_entry;
419 uint32_t slot_id = 0;
423 /* Allocate slot_id from bitmap table. */
424 ret = ice_acl_alloc_slot_id(pf->acl.slots, &slot_id);
426 PMD_DRV_LOG(ERR, "fail to alloc slot id.");
430 /* For IPV4_OTHER type, should add entry for all types.
431 * For IPV4_UDP/TCP/SCTP type, only add entry for each.
433 if (slot_id < MAX_ACL_ENTRIES) {
434 entry_id = ((uint64_t)flow_type << 32) | slot_id;
435 ret = ice_flow_add_entry(hw, blk, flow_type,
436 entry_id, pf->main_vsi->idx,
437 ICE_FLOW_PRIO_NORMAL, input,
438 acts, act_cnt, &hw_entry);
440 PMD_DRV_LOG(ERR, "Fail to add entry.");
443 rule->entry_id[entry_idx] = slot_id;
444 pf->acl.hw_entry_id[slot_id] = hw_entry;
446 PMD_DRV_LOG(ERR, "Exceed the maximum entry number(%d)"
447 " HW supported!", MAX_ACL_ENTRIES);
455 ice_acl_hw_rem_conf(struct ice_pf *pf, struct acl_rule *rule, int32_t entry_idx)
459 struct ice_hw *hw = ICE_PF_TO_HW(pf);
461 for (i = 0; i < entry_idx; i++) {
462 slot_id = rule->entry_id[i];
463 rte_bitmap_set(pf->acl.slots, slot_id);
464 ice_flow_rem_entry(hw, ICE_BLK_ACL,
465 pf->acl.hw_entry_id[slot_id]);
470 ice_acl_create_filter(struct ice_adapter *ad,
471 struct rte_flow *flow,
473 struct rte_flow_error *error)
475 struct ice_acl_conf *filter = meta;
476 enum ice_fltr_ptype flow_type = filter->input.flow_type;
477 struct ice_flow_action acts[1];
478 struct ice_pf *pf = &ad->pf;
479 struct ice_fdir_fltr *input;
480 struct acl_rule *rule;
483 rule = rte_zmalloc("acl_rule", sizeof(*rule), 0);
485 rte_flow_error_set(error, ENOMEM,
486 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
487 "Failed to allocate memory for acl rule");
491 input = rte_zmalloc("acl_entry", sizeof(*input), 0);
493 rte_flow_error_set(error, ENOMEM,
494 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
495 "Failed to allocate memory for acl input");
497 goto err_acl_input_alloc;
500 ret = ice_acl_set_input_set(filter, input);
502 rte_flow_error_set(error, -ret,
503 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
504 "failed to set input set.");
506 goto err_acl_set_input;
509 if (filter->input.dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
510 acts[0].type = ICE_FLOW_ACT_DROP;
511 acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
512 acts[0].data.acl_act.prio = 0x3;
513 acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
516 input->acl_fltr = true;
517 ret = ice_acl_hw_set_conf(pf, input, acts, rule, flow_type, 0);
519 rte_flow_error_set(error, -ret,
520 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
521 "failed to set hw configure.");
526 if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
527 ret = ice_acl_hw_set_conf(pf, input, acts, rule,
528 ICE_FLTR_PTYPE_NONF_IPV4_UDP, 1);
530 goto err_acl_hw_set_conf_udp;
531 ret = ice_acl_hw_set_conf(pf, input, acts, rule,
532 ICE_FLTR_PTYPE_NONF_IPV4_TCP, 2);
534 goto err_acl_hw_set_conf_tcp;
535 ret = ice_acl_hw_set_conf(pf, input, acts, rule,
536 ICE_FLTR_PTYPE_NONF_IPV4_SCTP, 3);
538 goto err_acl_hw_set_conf_sctp;
541 rule->flow_type = flow_type;
545 err_acl_hw_set_conf_sctp:
546 ice_acl_hw_rem_conf(pf, rule, 3);
547 err_acl_hw_set_conf_tcp:
548 ice_acl_hw_rem_conf(pf, rule, 2);
549 err_acl_hw_set_conf_udp:
550 ice_acl_hw_rem_conf(pf, rule, 1);
559 ice_acl_destroy_filter(struct ice_adapter *ad,
560 struct rte_flow *flow,
561 struct rte_flow_error *error __rte_unused)
563 struct acl_rule *rule = (struct acl_rule *)flow->rule;
565 struct ice_pf *pf = &ad->pf;
566 struct ice_hw *hw = ICE_PF_TO_HW(pf);
569 switch (rule->flow_type) {
570 case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
571 for (i = 0; i < 4; i++) {
572 slot_id = rule->entry_id[i];
573 rte_bitmap_set(pf->acl.slots, slot_id);
574 ice_flow_rem_entry(hw, ICE_BLK_ACL,
575 pf->acl.hw_entry_id[slot_id]);
578 case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
579 case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
580 case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
581 slot_id = rule->entry_id[0];
582 rte_bitmap_set(pf->acl.slots, slot_id);
583 ice_flow_rem_entry(hw, ICE_BLK_ACL,
584 pf->acl.hw_entry_id[slot_id]);
587 rte_flow_error_set(error, EINVAL,
588 RTE_FLOW_ERROR_TYPE_ITEM,
589 NULL, "Unsupported flow type.");
599 ice_acl_filter_free(struct rte_flow *flow)
601 rte_free(flow->rule);
606 ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
607 const struct rte_flow_action actions[],
608 struct rte_flow_error *error,
609 struct ice_acl_conf *filter)
611 uint32_t dest_num = 0;
613 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
614 switch (actions->type) {
615 case RTE_FLOW_ACTION_TYPE_VOID:
617 case RTE_FLOW_ACTION_TYPE_DROP:
620 filter->input.dest_ctl =
621 ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
624 rte_flow_error_set(error, EINVAL,
625 RTE_FLOW_ERROR_TYPE_ACTION, actions,
631 if (dest_num == 0 || dest_num >= 2) {
632 rte_flow_error_set(error, EINVAL,
633 RTE_FLOW_ERROR_TYPE_ACTION, actions,
634 "Unsupported action combination");
642 ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
643 const struct rte_flow_item pattern[],
644 struct rte_flow_error *error,
645 struct ice_acl_conf *filter)
647 const struct rte_flow_item *item = pattern;
648 enum rte_flow_item_type item_type;
649 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
650 const struct rte_flow_item_eth *eth_spec, *eth_mask;
651 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
652 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
653 const struct rte_flow_item_udp *udp_spec, *udp_mask;
654 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
655 uint64_t input_set = ICE_INSET_NONE;
656 uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
658 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
659 item_type = item->type;
662 case RTE_FLOW_ITEM_TYPE_ETH:
663 eth_spec = item->spec;
664 eth_mask = item->mask;
666 if (eth_spec && eth_mask) {
667 if (rte_is_broadcast_ether_addr(ð_mask->src) ||
668 rte_is_broadcast_ether_addr(ð_mask->dst)) {
669 rte_flow_error_set(error, EINVAL,
670 RTE_FLOW_ERROR_TYPE_ITEM,
671 item, "Invalid mac addr mask");
675 if (!rte_is_zero_ether_addr(ð_spec->src) &&
676 !rte_is_zero_ether_addr(ð_mask->src)) {
677 input_set |= ICE_INSET_SMAC;
678 ice_memcpy(&filter->input.ext_data.src_mac,
681 ICE_NONDMA_TO_NONDMA);
682 ice_memcpy(&filter->input.ext_mask.src_mac,
685 ICE_NONDMA_TO_NONDMA);
688 if (!rte_is_zero_ether_addr(ð_spec->dst) &&
689 !rte_is_zero_ether_addr(ð_mask->dst)) {
690 input_set |= ICE_INSET_DMAC;
691 ice_memcpy(&filter->input.ext_data.dst_mac,
694 ICE_NONDMA_TO_NONDMA);
695 ice_memcpy(&filter->input.ext_mask.dst_mac,
698 ICE_NONDMA_TO_NONDMA);
702 case RTE_FLOW_ITEM_TYPE_IPV4:
703 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
704 ipv4_spec = item->spec;
705 ipv4_mask = item->mask;
707 if (ipv4_spec && ipv4_mask) {
708 /* Check IPv4 mask and update input set */
709 if (ipv4_mask->hdr.version_ihl ||
710 ipv4_mask->hdr.total_length ||
711 ipv4_mask->hdr.packet_id ||
712 ipv4_mask->hdr.fragment_offset ||
713 ipv4_mask->hdr.hdr_checksum) {
714 rte_flow_error_set(error, EINVAL,
715 RTE_FLOW_ERROR_TYPE_ITEM,
717 "Invalid IPv4 mask.");
721 if (ipv4_mask->hdr.src_addr == UINT32_MAX ||
722 ipv4_mask->hdr.dst_addr == UINT32_MAX) {
723 rte_flow_error_set(error, EINVAL,
724 RTE_FLOW_ERROR_TYPE_ITEM,
726 "Invalid IPv4 mask.");
730 if (ipv4_mask->hdr.src_addr) {
731 filter->input.ip.v4.src_ip =
732 ipv4_spec->hdr.src_addr;
733 filter->input.mask.v4.src_ip =
734 ipv4_mask->hdr.src_addr;
736 input_set |= ICE_INSET_IPV4_SRC;
739 if (ipv4_mask->hdr.dst_addr) {
740 filter->input.ip.v4.dst_ip =
741 ipv4_spec->hdr.dst_addr;
742 filter->input.mask.v4.dst_ip =
743 ipv4_mask->hdr.dst_addr;
745 input_set |= ICE_INSET_IPV4_DST;
749 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
751 case RTE_FLOW_ITEM_TYPE_TCP:
752 tcp_spec = item->spec;
753 tcp_mask = item->mask;
755 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
756 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
758 if (tcp_spec && tcp_mask) {
759 /* Check TCP mask and update input set */
760 if (tcp_mask->hdr.sent_seq ||
761 tcp_mask->hdr.recv_ack ||
762 tcp_mask->hdr.data_off ||
763 tcp_mask->hdr.tcp_flags ||
764 tcp_mask->hdr.rx_win ||
765 tcp_mask->hdr.cksum ||
766 tcp_mask->hdr.tcp_urp) {
767 rte_flow_error_set(error, EINVAL,
768 RTE_FLOW_ERROR_TYPE_ITEM,
774 if (tcp_mask->hdr.src_port == UINT16_MAX ||
775 tcp_mask->hdr.dst_port == UINT16_MAX) {
776 rte_flow_error_set(error, EINVAL,
777 RTE_FLOW_ERROR_TYPE_ITEM,
783 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
784 tcp_mask->hdr.src_port) {
785 input_set |= ICE_INSET_TCP_SRC_PORT;
786 filter->input.ip.v4.src_port =
787 tcp_spec->hdr.src_port;
788 filter->input.mask.v4.src_port =
789 tcp_mask->hdr.src_port;
792 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
793 tcp_mask->hdr.dst_port) {
794 input_set |= ICE_INSET_TCP_DST_PORT;
795 filter->input.ip.v4.dst_port =
796 tcp_spec->hdr.dst_port;
797 filter->input.mask.v4.dst_port =
798 tcp_mask->hdr.dst_port;
802 case RTE_FLOW_ITEM_TYPE_UDP:
803 udp_spec = item->spec;
804 udp_mask = item->mask;
806 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
807 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
809 if (udp_spec && udp_mask) {
810 /* Check UDP mask and update input set*/
811 if (udp_mask->hdr.dgram_len ||
812 udp_mask->hdr.dgram_cksum) {
813 rte_flow_error_set(error, EINVAL,
814 RTE_FLOW_ERROR_TYPE_ITEM,
820 if (udp_mask->hdr.src_port == UINT16_MAX ||
821 udp_mask->hdr.dst_port == UINT16_MAX) {
822 rte_flow_error_set(error, EINVAL,
823 RTE_FLOW_ERROR_TYPE_ITEM,
829 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
830 udp_mask->hdr.src_port) {
831 input_set |= ICE_INSET_UDP_SRC_PORT;
832 filter->input.ip.v4.src_port =
833 udp_spec->hdr.src_port;
834 filter->input.mask.v4.src_port =
835 udp_mask->hdr.src_port;
838 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
839 udp_mask->hdr.dst_port) {
840 input_set |= ICE_INSET_UDP_DST_PORT;
841 filter->input.ip.v4.dst_port =
842 udp_spec->hdr.dst_port;
843 filter->input.mask.v4.dst_port =
844 udp_mask->hdr.dst_port;
848 case RTE_FLOW_ITEM_TYPE_SCTP:
849 sctp_spec = item->spec;
850 sctp_mask = item->mask;
852 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
853 flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
855 if (sctp_spec && sctp_mask) {
856 if (sctp_mask->hdr.src_port == UINT16_MAX ||
857 sctp_mask->hdr.dst_port == UINT16_MAX) {
858 rte_flow_error_set(error, EINVAL,
859 RTE_FLOW_ERROR_TYPE_ITEM,
861 "Invalid SCTP mask");
865 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
866 sctp_mask->hdr.src_port) {
867 input_set |= ICE_INSET_SCTP_SRC_PORT;
868 filter->input.ip.v4.src_port =
869 sctp_spec->hdr.src_port;
870 filter->input.mask.v4.src_port =
871 sctp_mask->hdr.src_port;
874 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
875 sctp_mask->hdr.dst_port) {
876 input_set |= ICE_INSET_SCTP_DST_PORT;
877 filter->input.ip.v4.dst_port =
878 sctp_spec->hdr.dst_port;
879 filter->input.mask.v4.dst_port =
880 sctp_mask->hdr.dst_port;
884 case RTE_FLOW_ITEM_TYPE_VOID:
887 rte_flow_error_set(error, EINVAL,
888 RTE_FLOW_ERROR_TYPE_ITEM,
890 "Invalid pattern item.");
895 filter->input.flow_type = flow_type;
896 filter->input_set = input_set;
902 ice_acl_parse(struct ice_adapter *ad,
903 struct ice_pattern_match_item *array,
905 const struct rte_flow_item pattern[],
906 const struct rte_flow_action actions[],
908 struct rte_flow_error *error)
910 struct ice_pf *pf = &ad->pf;
911 struct ice_acl_conf *filter = &pf->acl.conf;
912 struct ice_pattern_match_item *item = NULL;
916 memset(filter, 0, sizeof(*filter));
917 item = ice_search_pattern_match_item(ad, pattern, array, array_len,
922 ret = ice_acl_parse_pattern(ad, pattern, error, filter);
925 input_set = filter->input_set;
926 if (!input_set || input_set & ~item->input_set_mask) {
927 rte_flow_error_set(error, EINVAL,
928 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
930 "Invalid input set");
935 ret = ice_acl_parse_action(ad, actions, error, filter);
948 ice_acl_bitmap_init(struct ice_pf *pf)
952 struct rte_bitmap *slots;
954 bmp_size = rte_bitmap_get_memory_footprint(MAX_ACL_SLOTS_ID);
955 mem = rte_zmalloc("create_acl_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
957 PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
961 slots = rte_bitmap_init_with_all_set(MAX_ACL_SLOTS_ID, mem, bmp_size);
963 PMD_DRV_LOG(ERR, "Failed to initialize acl bitmap.");
965 goto err_acl_mem_alloc;
967 pf->acl.slots = slots;
976 ice_acl_init(struct ice_adapter *ad)
979 struct ice_pf *pf = &ad->pf;
980 struct ice_hw *hw = ICE_PF_TO_HW(pf);
981 struct ice_flow_parser *parser = &ice_acl_parser;
983 if (!ad->hw.dcf_enabled)
986 ret = ice_acl_prof_alloc(hw);
988 PMD_DRV_LOG(ERR, "Cannot allocate memory for "
993 ret = ice_acl_setup(pf);
997 ret = ice_acl_bitmap_init(pf);
1001 ret = ice_acl_prof_init(pf);
1005 return ice_register_parser(parser, ad);
1009 ice_acl_prof_free(struct ice_hw *hw)
1011 enum ice_fltr_ptype ptype;
1013 for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
1014 ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
1015 rte_free(hw->acl_prof[ptype]);
1016 hw->acl_prof[ptype] = NULL;
1019 rte_free(hw->acl_prof);
1020 hw->acl_prof = NULL;
1024 ice_acl_uninit(struct ice_adapter *ad)
1026 struct ice_pf *pf = &ad->pf;
1027 struct ice_hw *hw = ICE_PF_TO_HW(pf);
1028 struct ice_flow_parser *parser = &ice_acl_parser;
1030 if (ad->hw.dcf_enabled) {
1031 ice_unregister_parser(parser, ad);
1033 ice_acl_prof_free(hw);
1038 ice_flow_engine ice_acl_engine = {
1039 .init = ice_acl_init,
1040 .uninit = ice_acl_uninit,
1041 .create = ice_acl_create_filter,
1042 .destroy = ice_acl_destroy_filter,
1043 .free = ice_acl_filter_free,
1044 .type = ICE_FLOW_ENGINE_ACL,
1048 ice_flow_parser ice_acl_parser = {
1049 .engine = &ice_acl_engine,
1050 .array = ice_acl_pattern,
1051 .array_len = RTE_DIM(ice_acl_pattern),
1052 .parse_pattern_action = ice_acl_parse,
1053 .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
1056 RTE_INIT(ice_acl_engine_init)
1058 struct ice_flow_engine *engine = &ice_acl_engine;
1059 ice_register_flow_engine(engine);