/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>
#include <rte_flow.h>
#include <rte_bitmap.h>
#include "base/ice_type.h"
#include "base/ice_acl.h"
#include "ice_logs.h"
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
#include "base/ice_flow.h"

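/* Size of the bitmap used to hand out ACL slot ids (see ice_acl_bitmap_init()). */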
#define MAX_ACL_SLOTS_ID 2048

#define ICE_ACL_INSET_ETH_IPV4 ( \
	ICE_INSET_SMAC | ICE_INSET_DMAC | \
	ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST)
#define ICE_ACL_INSET_ETH_IPV4_UDP ( \
	ICE_ACL_INSET_ETH_IPV4 | \
	ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT)
#define ICE_ACL_INSET_ETH_IPV4_TCP ( \
	ICE_ACL_INSET_ETH_IPV4 | \
	ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT)
#define ICE_ACL_INSET_ETH_IPV4_SCTP ( \
	ICE_ACL_INSET_ETH_IPV4 | \
	ICE_INSET_SCTP_SRC_PORT | ICE_INSET_SCTP_DST_PORT)

static struct ice_flow_parser ice_acl_parser;

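/* Per-rule state: up to four HW entry slots (see ice_acl_create_filter()). */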
struct acl_rule {
	enum ice_fltr_ptype flow_type;
	uint32_t entry_id[4];
};

static struct
ice_pattern_match_item ice_acl_pattern[] = {
	{pattern_eth_ipv4,	ICE_ACL_INSET_ETH_IPV4,		ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_udp,	ICE_ACL_INSET_ETH_IPV4_UDP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_tcp,	ICE_ACL_INSET_ETH_IPV4_TCP,	ICE_INSET_NONE,	ICE_INSET_NONE},
	{pattern_eth_ipv4_sctp,	ICE_ACL_INSET_ETH_IPV4_SCTP,	ICE_INSET_NONE,	ICE_INSET_NONE},
};

static int
ice_acl_prof_alloc(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype, fltr_ptype;

	if (!hw->acl_prof) {
		hw->acl_prof = (struct ice_fd_hw_prof **)
			ice_malloc(hw, ICE_FLTR_PTYPE_MAX *
				   sizeof(*hw->acl_prof));
		if (!hw->acl_prof)
			return -ENOMEM;
	}

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
		if (!hw->acl_prof[ptype]) {
			hw->acl_prof[ptype] = (struct ice_fd_hw_prof *)
				ice_malloc(hw, sizeof(**hw->acl_prof));
			if (!hw->acl_prof[ptype])
				goto fail_mem;
		}
	}

	return 0;

fail_mem:
	for (fltr_ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     fltr_ptype < ptype; fltr_ptype++) {
		rte_free(hw->acl_prof[fltr_ptype]);
		hw->acl_prof[fltr_ptype] = NULL;
	}

	rte_free(hw->acl_prof);
	hw->acl_prof = NULL;

	return -ENOMEM;
}

/**
 * ice_acl_setup - Reserve and initialize the ACL resources
 * @pf: board private structure
 */
static int
ice_acl_setup(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	uint32_t pf_num = hw->dev_caps.num_funcs;
	struct ice_acl_tbl_params params;
	uint16_t scen_id;
	int err = 0;

	memset(&params, 0, sizeof(params));

	/* Create the IPv4 table. */
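	/*
	 * A wider key is used when fewer than four functions are present,
	 * presumably because fewer functions leave more TCAM width per
	 * function; the extra width carries the MAC fields added in
	 * acl_add_prof_prepare().
	 */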
	if (pf_num < 4)
		params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 6;
	else
		params.width = ICE_AQC_ACL_KEY_WIDTH_BYTES * 3;

	params.depth = ICE_AQC_ACL_TCAM_DEPTH;
	params.entry_act_pairs = 1;
	params.concurr = false;

	err = ice_acl_create_tbl(hw, &params);
	if (err)
		return err;

	err = ice_acl_create_scen(hw, params.width, params.depth,
				  &scen_id);
	if (err)
		return err;

	return 0;
}

/**
 * ice_deinit_acl - Unroll the initialization of the ACL block
 * @pf: ptr to PF device
 */
static void ice_deinit_acl(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	ice_acl_destroy_tbl(hw);

	rte_free(hw->acl_tbl);
	hw->acl_tbl = NULL;

	rte_free(pf->acl.slots);
	pf->acl.slots = NULL;
}

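/**
 * acl_add_prof_prepare - Fill a flow segment with the fields to extract
 * @hw: pointer to HW instance
 * @seg: flow segment to fill
 * @is_l4: true when L4 ports are part of the key
 * @src_port: L4 source port field index (e.g. ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
 * @dst_port: L4 destination port field index
 *
 * MAC addresses are only added to the key when fewer than four functions
 * are present, matching the wider key chosen in ice_acl_setup().
 */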
static void
acl_add_prof_prepare(struct ice_hw *hw, struct ice_flow_seg_info *seg,
		     bool is_l4, uint16_t src_port, uint16_t dst_port)
{
	uint16_t val_loc, mask_loc;

	if (hw->dev_caps.num_funcs < 4) {
		/* mac source address */
		val_loc = offsetof(struct ice_fdir_fltr,
				   ext_data.src_mac);
		mask_loc = offsetof(struct ice_fdir_fltr,
				    ext_mask.src_mac);
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
				 val_loc, mask_loc,
				 ICE_FLOW_FLD_OFF_INVAL, false);

		/* mac destination address */
		val_loc = offsetof(struct ice_fdir_fltr,
				   ext_data.dst_mac);
		mask_loc = offsetof(struct ice_fdir_fltr,
				    ext_mask.dst_mac);
		ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
				 val_loc, mask_loc,
				 ICE_FLOW_FLD_OFF_INVAL, false);
	}

	/* IP source address */
	val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_ip);
	mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_ip);
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_SA, val_loc,
			 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);

	/* IP destination address */
	val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_ip);
	mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_ip);
	ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_IPV4_DA, val_loc,
			 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);

	if (is_l4) {
		/* Layer 4 source port */
		val_loc = offsetof(struct ice_fdir_fltr, ip.v4.src_port);
		mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.src_port);
		ice_flow_set_fld(seg, src_port, val_loc,
				 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);

		/* Layer 4 destination port */
		val_loc = offsetof(struct ice_fdir_fltr, ip.v4.dst_port);
		mask_loc = offsetof(struct ice_fdir_fltr, mask.v4.dst_port);
		ice_flow_set_fld(seg, dst_port, val_loc,
				 mask_loc, ICE_FLOW_FLD_OFF_INVAL, false);
	}
}

/**
 * ice_acl_prof_init - Initialize ACL profile
 * @pf: ice PF structure
 *
 * Returns 0 on success.
 */
static int
ice_acl_prof_init(struct ice_pf *pf)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_prof *prof_ipv4 = NULL;
	struct ice_flow_prof *prof_ipv4_udp = NULL;
	struct ice_flow_prof *prof_ipv4_tcp = NULL;
	struct ice_flow_prof *prof_ipv4_sctp = NULL;
	struct ice_flow_seg_info *seg;
	int i;
	int ret;

	seg = (struct ice_flow_seg_info *)
		 ice_malloc(hw, sizeof(*seg));
	if (!seg)
		return -ENOMEM;

	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_IPV4);
	acl_add_prof_prepare(hw, seg, false, 0, 0);
	ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
				ICE_FLTR_PTYPE_NONF_IPV4_OTHER,
				seg, 1, NULL, 0, &prof_ipv4);
	if (ret)
		goto err_add_prof;

	ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
	acl_add_prof_prepare(hw, seg, true,
			     ICE_FLOW_FIELD_IDX_UDP_SRC_PORT,
			     ICE_FLOW_FIELD_IDX_UDP_DST_PORT);
	ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
				ICE_FLTR_PTYPE_NONF_IPV4_UDP,
				seg, 1, NULL, 0, &prof_ipv4_udp);
	if (ret)
		goto err_add_prof_ipv4_udp;

	ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
	acl_add_prof_prepare(hw, seg, true,
			     ICE_FLOW_FIELD_IDX_TCP_SRC_PORT,
			     ICE_FLOW_FIELD_IDX_TCP_DST_PORT);
	ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
				ICE_FLTR_PTYPE_NONF_IPV4_TCP,
				seg, 1, NULL, 0, &prof_ipv4_tcp);
	if (ret)
		goto err_add_prof_ipv4_tcp;

	ice_memset(seg, 0, sizeof(*seg), ICE_NONDMA_MEM);
	ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
	acl_add_prof_prepare(hw, seg, true,
			     ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT,
			     ICE_FLOW_FIELD_IDX_SCTP_DST_PORT);
	ret = ice_flow_add_prof(hw, ICE_BLK_ACL, ICE_FLOW_RX,
				ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
				seg, 1, NULL, 0, &prof_ipv4_sctp);
	if (ret)
		goto err_add_prof_ipv4_sctp;

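	/*
	 * Associate the profiles with every VSI index below the main VSI;
	 * in the DCF case these are presumably the VF VSIs under the DCF's
	 * control.
	 */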
	for (i = 0; i < pf->main_vsi->idx; i++) {
		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4, i);
		if (ret)
			goto err_assoc_prof;

		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_udp, i);
		if (ret)
			goto err_assoc_prof;

		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_tcp, i);
		if (ret)
			goto err_assoc_prof;

		ret = ice_flow_assoc_prof(hw, ICE_BLK_ACL, prof_ipv4_sctp, i);
		if (ret)
			goto err_assoc_prof;
	}
	return 0;

err_assoc_prof:
	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_SCTP);
err_add_prof_ipv4_sctp:
	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_TCP);
err_add_prof_ipv4_tcp:
	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_UDP);
err_add_prof_ipv4_udp:
	ice_flow_rem_prof(hw, ICE_BLK_ACL, ICE_FLTR_PTYPE_NONF_IPV4_OTHER);
err_add_prof:
	ice_free(hw, seg);
	return ret;
}

/**
 * ice_acl_set_input_set - Helper function to set the input set for ACL
 * @filter: pointer to ACL info
 * @input: filter structure
 *
 * Return error value or 0 on success.
 */
static int
ice_acl_set_input_set(struct ice_acl_conf *filter, struct ice_fdir_fltr *input)
{
	if (!input)
		return ICE_ERR_BAD_PTR;

	input->q_index = filter->input.q_index;
	input->dest_vsi = filter->input.dest_vsi;
	input->dest_ctl = filter->input.dest_ctl;
	input->fltr_status = ICE_FLTR_PRGM_DESC_FD_STATUS_FD_ID;
	input->flow_type = filter->input.flow_type;

	switch (input->flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		input->ip.v4.dst_port = filter->input.ip.v4.dst_port;
		input->ip.v4.src_port = filter->input.ip.v4.src_port;
		input->ip.v4.dst_ip = filter->input.ip.v4.dst_ip;
		input->ip.v4.src_ip = filter->input.ip.v4.src_ip;

		input->mask.v4.dst_port = filter->input.mask.v4.dst_port;
		input->mask.v4.src_port = filter->input.mask.v4.src_port;
		input->mask.v4.dst_ip = filter->input.mask.v4.dst_ip;
		input->mask.v4.src_ip = filter->input.mask.v4.src_ip;

		ice_memcpy(&input->ext_data.src_mac,
			   &filter->input.ext_data.src_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(&input->ext_mask.src_mac,
			   &filter->input.ext_mask.src_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(&input->ext_data.dst_mac,
			   &filter->input.ext_data.dst_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(&input->ext_mask.dst_mac,
			   &filter->input.ext_mask.dst_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		ice_memcpy(&input->ip.v4, &filter->input.ip.v4,
			   sizeof(struct ice_fdir_v4),
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(&input->mask.v4, &filter->input.mask.v4,
			   sizeof(struct ice_fdir_v4),
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(&input->ext_data.src_mac,
			   &filter->input.ext_data.src_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(&input->ext_mask.src_mac,
			   &filter->input.ext_mask.src_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		ice_memcpy(&input->ext_data.dst_mac,
			   &filter->input.ext_data.dst_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);
		ice_memcpy(&input->ext_mask.dst_mac,
			   &filter->input.ext_mask.dst_mac,
			   RTE_ETHER_ADDR_LEN,
			   ICE_NONDMA_TO_NONDMA);

		break;
	default:
		return -EINVAL;
	}

	return 0;
}

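/*
 * Pop the lowest set bit from the slot bitmap: a set bit marks a free slot,
 * so scanning for a set bit, clearing it and returning its position hands
 * out a free slot id.
 */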
static inline int
ice_acl_alloc_slot_id(struct rte_bitmap *slots, uint32_t *slot_id)
{
	uint32_t pos = 0;
	uint64_t slab = 0;
	uint32_t i = 0;

	__rte_bitmap_scan_init(slots);
	if (!rte_bitmap_scan(slots, &pos, &slab))
		return -rte_errno;

	i = rte_bsf64(slab);
	pos += i;
	rte_bitmap_clear(slots, pos);

	*slot_id = pos;
	return 0;
}

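/*
 * Program one HW ACL entry for @flow_type from @input and record the
 * allocated slot in @rule->entry_id[@entry_idx].
 */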
static inline int
ice_acl_hw_set_conf(struct ice_pf *pf, struct ice_fdir_fltr *input,
		    struct ice_flow_action *acts, struct acl_rule *rule,
		    enum ice_fltr_ptype flow_type, int32_t entry_idx)
{
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	enum ice_block blk = ICE_BLK_ACL;
	uint64_t entry_id, hw_entry;
	uint32_t slot_id = 0;
	int act_cnt = 1;
	int ret = 0;

	/* Allocate a slot id from the bitmap. */
	ret = ice_acl_alloc_slot_id(pf->acl.slots, &slot_id);
	if (ret) {
		PMD_DRV_LOG(ERR, "Failed to allocate slot id.");
		return ret;
	}

	/* The IPV4_OTHER type needs an entry for every flow type, while
	 * the IPV4_UDP/TCP/SCTP types each need only their own entry
	 * (see ice_acl_create_filter()).
	 */
	if (slot_id < MAX_ACL_ENTRIES) {
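		/* Entry id: flow type in the upper 32 bits, slot id in the lower. */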
		entry_id = ((uint64_t)flow_type << 32) | slot_id;
		ret = ice_flow_add_entry(hw, blk, flow_type,
					 entry_id, pf->main_vsi->idx,
					 ICE_FLOW_PRIO_NORMAL, input,
					 acts, act_cnt, &hw_entry);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to add entry.");
			/* Return the allocated slot to the free pool. */
			rte_bitmap_set(pf->acl.slots, slot_id);
			return ret;
		}
		rule->entry_id[entry_idx] = slot_id;
		pf->acl.hw_entry_id[slot_id] = hw_entry;
	} else {
		PMD_DRV_LOG(ERR, "Exceeded the maximum entry number (%d) supported by HW!",
			    MAX_ACL_ENTRIES);
		rte_bitmap_set(pf->acl.slots, slot_id);
		return -1;
	}

	return 0;
}

static inline void
ice_acl_hw_rem_conf(struct ice_pf *pf, struct acl_rule *rule, int32_t entry_idx)
{
	uint32_t slot_id;
	int32_t i;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);

	for (i = 0; i < entry_idx; i++) {
		slot_id = rule->entry_id[i];
		rte_bitmap_set(pf->acl.slots, slot_id);
		ice_flow_rem_entry(hw, ICE_BLK_ACL,
				   pf->acl.hw_entry_id[slot_id]);
	}
}

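/*
 * Create an ACL filter from the parsed @meta (struct ice_acl_conf). An
 * IPV4_OTHER filter is programmed once per flow type so that it also
 * matches the UDP/TCP/SCTP profiles; on failure, all HW entries and
 * allocations made so far are rolled back.
 */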
static int
ice_acl_create_filter(struct ice_adapter *ad,
		      struct rte_flow *flow,
		      void *meta,
		      struct rte_flow_error *error)
{
	struct ice_acl_conf *filter = meta;
	enum ice_fltr_ptype flow_type = filter->input.flow_type;
	struct ice_flow_action acts[1];
	struct ice_pf *pf = &ad->pf;
	struct ice_fdir_fltr *input;
	struct acl_rule *rule;
	int ret;

	rule = rte_zmalloc("acl_rule", sizeof(*rule), 0);
	if (!rule) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory for acl rule");
		return -rte_errno;
	}

	input = rte_zmalloc("acl_entry", sizeof(*input), 0);
	if (!input) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory for acl input");
		ret = -rte_errno;
		goto err_acl_input_alloc;
	}

	ret = ice_acl_set_input_set(filter, input);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "failed to set input set.");
		ret = -rte_errno;
		goto err_acl_set_input;
	}

	if (filter->input.dest_ctl == ICE_FLTR_PRGM_DESC_DEST_DROP_PKT) {
		acts[0].type = ICE_FLOW_ACT_DROP;
		acts[0].data.acl_act.mdid = ICE_MDID_RX_PKT_DROP;
		acts[0].data.acl_act.prio = 0x3;
		acts[0].data.acl_act.value = CPU_TO_LE16(0x1);
	}

	input->acl_fltr = true;
	ret = ice_acl_hw_set_conf(pf, input, acts, rule, flow_type, 0);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "failed to set HW configuration.");
		ret = -rte_errno;
		goto err_acl_set_input;
	}

	if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
		ret = ice_acl_hw_set_conf(pf, input, acts, rule,
					  ICE_FLTR_PTYPE_NONF_IPV4_UDP, 1);
		if (ret)
			goto err_acl_hw_set_conf_udp;
		ret = ice_acl_hw_set_conf(pf, input, acts, rule,
					  ICE_FLTR_PTYPE_NONF_IPV4_TCP, 2);
		if (ret)
			goto err_acl_hw_set_conf_tcp;
		ret = ice_acl_hw_set_conf(pf, input, acts, rule,
					  ICE_FLTR_PTYPE_NONF_IPV4_SCTP, 3);
		if (ret)
			goto err_acl_hw_set_conf_sctp;
	}

	rule->flow_type = flow_type;
	flow->rule = rule;
	return 0;

err_acl_hw_set_conf_sctp:
	ice_acl_hw_rem_conf(pf, rule, 3);
	goto err_acl_set_input;
err_acl_hw_set_conf_tcp:
	ice_acl_hw_rem_conf(pf, rule, 2);
	goto err_acl_set_input;
err_acl_hw_set_conf_udp:
	ice_acl_hw_rem_conf(pf, rule, 1);
err_acl_set_input:
	rte_free(input);
err_acl_input_alloc:
	rte_free(rule);
	return ret;
}

static int
ice_acl_destroy_filter(struct ice_adapter *ad,
		       struct rte_flow *flow,
		       struct rte_flow_error *error)
{
	struct acl_rule *rule = (struct acl_rule *)flow->rule;
	uint32_t slot_id, i;
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	int ret = 0;

	switch (rule->flow_type) {
	case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
		for (i = 0; i < 4; i++) {
			slot_id = rule->entry_id[i];
			rte_bitmap_set(pf->acl.slots, slot_id);
			ice_flow_rem_entry(hw, ICE_BLK_ACL,
					   pf->acl.hw_entry_id[slot_id]);
		}
		break;
	case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
	case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
	case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
		slot_id = rule->entry_id[0];
		rte_bitmap_set(pf->acl.slots, slot_id);
		ice_flow_rem_entry(hw, ICE_BLK_ACL,
				   pf->acl.hw_entry_id[slot_id]);
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Unsupported flow type.");
		ret = -rte_errno;
		break;
	}

	flow->rule = NULL;
	rte_free(rule);
	return ret;
}

static void
ice_acl_filter_free(struct rte_flow *flow)
{
	rte_free(flow->rule);
	flow->rule = NULL;
}

static int
ice_acl_parse_action(__rte_unused struct ice_adapter *ad,
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error,
		     struct ice_acl_conf *filter)
{
	uint32_t dest_num = 0;

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			dest_num++;

			filter->input.dest_ctl =
				ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, actions,
				   "Invalid action.");
			return -rte_errno;
		}
	}

	if (dest_num == 0 || dest_num >= 2) {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ACTION, actions,
			   "Unsupported action combination");
		return -rte_errno;
	}

	return 0;
}

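/*
 * Parse a pattern into an ACL filter. Fully-masked (exact) addresses and
 * ports are rejected below; exact matches are presumably left to the other
 * flow engines, while the ACL TCAM handles masked matches. An illustrative
 * testpmd rule this parser would accept (assuming port 0 is the DCF):
 *
 *   flow create 0 ingress pattern eth / ipv4 src spec 192.168.0.0
 *        src mask 255.255.0.0 / tcp / end actions drop / end
 */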
static int
ice_acl_parse_pattern(__rte_unused struct ice_adapter *ad,
		      const struct rte_flow_item pattern[],
		      struct rte_flow_error *error,
		      struct ice_acl_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	uint64_t input_set = ICE_INSET_NONE;
	uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;

	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		item_type = item->type;

		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			if (eth_spec && eth_mask) {
				if (rte_is_broadcast_ether_addr(&eth_mask->src) ||
				    rte_is_broadcast_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Invalid mac addr mask");
					return -rte_errno;
				}

				if (!rte_is_zero_ether_addr(&eth_spec->src) &&
				    !rte_is_zero_ether_addr(&eth_mask->src)) {
					input_set |= ICE_INSET_SMAC;
					ice_memcpy(&filter->input.ext_data.src_mac,
						   &eth_spec->src,
						   RTE_ETHER_ADDR_LEN,
						   ICE_NONDMA_TO_NONDMA);
					ice_memcpy(&filter->input.ext_mask.src_mac,
						   &eth_mask->src,
						   RTE_ETHER_ADDR_LEN,
						   ICE_NONDMA_TO_NONDMA);
				}

				if (!rte_is_zero_ether_addr(&eth_spec->dst) &&
				    !rte_is_zero_ether_addr(&eth_mask->dst)) {
					input_set |= ICE_INSET_DMAC;
					ice_memcpy(&filter->input.ext_data.dst_mac,
						   &eth_spec->dst,
						   RTE_ETHER_ADDR_LEN,
						   ICE_NONDMA_TO_NONDMA);
					ice_memcpy(&filter->input.ext_mask.dst_mac,
						   &eth_mask->dst,
						   RTE_ETHER_ADDR_LEN,
						   ICE_NONDMA_TO_NONDMA);
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv4 mask.");
					return -rte_errno;
				}

				if (ipv4_mask->hdr.src_addr == UINT32_MAX ||
				    ipv4_mask->hdr.dst_addr == UINT32_MAX) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid IPv4 mask.");
					return -rte_errno;
				}

				if (ipv4_mask->hdr.src_addr) {
					filter->input.ip.v4.src_ip =
						ipv4_spec->hdr.src_addr;
					filter->input.mask.v4.src_ip =
						ipv4_mask->hdr.src_addr;

					input_set |= ICE_INSET_IPV4_SRC;
				}

				if (ipv4_mask->hdr.dst_addr) {
					filter->input.ip.v4.dst_ip =
						ipv4_spec->hdr.dst_addr;
					filter->input.mask.v4.dst_ip =
						ipv4_mask->hdr.dst_addr;

					input_set |= ICE_INSET_IPV4_DST;
				}
			}

			flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX ||
				    tcp_mask->hdr.dst_port == UINT16_MAX) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid TCP mask");
					return -rte_errno;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    tcp_mask->hdr.src_port) {
					input_set |= ICE_INSET_TCP_SRC_PORT;
					filter->input.ip.v4.src_port =
						tcp_spec->hdr.src_port;
					filter->input.mask.v4.src_port =
						tcp_mask->hdr.src_port;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    tcp_mask->hdr.dst_port) {
					input_set |= ICE_INSET_TCP_DST_PORT;
					filter->input.ip.v4.dst_port =
						tcp_spec->hdr.dst_port;
					filter->input.mask.v4.dst_port =
						tcp_mask->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX ||
				    udp_mask->hdr.dst_port == UINT16_MAX) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid UDP mask");
					return -rte_errno;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    udp_mask->hdr.src_port) {
					input_set |= ICE_INSET_UDP_SRC_PORT;
					filter->input.ip.v4.src_port =
						udp_spec->hdr.src_port;
					filter->input.mask.v4.src_port =
						udp_mask->hdr.src_port;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    udp_mask->hdr.dst_port) {
					input_set |= ICE_INSET_UDP_DST_PORT;
					filter->input.ip.v4.dst_port =
						udp_spec->hdr.dst_port;
					filter->input.mask.v4.dst_port =
						udp_mask->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec = item->spec;
			sctp_mask = item->mask;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;

			if (sctp_spec && sctp_mask) {
				if (sctp_mask->hdr.src_port == UINT16_MAX ||
				    sctp_mask->hdr.dst_port == UINT16_MAX) {
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item,
						"Invalid SCTP mask");
					return -rte_errno;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    sctp_mask->hdr.src_port) {
					input_set |= ICE_INSET_SCTP_SRC_PORT;
					filter->input.ip.v4.src_port =
						sctp_spec->hdr.src_port;
					filter->input.mask.v4.src_port =
						sctp_mask->hdr.src_port;
				}

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
				    sctp_mask->hdr.dst_port) {
					input_set |= ICE_INSET_SCTP_DST_PORT;
					filter->input.ip.v4.dst_port =
						sctp_spec->hdr.dst_port;
					filter->input.mask.v4.dst_port =
						sctp_mask->hdr.dst_port;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"Invalid pattern item.");
			return -rte_errno;
		}
	}

	filter->input.flow_type = flow_type;
	filter->input_set = input_set;

	return 0;
}

static int
ice_acl_parse(struct ice_adapter *ad,
	      struct ice_pattern_match_item *array,
	      uint32_t array_len,
	      const struct rte_flow_item pattern[],
	      const struct rte_flow_action actions[],
	      uint32_t priority __rte_unused,
	      void **meta,
	      struct rte_flow_error *error)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_acl_conf *filter = &pf->acl.conf;
	struct ice_pattern_match_item *item = NULL;
	uint64_t input_set;
	int ret;

	memset(filter, 0, sizeof(*filter));
	item = ice_search_pattern_match_item(ad, pattern, array, array_len,
					     error);
	if (!item)
		return -rte_errno;

	ret = ice_acl_parse_pattern(ad, pattern, error, filter);
	if (ret)
		goto error;
	input_set = filter->input_set;
	if (!input_set || input_set & ~item->input_set_mask_o) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
				   pattern,
				   "Invalid input set");
		ret = -rte_errno;
		goto error;
	}

	ret = ice_acl_parse_action(ad, actions, error, filter);
	if (ret)
		goto error;

	if (meta)
		*meta = filter;

error:
	rte_free(item);
	return ret;
}

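/*
 * Create the slot bitmap with all bits set: a set bit marks a free slot,
 * so the whole pool starts out available.
 */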
static int
ice_acl_bitmap_init(struct ice_pf *pf)
{
	uint32_t bmp_size;
	void *mem = NULL;
	struct rte_bitmap *slots;
	int ret = 0;

	bmp_size = rte_bitmap_get_memory_footprint(MAX_ACL_SLOTS_ID);
	mem = rte_zmalloc("create_acl_bmap", bmp_size, RTE_CACHE_LINE_SIZE);
	if (mem == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory for acl bitmap.");
		return -rte_errno;
	}

	slots = rte_bitmap_init_with_all_set(MAX_ACL_SLOTS_ID, mem, bmp_size);
	if (slots == NULL) {
		PMD_DRV_LOG(ERR, "Failed to initialize acl bitmap.");
		ret = -rte_errno;
		goto err_acl_mem_alloc;
	}
	pf->acl.slots = slots;
	return 0;

err_acl_mem_alloc:
	rte_free(mem);
	return ret;
}

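/* The ACL engine is used only through the DCF; skip initialization otherwise. */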
static int
ice_acl_init(struct ice_adapter *ad)
{
	int ret = 0;
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_parser *parser = &ice_acl_parser;

	if (!ad->hw.dcf_enabled)
		return 0;

	ret = ice_acl_prof_alloc(hw);
	if (ret) {
		PMD_DRV_LOG(ERR, "Cannot allocate memory for ACL profile.");
		return -ENOMEM;
	}

	ret = ice_acl_setup(pf);
	if (ret)
		return ret;

	ret = ice_acl_bitmap_init(pf);
	if (ret)
		return ret;

	ret = ice_acl_prof_init(pf);
	if (ret)
		return ret;

	return ice_register_parser(parser, ad);
}

static void
ice_acl_prof_free(struct ice_hw *hw)
{
	enum ice_fltr_ptype ptype;

	for (ptype = ICE_FLTR_PTYPE_NONF_NONE + 1;
	     ptype < ICE_FLTR_PTYPE_MAX; ptype++) {
		rte_free(hw->acl_prof[ptype]);
		hw->acl_prof[ptype] = NULL;
	}

	rte_free(hw->acl_prof);
	hw->acl_prof = NULL;
}

static void
ice_acl_uninit(struct ice_adapter *ad)
{
	struct ice_pf *pf = &ad->pf;
	struct ice_hw *hw = ICE_PF_TO_HW(pf);
	struct ice_flow_parser *parser = &ice_acl_parser;

	if (ad->hw.dcf_enabled) {
		ice_unregister_parser(parser, ad);
		ice_deinit_acl(pf);
		ice_acl_prof_free(hw);
	}
}

static struct
ice_flow_engine ice_acl_engine = {
	.init = ice_acl_init,
	.uninit = ice_acl_uninit,
	.create = ice_acl_create_filter,
	.destroy = ice_acl_destroy_filter,
	.free = ice_acl_filter_free,
	.type = ICE_FLOW_ENGINE_ACL,
};

static struct
ice_flow_parser ice_acl_parser = {
	.engine = &ice_acl_engine,
	.array = ice_acl_pattern,
	.array_len = RTE_DIM(ice_acl_pattern),
	.parse_pattern_action = ice_acl_parse,
	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
};

RTE_INIT(ice_acl_engine_init)
{
	struct ice_flow_engine *engine = &ice_acl_engine;

	ice_register_flow_engine(engine);
}