net/ice/base: support removing advanced rule
[dpdk.git] / drivers / net / ice / base / ice_switch.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2019
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
9
/* Byte offsets into the dummy ethernet header below */
#define ICE_ETH_DA_OFFSET               0
#define ICE_ETH_ETHTYPE_OFFSET          12
#define ICE_ETH_VLAN_TCI_OFFSET         14
/* Largest valid 12-bit VLAN ID */
#define ICE_MAX_VLAN_ID                 0xFFF

/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *      In case of VLAN filter first two bytes defines ether type (0x8100)
 *      and remaining two bytes are placeholder for programming a given VLAN ID
 *      In case of Ether type filter it is treated as header without VLAN tag
 *      and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN               16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
                                                        0x2, 0, 0, 0, 0, 0,
                                                        0x81, 0, 0, 0};

/* Size in bytes of an AQ switch rule element carrying an RX/TX lookup rule
 * plus the dummy ethernet header above (the -1 accounts for the one-byte
 * flexible payload already counted inside the struct).
 */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
        (sizeof(struct ice_aqc_sw_rules_elem) - \
         sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
         sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Same as above but with no packet header payload at all */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
        (sizeof(struct ice_aqc_sw_rules_elem) - \
         sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
         sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Size of a large-action rule element holding n action entries */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
        (sizeof(struct ice_aqc_sw_rules_elem) - \
         sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
         sizeof(struct ice_sw_rule_lg_act) - \
         sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
         ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* Size of a VSI-list rule element holding n VSI entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
        (sizeof(struct ice_aqc_sw_rules_elem) - \
         sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
         sizeof(struct ice_sw_rule_vsi_list) - \
         sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
         ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
55
/* Dummy GRE tunnel packet template: outer Ethernet + IPv4 (proto 0x2F = GRE,
 * total length 0x3E) + GRE header (0x8000/0x6558 — presumably key-present
 * flag with Transparent Ethernet Bridging ethertype; confirm against the GRE
 * spec) + inner Ethernet + inner IPv4. Zero bytes are placeholders to be
 * filled in when a rule is programmed.
 */
static const
u8 dummy_gre_packet[] = { 0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x08, 0,              /* Ether ends */
                          0x45, 0, 0, 0x3E,     /* IP starts */
                          0, 0, 0, 0,
                          0, 0x2F, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0,           /* IP ends */
                          0x80, 0, 0x65, 0x58,  /* GRE starts */
                          0, 0, 0, 0,           /* GRE ends */
                          0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x08, 0,              /* Ether ends */
                          0x45, 0, 0, 0x14,     /* IP starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0            /* IP ends */
                        };
78
/* Dummy UDP tunnel (VXLAN) packet template: outer Ethernet + IPv4
 * (proto 0x11 = UDP, total length 0x32) + UDP with destination port
 * 0x12B5 (4789, the IANA VXLAN port) + VXLAN header + inner Ethernet.
 * Zero bytes are placeholders filled in at rule-programming time.
 */
static const u8
dummy_udp_tun_packet[] = {0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x08, 0,              /* Ether ends */
                          0x45, 0, 0, 0x32,     /* IP starts */
                          0, 0, 0, 0,
                          0, 0x11, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0,           /* IP ends */
                          0, 0, 0x12, 0xB5,     /* UDP start*/
                          0, 0x1E, 0, 0,        /* UDP end*/
                          0, 0, 0, 0,           /* VXLAN start */
                          0, 0, 0, 0,           /* VXLAN end*/
                          0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0, 0                  /* Ether ends */
                        };
98
/* Dummy TCP packet template: Ethernet + IPv4 (proto 0x06 = TCP, TTL 0x40,
 * precomputed header checksum 0xF569) + TCP header (0x50,0x02 — data offset
 * 5 with SYN flag set), padded with two bytes for 4-byte alignment.
 */
static const u8
dummy_tcp_tun_packet[] = {0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x08, 0,              /* Ether ends */
                          0x45, 0, 0, 0x28,     /* IP starts */
                          0, 0x01, 0, 0,
                          0x40, 0x06, 0xF5, 0x69,
                          0, 0, 0, 0,
                          0, 0, 0, 0,   /* IP ends */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x50, 0x02, 0x20,
                          0, 0x9, 0x79, 0, 0,
                          0, 0 /* 2 bytes padding for 4 byte alignment*/
                        };
116
/* this is a recipe to profile bitmap association: indexed by recipe ID,
 * each entry is the bitmap of profiles that recipe is mapped to
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
                          ICE_MAX_NUM_PROFILES);
/* field-vector result indexes still available for chained recipes; bits are
 * cleared in ice_get_recp_frm_fw() as firmware recipes claim result slots
 */
static ice_declare_bitmap(available_result_ids, ICE_CHAIN_FV_INDEX_START + 1);
121
122 /**
123  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
124  * @hw: pointer to hardware structure
125  * @recps: struct that we need to populate
126  * @rid: recipe ID that we are populating
127  *
128  * This function is used to populate all the necessary entries into our
129  * bookkeeping so that we have a current list of all the recipes that are
130  * programmed in the firmware.
131  */
132 static enum ice_status
133 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid)
134 {
135         u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
136         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
137         u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
138         struct ice_aqc_recipe_data_elem *tmp;
139         u16 num_recps = ICE_MAX_NUM_RECIPES;
140         struct ice_prot_lkup_ext *lkup_exts;
141         enum ice_status status;
142
143         /* we need a buffer big enough to accommodate all the recipes */
144         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
145                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
146         if (!tmp)
147                 return ICE_ERR_NO_MEMORY;
148
149         tmp[0].recipe_indx = rid;
150         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
151         /* non-zero status meaning recipe doesn't exist */
152         if (status)
153                 goto err_unroll;
154         lkup_exts = &recps[rid].lkup_exts;
155         /* start populating all the entries for recps[rid] based on lkups from
156          * firmware
157          */
158         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
159                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
160                 struct ice_recp_grp_entry *rg_entry;
161                 u8 prof_id, prot = 0;
162                 u16 off = 0;
163
164                 rg_entry = (struct ice_recp_grp_entry *)
165                         ice_malloc(hw, sizeof(*rg_entry));
166                 if (!rg_entry) {
167                         status = ICE_ERR_NO_MEMORY;
168                         goto err_unroll;
169                 }
170                 /* Avoid 8th bit since its result enable bit */
171                 result_idxs[result_idx] = root_bufs.content.result_indx &
172                         ~ICE_AQ_RECIPE_RESULT_EN;
173                 /* Check if result enable bit is set */
174                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
175                         ice_clear_bit(ICE_CHAIN_FV_INDEX_START -
176                                       result_idxs[result_idx++],
177                                       available_result_ids);
178                 ice_memcpy(r_bitmap,
179                            recipe_to_profile[tmp[sub_recps].recipe_indx],
180                            sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
181                 /* get the first profile that is associated with rid */
182                 prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
183                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
184                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
185
186                         rg_entry->fv_idx[i] = lkup_indx;
187                         /* If the recipe is a chained recipe then all its
188                          * child recipe's result will have a result index.
189                          * To fill fv_words we should not use those result
190                          * index, we only need the protocol ids and offsets.
191                          * We will skip all the fv_idx which stores result
192                          * index in them. We also need to skip any fv_idx which
193                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
194                          * valid offset value.
195                          */
196                         if (result_idxs[0] == rg_entry->fv_idx[i] ||
197                             result_idxs[1] == rg_entry->fv_idx[i] ||
198                             result_idxs[2] == rg_entry->fv_idx[i] ||
199                             result_idxs[3] == rg_entry->fv_idx[i] ||
200                             result_idxs[4] == rg_entry->fv_idx[i] ||
201                             rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
202                             rg_entry->fv_idx[i] == 0)
203                                 continue;
204
205                         ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
206                                           rg_entry->fv_idx[i], &prot, &off);
207                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
208                         lkup_exts->fv_words[fv_word_idx].off = off;
209                         fv_word_idx++;
210                 }
211                 /* populate rg_list with the data from the child entry of this
212                  * recipe
213                  */
214                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
215         }
216         lkup_exts->n_val_words = fv_word_idx;
217         recps[rid].n_grp_count = num_recps;
218         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
219                 ice_calloc(hw, recps[rid].n_grp_count,
220                            sizeof(struct ice_aqc_recipe_data_elem));
221         if (!recps[rid].root_buf)
222                 goto err_unroll;
223
224         ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
225                    sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
226         recps[rid].recp_created = true;
227         if (tmp[sub_recps].content.rid & ICE_AQ_RECIPE_ID_IS_ROOT)
228                 recps[rid].root_rid = rid;
229 err_unroll:
230         ice_free(hw, tmp);
231         return status;
232 }
233
234 /**
235  * ice_get_recp_to_prof_map - updates recipe to profile mapping
236  * @hw: pointer to hardware structure
237  *
238  * This function is used to populate recipe_to_profile matrix where index to
239  * this array is the recipe ID and the element is the mapping of which profiles
240  * is this recipe mapped to.
241  */
242 static void
243 ice_get_recp_to_prof_map(struct ice_hw *hw)
244 {
245         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
246         u16 i;
247
248         for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
249                 u16 j;
250
251                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
252                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
253                         continue;
254
255                 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
256                         if (ice_is_bit_set(r_bitmap, j))
257                                 ice_set_bit(i, recipe_to_profile[j]);
258         }
259 }
260
261 /**
262  * ice_init_def_sw_recp - initialize the recipe book keeping tables
263  * @hw: pointer to the HW struct
264  *
265  * Allocate memory for the entire recipe table and initialize the structures/
266  * entries corresponding to basic recipes.
267  */
268 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
269 {
270         struct ice_sw_recipe *recps;
271         u8 i;
272
273         recps = (struct ice_sw_recipe *)
274                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
275         if (!recps)
276                 return ICE_ERR_NO_MEMORY;
277
278         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
279                 recps[i].root_rid = i;
280                 INIT_LIST_HEAD(&recps[i].filt_rules);
281                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
282                 INIT_LIST_HEAD(&recps[i].rg_list);
283                 ice_init_lock(&recps[i].filt_rule_lock);
284         }
285
286         hw->switch_info->recp_list = recps;
287
288         return ICE_SUCCESS;
289 }
290
291 /**
292  * ice_aq_get_sw_cfg - get switch configuration
293  * @hw: pointer to the hardware structure
294  * @buf: pointer to the result buffer
295  * @buf_size: length of the buffer available for response
296  * @req_desc: pointer to requested descriptor
297  * @num_elems: pointer to number of elements
298  * @cd: pointer to command details structure or NULL
299  *
300  * Get switch configuration (0x0200) to be placed in 'buff'.
301  * This admin command returns information such as initial VSI/port number
302  * and switch ID it belongs to.
303  *
304  * NOTE: *req_desc is both an input/output parameter.
305  * The caller of this function first calls this function with *request_desc set
306  * to 0. If the response from f/w has *req_desc set to 0, all the switch
307  * configuration information has been returned; if non-zero (meaning not all
308  * the information was returned), the caller should call this function again
309  * with *req_desc set to the previous value returned by f/w to get the
310  * next block of switch configuration information.
311  *
312  * *num_elems is output only parameter. This reflects the number of elements
313  * in response buffer. The caller of this function to use *num_elems while
314  * parsing the response buffer.
315  */
316 static enum ice_status
317 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
318                   u16 buf_size, u16 *req_desc, u16 *num_elems,
319                   struct ice_sq_cd *cd)
320 {
321         struct ice_aqc_get_sw_cfg *cmd;
322         enum ice_status status;
323         struct ice_aq_desc desc;
324
325         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
326         cmd = &desc.params.get_sw_conf;
327         cmd->element = CPU_TO_LE16(*req_desc);
328
329         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
330         if (!status) {
331                 *req_desc = LE16_TO_CPU(cmd->element);
332                 *num_elems = LE16_TO_CPU(cmd->num_elems);
333         }
334
335         return status;
336 }
337
338
339 /**
340  * ice_alloc_sw - allocate resources specific to switch
341  * @hw: pointer to the HW struct
342  * @ena_stats: true to turn on VEB stats
343  * @shared_res: true for shared resource, false for dedicated resource
344  * @sw_id: switch ID returned
345  * @counter_id: VEB counter ID returned
346  *
347  * allocates switch resources (SWID and VEB counter) (0x0208)
348  */
349 enum ice_status
350 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
351              u16 *counter_id)
352 {
353         struct ice_aqc_alloc_free_res_elem *sw_buf;
354         struct ice_aqc_res_elem *sw_ele;
355         enum ice_status status;
356         u16 buf_len;
357
358         buf_len = sizeof(*sw_buf);
359         sw_buf = (struct ice_aqc_alloc_free_res_elem *)
360                    ice_malloc(hw, buf_len);
361         if (!sw_buf)
362                 return ICE_ERR_NO_MEMORY;
363
364         /* Prepare buffer for switch ID.
365          * The number of resource entries in buffer is passed as 1 since only a
366          * single switch/VEB instance is allocated, and hence a single sw_id
367          * is requested.
368          */
369         sw_buf->num_elems = CPU_TO_LE16(1);
370         sw_buf->res_type =
371                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
372                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
373                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
374
375         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
376                                        ice_aqc_opc_alloc_res, NULL);
377
378         if (status)
379                 goto ice_alloc_sw_exit;
380
381         sw_ele = &sw_buf->elem[0];
382         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
383
384         if (ena_stats) {
385                 /* Prepare buffer for VEB Counter */
386                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
387                 struct ice_aqc_alloc_free_res_elem *counter_buf;
388                 struct ice_aqc_res_elem *counter_ele;
389
390                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
391                                 ice_malloc(hw, buf_len);
392                 if (!counter_buf) {
393                         status = ICE_ERR_NO_MEMORY;
394                         goto ice_alloc_sw_exit;
395                 }
396
397                 /* The number of resource entries in buffer is passed as 1 since
398                  * only a single switch/VEB instance is allocated, and hence a
399                  * single VEB counter is requested.
400                  */
401                 counter_buf->num_elems = CPU_TO_LE16(1);
402                 counter_buf->res_type =
403                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
404                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
405                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
406                                                opc, NULL);
407
408                 if (status) {
409                         ice_free(hw, counter_buf);
410                         goto ice_alloc_sw_exit;
411                 }
412                 counter_ele = &counter_buf->elem[0];
413                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
414                 ice_free(hw, counter_buf);
415         }
416
417 ice_alloc_sw_exit:
418         ice_free(hw, sw_buf);
419         return status;
420 }
421
422 /**
423  * ice_free_sw - free resources specific to switch
424  * @hw: pointer to the HW struct
425  * @sw_id: switch ID returned
426  * @counter_id: VEB counter ID returned
427  *
428  * free switch resources (SWID and VEB counter) (0x0209)
429  *
430  * NOTE: This function frees multiple resources. It continues
431  * releasing other resources even after it encounters error.
432  * The error code returned is the last error it encountered.
433  */
434 enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
435 {
436         struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
437         enum ice_status status, ret_status;
438         u16 buf_len;
439
440         buf_len = sizeof(*sw_buf);
441         sw_buf = (struct ice_aqc_alloc_free_res_elem *)
442                    ice_malloc(hw, buf_len);
443         if (!sw_buf)
444                 return ICE_ERR_NO_MEMORY;
445
446         /* Prepare buffer to free for switch ID res.
447          * The number of resource entries in buffer is passed as 1 since only a
448          * single switch/VEB instance is freed, and hence a single sw_id
449          * is released.
450          */
451         sw_buf->num_elems = CPU_TO_LE16(1);
452         sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
453         sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
454
455         ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
456                                            ice_aqc_opc_free_res, NULL);
457
458         if (ret_status)
459                 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
460
461         /* Prepare buffer to free for VEB Counter resource */
462         counter_buf = (struct ice_aqc_alloc_free_res_elem *)
463                         ice_malloc(hw, buf_len);
464         if (!counter_buf) {
465                 ice_free(hw, sw_buf);
466                 return ICE_ERR_NO_MEMORY;
467         }
468
469         /* The number of resource entries in buffer is passed as 1 since only a
470          * single switch/VEB instance is freed, and hence a single VEB counter
471          * is released
472          */
473         counter_buf->num_elems = CPU_TO_LE16(1);
474         counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
475         counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
476
477         status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
478                                        ice_aqc_opc_free_res, NULL);
479         if (status) {
480                 ice_debug(hw, ICE_DBG_SW,
481                           "VEB counter resource could not be freed\n");
482                 ret_status = status;
483         }
484
485         ice_free(hw, counter_buf);
486         ice_free(hw, sw_buf);
487         return ret_status;
488 }
489
490 /**
491  * ice_aq_add_vsi
492  * @hw: pointer to the HW struct
493  * @vsi_ctx: pointer to a VSI context struct
494  * @cd: pointer to command details structure or NULL
495  *
496  * Add a VSI context to the hardware (0x0210)
497  */
498 enum ice_status
499 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
500                struct ice_sq_cd *cd)
501 {
502         struct ice_aqc_add_update_free_vsi_resp *res;
503         struct ice_aqc_add_get_update_free_vsi *cmd;
504         struct ice_aq_desc desc;
505         enum ice_status status;
506
507         cmd = &desc.params.vsi_cmd;
508         res = &desc.params.add_update_free_vsi_res;
509
510         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
511
512         if (!vsi_ctx->alloc_from_pool)
513                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
514                                            ICE_AQ_VSI_IS_VALID);
515
516         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
517
518         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
519
520         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
521                                  sizeof(vsi_ctx->info), cd);
522
523         if (!status) {
524                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
525                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
526                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
527         }
528
529         return status;
530 }
531
532 /**
533  * ice_aq_free_vsi
534  * @hw: pointer to the HW struct
535  * @vsi_ctx: pointer to a VSI context struct
536  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
537  * @cd: pointer to command details structure or NULL
538  *
539  * Free VSI context info from hardware (0x0213)
540  */
541 enum ice_status
542 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
543                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
544 {
545         struct ice_aqc_add_update_free_vsi_resp *resp;
546         struct ice_aqc_add_get_update_free_vsi *cmd;
547         struct ice_aq_desc desc;
548         enum ice_status status;
549
550         cmd = &desc.params.vsi_cmd;
551         resp = &desc.params.add_update_free_vsi_res;
552
553         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
554
555         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
556         if (keep_vsi_alloc)
557                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
558
559         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
560         if (!status) {
561                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
562                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
563         }
564
565         return status;
566 }
567
568 /**
569  * ice_aq_update_vsi
570  * @hw: pointer to the HW struct
571  * @vsi_ctx: pointer to a VSI context struct
572  * @cd: pointer to command details structure or NULL
573  *
574  * Update VSI context in the hardware (0x0211)
575  */
576 enum ice_status
577 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
578                   struct ice_sq_cd *cd)
579 {
580         struct ice_aqc_add_update_free_vsi_resp *resp;
581         struct ice_aqc_add_get_update_free_vsi *cmd;
582         struct ice_aq_desc desc;
583         enum ice_status status;
584
585         cmd = &desc.params.vsi_cmd;
586         resp = &desc.params.add_update_free_vsi_res;
587
588         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
589
590         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
591
592         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
593
594         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
595                                  sizeof(vsi_ctx->info), cd);
596
597         if (!status) {
598                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
599                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
600         }
601
602         return status;
603 }
604
605 /**
606  * ice_is_vsi_valid - check whether the VSI is valid or not
607  * @hw: pointer to the HW struct
608  * @vsi_handle: VSI handle
609  *
610  * check whether the VSI is valid or not
611  */
612 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
613 {
614         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
615 }
616
617 /**
618  * ice_get_hw_vsi_num - return the HW VSI number
619  * @hw: pointer to the HW struct
620  * @vsi_handle: VSI handle
621  *
622  * return the HW VSI number
623  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
624  */
625 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
626 {
627         return hw->vsi_ctx[vsi_handle]->vsi_num;
628 }
629
630 /**
631  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
632  * @hw: pointer to the HW struct
633  * @vsi_handle: VSI handle
634  *
635  * return the VSI context entry for a given VSI handle
636  */
637 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
638 {
639         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
640 }
641
642 /**
643  * ice_save_vsi_ctx - save the VSI context for a given VSI handle
644  * @hw: pointer to the HW struct
645  * @vsi_handle: VSI handle
646  * @vsi: VSI context pointer
647  *
648  * save the VSI context entry for a given VSI handle
649  */
650 static void
651 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
652 {
653         hw->vsi_ctx[vsi_handle] = vsi;
654 }
655
656 /**
657  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
658  * @hw: pointer to the HW struct
659  * @vsi_handle: VSI handle
660  */
661 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
662 {
663         struct ice_vsi_ctx *vsi;
664         u8 i;
665
666         vsi = ice_get_vsi_ctx(hw, vsi_handle);
667         if (!vsi)
668                 return;
669         ice_for_each_traffic_class(i) {
670                 if (vsi->lan_q_ctx[i]) {
671                         ice_free(hw, vsi->lan_q_ctx[i]);
672                         vsi->lan_q_ctx[i] = NULL;
673                 }
674         }
675 }
676
677 /**
678  * ice_clear_vsi_ctx - clear the VSI context entry
679  * @hw: pointer to the HW struct
680  * @vsi_handle: VSI handle
681  *
682  * clear the VSI context entry
683  */
684 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
685 {
686         struct ice_vsi_ctx *vsi;
687
688         vsi = ice_get_vsi_ctx(hw, vsi_handle);
689         if (vsi) {
690                 if (!LIST_EMPTY(&vsi->rss_list_head))
691                         ice_rem_all_rss_vsi_ctx(hw, vsi_handle);
692                 ice_clear_vsi_q_ctx(hw, vsi_handle);
693                 ice_destroy_lock(&vsi->rss_locks);
694                 ice_free(hw, vsi);
695                 hw->vsi_ctx[vsi_handle] = NULL;
696         }
697 }
698
699 /**
700  * ice_clear_all_vsi_ctx - clear all the VSI context entries
701  * @hw: pointer to the HW struct
702  */
703 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
704 {
705         u16 i;
706
707         for (i = 0; i < ICE_MAX_VSI; i++)
708                 ice_clear_vsi_ctx(hw, i);
709 }
710
711 /**
712  * ice_add_vsi - add VSI context to the hardware and VSI handle list
713  * @hw: pointer to the HW struct
714  * @vsi_handle: unique VSI handle provided by drivers
715  * @vsi_ctx: pointer to a VSI context struct
716  * @cd: pointer to command details structure or NULL
717  *
718  * Add a VSI context to the hardware also add it into the VSI handle list.
719  * If this function gets called after reset for existing VSIs then update
720  * with the new HW VSI number in the corresponding VSI handle list entry.
721  */
722 enum ice_status
723 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
724             struct ice_sq_cd *cd)
725 {
726         struct ice_vsi_ctx *tmp_vsi_ctx;
727         enum ice_status status;
728
729         if (vsi_handle >= ICE_MAX_VSI)
730                 return ICE_ERR_PARAM;
731         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
732         if (status)
733                 return status;
734         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
735         if (!tmp_vsi_ctx) {
736                 /* Create a new VSI context */
737                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
738                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
739                 if (!tmp_vsi_ctx) {
740                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
741                         return ICE_ERR_NO_MEMORY;
742                 }
743                 *tmp_vsi_ctx = *vsi_ctx;
744                 ice_init_lock(&tmp_vsi_ctx->rss_locks);
745                 INIT_LIST_HEAD(&tmp_vsi_ctx->rss_list_head);
746                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
747         } else {
748                 /* update with new HW VSI num */
749                 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
750                         tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
751         }
752
753         return ICE_SUCCESS;
754 }
755
756 /**
757  * ice_free_vsi- free VSI context from hardware and VSI handle list
758  * @hw: pointer to the HW struct
759  * @vsi_handle: unique VSI handle
760  * @vsi_ctx: pointer to a VSI context struct
761  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
762  * @cd: pointer to command details structure or NULL
763  *
764  * Free VSI context info from hardware as well as from VSI handle list
765  */
766 enum ice_status
767 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
768              bool keep_vsi_alloc, struct ice_sq_cd *cd)
769 {
770         enum ice_status status;
771
772         if (!ice_is_vsi_valid(hw, vsi_handle))
773                 return ICE_ERR_PARAM;
774         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
775         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
776         if (!status)
777                 ice_clear_vsi_ctx(hw, vsi_handle);
778         return status;
779 }
780
781 /**
782  * ice_update_vsi
783  * @hw: pointer to the HW struct
784  * @vsi_handle: unique VSI handle
785  * @vsi_ctx: pointer to a VSI context struct
786  * @cd: pointer to command details structure or NULL
787  *
788  * Update VSI context in the hardware
789  */
790 enum ice_status
791 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
792                struct ice_sq_cd *cd)
793 {
794         if (!ice_is_vsi_valid(hw, vsi_handle))
795                 return ICE_ERR_PARAM;
796         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
797         return ice_aq_update_vsi(hw, vsi_ctx, cd);
798 }
799
800 /**
801  * ice_aq_get_vsi_params
802  * @hw: pointer to the HW struct
803  * @vsi_ctx: pointer to a VSI context struct
804  * @cd: pointer to command details structure or NULL
805  *
806  * Get VSI context info from hardware (0x0212)
807  */
808 enum ice_status
809 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
810                       struct ice_sq_cd *cd)
811 {
812         struct ice_aqc_add_get_update_free_vsi *cmd;
813         struct ice_aqc_get_vsi_resp *resp;
814         struct ice_aq_desc desc;
815         enum ice_status status;
816
817         cmd = &desc.params.vsi_cmd;
818         resp = &desc.params.get_vsi_resp;
819
820         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
821
822         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
823
824         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
825                                  sizeof(vsi_ctx->info), cd);
826         if (!status) {
827                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
828                                         ICE_AQ_VSI_NUM_M;
829                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
830                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
831         }
832
833         return status;
834 }
835
/**
 * ice_aq_add_update_mir_rule - add/update a mirror rule
 * @hw: pointer to the HW struct
 * @rule_type: Rule Type; VPORT ingress/egress rules require a VSI list in
 *             mr_buf, PPORT ingress/egress rules must not supply one
 * @dest_vsi: VSI number to which packets will be mirrored
 * @count: length of the list
 * @mr_buf: buffer for list of mirrored VSI numbers
 * @cd: pointer to command details structure or NULL
 * @rule_id: in/out Rule ID; on input, pass ICE_INVAL_MIRROR_RULE_ID to add
 *           a new rule or an existing ID to update that rule; on success
 *           it is overwritten with the rule ID reported by firmware
 *
 * Add/Update Mirror Rule (0x260).
 */
enum ice_status
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
			   u16 count, struct ice_mir_rule_buf *mr_buf,
			   struct ice_sq_cd *cd, u16 *rule_id)
{
	struct ice_aqc_add_update_mir_rule *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *mr_list = NULL;
	u16 buf_size = 0;

	switch (rule_type) {
	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
		/* Make sure count and mr_buf are set for these rule_types */
		if (!(count && mr_buf))
			return ICE_ERR_PARAM;

		buf_size = count * sizeof(__le16);
		mr_list = (__le16 *)ice_malloc(hw, buf_size);
		if (!mr_list)
			return ICE_ERR_NO_MEMORY;
		break;
	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
		/* Make sure count and mr_buf are not set for these
		 * rule_types
		 */
		if (count || mr_buf)
			return ICE_ERR_PARAM;
		break;
	default:
		ice_debug(hw, ICE_DBG_SW,
			  "Error due to unsupported rule_type %u\n", rule_type);
		return ICE_ERR_OUT_OF_RANGE;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);

	/* Pre-process 'mr_buf' items for add/update of virtual port
	 * ingress/egress mirroring (but not physical port ingress/egress
	 * mirroring)
	 */
	if (mr_buf) {
		int i;

		for (i = 0; i < count; i++) {
			u16 id;

			id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;

			/* Validate specified VSI number, make sure it is less
			 * than ICE_MAX_VSI, if not return with error.
			 */
			if (id >= ICE_MAX_VSI) {
				ice_debug(hw, ICE_DBG_SW,
					  "Error VSI index (%u) out-of-range\n",
					  id);
				/* mr_list was allocated above; free it before
				 * bailing out
				 */
				ice_free(hw, mr_list);
				return ICE_ERR_OUT_OF_RANGE;
			}

			/* add VSI to mirror rule */
			if (mr_buf[i].add)
				mr_list[i] =
					CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
			else /* remove VSI from mirror rule */
				mr_list[i] = CPU_TO_LE16(id);
		}
	}

	cmd = &desc.params.add_update_rule;
	/* A valid incoming rule ID means this is an update of that rule */
	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
					   ICE_AQC_RULE_ID_VALID_M);
	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
	cmd->num_entries = CPU_TO_LE16(count);
	cmd->dest = CPU_TO_LE16(dest_vsi);

	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
	/* On success, report the firmware-assigned rule ID to the caller */
	if (!status)
		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;

	ice_free(hw, mr_list);

	return status;
}
935
936 /**
937  * ice_aq_delete_mir_rule - delete a mirror rule
938  * @hw: pointer to the HW struct
939  * @rule_id: Mirror rule ID (to be deleted)
940  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
941  *               otherwise it is returned to the shared pool
942  * @cd: pointer to command details structure or NULL
943  *
944  * Delete Mirror Rule (0x261).
945  */
946 enum ice_status
947 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
948                        struct ice_sq_cd *cd)
949 {
950         struct ice_aqc_delete_mir_rule *cmd;
951         struct ice_aq_desc desc;
952
953         /* rule_id should be in the range 0...63 */
954         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
955                 return ICE_ERR_OUT_OF_RANGE;
956
957         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
958
959         cmd = &desc.params.del_rule;
960         rule_id |= ICE_AQC_RULE_ID_VALID_M;
961         cmd->rule_id = CPU_TO_LE16(rule_id);
962
963         if (keep_allocd)
964                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
965
966         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
967 }
968
969 /**
970  * ice_aq_alloc_free_vsi_list
971  * @hw: pointer to the HW struct
972  * @vsi_list_id: VSI list ID returned or used for lookup
973  * @lkup_type: switch rule filter lookup type
974  * @opc: switch rules population command type - pass in the command opcode
975  *
976  * allocates or free a VSI list resource
977  */
978 static enum ice_status
979 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
980                            enum ice_sw_lkup_type lkup_type,
981                            enum ice_adminq_opc opc)
982 {
983         struct ice_aqc_alloc_free_res_elem *sw_buf;
984         struct ice_aqc_res_elem *vsi_ele;
985         enum ice_status status;
986         u16 buf_len;
987
988         buf_len = sizeof(*sw_buf);
989         sw_buf = (struct ice_aqc_alloc_free_res_elem *)
990                 ice_malloc(hw, buf_len);
991         if (!sw_buf)
992                 return ICE_ERR_NO_MEMORY;
993         sw_buf->num_elems = CPU_TO_LE16(1);
994
995         if (lkup_type == ICE_SW_LKUP_MAC ||
996             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
997             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
998             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
999             lkup_type == ICE_SW_LKUP_PROMISC ||
1000             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1001             lkup_type == ICE_SW_LKUP_LAST) {
1002                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
1003         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1004                 sw_buf->res_type =
1005                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1006         } else {
1007                 status = ICE_ERR_PARAM;
1008                 goto ice_aq_alloc_free_vsi_list_exit;
1009         }
1010
1011         if (opc == ice_aqc_opc_free_res)
1012                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1013
1014         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1015         if (status)
1016                 goto ice_aq_alloc_free_vsi_list_exit;
1017
1018         if (opc == ice_aqc_opc_alloc_res) {
1019                 vsi_ele = &sw_buf->elem[0];
1020                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1021         }
1022
1023 ice_aq_alloc_free_vsi_list_exit:
1024         ice_free(hw, sw_buf);
1025         return status;
1026 }
1027
1028 /**
1029  * ice_aq_set_storm_ctrl - Sets storm control configuration
1030  * @hw: pointer to the HW struct
1031  * @bcast_thresh: represents the upper threshold for broadcast storm control
1032  * @mcast_thresh: represents the upper threshold for multicast storm control
1033  * @ctl_bitmask: storm control control knobs
1034  *
1035  * Sets the storm control configuration (0x0280)
1036  */
1037 enum ice_status
1038 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1039                       u32 ctl_bitmask)
1040 {
1041         struct ice_aqc_storm_cfg *cmd;
1042         struct ice_aq_desc desc;
1043
1044         cmd = &desc.params.storm_conf;
1045
1046         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1047
1048         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1049         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1050         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1051
1052         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1053 }
1054
1055 /**
1056  * ice_aq_get_storm_ctrl - gets storm control configuration
1057  * @hw: pointer to the HW struct
1058  * @bcast_thresh: represents the upper threshold for broadcast storm control
1059  * @mcast_thresh: represents the upper threshold for multicast storm control
1060  * @ctl_bitmask: storm control control knobs
1061  *
1062  * Gets the storm control configuration (0x0281)
1063  */
1064 enum ice_status
1065 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1066                       u32 *ctl_bitmask)
1067 {
1068         enum ice_status status;
1069         struct ice_aq_desc desc;
1070
1071         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1072
1073         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1074         if (!status) {
1075                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1076
1077                 if (bcast_thresh)
1078                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1079                                 ICE_AQ_THRESHOLD_M;
1080                 if (mcast_thresh)
1081                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1082                                 ICE_AQ_THRESHOLD_M;
1083                 if (ctl_bitmask)
1084                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1085         }
1086
1087         return status;
1088 }
1089
1090 /**
1091  * ice_aq_sw_rules - add/update/remove switch rules
1092  * @hw: pointer to the HW struct
1093  * @rule_list: pointer to switch rule population list
1094  * @rule_list_sz: total size of the rule list in bytes
1095  * @num_rules: number of switch rules in the rule_list
1096  * @opc: switch rules population command type - pass in the command opcode
1097  * @cd: pointer to command details structure or NULL
1098  *
1099  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1100  */
1101 static enum ice_status
1102 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1103                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1104 {
1105         struct ice_aq_desc desc;
1106
1107         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_sw_rules");
1108
1109         if (opc != ice_aqc_opc_add_sw_rules &&
1110             opc != ice_aqc_opc_update_sw_rules &&
1111             opc != ice_aqc_opc_remove_sw_rules)
1112                 return ICE_ERR_PARAM;
1113
1114         ice_fill_dflt_direct_cmd_desc(&desc, opc);
1115
1116         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1117         desc.params.sw_rules.num_rules_fltr_entry_index =
1118                 CPU_TO_LE16(num_rules);
1119         return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1120 }
1121
1122 /**
1123  * ice_aq_add_recipe - add switch recipe
1124  * @hw: pointer to the HW struct
1125  * @s_recipe_list: pointer to switch rule population list
1126  * @num_recipes: number of switch recipes in the list
1127  * @cd: pointer to command details structure or NULL
1128  *
1129  * Add(0x0290)
1130  */
1131 enum ice_status
1132 ice_aq_add_recipe(struct ice_hw *hw,
1133                   struct ice_aqc_recipe_data_elem *s_recipe_list,
1134                   u16 num_recipes, struct ice_sq_cd *cd)
1135 {
1136         struct ice_aqc_add_get_recipe *cmd;
1137         struct ice_aq_desc desc;
1138         u16 buf_size;
1139
1140         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_recipe");
1141         cmd = &desc.params.add_get_recipe;
1142         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1143
1144         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1145         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1146
1147         buf_size = num_recipes * sizeof(*s_recipe_list);
1148
1149         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1150 }
1151
1152 /**
1153  * ice_aq_get_recipe - get switch recipe
1154  * @hw: pointer to the HW struct
1155  * @s_recipe_list: pointer to switch rule population list
1156  * @num_recipes: pointer to the number of recipes (input and output)
1157  * @recipe_root: root recipe number of recipe(s) to retrieve
1158  * @cd: pointer to command details structure or NULL
1159  *
1160  * Get(0x0292)
1161  *
1162  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1163  * On output, *num_recipes will equal the number of entries returned in
1164  * s_recipe_list.
1165  *
1166  * The caller must supply enough space in s_recipe_list to hold all possible
1167  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1168  */
1169 enum ice_status
1170 ice_aq_get_recipe(struct ice_hw *hw,
1171                   struct ice_aqc_recipe_data_elem *s_recipe_list,
1172                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1173 {
1174         struct ice_aqc_add_get_recipe *cmd;
1175         struct ice_aq_desc desc;
1176         enum ice_status status;
1177         u16 buf_size;
1178
1179         if (*num_recipes != ICE_MAX_NUM_RECIPES)
1180                 return ICE_ERR_PARAM;
1181
1182         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe");
1183         cmd = &desc.params.add_get_recipe;
1184         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1185
1186         cmd->return_index = CPU_TO_LE16(recipe_root);
1187         cmd->num_sub_recipes = 0;
1188
1189         buf_size = *num_recipes * sizeof(*s_recipe_list);
1190
1191         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1192         /* cppcheck-suppress constArgument */
1193         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1194
1195         return status;
1196 }
1197
1198 /**
1199  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1200  * @hw: pointer to the HW struct
1201  * @profile_id: package profile ID to associate the recipe with
1202  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1203  * @cd: pointer to command details structure or NULL
1204  * Recipe to profile association (0x0291)
1205  */
1206 enum ice_status
1207 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1208                              struct ice_sq_cd *cd)
1209 {
1210         struct ice_aqc_recipe_to_profile *cmd;
1211         struct ice_aq_desc desc;
1212
1213         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_assoc_recipe_to_prof");
1214         cmd = &desc.params.recipe_to_profile;
1215         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1216         cmd->profile_id = CPU_TO_LE16(profile_id);
1217         /* Set the recipe ID bit in the bitmask to let the device know which
1218          * profile we are associating the recipe to
1219          */
1220         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1221                    ICE_NONDMA_TO_NONDMA);
1222
1223         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1224 }
1225
1226 /**
1227  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1228  * @hw: pointer to the HW struct
1229  * @profile_id: package profile ID to associate the recipe with
1230  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1231  * @cd: pointer to command details structure or NULL
1232  * Associate profile ID with given recipe (0x0293)
1233  */
1234 enum ice_status
1235 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1236                              struct ice_sq_cd *cd)
1237 {
1238         struct ice_aqc_recipe_to_profile *cmd;
1239         struct ice_aq_desc desc;
1240         enum ice_status status;
1241
1242         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe_to_prof");
1243         cmd = &desc.params.recipe_to_profile;
1244         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1245         cmd->profile_id = CPU_TO_LE16(profile_id);
1246
1247         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1248         if (!status)
1249                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1250                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1251
1252         return status;
1253 }
1254
1255 /**
1256  * ice_alloc_recipe - add recipe resource
1257  * @hw: pointer to the hardware structure
1258  * @rid: recipe ID returned as response to AQ call
1259  */
1260 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1261 {
1262         struct ice_aqc_alloc_free_res_elem *sw_buf;
1263         enum ice_status status;
1264         u16 buf_len;
1265
1266         buf_len = sizeof(*sw_buf);
1267         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1268         if (!sw_buf)
1269                 return ICE_ERR_NO_MEMORY;
1270
1271         sw_buf->num_elems = CPU_TO_LE16(1);
1272         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1273                                         ICE_AQC_RES_TYPE_S) |
1274                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
1275         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1276                                        ice_aqc_opc_alloc_res, NULL);
1277         if (!status)
1278                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1279         ice_free(hw, sw_buf);
1280
1281         return status;
1282 }
1283
1284 /* ice_init_port_info - Initialize port_info with switch configuration data
1285  * @pi: pointer to port_info
1286  * @vsi_port_num: VSI number or port number
1287  * @type: Type of switch element (port or VSI)
1288  * @swid: switch ID of the switch the element is attached to
1289  * @pf_vf_num: PF or VF number
1290  * @is_vf: true if the element is a VF, false otherwise
1291  */
1292 static void
1293 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1294                    u16 swid, u16 pf_vf_num, bool is_vf)
1295 {
1296         switch (type) {
1297         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1298                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1299                 pi->sw_id = swid;
1300                 pi->pf_vf_num = pf_vf_num;
1301                 pi->is_vf = is_vf;
1302                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1303                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1304                 break;
1305         default:
1306                 ice_debug(pi->hw, ICE_DBG_SW,
1307                           "incorrect VSI/port type received\n");
1308                 break;
1309         }
1310 }
1311
/* ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Queries the switch configuration from firmware (possibly in multiple
 * chunks) and initializes hw->port_info for each physical/virtual port
 * element found. Returns ICE_ERR_NO_MEMORY if the response buffer cannot
 * be allocated, ICE_ERR_CFG if firmware reports more ports than expected,
 * or the status of the last ice_aq_get_sw_cfg call otherwise.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
	struct ice_aqc_get_sw_cfg_resp *rbuf;
	enum ice_status status;
	u16 num_total_ports;
	u16 req_desc = 0;
	u16 num_elems;
	u16 j = 0;	/* count of port elements initialized so far */
	u16 i;

	/* This code path currently supports exactly one port */
	num_total_ports = 1;

	rbuf = (struct ice_aqc_get_sw_cfg_resp *)
		ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

	if (!rbuf)
		return ICE_ERR_NO_MEMORY;

	/* Multiple calls to ice_aq_get_sw_cfg may be required
	 * to get all the switch configuration information. The need
	 * for additional calls is indicated by ice_aq_get_sw_cfg
	 * writing a non-zero value in req_desc
	 */
	do {
		status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
					   &req_desc, &num_elems, NULL);

		if (status)
			break;

		for (i = 0; i < num_elems; i++) {
			struct ice_aqc_get_sw_cfg_resp_elem *ele;
			u16 pf_vf_num, swid, vsi_port_num;
			bool is_vf = false;
			u8 type;

			ele = rbuf[i].elements;
			/* Low bits carry the VSI/port number; the element
			 * type lives in the high bits (extracted below).
			 */
			vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
				ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

			pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
				ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

			swid = LE16_TO_CPU(ele->swid);

			if (LE16_TO_CPU(ele->pf_vf_num) &
			    ICE_AQC_GET_SW_CONF_RESP_IS_VF)
				is_vf = true;

			type = LE16_TO_CPU(ele->vsi_port_num) >>
				ICE_AQC_GET_SW_CONF_RESP_TYPE_S;

			switch (type) {
			case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
			case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
				if (j == num_total_ports) {
					ice_debug(hw, ICE_DBG_SW,
						  "more ports than expected\n");
					status = ICE_ERR_CFG;
					goto out;
				}
				ice_init_port_info(hw->port_info,
						   vsi_port_num, type, swid,
						   pf_vf_num, is_vf);
				j++;
				break;
			default:
				/* Non-port elements are ignored here */
				break;
			}
		}
	} while (req_desc && !status);


out:
	ice_free(hw, (void *)rbuf);
	return status;
}
1392
1393
/**
 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
 * @hw: pointer to the hardware structure
 * @fi: filter info structure to fill/update
 *
 * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
	/* Both flags default to off; only TX-direction forwarding actions
	 * below can enable them.
	 */
	fi->lb_en = false;
	fi->lan_en = false;
	if ((fi->flag & ICE_FLTR_TX) &&
	    (fi->fltr_act == ICE_FWD_TO_VSI ||
	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
	     fi->fltr_act == ICE_FWD_TO_Q ||
	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
		/* Setting LB for prune actions will result in replicated
		 * packets to the internal switch that will be dropped.
		 */
		if (fi->lkup_type != ICE_SW_LKUP_VLAN)
			fi->lb_en = true;

		/* Set lan_en to TRUE if
		 * 1. The switch is a VEB AND
		 * 2
		 * 2.1 The lookup is a directional lookup like ethertype,
		 * promiscuous, ethertype-MAC, promiscuous-VLAN
		 * and default-port OR
		 * 2.2 The lookup is VLAN, OR
		 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
		 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
		 *
		 * OR
		 *
		 * The switch is a VEPA.
		 *
		 * In all other cases, the LAN enable has to be set to false.
		 */
		if (hw->evb_veb) {
			if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC ||
			    fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
			    fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
			    fi->lkup_type == ICE_SW_LKUP_DFLT ||
			    fi->lkup_type == ICE_SW_LKUP_VLAN ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC &&
			     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
			    (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
			     !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
				fi->lan_en = true;
		} else {
			/* VEPA: always hand the packet to the LAN port */
			fi->lan_en = true;
		}
	}
}
1451
1452 /**
1453  * ice_ilog2 - Calculates integer log base 2 of a number
1454  * @n: number on which to perform operation
1455  */
1456 static int ice_ilog2(u64 n)
1457 {
1458         int i;
1459
1460         for (i = 63; i >= 0; i--)
1461                 if (((u64)1 << i) & n)
1462                         return i;
1463
1464         return -1;
1465 }
1466
1467
1468 /**
1469  * ice_fill_sw_rule - Helper function to fill switch rule structure
1470  * @hw: pointer to the hardware structure
1471  * @f_info: entry containing packet forwarding information
1472  * @s_rule: switch rule structure to be filled in based on mac_entry
1473  * @opc: switch rules population command type - pass in the command opcode
1474  */
1475 static void
1476 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1477                  struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
1478 {
1479         u16 vlan_id = ICE_MAX_VLAN_ID + 1;
1480         void *daddr = NULL;
1481         u16 eth_hdr_sz;
1482         u8 *eth_hdr;
1483         u32 act = 0;
1484         __be16 *off;
1485         u8 q_rgn;
1486
1487
1488         if (opc == ice_aqc_opc_remove_sw_rules) {
1489                 s_rule->pdata.lkup_tx_rx.act = 0;
1490                 s_rule->pdata.lkup_tx_rx.index =
1491                         CPU_TO_LE16(f_info->fltr_rule_id);
1492                 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1493                 return;
1494         }
1495
1496         eth_hdr_sz = sizeof(dummy_eth_header);
1497         eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1498
1499         /* initialize the ether header with a dummy header */
1500         ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1501         ice_fill_sw_info(hw, f_info);
1502
1503         switch (f_info->fltr_act) {
1504         case ICE_FWD_TO_VSI:
1505                 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1506                         ICE_SINGLE_ACT_VSI_ID_M;
1507                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1508                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1509                                 ICE_SINGLE_ACT_VALID_BIT;
1510                 break;
1511         case ICE_FWD_TO_VSI_LIST:
1512                 act |= ICE_SINGLE_ACT_VSI_LIST;
1513                 act |= (f_info->fwd_id.vsi_list_id <<
1514                         ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1515                         ICE_SINGLE_ACT_VSI_LIST_ID_M;
1516                 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1517                         act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1518                                 ICE_SINGLE_ACT_VALID_BIT;
1519                 break;
1520         case ICE_FWD_TO_Q:
1521                 act |= ICE_SINGLE_ACT_TO_Q;
1522                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1523                         ICE_SINGLE_ACT_Q_INDEX_M;
1524                 break;
1525         case ICE_DROP_PACKET:
1526                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1527                         ICE_SINGLE_ACT_VALID_BIT;
1528                 break;
1529         case ICE_FWD_TO_QGRP:
1530                 q_rgn = f_info->qgrp_size > 0 ?
1531                         (u8)ice_ilog2(f_info->qgrp_size) : 0;
1532                 act |= ICE_SINGLE_ACT_TO_Q;
1533                 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1534                         ICE_SINGLE_ACT_Q_INDEX_M;
1535                 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1536                         ICE_SINGLE_ACT_Q_REGION_M;
1537                 break;
1538         default:
1539                 return;
1540         }
1541
1542         if (f_info->lb_en)
1543                 act |= ICE_SINGLE_ACT_LB_ENABLE;
1544         if (f_info->lan_en)
1545                 act |= ICE_SINGLE_ACT_LAN_ENABLE;
1546
1547         switch (f_info->lkup_type) {
1548         case ICE_SW_LKUP_MAC:
1549                 daddr = f_info->l_data.mac.mac_addr;
1550                 break;
1551         case ICE_SW_LKUP_VLAN:
1552                 vlan_id = f_info->l_data.vlan.vlan_id;
1553                 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1554                     f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1555                         act |= ICE_SINGLE_ACT_PRUNE;
1556                         act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1557                 }
1558                 break;
1559         case ICE_SW_LKUP_ETHERTYPE_MAC:
1560                 daddr = f_info->l_data.ethertype_mac.mac_addr;
1561                 /* fall-through */
1562         case ICE_SW_LKUP_ETHERTYPE:
1563                 off = (__be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1564                 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1565                 break;
1566         case ICE_SW_LKUP_MAC_VLAN:
1567                 daddr = f_info->l_data.mac_vlan.mac_addr;
1568                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1569                 break;
1570         case ICE_SW_LKUP_PROMISC_VLAN:
1571                 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1572                 /* fall-through */
1573         case ICE_SW_LKUP_PROMISC:
1574                 daddr = f_info->l_data.mac_vlan.mac_addr;
1575                 break;
1576         default:
1577                 break;
1578         }
1579
1580         s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1581                 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1582                 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1583
1584         /* Recipe set depending on lookup type */
1585         s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1586         s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1587         s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1588
1589         if (daddr)
1590                 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1591                            ICE_NONDMA_TO_NONDMA);
1592
1593         if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1594                 off = (__be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1595                 *off = CPU_TO_BE16(vlan_id);
1596         }
1597
1598         /* Create the switch rule with the final dummy Ethernet header */
1599         if (opc != ice_aqc_opc_update_sw_rules)
1600                 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1601 }
1602
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* Software markers are only attached to MAC lookup rules */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* The lookup rule element starts right after the large action in the
	 * same allocation
	 */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	/* Third action carries the marker value at the Rx-descriptor profile
	 * index offset
	 */
	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* record marker bookkeeping only after HW accepted the rules */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	ice_free(hw, lg_act);
	return status;
}
1707
1708 /**
1709  * ice_add_counter_act - add/update filter rule with counter action
1710  * @hw: pointer to the hardware structure
1711  * @m_ent: the management entry for which counter needs to be added
1712  * @counter_id: VLAN counter ID returned as part of allocate resource
1713  * @l_id: large action resource ID
1714  */
1715 static enum ice_status
1716 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1717                     u16 counter_id, u16 l_id)
1718 {
1719         struct ice_aqc_sw_rules_elem *lg_act;
1720         struct ice_aqc_sw_rules_elem *rx_tx;
1721         enum ice_status status;
1722         /* 2 actions will be added while adding a large action counter */
1723         const int num_acts = 2;
1724         u16 lg_act_size;
1725         u16 rules_size;
1726         u16 f_rule_id;
1727         u32 act;
1728         u16 id;
1729
1730         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1731                 return ICE_ERR_PARAM;
1732
1733         /* Create two back-to-back switch rules and submit them to the HW using
1734          * one memory buffer:
1735          * 1. Large Action
1736          * 2. Look up Tx Rx
1737          */
1738         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
1739         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1740         lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
1741                                                                  rules_size);
1742         if (!lg_act)
1743                 return ICE_ERR_NO_MEMORY;
1744
1745         rx_tx = (struct ice_aqc_sw_rules_elem *)
1746                 ((u8 *)lg_act + lg_act_size);
1747
1748         /* Fill in the first switch rule i.e. large action */
1749         lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1750         lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1751         lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
1752
1753         /* First action VSI forwarding or VSI list forwarding depending on how
1754          * many VSIs
1755          */
1756         id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
1757                 m_ent->fltr_info.fwd_id.hw_vsi_id;
1758
1759         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1760         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1761                 ICE_LG_ACT_VSI_LIST_ID_M;
1762         if (m_ent->vsi_count > 1)
1763                 act |= ICE_LG_ACT_VSI_LIST;
1764         lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1765
1766         /* Second action counter ID */
1767         act = ICE_LG_ACT_STAT_COUNT;
1768         act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
1769                 ICE_LG_ACT_STAT_COUNT_M;
1770         lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1771
1772         /* call the fill switch rule to fill the lookup Tx Rx structure */
1773         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1774                          ice_aqc_opc_update_sw_rules);
1775
1776         act = ICE_SINGLE_ACT_PTR;
1777         act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
1778         rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1779
1780         /* Use the filter rule ID of the previously created rule with single
1781          * act. Once the update happens, hardware will treat this as large
1782          * action
1783          */
1784         f_rule_id = m_ent->fltr_info.fltr_rule_id;
1785         rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
1786
1787         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1788                                  ice_aqc_opc_update_sw_rules, NULL);
1789         if (!status) {
1790                 m_ent->lg_act_idx = l_id;
1791                 m_ent->counter_index = counter_id;
1792         }
1793
1794         ice_free(hw, lg_act);
1795         return status;
1796 }
1797
1798 /**
1799  * ice_create_vsi_list_map
1800  * @hw: pointer to the hardware structure
1801  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1802  * @num_vsi: number of VSI handles in the array
1803  * @vsi_list_id: VSI list ID generated as part of allocate resource
1804  *
1805  * Helper function to create a new entry of VSI list ID to VSI mapping
1806  * using the given VSI list ID
1807  */
1808 static struct ice_vsi_list_map_info *
1809 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1810                         u16 vsi_list_id)
1811 {
1812         struct ice_switch_info *sw = hw->switch_info;
1813         struct ice_vsi_list_map_info *v_map;
1814         int i;
1815
1816         v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
1817                 sizeof(*v_map));
1818         if (!v_map)
1819                 return NULL;
1820
1821         v_map->vsi_list_id = vsi_list_id;
1822         v_map->ref_cnt = 1;
1823         for (i = 0; i < num_vsi; i++)
1824                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
1825
1826         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
1827         return v_map;
1828 }
1829
1830 /**
1831  * ice_update_vsi_list_rule
1832  * @hw: pointer to the hardware structure
1833  * @vsi_handle_arr: array of VSI handles to form a VSI list
1834  * @num_vsi: number of VSI handles in the array
1835  * @vsi_list_id: VSI list ID generated as part of allocate resource
1836  * @remove: Boolean value to indicate if this is a remove action
1837  * @opc: switch rules population command type - pass in the command opcode
1838  * @lkup_type: lookup type of the filter
1839  *
1840  * Call AQ command to add a new switch rule or update existing switch rule
1841  * using the given VSI list ID
1842  */
1843 static enum ice_status
1844 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1845                          u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
1846                          enum ice_sw_lkup_type lkup_type)
1847 {
1848         struct ice_aqc_sw_rules_elem *s_rule;
1849         enum ice_status status;
1850         u16 s_rule_size;
1851         u16 type;
1852         int i;
1853
1854         if (!num_vsi)
1855                 return ICE_ERR_PARAM;
1856
1857         if (lkup_type == ICE_SW_LKUP_MAC ||
1858             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1859             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1860             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1861             lkup_type == ICE_SW_LKUP_PROMISC ||
1862             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1863             lkup_type == ICE_SW_LKUP_LAST)
1864                 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
1865                                 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
1866         else if (lkup_type == ICE_SW_LKUP_VLAN)
1867                 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1868                                 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1869         else
1870                 return ICE_ERR_PARAM;
1871
1872         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1873         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
1874         if (!s_rule)
1875                 return ICE_ERR_NO_MEMORY;
1876         for (i = 0; i < num_vsi; i++) {
1877                 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
1878                         status = ICE_ERR_PARAM;
1879                         goto exit;
1880                 }
1881                 /* AQ call requires hw_vsi_id(s) */
1882                 s_rule->pdata.vsi_list.vsi[i] =
1883                         CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
1884         }
1885
1886         s_rule->type = CPU_TO_LE16(type);
1887         s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
1888         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
1889
1890         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
1891
1892 exit:
1893         ice_free(hw, s_rule);
1894         return status;
1895 }
1896
1897 /**
1898  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
1899  * @hw: pointer to the HW struct
1900  * @vsi_handle_arr: array of VSI handles to form a VSI list
1901  * @num_vsi: number of VSI handles in the array
1902  * @vsi_list_id: stores the ID of the VSI list to be created
1903  * @lkup_type: switch rule filter's lookup type
1904  */
1905 static enum ice_status
1906 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1907                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1908 {
1909         enum ice_status status;
1910
1911         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1912                                             ice_aqc_opc_alloc_res);
1913         if (status)
1914                 return status;
1915
1916         /* Update the newly created VSI list to include the specified VSIs */
1917         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1918                                         *vsi_list_id, false,
1919                                         ice_aqc_opc_add_sw_rules, lkup_type);
1920 }
1921
1922 /**
1923  * ice_create_pkt_fwd_rule
1924  * @hw: pointer to the hardware structure
1925  * @f_entry: entry containing packet forwarding information
1926  *
1927  * Create switch rule with given filter information and add an entry
1928  * to the corresponding filter management list to track this switch rule
1929  * and VSI mapping
1930  */
1931 static enum ice_status
1932 ice_create_pkt_fwd_rule(struct ice_hw *hw,
1933                         struct ice_fltr_list_entry *f_entry)
1934 {
1935         struct ice_fltr_mgmt_list_entry *fm_entry;
1936         struct ice_aqc_sw_rules_elem *s_rule;
1937         enum ice_sw_lkup_type l_type;
1938         struct ice_sw_recipe *recp;
1939         enum ice_status status;
1940
1941         s_rule = (struct ice_aqc_sw_rules_elem *)
1942                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
1943         if (!s_rule)
1944                 return ICE_ERR_NO_MEMORY;
1945         fm_entry = (struct ice_fltr_mgmt_list_entry *)
1946                    ice_malloc(hw, sizeof(*fm_entry));
1947         if (!fm_entry) {
1948                 status = ICE_ERR_NO_MEMORY;
1949                 goto ice_create_pkt_fwd_rule_exit;
1950         }
1951
1952         fm_entry->fltr_info = f_entry->fltr_info;
1953
1954         /* Initialize all the fields for the management entry */
1955         fm_entry->vsi_count = 1;
1956         fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
1957         fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
1958         fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
1959
1960         ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
1961                          ice_aqc_opc_add_sw_rules);
1962
1963         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1964                                  ice_aqc_opc_add_sw_rules, NULL);
1965         if (status) {
1966                 ice_free(hw, fm_entry);
1967                 goto ice_create_pkt_fwd_rule_exit;
1968         }
1969
1970         f_entry->fltr_info.fltr_rule_id =
1971                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
1972         fm_entry->fltr_info.fltr_rule_id =
1973                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
1974
1975         /* The book keeping entries will get removed when base driver
1976          * calls remove filter AQ command
1977          */
1978         l_type = fm_entry->fltr_info.lkup_type;
1979         recp = &hw->switch_info->recp_list[l_type];
1980         LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
1981
1982 ice_create_pkt_fwd_rule_exit:
1983         ice_free(hw, s_rule);
1984         return status;
1985 }
1986
1987 /**
1988  * ice_update_pkt_fwd_rule
1989  * @hw: pointer to the hardware structure
1990  * @f_info: filter information for switch rule
1991  *
1992  * Call AQ command to update a previously created switch rule with a
1993  * VSI list ID
1994  */
1995 static enum ice_status
1996 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1997 {
1998         struct ice_aqc_sw_rules_elem *s_rule;
1999         enum ice_status status;
2000
2001         s_rule = (struct ice_aqc_sw_rules_elem *)
2002                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2003         if (!s_rule)
2004                 return ICE_ERR_NO_MEMORY;
2005
2006         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2007
2008         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2009
2010         /* Update switch rule with new rule set to forward VSI list */
2011         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2012                                  ice_aqc_opc_update_sw_rules, NULL);
2013
2014         ice_free(hw, s_rule);
2015         return status;
2016 }
2017
2018 /**
2019  * ice_update_sw_rule_bridge_mode
2020  * @hw: pointer to the HW struct
2021  *
2022  * Updates unicast switch filter rules based on VEB/VEPA mode
2023  */
2024 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2025 {
2026         struct ice_switch_info *sw = hw->switch_info;
2027         struct ice_fltr_mgmt_list_entry *fm_entry;
2028         enum ice_status status = ICE_SUCCESS;
2029         struct LIST_HEAD_TYPE *rule_head;
2030         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2031
2032         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2033         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2034
2035         ice_acquire_lock(rule_lock);
2036         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2037                             list_entry) {
2038                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2039                 u8 *addr = fi->l_data.mac.mac_addr;
2040
2041                 /* Update unicast Tx rules to reflect the selected
2042                  * VEB/VEPA mode
2043                  */
2044                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2045                     (fi->fltr_act == ICE_FWD_TO_VSI ||
2046                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2047                      fi->fltr_act == ICE_FWD_TO_Q ||
2048                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
2049                         status = ice_update_pkt_fwd_rule(hw, fi);
2050                         if (status)
2051                                 break;
2052                 }
2053         }
2054
2055         ice_release_lock(rule_lock);
2056
2057         return status;
2058 }
2059
2060 /**
2061  * ice_add_update_vsi_list
2062  * @hw: pointer to the hardware structure
2063  * @m_entry: pointer to current filter management list entry
2064  * @cur_fltr: filter information from the book keeping entry
2065  * @new_fltr: filter information with the new VSI to be added
2066  *
2067  * Call AQ command to add or update previously created VSI list with new VSI.
2068  *
2069  * Helper function to do book keeping associated with adding filter information
2070  * The algorithm to do the book keeping is described below :
2071  * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2072  *      if only one VSI has been added till now
2073  *              Allocate a new VSI list and add two VSIs
2074  *              to this list using switch rule command
2075  *              Update the previously created switch rule with the
2076  *              newly created VSI list ID
2077  *      if a VSI list was previously created
2078  *              Add the new VSI to the previously created VSI list set
2079  *              using the update switch rule command
2080  */
2081 static enum ice_status
2082 ice_add_update_vsi_list(struct ice_hw *hw,
2083                         struct ice_fltr_mgmt_list_entry *m_entry,
2084                         struct ice_fltr_info *cur_fltr,
2085                         struct ice_fltr_info *new_fltr)
2086 {
2087         enum ice_status status = ICE_SUCCESS;
2088         u16 vsi_list_id = 0;
2089
2090         if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2091              cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2092                 return ICE_ERR_NOT_IMPL;
2093
2094         if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2095              new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2096             (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2097              cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2098                 return ICE_ERR_NOT_IMPL;
2099
2100         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2101                 /* Only one entry existed in the mapping and it was not already
2102                  * a part of a VSI list. So, create a VSI list with the old and
2103                  * new VSIs.
2104                  */
2105                 struct ice_fltr_info tmp_fltr;
2106                 u16 vsi_handle_arr[2];
2107
2108                 /* A rule already exists with the new VSI being added */
2109                 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2110                         return ICE_ERR_ALREADY_EXISTS;
2111
2112                 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2113                 vsi_handle_arr[1] = new_fltr->vsi_handle;
2114                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2115                                                   &vsi_list_id,
2116                                                   new_fltr->lkup_type);
2117                 if (status)
2118                         return status;
2119
2120                 tmp_fltr = *new_fltr;
2121                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2122                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2123                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2124                 /* Update the previous switch rule of "MAC forward to VSI" to
2125                  * "MAC fwd to VSI list"
2126                  */
2127                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
2128                 if (status)
2129                         return status;
2130
2131                 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2132                 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2133                 m_entry->vsi_list_info =
2134                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2135                                                 vsi_list_id);
2136
2137                 /* If this entry was large action then the large action needs
2138                  * to be updated to point to FWD to VSI list
2139                  */
2140                 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2141                         status =
2142                             ice_add_marker_act(hw, m_entry,
2143                                                m_entry->sw_marker_id,
2144                                                m_entry->lg_act_idx);
2145         } else {
2146                 u16 vsi_handle = new_fltr->vsi_handle;
2147                 enum ice_adminq_opc opcode;
2148
2149                 if (!m_entry->vsi_list_info)
2150                         return ICE_ERR_CFG;
2151
2152                 /* A rule already exists with the new VSI being added */
2153                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2154                         return ICE_SUCCESS;
2155
2156                 /* Update the previously created VSI list set with
2157                  * the new VSI ID passed in
2158                  */
2159                 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2160                 opcode = ice_aqc_opc_update_sw_rules;
2161
2162                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2163                                                   vsi_list_id, false, opcode,
2164                                                   new_fltr->lkup_type);
2165                 /* update VSI list mapping info with new VSI ID */
2166                 if (!status)
2167                         ice_set_bit(vsi_handle,
2168                                     m_entry->vsi_list_info->vsi_map);
2169         }
2170         if (!status)
2171                 m_entry->vsi_count++;
2172         return status;
2173 }
2174
2175 /**
2176  * ice_find_rule_entry - Search a rule entry
2177  * @hw: pointer to the hardware structure
2178  * @recp_id: lookup type for which the specified rule needs to be searched
2179  * @f_info: rule information
2180  *
2181  * Helper function to search for a given rule entry
2182  * Returns pointer to entry storing the rule if found
2183  */
2184 static struct ice_fltr_mgmt_list_entry *
2185 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2186 {
2187         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2188         struct ice_switch_info *sw = hw->switch_info;
2189         struct LIST_HEAD_TYPE *list_head;
2190
2191         list_head = &sw->recp_list[recp_id].filt_rules;
2192         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2193                             list_entry) {
2194                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2195                             sizeof(f_info->l_data)) &&
2196                     f_info->flag == list_itr->fltr_info.flag) {
2197                         ret = list_itr;
2198                         break;
2199                 }
2200         }
2201         return ret;
2202 }
2203
2204 /**
2205  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2206  * @hw: pointer to the hardware structure
2207  * @recp_id: lookup type for which VSI lists needs to be searched
2208  * @vsi_handle: VSI handle to be found in VSI list
2209  * @vsi_list_id: VSI list ID found containing vsi_handle
2210  *
2211  * Helper function to search a VSI list with single entry containing given VSI
2212  * handle element. This can be extended further to search VSI list with more
2213  * than 1 vsi_count. Returns pointer to VSI list entry if found.
2214  */
2215 static struct ice_vsi_list_map_info *
2216 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2217                         u16 *vsi_list_id)
2218 {
2219         struct ice_vsi_list_map_info *map_info = NULL;
2220         struct ice_switch_info *sw = hw->switch_info;
2221         struct LIST_HEAD_TYPE *list_head;
2222
2223         list_head = &sw->recp_list[recp_id].filt_rules;
2224         if (sw->recp_list[recp_id].adv_rule) {
2225                 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2226
2227                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2228                                     ice_adv_fltr_mgmt_list_entry,
2229                                     list_entry) {
2230                         if (list_itr->vsi_list_info) {
2231                                 map_info = list_itr->vsi_list_info;
2232                                 if (ice_is_bit_set(map_info->vsi_map,
2233                                                    vsi_handle)) {
2234                                         *vsi_list_id = map_info->vsi_list_id;
2235                                         return map_info;
2236                                 }
2237                         }
2238                 }
2239         } else {
2240                 struct ice_fltr_mgmt_list_entry *list_itr;
2241
2242                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2243                                     ice_fltr_mgmt_list_entry,
2244                                     list_entry) {
2245                         if (list_itr->vsi_count == 1 &&
2246                             list_itr->vsi_list_info) {
2247                                 map_info = list_itr->vsi_list_info;
2248                                 if (ice_is_bit_set(map_info->vsi_map,
2249                                                    vsi_handle)) {
2250                                         *vsi_list_id = map_info->vsi_list_id;
2251                                         return map_info;
2252                                 }
2253                         }
2254                 }
2255         }
2256         return NULL;
2257 }
2258
2259 /**
2260  * ice_add_rule_internal - add rule for a given lookup type
2261  * @hw: pointer to the hardware structure
2262  * @recp_id: lookup type (recipe ID) for which rule has to be added
2263  * @f_entry: structure containing MAC forwarding information
2264  *
2265  * Adds or updates the rule lists for a given recipe
2266  */
2267 static enum ice_status
2268 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2269                       struct ice_fltr_list_entry *f_entry)
2270 {
2271         struct ice_switch_info *sw = hw->switch_info;
2272         struct ice_fltr_info *new_fltr, *cur_fltr;
2273         struct ice_fltr_mgmt_list_entry *m_entry;
2274         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2275         enum ice_status status = ICE_SUCCESS;
2276
2277         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2278                 return ICE_ERR_PARAM;
2279
2280         /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2281         if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2282                 f_entry->fltr_info.fwd_id.hw_vsi_id =
2283                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2284
2285         rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2286
2287         ice_acquire_lock(rule_lock);
2288         new_fltr = &f_entry->fltr_info;
2289         if (new_fltr->flag & ICE_FLTR_RX)
2290                 new_fltr->src = hw->port_info->lport;
2291         else if (new_fltr->flag & ICE_FLTR_TX)
2292                 new_fltr->src =
2293                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2294
2295         m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2296         if (!m_entry) {
2297                 ice_release_lock(rule_lock);
2298                 return ice_create_pkt_fwd_rule(hw, f_entry);
2299         }
2300
2301         cur_fltr = &m_entry->fltr_info;
2302         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2303         ice_release_lock(rule_lock);
2304
2305         return status;
2306 }
2307
2308 /**
2309  * ice_remove_vsi_list_rule
2310  * @hw: pointer to the hardware structure
2311  * @vsi_list_id: VSI list ID generated as part of allocate resource
2312  * @lkup_type: switch rule filter lookup type
2313  *
2314  * The VSI list should be emptied before this function is called to remove the
2315  * VSI list.
2316  */
2317 static enum ice_status
2318 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2319                          enum ice_sw_lkup_type lkup_type)
2320 {
2321         struct ice_aqc_sw_rules_elem *s_rule;
2322         enum ice_status status;
2323         u16 s_rule_size;
2324
2325         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2326         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2327         if (!s_rule)
2328                 return ICE_ERR_NO_MEMORY;
2329
2330         s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2331         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2332
2333         /* Free the vsi_list resource that we allocated. It is assumed that the
2334          * list is empty at this point.
2335          */
2336         status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2337                                             ice_aqc_opc_free_res);
2338
2339         ice_free(hw, s_rule);
2340         return status;
2341 }
2342
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the VSI list referenced by @fm_list. When only one
 * VSI remains afterwards (and the rule is not a VLAN rule), the rule is
 * converted back to a direct ICE_FWD_TO_VSI action and the now-unneeded VSI
 * list is deleted. VLAN rules keep their VSI list until it is fully empty.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id;

	/* Only rules that forward to a non-empty VSI list can be updated */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* Ask FW to drop this VSI from the list (remove flag = true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	/* Mirror the FW update in the driver's book-keeping */
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		/* One VSI left: demote the rule from FWD_TO_VSI_LIST back to
		 * a plain FWD_TO_VSI action targeting the remaining VSI.
		 */
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		/* Only commit the new filter info once FW accepted it */
		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		/* Drop the driver's map entry for the deleted VSI list */
		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
2432
2433 /**
2434  * ice_remove_rule_internal - Remove a filter rule of a given type
2435  *
2436  * @hw: pointer to the hardware structure
2437  * @recp_id: recipe ID for which the rule needs to removed
2438  * @f_entry: rule entry containing filter information
2439  */
2440 static enum ice_status
2441 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2442                          struct ice_fltr_list_entry *f_entry)
2443 {
2444         struct ice_switch_info *sw = hw->switch_info;
2445         struct ice_fltr_mgmt_list_entry *list_elem;
2446         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2447         enum ice_status status = ICE_SUCCESS;
2448         bool remove_rule = false;
2449         u16 vsi_handle;
2450
2451         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2452                 return ICE_ERR_PARAM;
2453         f_entry->fltr_info.fwd_id.hw_vsi_id =
2454                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2455
2456         rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2457         ice_acquire_lock(rule_lock);
2458         list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2459         if (!list_elem) {
2460                 status = ICE_ERR_DOES_NOT_EXIST;
2461                 goto exit;
2462         }
2463
2464         if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2465                 remove_rule = true;
2466         } else if (!list_elem->vsi_list_info) {
2467                 status = ICE_ERR_DOES_NOT_EXIST;
2468                 goto exit;
2469         } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2470                 /* a ref_cnt > 1 indicates that the vsi_list is being
2471                  * shared by multiple rules. Decrement the ref_cnt and
2472                  * remove this rule, but do not modify the list, as it
2473                  * is in-use by other rules.
2474                  */
2475                 list_elem->vsi_list_info->ref_cnt--;
2476                 remove_rule = true;
2477         } else {
2478                 /* a ref_cnt of 1 indicates the vsi_list is only used
2479                  * by one rule. However, the original removal request is only
2480                  * for a single VSI. Update the vsi_list first, and only
2481                  * remove the rule if there are no further VSIs in this list.
2482                  */
2483                 vsi_handle = f_entry->fltr_info.vsi_handle;
2484                 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2485                 if (status)
2486                         goto exit;
2487                 /* if VSI count goes to zero after updating the VSI list */
2488                 if (list_elem->vsi_count == 0)
2489                         remove_rule = true;
2490         }
2491
2492         if (remove_rule) {
2493                 /* Remove the lookup rule */
2494                 struct ice_aqc_sw_rules_elem *s_rule;
2495
2496                 s_rule = (struct ice_aqc_sw_rules_elem *)
2497                         ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2498                 if (!s_rule) {
2499                         status = ICE_ERR_NO_MEMORY;
2500                         goto exit;
2501                 }
2502
2503                 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2504                                  ice_aqc_opc_remove_sw_rules);
2505
2506                 status = ice_aq_sw_rules(hw, s_rule,
2507                                          ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2508                                          ice_aqc_opc_remove_sw_rules, NULL);
2509                 if (status)
2510                         goto exit;
2511
2512                 /* Remove a book keeping from the list */
2513                 ice_free(hw, s_rule);
2514
2515                 LIST_DEL(&list_elem->list_entry);
2516                 ice_free(hw, list_elem);
2517         }
2518 exit:
2519         ice_release_lock(rule_lock);
2520         return status;
2521 }
2522
2523 /**
2524  * ice_aq_get_res_alloc - get allocated resources
2525  * @hw: pointer to the HW struct
2526  * @num_entries: pointer to u16 to store the number of resource entries returned
2527  * @buf: pointer to user-supplied buffer
2528  * @buf_size: size of buff
2529  * @cd: pointer to command details structure or NULL
2530  *
2531  * The user-supplied buffer must be large enough to store the resource
2532  * information for all resource types. Each resource type is an
2533  * ice_aqc_get_res_resp_data_elem structure.
2534  */
2535 enum ice_status
2536 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2537                      u16 buf_size, struct ice_sq_cd *cd)
2538 {
2539         struct ice_aqc_get_res_alloc *resp;
2540         enum ice_status status;
2541         struct ice_aq_desc desc;
2542
2543         if (!buf)
2544                 return ICE_ERR_BAD_PTR;
2545
2546         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2547                 return ICE_ERR_INVAL_SIZE;
2548
2549         resp = &desc.params.get_res;
2550
2551         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2552         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2553
2554         if (!status && num_entries)
2555                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2556
2557         return status;
2558 }
2559
2560 /**
2561  * ice_aq_get_res_descs - get allocated resource descriptors
2562  * @hw: pointer to the hardware structure
2563  * @num_entries: number of resource entries in buffer
2564  * @buf: Indirect buffer to hold data parameters and response
2565  * @buf_size: size of buffer for indirect commands
2566  * @res_type: resource type
2567  * @res_shared: is resource shared
2568  * @desc_id: input - first desc ID to start; output - next desc ID
2569  * @cd: pointer to command details structure or NULL
2570  */
2571 enum ice_status
2572 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2573                      struct ice_aqc_get_allocd_res_desc_resp *buf,
2574                      u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2575                      struct ice_sq_cd *cd)
2576 {
2577         struct ice_aqc_get_allocd_res_desc *cmd;
2578         struct ice_aq_desc desc;
2579         enum ice_status status;
2580
2581         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_res_descs");
2582
2583         cmd = &desc.params.get_res_desc;
2584
2585         if (!buf)
2586                 return ICE_ERR_PARAM;
2587
2588         if (buf_size != (num_entries * sizeof(*buf)))
2589                 return ICE_ERR_PARAM;
2590
2591         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
2592
2593         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2594                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
2595                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2596         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2597
2598         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2599
2600         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2601         if (!status)
2602                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2603
2604         return status;
2605 }
2606
/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 *
 * Three passes over @m_list: (1) validate each entry, add multicast and
 * shared-unicast entries immediately via ice_add_rule_internal() and count
 * exclusive-unicast ones; (2) build one switch rule per unicast entry into a
 * single buffer and send it to FW in ICE_AQ_MAX_BUF_LEN-sized chunks;
 * (3) record the FW-assigned rule IDs and create book-keeping entries.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct LIST_HEAD_TYPE *rule_head;
	u16 elem_sent, total_elem_left;
	struct ice_switch_info *sw;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u16 num_unicast = 0;
	u16 s_rule_size;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;
	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* Pass 1: validate entries, count unicast, add the rest directly */
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    IS_ZERO_ETHER_ADDR(add))
			return ICE_ERR_PARAM;
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			ice_acquire_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			ice_release_lock(rule_lock);
			num_unicast++;
		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
			   (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
			/* Multicast / shared unicast: add one rule at a time;
			 * per-entry status is stored on the list entry.
			 */
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	ice_acquire_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = ICE_SUCCESS;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_calloc(hw, num_unicast, s_rule_size);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Pass 2a: fill one switch rule per exclusive-unicast address */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* Cap each AQ call at what fits in ICE_AQ_MAX_BUF_LEN */
		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			f_info->fltr_rule_id =
				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = (struct ice_fltr_mgmt_list_entry *)
				ice_malloc(hw, sizeof(*fm_entry));
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			LIST_ADD(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	ice_release_lock(rule_lock);
	if (s_rule)
		ice_free(hw, s_rule);
	return status;
}
2762
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * Three cases are handled under the VLAN rule lock:
 * 1) no rule exists for this VLAN yet - create (or reuse) a VSI list and a
 *    new packet forwarding rule;
 * 2) a rule exists and its VSI list is used by only that rule - append the
 *    new VSI to the existing list;
 * 3) a rule exists but its VSI list is shared - build a new two-VSI list
 *    and repoint this rule at it.
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		/* Case 1: no rule for this VLAN yet */
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			/* Re-find the just-created rule so its VSI list
			 * book-keeping can be attached below.
			 */
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Case 2: update existing VSI list to add new VSI ID only if
		 * it is used by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* Case 3: if VLAN rule exists and VSI list being used by this
		 * rule is referenced by more than 1 VLAN rule. Then create a
		 * new VSI list appending previous VSI with new VSI and update
		 * existing VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW,
				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		/* The shared list holds exactly one VSI at this point */
		cur_handle =
			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
					   ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	ice_release_lock(rule_lock);
	return status;
}
2916
2917 /**
2918  * ice_add_vlan - Add VLAN based filter rule
2919  * @hw: pointer to the hardware structure
2920  * @v_list: list of VLAN entries and forwarding information
2921  */
2922 enum ice_status
2923 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
2924 {
2925         struct ice_fltr_list_entry *v_list_itr;
2926
2927         if (!v_list || !hw)
2928                 return ICE_ERR_PARAM;
2929
2930         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
2931                             list_entry) {
2932                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
2933                         return ICE_ERR_PARAM;
2934                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
2935                 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
2936                 if (v_list_itr->status)
2937                         return v_list_itr->status;
2938         }
2939         return ICE_SUCCESS;
2940 }
2941
2942 #ifndef NO_MACVLAN_SUPPORT
2943 /**
2944  * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
2945  * @hw: pointer to the hardware structure
2946  * @mv_list: list of MAC and VLAN filters
2947  *
2948  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
2949  * pruning bits enabled, then it is the responsibility of the caller to make
2950  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
2951  * VLAN won't be received on that VSI otherwise.
2952  */
2953 enum ice_status
2954 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
2955 {
2956         struct ice_fltr_list_entry *mv_list_itr;
2957
2958         if (!mv_list || !hw)
2959                 return ICE_ERR_PARAM;
2960
2961         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
2962                             list_entry) {
2963                 enum ice_sw_lkup_type l_type =
2964                         mv_list_itr->fltr_info.lkup_type;
2965
2966                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
2967                         return ICE_ERR_PARAM;
2968                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
2969                 mv_list_itr->status =
2970                         ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
2971                                               mv_list_itr);
2972                 if (mv_list_itr->status)
2973                         return mv_list_itr->status;
2974         }
2975         return ICE_SUCCESS;
2976 }
2977 #endif
2978
2979 /**
2980  * ice_add_eth_mac - Add ethertype and MAC based filter rule
2981  * @hw: pointer to the hardware structure
2982  * @em_list: list of ether type MAC filter, MAC is optional
2983  */
2984 enum ice_status
2985 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
2986 {
2987         struct ice_fltr_list_entry *em_list_itr;
2988
2989         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
2990                             list_entry) {
2991                 enum ice_sw_lkup_type l_type =
2992                         em_list_itr->fltr_info.lkup_type;
2993
2994                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2995                     l_type != ICE_SW_LKUP_ETHERTYPE)
2996                         return ICE_ERR_PARAM;
2997
2998                 em_list_itr->fltr_info.flag = ICE_FLTR_TX;
2999                 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3000                                                             em_list_itr);
3001                 if (em_list_itr->status)
3002                         return em_list_itr->status;
3003         }
3004         return ICE_SUCCESS;
3005 }
3006
3007 /**
3008  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3009  * @hw: pointer to the hardware structure
3010  * @em_list: list of ethertype or ethertype MAC entries
3011  */
3012 enum ice_status
3013 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3014 {
3015         struct ice_fltr_list_entry *em_list_itr, *tmp;
3016
3017         if (!em_list || !hw)
3018                 return ICE_ERR_PARAM;
3019
3020         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3021                                  list_entry) {
3022                 enum ice_sw_lkup_type l_type =
3023                         em_list_itr->fltr_info.lkup_type;
3024
3025                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3026                     l_type != ICE_SW_LKUP_ETHERTYPE)
3027                         return ICE_ERR_PARAM;
3028
3029                 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3030                                                                em_list_itr);
3031                 if (em_list_itr->status)
3032                         return em_list_itr->status;
3033         }
3034         return ICE_SUCCESS;
3035 }
3036
3037
3038 /**
3039  * ice_rem_sw_rule_info
3040  * @hw: pointer to the hardware structure
3041  * @rule_head: pointer to the switch list structure that we want to delete
3042  */
3043 static void
3044 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3045 {
3046         if (!LIST_EMPTY(rule_head)) {
3047                 struct ice_fltr_mgmt_list_entry *entry;
3048                 struct ice_fltr_mgmt_list_entry *tmp;
3049
3050                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3051                                          ice_fltr_mgmt_list_entry, list_entry) {
3052                         LIST_DEL(&entry->list_entry);
3053                         ice_free(hw, entry);
3054                 }
3055         }
3056 }
3057
3058 /**
3059  * ice_rem_adv_rule_info
3060  * @hw: pointer to the hardware structure
3061  * @rule_head: pointer to the switch list structure that we want to delete
3062  */
3063 static void
3064 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3065 {
3066         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3067         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3068
3069         if (LIST_EMPTY(rule_head))
3070                 return;
3071
3072         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3073                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
3074                 LIST_DEL(&lst_itr->list_entry);
3075                 ice_free(hw, lst_itr->lkups);
3076                 ice_free(hw, lst_itr);
3077         }
3078 }
3079
3080 /**
3081  * ice_rem_all_sw_rules_info
3082  * @hw: pointer to the hardware structure
3083  */
3084 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3085 {
3086         struct ice_switch_info *sw = hw->switch_info;
3087         u8 i;
3088
3089         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3090                 struct LIST_HEAD_TYPE *rule_head;
3091
3092                 rule_head = &sw->recp_list[i].filt_rules;
3093                 if (!sw->recp_list[i].adv_rule)
3094                         ice_rem_sw_rule_info(hw, rule_head);
3095                 else
3096                         ice_rem_adv_rule_info(hw, rule_head);
3097         }
3098 }
3099
3100 /**
3101  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3102  * @pi: pointer to the port_info structure
3103  * @vsi_handle: VSI handle to set as default
3104  * @set: true to add the above mentioned switch rule, false to remove it
3105  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3106  *
3107  * add filter rule to set/unset given VSI as default VSI for the switch
3108  * (represented by swid)
3109  */
3110 enum ice_status
3111 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3112                  u8 direction)
3113 {
3114         struct ice_aqc_sw_rules_elem *s_rule;
3115         struct ice_fltr_info f_info;
3116         struct ice_hw *hw = pi->hw;
3117         enum ice_adminq_opc opcode;
3118         enum ice_status status;
3119         u16 s_rule_size;
3120         u16 hw_vsi_id;
3121
3122         if (!ice_is_vsi_valid(hw, vsi_handle))
3123                 return ICE_ERR_PARAM;
3124         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3125
3126         s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3127                             ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3128         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3129         if (!s_rule)
3130                 return ICE_ERR_NO_MEMORY;
3131
3132         ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3133
3134         f_info.lkup_type = ICE_SW_LKUP_DFLT;
3135         f_info.flag = direction;
3136         f_info.fltr_act = ICE_FWD_TO_VSI;
3137         f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3138
3139         if (f_info.flag & ICE_FLTR_RX) {
3140                 f_info.src = pi->lport;
3141                 f_info.src_id = ICE_SRC_ID_LPORT;
3142                 if (!set)
3143                         f_info.fltr_rule_id =
3144                                 pi->dflt_rx_vsi_rule_id;
3145         } else if (f_info.flag & ICE_FLTR_TX) {
3146                 f_info.src_id = ICE_SRC_ID_VSI;
3147                 f_info.src = hw_vsi_id;
3148                 if (!set)
3149                         f_info.fltr_rule_id =
3150                                 pi->dflt_tx_vsi_rule_id;
3151         }
3152
3153         if (set)
3154                 opcode = ice_aqc_opc_add_sw_rules;
3155         else
3156                 opcode = ice_aqc_opc_remove_sw_rules;
3157
3158         ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3159
3160         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3161         if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3162                 goto out;
3163         if (set) {
3164                 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3165
3166                 if (f_info.flag & ICE_FLTR_TX) {
3167                         pi->dflt_tx_vsi_num = hw_vsi_id;
3168                         pi->dflt_tx_vsi_rule_id = index;
3169                 } else if (f_info.flag & ICE_FLTR_RX) {
3170                         pi->dflt_rx_vsi_num = hw_vsi_id;
3171                         pi->dflt_rx_vsi_rule_id = index;
3172                 }
3173         } else {
3174                 if (f_info.flag & ICE_FLTR_TX) {
3175                         pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3176                         pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3177                 } else if (f_info.flag & ICE_FLTR_RX) {
3178                         pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3179                         pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3180                 }
3181         }
3182
3183 out:
3184         ice_free(hw, s_rule);
3185         return status;
3186 }
3187
3188 /**
3189  * ice_remove_mac - remove a MAC address based filter rule
3190  * @hw: pointer to the hardware structure
3191  * @m_list: list of MAC addresses and forwarding information
3192  *
3193  * This function removes either a MAC filter rule or a specific VSI from a
3194  * VSI list for a multicast MAC address.
3195  *
3196  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3197  * ice_add_mac. Caller should be aware that this call will only work if all
3198  * the entries passed into m_list were added previously. It will not attempt to
3199  * do a partial remove of entries that were found.
3200  */
3201 enum ice_status
3202 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3203 {
3204         struct ice_fltr_list_entry *list_itr, *tmp;
3205
3206         if (!m_list)
3207                 return ICE_ERR_PARAM;
3208
3209         LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3210                                  list_entry) {
3211                 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3212
3213                 if (l_type != ICE_SW_LKUP_MAC)
3214                         return ICE_ERR_PARAM;
3215                 list_itr->status = ice_remove_rule_internal(hw,
3216                                                             ICE_SW_LKUP_MAC,
3217                                                             list_itr);
3218                 if (list_itr->status)
3219                         return list_itr->status;
3220         }
3221         return ICE_SUCCESS;
3222 }
3223
3224 /**
3225  * ice_remove_vlan - Remove VLAN based filter rule
3226  * @hw: pointer to the hardware structure
3227  * @v_list: list of VLAN entries and forwarding information
3228  */
3229 enum ice_status
3230 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3231 {
3232         struct ice_fltr_list_entry *v_list_itr, *tmp;
3233
3234         if (!v_list || !hw)
3235                 return ICE_ERR_PARAM;
3236
3237         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3238                                  list_entry) {
3239                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3240
3241                 if (l_type != ICE_SW_LKUP_VLAN)
3242                         return ICE_ERR_PARAM;
3243                 v_list_itr->status = ice_remove_rule_internal(hw,
3244                                                               ICE_SW_LKUP_VLAN,
3245                                                               v_list_itr);
3246                 if (v_list_itr->status)
3247                         return v_list_itr->status;
3248         }
3249         return ICE_SUCCESS;
3250 }
3251
3252 #ifndef NO_MACVLAN_SUPPORT
3253 /**
3254  * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3255  * @hw: pointer to the hardware structure
3256  * @v_list: list of MAC VLAN entries and forwarding information
3257  */
3258 enum ice_status
3259 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3260 {
3261         struct ice_fltr_list_entry *v_list_itr, *tmp;
3262
3263         if (!v_list || !hw)
3264                 return ICE_ERR_PARAM;
3265
3266         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3267                                  list_entry) {
3268                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3269
3270                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3271                         return ICE_ERR_PARAM;
3272                 v_list_itr->status =
3273                         ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3274                                                  v_list_itr);
3275                 if (v_list_itr->status)
3276                         return v_list_itr->status;
3277         }
3278         return ICE_SUCCESS;
3279 }
3280 #endif /* !NO_MACVLAN_SUPPORT */
3281
3282 /**
3283  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3284  * @fm_entry: filter entry to inspect
3285  * @vsi_handle: VSI handle to compare with filter info
3286  */
3287 static bool
3288 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3289 {
3290         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3291                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3292                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3293                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3294                                  vsi_handle))));
3295 }
3296
3297 /**
3298  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3299  * @hw: pointer to the hardware structure
3300  * @vsi_handle: VSI handle to remove filters from
3301  * @vsi_list_head: pointer to the list to add entry to
3302  * @fi: pointer to fltr_info of filter entry to copy & add
3303  *
3304  * Helper function, used when creating a list of filters to remove from
3305  * a specific VSI. The entry added to vsi_list_head is a COPY of the
3306  * original filter entry, with the exception of fltr_info.fltr_act and
3307  * fltr_info.fwd_id fields. These are set such that later logic can
3308  * extract which VSI to remove the fltr from, and pass on that information.
3309  */
3310 static enum ice_status
3311 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3312                                struct LIST_HEAD_TYPE *vsi_list_head,
3313                                struct ice_fltr_info *fi)
3314 {
3315         struct ice_fltr_list_entry *tmp;
3316
3317         /* this memory is freed up in the caller function
3318          * once filters for this VSI are removed
3319          */
3320         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3321         if (!tmp)
3322                 return ICE_ERR_NO_MEMORY;
3323
3324         tmp->fltr_info = *fi;
3325
3326         /* Overwrite these fields to indicate which VSI to remove filter from,
3327          * so find and remove logic can extract the information from the
3328          * list entries. Note that original entries will still have proper
3329          * values.
3330          */
3331         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3332         tmp->fltr_info.vsi_handle = vsi_handle;
3333         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3334
3335         LIST_ADD(&tmp->list_entry, vsi_list_head);
3336
3337         return ICE_SUCCESS;
3338 }
3339
3340 /**
3341  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3342  * @hw: pointer to the hardware structure
3343  * @vsi_handle: VSI handle to remove filters from
3344  * @lkup_list_head: pointer to the list that has certain lookup type filters
3345  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3346  *
3347  * Locates all filters in lkup_list_head that are used by the given VSI,
3348  * and adds COPIES of those entries to vsi_list_head (intended to be used
3349  * to remove the listed filters).
3350  * Note that this means all entries in vsi_list_head must be explicitly
3351  * deallocated by the caller when done with list.
3352  */
3353 static enum ice_status
3354 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3355                          struct LIST_HEAD_TYPE *lkup_list_head,
3356                          struct LIST_HEAD_TYPE *vsi_list_head)
3357 {
3358         struct ice_fltr_mgmt_list_entry *fm_entry;
3359         enum ice_status status = ICE_SUCCESS;
3360
3361         /* check to make sure VSI ID is valid and within boundary */
3362         if (!ice_is_vsi_valid(hw, vsi_handle))
3363                 return ICE_ERR_PARAM;
3364
3365         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3366                             ice_fltr_mgmt_list_entry, list_entry) {
3367                 struct ice_fltr_info *fi;
3368
3369                 fi = &fm_entry->fltr_info;
3370                 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3371                         continue;
3372
3373                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3374                                                         vsi_list_head, fi);
3375                 if (status)
3376                         return status;
3377         }
3378         return status;
3379 }
3380
3381
3382 /**
3383  * ice_determine_promisc_mask
3384  * @fi: filter info to parse
3385  *
3386  * Helper function to determine which ICE_PROMISC_ mask corresponds
3387  * to given filter into.
3388  */
3389 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3390 {
3391         u16 vid = fi->l_data.mac_vlan.vlan_id;
3392         u8 *macaddr = fi->l_data.mac.mac_addr;
3393         bool is_tx_fltr = false;
3394         u8 promisc_mask = 0;
3395
3396         if (fi->flag == ICE_FLTR_TX)
3397                 is_tx_fltr = true;
3398
3399         if (IS_BROADCAST_ETHER_ADDR(macaddr))
3400                 promisc_mask |= is_tx_fltr ?
3401                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3402         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3403                 promisc_mask |= is_tx_fltr ?
3404                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3405         else if (IS_UNICAST_ETHER_ADDR(macaddr))
3406                 promisc_mask |= is_tx_fltr ?
3407                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3408         if (vid)
3409                 promisc_mask |= is_tx_fltr ?
3410                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3411
3412         return promisc_mask;
3413 }
3414
3415 /**
3416  * ice_get_vsi_promisc - get promiscuous mode of given VSI
3417  * @hw: pointer to the hardware structure
3418  * @vsi_handle: VSI handle to retrieve info from
3419  * @promisc_mask: pointer to mask to be filled in
3420  * @vid: VLAN ID of promisc VLAN VSI
3421  */
3422 enum ice_status
3423 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3424                     u16 *vid)
3425 {
3426         struct ice_switch_info *sw = hw->switch_info;
3427         struct ice_fltr_mgmt_list_entry *itr;
3428         struct LIST_HEAD_TYPE *rule_head;
3429         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
3430
3431         if (!ice_is_vsi_valid(hw, vsi_handle))
3432                 return ICE_ERR_PARAM;
3433
3434         *vid = 0;
3435         *promisc_mask = 0;
3436         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3437         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3438
3439         ice_acquire_lock(rule_lock);
3440         LIST_FOR_EACH_ENTRY(itr, rule_head,
3441                             ice_fltr_mgmt_list_entry, list_entry) {
3442                 /* Continue if this filter doesn't apply to this VSI or the
3443                  * VSI ID is not in the VSI map for this filter
3444                  */
3445                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3446                         continue;
3447
3448                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3449         }
3450         ice_release_lock(rule_lock);
3451
3452         return ICE_SUCCESS;
3453 }
3454
3455 /**
3456  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3457  * @hw: pointer to the hardware structure
3458  * @vsi_handle: VSI handle to retrieve info from
3459  * @promisc_mask: pointer to mask to be filled in
3460  * @vid: VLAN ID of promisc VLAN VSI
3461  */
3462 enum ice_status
3463 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3464                          u16 *vid)
3465 {
3466         struct ice_switch_info *sw = hw->switch_info;
3467         struct ice_fltr_mgmt_list_entry *itr;
3468         struct LIST_HEAD_TYPE *rule_head;
3469         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
3470
3471         if (!ice_is_vsi_valid(hw, vsi_handle))
3472                 return ICE_ERR_PARAM;
3473
3474         *vid = 0;
3475         *promisc_mask = 0;
3476         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3477         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3478
3479         ice_acquire_lock(rule_lock);
3480         LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3481                             list_entry) {
3482                 /* Continue if this filter doesn't apply to this VSI or the
3483                  * VSI ID is not in the VSI map for this filter
3484                  */
3485                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3486                         continue;
3487
3488                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3489         }
3490         ice_release_lock(rule_lock);
3491
3492         return ICE_SUCCESS;
3493 }
3494
3495 /**
3496  * ice_remove_promisc - Remove promisc based filter rules
3497  * @hw: pointer to the hardware structure
3498  * @recp_id: recipe ID for which the rule needs to removed
3499  * @v_list: list of promisc entries
3500  */
3501 static enum ice_status
3502 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3503                    struct LIST_HEAD_TYPE *v_list)
3504 {
3505         struct ice_fltr_list_entry *v_list_itr, *tmp;
3506
3507         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3508                                  list_entry) {
3509                 v_list_itr->status =
3510                         ice_remove_rule_internal(hw, recp_id, v_list_itr);
3511                 if (v_list_itr->status)
3512                         return v_list_itr->status;
3513         }
3514         return ICE_SUCCESS;
3515 }
3516
3517 /**
3518  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3519  * @hw: pointer to the hardware structure
3520  * @vsi_handle: VSI handle to clear mode
3521  * @promisc_mask: mask of promiscuous config bits to clear
3522  * @vid: VLAN ID to clear VLAN promiscuous
3523  */
3524 enum ice_status
3525 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3526                       u16 vid)
3527 {
3528         struct ice_switch_info *sw = hw->switch_info;
3529         struct ice_fltr_list_entry *fm_entry, *tmp;
3530         struct LIST_HEAD_TYPE remove_list_head;
3531         struct ice_fltr_mgmt_list_entry *itr;
3532         struct LIST_HEAD_TYPE *rule_head;
3533         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
3534         enum ice_status status = ICE_SUCCESS;
3535         u8 recipe_id;
3536
3537         if (!ice_is_vsi_valid(hw, vsi_handle))
3538                 return ICE_ERR_PARAM;
3539
3540         if (vid)
3541                 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3542         else
3543                 recipe_id = ICE_SW_LKUP_PROMISC;
3544
3545         rule_head = &sw->recp_list[recipe_id].filt_rules;
3546         rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3547
3548         INIT_LIST_HEAD(&remove_list_head);
3549
3550         ice_acquire_lock(rule_lock);
3551         LIST_FOR_EACH_ENTRY(itr, rule_head,
3552                             ice_fltr_mgmt_list_entry, list_entry) {
3553                 u8 fltr_promisc_mask = 0;
3554
3555                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3556                         continue;
3557
3558                 fltr_promisc_mask |=
3559                         ice_determine_promisc_mask(&itr->fltr_info);
3560
3561                 /* Skip if filter is not completely specified by given mask */
3562                 if (fltr_promisc_mask & ~promisc_mask)
3563                         continue;
3564
3565                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3566                                                         &remove_list_head,
3567                                                         &itr->fltr_info);
3568                 if (status) {
3569                         ice_release_lock(rule_lock);
3570                         goto free_fltr_list;
3571                 }
3572         }
3573         ice_release_lock(rule_lock);
3574
3575         status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
3576
3577 free_fltr_list:
3578         LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3579                                  ice_fltr_list_entry, list_entry) {
3580                 LIST_DEL(&fm_entry->list_entry);
3581                 ice_free(hw, fm_entry);
3582         }
3583
3584         return status;
3585 }
3586
3587 /**
3588  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3589  * @hw: pointer to the hardware structure
3590  * @vsi_handle: VSI handle to configure
3591  * @promisc_mask: mask of promiscuous config bits
3592  * @vid: VLAN ID to set VLAN promiscuous
3593  */
3594 enum ice_status
3595 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3596 {
3597         enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3598         struct ice_fltr_list_entry f_list_entry;
3599         struct ice_fltr_info new_fltr;
3600         enum ice_status status = ICE_SUCCESS;
3601         bool is_tx_fltr;
3602         u16 hw_vsi_id;
3603         int pkt_type;
3604         u8 recipe_id;
3605
3606         ice_debug(hw, ICE_DBG_TRACE, "ice_set_vsi_promisc\n");
3607
3608         if (!ice_is_vsi_valid(hw, vsi_handle))
3609                 return ICE_ERR_PARAM;
3610         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3611
3612         ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
3613
3614         if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3615                 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3616                 new_fltr.l_data.mac_vlan.vlan_id = vid;
3617                 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3618         } else {
3619                 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3620                 recipe_id = ICE_SW_LKUP_PROMISC;
3621         }
3622
3623         /* Separate filters must be set for each direction/packet type
3624          * combination, so we will loop over the mask value, store the
3625          * individual type, and clear it out in the input mask as it
3626          * is found.
3627          */
3628         while (promisc_mask) {
3629                 u8 *mac_addr;
3630
3631                 pkt_type = 0;
3632                 is_tx_fltr = false;
3633
3634                 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3635                         promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3636                         pkt_type = UCAST_FLTR;
3637                 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3638                         promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3639                         pkt_type = UCAST_FLTR;
3640                         is_tx_fltr = true;
3641                 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3642                         promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3643                         pkt_type = MCAST_FLTR;
3644                 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3645                         promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3646                         pkt_type = MCAST_FLTR;
3647                         is_tx_fltr = true;
3648                 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3649                         promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3650                         pkt_type = BCAST_FLTR;
3651                 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3652                         promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3653                         pkt_type = BCAST_FLTR;
3654                         is_tx_fltr = true;
3655                 }
3656
3657                 /* Check for VLAN promiscuous flag */
3658                 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3659                         promisc_mask &= ~ICE_PROMISC_VLAN_RX;
3660                 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
3661                         promisc_mask &= ~ICE_PROMISC_VLAN_TX;
3662                         is_tx_fltr = true;
3663                 }
3664
3665                 /* Set filter DA based on packet type */
3666                 mac_addr = new_fltr.l_data.mac.mac_addr;
3667                 if (pkt_type == BCAST_FLTR) {
3668                         ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
3669                 } else if (pkt_type == MCAST_FLTR ||
3670                            pkt_type == UCAST_FLTR) {
3671                         /* Use the dummy ether header DA */
3672                         ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
3673                                    ICE_NONDMA_TO_NONDMA);
3674                         if (pkt_type == MCAST_FLTR)
3675                                 mac_addr[0] |= 0x1;     /* Set multicast bit */
3676                 }
3677
3678                 /* Need to reset this to zero for all iterations */
3679                 new_fltr.flag = 0;
3680                 if (is_tx_fltr) {
3681                         new_fltr.flag |= ICE_FLTR_TX;
3682                         new_fltr.src = hw_vsi_id;
3683                 } else {
3684                         new_fltr.flag |= ICE_FLTR_RX;
3685                         new_fltr.src = hw->port_info->lport;
3686                 }
3687
3688                 new_fltr.fltr_act = ICE_FWD_TO_VSI;
3689                 new_fltr.vsi_handle = vsi_handle;
3690                 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
3691                 f_list_entry.fltr_info = new_fltr;
3692
3693                 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
3694                 if (status != ICE_SUCCESS)
3695                         goto set_promisc_exit;
3696         }
3697
3698 set_promisc_exit:
3699         return status;
3700 }
3701
3702 /**
3703  * ice_set_vlan_vsi_promisc
3704  * @hw: pointer to the hardware structure
3705  * @vsi_handle: VSI handle to configure
3706  * @promisc_mask: mask of promiscuous config bits
3707  * @rm_vlan_promisc: Clear VLANs VSI promisc mode
3708  *
3709  * Configure VSI with all associated VLANs to given promiscuous mode(s)
3710  */
3711 enum ice_status
3712 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3713                          bool rm_vlan_promisc)
3714 {
3715         struct ice_switch_info *sw = hw->switch_info;
3716         struct ice_fltr_list_entry *list_itr, *tmp;
3717         struct LIST_HEAD_TYPE vsi_list_head;
3718         struct LIST_HEAD_TYPE *vlan_head;
3719         struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
3720         enum ice_status status;
3721         u16 vlan_id;
3722
3723         INIT_LIST_HEAD(&vsi_list_head);
3724         vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3725         vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3726         ice_acquire_lock(vlan_lock);
3727         status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
3728                                           &vsi_list_head);
3729         ice_release_lock(vlan_lock);
3730         if (status)
3731                 goto free_fltr_list;
3732
3733         LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
3734                             list_entry) {
3735                 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
3736                 if (rm_vlan_promisc)
3737                         status = ice_clear_vsi_promisc(hw, vsi_handle,
3738                                                        promisc_mask, vlan_id);
3739                 else
3740                         status = ice_set_vsi_promisc(hw, vsi_handle,
3741                                                      promisc_mask, vlan_id);
3742                 if (status)
3743                         break;
3744         }
3745
3746 free_fltr_list:
3747         LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
3748                                  ice_fltr_list_entry, list_entry) {
3749                 LIST_DEL(&list_itr->list_entry);
3750                 ice_free(hw, list_itr);
3751         }
3752         return status;
3753 }
3754
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 *
 * Collects every filter of the given lookup type that belongs to
 * @vsi_handle into a private list, removes them via the type-specific
 * remove helper, then frees the private list. Failures from the remove
 * helpers are not propagated (the function returns void).
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	/* Copy matching filters under the lock, then release it before
	 * calling the remove helpers below.
	 */
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	ice_release_lock(rule_lock);
	if (status)
		return;

	/* Dispatch to the remove routine matching the lookup type */
	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
#ifndef NO_MACVLAN_SUPPORT
		ice_remove_mac_vlan(hw, &remove_list_head);
#else
		ice_debug(hw, ICE_DBG_SW, "MAC VLAN look up is not supported yet\n");
#endif /* !NO_MACVLAN_SUPPORT */
		break;
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW,
			  "Remove filters for this lookup type hasn't been implemented yet\n");
		break;
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
		break;
	}

	/* Free the snapshot entries regardless of removal outcome */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}
}
3820
3821 /**
3822  * ice_remove_vsi_fltr - Remove all filters for a VSI
3823  * @hw: pointer to the hardware structure
3824  * @vsi_handle: VSI handle to remove filters from
3825  */
3826 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
3827 {
3828         ice_debug(hw, ICE_DBG_TRACE, "ice_remove_vsi_fltr\n");
3829
3830         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
3831         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
3832         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
3833         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
3834         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
3835         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
3836         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
3837         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
3838 }
3839
3840 /**
3841  * ice_alloc_res_cntr - allocating resource counter
3842  * @hw: pointer to the hardware structure
3843  * @type: type of resource
3844  * @alloc_shared: if set it is shared else dedicated
3845  * @num_items: number of entries requested for FD resource type
3846  * @counter_id: counter index returned by AQ call
3847  */
3848 enum ice_status
3849 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3850                    u16 *counter_id)
3851 {
3852         struct ice_aqc_alloc_free_res_elem *buf;
3853         enum ice_status status;
3854         u16 buf_len;
3855
3856         /* Allocate resource */
3857         buf_len = sizeof(*buf);
3858         buf = (struct ice_aqc_alloc_free_res_elem *)
3859                 ice_malloc(hw, buf_len);
3860         if (!buf)
3861                 return ICE_ERR_NO_MEMORY;
3862
3863         buf->num_elems = CPU_TO_LE16(num_items);
3864         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
3865                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
3866
3867         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3868                                        ice_aqc_opc_alloc_res, NULL);
3869         if (status)
3870                 goto exit;
3871
3872         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
3873
3874 exit:
3875         ice_free(hw, buf);
3876         return status;
3877 }
3878
3879 /**
3880  * ice_free_res_cntr - free resource counter
3881  * @hw: pointer to the hardware structure
3882  * @type: type of resource
3883  * @alloc_shared: if set it is shared else dedicated
3884  * @num_items: number of entries to be freed for FD resource type
3885  * @counter_id: counter ID resource which needs to be freed
3886  */
3887 enum ice_status
3888 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3889                   u16 counter_id)
3890 {
3891         struct ice_aqc_alloc_free_res_elem *buf;
3892         enum ice_status status;
3893         u16 buf_len;
3894
3895         /* Free resource */
3896         buf_len = sizeof(*buf);
3897         buf = (struct ice_aqc_alloc_free_res_elem *)
3898                 ice_malloc(hw, buf_len);
3899         if (!buf)
3900                 return ICE_ERR_NO_MEMORY;
3901
3902         buf->num_elems = CPU_TO_LE16(num_items);
3903         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
3904                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
3905         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
3906
3907         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3908                                        ice_aqc_opc_free_res, NULL);
3909         if (status)
3910                 ice_debug(hw, ICE_DBG_SW,
3911                           "counter resource could not be freed\n");
3912
3913         ice_free(hw, buf);
3914         return status;
3915 }
3916
/**
 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: returns counter index
 *
 * Thin wrapper around ice_alloc_res_cntr() that allocates a single
 * dedicated VLAN counter.
 */
enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
{
	return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
				  counter_id);
}
3928
/**
 * ice_free_vlan_res_counter - Free counter resource for VLAN type
 * @hw: pointer to the hardware structure
 * @counter_id: counter index to be freed
 *
 * Thin wrapper around ice_free_res_cntr() that releases a single
 * dedicated VLAN counter previously obtained via
 * ice_alloc_vlan_res_counter().
 */
enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
{
	return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
				 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
				 counter_id);
}
3940
/**
 * ice_alloc_res_lg_act - add large action resource
 * @hw: pointer to the hardware structure
 * @l_id: large action ID to fill it in
 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates a wide-table entry sized for @num_acts actions and returns
 * its index in @l_id on success.
 */
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	u16 buf_len;

	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
		return ICE_ERR_PARAM;

	/* Allocate resource for large action */
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	sw_buf->num_elems = CPU_TO_LE16(1);

	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is greater than 2, then use
	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * num_acts cannot exceed ICE_MAX_LG_ACT; this was ensured at the
	 * beginning of the function.
	 * (The earlier comment said WIDE_TABLE_3 for two actions, which
	 * did not match the code below.)
	 */
	if (num_acts == 1)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
	else if (num_acts == 2)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
	else
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);

	ice_free(hw, sw_buf);
	return status;
}
3988
/**
 * ice_add_mac_with_sw_marker - add filter with sw marker
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure containing the MAC filter information
 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Adds (or reuses) a MAC forwarding filter and attaches a software-marker
 * large action to it. If the large-action programming fails and the MAC
 * filter did not exist beforehand, the filter is removed again.
 */
enum ice_status
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
			   u16 sw_marker)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exists;
	u16 lg_act_id;

	/* Only forward-to-VSI MAC filters with a valid marker and VSI are
	 * accepted.
	 */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */

	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	entry_exists = false;
	ret = ice_add_mac(hw, &l_head);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exists = true;
	else if (ret)
		return ret;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
	/* NOTE(review): on this path ret is not set to an error; the caller
	 * may observe a non-error status even though no marker was added —
	 * confirm intended.
	 */
	if (!m_entry)
		goto exit_error;

	/* If counter action was enabled for this rule then don't enable
	 * sw marker large action
	 */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* if same marker was added before */
	if (m_entry->sw_marker_id == sw_marker) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to hold large act. Three actions
	 * for marker based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
	if (ret)
		goto exit_error;

	/* NOTE(review): ret is still ICE_SUCCESS here, so this failure path
	 * can also report success to the caller — confirm intended.
	 */
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		goto exit_error;

	/* Update the switch rule to add the marker action */
	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exists)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
4082
/**
 * ice_add_mac_with_counter - add filter with counter enabled
 * @hw: pointer to the hardware structure
 * @f_info: pointer to filter info structure containing the MAC filter
 *          information
 *
 * Adds (or reuses) a MAC forwarding filter and attaches a VLAN-counter
 * large action to it. If the counter programming fails and the MAC filter
 * did not exist beforehand, the filter is removed again.
 */
enum ice_status
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exist;
	u16 counter_id;
	u16 lg_act_id;

	/* Only forward-to-VSI MAC filters with a valid VSI are accepted */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);

	entry_exist = false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);

	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	ret = ice_add_mac(hw, &l_head);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exist = true;
	else if (ret)
		return ret;

	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
	if (!m_entry) {
		ret = ICE_ERR_BAD_PTR;
		goto exit_error;
	}

	/* Don't enable counter for a filter for which sw marker was enabled */
	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* If a counter was already enabled then don't need to add again */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to VLAN counter */
	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
	if (ret)
		goto exit_error;

	/* Allocate a hardware table entry to hold large act. Two actions for
	 * counter based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
	if (ret)
		goto exit_error;

	/* NOTE(review): ret is still ICE_SUCCESS here, so this failure path
	 * can report success to the caller; the allocated counter_id is also
	 * not released on the error paths below — confirm intended.
	 */
	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		goto exit_error;

	/* Update the switch rule to add the counter action */
	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exist)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
4179
/* This is a mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * For example, the destination address is 3 words in the ethertype header and
 * the corresponding bytes are 0, 2, 3 in the actual packet header and the
 * source address is at 4, 6, 8.
 * IMPORTANT: Every structure part of the "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if a new
 * structure is added to that union.
 * NOTE(review): code below (ice_fill_valid_words) indexes this table directly
 * with the enum ice_protocol_type value, so entry order must stay aligned
 * with that enum.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12 } },
	{ ICE_GENEVE,		{ 8, 10, 12 } },
	{ ICE_VXLAN_GPE,	{ 0, 2, 4 } },
	{ ICE_NVGRE,		{ 0, 2 } },
	{ ICE_PROTOCOL_LAST,	{ 0 } }
};
4207
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
 * following policy.
 * Each entry is a pair count followed by up to ICE_NUM_WORDS_RECIPE
 * (protocol ID, offset) pairs describing one preferred group.
 */
static const struct ice_pref_recipe_group ice_recipe_pack[] = {
	{3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
	      { ICE_MAC_OFOS_HW, 4, 0 } } },
	{4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
	      { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } } },
	{2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } } },
	{2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } } },
};
4221
/* Maps each software protocol type to its hardware protocol ID.
 * Looked up linearly by ice_prot_type_to_id(), but also indexed directly by
 * enum value elsewhere in this file (ice_fill_valid_words).
 * NOTE(review): the IPv6 entry order here (OFOS before IL) differs from
 * ice_prot_ext[] above (IL before OFOS); verify both tables match the order
 * of enum ice_protocol_type wherever direct indexing is used.
 */
static const struct ice_protocol_entry ice_prot_id_tbl[] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_PROTOCOL_LAST,	0 }
};
4238
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 * A recipe matches when it has the same number of valid words as
 * @lkup_exts and every (protocol ID, offset) word of @lkup_exts appears
 * somewhere in the recipe (order-independent comparison).
 */
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
{
	struct ice_sw_recipe *recp;
	u16 i;

	ice_get_recp_to_prof_map(hw);
	/* Initialize available_result_ids which tracks available result idx
	 * (available_result_ids is a bitmap defined elsewhere in this file;
	 * all indices up to ICE_CHAIN_FV_INDEX_START are marked available).
	 */
	for (i = 0; i <= ICE_CHAIN_FV_INDEX_START; i++)
		ice_set_bit(ICE_CHAIN_FV_INDEX_START - i,
			    available_result_ids);

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i))
				continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *a = lkup_exts->fv_words;
			struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
			bool found = true;
			u8 p, q;

			/* Check that every requested word exists somewhere
			 * in this recipe's word list.
			 */
			for (p = 0; p < lkup_exts->n_val_words; p++) {
				for (q = 0; q < recp[i].lkup_exts.n_val_words;
				     q++) {
					if (a[p].off == b[q].off &&
					    a[p].prot_id == b[q].prot_id)
						/* Found the "p"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (q >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 */
			if (found)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
4307
4308 /**
4309  * ice_prot_type_to_id - get protocol ID from protocol type
4310  * @type: protocol type
4311  * @id: pointer to variable that will receive the ID
4312  *
4313  * Returns true if found, false otherwise
4314  */
4315 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4316 {
4317         u16 i;
4318
4319         for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4320                 if (ice_prot_id_tbl[i].type == type) {
4321                         *id = ice_prot_id_tbl[i].protocol_id;
4322                         return true;
4323                 }
4324         return false;
4325 }
4326
4327 /**
4328  * ice_find_valid_words - count valid words
4329  * @rule: advanced rule with lookup information
4330  * @lkup_exts: byte offset extractions of the words that are valid
4331  *
4332  * calculate valid words in a lookup rule using mask value
4333  */
4334 static u16
4335 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4336                      struct ice_prot_lkup_ext *lkup_exts)
4337 {
4338         u16 j, word = 0;
4339         u16 prot_id;
4340         u16 ret_val;
4341
4342         if (!ice_prot_type_to_id(rule->type, &prot_id))
4343                 return 0;
4344
4345         word = lkup_exts->n_val_words;
4346
4347         for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4348                 if (((u16 *)&rule->m_u)[j] == 0xffff &&
4349                     rule->type < ARRAY_SIZE(ice_prot_ext)) {
4350                         /* No more space to accommodate */
4351                         if (word >= ICE_MAX_CHAIN_WORDS)
4352                                 return 0;
4353                         lkup_exts->fv_words[word].off =
4354                                 ice_prot_ext[rule->type].offs[j];
4355                         lkup_exts->fv_words[word].prot_id =
4356                                 ice_prot_id_tbl[rule->type].protocol_id;
4357                         word++;
4358                 }
4359
4360         ret_val = word - lkup_exts->n_val_words;
4361         lkup_exts->n_val_words = word;
4362
4363         return ret_val;
4364 }
4365
4366 /**
4367  * ice_find_prot_off_ind - check for specific ID and offset in rule
4368  * @lkup_exts: an array of protocol header extractions
4369  * @prot_type: protocol type to check
4370  * @off: expected offset of the extraction
4371  *
4372  * Check if the prot_ext has given protocol ID and offset
4373  */
4374 static u8
4375 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4376                       u16 off)
4377 {
4378         u8 j;
4379
4380         for (j = 0; j < lkup_exts->n_val_words; j++)
4381                 if (lkup_exts->fv_words[j].off == off &&
4382                     lkup_exts->fv_words[j].prot_id == prot_type)
4383                         return j;
4384
4385         return ICE_MAX_CHAIN_WORDS;
4386 }
4387
4388 /**
4389  * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4390  * @lkup_exts: an array of protocol header extractions
4391  * @r_policy: preferred recipe grouping policy
4392  *
4393  * Helper function to check if given recipe group is subset we need to check if
4394  * all the words described by the given recipe group exist in the advanced rule
4395  * look up information
4396  */
4397 static bool
4398 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4399                      const struct ice_pref_recipe_group *r_policy)
4400 {
4401         u8 ind[ICE_NUM_WORDS_RECIPE];
4402         u8 count = 0;
4403         u8 i;
4404
4405         /* check if everything in the r_policy is part of the entire rule */
4406         for (i = 0; i < r_policy->n_val_pairs; i++) {
4407                 u8 j;
4408
4409                 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4410                                           r_policy->pairs[i].off);
4411                 if (j >= ICE_MAX_CHAIN_WORDS)
4412                         return false;
4413
4414                 /* store the indexes temporarily found by the find function
4415                  * this will be used to mark the words as 'done'
4416                  */
4417                 ind[count++] = j;
4418         }
4419
4420         /* If the entire policy recipe was a true match, then mark the fields
4421          * that are covered by the recipe as 'done' meaning that these words
4422          * will be clumped together in one recipe.
4423          * "Done" here means in our searching if certain recipe group
4424          * matches or is subset of the given rule, then we mark all
4425          * the corresponding offsets as found. So the remaining recipes should
4426          * be created with whatever words that were left.
4427          */
4428         for (i = 0; i < count; i++) {
4429                 u8 in = ind[i];
4430
4431                 ice_set_bit(in, lkup_exts->done);
4432         }
4433         return true;
4434 }
4435
/**
 * ice_create_first_fit_recp_def - Create a recipe grouping
 * @hw: pointer to the hardware structure
 * @lkup_exts: an array of protocol header extractions
 * @rg_list: pointer to a list that stores new recipe groups
 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
 *
 * Using first fit algorithm, take all the words that are still not done
 * and start grouping them in 4-word groups. Each group makes up one
 * recipe.
 */
static enum ice_status
ice_create_first_fit_recp_def(struct ice_hw *hw,
			      struct ice_prot_lkup_ext *lkup_exts,
			      struct LIST_HEAD_TYPE *rg_list,
			      u8 *recp_cnt)
{
	struct ice_pref_recipe_group *grp = NULL;
	u8 j;

	*recp_cnt = 0;

	/* Walk through every word in the rule to check if it is not done. If so
	 * then this word needs to be part of a new recipe.
	 */
	for (j = 0; j < lkup_exts->n_val_words; j++)
		if (!ice_is_bit_set(lkup_exts->done, j)) {
			/* Start a new group when there is none yet or the
			 * current one is full (ICE_NUM_WORDS_RECIPE pairs).
			 */
			if (!grp ||
			    grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
				struct ice_recp_grp_entry *entry;

				/* NOTE(review): grp->n_val_pairs is read
				 * below without an explicit reset — assumes
				 * ice_malloc returns zeroed memory; confirm.
				 */
				entry = (struct ice_recp_grp_entry *)
					ice_malloc(hw, sizeof(*entry));
				if (!entry)
					return ICE_ERR_NO_MEMORY;
				LIST_ADD(&entry->l_entry, rg_list);
				grp = &entry->r_group;
				(*recp_cnt)++;
			}

			grp->pairs[grp->n_val_pairs].prot_id =
				lkup_exts->fv_words[j].prot_id;
			grp->pairs[grp->n_val_pairs].off =
				lkup_exts->fv_words[j].off;
			grp->n_val_pairs++;
		}

	return ICE_SUCCESS;
}
4485
4486 /**
4487  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4488  * @hw: pointer to the hardware structure
4489  * @fv_list: field vector with the extraction sequence information
4490  * @rg_list: recipe groupings with protocol-offset pairs
4491  *
4492  * Helper function to fill in the field vector indices for protocol-offset
4493  * pairs. These indexes are then ultimately programmed into a recipe.
4494  */
4495 static void
4496 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4497                        struct LIST_HEAD_TYPE *rg_list)
4498 {
4499         struct ice_sw_fv_list_entry *fv;
4500         struct ice_recp_grp_entry *rg;
4501         struct ice_fv_word *fv_ext;
4502
4503         if (LIST_EMPTY(fv_list))
4504                 return;
4505
4506         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4507         fv_ext = fv->fv_ptr->ew;
4508
4509         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4510                 u8 i;
4511
4512                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4513                         struct ice_fv_word *pr;
4514                         u8 j;
4515
4516                         pr = &rg->r_group.pairs[i];
4517                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4518                                 if (fv_ext[j].prot_id == pr->prot_id &&
4519                                     fv_ext[j].off == pr->off) {
4520                                         /* Store index of field vector */
4521                                         rg->fv_idx[i] = j;
4522                                         break;
4523                                 }
4524                 }
4525         }
4526 }
4527
4528 /**
4529  * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4530  * @hw: pointer to hardware structure
4531  * @rm: recipe management list entry
4532  * @match_tun: if field vector index for tunnel needs to be programmed
4533  */
4534 static enum ice_status
4535 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4536                   bool match_tun)
4537 {
4538         struct ice_aqc_recipe_data_elem *tmp;
4539         struct ice_aqc_recipe_data_elem *buf;
4540         struct ice_recp_grp_entry *entry;
4541         enum ice_status status;
4542         u16 recipe_count;
4543         u8 chain_idx;
4544         u8 recps = 0;
4545
4546         /* When more than one recipe are required, another recipe is needed to
4547          * chain them together. Matching a tunnel metadata ID takes up one of
4548          * the match fields in the chaining recipe reducing the number of
4549          * chained recipes by one.
4550          */
4551         if (rm->n_grp_count > 1)
4552                 rm->n_grp_count++;
4553         if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE ||
4554             (match_tun && rm->n_grp_count > (ICE_MAX_CHAIN_RECIPE - 1)))
4555                 return ICE_ERR_MAX_LIMIT;
4556
4557         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
4558                                                             ICE_MAX_NUM_RECIPES,
4559                                                             sizeof(*tmp));
4560         if (!tmp)
4561                 return ICE_ERR_NO_MEMORY;
4562
4563         buf = (struct ice_aqc_recipe_data_elem *)
4564                 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
4565         if (!buf) {
4566                 status = ICE_ERR_NO_MEMORY;
4567                 goto err_mem;
4568         }
4569
4570         ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4571         recipe_count = ICE_MAX_NUM_RECIPES;
4572         status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4573                                    NULL);
4574         if (status || recipe_count == 0)
4575                 goto err_unroll;
4576
4577         /* Allocate the recipe resources, and configure them according to the
4578          * match fields from protocol headers and extracted field vectors.
4579          */
4580         chain_idx = ICE_CHAIN_FV_INDEX_START -
4581                 ice_find_first_bit(available_result_ids,
4582                                    ICE_CHAIN_FV_INDEX_START + 1);
4583         LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4584                 u8 i;
4585
4586                 status = ice_alloc_recipe(hw, &entry->rid);
4587                 if (status)
4588                         goto err_unroll;
4589
4590                 /* Clear the result index of the located recipe, as this will be
4591                  * updated, if needed, later in the recipe creation process.
4592                  */
4593                 tmp[0].content.result_indx = 0;
4594
4595                 buf[recps] = tmp[0];
4596                 buf[recps].recipe_indx = (u8)entry->rid;
4597                 /* if the recipe is a non-root recipe RID should be programmed
4598                  * as 0 for the rules to be applied correctly.
4599                  */
4600                 buf[recps].content.rid = 0;
4601                 ice_memset(&buf[recps].content.lkup_indx, 0,
4602                            sizeof(buf[recps].content.lkup_indx),
4603                            ICE_NONDMA_MEM);
4604
4605                 /* All recipes use look-up index 0 to match switch ID. */
4606                 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4607                 buf[recps].content.mask[0] =
4608                         CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4609                 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
4610                  * to be 0
4611                  */
4612                 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4613                         buf[recps].content.lkup_indx[i] = 0x80;
4614                         buf[recps].content.mask[i] = 0;
4615                 }
4616
4617                 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4618                         buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4619                         buf[recps].content.mask[i + 1] = CPU_TO_LE16(0xFFFF);
4620                 }
4621
4622                 if (rm->n_grp_count > 1) {
4623                         entry->chain_idx = chain_idx;
4624                         buf[recps].content.result_indx =
4625                                 ICE_AQ_RECIPE_RESULT_EN |
4626                                 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4627                                  ICE_AQ_RECIPE_RESULT_DATA_M);
4628                         ice_clear_bit(ICE_CHAIN_FV_INDEX_START - chain_idx,
4629                                       available_result_ids);
4630                         chain_idx = ICE_CHAIN_FV_INDEX_START -
4631                                 ice_find_first_bit(available_result_ids,
4632                                                    ICE_CHAIN_FV_INDEX_START +
4633                                                    1);
4634                 }
4635
4636                 /* fill recipe dependencies */
4637                 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
4638                                 ICE_MAX_NUM_RECIPES);
4639                 ice_set_bit(buf[recps].recipe_indx,
4640                             (ice_bitmap_t *)buf[recps].recipe_bitmap);
4641                 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4642                 recps++;
4643         }
4644
4645         if (rm->n_grp_count == 1) {
4646                 rm->root_rid = buf[0].recipe_indx;
4647                 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
4648                 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4649                 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4650                         ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4651                                    sizeof(buf[0].recipe_bitmap),
4652                                    ICE_NONDMA_TO_NONDMA);
4653                 } else {
4654                         status = ICE_ERR_BAD_PTR;
4655                         goto err_unroll;
4656                 }
4657                 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
4658                  * the recipe which is getting created if specified
4659                  * by user. Usually any advanced switch filter, which results
4660                  * into new extraction sequence, ended up creating a new recipe
4661                  * of type ROOT and usually recipes are associated with profiles
4662                  * Switch rule referreing newly created recipe, needs to have
4663                  * either/or 'fwd' or 'join' priority, otherwise switch rule
4664                  * evaluation will not happen correctly. In other words, if
4665                  * switch rule to be evaluated on priority basis, then recipe
4666                  * needs to have priority, otherwise it will be evaluated last.
4667                  */
4668                 buf[0].content.act_ctrl_fwd_priority = rm->priority;
4669         } else {
4670                 struct ice_recp_grp_entry *last_chain_entry;
4671                 u16 rid, i;
4672
4673                 /* Allocate the last recipe that will chain the outcomes of the
4674                  * other recipes together
4675                  */
4676                 status = ice_alloc_recipe(hw, &rid);
4677                 if (status)
4678                         goto err_unroll;
4679
4680                 buf[recps].recipe_indx = (u8)rid;
4681                 buf[recps].content.rid = (u8)rid;
4682                 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
4683                 /* the new entry created should also be part of rg_list to
4684                  * make sure we have complete recipe
4685                  */
4686                 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
4687                         sizeof(*last_chain_entry));
4688                 if (!last_chain_entry) {
4689                         status = ICE_ERR_NO_MEMORY;
4690                         goto err_unroll;
4691                 }
4692                 last_chain_entry->rid = rid;
4693                 ice_memset(&buf[recps].content.lkup_indx, 0,
4694                            sizeof(buf[recps].content.lkup_indx),
4695                            ICE_NONDMA_MEM);
4696                 /* All recipes use look-up index 0 to match switch ID. */
4697                 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4698                 buf[recps].content.mask[0] =
4699                         CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4700                 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4701                         buf[recps].content.lkup_indx[i] =
4702                                 ICE_AQ_RECIPE_LKUP_IGNORE;
4703                         buf[recps].content.mask[i] = 0;
4704                 }
4705
4706                 i = 1;
4707                 /* update r_bitmap with the recp that is used for chaining */
4708                 ice_set_bit(rid, rm->r_bitmap);
4709                 /* this is the recipe that chains all the other recipes so it
4710                  * should not have a chaining ID to indicate the same
4711                  */
4712                 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
4713                 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
4714                                     l_entry) {
4715                         last_chain_entry->fv_idx[i] = entry->chain_idx;
4716                         buf[recps].content.lkup_indx[i] = entry->chain_idx;
4717                         buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
4718                         ice_set_bit(entry->rid, rm->r_bitmap);
4719                 }
4720                 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
4721                 if (sizeof(buf[recps].recipe_bitmap) >=
4722                     sizeof(rm->r_bitmap)) {
4723                         ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
4724                                    sizeof(buf[recps].recipe_bitmap),
4725                                    ICE_NONDMA_TO_NONDMA);
4726                 } else {
4727                         status = ICE_ERR_BAD_PTR;
4728                         goto err_unroll;
4729                 }
4730                 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4731
4732                 /* To differentiate among different UDP tunnels, a meta data ID
4733                  * flag is used.
4734                  */
4735                 if (match_tun) {
4736                         buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
4737                         buf[recps].content.mask[i] =
4738                                 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
4739                 }
4740
4741                 recps++;
4742                 rm->root_rid = (u8)rid;
4743         }
4744         status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4745         if (status)
4746                 goto err_unroll;
4747
4748         status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
4749         ice_release_change_lock(hw);
4750         if (status)
4751                 goto err_unroll;
4752
4753         /* Every recipe that just got created add it to the recipe
4754          * book keeping list
4755          */
4756         LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4757                 struct ice_switch_info *sw = hw->switch_info;
4758                 struct ice_sw_recipe *recp;
4759
4760                 recp = &sw->recp_list[entry->rid];
4761                 recp->root_rid = entry->rid;
4762                 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
4763                            entry->r_group.n_val_pairs *
4764                            sizeof(struct ice_fv_word),
4765                            ICE_NONDMA_TO_NONDMA);
4766
4767                 recp->n_ext_words = entry->r_group.n_val_pairs;
4768                 recp->chain_idx = entry->chain_idx;
4769                 recp->recp_created = true;
4770                 recp->big_recp = false;
4771         }
4772         rm->root_buf = buf;
4773         ice_free(hw, tmp);
4774         return status;
4775
4776 err_unroll:
4777 err_mem:
4778         ice_free(hw, tmp);
4779         ice_free(hw, buf);
4780         return status;
4781 }
4782
4783 /**
4784  * ice_create_recipe_group - creates recipe group
4785  * @hw: pointer to hardware structure
4786  * @rm: recipe management list entry
4787  * @lkup_exts: lookup elements
4788  */
4789 static enum ice_status
4790 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
4791                         struct ice_prot_lkup_ext *lkup_exts)
4792 {
4793         struct ice_recp_grp_entry *entry;
4794         struct ice_recp_grp_entry *tmp;
4795         enum ice_status status;
4796         u8 recp_count = 0;
4797         u16 groups, i;
4798
4799         rm->n_grp_count = 0;
4800
4801         /* Each switch recipe can match up to 5 words or metadata. One word in
4802          * each recipe is used to match the switch ID. Four words are left for
4803          * matching other values. If the new advanced recipe requires more than
4804          * 4 words, it needs to be split into multiple recipes which are chained
4805          * together using the intermediate result that each produces as input to
4806          * the other recipes in the sequence.
4807          */
4808         groups = ARRAY_SIZE(ice_recipe_pack);
4809
4810         /* Check if any of the preferred recipes from the grouping policy
4811          * matches.
4812          */
4813         for (i = 0; i < groups; i++)
4814                 /* Check if the recipe from the preferred grouping matches
4815                  * or is a subset of the fields that needs to be looked up.
4816                  */
4817                 if (ice_is_recipe_subset(lkup_exts, &ice_recipe_pack[i])) {
4818                         /* This recipe can be used by itself or grouped with
4819                          * other recipes.
4820                          */
4821                         entry = (struct ice_recp_grp_entry *)
4822                                 ice_malloc(hw, sizeof(*entry));
4823                         if (!entry) {
4824                                 status = ICE_ERR_NO_MEMORY;
4825                                 goto err_unroll;
4826                         }
4827                         entry->r_group = ice_recipe_pack[i];
4828                         LIST_ADD(&entry->l_entry, &rm->rg_list);
4829                         rm->n_grp_count++;
4830                 }
4831
4832         /* Create recipes for words that are marked not done by packing them
4833          * as best fit.
4834          */
4835         status = ice_create_first_fit_recp_def(hw, lkup_exts,
4836                                                &rm->rg_list, &recp_count);
4837         if (!status) {
4838                 rm->n_grp_count += recp_count;
4839                 rm->n_ext_words = lkup_exts->n_val_words;
4840                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
4841                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
4842                 goto out;
4843         }
4844
4845 err_unroll:
4846         LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
4847                                  l_entry) {
4848                 LIST_DEL(&entry->l_entry);
4849                 ice_free(hw, entry);
4850         }
4851
4852 out:
4853         return status;
4854 }
4855
4856 /**
4857  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
4858  * @hw: pointer to hardware structure
4859  * @lkups: lookup elements or match criteria for the advanced recipe, one
4860  *         structure per protocol header
4861  * @lkups_cnt: number of protocols
4862  * @fv_list: pointer to a list that holds the returned field vectors
4863  */
4864 static enum ice_status
4865 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4866            struct LIST_HEAD_TYPE *fv_list)
4867 {
4868         enum ice_status status;
4869         u16 *prot_ids;
4870         u16 i;
4871
4872         prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
4873         if (!prot_ids)
4874                 return ICE_ERR_NO_MEMORY;
4875
4876         for (i = 0; i < lkups_cnt; i++)
4877                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
4878                         status = ICE_ERR_CFG;
4879                         goto free_mem;
4880                 }
4881
4882         /* Find field vectors that include all specified protocol types */
4883         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);
4884
4885 free_mem:
4886         ice_free(hw, prot_ids);
4887         return status;
4888 }
4889
4890 /**
4891  * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
4892  * @hw: pointer to hardware structure
4893  * @lkups: lookup elements or match criteria for the advanced recipe, one
4894  *  structure per protocol header
4895  * @lkups_cnt: number of protocols
4896  * @rinfo: other information regarding the rule e.g. priority and action info
4897  * @rid: return the recipe ID of the recipe created
4898  */
4899 static enum ice_status
4900 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
4901                    u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
4902 {
4903         struct ice_prot_lkup_ext *lkup_exts;
4904         struct ice_recp_grp_entry *r_entry;
4905         struct ice_sw_fv_list_entry *fvit;
4906         struct ice_recp_grp_entry *r_tmp;
4907         struct ice_sw_fv_list_entry *tmp;
4908         enum ice_status status = ICE_SUCCESS;
4909         struct ice_sw_recipe *rm;
4910         bool match_tun = false;
4911         u8 i;
4912
4913         if (!lkups_cnt)
4914                 return ICE_ERR_PARAM;
4915
4916         lkup_exts = (struct ice_prot_lkup_ext *)
4917                 ice_malloc(hw, sizeof(*lkup_exts));
4918         if (!lkup_exts)
4919                 return ICE_ERR_NO_MEMORY;
4920
4921         /* Determine the number of words to be matched and if it exceeds a
4922          * recipe's restrictions
4923          */
4924         for (i = 0; i < lkups_cnt; i++) {
4925                 u16 count;
4926
4927                 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
4928                         status = ICE_ERR_CFG;
4929                         goto err_free_lkup_exts;
4930                 }
4931
4932                 count = ice_fill_valid_words(&lkups[i], lkup_exts);
4933                 if (!count) {
4934                         status = ICE_ERR_CFG;
4935                         goto err_free_lkup_exts;
4936                 }
4937         }
4938
4939         *rid = ice_find_recp(hw, lkup_exts);
4940         if (*rid < ICE_MAX_NUM_RECIPES)
4941                 /* Success if found a recipe that match the existing criteria */
4942                 goto err_free_lkup_exts;
4943
4944         /* Recipe we need does not exist, add a recipe */
4945
4946         rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
4947         if (!rm) {
4948                 status = ICE_ERR_NO_MEMORY;
4949                 goto err_free_lkup_exts;
4950         }
4951
4952         /* Get field vectors that contain fields extracted from all the protocol
4953          * headers being programmed.
4954          */
4955         INIT_LIST_HEAD(&rm->fv_list);
4956         INIT_LIST_HEAD(&rm->rg_list);
4957
4958         status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);
4959         if (status)
4960                 goto err_unroll;
4961
4962         /* Group match words into recipes using preferred recipe grouping
4963          * criteria.
4964          */
4965         status = ice_create_recipe_group(hw, rm, lkup_exts);
4966         if (status)
4967                 goto err_unroll;
4968
4969         /* There is only profile for UDP tunnels. So, it is necessary to use a
4970          * metadata ID flag to differentiate different tunnel types. A separate
4971          * recipe needs to be used for the metadata.
4972          */
4973         if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
4974              rinfo->tun_type == ICE_SW_TUN_GENEVE ||
4975              rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
4976                 match_tun = true;
4977
4978         /* set the recipe priority if specified */
4979         rm->priority = rinfo->priority ? rinfo->priority : 0;
4980
4981         /* Find offsets from the field vector. Pick the first one for all the
4982          * recipes.
4983          */
4984         ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
4985         status = ice_add_sw_recipe(hw, rm, match_tun);
4986         if (status)
4987                 goto err_unroll;
4988
4989         /* Associate all the recipes created with all the profiles in the
4990          * common field vector.
4991          */
4992         LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
4993                             list_entry) {
4994                 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
4995
4996                 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
4997                                                       (u8 *)r_bitmap, NULL);
4998                 if (status)
4999                         goto err_unroll;
5000
5001                 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5002                               ICE_MAX_NUM_RECIPES);
5003                 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5004                 if (status)
5005                         goto err_unroll;
5006
5007                 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5008                                                       (u8 *)rm->r_bitmap,
5009                                                       NULL);
5010                 ice_release_change_lock(hw);
5011
5012                 if (status)
5013                         goto err_unroll;
5014         }
5015
5016         *rid = rm->root_rid;
5017         ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5018                    lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
5019 err_unroll:
5020         LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5021                                  ice_recp_grp_entry, l_entry) {
5022                 LIST_DEL(&r_entry->l_entry);
5023                 ice_free(hw, r_entry);
5024         }
5025
5026         LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5027                                  list_entry) {
5028                 LIST_DEL(&fvit->list_entry);
5029                 ice_free(hw, fvit);
5030         }
5031
5032         if (rm->root_buf)
5033                 ice_free(hw, rm->root_buf);
5034
5035         ice_free(hw, rm);
5036
5037 err_free_lkup_exts:
5038         ice_free(hw, lkup_exts);
5039
5040         return status;
5041 }
5042
5043 #define ICE_MAC_HDR_OFFSET      0
5044 #define ICE_IP_HDR_OFFSET       14
5045 #define ICE_GRE_HDR_OFFSET      34
5046 #define ICE_MAC_IL_HDR_OFFSET   42
5047 #define ICE_IP_IL_HDR_OFFSET    56
5048 #define ICE_L4_HDR_OFFSET       34
5049 #define ICE_UDP_TUN_HDR_OFFSET  42
5050
5051 /**
5052  * ice_find_dummy_packet - find dummy packet with given match criteria
5053  *
5054  * @lkups: lookup elements or match criteria for the advanced recipe, one
5055  *         structure per protocol header
5056  * @lkups_cnt: number of protocols
5057  * @tun_type: tunnel type from the match criteria
5058  * @pkt: dummy packet to fill according to filter match criteria
5059  * @pkt_len: packet length of dummy packet
5060  */
5061 static void
5062 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5063                       enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5064                       u16 *pkt_len)
5065 {
5066         u16 i;
5067
5068         if (tun_type == ICE_SW_TUN_NVGRE || tun_type == ICE_ALL_TUNNELS) {
5069                 *pkt = dummy_gre_packet;
5070                 *pkt_len = sizeof(dummy_gre_packet);
5071                 return;
5072         }
5073
5074         if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5075             tun_type == ICE_SW_TUN_VXLAN_GPE) {
5076                 *pkt = dummy_udp_tun_packet;
5077                 *pkt_len = sizeof(dummy_udp_tun_packet);
5078                 return;
5079         }
5080
5081         for (i = 0; i < lkups_cnt; i++) {
5082                 if (lkups[i].type == ICE_UDP_ILOS) {
5083                         *pkt = dummy_udp_tun_packet;
5084                         *pkt_len = sizeof(dummy_udp_tun_packet);
5085                         return;
5086                 }
5087         }
5088
5089         *pkt = dummy_tcp_tun_packet;
5090         *pkt_len = sizeof(dummy_tcp_tun_packet);
5091 }
5092
5093 /**
5094  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5095  *
5096  * @lkups: lookup elements or match criteria for the advanced recipe, one
5097  *         structure per protocol header
5098  * @lkups_cnt: number of protocols
5099  * @tun_type: to know if the dummy packet is supposed to be tunnel packet
5100  * @s_rule: stores rule information from the match criteria
5101  * @dummy_pkt: dummy packet to fill according to filter match criteria
5102  * @pkt_len: packet length of dummy packet
5103  */
5104 static void
5105 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5106                           enum ice_sw_tunnel_type tun_type,
5107                           struct ice_aqc_sw_rules_elem *s_rule,
5108                           const u8 *dummy_pkt, u16 pkt_len)
5109 {
5110         u8 *pkt;
5111         u16 i;
5112
5113         /* Start with a packet with a pre-defined/dummy content. Then, fill
5114          * in the header values to be looked up or matched.
5115          */
5116         pkt = s_rule->pdata.lkup_tx_rx.hdr;
5117
5118         ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5119
5120         for (i = 0; i < lkups_cnt; i++) {
5121                 u32 len, pkt_off, hdr_size, field_off;
5122
5123                 switch (lkups[i].type) {
5124                 case ICE_MAC_OFOS:
5125                 case ICE_MAC_IL:
5126                         pkt_off = offsetof(struct ice_ether_hdr, dst_addr) +
5127                                 ((lkups[i].type == ICE_MAC_IL) ?
5128                                  ICE_MAC_IL_HDR_OFFSET : 0);
5129                         len = sizeof(lkups[i].h_u.eth_hdr.dst_addr);
5130                         if ((tun_type == ICE_SW_TUN_VXLAN ||
5131                              tun_type == ICE_SW_TUN_GENEVE ||
5132                              tun_type == ICE_SW_TUN_VXLAN_GPE) &&
5133                              lkups[i].type == ICE_MAC_IL) {
5134                                 pkt_off += sizeof(struct ice_udp_tnl_hdr);
5135                         }
5136
5137                         ice_memcpy(&pkt[pkt_off],
5138                                    &lkups[i].h_u.eth_hdr.dst_addr, len,
5139                                    ICE_NONDMA_TO_NONDMA);
5140                         pkt_off = offsetof(struct ice_ether_hdr, src_addr) +
5141                                 ((lkups[i].type == ICE_MAC_IL) ?
5142                                  ICE_MAC_IL_HDR_OFFSET : 0);
5143                         len = sizeof(lkups[i].h_u.eth_hdr.src_addr);
5144                         if ((tun_type == ICE_SW_TUN_VXLAN ||
5145                              tun_type == ICE_SW_TUN_GENEVE ||
5146                              tun_type == ICE_SW_TUN_VXLAN_GPE) &&
5147                              lkups[i].type == ICE_MAC_IL) {
5148                                 pkt_off += sizeof(struct ice_udp_tnl_hdr);
5149                         }
5150                         ice_memcpy(&pkt[pkt_off],
5151                                    &lkups[i].h_u.eth_hdr.src_addr, len,
5152                                    ICE_NONDMA_TO_NONDMA);
5153                         if (lkups[i].h_u.eth_hdr.ethtype_id) {
5154                                 pkt_off = offsetof(struct ice_ether_hdr,
5155                                                    ethtype_id) +
5156                                         ((lkups[i].type == ICE_MAC_IL) ?
5157                                          ICE_MAC_IL_HDR_OFFSET : 0);
5158                                 len = sizeof(lkups[i].h_u.eth_hdr.ethtype_id);
5159                                 if ((tun_type == ICE_SW_TUN_VXLAN ||
5160                                      tun_type == ICE_SW_TUN_GENEVE ||
5161                                      tun_type == ICE_SW_TUN_VXLAN_GPE) &&
5162                                      lkups[i].type == ICE_MAC_IL) {
5163                                         pkt_off +=
5164                                                 sizeof(struct ice_udp_tnl_hdr);
5165                                 }
5166                                 ice_memcpy(&pkt[pkt_off],
5167                                            &lkups[i].h_u.eth_hdr.ethtype_id,
5168                                            len, ICE_NONDMA_TO_NONDMA);
5169                         }
5170                         break;
5171                 case ICE_IPV4_OFOS:
5172                         hdr_size = sizeof(struct ice_ipv4_hdr);
5173                         if (lkups[i].h_u.ipv4_hdr.dst_addr) {
5174                                 pkt_off = ICE_IP_HDR_OFFSET +
5175                                            offsetof(struct ice_ipv4_hdr,
5176                                                     dst_addr);
5177                                 field_off = offsetof(struct ice_ipv4_hdr,
5178                                                      dst_addr);
5179                                 len = hdr_size - field_off;
5180                                 ice_memcpy(&pkt[pkt_off],
5181                                            &lkups[i].h_u.ipv4_hdr.dst_addr,
5182                                            len, ICE_NONDMA_TO_NONDMA);
5183                         }
5184                         if (lkups[i].h_u.ipv4_hdr.src_addr) {
5185                                 pkt_off = ICE_IP_HDR_OFFSET +
5186                                            offsetof(struct ice_ipv4_hdr,
5187                                                     src_addr);
5188                                 field_off = offsetof(struct ice_ipv4_hdr,
5189                                                      src_addr);
5190                                 len = hdr_size - field_off;
5191                                 ice_memcpy(&pkt[pkt_off],
5192                                            &lkups[i].h_u.ipv4_hdr.src_addr,
5193                                            len, ICE_NONDMA_TO_NONDMA);
5194                         }
5195                         break;
5196                 case ICE_IPV4_IL:
5197                         break;
5198                 case ICE_TCP_IL:
5199                 case ICE_UDP_ILOS:
5200                 case ICE_SCTP_IL:
5201                         hdr_size = sizeof(struct ice_udp_tnl_hdr);
5202                         if (lkups[i].h_u.l4_hdr.dst_port) {
5203                                 pkt_off = ICE_L4_HDR_OFFSET +
5204                                            offsetof(struct ice_l4_hdr,
5205                                                     dst_port);
5206                                 field_off = offsetof(struct ice_l4_hdr,
5207                                                      dst_port);
5208                                 len =  hdr_size - field_off;
5209                                 ice_memcpy(&pkt[pkt_off],
5210                                            &lkups[i].h_u.l4_hdr.dst_port,
5211                                            len, ICE_NONDMA_TO_NONDMA);
5212                         }
5213                         if (lkups[i].h_u.l4_hdr.src_port) {
5214                                 pkt_off = ICE_L4_HDR_OFFSET +
5215                                         offsetof(struct ice_l4_hdr, src_port);
5216                                 field_off = offsetof(struct ice_l4_hdr,
5217                                                      src_port);
5218                                 len =  hdr_size - field_off;
5219                                 ice_memcpy(&pkt[pkt_off],
5220                                            &lkups[i].h_u.l4_hdr.src_port,
5221                                            len, ICE_NONDMA_TO_NONDMA);
5222                         }
5223                         break;
5224                 case ICE_VXLAN:
5225                 case ICE_GENEVE:
5226                 case ICE_VXLAN_GPE:
5227                         pkt_off = ICE_UDP_TUN_HDR_OFFSET +
5228                                    offsetof(struct ice_udp_tnl_hdr, vni);
5229                         field_off = offsetof(struct ice_udp_tnl_hdr, vni);
5230                         len =  sizeof(struct ice_udp_tnl_hdr) - field_off;
5231                         ice_memcpy(&pkt[pkt_off], &lkups[i].h_u.tnl_hdr.vni,
5232                                    len, ICE_NONDMA_TO_NONDMA);
5233                         break;
5234                 default:
5235                         break;
5236                 }
5237         }
5238         s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5239 }
5240
5241 /**
5242  * ice_find_adv_rule_entry - Search a rule entry
5243  * @hw: pointer to the hardware structure
5244  * @lkups: lookup elements or match criteria for the advanced recipe, one
5245  *         structure per protocol header
5246  * @lkups_cnt: number of protocols
5247  * @recp_id: recipe ID for which we are finding the rule
5248  * @rinfo: other information regarding the rule e.g. priority and action info
5249  *
5250  * Helper function to search for a given advance rule entry
5251  * Returns pointer to entry storing the rule if found
5252  */
5253 static struct ice_adv_fltr_mgmt_list_entry *
5254 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5255                         u16 lkups_cnt, u8 recp_id,
5256                         struct ice_adv_rule_info *rinfo)
5257 {
5258         struct ice_adv_fltr_mgmt_list_entry *list_itr;
5259         struct ice_switch_info *sw = hw->switch_info;
5260         int i;
5261
5262         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5263                             ice_adv_fltr_mgmt_list_entry, list_entry) {
5264                 bool lkups_matched = true;
5265
5266                 if (lkups_cnt != list_itr->lkups_cnt)
5267                         continue;
5268                 for (i = 0; i < list_itr->lkups_cnt; i++)
5269                         if (memcmp(&list_itr->lkups[i], &lkups[i],
5270                                    sizeof(*lkups))) {
5271                                 lkups_matched = false;
5272                                 break;
5273                         }
5274                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5275                     rinfo->tun_type == list_itr->rule_info.tun_type &&
5276                     lkups_matched)
5277                         return list_itr;
5278         }
5279         return NULL;
5280 }
5281
5282 /**
5283  * ice_adv_add_update_vsi_list
5284  * @hw: pointer to the hardware structure
5285  * @m_entry: pointer to current adv filter management list entry
5286  * @cur_fltr: filter information from the book keeping entry
5287  * @new_fltr: filter information with the new VSI to be added
5288  *
5289  * Call AQ command to add or update previously created VSI list with new VSI.
5290  *
5291  * Helper function to do book keeping associated with adding filter information
5292  * The algorithm to do the booking keeping is described below :
5293  * When a VSI needs to subscribe to a given advanced filter
5294  *      if only one VSI has been added till now
5295  *              Allocate a new VSI list and add two VSIs
5296  *              to this list using switch rule command
5297  *              Update the previously created switch rule with the
5298  *              newly created VSI list ID
5299  *      if a VSI list was previously created
5300  *              Add the new VSI to the previously created VSI list set
5301  *              using the update switch rule command
5302  */
5303 static enum ice_status
5304 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5305                             struct ice_adv_fltr_mgmt_list_entry *m_entry,
5306                             struct ice_adv_rule_info *cur_fltr,
5307                             struct ice_adv_rule_info *new_fltr)
5308 {
5309         enum ice_status status;
5310         u16 vsi_list_id = 0;
5311
5312         if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5313             cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5314                 return ICE_ERR_NOT_IMPL;
5315
5316         if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5317             new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5318                 return ICE_ERR_ALREADY_EXISTS;
5319
5320         if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5321              new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5322             (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5323              cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5324                 return ICE_ERR_NOT_IMPL;
5325
5326         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5327                  /* Only one entry existed in the mapping and it was not already
5328                   * a part of a VSI list. So, create a VSI list with the old and
5329                   * new VSIs.
5330                   */
5331                 struct ice_fltr_info tmp_fltr;
5332                 u16 vsi_handle_arr[2];
5333
5334                 /* A rule already exists with the new VSI being added */
5335                 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5336                     new_fltr->sw_act.fwd_id.hw_vsi_id)
5337                         return ICE_ERR_ALREADY_EXISTS;
5338
5339                 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5340                 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5341                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5342                                                   &vsi_list_id,
5343                                                   ICE_SW_LKUP_LAST);
5344                 if (status)
5345                         return status;
5346
5347                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5348                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5349                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5350                 /* Update the previous switch rule of "forward to VSI" to
5351                  * "fwd to VSI list"
5352                  */
5353                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5354                 if (status)
5355                         return status;
5356
5357                 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5358                 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5359                 m_entry->vsi_list_info =
5360                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5361                                                 vsi_list_id);
5362         } else {
5363                 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5364
5365                 if (!m_entry->vsi_list_info)
5366                         return ICE_ERR_CFG;
5367
5368                 /* A rule already exists with the new VSI being added */
5369                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
5370                         return ICE_SUCCESS;
5371
5372                 /* Update the previously created VSI list set with
5373                  * the new VSI ID passed in
5374                  */
5375                 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5376
5377                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5378                                                   vsi_list_id, false,
5379                                                   ice_aqc_opc_update_sw_rules,
5380                                                   ICE_SW_LKUP_LAST);
5381                 /* update VSI list mapping info with new VSI ID */
5382                 if (!status)
5383                         ice_set_bit(vsi_handle,
5384                                     m_entry->vsi_list_info->vsi_map);
5385         }
5386         if (!status)
5387                 m_entry->vsi_count++;
5388         return status;
5389 }
5390
5391 /**
5392  * ice_add_adv_rule - create an advanced switch rule
5393  * @hw: pointer to the hardware structure
5394  * @lkups: information on the words that needs to be looked up. All words
5395  * together makes one recipe
5396  * @lkups_cnt: num of entries in the lkups array
5397  * @rinfo: other information related to the rule that needs to be programmed
5398  * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5399  *               ignored is case of error.
5400  *
5401  * This function can program only 1 rule at a time. The lkups is used to
5402  * describe the all the words that forms the "lookup" portion of the recipe.
5403  * These words can span multiple protocols. Callers to this function need to
5404  * pass in a list of protocol headers with lookup information along and mask
5405  * that determines which words are valid from the given protocol header.
5406  * rinfo describes other information related to this rule such as forwarding
5407  * IDs, priority of this rule, etc.
5408  */
5409 enum ice_status
5410 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5411                  u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5412                  struct ice_rule_query_data *added_entry)
5413 {
5414         struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5415         u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5416         struct ice_aqc_sw_rules_elem *s_rule;
5417         struct LIST_HEAD_TYPE *rule_head;
5418         struct ice_switch_info *sw;
5419         enum ice_status status;
5420         const u8 *pkt = NULL;
5421         u32 act = 0;
5422
5423         if (!lkups_cnt)
5424                 return ICE_ERR_PARAM;
5425
5426         for (i = 0; i < lkups_cnt; i++) {
5427                 u16 j, *ptr;
5428
5429                 /* Validate match masks to make sure they match complete 16-bit
5430                  * words.
5431                  */
5432                 ptr = (u16 *)&lkups->m_u;
5433                 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
5434                         if (ptr[j] != 0 && ptr[j] != 0xffff)
5435                                 return ICE_ERR_PARAM;
5436         }
5437
5438         if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5439               rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5440               rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5441                 return ICE_ERR_CFG;
5442
5443         vsi_handle = rinfo->sw_act.vsi_handle;
5444         if (!ice_is_vsi_valid(hw, vsi_handle))
5445                 return ICE_ERR_PARAM;
5446
5447         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5448                 rinfo->sw_act.fwd_id.hw_vsi_id =
5449                         ice_get_hw_vsi_num(hw, vsi_handle);
5450         if (rinfo->sw_act.flag & ICE_FLTR_TX)
5451                 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
5452
5453         status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5454         if (status)
5455                 return status;
5456         m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5457         if (m_entry) {
5458                 /* we have to add VSI to VSI_LIST and increment vsi_count.
5459                  * Also Update VSI list so that we can change forwarding rule
5460                  * if the rule already exists, we will check if it exists with
5461                  * same vsi_id, if not then add it to the VSI list if it already
5462                  * exists if not then create a VSI list and add the existing VSI
5463                  * ID and the new VSI ID to the list
5464                  * We will add that VSI to the list
5465                  */
5466                 status = ice_adv_add_update_vsi_list(hw, m_entry,
5467                                                      &m_entry->rule_info,
5468                                                      rinfo);
5469                 if (added_entry) {
5470                         added_entry->rid = rid;
5471                         added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5472                         added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5473                 }
5474                 return status;
5475         }
5476         ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
5477                               &pkt_len);
5478         rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5479         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
5480         if (!s_rule)
5481                 return ICE_ERR_NO_MEMORY;
5482         act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
5483         switch (rinfo->sw_act.fltr_act) {
5484         case ICE_FWD_TO_VSI:
5485                 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5486                         ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5487                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5488                 break;
5489         case ICE_FWD_TO_Q:
5490                 act |= ICE_SINGLE_ACT_TO_Q;
5491                 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5492                        ICE_SINGLE_ACT_Q_INDEX_M;
5493                 break;
5494         case ICE_DROP_PACKET:
5495                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5496                        ICE_SINGLE_ACT_VALID_BIT;
5497                 break;
5498         default:
5499                 status = ICE_ERR_CFG;
5500                 goto err_ice_add_adv_rule;
5501         }
5502
5503         /* set the rule LOOKUP type based on caller specified 'RX'
5504          * instead of hardcoding it to be either LOOKUP_TX/RX
5505          *
5506          * for 'RX' set the source to be the port number
5507          * for 'TX' set the source to be the source HW VSI number (determined
5508          * by caller)
5509          */
5510         if (rinfo->rx) {
5511                 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
5512                 s_rule->pdata.lkup_tx_rx.src =
5513                         CPU_TO_LE16(hw->port_info->lport);
5514         } else {
5515                 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
5516                 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
5517         }
5518
5519         s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
5520         s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
5521
5522         ice_fill_adv_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, s_rule,
5523                                   pkt, pkt_len);
5524
5525         status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5526                                  rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5527                                  NULL);
5528         if (status)
5529                 goto err_ice_add_adv_rule;
5530         adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
5531                 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
5532         if (!adv_fltr) {
5533                 status = ICE_ERR_NO_MEMORY;
5534                 goto err_ice_add_adv_rule;
5535         }
5536
5537         adv_fltr->lkups = (struct ice_adv_lkup_elem *)
5538                 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
5539                            ICE_NONDMA_TO_NONDMA);
5540         if (!adv_fltr->lkups) {
5541                 status = ICE_ERR_NO_MEMORY;
5542                 goto err_ice_add_adv_rule;
5543         }
5544
5545         adv_fltr->lkups_cnt = lkups_cnt;
5546         adv_fltr->rule_info = *rinfo;
5547         adv_fltr->rule_info.fltr_rule_id =
5548                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5549         sw = hw->switch_info;
5550         sw->recp_list[rid].adv_rule = true;
5551         rule_head = &sw->recp_list[rid].filt_rules;
5552
5553         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
5554                 struct ice_fltr_info tmp_fltr;
5555
5556                 tmp_fltr.fltr_rule_id =
5557                         LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5558                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5559                 tmp_fltr.fwd_id.hw_vsi_id =
5560                         ice_get_hw_vsi_num(hw, vsi_handle);
5561                 tmp_fltr.vsi_handle = vsi_handle;
5562                 /* Update the previous switch rule of "forward to VSI" to
5563                  * "fwd to VSI list"
5564                  */
5565                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5566                 if (status)
5567                         goto err_ice_add_adv_rule;
5568                 adv_fltr->vsi_count = 1;
5569         }
5570
5571         /* Add rule entry to book keeping list */
5572         LIST_ADD(&adv_fltr->list_entry, rule_head);
5573         if (added_entry) {
5574                 added_entry->rid = rid;
5575                 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5576                 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5577         }
5578 err_ice_add_adv_rule:
5579         if (status && adv_fltr) {
5580                 ice_free(hw, adv_fltr->lkups);
5581                 ice_free(hw, adv_fltr);
5582         }
5583
5584         ice_free(hw, s_rule);
5585
5586         return status;
5587 }
5588
5589 /**
5590  * ice_adv_rem_update_vsi_list
5591  * @hw: pointer to the hardware structure
5592  * @vsi_handle: VSI handle of the VSI to remove
5593  * @fm_list: filter management entry for which the VSI list management needs to
5594  *           be done
5595  */
5596 static enum ice_status
5597 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5598                             struct ice_adv_fltr_mgmt_list_entry *fm_list)
5599 {
5600         struct ice_vsi_list_map_info *vsi_list_info;
5601         enum ice_sw_lkup_type lkup_type;
5602         enum ice_status status;
5603         u16 vsi_list_id;
5604
5605         if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5606             fm_list->vsi_count == 0)
5607                 return ICE_ERR_PARAM;
5608
5609         /* A rule with the VSI being removed does not exist */
5610         if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
5611                 return ICE_ERR_DOES_NOT_EXIST;
5612
5613         lkup_type = ICE_SW_LKUP_LAST;
5614         vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
5615         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5616                                           ice_aqc_opc_update_sw_rules,
5617                                           lkup_type);
5618         if (status)
5619                 return status;
5620
5621         fm_list->vsi_count--;
5622         ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
5623         vsi_list_info = fm_list->vsi_list_info;
5624         if (fm_list->vsi_count == 1) {
5625                 struct ice_fltr_info tmp_fltr;
5626                 u16 rem_vsi_handle;
5627
5628                 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
5629                                                     ICE_MAX_VSI);
5630                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
5631                         return ICE_ERR_OUT_OF_RANGE;
5632
5633                 /* Make sure VSI list is empty before removing it below */
5634                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
5635                                                   vsi_list_id, true,
5636                                                   ice_aqc_opc_update_sw_rules,
5637                                                   lkup_type);
5638                 if (status)
5639                         return status;
5640                 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
5641                 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
5642                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5643                 tmp_fltr.fwd_id.hw_vsi_id =
5644                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
5645                 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
5646                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
5647
5648                 /* Update the previous switch rule of "MAC forward to VSI" to
5649                  * "MAC fwd to VSI list"
5650                  */
5651                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5652                 if (status) {
5653                         ice_debug(hw, ICE_DBG_SW,
5654                                   "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
5655                                   tmp_fltr.fwd_id.hw_vsi_id, status);
5656                         return status;
5657                 }
5658         }
5659
5660         if (fm_list->vsi_count == 1) {
5661                 /* Remove the VSI list since it is no longer used */
5662                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
5663                 if (status) {
5664                         ice_debug(hw, ICE_DBG_SW,
5665                                   "Failed to remove VSI list %d, error %d\n",
5666                                   vsi_list_id, status);
5667                         return status;
5668                 }
5669
5670                 LIST_DEL(&vsi_list_info->list_entry);
5671                 ice_free(hw, vsi_list_info);
5672                 fm_list->vsi_list_info = NULL;
5673         }
5674
5675         return status;
5676 }
5677
5678 /**
5679  * ice_rem_adv_rule - removes existing advanced switch rule
5680  * @hw: pointer to the hardware structure
5681  * @lkups: information on the words that needs to be looked up. All words
5682  *         together makes one recipe
5683  * @lkups_cnt: num of entries in the lkups array
5684  * @rinfo: Its the pointer to the rule information for the rule
5685  *
5686  * This function can be used to remove 1 rule at a time. The lkups is
5687  * used to describe all the words that forms the "lookup" portion of the
5688  * rule. These words can span multiple protocols. Callers to this function
5689  * need to pass in a list of protocol headers with lookup information along
5690  * and mask that determines which words are valid from the given protocol
5691  * header. rinfo describes other information related to this rule such as
5692  * forwarding IDs, priority of this rule, etc.
5693  */
5694 enum ice_status
5695 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5696                  u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
5697 {
5698         struct ice_adv_fltr_mgmt_list_entry *list_elem;
5699         struct ice_prot_lkup_ext lkup_exts;
5700         u16 rule_buf_sz, pkt_len, i, rid;
5701         enum ice_status status = ICE_SUCCESS;
5702         bool remove_rule = false;
5703         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5704         const u8 *pkt = NULL;
5705         u16 vsi_handle;
5706
5707         ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
5708         for (i = 0; i < lkups_cnt; i++) {
5709                 u16 count;
5710
5711                 if (lkups[i].type >= ICE_PROTOCOL_LAST)
5712                         return ICE_ERR_CFG;
5713
5714                 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
5715                 if (!count)
5716                         return ICE_ERR_CFG;
5717         }
5718         rid = ice_find_recp(hw, &lkup_exts);
5719         /* If did not find a recipe that match the existing criteria */
5720         if (rid == ICE_MAX_NUM_RECIPES)
5721                 return ICE_ERR_PARAM;
5722
5723         rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
5724         list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5725         /* the rule is already removed */
5726         if (!list_elem)
5727                 return ICE_SUCCESS;
5728         ice_acquire_lock(rule_lock);
5729         if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
5730                 remove_rule = true;
5731         } else if (list_elem->vsi_count > 1) {
5732                 list_elem->vsi_list_info->ref_cnt--;
5733                 remove_rule = false;
5734                 vsi_handle = rinfo->sw_act.vsi_handle;
5735                 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5736         } else {
5737                 vsi_handle = rinfo->sw_act.vsi_handle;
5738                 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5739                 if (status) {
5740                         ice_release_lock(rule_lock);
5741                         return status;
5742                 }
5743                 if (list_elem->vsi_count == 0)
5744                         remove_rule = true;
5745         }
5746         ice_release_lock(rule_lock);
5747         if (remove_rule) {
5748                 struct ice_aqc_sw_rules_elem *s_rule;
5749
5750                 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
5751                                       &pkt_len);
5752                 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5753                 s_rule =
5754                         (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
5755                                                                    rule_buf_sz);
5756                 if (!s_rule)
5757                         return ICE_ERR_NO_MEMORY;
5758                 s_rule->pdata.lkup_tx_rx.act = 0;
5759                 s_rule->pdata.lkup_tx_rx.index =
5760                         CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
5761                 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
5762                 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5763                                          rule_buf_sz, 1,
5764                                          ice_aqc_opc_remove_sw_rules, NULL);
5765                 if (status == ICE_SUCCESS) {
5766                         ice_acquire_lock(rule_lock);
5767                         LIST_DEL(&list_elem->list_entry);
5768                         ice_free(hw, list_elem->lkups);
5769                         ice_free(hw, list_elem);
5770                         ice_release_lock(rule_lock);
5771                 }
5772                 ice_free(hw, s_rule);
5773         }
5774         return status;
5775 }
5776
5777 /**
5778  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
5779  * @hw: pointer to the hardware structure
5780  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
5781  *
5782  * This function is used to remove 1 rule at a time. The removal is based on
5783  * the remove_entry parameter. This function will remove rule for a given
5784  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
5785  */
5786 enum ice_status
5787 ice_rem_adv_rule_by_id(struct ice_hw *hw,
5788                        struct ice_rule_query_data *remove_entry)
5789 {
5790         struct ice_adv_fltr_mgmt_list_entry *list_itr;
5791         struct LIST_HEAD_TYPE *list_head;
5792         struct ice_adv_rule_info rinfo;
5793         struct ice_switch_info *sw;
5794
5795         sw = hw->switch_info;
5796         if (!sw->recp_list[remove_entry->rid].recp_created)
5797                 return ICE_ERR_PARAM;
5798         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
5799         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
5800                             list_entry) {
5801                 if (list_itr->rule_info.fltr_rule_id ==
5802                     remove_entry->rule_id) {
5803                         rinfo = list_itr->rule_info;
5804                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
5805                         return ice_rem_adv_rule(hw, list_itr->lkups,
5806                                                 list_itr->lkups_cnt, &rinfo);
5807                 }
5808         }
5809         return ICE_ERR_PARAM;
5810 }
5811
5812 /**
5813  * ice_rem_adv_for_vsi - removes existing advanced switch rules for a
5814  *                       given VSI handle
5815  * @hw: pointer to the hardware structure
5816  * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
5817  *
5818  * This function is used to remove all the rules for a given VSI and as soon
5819  * as removing a rule fails, it will return immediately with the error code,
5820  * else it will return ICE_SUCCESS
5821  */
5822 enum ice_status
5823 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
5824 {
5825         struct ice_adv_fltr_mgmt_list_entry *list_itr;
5826         struct ice_vsi_list_map_info *map_info;
5827         struct LIST_HEAD_TYPE *list_head;
5828         struct ice_adv_rule_info rinfo;
5829         struct ice_switch_info *sw;
5830         enum ice_status status;
5831         u16 vsi_list_id = 0;
5832         u8 rid;
5833
5834         sw = hw->switch_info;
5835         for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
5836                 if (!sw->recp_list[rid].recp_created)
5837                         continue;
5838                 if (!sw->recp_list[rid].adv_rule)
5839                         continue;
5840                 list_head = &sw->recp_list[rid].filt_rules;
5841                 map_info = NULL;
5842                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
5843                                     ice_adv_fltr_mgmt_list_entry, list_entry) {
5844                         map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
5845                                                            &vsi_list_id);
5846                         if (!map_info)
5847                                 continue;
5848                         rinfo = list_itr->rule_info;
5849                         rinfo.sw_act.vsi_handle = vsi_handle;
5850                         status = ice_rem_adv_rule(hw, list_itr->lkups,
5851                                                   list_itr->lkups_cnt, &rinfo);
5852                         if (status)
5853                                 return status;
5854                         map_info = NULL;
5855                 }
5856         }
5857         return ICE_SUCCESS;
5858 }
5859
5860 /**
5861  * ice_replay_fltr - Replay all the filters stored by a specific list head
5862  * @hw: pointer to the hardware structure
5863  * @list_head: list for which filters needs to be replayed
5864  * @recp_id: Recipe ID for which rules need to be replayed
5865  */
5866 static enum ice_status
5867 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
5868 {
5869         struct ice_fltr_mgmt_list_entry *itr;
5870         struct LIST_HEAD_TYPE l_head;
5871         enum ice_status status = ICE_SUCCESS;
5872
5873         if (LIST_EMPTY(list_head))
5874                 return status;
5875
5876         /* Move entries from the given list_head to a temporary l_head so that
5877          * they can be replayed. Otherwise when trying to re-add the same
5878          * filter, the function will return already exists
5879          */
5880         LIST_REPLACE_INIT(list_head, &l_head);
5881
5882         /* Mark the given list_head empty by reinitializing it so filters
5883          * could be added again by *handler
5884          */
5885         LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
5886                             list_entry) {
5887                 struct ice_fltr_list_entry f_entry;
5888
5889                 f_entry.fltr_info = itr->fltr_info;
5890                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
5891                         status = ice_add_rule_internal(hw, recp_id, &f_entry);
5892                         if (status != ICE_SUCCESS)
5893                                 goto end;
5894                         continue;
5895                 }
5896
5897                 /* Add a filter per VSI separately */
5898                 while (1) {
5899                         u16 vsi_handle;
5900
5901                         vsi_handle =
5902                                 ice_find_first_bit(itr->vsi_list_info->vsi_map,
5903                                                    ICE_MAX_VSI);
5904                         if (!ice_is_vsi_valid(hw, vsi_handle))
5905                                 break;
5906
5907                         ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
5908                         f_entry.fltr_info.vsi_handle = vsi_handle;
5909                         f_entry.fltr_info.fwd_id.hw_vsi_id =
5910                                 ice_get_hw_vsi_num(hw, vsi_handle);
5911                         f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
5912                         if (recp_id == ICE_SW_LKUP_VLAN)
5913                                 status = ice_add_vlan_internal(hw, &f_entry);
5914                         else
5915                                 status = ice_add_rule_internal(hw, recp_id,
5916                                                                &f_entry);
5917                         if (status != ICE_SUCCESS)
5918                                 goto end;
5919                 }
5920         }
5921 end:
5922         /* Clear the filter management list */
5923         ice_rem_sw_rule_info(hw, &l_head);
5924         return status;
5925 }
5926
5927 /**
5928  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
5929  * @hw: pointer to the hardware structure
5930  *
5931  * NOTE: This function does not clean up partially added filters on error.
5932  * It is up to caller of the function to issue a reset or fail early.
5933  */
5934 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
5935 {
5936         struct ice_switch_info *sw = hw->switch_info;
5937         enum ice_status status = ICE_SUCCESS;
5938         u8 i;
5939
5940         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5941                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
5942
5943                 status = ice_replay_fltr(hw, i, head);
5944                 if (status != ICE_SUCCESS)
5945                         return status;
5946         }
5947         return status;
5948 }
5949
5950 /**
5951  * ice_replay_vsi_fltr - Replay filters for requested VSI
5952  * @hw: pointer to the hardware structure
5953  * @vsi_handle: driver VSI handle
5954  * @recp_id: Recipe ID for which rules need to be replayed
5955  * @list_head: list for which filters need to be replayed
5956  *
5957  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
5958  * It is required to pass valid VSI handle.
5959  */
5960 static enum ice_status
5961 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
5962                     struct LIST_HEAD_TYPE *list_head)
5963 {
5964         struct ice_fltr_mgmt_list_entry *itr;
5965         enum ice_status status = ICE_SUCCESS;
5966         u16 hw_vsi_id;
5967
5968         if (LIST_EMPTY(list_head))
5969                 return status;
5970         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
5971
5972         LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
5973                             list_entry) {
5974                 struct ice_fltr_list_entry f_entry;
5975
5976                 f_entry.fltr_info = itr->fltr_info;
5977                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
5978                     itr->fltr_info.vsi_handle == vsi_handle) {
5979                         /* update the src in case it is VSI num */
5980                         if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
5981                                 f_entry.fltr_info.src = hw_vsi_id;
5982                         status = ice_add_rule_internal(hw, recp_id, &f_entry);
5983                         if (status != ICE_SUCCESS)
5984                                 goto end;
5985                         continue;
5986                 }
5987                 if (!itr->vsi_list_info ||
5988                     !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
5989                         continue;
5990                 /* Clearing it so that the logic can add it back */
5991                 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
5992                 f_entry.fltr_info.vsi_handle = vsi_handle;
5993                 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
5994                 /* update the src in case it is VSI num */
5995                 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
5996                         f_entry.fltr_info.src = hw_vsi_id;
5997                 if (recp_id == ICE_SW_LKUP_VLAN)
5998                         status = ice_add_vlan_internal(hw, &f_entry);
5999                 else
6000                         status = ice_add_rule_internal(hw, recp_id, &f_entry);
6001                 if (status != ICE_SUCCESS)
6002                         goto end;
6003         }
6004 end:
6005         return status;
6006 }
6007
6008 /**
6009  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6010  * @hw: pointer to the hardware structure
6011  * @vsi_handle: driver VSI handle
6012  * @list_head: list for which filters need to be replayed
6013  *
6014  * Replay the advanced rule for the given VSI.
6015  */
6016 static enum ice_status
6017 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6018                         struct LIST_HEAD_TYPE *list_head)
6019 {
6020         struct ice_rule_query_data added_entry = { 0 };
6021         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6022         enum ice_status status = ICE_SUCCESS;
6023
6024         if (LIST_EMPTY(list_head))
6025                 return status;
6026         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6027                             list_entry) {
6028                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6029                 u16 lk_cnt = adv_fltr->lkups_cnt;
6030
6031                 if (vsi_handle != rinfo->sw_act.vsi_handle)
6032                         continue;
6033                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6034                                           &added_entry);
6035                 if (status)
6036                         break;
6037         }
6038         return status;
6039 }
6040
6041 /**
6042  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6043  * @hw: pointer to the hardware structure
6044  * @vsi_handle: driver VSI handle
6045  *
6046  * Replays filters for requested VSI via vsi_handle.
6047  */
6048 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6049 {
6050         struct ice_switch_info *sw = hw->switch_info;
6051         enum ice_status status;
6052         u8 i;
6053
6054         /* Update the recipes that were created */
6055         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6056                 struct LIST_HEAD_TYPE *head;
6057
6058                 head = &sw->recp_list[i].filt_replay_rules;
6059                 if (!sw->recp_list[i].adv_rule)
6060                         status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6061                 else
6062                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6063                 if (status != ICE_SUCCESS)
6064                         return status;
6065         }
6066
6067         return ICE_SUCCESS;
6068 }
6069
6070 /**
6071  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6072  * @hw: pointer to the HW struct
6073  *
6074  * Deletes the filter replay rules.
6075  */
6076 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6077 {
6078         struct ice_switch_info *sw = hw->switch_info;
6079         u8 i;
6080
6081         if (!sw)
6082                 return;
6083
6084         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6085                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6086                         struct LIST_HEAD_TYPE *l_head;
6087
6088                         l_head = &sw->recp_list[i].filt_replay_rules;
6089                         if (!sw->recp_list[i].adv_rule)
6090                                 ice_rem_sw_rule_info(hw, l_head);
6091                         else
6092                                 ice_rem_adv_rule_info(hw, l_head);
6093                 }
6094         }
6095 }