net/ice/base: introduce some new macros
[dpdk.git] / drivers / net / ice / base / ice_switch.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2001-2019
3  */
4
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
7 #include "ice_flow.h"
8
9
/* Byte offsets of fields within the dummy ethernet header below */
#define ICE_ETH_DA_OFFSET               0
#define ICE_ETH_ETHTYPE_OFFSET          12
#define ICE_ETH_VLAN_TCI_OFFSET         14
/* Largest valid 12-bit VLAN ID (VID field of the 802.1Q TCI) */
#define ICE_MAX_VLAN_ID                 0xFFF
14
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
 * struct to configure any switch filter rules.
 * {DA (6 bytes), SA(6 bytes),
 * Ether type (2 bytes for header without VLAN tag) OR
 * VLAN tag (4 bytes for header with VLAN tag) }
 *
 * Word on Hardcoded values
 * byte 0 = 0x2: to identify it as locally administered DA MAC
 * byte 6 = 0x2: to identify it as locally administered SA MAC
 * byte 12 = 0x81 & byte 13 = 0x00:
 *      In case of VLAN filter first two bytes defines ether type (0x8100)
 *      and remaining two bytes are placeholder for programming a given VLAN ID
 *      In case of Ether type filter it is treated as header without VLAN tag
 *      and byte 12 and 13 is used to program a given Ether type instead
 */
#define DUMMY_ETH_HDR_LEN               16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
                                                        0x2, 0, 0, 0, 0, 0,
                                                        0x81, 0, 0, 0};
34
/* Total AQ buffer sizes (in bytes) for the various switch rule payloads.
 * Each starts from sizeof(struct ice_aqc_sw_rules_elem) with its pdata
 * union subtracted, then adds the size of the specific payload struct.
 * The trailing "- 1" / "- sizeof(...)" terms presumably compensate for a
 * one-element flexible placeholder already counted inside the payload
 * struct -- confirm against the struct definitions in ice_adminq_cmd.
 */

/* Lookup RX/TX rule carrying the 16-byte dummy ethernet header above */
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
        (sizeof(struct ice_aqc_sw_rules_elem) - \
         sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
         sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
/* Lookup RX/TX rule with no packet header attached */
#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
        (sizeof(struct ice_aqc_sw_rules_elem) - \
         sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
         sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
/* Large-action rule with @n action entries */
#define ICE_SW_RULE_LG_ACT_SIZE(n) \
        (sizeof(struct ice_aqc_sw_rules_elem) - \
         sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
         sizeof(struct ice_sw_rule_lg_act) - \
         sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
         ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
/* VSI-list rule with @n VSI entries */
#define ICE_SW_RULE_VSI_LIST_SIZE(n) \
        (sizeof(struct ice_aqc_sw_rules_elem) - \
         sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
         sizeof(struct ice_sw_rule_vsi_list) - \
         sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
         ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
55
/* Dummy packet for a GRE tunnel rule: outer Ethernet (ethertype 0x0800) /
 * outer IPv4 (protocol 0x2F = GRE, total length 0x3E) / GRE header with
 * protocol 0x6558 (Transparent Ethernet Bridging) / inner Ethernet /
 * inner IPv4. Zeroed bytes are presumably placeholders filled in when a
 * concrete rule is built -- confirm against the rule-building code.
 */
static const
u8 dummy_gre_packet[] = { 0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x08, 0,              /* Ether ends */
                          0x45, 0, 0, 0x3E,     /* IP starts */
                          0, 0, 0, 0,
                          0, 0x2F, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0,           /* IP ends */
                          0x80, 0, 0x65, 0x58,  /* GRE starts */
                          0, 0, 0, 0,           /* GRE ends */
                          0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x08, 0,              /* Ether ends */
                          0x45, 0, 0, 0x14,     /* IP starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0            /* IP ends */
                        };
78
/* Dummy packet for a UDP tunnel rule: outer Ethernet (ethertype 0x0800) /
 * outer IPv4 (protocol 0x11 = UDP, total length 0x32) / UDP with
 * destination port 0x12B5 (4789, the IANA VXLAN port) / VXLAN header /
 * inner Ethernet.
 */
static const u8
dummy_udp_tun_packet[] = {0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x08, 0,              /* Ether ends */
                          0x45, 0, 0, 0x32,     /* IP starts */
                          0, 0, 0, 0,
                          0, 0x11, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0,           /* IP ends */
                          0, 0, 0x12, 0xB5,     /* UDP start*/
                          0, 0x1E, 0, 0,        /* UDP end*/
                          0, 0, 0, 0,           /* VXLAN start */
                          0, 0, 0, 0,           /* VXLAN end*/
                          0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0, 0                  /* Ether ends */
                        };
98
/* Dummy packet for a plain TCP rule: Ethernet (ethertype 0x0800) /
 * IPv4 (protocol 0x06 = TCP, total length 0x28, precomputed header
 * checksum 0xF569) / minimal TCP header, padded to 4-byte alignment.
 */
static const u8
dummy_tcp_tun_packet[] = {0, 0, 0, 0,           /* Ether starts */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x08, 0,              /* Ether ends */
                          0x45, 0, 0, 0x28,     /* IP starts */
                          0, 0x01, 0, 0,
                          0x40, 0x06, 0xF5, 0x69,
                          0, 0, 0, 0,
                          0, 0, 0, 0,   /* IP ends */
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0, 0, 0, 0,
                          0x50, 0x02, 0x20,
                          0, 0x9, 0x79, 0, 0,
                          0, 0 /* 2 bytes padding for 4 byte alignment*/
                        };
116
/* this is a recipe to profile bitmap association: indexed by recipe ID,
 * each entry holds the set of profiles the recipe is mapped to (filled
 * by ice_get_recp_to_prof_map())
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
                          ICE_MAX_NUM_PROFILES);
/* Field-vector result slots still free for chained recipes; bits are
 * cleared in ice_get_recp_frm_fw() as FW-programmed recipes claim them.
 */
static ice_declare_bitmap(available_result_ids, ICE_CHAIN_FV_INDEX_START + 1);
121
122 /**
123  * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
124  * @hw: pointer to hardware structure
125  * @recps: struct that we need to populate
126  * @rid: recipe ID that we are populating
127  *
128  * This function is used to populate all the necessary entries into our
129  * bookkeeping so that we have a current list of all the recipes that are
130  * programmed in the firmware.
131  */
132 static enum ice_status
133 ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid)
134 {
135         u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
136         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
137         u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
138         struct ice_aqc_recipe_data_elem *tmp;
139         u16 num_recps = ICE_MAX_NUM_RECIPES;
140         struct ice_prot_lkup_ext *lkup_exts;
141         enum ice_status status;
142
143         /* we need a buffer big enough to accommodate all the recipes */
144         tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
145                 ICE_MAX_NUM_RECIPES, sizeof(*tmp));
146         if (!tmp)
147                 return ICE_ERR_NO_MEMORY;
148
149         tmp[0].recipe_indx = rid;
150         status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
151         /* non-zero status meaning recipe doesn't exist */
152         if (status)
153                 goto err_unroll;
154         lkup_exts = &recps[rid].lkup_exts;
155         /* start populating all the entries for recps[rid] based on lkups from
156          * firmware
157          */
158         for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
159                 struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
160                 struct ice_recp_grp_entry *rg_entry;
161                 u8 prof_id, prot = 0;
162                 u16 off = 0;
163
164                 rg_entry = (struct ice_recp_grp_entry *)
165                         ice_malloc(hw, sizeof(*rg_entry));
166                 if (!rg_entry) {
167                         status = ICE_ERR_NO_MEMORY;
168                         goto err_unroll;
169                 }
170                 /* Avoid 8th bit since its result enable bit */
171                 result_idxs[result_idx] = root_bufs.content.result_indx &
172                         ~ICE_AQ_RECIPE_RESULT_EN;
173                 /* Check if result enable bit is set */
174                 if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
175                         ice_clear_bit(ICE_CHAIN_FV_INDEX_START -
176                                       result_idxs[result_idx++],
177                                       available_result_ids);
178                 ice_memcpy(r_bitmap,
179                            recipe_to_profile[tmp[sub_recps].recipe_indx],
180                            sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
181                 /* get the first profile that is associated with rid */
182                 prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
183                 for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
184                         u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
185
186                         rg_entry->fv_idx[i] = lkup_indx;
187                         /* If the recipe is a chained recipe then all its
188                          * child recipe's result will have a result index.
189                          * To fill fv_words we should not use those result
190                          * index, we only need the protocol ids and offsets.
191                          * We will skip all the fv_idx which stores result
192                          * index in them. We also need to skip any fv_idx which
193                          * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
194                          * valid offset value.
195                          */
196                         if (result_idxs[0] == rg_entry->fv_idx[i] ||
197                             result_idxs[1] == rg_entry->fv_idx[i] ||
198                             result_idxs[2] == rg_entry->fv_idx[i] ||
199                             result_idxs[3] == rg_entry->fv_idx[i] ||
200                             result_idxs[4] == rg_entry->fv_idx[i] ||
201                             rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
202                             rg_entry->fv_idx[i] == 0)
203                                 continue;
204
205                         ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
206                                           rg_entry->fv_idx[i], &prot, &off);
207                         lkup_exts->fv_words[fv_word_idx].prot_id = prot;
208                         lkup_exts->fv_words[fv_word_idx].off = off;
209                         fv_word_idx++;
210                 }
211                 /* populate rg_list with the data from the child entry of this
212                  * recipe
213                  */
214                 LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
215         }
216         lkup_exts->n_val_words = fv_word_idx;
217         recps[rid].n_grp_count = num_recps;
218         recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
219                 ice_calloc(hw, recps[rid].n_grp_count,
220                            sizeof(struct ice_aqc_recipe_data_elem));
221         if (!recps[rid].root_buf)
222                 goto err_unroll;
223
224         ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
225                    sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
226         recps[rid].recp_created = true;
227         if (tmp[sub_recps].content.rid & ICE_AQ_RECIPE_ID_IS_ROOT)
228                 recps[rid].root_rid = rid;
229 err_unroll:
230         ice_free(hw, tmp);
231         return status;
232 }
233
234 /**
235  * ice_get_recp_to_prof_map - updates recipe to profile mapping
236  * @hw: pointer to hardware structure
237  *
238  * This function is used to populate recipe_to_profile matrix where index to
239  * this array is the recipe ID and the element is the mapping of which profiles
240  * is this recipe mapped to.
241  */
242 static void
243 ice_get_recp_to_prof_map(struct ice_hw *hw)
244 {
245         ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
246         u16 i;
247
248         for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
249                 u16 j;
250
251                 ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
252                 if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
253                         continue;
254
255                 for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
256                         if (ice_is_bit_set(r_bitmap, j))
257                                 ice_set_bit(i, recipe_to_profile[j]);
258         }
259 }
260
261 /**
262  * ice_init_def_sw_recp - initialize the recipe book keeping tables
263  * @hw: pointer to the HW struct
264  *
265  * Allocate memory for the entire recipe table and initialize the structures/
266  * entries corresponding to basic recipes.
267  */
268 enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
269 {
270         struct ice_sw_recipe *recps;
271         u8 i;
272
273         recps = (struct ice_sw_recipe *)
274                 ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
275         if (!recps)
276                 return ICE_ERR_NO_MEMORY;
277
278         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
279                 recps[i].root_rid = i;
280                 INIT_LIST_HEAD(&recps[i].filt_rules);
281                 INIT_LIST_HEAD(&recps[i].filt_replay_rules);
282                 INIT_LIST_HEAD(&recps[i].rg_list);
283                 ice_init_lock(&recps[i].filt_rule_lock);
284         }
285
286         hw->switch_info->recp_list = recps;
287
288         return ICE_SUCCESS;
289 }
290
291 /**
292  * ice_aq_get_sw_cfg - get switch configuration
293  * @hw: pointer to the hardware structure
294  * @buf: pointer to the result buffer
295  * @buf_size: length of the buffer available for response
296  * @req_desc: pointer to requested descriptor
297  * @num_elems: pointer to number of elements
298  * @cd: pointer to command details structure or NULL
299  *
300  * Get switch configuration (0x0200) to be placed in 'buff'.
301  * This admin command returns information such as initial VSI/port number
302  * and switch ID it belongs to.
303  *
304  * NOTE: *req_desc is both an input/output parameter.
305  * The caller of this function first calls this function with *request_desc set
306  * to 0. If the response from f/w has *req_desc set to 0, all the switch
307  * configuration information has been returned; if non-zero (meaning not all
308  * the information was returned), the caller should call this function again
309  * with *req_desc set to the previous value returned by f/w to get the
310  * next block of switch configuration information.
311  *
312  * *num_elems is output only parameter. This reflects the number of elements
313  * in response buffer. The caller of this function to use *num_elems while
314  * parsing the response buffer.
315  */
316 static enum ice_status
317 ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
318                   u16 buf_size, u16 *req_desc, u16 *num_elems,
319                   struct ice_sq_cd *cd)
320 {
321         struct ice_aqc_get_sw_cfg *cmd;
322         enum ice_status status;
323         struct ice_aq_desc desc;
324
325         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
326         cmd = &desc.params.get_sw_conf;
327         cmd->element = CPU_TO_LE16(*req_desc);
328
329         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
330         if (!status) {
331                 *req_desc = LE16_TO_CPU(cmd->element);
332                 *num_elems = LE16_TO_CPU(cmd->num_elems);
333         }
334
335         return status;
336 }
337
338
339 /**
340  * ice_alloc_sw - allocate resources specific to switch
341  * @hw: pointer to the HW struct
342  * @ena_stats: true to turn on VEB stats
343  * @shared_res: true for shared resource, false for dedicated resource
344  * @sw_id: switch ID returned
345  * @counter_id: VEB counter ID returned
346  *
347  * allocates switch resources (SWID and VEB counter) (0x0208)
348  */
349 enum ice_status
350 ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
351              u16 *counter_id)
352 {
353         struct ice_aqc_alloc_free_res_elem *sw_buf;
354         struct ice_aqc_res_elem *sw_ele;
355         enum ice_status status;
356         u16 buf_len;
357
358         buf_len = sizeof(*sw_buf);
359         sw_buf = (struct ice_aqc_alloc_free_res_elem *)
360                    ice_malloc(hw, buf_len);
361         if (!sw_buf)
362                 return ICE_ERR_NO_MEMORY;
363
364         /* Prepare buffer for switch ID.
365          * The number of resource entries in buffer is passed as 1 since only a
366          * single switch/VEB instance is allocated, and hence a single sw_id
367          * is requested.
368          */
369         sw_buf->num_elems = CPU_TO_LE16(1);
370         sw_buf->res_type =
371                 CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
372                             (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
373                             ICE_AQC_RES_TYPE_FLAG_DEDICATED));
374
375         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
376                                        ice_aqc_opc_alloc_res, NULL);
377
378         if (status)
379                 goto ice_alloc_sw_exit;
380
381         sw_ele = &sw_buf->elem[0];
382         *sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
383
384         if (ena_stats) {
385                 /* Prepare buffer for VEB Counter */
386                 enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
387                 struct ice_aqc_alloc_free_res_elem *counter_buf;
388                 struct ice_aqc_res_elem *counter_ele;
389
390                 counter_buf = (struct ice_aqc_alloc_free_res_elem *)
391                                 ice_malloc(hw, buf_len);
392                 if (!counter_buf) {
393                         status = ICE_ERR_NO_MEMORY;
394                         goto ice_alloc_sw_exit;
395                 }
396
397                 /* The number of resource entries in buffer is passed as 1 since
398                  * only a single switch/VEB instance is allocated, and hence a
399                  * single VEB counter is requested.
400                  */
401                 counter_buf->num_elems = CPU_TO_LE16(1);
402                 counter_buf->res_type =
403                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
404                                     ICE_AQC_RES_TYPE_FLAG_DEDICATED);
405                 status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
406                                                opc, NULL);
407
408                 if (status) {
409                         ice_free(hw, counter_buf);
410                         goto ice_alloc_sw_exit;
411                 }
412                 counter_ele = &counter_buf->elem[0];
413                 *counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
414                 ice_free(hw, counter_buf);
415         }
416
417 ice_alloc_sw_exit:
418         ice_free(hw, sw_buf);
419         return status;
420 }
421
/**
 * ice_free_sw - free resources specific to switch
 * @hw: pointer to the HW struct
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * free switch resources (SWID and VEB counter) (0x0209)
 *
 * NOTE: This function frees multiple resources. It continues
 * releasing other resources even after it encounters error.
 * The error code returned is the last error it encountered.
 */
enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
	enum ice_status status, ret_status;
	u16 buf_len;

	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		   ice_malloc(hw, buf_len);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free for switch ID res.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single sw_id
	 * is released.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);

	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
					   ice_aqc_opc_free_res, NULL);

	/* Log but keep going: the VEB counter below is freed regardless */
	if (ret_status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	/* Prepare buffer to free for VEB Counter resource */
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
			ice_malloc(hw, buf_len);
	if (!counter_buf) {
		ice_free(hw, sw_buf);
		return ICE_ERR_NO_MEMORY;
	}

	/* The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single VEB counter
	 * is released
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);

	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status) {
		ice_debug(hw, ICE_DBG_SW,
			  "VEB counter resource could not be freed\n");
		/* last error wins, per the function contract above */
		ret_status = status;
	}

	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
	return ret_status;
}
489
490 /**
491  * ice_aq_add_vsi
492  * @hw: pointer to the HW struct
493  * @vsi_ctx: pointer to a VSI context struct
494  * @cd: pointer to command details structure or NULL
495  *
496  * Add a VSI context to the hardware (0x0210)
497  */
498 enum ice_status
499 ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
500                struct ice_sq_cd *cd)
501 {
502         struct ice_aqc_add_update_free_vsi_resp *res;
503         struct ice_aqc_add_get_update_free_vsi *cmd;
504         struct ice_aq_desc desc;
505         enum ice_status status;
506
507         cmd = &desc.params.vsi_cmd;
508         res = &desc.params.add_update_free_vsi_res;
509
510         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
511
512         if (!vsi_ctx->alloc_from_pool)
513                 cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
514                                            ICE_AQ_VSI_IS_VALID);
515
516         cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
517
518         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
519
520         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
521                                  sizeof(vsi_ctx->info), cd);
522
523         if (!status) {
524                 vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
525                 vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
526                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
527         }
528
529         return status;
530 }
531
532 /**
533  * ice_aq_free_vsi
534  * @hw: pointer to the HW struct
535  * @vsi_ctx: pointer to a VSI context struct
536  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
537  * @cd: pointer to command details structure or NULL
538  *
539  * Free VSI context info from hardware (0x0213)
540  */
541 enum ice_status
542 ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
543                 bool keep_vsi_alloc, struct ice_sq_cd *cd)
544 {
545         struct ice_aqc_add_update_free_vsi_resp *resp;
546         struct ice_aqc_add_get_update_free_vsi *cmd;
547         struct ice_aq_desc desc;
548         enum ice_status status;
549
550         cmd = &desc.params.vsi_cmd;
551         resp = &desc.params.add_update_free_vsi_res;
552
553         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
554
555         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
556         if (keep_vsi_alloc)
557                 cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
558
559         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
560         if (!status) {
561                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
562                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
563         }
564
565         return status;
566 }
567
568 /**
569  * ice_aq_update_vsi
570  * @hw: pointer to the HW struct
571  * @vsi_ctx: pointer to a VSI context struct
572  * @cd: pointer to command details structure or NULL
573  *
574  * Update VSI context in the hardware (0x0211)
575  */
576 enum ice_status
577 ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
578                   struct ice_sq_cd *cd)
579 {
580         struct ice_aqc_add_update_free_vsi_resp *resp;
581         struct ice_aqc_add_get_update_free_vsi *cmd;
582         struct ice_aq_desc desc;
583         enum ice_status status;
584
585         cmd = &desc.params.vsi_cmd;
586         resp = &desc.params.add_update_free_vsi_res;
587
588         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
589
590         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
591
592         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
593
594         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
595                                  sizeof(vsi_ctx->info), cd);
596
597         if (!status) {
598                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
599                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
600         }
601
602         return status;
603 }
604
605 /**
606  * ice_is_vsi_valid - check whether the VSI is valid or not
607  * @hw: pointer to the HW struct
608  * @vsi_handle: VSI handle
609  *
610  * check whether the VSI is valid or not
611  */
612 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
613 {
614         return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
615 }
616
617 /**
618  * ice_get_hw_vsi_num - return the HW VSI number
619  * @hw: pointer to the HW struct
620  * @vsi_handle: VSI handle
621  *
622  * return the HW VSI number
623  * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
624  */
625 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
626 {
627         return hw->vsi_ctx[vsi_handle]->vsi_num;
628 }
629
630 /**
631  * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
632  * @hw: pointer to the HW struct
633  * @vsi_handle: VSI handle
634  *
635  * return the VSI context entry for a given VSI handle
636  */
637 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
638 {
639         return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
640 }
641
/**
 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 * @vsi: VSI context pointer
 *
 * save the VSI context entry for a given VSI handle; @hw takes ownership
 * of @vsi (freed later via ice_clear_vsi_ctx())
 */
static void
ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
{
	hw->vsi_ctx[vsi_handle] = vsi;
}
655
656 /**
657  * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
658  * @hw: pointer to the HW struct
659  * @vsi_handle: VSI handle
660  */
661 static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
662 {
663         struct ice_vsi_ctx *vsi;
664         u8 i;
665
666         vsi = ice_get_vsi_ctx(hw, vsi_handle);
667         if (!vsi)
668                 return;
669         ice_for_each_traffic_class(i) {
670                 if (vsi->lan_q_ctx[i]) {
671                         ice_free(hw, vsi->lan_q_ctx[i]);
672                         vsi->lan_q_ctx[i] = NULL;
673                 }
674         }
675 }
676
677 /**
678  * ice_clear_vsi_ctx - clear the VSI context entry
679  * @hw: pointer to the HW struct
680  * @vsi_handle: VSI handle
681  *
682  * clear the VSI context entry
683  */
684 static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
685 {
686         struct ice_vsi_ctx *vsi;
687
688         vsi = ice_get_vsi_ctx(hw, vsi_handle);
689         if (vsi) {
690                 ice_clear_vsi_q_ctx(hw, vsi_handle);
691                 ice_free(hw, vsi);
692                 hw->vsi_ctx[vsi_handle] = NULL;
693         }
694 }
695
696 /**
697  * ice_clear_all_vsi_ctx - clear all the VSI context entries
698  * @hw: pointer to the HW struct
699  */
700 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
701 {
702         u16 i;
703
704         for (i = 0; i < ICE_MAX_VSI; i++)
705                 ice_clear_vsi_ctx(hw, i);
706 }
707
708 /**
709  * ice_add_vsi - add VSI context to the hardware and VSI handle list
710  * @hw: pointer to the HW struct
711  * @vsi_handle: unique VSI handle provided by drivers
712  * @vsi_ctx: pointer to a VSI context struct
713  * @cd: pointer to command details structure or NULL
714  *
715  * Add a VSI context to the hardware also add it into the VSI handle list.
716  * If this function gets called after reset for existing VSIs then update
717  * with the new HW VSI number in the corresponding VSI handle list entry.
718  */
719 enum ice_status
720 ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
721             struct ice_sq_cd *cd)
722 {
723         struct ice_vsi_ctx *tmp_vsi_ctx;
724         enum ice_status status;
725
726         if (vsi_handle >= ICE_MAX_VSI)
727                 return ICE_ERR_PARAM;
728         status = ice_aq_add_vsi(hw, vsi_ctx, cd);
729         if (status)
730                 return status;
731         tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
732         if (!tmp_vsi_ctx) {
733                 /* Create a new VSI context */
734                 tmp_vsi_ctx = (struct ice_vsi_ctx *)
735                         ice_malloc(hw, sizeof(*tmp_vsi_ctx));
736                 if (!tmp_vsi_ctx) {
737                         ice_aq_free_vsi(hw, vsi_ctx, false, cd);
738                         return ICE_ERR_NO_MEMORY;
739                 }
740                 *tmp_vsi_ctx = *vsi_ctx;
741
742                 ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
743         } else {
744                 /* update with new HW VSI num */
745                 if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
746                         tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
747         }
748
749         return ICE_SUCCESS;
750 }
751
752 /**
753  * ice_free_vsi- free VSI context from hardware and VSI handle list
754  * @hw: pointer to the HW struct
755  * @vsi_handle: unique VSI handle
756  * @vsi_ctx: pointer to a VSI context struct
757  * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
758  * @cd: pointer to command details structure or NULL
759  *
760  * Free VSI context info from hardware as well as from VSI handle list
761  */
762 enum ice_status
763 ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
764              bool keep_vsi_alloc, struct ice_sq_cd *cd)
765 {
766         enum ice_status status;
767
768         if (!ice_is_vsi_valid(hw, vsi_handle))
769                 return ICE_ERR_PARAM;
770         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
771         status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
772         if (!status)
773                 ice_clear_vsi_ctx(hw, vsi_handle);
774         return status;
775 }
776
777 /**
778  * ice_update_vsi
779  * @hw: pointer to the HW struct
780  * @vsi_handle: unique VSI handle
781  * @vsi_ctx: pointer to a VSI context struct
782  * @cd: pointer to command details structure or NULL
783  *
784  * Update VSI context in the hardware
785  */
786 enum ice_status
787 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
788                struct ice_sq_cd *cd)
789 {
790         if (!ice_is_vsi_valid(hw, vsi_handle))
791                 return ICE_ERR_PARAM;
792         vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
793         return ice_aq_update_vsi(hw, vsi_ctx, cd);
794 }
795
796 /**
797  * ice_aq_get_vsi_params
798  * @hw: pointer to the HW struct
799  * @vsi_ctx: pointer to a VSI context struct
800  * @cd: pointer to command details structure or NULL
801  *
802  * Get VSI context info from hardware (0x0212)
803  */
804 enum ice_status
805 ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
806                       struct ice_sq_cd *cd)
807 {
808         struct ice_aqc_add_get_update_free_vsi *cmd;
809         struct ice_aqc_get_vsi_resp *resp;
810         struct ice_aq_desc desc;
811         enum ice_status status;
812
813         cmd = &desc.params.vsi_cmd;
814         resp = &desc.params.get_vsi_resp;
815
816         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
817
818         cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
819
820         status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
821                                  sizeof(vsi_ctx->info), cd);
822         if (!status) {
823                 vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
824                                         ICE_AQ_VSI_NUM_M;
825                 vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
826                 vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
827         }
828
829         return status;
830 }
831
/**
 * ice_aq_add_update_mir_rule - add/update a mirror rule
 * @hw: pointer to the HW struct
 * @rule_type: Rule Type (virtual or physical port, ingress or egress)
 * @dest_vsi: VSI number to which packets will be mirrored
 * @count: length of the list
 * @mr_buf: buffer for list of mirrored VSI numbers
 * @cd: pointer to command details structure or NULL
 * @rule_id: Rule ID. In/out: pass ICE_INVAL_MIRROR_RULE_ID to create a new
 *           rule or an existing ID to update that rule; on success the
 *           firmware-assigned rule ID is written back here.
 *
 * Add/Update Mirror Rule (0x260).
 */
enum ice_status
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
                           u16 count, struct ice_mir_rule_buf *mr_buf,
                           struct ice_sq_cd *cd, u16 *rule_id)
{
        struct ice_aqc_add_update_mir_rule *cmd;
        struct ice_aq_desc desc;
        enum ice_status status;
        __le16 *mr_list = NULL;
        u16 buf_size = 0;

        /* Validate the rule_type/count/mr_buf combination up front; a VSI
         * list is only meaningful for virtual-port mirroring.
         */
        switch (rule_type) {
        case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
        case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
                /* Make sure count and mr_buf are set for these rule_types */
                if (!(count && mr_buf))
                        return ICE_ERR_PARAM;

                buf_size = count * sizeof(__le16);
                mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
                if (!mr_list)
                        return ICE_ERR_NO_MEMORY;
                break;
        case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
        case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
                /* Make sure count and mr_buf are not set for these
                 * rule_types
                 */
                if (count || mr_buf)
                        return ICE_ERR_PARAM;
                break;
        default:
                ice_debug(hw, ICE_DBG_SW,
                          "Error due to unsupported rule_type %u\n", rule_type);
                return ICE_ERR_OUT_OF_RANGE;
        }

        ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);

        /* Pre-process 'mr_buf' items for add/update of virtual port
         * ingress/egress mirroring (but not physical port ingress/egress
         * mirroring)
         */
        if (mr_buf) {
                int i;

                for (i = 0; i < count; i++) {
                        u16 id;

                        id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;

                        /* Validate specified VSI number, make sure it is less
                         * than ICE_MAX_VSI, if not return with error.
                         */
                        if (id >= ICE_MAX_VSI) {
                                ice_debug(hw, ICE_DBG_SW,
                                          "Error VSI index (%u) out-of-range\n",
                                          id);
                                ice_free(hw, mr_list);
                                return ICE_ERR_OUT_OF_RANGE;
                        }

                        /* add VSI to mirror rule */
                        if (mr_buf[i].add)
                                mr_list[i] =
                                        CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
                        else /* remove VSI from mirror rule */
                                mr_list[i] = CPU_TO_LE16(id);
                }
        }

        /* An input ID of ICE_INVAL_MIRROR_RULE_ID leaves rule_id unset so
         * firmware allocates a new rule; otherwise mark the given ID valid
         * to request an update of that rule.
         */
        cmd = &desc.params.add_update_rule;
        if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
                cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
                                           ICE_AQC_RULE_ID_VALID_M);
        cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
        cmd->num_entries = CPU_TO_LE16(count);
        cmd->dest = CPU_TO_LE16(dest_vsi);

        status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
        if (!status)
                *rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;

        /* mr_list is still NULL for physical-port rule types; ice_free
         * presumably tolerates NULL — TODO confirm against osdep layer.
         */
        ice_free(hw, mr_list);

        return status;
}
931
932 /**
933  * ice_aq_delete_mir_rule - delete a mirror rule
934  * @hw: pointer to the HW struct
935  * @rule_id: Mirror rule ID (to be deleted)
936  * @keep_allocd: if set, the VSI stays part of the PF allocated res,
937  *               otherwise it is returned to the shared pool
938  * @cd: pointer to command details structure or NULL
939  *
940  * Delete Mirror Rule (0x261).
941  */
942 enum ice_status
943 ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
944                        struct ice_sq_cd *cd)
945 {
946         struct ice_aqc_delete_mir_rule *cmd;
947         struct ice_aq_desc desc;
948
949         /* rule_id should be in the range 0...63 */
950         if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
951                 return ICE_ERR_OUT_OF_RANGE;
952
953         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
954
955         cmd = &desc.params.del_rule;
956         rule_id |= ICE_AQC_RULE_ID_VALID_M;
957         cmd->rule_id = CPU_TO_LE16(rule_id);
958
959         if (keep_allocd)
960                 cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
961
962         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
963 }
964
965 /**
966  * ice_aq_alloc_free_vsi_list
967  * @hw: pointer to the HW struct
968  * @vsi_list_id: VSI list ID returned or used for lookup
969  * @lkup_type: switch rule filter lookup type
970  * @opc: switch rules population command type - pass in the command opcode
971  *
972  * allocates or free a VSI list resource
973  */
974 static enum ice_status
975 ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
976                            enum ice_sw_lkup_type lkup_type,
977                            enum ice_adminq_opc opc)
978 {
979         struct ice_aqc_alloc_free_res_elem *sw_buf;
980         struct ice_aqc_res_elem *vsi_ele;
981         enum ice_status status;
982         u16 buf_len;
983
984         buf_len = sizeof(*sw_buf);
985         sw_buf = (struct ice_aqc_alloc_free_res_elem *)
986                 ice_malloc(hw, buf_len);
987         if (!sw_buf)
988                 return ICE_ERR_NO_MEMORY;
989         sw_buf->num_elems = CPU_TO_LE16(1);
990
991         if (lkup_type == ICE_SW_LKUP_MAC ||
992             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
993             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
994             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
995             lkup_type == ICE_SW_LKUP_PROMISC ||
996             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
997             lkup_type == ICE_SW_LKUP_LAST) {
998                 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
999         } else if (lkup_type == ICE_SW_LKUP_VLAN) {
1000                 sw_buf->res_type =
1001                         CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
1002         } else {
1003                 status = ICE_ERR_PARAM;
1004                 goto ice_aq_alloc_free_vsi_list_exit;
1005         }
1006
1007         if (opc == ice_aqc_opc_free_res)
1008                 sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
1009
1010         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
1011         if (status)
1012                 goto ice_aq_alloc_free_vsi_list_exit;
1013
1014         if (opc == ice_aqc_opc_alloc_res) {
1015                 vsi_ele = &sw_buf->elem[0];
1016                 *vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
1017         }
1018
1019 ice_aq_alloc_free_vsi_list_exit:
1020         ice_free(hw, sw_buf);
1021         return status;
1022 }
1023
1024 /**
1025  * ice_aq_set_storm_ctrl - Sets storm control configuration
1026  * @hw: pointer to the HW struct
1027  * @bcast_thresh: represents the upper threshold for broadcast storm control
1028  * @mcast_thresh: represents the upper threshold for multicast storm control
1029  * @ctl_bitmask: storm control control knobs
1030  *
1031  * Sets the storm control configuration (0x0280)
1032  */
1033 enum ice_status
1034 ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
1035                       u32 ctl_bitmask)
1036 {
1037         struct ice_aqc_storm_cfg *cmd;
1038         struct ice_aq_desc desc;
1039
1040         cmd = &desc.params.storm_conf;
1041
1042         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
1043
1044         cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
1045         cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
1046         cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
1047
1048         return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1049 }
1050
1051 /**
1052  * ice_aq_get_storm_ctrl - gets storm control configuration
1053  * @hw: pointer to the HW struct
1054  * @bcast_thresh: represents the upper threshold for broadcast storm control
1055  * @mcast_thresh: represents the upper threshold for multicast storm control
1056  * @ctl_bitmask: storm control control knobs
1057  *
1058  * Gets the storm control configuration (0x0281)
1059  */
1060 enum ice_status
1061 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1062                       u32 *ctl_bitmask)
1063 {
1064         enum ice_status status;
1065         struct ice_aq_desc desc;
1066
1067         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1068
1069         status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1070         if (!status) {
1071                 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1072
1073                 if (bcast_thresh)
1074                         *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1075                                 ICE_AQ_THRESHOLD_M;
1076                 if (mcast_thresh)
1077                         *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1078                                 ICE_AQ_THRESHOLD_M;
1079                 if (ctl_bitmask)
1080                         *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1081         }
1082
1083         return status;
1084 }
1085
1086 /**
1087  * ice_aq_sw_rules - add/update/remove switch rules
1088  * @hw: pointer to the HW struct
1089  * @rule_list: pointer to switch rule population list
1090  * @rule_list_sz: total size of the rule list in bytes
1091  * @num_rules: number of switch rules in the rule_list
1092  * @opc: switch rules population command type - pass in the command opcode
1093  * @cd: pointer to command details structure or NULL
1094  *
1095  * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1096  */
1097 static enum ice_status
1098 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1099                 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1100 {
1101         struct ice_aq_desc desc;
1102
1103         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_sw_rules");
1104
1105         if (opc != ice_aqc_opc_add_sw_rules &&
1106             opc != ice_aqc_opc_update_sw_rules &&
1107             opc != ice_aqc_opc_remove_sw_rules)
1108                 return ICE_ERR_PARAM;
1109
1110         ice_fill_dflt_direct_cmd_desc(&desc, opc);
1111
1112         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1113         desc.params.sw_rules.num_rules_fltr_entry_index =
1114                 CPU_TO_LE16(num_rules);
1115         return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1116 }
1117
1118 /**
1119  * ice_aq_add_recipe - add switch recipe
1120  * @hw: pointer to the HW struct
1121  * @s_recipe_list: pointer to switch rule population list
1122  * @num_recipes: number of switch recipes in the list
1123  * @cd: pointer to command details structure or NULL
1124  *
1125  * Add(0x0290)
1126  */
1127 enum ice_status
1128 ice_aq_add_recipe(struct ice_hw *hw,
1129                   struct ice_aqc_recipe_data_elem *s_recipe_list,
1130                   u16 num_recipes, struct ice_sq_cd *cd)
1131 {
1132         struct ice_aqc_add_get_recipe *cmd;
1133         struct ice_aq_desc desc;
1134         u16 buf_size;
1135
1136         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_recipe");
1137         cmd = &desc.params.add_get_recipe;
1138         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1139
1140         cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
1141         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1142
1143         buf_size = num_recipes * sizeof(*s_recipe_list);
1144
1145         return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1146 }
1147
1148 /**
1149  * ice_aq_get_recipe - get switch recipe
1150  * @hw: pointer to the HW struct
1151  * @s_recipe_list: pointer to switch rule population list
1152  * @num_recipes: pointer to the number of recipes (input and output)
1153  * @recipe_root: root recipe number of recipe(s) to retrieve
1154  * @cd: pointer to command details structure or NULL
1155  *
1156  * Get(0x0292)
1157  *
1158  * On input, *num_recipes should equal the number of entries in s_recipe_list.
1159  * On output, *num_recipes will equal the number of entries returned in
1160  * s_recipe_list.
1161  *
1162  * The caller must supply enough space in s_recipe_list to hold all possible
1163  * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1164  */
1165 enum ice_status
1166 ice_aq_get_recipe(struct ice_hw *hw,
1167                   struct ice_aqc_recipe_data_elem *s_recipe_list,
1168                   u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1169 {
1170         struct ice_aqc_add_get_recipe *cmd;
1171         struct ice_aq_desc desc;
1172         enum ice_status status;
1173         u16 buf_size;
1174
1175         if (*num_recipes != ICE_MAX_NUM_RECIPES)
1176                 return ICE_ERR_PARAM;
1177
1178         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe");
1179         cmd = &desc.params.add_get_recipe;
1180         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1181
1182         cmd->return_index = CPU_TO_LE16(recipe_root);
1183         cmd->num_sub_recipes = 0;
1184
1185         buf_size = *num_recipes * sizeof(*s_recipe_list);
1186
1187         status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1188         /* cppcheck-suppress constArgument */
1189         *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1190
1191         return status;
1192 }
1193
1194 /**
1195  * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1196  * @hw: pointer to the HW struct
1197  * @profile_id: package profile ID to associate the recipe with
1198  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1199  * @cd: pointer to command details structure or NULL
1200  * Recipe to profile association (0x0291)
1201  */
1202 enum ice_status
1203 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1204                              struct ice_sq_cd *cd)
1205 {
1206         struct ice_aqc_recipe_to_profile *cmd;
1207         struct ice_aq_desc desc;
1208
1209         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_assoc_recipe_to_prof");
1210         cmd = &desc.params.recipe_to_profile;
1211         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
1212         cmd->profile_id = CPU_TO_LE16(profile_id);
1213         /* Set the recipe ID bit in the bitmask to let the device know which
1214          * profile we are associating the recipe to
1215          */
1216         ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1217                    ICE_NONDMA_TO_NONDMA);
1218
1219         return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1220 }
1221
1222 /**
1223  * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1224  * @hw: pointer to the HW struct
1225  * @profile_id: package profile ID to associate the recipe with
1226  * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1227  * @cd: pointer to command details structure or NULL
1228  * Associate profile ID with given recipe (0x0293)
1229  */
1230 enum ice_status
1231 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1232                              struct ice_sq_cd *cd)
1233 {
1234         struct ice_aqc_recipe_to_profile *cmd;
1235         struct ice_aq_desc desc;
1236         enum ice_status status;
1237
1238         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe_to_prof");
1239         cmd = &desc.params.recipe_to_profile;
1240         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
1241         cmd->profile_id = CPU_TO_LE16(profile_id);
1242
1243         status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1244         if (!status)
1245                 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1246                            sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1247
1248         return status;
1249 }
1250
1251 /**
1252  * ice_alloc_recipe - add recipe resource
1253  * @hw: pointer to the hardware structure
1254  * @rid: recipe ID returned as response to AQ call
1255  */
1256 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1257 {
1258         struct ice_aqc_alloc_free_res_elem *sw_buf;
1259         enum ice_status status;
1260         u16 buf_len;
1261
1262         buf_len = sizeof(*sw_buf);
1263         sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1264         if (!sw_buf)
1265                 return ICE_ERR_NO_MEMORY;
1266
1267         sw_buf->num_elems = CPU_TO_LE16(1);
1268         sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1269                                         ICE_AQC_RES_TYPE_S) |
1270                                         ICE_AQC_RES_TYPE_FLAG_SHARED);
1271         status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1272                                        ice_aqc_opc_alloc_res, NULL);
1273         if (!status)
1274                 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1275         ice_free(hw, sw_buf);
1276
1277         return status;
1278 }
1279
1280 /* ice_init_port_info - Initialize port_info with switch configuration data
1281  * @pi: pointer to port_info
1282  * @vsi_port_num: VSI number or port number
1283  * @type: Type of switch element (port or VSI)
1284  * @swid: switch ID of the switch the element is attached to
1285  * @pf_vf_num: PF or VF number
1286  * @is_vf: true if the element is a VF, false otherwise
1287  */
1288 static void
1289 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1290                    u16 swid, u16 pf_vf_num, bool is_vf)
1291 {
1292         switch (type) {
1293         case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1294                 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1295                 pi->sw_id = swid;
1296                 pi->pf_vf_num = pf_vf_num;
1297                 pi->is_vf = is_vf;
1298                 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1299                 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
1300                 break;
1301         default:
1302                 ice_debug(pi->hw, ICE_DBG_SW,
1303                           "incorrect VSI/port type received\n");
1304                 break;
1305         }
1306 }
1307
/**
 * ice_get_initial_sw_cfg - Get initial port and default VSI data
 * @hw: pointer to the hardware structure
 *
 * Queries the firmware switch configuration (possibly across several
 * admin-queue calls) and initializes hw->port_info for each reported port.
 */
enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
{
        struct ice_aqc_get_sw_cfg_resp *rbuf;
        enum ice_status status;
        u16 num_total_ports;   /* ports this function expects to see */
        u16 req_desc = 0;      /* continuation cookie for paged responses */
        u16 num_elems;         /* elements returned by the last AQ call */
        u16 j = 0;             /* ports initialized so far */
        u16 i;

        /* a single PF port is expected in this configuration */
        num_total_ports = 1;

        rbuf = (struct ice_aqc_get_sw_cfg_resp *)
                ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);

        if (!rbuf)
                return ICE_ERR_NO_MEMORY;

        /* Multiple calls to ice_aq_get_sw_cfg may be required
         * to get all the switch configuration information. The need
         * for additional calls is indicated by ice_aq_get_sw_cfg
         * writing a non-zero value in req_desc
         */
        do {
                status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
                                           &req_desc, &num_elems, NULL);

                if (status)
                        break;

                for (i = 0; i < num_elems; i++) {
                        struct ice_aqc_get_sw_cfg_resp_elem *ele;
                        u16 pf_vf_num, swid, vsi_port_num;
                        bool is_vf = false;
                        u8 type;

                        ele = rbuf[i].elements;
                        /* low bits hold the VSI/port number; the element
                         * type lives in the upper bits of the same word
                         */
                        vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
                                ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;

                        pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
                                ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;

                        swid = LE16_TO_CPU(ele->swid);

                        if (LE16_TO_CPU(ele->pf_vf_num) &
                            ICE_AQC_GET_SW_CONF_RESP_IS_VF)
                                is_vf = true;

                        type = LE16_TO_CPU(ele->vsi_port_num) >>
                                ICE_AQC_GET_SW_CONF_RESP_TYPE_S;

                        switch (type) {
                        case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
                        case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
                                /* refuse more ports than we allotted for */
                                if (j == num_total_ports) {
                                        ice_debug(hw, ICE_DBG_SW,
                                                  "more ports than expected\n");
                                        status = ICE_ERR_CFG;
                                        goto out;
                                }
                                ice_init_port_info(hw->port_info,
                                                   vsi_port_num, type, swid,
                                                   pf_vf_num, is_vf);
                                j++;
                                break;
                        default:
                                /* non-port elements are ignored here */
                                break;
                        }
                }
        } while (req_desc && !status);


out:
        ice_free(hw, (void *)rbuf);
        return status;
}
1388
1389
1390 /**
1391  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1392  * @hw: pointer to the hardware structure
1393  * @fi: filter info structure to fill/update
1394  *
1395  * This helper function populates the lb_en and lan_en elements of the provided
1396  * ice_fltr_info struct using the switch's type and characteristics of the
1397  * switch rule being configured.
1398  */
1399 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
1400 {
1401         fi->lb_en = false;
1402         fi->lan_en = false;
1403         if ((fi->flag & ICE_FLTR_TX) &&
1404             (fi->fltr_act == ICE_FWD_TO_VSI ||
1405              fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1406              fi->fltr_act == ICE_FWD_TO_Q ||
1407              fi->fltr_act == ICE_FWD_TO_QGRP)) {
1408                 /* Setting LB for prune actions will result in replicated
1409                  * packets to the internal switch that will be dropped.
1410                  */
1411                 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1412                         fi->lb_en = true;
1413
1414                 /* Set lan_en to TRUE if
1415                  * 1. The switch is a VEB AND
1416                  * 2
1417                  * 2.1 The lookup is a directional lookup like ethertype,
1418                  * promiscuous, ethertype-MAC, promiscuous-VLAN
1419                  * and default-port OR
1420                  * 2.2 The lookup is VLAN, OR
1421                  * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1422                  * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1423                  *
1424                  * OR
1425                  *
1426                  * The switch is a VEPA.
1427                  *
1428                  * In all other cases, the LAN enable has to be set to false.
1429                  */
1430                 if (hw->evb_veb) {
1431                         if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1432                             fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1433                             fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1434                             fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1435                             fi->lkup_type == ICE_SW_LKUP_DFLT ||
1436                             fi->lkup_type == ICE_SW_LKUP_VLAN ||
1437                             (fi->lkup_type == ICE_SW_LKUP_MAC &&
1438                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1439                             (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1440                              !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1441                                 fi->lan_en = true;
1442                 } else {
1443                         fi->lan_en = true;
1444                 }
1445         }
1446 }
1447
/**
 * ice_ilog2 - Calculates integer log base 2 of a number
 * @n: number on which to perform operation
 *
 * Returns the index of the most significant set bit of @n,
 * or -1 when @n is zero.
 */
static int ice_ilog2(u64 n)
{
        int pos = -1;

        /* shift right until nothing remains; count how far we went */
        while (n) {
                n >>= 1;
                pos++;
        }

        return pos;
}
1462
/**
 * ice_fill_sw_rule - Helper function to fill switch rule structure
 * @hw: pointer to the hardware structure
 * @f_info: entry containing packet forwarding information
 * @s_rule: switch rule structure to be filled in based on mac_entry
 * @opc: switch rules population command type - pass in the command opcode
 *
 * Builds the action word and the dummy Ethernet header of @s_rule from the
 * filter description in @f_info. For a remove opcode only the rule index is
 * programmed.
 */
static void
ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
                 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
{
        /* ICE_MAX_VLAN_ID + 1 is a sentinel meaning "no VLAN ID to program" */
        u16 vlan_id = ICE_MAX_VLAN_ID + 1;
        void *daddr = NULL;
        u16 eth_hdr_sz;
        u8 *eth_hdr;
        u32 act = 0;
        __be16 *off;
        u8 q_rgn;

        /* a remove needs only the existing rule's index; no header/action */
        if (opc == ice_aqc_opc_remove_sw_rules) {
                s_rule->pdata.lkup_tx_rx.act = 0;
                s_rule->pdata.lkup_tx_rx.index =
                        CPU_TO_LE16(f_info->fltr_rule_id);
                s_rule->pdata.lkup_tx_rx.hdr_len = 0;
                return;
        }

        eth_hdr_sz = sizeof(dummy_eth_header);
        eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;

        /* initialize the ether header with a dummy header */
        ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
        ice_fill_sw_info(hw, f_info);

        /* build the action word from the forwarding type */
        switch (f_info->fltr_act) {
        case ICE_FWD_TO_VSI:
                act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
                        ICE_SINGLE_ACT_VSI_ID_M;
                if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
                        act |= ICE_SINGLE_ACT_VSI_FORWARDING |
                                ICE_SINGLE_ACT_VALID_BIT;
                break;
        case ICE_FWD_TO_VSI_LIST:
                act |= ICE_SINGLE_ACT_VSI_LIST;
                act |= (f_info->fwd_id.vsi_list_id <<
                        ICE_SINGLE_ACT_VSI_LIST_ID_S) &
                        ICE_SINGLE_ACT_VSI_LIST_ID_M;
                if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
                        act |= ICE_SINGLE_ACT_VSI_FORWARDING |
                                ICE_SINGLE_ACT_VALID_BIT;
                break;
        case ICE_FWD_TO_Q:
                act |= ICE_SINGLE_ACT_TO_Q;
                act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
                        ICE_SINGLE_ACT_Q_INDEX_M;
                break;
        case ICE_DROP_PACKET:
                act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
                        ICE_SINGLE_ACT_VALID_BIT;
                break;
        case ICE_FWD_TO_QGRP:
                /* queue region is encoded as log2 of the group size */
                q_rgn = f_info->qgrp_size > 0 ?
                        (u8)ice_ilog2(f_info->qgrp_size) : 0;
                act |= ICE_SINGLE_ACT_TO_Q;
                act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
                        ICE_SINGLE_ACT_Q_INDEX_M;
                act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
                        ICE_SINGLE_ACT_Q_REGION_M;
                break;
        default:
                /* unsupported action: leave the rule unprogrammed */
                return;
        }

        if (f_info->lb_en)
                act |= ICE_SINGLE_ACT_LB_ENABLE;
        if (f_info->lan_en)
                act |= ICE_SINGLE_ACT_LAN_ENABLE;

        /* collect lookup-specific data: destination MAC, VLAN ID, ethertype */
        switch (f_info->lkup_type) {
        case ICE_SW_LKUP_MAC:
                daddr = f_info->l_data.mac.mac_addr;
                break;
        case ICE_SW_LKUP_VLAN:
                vlan_id = f_info->l_data.vlan.vlan_id;
                if (f_info->fltr_act == ICE_FWD_TO_VSI ||
                    f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
                        act |= ICE_SINGLE_ACT_PRUNE;
                        act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
                }
                break;
        case ICE_SW_LKUP_ETHERTYPE_MAC:
                daddr = f_info->l_data.ethertype_mac.mac_addr;
                /* fall-through */
        case ICE_SW_LKUP_ETHERTYPE:
                off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
                *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
                break;
        case ICE_SW_LKUP_MAC_VLAN:
                daddr = f_info->l_data.mac_vlan.mac_addr;
                vlan_id = f_info->l_data.mac_vlan.vlan_id;
                break;
        case ICE_SW_LKUP_PROMISC_VLAN:
                vlan_id = f_info->l_data.mac_vlan.vlan_id;
                /* fall-through */
        case ICE_SW_LKUP_PROMISC:
                daddr = f_info->l_data.mac_vlan.mac_addr;
                break;
        default:
                break;
        }

        s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
                CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
                CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);

        /* Recipe set depending on lookup type */
        s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
        s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
        s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);

        /* patch the destination MAC into the dummy header, if one was set */
        if (daddr)
                ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
                           ICE_NONDMA_TO_NONDMA);

        /* program the VLAN ID only when it is below the sentinel value */
        if (!(vlan_id > ICE_MAX_VLAN_ID)) {
                off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
                *off = CPU_TO_BE16(vlan_id);
        }

        /* Create the switch rule with the final dummy Ethernet header */
        if (opc != ice_aqc_opc_update_sw_rules)
                s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
}
1596
/**
 * ice_add_marker_act
 * @hw: pointer to the hardware structure
 * @m_ent: the management entry for which sw marker needs to be added
 * @sw_marker: sw marker to tag the Rx descriptor with
 * @l_id: large action resource ID
 *
 * Create a large action to hold software marker and update the switch rule
 * entry pointed by m_ent with newly created large action
 *
 * Both the large action and the updated lookup rule are carried in one
 * contiguous buffer and submitted together in a single update AQ call.
 */
static enum ice_status
ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
		   u16 sw_marker, u16 l_id)
{
	struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
	/* For software marker we need 3 large actions
	 * 1. FWD action: FWD TO VSI or VSI LIST
	 * 2. GENERIC VALUE action to hold the profile ID
	 * 3. GENERIC VALUE action to hold the software marker ID
	 */
	const u16 num_lg_acts = 3;
	enum ice_status status;
	u16 lg_act_size;
	u16 rules_size;
	u32 act;
	u16 id;

	/* Only MAC lookup rules support the software marker large action */
	if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	/* Create two back-to-back switch rules and submit them to the HW using
	 * one memory buffer:
	 *    1. Large Action
	 *    2. Look up Tx Rx
	 */
	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
	if (!lg_act)
		return ICE_ERR_NO_MEMORY;

	/* Second rule element starts right after the large action element */
	rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);

	/* Fill in the first switch rule i.e. large action */
	lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
	lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
	lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);

	/* First action VSI forwarding or VSI list forwarding depending on how
	 * many VSIs
	 */
	id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
		m_ent->fltr_info.fwd_id.hw_vsi_id;

	act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
	act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
		ICE_LG_ACT_VSI_LIST_ID_M;
	if (m_ent->vsi_count > 1)
		act |= ICE_LG_ACT_VSI_LIST;
	lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);

	/* Second action descriptor type */
	act = ICE_LG_ACT_GENERIC;

	/* generic value 1 selects the profile used for the Rx descriptor */
	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
	lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);

	/* offset selects where in the Rx descriptor the marker is written */
	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;

	/* Third action Marker value */
	act |= ICE_LG_ACT_GENERIC;
	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
		ICE_LG_ACT_GENERIC_VALUE_M;

	lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);

	/* call the fill switch rule to fill the lookup Tx Rx structure */
	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
			 ice_aqc_opc_update_sw_rules);

	/* Update the action to point to the large action ID */
	rx_tx->pdata.lkup_tx_rx.act =
		CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
			    ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
			     ICE_SINGLE_ACT_PTR_VAL_M));

	/* Use the filter rule ID of the previously created rule with single
	 * act. Once the update happens, hardware will treat this as large
	 * action
	 */
	rx_tx->pdata.lkup_tx_rx.index =
		CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);

	status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
				 ice_aqc_opc_update_sw_rules, NULL);
	if (!status) {
		/* Record the large action binding only on successful update */
		m_ent->lg_act_idx = l_id;
		m_ent->sw_marker_id = sw_marker;
	}

	ice_free(hw, lg_act);
	return status;
}
1701
1702 /**
1703  * ice_add_counter_act - add/update filter rule with counter action
1704  * @hw: pointer to the hardware structure
1705  * @m_ent: the management entry for which counter needs to be added
1706  * @counter_id: VLAN counter ID returned as part of allocate resource
1707  * @l_id: large action resource ID
1708  */
1709 static enum ice_status
1710 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1711                     u16 counter_id, u16 l_id)
1712 {
1713         struct ice_aqc_sw_rules_elem *lg_act;
1714         struct ice_aqc_sw_rules_elem *rx_tx;
1715         enum ice_status status;
1716         /* 2 actions will be added while adding a large action counter */
1717         const int num_acts = 2;
1718         u16 lg_act_size;
1719         u16 rules_size;
1720         u16 f_rule_id;
1721         u32 act;
1722         u16 id;
1723
1724         if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1725                 return ICE_ERR_PARAM;
1726
1727         /* Create two back-to-back switch rules and submit them to the HW using
1728          * one memory buffer:
1729          * 1. Large Action
1730          * 2. Look up Tx Rx
1731          */
1732         lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
1733         rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1734         lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
1735                                                                  rules_size);
1736         if (!lg_act)
1737                 return ICE_ERR_NO_MEMORY;
1738
1739         rx_tx = (struct ice_aqc_sw_rules_elem *)
1740                 ((u8 *)lg_act + lg_act_size);
1741
1742         /* Fill in the first switch rule i.e. large action */
1743         lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1744         lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1745         lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
1746
1747         /* First action VSI forwarding or VSI list forwarding depending on how
1748          * many VSIs
1749          */
1750         id = (m_ent->vsi_count > 1) ?  m_ent->fltr_info.fwd_id.vsi_list_id :
1751                 m_ent->fltr_info.fwd_id.hw_vsi_id;
1752
1753         act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1754         act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1755                 ICE_LG_ACT_VSI_LIST_ID_M;
1756         if (m_ent->vsi_count > 1)
1757                 act |= ICE_LG_ACT_VSI_LIST;
1758         lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1759
1760         /* Second action counter ID */
1761         act = ICE_LG_ACT_STAT_COUNT;
1762         act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
1763                 ICE_LG_ACT_STAT_COUNT_M;
1764         lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1765
1766         /* call the fill switch rule to fill the lookup Tx Rx structure */
1767         ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1768                          ice_aqc_opc_update_sw_rules);
1769
1770         act = ICE_SINGLE_ACT_PTR;
1771         act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
1772         rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1773
1774         /* Use the filter rule ID of the previously created rule with single
1775          * act. Once the update happens, hardware will treat this as large
1776          * action
1777          */
1778         f_rule_id = m_ent->fltr_info.fltr_rule_id;
1779         rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
1780
1781         status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1782                                  ice_aqc_opc_update_sw_rules, NULL);
1783         if (!status) {
1784                 m_ent->lg_act_idx = l_id;
1785                 m_ent->counter_index = counter_id;
1786         }
1787
1788         ice_free(hw, lg_act);
1789         return status;
1790 }
1791
1792 /**
1793  * ice_create_vsi_list_map
1794  * @hw: pointer to the hardware structure
1795  * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1796  * @num_vsi: number of VSI handles in the array
1797  * @vsi_list_id: VSI list ID generated as part of allocate resource
1798  *
1799  * Helper function to create a new entry of VSI list ID to VSI mapping
1800  * using the given VSI list ID
1801  */
1802 static struct ice_vsi_list_map_info *
1803 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1804                         u16 vsi_list_id)
1805 {
1806         struct ice_switch_info *sw = hw->switch_info;
1807         struct ice_vsi_list_map_info *v_map;
1808         int i;
1809
1810         v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
1811                 sizeof(*v_map));
1812         if (!v_map)
1813                 return NULL;
1814
1815         v_map->vsi_list_id = vsi_list_id;
1816         v_map->ref_cnt = 1;
1817         for (i = 0; i < num_vsi; i++)
1818                 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
1819
1820         LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
1821         return v_map;
1822 }
1823
1824 /**
1825  * ice_update_vsi_list_rule
1826  * @hw: pointer to the hardware structure
1827  * @vsi_handle_arr: array of VSI handles to form a VSI list
1828  * @num_vsi: number of VSI handles in the array
1829  * @vsi_list_id: VSI list ID generated as part of allocate resource
1830  * @remove: Boolean value to indicate if this is a remove action
1831  * @opc: switch rules population command type - pass in the command opcode
1832  * @lkup_type: lookup type of the filter
1833  *
1834  * Call AQ command to add a new switch rule or update existing switch rule
1835  * using the given VSI list ID
1836  */
1837 static enum ice_status
1838 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1839                          u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
1840                          enum ice_sw_lkup_type lkup_type)
1841 {
1842         struct ice_aqc_sw_rules_elem *s_rule;
1843         enum ice_status status;
1844         u16 s_rule_size;
1845         u16 type;
1846         int i;
1847
1848         if (!num_vsi)
1849                 return ICE_ERR_PARAM;
1850
1851         if (lkup_type == ICE_SW_LKUP_MAC ||
1852             lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1853             lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1854             lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1855             lkup_type == ICE_SW_LKUP_PROMISC ||
1856             lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1857             lkup_type == ICE_SW_LKUP_LAST)
1858                 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
1859                                 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
1860         else if (lkup_type == ICE_SW_LKUP_VLAN)
1861                 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1862                                 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1863         else
1864                 return ICE_ERR_PARAM;
1865
1866         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1867         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
1868         if (!s_rule)
1869                 return ICE_ERR_NO_MEMORY;
1870         for (i = 0; i < num_vsi; i++) {
1871                 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
1872                         status = ICE_ERR_PARAM;
1873                         goto exit;
1874                 }
1875                 /* AQ call requires hw_vsi_id(s) */
1876                 s_rule->pdata.vsi_list.vsi[i] =
1877                         CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
1878         }
1879
1880         s_rule->type = CPU_TO_LE16(type);
1881         s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
1882         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
1883
1884         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
1885
1886 exit:
1887         ice_free(hw, s_rule);
1888         return status;
1889 }
1890
1891 /**
1892  * ice_create_vsi_list_rule - Creates and populates a VSI list rule
1893  * @hw: pointer to the HW struct
1894  * @vsi_handle_arr: array of VSI handles to form a VSI list
1895  * @num_vsi: number of VSI handles in the array
1896  * @vsi_list_id: stores the ID of the VSI list to be created
1897  * @lkup_type: switch rule filter's lookup type
1898  */
1899 static enum ice_status
1900 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1901                          u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
1902 {
1903         enum ice_status status;
1904
1905         status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
1906                                             ice_aqc_opc_alloc_res);
1907         if (status)
1908                 return status;
1909
1910         /* Update the newly created VSI list to include the specified VSIs */
1911         return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
1912                                         *vsi_list_id, false,
1913                                         ice_aqc_opc_add_sw_rules, lkup_type);
1914 }
1915
1916 /**
1917  * ice_create_pkt_fwd_rule
1918  * @hw: pointer to the hardware structure
1919  * @f_entry: entry containing packet forwarding information
1920  *
1921  * Create switch rule with given filter information and add an entry
1922  * to the corresponding filter management list to track this switch rule
1923  * and VSI mapping
1924  */
1925 static enum ice_status
1926 ice_create_pkt_fwd_rule(struct ice_hw *hw,
1927                         struct ice_fltr_list_entry *f_entry)
1928 {
1929         struct ice_fltr_mgmt_list_entry *fm_entry;
1930         struct ice_aqc_sw_rules_elem *s_rule;
1931         enum ice_sw_lkup_type l_type;
1932         struct ice_sw_recipe *recp;
1933         enum ice_status status;
1934
1935         s_rule = (struct ice_aqc_sw_rules_elem *)
1936                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
1937         if (!s_rule)
1938                 return ICE_ERR_NO_MEMORY;
1939         fm_entry = (struct ice_fltr_mgmt_list_entry *)
1940                    ice_malloc(hw, sizeof(*fm_entry));
1941         if (!fm_entry) {
1942                 status = ICE_ERR_NO_MEMORY;
1943                 goto ice_create_pkt_fwd_rule_exit;
1944         }
1945
1946         fm_entry->fltr_info = f_entry->fltr_info;
1947
1948         /* Initialize all the fields for the management entry */
1949         fm_entry->vsi_count = 1;
1950         fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
1951         fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
1952         fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
1953
1954         ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
1955                          ice_aqc_opc_add_sw_rules);
1956
1957         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
1958                                  ice_aqc_opc_add_sw_rules, NULL);
1959         if (status) {
1960                 ice_free(hw, fm_entry);
1961                 goto ice_create_pkt_fwd_rule_exit;
1962         }
1963
1964         f_entry->fltr_info.fltr_rule_id =
1965                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
1966         fm_entry->fltr_info.fltr_rule_id =
1967                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
1968
1969         /* The book keeping entries will get removed when base driver
1970          * calls remove filter AQ command
1971          */
1972         l_type = fm_entry->fltr_info.lkup_type;
1973         recp = &hw->switch_info->recp_list[l_type];
1974         LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
1975
1976 ice_create_pkt_fwd_rule_exit:
1977         ice_free(hw, s_rule);
1978         return status;
1979 }
1980
1981 /**
1982  * ice_update_pkt_fwd_rule
1983  * @hw: pointer to the hardware structure
1984  * @f_info: filter information for switch rule
1985  *
1986  * Call AQ command to update a previously created switch rule with a
1987  * VSI list ID
1988  */
1989 static enum ice_status
1990 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
1991 {
1992         struct ice_aqc_sw_rules_elem *s_rule;
1993         enum ice_status status;
1994
1995         s_rule = (struct ice_aqc_sw_rules_elem *)
1996                 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
1997         if (!s_rule)
1998                 return ICE_ERR_NO_MEMORY;
1999
2000         ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
2001
2002         s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2003
2004         /* Update switch rule with new rule set to forward VSI list */
2005         status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2006                                  ice_aqc_opc_update_sw_rules, NULL);
2007
2008         ice_free(hw, s_rule);
2009         return status;
2010 }
2011
2012 /**
2013  * ice_update_sw_rule_bridge_mode
2014  * @hw: pointer to the HW struct
2015  *
2016  * Updates unicast switch filter rules based on VEB/VEPA mode
2017  */
2018 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2019 {
2020         struct ice_switch_info *sw = hw->switch_info;
2021         struct ice_fltr_mgmt_list_entry *fm_entry;
2022         enum ice_status status = ICE_SUCCESS;
2023         struct LIST_HEAD_TYPE *rule_head;
2024         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2025
2026         rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2027         rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2028
2029         ice_acquire_lock(rule_lock);
2030         LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2031                             list_entry) {
2032                 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2033                 u8 *addr = fi->l_data.mac.mac_addr;
2034
2035                 /* Update unicast Tx rules to reflect the selected
2036                  * VEB/VEPA mode
2037                  */
2038                 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2039                     (fi->fltr_act == ICE_FWD_TO_VSI ||
2040                      fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2041                      fi->fltr_act == ICE_FWD_TO_Q ||
2042                      fi->fltr_act == ICE_FWD_TO_QGRP)) {
2043                         status = ice_update_pkt_fwd_rule(hw, fi);
2044                         if (status)
2045                                 break;
2046                 }
2047         }
2048
2049         ice_release_lock(rule_lock);
2050
2051         return status;
2052 }
2053
/**
 * ice_add_update_vsi_list
 * @hw: pointer to the hardware structure
 * @m_entry: pointer to current filter management list entry
 * @cur_fltr: filter information from the book keeping entry
 * @new_fltr: filter information with the new VSI to be added
 *
 * Call AQ command to add or update previously created VSI list with new VSI.
 *
 * Helper function to do book keeping associated with adding filter information
 * The algorithm to do the book keeping is described below :
 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *      if only one VSI has been added till now
 *              Allocate a new VSI list and add two VSIs
 *              to this list using switch rule command
 *              Update the previously created switch rule with the
 *              newly created VSI list ID
 *      if a VSI list was previously created
 *              Add the new VSI to the previously created VSI list set
 *              using the update switch rule command
 */
static enum ice_status
ice_add_update_vsi_list(struct ice_hw *hw,
			struct ice_fltr_mgmt_list_entry *m_entry,
			struct ice_fltr_info *cur_fltr,
			struct ice_fltr_info *new_fltr)
{
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id = 0;

	/* Queue-directed rules cannot be converted into VSI-list rules */
	if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
	     cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
		return ICE_ERR_NOT_IMPL;

	/* Mixing a queue-directed request with an existing VSI forward is
	 * likewise unsupported
	 */
	if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
	     new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
	    (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
	     cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
		return ICE_ERR_NOT_IMPL;

	if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
		/* Only one entry existed in the mapping and it was not already
		 * a part of a VSI list. So, create a VSI list with the old and
		 * new VSIs.
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];

		/* A rule already exists with the new VSI being added */
		if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
			return ICE_ERR_ALREADY_EXISTS;

		vsi_handle_arr[0] = cur_fltr->vsi_handle;
		vsi_handle_arr[1] = new_fltr->vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id,
						  new_fltr->lkup_type);
		if (status)
			return status;

		/* Retarget the existing rule (keeping its rule ID) at the new
		 * VSI list
		 */
		tmp_fltr = *new_fltr;
		tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		/* Update the previous switch rule of "MAC forward to VSI" to
		 * "MAC fwd to VSI list"
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			return status;

		/* Keep the bookkeeping entry in sync with the programmed rule */
		cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
		cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
		m_entry->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);

		/* If this entry was large action then the large action needs
		 * to be updated to point to FWD to VSI list
		 */
		if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
			status =
			    ice_add_marker_act(hw, m_entry,
					       m_entry->sw_marker_id,
					       m_entry->lg_act_idx);
	} else {
		u16 vsi_handle = new_fltr->vsi_handle;
		enum ice_adminq_opc opcode;

		/* vsi_count >= 2 implies a VSI list must already exist */
		if (!m_entry->vsi_list_info)
			return ICE_ERR_CFG;

		/* A rule already exists with the new VSI being added */
		if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
			return ICE_SUCCESS;

		/* Update the previously created VSI list set with
		 * the new VSI ID passed in
		 */
		vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
		opcode = ice_aqc_opc_update_sw_rules;

		status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
						  vsi_list_id, false, opcode,
						  new_fltr->lkup_type);
		/* update VSI list mapping info with new VSI ID */
		if (!status)
			ice_set_bit(vsi_handle,
				    m_entry->vsi_list_info->vsi_map);
	}
	/* Count the subscriber only when the hardware update succeeded */
	if (!status)
		m_entry->vsi_count++;
	return status;
}
2168
2169 /**
2170  * ice_find_rule_entry - Search a rule entry
2171  * @hw: pointer to the hardware structure
2172  * @recp_id: lookup type for which the specified rule needs to be searched
2173  * @f_info: rule information
2174  *
2175  * Helper function to search for a given rule entry
2176  * Returns pointer to entry storing the rule if found
2177  */
2178 static struct ice_fltr_mgmt_list_entry *
2179 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2180 {
2181         struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2182         struct ice_switch_info *sw = hw->switch_info;
2183         struct LIST_HEAD_TYPE *list_head;
2184
2185         list_head = &sw->recp_list[recp_id].filt_rules;
2186         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2187                             list_entry) {
2188                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2189                             sizeof(f_info->l_data)) &&
2190                     f_info->flag == list_itr->fltr_info.flag) {
2191                         ret = list_itr;
2192                         break;
2193                 }
2194         }
2195         return ret;
2196 }
2197
2198 /**
2199  * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2200  * @hw: pointer to the hardware structure
2201  * @recp_id: lookup type for which VSI lists needs to be searched
2202  * @vsi_handle: VSI handle to be found in VSI list
2203  * @vsi_list_id: VSI list ID found containing vsi_handle
2204  *
2205  * Helper function to search a VSI list with single entry containing given VSI
2206  * handle element. This can be extended further to search VSI list with more
2207  * than 1 vsi_count. Returns pointer to VSI list entry if found.
2208  */
2209 static struct ice_vsi_list_map_info *
2210 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2211                         u16 *vsi_list_id)
2212 {
2213         struct ice_vsi_list_map_info *map_info = NULL;
2214         struct ice_switch_info *sw = hw->switch_info;
2215         struct LIST_HEAD_TYPE *list_head;
2216
2217         list_head = &sw->recp_list[recp_id].filt_rules;
2218         if (sw->recp_list[recp_id].adv_rule) {
2219                 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2220
2221                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2222                                     ice_adv_fltr_mgmt_list_entry,
2223                                     list_entry) {
2224                         if (list_itr->vsi_list_info) {
2225                                 map_info = list_itr->vsi_list_info;
2226                                 if (ice_is_bit_set(map_info->vsi_map,
2227                                                    vsi_handle)) {
2228                                         *vsi_list_id = map_info->vsi_list_id;
2229                                         return map_info;
2230                                 }
2231                         }
2232                 }
2233         } else {
2234                 struct ice_fltr_mgmt_list_entry *list_itr;
2235
2236                 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2237                                     ice_fltr_mgmt_list_entry,
2238                                     list_entry) {
2239                         if (list_itr->vsi_count == 1 &&
2240                             list_itr->vsi_list_info) {
2241                                 map_info = list_itr->vsi_list_info;
2242                                 if (ice_is_bit_set(map_info->vsi_map,
2243                                                    vsi_handle)) {
2244                                         *vsi_list_id = map_info->vsi_list_id;
2245                                         return map_info;
2246                                 }
2247                         }
2248                 }
2249         }
2250         return NULL;
2251 }
2252
2253 /**
2254  * ice_add_rule_internal - add rule for a given lookup type
2255  * @hw: pointer to the hardware structure
2256  * @recp_id: lookup type (recipe ID) for which rule has to be added
2257  * @f_entry: structure containing MAC forwarding information
2258  *
2259  * Adds or updates the rule lists for a given recipe
2260  */
2261 static enum ice_status
2262 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2263                       struct ice_fltr_list_entry *f_entry)
2264 {
2265         struct ice_switch_info *sw = hw->switch_info;
2266         struct ice_fltr_info *new_fltr, *cur_fltr;
2267         struct ice_fltr_mgmt_list_entry *m_entry;
2268         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2269         enum ice_status status = ICE_SUCCESS;
2270
2271         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2272                 return ICE_ERR_PARAM;
2273
2274         /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2275         if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2276                 f_entry->fltr_info.fwd_id.hw_vsi_id =
2277                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2278
2279         rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2280
2281         ice_acquire_lock(rule_lock);
2282         new_fltr = &f_entry->fltr_info;
2283         if (new_fltr->flag & ICE_FLTR_RX)
2284                 new_fltr->src = hw->port_info->lport;
2285         else if (new_fltr->flag & ICE_FLTR_TX)
2286                 new_fltr->src =
2287                         ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2288
2289         m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2290         if (!m_entry) {
2291                 status = ice_create_pkt_fwd_rule(hw, f_entry);
2292                 goto exit_add_rule_internal;
2293         }
2294
2295         cur_fltr = &m_entry->fltr_info;
2296         status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2297
2298 exit_add_rule_internal:
2299         ice_release_lock(rule_lock);
2300         return status;
2301 }
2302
2303 /**
2304  * ice_remove_vsi_list_rule
2305  * @hw: pointer to the hardware structure
2306  * @vsi_list_id: VSI list ID generated as part of allocate resource
2307  * @lkup_type: switch rule filter lookup type
2308  *
2309  * The VSI list should be emptied before this function is called to remove the
2310  * VSI list.
2311  */
2312 static enum ice_status
2313 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2314                          enum ice_sw_lkup_type lkup_type)
2315 {
2316         struct ice_aqc_sw_rules_elem *s_rule;
2317         enum ice_status status;
2318         u16 s_rule_size;
2319
2320         s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2321         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2322         if (!s_rule)
2323                 return ICE_ERR_NO_MEMORY;
2324
2325         s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2326         s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2327
2328         /* Free the vsi_list resource that we allocated. It is assumed that the
2329          * list is empty at this point.
2330          */
2331         status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2332                                             ice_aqc_opc_free_res);
2333
2334         ice_free(hw, s_rule);
2335         return status;
2336 }
2337
/**
 * ice_rem_update_vsi_list
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle of the VSI to remove
 * @fm_list: filter management entry for which the VSI list management needs to
 *           be done
 *
 * Removes @vsi_handle from the VSI list that @fm_list's rule forwards to.
 * For non-VLAN rules, when exactly one VSI remains the rule is converted
 * back to a direct forward-to-VSI action and the VSI list is freed. VLAN
 * rules keep their VSI list until it becomes completely empty.
 */
static enum ice_status
ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
			struct ice_fltr_mgmt_list_entry *fm_list)
{
	enum ice_sw_lkup_type lkup_type;
	enum ice_status status = ICE_SUCCESS;
	u16 vsi_list_id;

	/* Only rules that forward to a non-empty VSI list can be updated */
	if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
	    fm_list->vsi_count == 0)
		return ICE_ERR_PARAM;

	/* A rule with the VSI being removed does not exist */
	if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
		return ICE_ERR_DOES_NOT_EXIST;

	lkup_type = fm_list->fltr_info.lkup_type;
	vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
	/* Ask FW to drop this VSI from the list (remove flag = true) */
	status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
					  ice_aqc_opc_update_sw_rules,
					  lkup_type);
	if (status)
		return status;

	/* Mirror the FW update in the driver's book-keeping state */
	fm_list->vsi_count--;
	ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);

	if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
		struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;
		u16 rem_vsi_handle;

		/* Locate the single VSI left in the list */
		rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
						    ICE_MAX_VSI);
		if (!ice_is_vsi_valid(hw, rem_vsi_handle))
			return ICE_ERR_OUT_OF_RANGE;

		/* Make sure VSI list is empty before removing it below */
		status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
						  vsi_list_id, true,
						  ice_aqc_opc_update_sw_rules,
						  lkup_type);
		if (status)
			return status;

		/* Rewrite the rule to forward directly to the last VSI */
		tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
		tmp_fltr_info.fwd_id.hw_vsi_id =
			ice_get_hw_vsi_num(hw, rem_vsi_handle);
		tmp_fltr_info.vsi_handle = rem_vsi_handle;
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
				  tmp_fltr_info.fwd_id.hw_vsi_id, status);
			return status;
		}

		fm_list->fltr_info = tmp_fltr_info;
	}

	if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
	    (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
		struct ice_vsi_list_map_info *vsi_list_info =
			fm_list->vsi_list_info;

		/* Remove the VSI list since it is no longer used */
		status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
		if (status) {
			ice_debug(hw, ICE_DBG_SW,
				  "Failed to remove VSI list %d, error %d\n",
				  vsi_list_id, status);
			return status;
		}

		LIST_DEL(&vsi_list_info->list_entry);
		ice_free(hw, vsi_list_info);
		fm_list->vsi_list_info = NULL;
	}

	return status;
}
2427
2428 /**
2429  * ice_remove_rule_internal - Remove a filter rule of a given type
2430  *
2431  * @hw: pointer to the hardware structure
2432  * @recp_id: recipe ID for which the rule needs to removed
2433  * @f_entry: rule entry containing filter information
2434  */
2435 static enum ice_status
2436 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2437                          struct ice_fltr_list_entry *f_entry)
2438 {
2439         struct ice_switch_info *sw = hw->switch_info;
2440         struct ice_fltr_mgmt_list_entry *list_elem;
2441         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2442         enum ice_status status = ICE_SUCCESS;
2443         bool remove_rule = false;
2444         u16 vsi_handle;
2445
2446         if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2447                 return ICE_ERR_PARAM;
2448         f_entry->fltr_info.fwd_id.hw_vsi_id =
2449                 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2450
2451         rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2452         ice_acquire_lock(rule_lock);
2453         list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2454         if (!list_elem) {
2455                 status = ICE_ERR_DOES_NOT_EXIST;
2456                 goto exit;
2457         }
2458
2459         if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2460                 remove_rule = true;
2461         } else if (!list_elem->vsi_list_info) {
2462                 status = ICE_ERR_DOES_NOT_EXIST;
2463                 goto exit;
2464         } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2465                 /* a ref_cnt > 1 indicates that the vsi_list is being
2466                  * shared by multiple rules. Decrement the ref_cnt and
2467                  * remove this rule, but do not modify the list, as it
2468                  * is in-use by other rules.
2469                  */
2470                 list_elem->vsi_list_info->ref_cnt--;
2471                 remove_rule = true;
2472         } else {
2473                 /* a ref_cnt of 1 indicates the vsi_list is only used
2474                  * by one rule. However, the original removal request is only
2475                  * for a single VSI. Update the vsi_list first, and only
2476                  * remove the rule if there are no further VSIs in this list.
2477                  */
2478                 vsi_handle = f_entry->fltr_info.vsi_handle;
2479                 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2480                 if (status)
2481                         goto exit;
2482                 /* if VSI count goes to zero after updating the VSI list */
2483                 if (list_elem->vsi_count == 0)
2484                         remove_rule = true;
2485         }
2486
2487         if (remove_rule) {
2488                 /* Remove the lookup rule */
2489                 struct ice_aqc_sw_rules_elem *s_rule;
2490
2491                 s_rule = (struct ice_aqc_sw_rules_elem *)
2492                         ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE);
2493                 if (!s_rule) {
2494                         status = ICE_ERR_NO_MEMORY;
2495                         goto exit;
2496                 }
2497
2498                 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2499                                  ice_aqc_opc_remove_sw_rules);
2500
2501                 status = ice_aq_sw_rules(hw, s_rule,
2502                                          ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2503                                          ice_aqc_opc_remove_sw_rules, NULL);
2504                 if (status)
2505                         goto exit;
2506
2507                 /* Remove a book keeping from the list */
2508                 ice_free(hw, s_rule);
2509
2510                 LIST_DEL(&list_elem->list_entry);
2511                 ice_free(hw, list_elem);
2512         }
2513 exit:
2514         ice_release_lock(rule_lock);
2515         return status;
2516 }
2517
2518 /**
2519  * ice_aq_get_res_alloc - get allocated resources
2520  * @hw: pointer to the HW struct
2521  * @num_entries: pointer to u16 to store the number of resource entries returned
2522  * @buf: pointer to user-supplied buffer
2523  * @buf_size: size of buff
2524  * @cd: pointer to command details structure or NULL
2525  *
2526  * The user-supplied buffer must be large enough to store the resource
2527  * information for all resource types. Each resource type is an
2528  * ice_aqc_get_res_resp_data_elem structure.
2529  */
2530 enum ice_status
2531 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2532                      u16 buf_size, struct ice_sq_cd *cd)
2533 {
2534         struct ice_aqc_get_res_alloc *resp;
2535         enum ice_status status;
2536         struct ice_aq_desc desc;
2537
2538         if (!buf)
2539                 return ICE_ERR_BAD_PTR;
2540
2541         if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2542                 return ICE_ERR_INVAL_SIZE;
2543
2544         resp = &desc.params.get_res;
2545
2546         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2547         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2548
2549         if (!status && num_entries)
2550                 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2551
2552         return status;
2553 }
2554
2555 /**
2556  * ice_aq_get_res_descs - get allocated resource descriptors
2557  * @hw: pointer to the hardware structure
2558  * @num_entries: number of resource entries in buffer
2559  * @buf: Indirect buffer to hold data parameters and response
2560  * @buf_size: size of buffer for indirect commands
2561  * @res_type: resource type
2562  * @res_shared: is resource shared
2563  * @desc_id: input - first desc ID to start; output - next desc ID
2564  * @cd: pointer to command details structure or NULL
2565  */
2566 enum ice_status
2567 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2568                      struct ice_aqc_get_allocd_res_desc_resp *buf,
2569                      u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2570                      struct ice_sq_cd *cd)
2571 {
2572         struct ice_aqc_get_allocd_res_desc *cmd;
2573         struct ice_aq_desc desc;
2574         enum ice_status status;
2575
2576         ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_res_descs");
2577
2578         cmd = &desc.params.get_res_desc;
2579
2580         if (!buf)
2581                 return ICE_ERR_PARAM;
2582
2583         if (buf_size != (num_entries * sizeof(*buf)))
2584                 return ICE_ERR_PARAM;
2585
2586         ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
2587
2588         cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2589                                          ICE_AQC_RES_TYPE_M) | (res_shared ?
2590                                         ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2591         cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2592
2593         desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2594
2595         status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2596         if (!status)
2597                 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2598
2599         return status;
2600 }
2601
/**
 * ice_add_mac - Add a MAC address based filter rule
 * @hw: pointer to the hardware structure
 * @m_list: list of MAC addresses and forwarding information
 *
 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
 * multiple unicast addresses, the function assumes that all the
 * addresses are unique in a given add_mac call. It doesn't
 * check for duplicates in this case, removing duplicates from a given
 * list should be taken care of in the caller of this function.
 *
 * Multicast (and shared unicast) entries are programmed individually via
 * ice_add_rule_internal(); exclusive unicast entries are collected and
 * programmed in bulk with a single buffer split into AQ-sized chunks.
 */
enum ice_status
ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
{
	struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
	struct ice_fltr_list_entry *m_list_itr;
	struct LIST_HEAD_TYPE *rule_head;
	u16 elem_sent, total_elem_left;
	struct ice_switch_info *sw;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;
	u16 num_unicast = 0;
	u16 s_rule_size;

	if (!m_list || !hw)
		return ICE_ERR_PARAM;
	s_rule = NULL;
	sw = hw->switch_info;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	/* First pass: validate each entry, add multicast/shared-unicast
	 * rules one at a time, and count exclusive unicast addresses for
	 * the bulk programming below.
	 */
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
		u16 vsi_handle;
		u16 hw_vsi_id;

		m_list_itr->fltr_info.flag = ICE_FLTR_TX;
		vsi_handle = m_list_itr->fltr_info.vsi_handle;
		if (!ice_is_vsi_valid(hw, vsi_handle))
			return ICE_ERR_PARAM;
		hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
		m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
		/* update the src in case it is VSI num */
		if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
			return ICE_ERR_PARAM;
		m_list_itr->fltr_info.src = hw_vsi_id;
		if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
		    IS_ZERO_ETHER_ADDR(add))
			return ICE_ERR_PARAM;
		if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
			/* Don't overwrite the unicast address */
			ice_acquire_lock(rule_lock);
			if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
						&m_list_itr->fltr_info)) {
				ice_release_lock(rule_lock);
				return ICE_ERR_ALREADY_EXISTS;
			}
			ice_release_lock(rule_lock);
			num_unicast++;
		} else if (IS_MULTICAST_ETHER_ADDR(add) ||
			   (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
			m_list_itr->status =
				ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
						      m_list_itr);
			if (m_list_itr->status)
				return m_list_itr->status;
		}
	}

	/* Hold the lock across the bulk add and the book-keeping update */
	ice_acquire_lock(rule_lock);
	/* Exit if no suitable entries were found for adding bulk switch rule */
	if (!num_unicast) {
		status = ICE_SUCCESS;
		goto ice_add_mac_exit;
	}

	rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;

	/* Allocate switch rule buffer for the bulk update for unicast */
	s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
	s_rule = (struct ice_aqc_sw_rules_elem *)
		ice_calloc(hw, num_unicast, s_rule_size);
	if (!s_rule) {
		status = ICE_ERR_NO_MEMORY;
		goto ice_add_mac_exit;
	}

	/* Second pass: fill one switch rule per exclusive unicast address */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
					 ice_aqc_opc_add_sw_rules);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

	/* Call AQ bulk switch rule update for all unicast addresses */
	r_iter = s_rule;
	/* Call AQ switch rule in AQ_MAX chunk */
	for (total_elem_left = num_unicast; total_elem_left > 0;
	     total_elem_left -= elem_sent) {
		struct ice_aqc_sw_rules_elem *entry = r_iter;

		/* Send as many rules as fit into one max-size AQ buffer */
		elem_sent = min(total_elem_left,
				(u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
		status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
					 elem_sent, ice_aqc_opc_add_sw_rules,
					 NULL);
		if (status)
			goto ice_add_mac_exit;
		r_iter = (struct ice_aqc_sw_rules_elem *)
			((u8 *)r_iter + (elem_sent * s_rule_size));
	}

	/* Fill up rule ID based on the value returned from FW */
	r_iter = s_rule;
	LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
			    list_entry) {
		struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
		u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
		struct ice_fltr_mgmt_list_entry *fm_entry;

		if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
			f_info->fltr_rule_id =
				LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
			f_info->fltr_act = ICE_FWD_TO_VSI;
			/* Create an entry to track this MAC address */
			fm_entry = (struct ice_fltr_mgmt_list_entry *)
				ice_malloc(hw, sizeof(*fm_entry));
			if (!fm_entry) {
				status = ICE_ERR_NO_MEMORY;
				goto ice_add_mac_exit;
			}
			fm_entry->fltr_info = *f_info;
			fm_entry->vsi_count = 1;
			/* The book keeping entries will get removed when
			 * base driver calls remove filter AQ command
			 */

			LIST_ADD(&fm_entry->list_entry, rule_head);
			r_iter = (struct ice_aqc_sw_rules_elem *)
				((u8 *)r_iter + s_rule_size);
		}
	}

ice_add_mac_exit:
	ice_release_lock(rule_lock);
	if (s_rule)
		ice_free(hw, s_rule);
	return status;
}
2757
/**
 * ice_add_vlan_internal - Add one VLAN based filter rule
 * @hw: pointer to the hardware structure
 * @f_entry: filter entry containing one VLAN information
 *
 * All VLAN pruning rules forward to a VSI list. A new rule either reuses an
 * existing VSI list that already contains the target VSI, or allocates a new
 * one. When a matching VLAN rule already exists, its VSI list is either
 * extended in place (if used by only that rule) or replaced by a freshly
 * created two-VSI list (if the list is shared with other rules).
 */
static enum ice_status
ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *v_list_itr;
	struct ice_fltr_info *new_fltr, *cur_fltr;
	enum ice_sw_lkup_type lkup_type;
	u16 vsi_list_id = 0, vsi_handle;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	enum ice_status status = ICE_SUCCESS;

	if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
		return ICE_ERR_PARAM;

	f_entry->fltr_info.fwd_id.hw_vsi_id =
		ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
	new_fltr = &f_entry->fltr_info;

	/* VLAN ID should only be 12 bits */
	if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
		return ICE_ERR_PARAM;

	if (new_fltr->src_id != ICE_SRC_ID_VSI)
		return ICE_ERR_PARAM;

	new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
	lkup_type = new_fltr->lkup_type;
	vsi_handle = new_fltr->vsi_handle;
	rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	ice_acquire_lock(rule_lock);
	v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
	if (!v_list_itr) {
		/* No rule exists yet for this VLAN filter */
		struct ice_vsi_list_map_info *map_info = NULL;

		if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
			/* All VLAN pruning rules use a VSI list. Check if
			 * there is already a VSI list containing VSI that we
			 * want to add. If found, use the same vsi_list_id for
			 * this new VLAN rule or else create a new list.
			 */
			map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
							   vsi_handle,
							   &vsi_list_id);
			if (!map_info) {
				status = ice_create_vsi_list_rule(hw,
								  &vsi_handle,
								  1,
								  &vsi_list_id,
								  lkup_type);
				if (status)
					goto exit;
			}
			/* Convert the action to forwarding to a VSI list. */
			new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
			new_fltr->fwd_id.vsi_list_id = vsi_list_id;
		}

		status = ice_create_pkt_fwd_rule(hw, f_entry);
		if (!status) {
			/* Look the freshly created rule back up so its
			 * management entry can record the VSI list mapping.
			 */
			v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
							 new_fltr);
			if (!v_list_itr) {
				status = ICE_ERR_DOES_NOT_EXIST;
				goto exit;
			}
			/* reuse VSI list for new rule and increment ref_cnt */
			if (map_info) {
				v_list_itr->vsi_list_info = map_info;
				map_info->ref_cnt++;
			} else {
				v_list_itr->vsi_list_info =
					ice_create_vsi_list_map(hw, &vsi_handle,
								1, vsi_list_id);
			}
		}
	} else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
		/* Update existing VSI list to add new VSI ID only if it used
		 * by one VLAN rule.
		 */
		cur_fltr = &v_list_itr->fltr_info;
		status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
						 new_fltr);
	} else {
		/* If VLAN rule exists and VSI list being used by this rule is
		 * referenced by more than 1 VLAN rule. Then create a new VSI
		 * list appending previous VSI with new VSI and update existing
		 * VLAN rule to point to new VSI list ID
		 */
		struct ice_fltr_info tmp_fltr;
		u16 vsi_handle_arr[2];
		u16 cur_handle;

		/* Current implementation only supports reusing VSI list with
		 * one VSI count. We should never hit below condition
		 */
		if (v_list_itr->vsi_count > 1 &&
		    v_list_itr->vsi_list_info->ref_cnt > 1) {
			ice_debug(hw, ICE_DBG_SW,
				  "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
			status = ICE_ERR_CFG;
			goto exit;
		}

		/* The shared list holds exactly one VSI at this point */
		cur_handle =
			ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
					   ICE_MAX_VSI);

		/* A rule already exists with the new VSI being added */
		if (cur_handle == vsi_handle) {
			status = ICE_ERR_ALREADY_EXISTS;
			goto exit;
		}

		vsi_handle_arr[0] = cur_handle;
		vsi_handle_arr[1] = vsi_handle;
		status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
						  &vsi_list_id, lkup_type);
		if (status)
			goto exit;

		tmp_fltr = v_list_itr->fltr_info;
		tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
		/* Update the previous switch rule to a new VSI list which
		 * includes current VSI that is requested
		 */
		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
		if (status)
			goto exit;

		/* before overriding VSI list map info. decrement ref_cnt of
		 * previous VSI list
		 */
		v_list_itr->vsi_list_info->ref_cnt--;

		/* now update to newly created list */
		v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
		v_list_itr->vsi_list_info =
			ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
						vsi_list_id);
		v_list_itr->vsi_count++;
	}

exit:
	ice_release_lock(rule_lock);
	return status;
}
2911
2912 /**
2913  * ice_add_vlan - Add VLAN based filter rule
2914  * @hw: pointer to the hardware structure
2915  * @v_list: list of VLAN entries and forwarding information
2916  */
2917 enum ice_status
2918 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
2919 {
2920         struct ice_fltr_list_entry *v_list_itr;
2921
2922         if (!v_list || !hw)
2923                 return ICE_ERR_PARAM;
2924
2925         LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
2926                             list_entry) {
2927                 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
2928                         return ICE_ERR_PARAM;
2929                 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
2930                 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
2931                 if (v_list_itr->status)
2932                         return v_list_itr->status;
2933         }
2934         return ICE_SUCCESS;
2935 }
2936
2937 #ifndef NO_MACVLAN_SUPPORT
2938 /**
2939  * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
2940  * @hw: pointer to the hardware structure
2941  * @mv_list: list of MAC and VLAN filters
2942  *
2943  * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
2944  * pruning bits enabled, then it is the responsibility of the caller to make
2945  * sure to add a VLAN only filter on the same VSI. Packets belonging to that
2946  * VLAN won't be received on that VSI otherwise.
2947  */
2948 enum ice_status
2949 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
2950 {
2951         struct ice_fltr_list_entry *mv_list_itr;
2952
2953         if (!mv_list || !hw)
2954                 return ICE_ERR_PARAM;
2955
2956         LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
2957                             list_entry) {
2958                 enum ice_sw_lkup_type l_type =
2959                         mv_list_itr->fltr_info.lkup_type;
2960
2961                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
2962                         return ICE_ERR_PARAM;
2963                 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
2964                 mv_list_itr->status =
2965                         ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
2966                                               mv_list_itr);
2967                 if (mv_list_itr->status)
2968                         return mv_list_itr->status;
2969         }
2970         return ICE_SUCCESS;
2971 }
2972 #endif
2973
2974 /**
2975  * ice_add_eth_mac - Add ethertype and MAC based filter rule
2976  * @hw: pointer to the hardware structure
2977  * @em_list: list of ether type MAC filter, MAC is optional
2978  *
2979  * This function requires the caller to populate the entries in
2980  * the filter list with the necessary fields (including flags to
2981  * indicate Tx or Rx rules).
2982  */
2983 enum ice_status
2984 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
2985 {
2986         struct ice_fltr_list_entry *em_list_itr;
2987
2988         if (!em_list || !hw)
2989                 return ICE_ERR_PARAM;
2990
2991         LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
2992                             list_entry) {
2993                 enum ice_sw_lkup_type l_type =
2994                         em_list_itr->fltr_info.lkup_type;
2995
2996                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
2997                     l_type != ICE_SW_LKUP_ETHERTYPE)
2998                         return ICE_ERR_PARAM;
2999
3000                 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3001                                                             em_list_itr);
3002                 if (em_list_itr->status)
3003                         return em_list_itr->status;
3004         }
3005         return ICE_SUCCESS;
3006 }
3007
3008 /**
3009  * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3010  * @hw: pointer to the hardware structure
3011  * @em_list: list of ethertype or ethertype MAC entries
3012  */
3013 enum ice_status
3014 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3015 {
3016         struct ice_fltr_list_entry *em_list_itr, *tmp;
3017
3018         if (!em_list || !hw)
3019                 return ICE_ERR_PARAM;
3020
3021         LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3022                                  list_entry) {
3023                 enum ice_sw_lkup_type l_type =
3024                         em_list_itr->fltr_info.lkup_type;
3025
3026                 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3027                     l_type != ICE_SW_LKUP_ETHERTYPE)
3028                         return ICE_ERR_PARAM;
3029
3030                 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3031                                                                em_list_itr);
3032                 if (em_list_itr->status)
3033                         return em_list_itr->status;
3034         }
3035         return ICE_SUCCESS;
3036 }
3037
3038
3039 /**
3040  * ice_rem_sw_rule_info
3041  * @hw: pointer to the hardware structure
3042  * @rule_head: pointer to the switch list structure that we want to delete
3043  */
3044 static void
3045 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3046 {
3047         if (!LIST_EMPTY(rule_head)) {
3048                 struct ice_fltr_mgmt_list_entry *entry;
3049                 struct ice_fltr_mgmt_list_entry *tmp;
3050
3051                 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3052                                          ice_fltr_mgmt_list_entry, list_entry) {
3053                         LIST_DEL(&entry->list_entry);
3054                         ice_free(hw, entry);
3055                 }
3056         }
3057 }
3058
3059 /**
3060  * ice_rem_adv_rule_info
3061  * @hw: pointer to the hardware structure
3062  * @rule_head: pointer to the switch list structure that we want to delete
3063  */
3064 static void
3065 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3066 {
3067         struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3068         struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3069
3070         if (LIST_EMPTY(rule_head))
3071                 return;
3072
3073         LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3074                                  ice_adv_fltr_mgmt_list_entry, list_entry) {
3075                 LIST_DEL(&lst_itr->list_entry);
3076                 ice_free(hw, lst_itr->lkups);
3077                 ice_free(hw, lst_itr);
3078         }
3079 }
3080
3081 /**
3082  * ice_rem_all_sw_rules_info
3083  * @hw: pointer to the hardware structure
3084  */
3085 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3086 {
3087         struct ice_switch_info *sw = hw->switch_info;
3088         u8 i;
3089
3090         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3091                 struct LIST_HEAD_TYPE *rule_head;
3092
3093                 rule_head = &sw->recp_list[i].filt_rules;
3094                 if (!sw->recp_list[i].adv_rule)
3095                         ice_rem_sw_rule_info(hw, rule_head);
3096                 else
3097                         ice_rem_adv_rule_info(hw, rule_head);
3098         }
3099 }
3100
3101 /**
3102  * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3103  * @pi: pointer to the port_info structure
3104  * @vsi_handle: VSI handle to set as default
3105  * @set: true to add the above mentioned switch rule, false to remove it
3106  * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3107  *
3108  * add filter rule to set/unset given VSI as default VSI for the switch
3109  * (represented by swid)
3110  */
3111 enum ice_status
3112 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3113                  u8 direction)
3114 {
3115         struct ice_aqc_sw_rules_elem *s_rule;
3116         struct ice_fltr_info f_info;
3117         struct ice_hw *hw = pi->hw;
3118         enum ice_adminq_opc opcode;
3119         enum ice_status status;
3120         u16 s_rule_size;
3121         u16 hw_vsi_id;
3122
3123         if (!ice_is_vsi_valid(hw, vsi_handle))
3124                 return ICE_ERR_PARAM;
3125         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3126
3127         s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3128                             ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3129         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3130         if (!s_rule)
3131                 return ICE_ERR_NO_MEMORY;
3132
3133         ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3134
3135         f_info.lkup_type = ICE_SW_LKUP_DFLT;
3136         f_info.flag = direction;
3137         f_info.fltr_act = ICE_FWD_TO_VSI;
3138         f_info.fwd_id.hw_vsi_id = hw_vsi_id;
3139
3140         if (f_info.flag & ICE_FLTR_RX) {
3141                 f_info.src = pi->lport;
3142                 f_info.src_id = ICE_SRC_ID_LPORT;
3143                 if (!set)
3144                         f_info.fltr_rule_id =
3145                                 pi->dflt_rx_vsi_rule_id;
3146         } else if (f_info.flag & ICE_FLTR_TX) {
3147                 f_info.src_id = ICE_SRC_ID_VSI;
3148                 f_info.src = hw_vsi_id;
3149                 if (!set)
3150                         f_info.fltr_rule_id =
3151                                 pi->dflt_tx_vsi_rule_id;
3152         }
3153
3154         if (set)
3155                 opcode = ice_aqc_opc_add_sw_rules;
3156         else
3157                 opcode = ice_aqc_opc_remove_sw_rules;
3158
3159         ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3160
3161         status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3162         if (status || !(f_info.flag & ICE_FLTR_TX_RX))
3163                 goto out;
3164         if (set) {
3165                 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3166
3167                 if (f_info.flag & ICE_FLTR_TX) {
3168                         pi->dflt_tx_vsi_num = hw_vsi_id;
3169                         pi->dflt_tx_vsi_rule_id = index;
3170                 } else if (f_info.flag & ICE_FLTR_RX) {
3171                         pi->dflt_rx_vsi_num = hw_vsi_id;
3172                         pi->dflt_rx_vsi_rule_id = index;
3173                 }
3174         } else {
3175                 if (f_info.flag & ICE_FLTR_TX) {
3176                         pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3177                         pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3178                 } else if (f_info.flag & ICE_FLTR_RX) {
3179                         pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3180                         pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3181                 }
3182         }
3183
3184 out:
3185         ice_free(hw, s_rule);
3186         return status;
3187 }
3188
3189 /**
3190  * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3191  * @hw: pointer to the hardware structure
3192  * @recp_id: lookup type for which the specified rule needs to be searched
3193  * @f_info: rule information
3194  *
3195  * Helper function to search for a unicast rule entry - this is to be used
3196  * to remove unicast MAC filter that is not shared with other VSIs on the
3197  * PF switch.
3198  *
3199  * Returns pointer to entry storing the rule if found
3200  */
3201 static struct ice_fltr_mgmt_list_entry *
3202 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3203                           struct ice_fltr_info *f_info)
3204 {
3205         struct ice_switch_info *sw = hw->switch_info;
3206         struct ice_fltr_mgmt_list_entry *list_itr;
3207         struct LIST_HEAD_TYPE *list_head;
3208
3209         list_head = &sw->recp_list[recp_id].filt_rules;
3210         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3211                             list_entry) {
3212                 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3213                             sizeof(f_info->l_data)) &&
3214                     f_info->fwd_id.hw_vsi_id ==
3215                     list_itr->fltr_info.fwd_id.hw_vsi_id &&
3216                     f_info->flag == list_itr->fltr_info.flag)
3217                         return list_itr;
3218         }
3219         return NULL;
3220 }
3221
3222 /**
3223  * ice_remove_mac - remove a MAC address based filter rule
3224  * @hw: pointer to the hardware structure
3225  * @m_list: list of MAC addresses and forwarding information
3226  *
3227  * This function removes either a MAC filter rule or a specific VSI from a
3228  * VSI list for a multicast MAC address.
3229  *
3230  * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3231  * ice_add_mac. Caller should be aware that this call will only work if all
3232  * the entries passed into m_list were added previously. It will not attempt to
3233  * do a partial remove of entries that were found.
3234  */
3235 enum ice_status
3236 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3237 {
3238         struct ice_fltr_list_entry *list_itr, *tmp;
3239         struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3240
3241         if (!m_list)
3242                 return ICE_ERR_PARAM;
3243
3244         rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3245         LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3246                                  list_entry) {
3247                 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3248                 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3249                 u16 vsi_handle;
3250
3251                 if (l_type != ICE_SW_LKUP_MAC)
3252                         return ICE_ERR_PARAM;
3253
3254                 vsi_handle = list_itr->fltr_info.vsi_handle;
3255                 if (!ice_is_vsi_valid(hw, vsi_handle))
3256                         return ICE_ERR_PARAM;
3257
3258                 list_itr->fltr_info.fwd_id.hw_vsi_id =
3259                                         ice_get_hw_vsi_num(hw, vsi_handle);
3260                 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3261                         /* Don't remove the unicast address that belongs to
3262                          * another VSI on the switch, since it is not being
3263                          * shared...
3264                          */
3265                         ice_acquire_lock(rule_lock);
3266                         if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3267                                                        &list_itr->fltr_info)) {
3268                                 ice_release_lock(rule_lock);
3269                                 return ICE_ERR_DOES_NOT_EXIST;
3270                         }
3271                         ice_release_lock(rule_lock);
3272                 }
3273                 list_itr->status = ice_remove_rule_internal(hw,
3274                                                             ICE_SW_LKUP_MAC,
3275                                                             list_itr);
3276                 if (list_itr->status)
3277                         return list_itr->status;
3278         }
3279         return ICE_SUCCESS;
3280 }
3281
3282 /**
3283  * ice_remove_vlan - Remove VLAN based filter rule
3284  * @hw: pointer to the hardware structure
3285  * @v_list: list of VLAN entries and forwarding information
3286  */
3287 enum ice_status
3288 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3289 {
3290         struct ice_fltr_list_entry *v_list_itr, *tmp;
3291
3292         if (!v_list || !hw)
3293                 return ICE_ERR_PARAM;
3294
3295         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3296                                  list_entry) {
3297                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3298
3299                 if (l_type != ICE_SW_LKUP_VLAN)
3300                         return ICE_ERR_PARAM;
3301                 v_list_itr->status = ice_remove_rule_internal(hw,
3302                                                               ICE_SW_LKUP_VLAN,
3303                                                               v_list_itr);
3304                 if (v_list_itr->status)
3305                         return v_list_itr->status;
3306         }
3307         return ICE_SUCCESS;
3308 }
3309
3310 #ifndef NO_MACVLAN_SUPPORT
3311 /**
3312  * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3313  * @hw: pointer to the hardware structure
3314  * @v_list: list of MAC VLAN entries and forwarding information
3315  */
3316 enum ice_status
3317 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3318 {
3319         struct ice_fltr_list_entry *v_list_itr, *tmp;
3320
3321         if (!v_list || !hw)
3322                 return ICE_ERR_PARAM;
3323
3324         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3325                                  list_entry) {
3326                 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3327
3328                 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3329                         return ICE_ERR_PARAM;
3330                 v_list_itr->status =
3331                         ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3332                                                  v_list_itr);
3333                 if (v_list_itr->status)
3334                         return v_list_itr->status;
3335         }
3336         return ICE_SUCCESS;
3337 }
3338 #endif /* !NO_MACVLAN_SUPPORT */
3339
3340 /**
3341  * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3342  * @fm_entry: filter entry to inspect
3343  * @vsi_handle: VSI handle to compare with filter info
3344  */
3345 static bool
3346 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3347 {
3348         return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3349                  fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3350                 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3351                  (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3352                                  vsi_handle))));
3353 }
3354
3355 /**
3356  * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3357  * @hw: pointer to the hardware structure
3358  * @vsi_handle: VSI handle to remove filters from
3359  * @vsi_list_head: pointer to the list to add entry to
3360  * @fi: pointer to fltr_info of filter entry to copy & add
3361  *
3362  * Helper function, used when creating a list of filters to remove from
3363  * a specific VSI. The entry added to vsi_list_head is a COPY of the
3364  * original filter entry, with the exception of fltr_info.fltr_act and
3365  * fltr_info.fwd_id fields. These are set such that later logic can
3366  * extract which VSI to remove the fltr from, and pass on that information.
3367  */
3368 static enum ice_status
3369 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3370                                struct LIST_HEAD_TYPE *vsi_list_head,
3371                                struct ice_fltr_info *fi)
3372 {
3373         struct ice_fltr_list_entry *tmp;
3374
3375         /* this memory is freed up in the caller function
3376          * once filters for this VSI are removed
3377          */
3378         tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3379         if (!tmp)
3380                 return ICE_ERR_NO_MEMORY;
3381
3382         tmp->fltr_info = *fi;
3383
3384         /* Overwrite these fields to indicate which VSI to remove filter from,
3385          * so find and remove logic can extract the information from the
3386          * list entries. Note that original entries will still have proper
3387          * values.
3388          */
3389         tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3390         tmp->fltr_info.vsi_handle = vsi_handle;
3391         tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3392
3393         LIST_ADD(&tmp->list_entry, vsi_list_head);
3394
3395         return ICE_SUCCESS;
3396 }
3397
3398 /**
3399  * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3400  * @hw: pointer to the hardware structure
3401  * @vsi_handle: VSI handle to remove filters from
3402  * @lkup_list_head: pointer to the list that has certain lookup type filters
3403  * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3404  *
3405  * Locates all filters in lkup_list_head that are used by the given VSI,
3406  * and adds COPIES of those entries to vsi_list_head (intended to be used
3407  * to remove the listed filters).
3408  * Note that this means all entries in vsi_list_head must be explicitly
3409  * deallocated by the caller when done with list.
3410  */
3411 static enum ice_status
3412 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3413                          struct LIST_HEAD_TYPE *lkup_list_head,
3414                          struct LIST_HEAD_TYPE *vsi_list_head)
3415 {
3416         struct ice_fltr_mgmt_list_entry *fm_entry;
3417         enum ice_status status = ICE_SUCCESS;
3418
3419         /* check to make sure VSI ID is valid and within boundary */
3420         if (!ice_is_vsi_valid(hw, vsi_handle))
3421                 return ICE_ERR_PARAM;
3422
3423         LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3424                             ice_fltr_mgmt_list_entry, list_entry) {
3425                 struct ice_fltr_info *fi;
3426
3427                 fi = &fm_entry->fltr_info;
3428                 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3429                         continue;
3430
3431                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3432                                                         vsi_list_head, fi);
3433                 if (status)
3434                         return status;
3435         }
3436         return status;
3437 }
3438
3439
3440 /**
3441  * ice_determine_promisc_mask
3442  * @fi: filter info to parse
3443  *
3444  * Helper function to determine which ICE_PROMISC_ mask corresponds
3445  * to given filter into.
3446  */
3447 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3448 {
3449         u16 vid = fi->l_data.mac_vlan.vlan_id;
3450         u8 *macaddr = fi->l_data.mac.mac_addr;
3451         bool is_tx_fltr = false;
3452         u8 promisc_mask = 0;
3453
3454         if (fi->flag == ICE_FLTR_TX)
3455                 is_tx_fltr = true;
3456
3457         if (IS_BROADCAST_ETHER_ADDR(macaddr))
3458                 promisc_mask |= is_tx_fltr ?
3459                         ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3460         else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3461                 promisc_mask |= is_tx_fltr ?
3462                         ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3463         else if (IS_UNICAST_ETHER_ADDR(macaddr))
3464                 promisc_mask |= is_tx_fltr ?
3465                         ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3466         if (vid)
3467                 promisc_mask |= is_tx_fltr ?
3468                         ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3469
3470         return promisc_mask;
3471 }
3472
3473 /**
3474  * ice_get_vsi_promisc - get promiscuous mode of given VSI
3475  * @hw: pointer to the hardware structure
3476  * @vsi_handle: VSI handle to retrieve info from
3477  * @promisc_mask: pointer to mask to be filled in
3478  * @vid: VLAN ID of promisc VLAN VSI
3479  */
3480 enum ice_status
3481 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3482                     u16 *vid)
3483 {
3484         struct ice_switch_info *sw = hw->switch_info;
3485         struct ice_fltr_mgmt_list_entry *itr;
3486         struct LIST_HEAD_TYPE *rule_head;
3487         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
3488
3489         if (!ice_is_vsi_valid(hw, vsi_handle))
3490                 return ICE_ERR_PARAM;
3491
3492         *vid = 0;
3493         *promisc_mask = 0;
3494         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3495         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3496
3497         ice_acquire_lock(rule_lock);
3498         LIST_FOR_EACH_ENTRY(itr, rule_head,
3499                             ice_fltr_mgmt_list_entry, list_entry) {
3500                 /* Continue if this filter doesn't apply to this VSI or the
3501                  * VSI ID is not in the VSI map for this filter
3502                  */
3503                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3504                         continue;
3505
3506                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3507         }
3508         ice_release_lock(rule_lock);
3509
3510         return ICE_SUCCESS;
3511 }
3512
3513 /**
3514  * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3515  * @hw: pointer to the hardware structure
3516  * @vsi_handle: VSI handle to retrieve info from
3517  * @promisc_mask: pointer to mask to be filled in
3518  * @vid: VLAN ID of promisc VLAN VSI
3519  */
3520 enum ice_status
3521 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3522                          u16 *vid)
3523 {
3524         struct ice_switch_info *sw = hw->switch_info;
3525         struct ice_fltr_mgmt_list_entry *itr;
3526         struct LIST_HEAD_TYPE *rule_head;
3527         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
3528
3529         if (!ice_is_vsi_valid(hw, vsi_handle))
3530                 return ICE_ERR_PARAM;
3531
3532         *vid = 0;
3533         *promisc_mask = 0;
3534         rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3535         rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3536
3537         ice_acquire_lock(rule_lock);
3538         LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3539                             list_entry) {
3540                 /* Continue if this filter doesn't apply to this VSI or the
3541                  * VSI ID is not in the VSI map for this filter
3542                  */
3543                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3544                         continue;
3545
3546                 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3547         }
3548         ice_release_lock(rule_lock);
3549
3550         return ICE_SUCCESS;
3551 }
3552
3553 /**
3554  * ice_remove_promisc - Remove promisc based filter rules
3555  * @hw: pointer to the hardware structure
3556  * @recp_id: recipe ID for which the rule needs to removed
3557  * @v_list: list of promisc entries
3558  */
3559 static enum ice_status
3560 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3561                    struct LIST_HEAD_TYPE *v_list)
3562 {
3563         struct ice_fltr_list_entry *v_list_itr, *tmp;
3564
3565         LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3566                                  list_entry) {
3567                 v_list_itr->status =
3568                         ice_remove_rule_internal(hw, recp_id, v_list_itr);
3569                 if (v_list_itr->status)
3570                         return v_list_itr->status;
3571         }
3572         return ICE_SUCCESS;
3573 }
3574
3575 /**
3576  * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3577  * @hw: pointer to the hardware structure
3578  * @vsi_handle: VSI handle to clear mode
3579  * @promisc_mask: mask of promiscuous config bits to clear
3580  * @vid: VLAN ID to clear VLAN promiscuous
3581  */
3582 enum ice_status
3583 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3584                       u16 vid)
3585 {
3586         struct ice_switch_info *sw = hw->switch_info;
3587         struct ice_fltr_list_entry *fm_entry, *tmp;
3588         struct LIST_HEAD_TYPE remove_list_head;
3589         struct ice_fltr_mgmt_list_entry *itr;
3590         struct LIST_HEAD_TYPE *rule_head;
3591         struct ice_lock *rule_lock;     /* Lock to protect filter rule list */
3592         enum ice_status status = ICE_SUCCESS;
3593         u8 recipe_id;
3594
3595         if (!ice_is_vsi_valid(hw, vsi_handle))
3596                 return ICE_ERR_PARAM;
3597
3598         if (vid)
3599                 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3600         else
3601                 recipe_id = ICE_SW_LKUP_PROMISC;
3602
3603         rule_head = &sw->recp_list[recipe_id].filt_rules;
3604         rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3605
3606         INIT_LIST_HEAD(&remove_list_head);
3607
3608         ice_acquire_lock(rule_lock);
3609         LIST_FOR_EACH_ENTRY(itr, rule_head,
3610                             ice_fltr_mgmt_list_entry, list_entry) {
3611                 u8 fltr_promisc_mask = 0;
3612
3613                 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3614                         continue;
3615
3616                 fltr_promisc_mask |=
3617                         ice_determine_promisc_mask(&itr->fltr_info);
3618
3619                 /* Skip if filter is not completely specified by given mask */
3620                 if (fltr_promisc_mask & ~promisc_mask)
3621                         continue;
3622
3623                 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3624                                                         &remove_list_head,
3625                                                         &itr->fltr_info);
3626                 if (status) {
3627                         ice_release_lock(rule_lock);
3628                         goto free_fltr_list;
3629                 }
3630         }
3631         ice_release_lock(rule_lock);
3632
3633         status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
3634
3635 free_fltr_list:
3636         LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3637                                  ice_fltr_list_entry, list_entry) {
3638                 LIST_DEL(&fm_entry->list_entry);
3639                 ice_free(hw, fm_entry);
3640         }
3641
3642         return status;
3643 }
3644
3645 /**
3646  * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3647  * @hw: pointer to the hardware structure
3648  * @vsi_handle: VSI handle to configure
3649  * @promisc_mask: mask of promiscuous config bits
3650  * @vid: VLAN ID to set VLAN promiscuous
3651  */
3652 enum ice_status
3653 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3654 {
3655         enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3656         struct ice_fltr_list_entry f_list_entry;
3657         struct ice_fltr_info new_fltr;
3658         enum ice_status status = ICE_SUCCESS;
3659         bool is_tx_fltr;
3660         u16 hw_vsi_id;
3661         int pkt_type;
3662         u8 recipe_id;
3663
3664         ice_debug(hw, ICE_DBG_TRACE, "ice_set_vsi_promisc\n");
3665
3666         if (!ice_is_vsi_valid(hw, vsi_handle))
3667                 return ICE_ERR_PARAM;
3668         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3669
3670         ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
3671
3672         if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3673                 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3674                 new_fltr.l_data.mac_vlan.vlan_id = vid;
3675                 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3676         } else {
3677                 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3678                 recipe_id = ICE_SW_LKUP_PROMISC;
3679         }
3680
3681         /* Separate filters must be set for each direction/packet type
3682          * combination, so we will loop over the mask value, store the
3683          * individual type, and clear it out in the input mask as it
3684          * is found.
3685          */
3686         while (promisc_mask) {
3687                 u8 *mac_addr;
3688
3689                 pkt_type = 0;
3690                 is_tx_fltr = false;
3691
3692                 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3693                         promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3694                         pkt_type = UCAST_FLTR;
3695                 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3696                         promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3697                         pkt_type = UCAST_FLTR;
3698                         is_tx_fltr = true;
3699                 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3700                         promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3701                         pkt_type = MCAST_FLTR;
3702                 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3703                         promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3704                         pkt_type = MCAST_FLTR;
3705                         is_tx_fltr = true;
3706                 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3707                         promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3708                         pkt_type = BCAST_FLTR;
3709                 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3710                         promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3711                         pkt_type = BCAST_FLTR;
3712                         is_tx_fltr = true;
3713                 }
3714
3715                 /* Check for VLAN promiscuous flag */
3716                 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3717                         promisc_mask &= ~ICE_PROMISC_VLAN_RX;
3718                 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
3719                         promisc_mask &= ~ICE_PROMISC_VLAN_TX;
3720                         is_tx_fltr = true;
3721                 }
3722
3723                 /* Set filter DA based on packet type */
3724                 mac_addr = new_fltr.l_data.mac.mac_addr;
3725                 if (pkt_type == BCAST_FLTR) {
3726                         ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
3727                 } else if (pkt_type == MCAST_FLTR ||
3728                            pkt_type == UCAST_FLTR) {
3729                         /* Use the dummy ether header DA */
3730                         ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
3731                                    ICE_NONDMA_TO_NONDMA);
3732                         if (pkt_type == MCAST_FLTR)
3733                                 mac_addr[0] |= 0x1;     /* Set multicast bit */
3734                 }
3735
3736                 /* Need to reset this to zero for all iterations */
3737                 new_fltr.flag = 0;
3738                 if (is_tx_fltr) {
3739                         new_fltr.flag |= ICE_FLTR_TX;
3740                         new_fltr.src = hw_vsi_id;
3741                 } else {
3742                         new_fltr.flag |= ICE_FLTR_RX;
3743                         new_fltr.src = hw->port_info->lport;
3744                 }
3745
3746                 new_fltr.fltr_act = ICE_FWD_TO_VSI;
3747                 new_fltr.vsi_handle = vsi_handle;
3748                 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
3749                 f_list_entry.fltr_info = new_fltr;
3750
3751                 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
3752                 if (status != ICE_SUCCESS)
3753                         goto set_promisc_exit;
3754         }
3755
3756 set_promisc_exit:
3757         return status;
3758 }
3759
/**
 * ice_set_vlan_vsi_promisc
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to configure
 * @promisc_mask: mask of promiscuous config bits
 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
 *
 * Configure VSI with all associated VLANs to given promiscuous mode(s)
 *
 * Walks the VLAN filter rules that reference @vsi_handle and applies (or
 * clears, when @rm_vlan_promisc is true) the requested promiscuous mode for
 * each VLAN ID found. Stops at the first failure and returns that status.
 */
enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
			 bool rm_vlan_promisc)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *list_itr, *tmp;
	struct LIST_HEAD_TYPE vsi_list_head;
	struct LIST_HEAD_TYPE *vlan_head;
	struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
	enum ice_status status;
	u16 vlan_id;

	INIT_LIST_HEAD(&vsi_list_head);
	vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
	vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
	/* Snapshot this VSI's VLAN filters into a private list while holding
	 * the rule lock; the promiscuous-mode updates below then run without
	 * holding it.
	 */
	ice_acquire_lock(vlan_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
					  &vsi_list_head);
	ice_release_lock(vlan_lock);
	if (status)
		goto free_fltr_list;

	/* Apply the mode change per VLAN ID; bail out on the first error so
	 * the failing status is what the caller sees.
	 */
	LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
			    list_entry) {
		vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
		if (rm_vlan_promisc)
			status = ice_clear_vsi_promisc(hw, vsi_handle,
						       promisc_mask, vlan_id);
		else
			status = ice_set_vsi_promisc(hw, vsi_handle,
						     promisc_mask, vlan_id);
		if (status)
			break;
	}

free_fltr_list:
	/* Release the snapshot entries on every path, success or failure */
	LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&list_itr->list_entry);
		ice_free(hw, list_itr);
	}
	return status;
}
3812
/**
 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle to remove filters from
 * @lkup: switch rule filter lookup type
 *
 * Collects the filters of type @lkup that reference @vsi_handle into a
 * temporary list, dispatches to the matching removal helper, then frees the
 * temporary list. Errors are not propagated (void return).
 */
static void
ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
			 enum ice_sw_lkup_type lkup)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_list_entry *fm_entry;
	struct LIST_HEAD_TYPE remove_list_head;
	struct LIST_HEAD_TYPE *rule_head;
	struct ice_fltr_list_entry *tmp;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status status;

	INIT_LIST_HEAD(&remove_list_head);
	rule_lock = &sw->recp_list[lkup].filt_rule_lock;
	rule_head = &sw->recp_list[lkup].filt_rules;
	/* Build the candidate list while holding the rule lock */
	ice_acquire_lock(rule_lock);
	status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
					  &remove_list_head);
	ice_release_lock(rule_lock);
	if (status)
		/* NOTE(review): if ice_add_to_vsi_fltr_list() fails after
		 * partially populating remove_list_head, those entries are
		 * not freed here — confirm the helper cleans up on error.
		 */
		return;

	/* Dispatch to the removal routine for this lookup type */
	switch (lkup) {
	case ICE_SW_LKUP_MAC:
		ice_remove_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_VLAN:
		ice_remove_vlan(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_PROMISC:
	case ICE_SW_LKUP_PROMISC_VLAN:
		ice_remove_promisc(hw, lkup, &remove_list_head);
		break;
	case ICE_SW_LKUP_MAC_VLAN:
#ifndef NO_MACVLAN_SUPPORT
		ice_remove_mac_vlan(hw, &remove_list_head);
#else
		ice_debug(hw, ICE_DBG_SW, "MAC VLAN look up is not supported yet\n");
#endif /* !NO_MACVLAN_SUPPORT */
		break;
	case ICE_SW_LKUP_ETHERTYPE:
	case ICE_SW_LKUP_ETHERTYPE_MAC:
		ice_remove_eth_mac(hw, &remove_list_head);
		break;
	case ICE_SW_LKUP_DFLT:
		ice_debug(hw, ICE_DBG_SW,
			  "Remove filters for this lookup type hasn't been implemented yet\n");
		break;
	case ICE_SW_LKUP_LAST:
		ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
		break;
	}

	/* Free the temporary candidate list entries in all cases */
	LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
				 ice_fltr_list_entry, list_entry) {
		LIST_DEL(&fm_entry->list_entry);
		ice_free(hw, fm_entry);
	}
}
3878
3879 /**
3880  * ice_remove_vsi_fltr - Remove all filters for a VSI
3881  * @hw: pointer to the hardware structure
3882  * @vsi_handle: VSI handle to remove filters from
3883  */
3884 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
3885 {
3886         ice_debug(hw, ICE_DBG_TRACE, "ice_remove_vsi_fltr\n");
3887
3888         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
3889         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
3890         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
3891         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
3892         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
3893         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
3894         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
3895         ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
3896 }
3897
3898 /**
3899  * ice_alloc_res_cntr - allocating resource counter
3900  * @hw: pointer to the hardware structure
3901  * @type: type of resource
3902  * @alloc_shared: if set it is shared else dedicated
3903  * @num_items: number of entries requested for FD resource type
3904  * @counter_id: counter index returned by AQ call
3905  */
3906 enum ice_status
3907 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3908                    u16 *counter_id)
3909 {
3910         struct ice_aqc_alloc_free_res_elem *buf;
3911         enum ice_status status;
3912         u16 buf_len;
3913
3914         /* Allocate resource */
3915         buf_len = sizeof(*buf);
3916         buf = (struct ice_aqc_alloc_free_res_elem *)
3917                 ice_malloc(hw, buf_len);
3918         if (!buf)
3919                 return ICE_ERR_NO_MEMORY;
3920
3921         buf->num_elems = CPU_TO_LE16(num_items);
3922         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
3923                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
3924
3925         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3926                                        ice_aqc_opc_alloc_res, NULL);
3927         if (status)
3928                 goto exit;
3929
3930         *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
3931
3932 exit:
3933         ice_free(hw, buf);
3934         return status;
3935 }
3936
3937 /**
3938  * ice_free_res_cntr - free resource counter
3939  * @hw: pointer to the hardware structure
3940  * @type: type of resource
3941  * @alloc_shared: if set it is shared else dedicated
3942  * @num_items: number of entries to be freed for FD resource type
3943  * @counter_id: counter ID resource which needs to be freed
3944  */
3945 enum ice_status
3946 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
3947                   u16 counter_id)
3948 {
3949         struct ice_aqc_alloc_free_res_elem *buf;
3950         enum ice_status status;
3951         u16 buf_len;
3952
3953         /* Free resource */
3954         buf_len = sizeof(*buf);
3955         buf = (struct ice_aqc_alloc_free_res_elem *)
3956                 ice_malloc(hw, buf_len);
3957         if (!buf)
3958                 return ICE_ERR_NO_MEMORY;
3959
3960         buf->num_elems = CPU_TO_LE16(num_items);
3961         buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
3962                                       ICE_AQC_RES_TYPE_M) | alloc_shared);
3963         buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
3964
3965         status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
3966                                        ice_aqc_opc_free_res, NULL);
3967         if (status)
3968                 ice_debug(hw, ICE_DBG_SW,
3969                           "counter resource could not be freed\n");
3970
3971         ice_free(hw, buf);
3972         return status;
3973 }
3974
3975 /**
3976  * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
3977  * @hw: pointer to the hardware structure
3978  * @counter_id: returns counter index
3979  */
3980 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
3981 {
3982         return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
3983                                   ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
3984                                   counter_id);
3985 }
3986
3987 /**
3988  * ice_free_vlan_res_counter - Free counter resource for VLAN type
3989  * @hw: pointer to the hardware structure
3990  * @counter_id: counter index to be freed
3991  */
3992 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
3993 {
3994         return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
3995                                  ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
3996                                  counter_id);
3997 }
3998
/**
 * ice_alloc_res_lg_act - add large action resource
 * @hw: pointer to the hardware structure
 * @l_id: large action ID to fill it in
 * @num_acts: number of actions to hold with a large action entry
 *
 * Allocates one wide-table entry sized for @num_acts actions and returns
 * its index through @l_id on success.
 */
static enum ice_status
ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
{
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	enum ice_status status;
	u16 buf_len;

	/* num_acts must be in [1, ICE_MAX_LG_ACT] */
	if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
		return ICE_ERR_PARAM;

	/* Allocate resource for large action */
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	if (!sw_buf)
		return ICE_ERR_NO_MEMORY;

	sw_buf->num_elems = CPU_TO_LE16(1);

	/* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
	 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_2.
	 * If num_acts is greater than 2, then use
	 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
	 * The num_acts cannot exceed ICE_MAX_LG_ACT. This was ensured at
	 * the beginning of the function.
	 */
	if (num_acts == 1)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
	else if (num_acts == 2)
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
	else
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);

	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (!status)
		*l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);

	ice_free(hw, sw_buf);
	return status;
}
4046
/**
 * ice_add_mac_with_sw_marker - add filter with sw marker
 * @hw: pointer to the hardware structure
 * @f_info: filter info structure containing the MAC filter information
 * @sw_marker: sw marker to tag the Rx descriptor with
 *
 * Adds (or reuses) a MAC forwarding filter, then attaches a software-marker
 * large action to it. On failure, a filter that was newly created by this
 * call is rolled back via ice_remove_mac(); a pre-existing filter is left
 * untouched.
 */
enum ice_status
ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
			   u16 sw_marker)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exists;
	u16 lg_act_id;

	/* Only forward-to-VSI MAC filters with a valid marker and VSI are
	 * eligible for a sw marker action.
	 */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (sw_marker == ICE_INVAL_SW_MARKER_ID)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */

	INIT_LIST_HEAD(&l_head);
	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	entry_exists = false;
	ret = ice_add_mac(hw, &l_head);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exists = true;
	else if (ret)
		return ret;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
	ice_acquire_lock(rule_lock);
	/* Get the book keeping entry for the filter */
	m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
	if (!m_entry)
		/* NOTE(review): ret is not set to an error here; the value
		 * returned comes from ice_add_mac() above or from the
		 * rollback ice_remove_mac() below — confirm intended.
		 */
		goto exit_error;

	/* If counter action was enabled for this rule then don't enable
	 * sw marker large action
	 */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* if same marker was added before */
	if (m_entry->sw_marker_id == sw_marker) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to hold large act. Three actions
	 * for marker based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
	if (ret)
		goto exit_error;

	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		/* NOTE(review): ret still holds ICE_SUCCESS on this path, so
		 * for a pre-existing entry the function can report success
		 * without having added the marker — confirm intended.
		 */
		goto exit_error;

	/* Update the switch rule to add the marker action */
	ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exists)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
4140
/**
 * ice_add_mac_with_counter - add filter with counter enabled
 * @hw: pointer to the hardware structure
 * @f_info: pointer to filter info structure containing the MAC filter
 *          information
 *
 * Adds (or reuses) a MAC forwarding filter, then attaches a VLAN counter
 * large action to it. On failure, a filter newly created by this call is
 * rolled back via ice_remove_mac(); a pre-existing filter is kept.
 */
enum ice_status
ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_fltr_mgmt_list_entry *m_entry;
	struct ice_fltr_list_entry fl_info;
	struct LIST_HEAD_TYPE l_head;
	struct ice_lock *rule_lock;	/* Lock to protect filter rule list */
	enum ice_status ret;
	bool entry_exist;
	u16 counter_id;
	u16 lg_act_id;

	/* Only forward-to-VSI MAC filters on a valid VSI are eligible */
	if (f_info->fltr_act != ICE_FWD_TO_VSI)
		return ICE_ERR_PARAM;

	if (f_info->lkup_type != ICE_SW_LKUP_MAC)
		return ICE_ERR_PARAM;

	if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
		return ICE_ERR_PARAM;
	f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);

	entry_exist = false;

	rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;

	/* Add filter if it doesn't exist so then the adding of large
	 * action always results in update
	 */
	INIT_LIST_HEAD(&l_head);

	fl_info.fltr_info = *f_info;
	LIST_ADD(&fl_info.list_entry, &l_head);

	ret = ice_add_mac(hw, &l_head);
	if (ret == ICE_ERR_ALREADY_EXISTS)
		entry_exist = true;
	else if (ret)
		return ret;

	ice_acquire_lock(rule_lock);
	m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
	if (!m_entry) {
		ret = ICE_ERR_BAD_PTR;
		goto exit_error;
	}

	/* Don't enable counter for a filter for which sw marker was enabled */
	if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
		ret = ICE_ERR_PARAM;
		goto exit_error;
	}

	/* If a counter was already enabled then don't need to add again */
	if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
		ret = ICE_ERR_ALREADY_EXISTS;
		goto exit_error;
	}

	/* Allocate a hardware table entry to VLAN counter */
	ret = ice_alloc_vlan_res_counter(hw, &counter_id);
	if (ret)
		goto exit_error;

	/* Allocate a hardware table entry to hold large act. Two actions for
	 * counter based large action
	 */
	ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
	if (ret)
		/* NOTE(review): counter_id allocated above is not freed on
		 * this failure path — confirm whether it leaks.
		 */
		goto exit_error;

	if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
		/* NOTE(review): ret still holds ICE_SUCCESS here, so for a
		 * pre-existing entry the function can report success without
		 * having attached the counter — confirm intended.
		 */
		goto exit_error;

	/* Update the switch rule to add the counter action */
	ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
	if (!ret) {
		ice_release_lock(rule_lock);
		return ret;
	}

exit_error:
	ice_release_lock(rule_lock);
	/* only remove entry if it did not exist previously */
	if (!entry_exist)
		ret = ice_remove_mac(hw, &l_head);

	return ret;
}
4237
/* This is mapping table entry that maps every word within a given protocol
 * structure to the real byte offset as per the specification of that
 * protocol header.
 * for example dst address is 3 words in ethertype header and corresponding
 * bytes are 0, 2, 4 in the actual packet header and src address is at 6, 8, 10
 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
 * matching entry describing its field. This needs to be updated if new
 * structure is added to that union.
 *
 * NOTE: this table is indexed directly by enum ice_protocol_type value (see
 * ice_fill_valid_words()), so entry order must stay in sync with the enum.
 */
static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
	{ ICE_MAC_OFOS,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_MAC_IL,		{ 0, 2, 4, 6, 8, 10, 12 } },
	{ ICE_IPV4_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV4_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
	{ ICE_IPV6_IL,		{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_IPV6_OFOS,	{ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
				 26, 28, 30, 32, 34, 36, 38 } },
	{ ICE_TCP_IL,		{ 0, 2 } },
	{ ICE_UDP_ILOS,		{ 0, 2 } },
	{ ICE_SCTP_IL,		{ 0, 2 } },
	{ ICE_VXLAN,		{ 8, 10, 12 } },
	{ ICE_GENEVE,		{ 8, 10, 12 } },
	{ ICE_VXLAN_GPE,	{ 0, 2, 4 } },
	{ ICE_NVGRE,		{ 0, 2 } },
	{ ICE_PROTOCOL_LAST,	{ 0 } }
};
4265
/* The following table describes preferred grouping of recipes.
 * If a recipe that needs to be programmed is a superset or matches one of the
 * following combinations, then the recipe needs to be chained as per the
 * following policy.
 *
 * Each entry is a word count followed by up to ICE_NUM_WORDS_RECIPE
 * (protocol ID, byte offset, third-field) tuples; the third field is always
 * 0 here (presumably reserved — confirm against struct definition).
 */
static const struct ice_pref_recipe_group ice_recipe_pack[] = {
	/* Outer MAC, offsets 0/2/4 (first 6 header bytes) */
	{3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
	      { ICE_MAC_OFOS_HW, 4, 0 } } },
	/* Inner MAC offsets 0/2/4 plus a metadata word at offset 44 */
	{4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
	      { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } } },
	/* Inner IPv4, offsets 0/2 */
	{2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } } },
	/* Inner IPv4, offsets 12/14 (source address bytes per RFC 791 layout
	 * — confirm)
	 */
	{2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } } },
};
4279
/* Maps the driver's logical protocol type (enum ice_protocol_type) to the
 * hardware protocol ID used in recipes. Terminated by ICE_PROTOCOL_LAST for
 * the linear scan in ice_prot_type_to_id().
 * NOTE(review): keep the entry order in sync with enum ice_protocol_type —
 * this table has also been indexed directly by enum value; confirm before
 * reordering.
 */
static const struct ice_protocol_entry ice_prot_id_tbl[] = {
	{ ICE_MAC_OFOS,		ICE_MAC_OFOS_HW },
	{ ICE_MAC_IL,		ICE_MAC_IL_HW },
	{ ICE_IPV4_OFOS,	ICE_IPV4_OFOS_HW },
	{ ICE_IPV4_IL,		ICE_IPV4_IL_HW },
	{ ICE_IPV6_OFOS,	ICE_IPV6_OFOS_HW },
	{ ICE_IPV6_IL,		ICE_IPV6_IL_HW },
	{ ICE_TCP_IL,		ICE_TCP_IL_HW },
	{ ICE_UDP_ILOS,		ICE_UDP_ILOS_HW },
	{ ICE_SCTP_IL,		ICE_SCTP_IL_HW },
	{ ICE_VXLAN,		ICE_UDP_OF_HW },
	{ ICE_GENEVE,		ICE_UDP_OF_HW },
	{ ICE_VXLAN_GPE,	ICE_UDP_OF_HW },
	{ ICE_NVGRE,		ICE_GRE_OF_HW },
	{ ICE_PROTOCOL_LAST,	0 }
};
4296
/**
 * ice_find_recp - find a recipe
 * @hw: pointer to the hardware structure
 * @lkup_exts: extension sequence to match
 *
 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
 * Two recipes match when they extract the same set of (protocol ID, byte
 * offset) words; word order is ignored.
 */
static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
{
	struct ice_sw_recipe *recp;
	u16 i;

	ice_get_recp_to_prof_map(hw);
	/* Initialize available_result_ids which tracks available result idx */
	for (i = 0; i <= ICE_CHAIN_FV_INDEX_START; i++)
		ice_set_bit(ICE_CHAIN_FV_INDEX_START - i,
			    available_result_ids);

	/* Walk through existing recipes to find a match */
	recp = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* If recipe was not created for this ID, in SW bookkeeping,
		 * check if FW has an entry for this recipe. If the FW has an
		 * entry update it in our SW bookkeeping and continue with the
		 * matching.
		 */
		if (!recp[i].recp_created)
			if (ice_get_recp_frm_fw(hw,
						hw->switch_info->recp_list, i))
				continue;

		/* if number of words we are looking for match */
		if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
			struct ice_fv_word *a = lkup_exts->fv_words;
			struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
			bool found = true;
			u8 p, q;

			/* Order-insensitive set comparison: for every word of
			 * the request, scan the recipe's words for an equal
			 * (off, prot_id) pair. O(n^2), with n bounded by
			 * ICE_MAX_CHAIN_WORDS.
			 */
			for (p = 0; p < lkup_exts->n_val_words; p++) {
				for (q = 0; q < recp[i].lkup_exts.n_val_words;
				     q++) {
					if (a[p].off == b[q].off &&
					    a[p].prot_id == b[q].prot_id)
						/* Found the "p"th word in the
						 * given recipe
						 */
						break;
				}
				/* After walking through all the words in the
				 * "i"th recipe if "p"th word was not found then
				 * this recipe is not what we are looking for.
				 * So break out from this loop and try the next
				 * recipe
				 */
				if (q >= recp[i].lkup_exts.n_val_words) {
					found = false;
					break;
				}
			}
			/* If for "i"th recipe the found was never set to false
			 * then it means we found our match
			 */
			if (found)
				return i; /* Return the recipe ID */
		}
	}
	return ICE_MAX_NUM_RECIPES;
}
4365
4366 /**
4367  * ice_prot_type_to_id - get protocol ID from protocol type
4368  * @type: protocol type
4369  * @id: pointer to variable that will receive the ID
4370  *
4371  * Returns true if found, false otherwise
4372  */
4373 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
4374 {
4375         u16 i;
4376
4377         for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4378                 if (ice_prot_id_tbl[i].type == type) {
4379                         *id = ice_prot_id_tbl[i].protocol_id;
4380                         return true;
4381                 }
4382         return false;
4383 }
4384
4385 /**
4386  * ice_find_valid_words - count valid words
4387  * @rule: advanced rule with lookup information
4388  * @lkup_exts: byte offset extractions of the words that are valid
4389  *
4390  * calculate valid words in a lookup rule using mask value
4391  */
4392 static u16
4393 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4394                      struct ice_prot_lkup_ext *lkup_exts)
4395 {
4396         u16 j, word = 0;
4397         u16 prot_id;
4398         u16 ret_val;
4399
4400         if (!ice_prot_type_to_id(rule->type, &prot_id))
4401                 return 0;
4402
4403         word = lkup_exts->n_val_words;
4404
4405         for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4406                 if (((u16 *)&rule->m_u)[j] == 0xffff &&
4407                     rule->type < ARRAY_SIZE(ice_prot_ext)) {
4408                         /* No more space to accommodate */
4409                         if (word >= ICE_MAX_CHAIN_WORDS)
4410                                 return 0;
4411                         lkup_exts->fv_words[word].off =
4412                                 ice_prot_ext[rule->type].offs[j];
4413                         lkup_exts->fv_words[word].prot_id =
4414                                 ice_prot_id_tbl[rule->type].protocol_id;
4415                         word++;
4416                 }
4417
4418         ret_val = word - lkup_exts->n_val_words;
4419         lkup_exts->n_val_words = word;
4420
4421         return ret_val;
4422 }
4423
4424 /**
4425  * ice_find_prot_off_ind - check for specific ID and offset in rule
4426  * @lkup_exts: an array of protocol header extractions
4427  * @prot_type: protocol type to check
4428  * @off: expected offset of the extraction
4429  *
4430  * Check if the prot_ext has given protocol ID and offset
4431  */
4432 static u8
4433 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4434                       u16 off)
4435 {
4436         u8 j;
4437
4438         for (j = 0; j < lkup_exts->n_val_words; j++)
4439                 if (lkup_exts->fv_words[j].off == off &&
4440                     lkup_exts->fv_words[j].prot_id == prot_type)
4441                         return j;
4442
4443         return ICE_MAX_CHAIN_WORDS;
4444 }
4445
4446 /**
4447  * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4448  * @lkup_exts: an array of protocol header extractions
4449  * @r_policy: preferred recipe grouping policy
4450  *
4451  * Helper function to check if given recipe group is subset we need to check if
4452  * all the words described by the given recipe group exist in the advanced rule
4453  * look up information
4454  */
4455 static bool
4456 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4457                      const struct ice_pref_recipe_group *r_policy)
4458 {
4459         u8 ind[ICE_NUM_WORDS_RECIPE];
4460         u8 count = 0;
4461         u8 i;
4462
4463         /* check if everything in the r_policy is part of the entire rule */
4464         for (i = 0; i < r_policy->n_val_pairs; i++) {
4465                 u8 j;
4466
4467                 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4468                                           r_policy->pairs[i].off);
4469                 if (j >= ICE_MAX_CHAIN_WORDS)
4470                         return false;
4471
4472                 /* store the indexes temporarily found by the find function
4473                  * this will be used to mark the words as 'done'
4474                  */
4475                 ind[count++] = j;
4476         }
4477
4478         /* If the entire policy recipe was a true match, then mark the fields
4479          * that are covered by the recipe as 'done' meaning that these words
4480          * will be clumped together in one recipe.
4481          * "Done" here means in our searching if certain recipe group
4482          * matches or is subset of the given rule, then we mark all
4483          * the corresponding offsets as found. So the remaining recipes should
4484          * be created with whatever words that were left.
4485          */
4486         for (i = 0; i < count; i++) {
4487                 u8 in = ind[i];
4488
4489                 ice_set_bit(in, lkup_exts->done);
4490         }
4491         return true;
4492 }
4493
4494 /**
4495  * ice_create_first_fit_recp_def - Create a recipe grouping
4496  * @hw: pointer to the hardware structure
4497  * @lkup_exts: an array of protocol header extractions
4498  * @rg_list: pointer to a list that stores new recipe groups
4499  * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4500  *
4501  * Using first fit algorithm, take all the words that are still not done
4502  * and start grouping them in 4-word groups. Each group makes up one
4503  * recipe.
4504  */
4505 static enum ice_status
4506 ice_create_first_fit_recp_def(struct ice_hw *hw,
4507                               struct ice_prot_lkup_ext *lkup_exts,
4508                               struct LIST_HEAD_TYPE *rg_list,
4509                               u8 *recp_cnt)
4510 {
4511         struct ice_pref_recipe_group *grp = NULL;
4512         u8 j;
4513
4514         *recp_cnt = 0;
4515
4516         /* Walk through every word in the rule to check if it is not done. If so
4517          * then this word needs to be part of a new recipe.
4518          */
4519         for (j = 0; j < lkup_exts->n_val_words; j++)
4520                 if (!ice_is_bit_set(lkup_exts->done, j)) {
4521                         if (!grp ||
4522                             grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4523                                 struct ice_recp_grp_entry *entry;
4524
4525                                 entry = (struct ice_recp_grp_entry *)
4526                                         ice_malloc(hw, sizeof(*entry));
4527                                 if (!entry)
4528                                         return ICE_ERR_NO_MEMORY;
4529                                 LIST_ADD(&entry->l_entry, rg_list);
4530                                 grp = &entry->r_group;
4531                                 (*recp_cnt)++;
4532                         }
4533
4534                         grp->pairs[grp->n_val_pairs].prot_id =
4535                                 lkup_exts->fv_words[j].prot_id;
4536                         grp->pairs[grp->n_val_pairs].off =
4537                                 lkup_exts->fv_words[j].off;
4538                         grp->n_val_pairs++;
4539                 }
4540
4541         return ICE_SUCCESS;
4542 }
4543
4544 /**
4545  * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4546  * @hw: pointer to the hardware structure
4547  * @fv_list: field vector with the extraction sequence information
4548  * @rg_list: recipe groupings with protocol-offset pairs
4549  *
4550  * Helper function to fill in the field vector indices for protocol-offset
4551  * pairs. These indexes are then ultimately programmed into a recipe.
4552  */
4553 static void
4554 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4555                        struct LIST_HEAD_TYPE *rg_list)
4556 {
4557         struct ice_sw_fv_list_entry *fv;
4558         struct ice_recp_grp_entry *rg;
4559         struct ice_fv_word *fv_ext;
4560
4561         if (LIST_EMPTY(fv_list))
4562                 return;
4563
4564         fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4565         fv_ext = fv->fv_ptr->ew;
4566
4567         LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4568                 u8 i;
4569
4570                 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4571                         struct ice_fv_word *pr;
4572                         u8 j;
4573
4574                         pr = &rg->r_group.pairs[i];
4575                         for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4576                                 if (fv_ext[j].prot_id == pr->prot_id &&
4577                                     fv_ext[j].off == pr->off) {
4578                                         /* Store index of field vector */
4579                                         rg->fv_idx[i] = j;
4580                                         break;
4581                                 }
4582                 }
4583         }
4584 }
4585
/**
 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
 * @hw: pointer to hardware structure
 * @rm: recipe management list entry
 * @match_tun: if field vector index for tunnel needs to be programmed
 *
 * Programs one recipe per group on @rm->rg_list via the admin queue. When
 * more than one group exists, an extra "chaining" recipe is allocated that
 * consumes the result index of each sub-recipe. On success, ownership of
 * the recipe buffer transfers to @rm->root_buf (freed by the caller);
 * on failure all local allocations are released here.
 */
static enum ice_status
ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
		  bool match_tun)
{
	struct ice_aqc_recipe_data_elem *tmp;
	struct ice_aqc_recipe_data_elem *buf;
	struct ice_recp_grp_entry *entry;
	enum ice_status status;
	u16 recipe_count;
	u8 chain_idx;
	u8 recps = 0;

	/* When more than one recipe are required, another recipe is needed to
	 * chain them together. Matching a tunnel metadata ID takes up one of
	 * the match fields in the chaining recipe reducing the number of
	 * chained recipes by one.
	 */
	if (rm->n_grp_count > 1)
		rm->n_grp_count++;
	if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE ||
	    (match_tun && rm->n_grp_count > (ICE_MAX_CHAIN_RECIPE - 1)))
		return ICE_ERR_MAX_LIMIT;

	/* tmp holds the template recipe read back from firmware; buf is the
	 * array of recipes that will actually be programmed.
	 */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
							    ICE_MAX_NUM_RECIPES,
							    sizeof(*tmp));
	if (!tmp)
		return ICE_ERR_NO_MEMORY;

	buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
	if (!buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_mem;
	}

	/* Read an existing recipe as a template for the content fields of
	 * the new recipes (tmp[0] is reused for every group below).
	 */
	ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
	recipe_count = ICE_MAX_NUM_RECIPES;
	status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
				   NULL);
	if (status || recipe_count == 0)
		goto err_unroll;

	/* Allocate the recipe resources, and configure them according to the
	 * match fields from protocol headers and extracted field vectors.
	 */
	chain_idx = ICE_CHAIN_FV_INDEX_START -
		ice_find_first_bit(available_result_ids,
				   ICE_CHAIN_FV_INDEX_START + 1);
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		u8 i;

		status = ice_alloc_recipe(hw, &entry->rid);
		if (status)
			goto err_unroll;

		/* Clear the result index of the located recipe, as this will be
		 * updated, if needed, later in the recipe creation process.
		 */
		tmp[0].content.result_indx = 0;

		buf[recps] = tmp[0];
		buf[recps].recipe_indx = (u8)entry->rid;
		/* if the recipe is a non-root recipe RID should be programmed
		 * as 0 for the rules to be applied correctly.
		 */
		buf[recps].content.rid = 0;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);

		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
		 * to be 0
		 */
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] = 0x80;
			buf[recps].content.mask[i] = 0;
		}

		/* Program the field vector indices found earlier by
		 * ice_fill_fv_word_index() into look-up indices 1..n.
		 */
		for (i = 0; i < entry->r_group.n_val_pairs; i++) {
			buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
			buf[recps].content.mask[i + 1] = CPU_TO_LE16(0xFFFF);
		}

		/* For chained recipes, publish this recipe's result into the
		 * claimed chain index so the root recipe can match on it.
		 */
		if (rm->n_grp_count > 1) {
			entry->chain_idx = chain_idx;
			buf[recps].content.result_indx =
				ICE_AQ_RECIPE_RESULT_EN |
				((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
				 ICE_AQ_RECIPE_RESULT_DATA_M);
			ice_clear_bit(ICE_CHAIN_FV_INDEX_START - chain_idx,
				      available_result_ids);
			chain_idx = ICE_CHAIN_FV_INDEX_START -
				ice_find_first_bit(available_result_ids,
						   ICE_CHAIN_FV_INDEX_START +
						   1);
		}

		/* fill recipe dependencies */
		ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
				ICE_MAX_NUM_RECIPES);
		ice_set_bit(buf[recps].recipe_indx,
			    (ice_bitmap_t *)buf[recps].recipe_bitmap);
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;
		recps++;
	}

	if (rm->n_grp_count == 1) {
		/* Single-group case: the one recipe just built is the root. */
		rm->root_rid = buf[0].recipe_indx;
		ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
		buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
		if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[0].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		/* Applicable only for ROOT_RECIPE, set the fwd_priority for
		 * the recipe which is getting created if specified
		 * by user. Usually any advanced switch filter, which results
		 * into new extraction sequence, ended up creating a new recipe
		 * of type ROOT and usually recipes are associated with profiles
		 * Switch rule referreing newly created recipe, needs to have
		 * either/or 'fwd' or 'join' priority, otherwise switch rule
		 * evaluation will not happen correctly. In other words, if
		 * switch rule to be evaluated on priority basis, then recipe
		 * needs to have priority, otherwise it will be evaluated last.
		 */
		buf[0].content.act_ctrl_fwd_priority = rm->priority;
	} else {
		struct ice_recp_grp_entry *last_chain_entry;
		u16 rid, i;

		/* Allocate the last recipe that will chain the outcomes of the
		 * other recipes together
		 */
		status = ice_alloc_recipe(hw, &rid);
		if (status)
			goto err_unroll;

		buf[recps].recipe_indx = (u8)rid;
		buf[recps].content.rid = (u8)rid;
		buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
		/* the new entry created should also be part of rg_list to
		 * make sure we have complete recipe
		 */
		last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
			sizeof(*last_chain_entry));
		if (!last_chain_entry) {
			status = ICE_ERR_NO_MEMORY;
			goto err_unroll;
		}
		last_chain_entry->rid = rid;
		ice_memset(&buf[recps].content.lkup_indx, 0,
			   sizeof(buf[recps].content.lkup_indx),
			   ICE_NONDMA_MEM);
		/* All recipes use look-up index 0 to match switch ID. */
		buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
		buf[recps].content.mask[0] =
			CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
		for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
			buf[recps].content.lkup_indx[i] =
				ICE_AQ_RECIPE_LKUP_IGNORE;
			buf[recps].content.mask[i] = 0;
		}

		i = 1;
		/* update r_bitmap with the recp that is used for chaining */
		ice_set_bit(rid, rm->r_bitmap);
		/* this is the recipe that chains all the other recipes so it
		 * should not have a chaining ID to indicate the same
		 */
		last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
		/* The root recipe matches on the result index each
		 * sub-recipe produced (the chain_idx claimed above).
		 */
		LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
				    l_entry) {
			last_chain_entry->fv_idx[i] = entry->chain_idx;
			buf[recps].content.lkup_indx[i] = entry->chain_idx;
			buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
			ice_set_bit(entry->rid, rm->r_bitmap);
		}
		LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
		if (sizeof(buf[recps].recipe_bitmap) >=
		    sizeof(rm->r_bitmap)) {
			ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
				   sizeof(buf[recps].recipe_bitmap),
				   ICE_NONDMA_TO_NONDMA);
		} else {
			status = ICE_ERR_BAD_PTR;
			goto err_unroll;
		}
		buf[recps].content.act_ctrl_fwd_priority = rm->priority;

		/* To differentiate among different UDP tunnels, a meta data ID
		 * flag is used.
		 */
		if (match_tun) {
			buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
			buf[recps].content.mask[i] =
				CPU_TO_LE16(ICE_TUN_FLAG_MASK);
		}

		recps++;
		rm->root_rid = (u8)rid;
	}
	/* Program the assembled recipes into firmware under the change lock. */
	status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
	if (status)
		goto err_unroll;

	status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
	ice_release_change_lock(hw);
	if (status)
		goto err_unroll;

	/* Every recipe that just got created add it to the recipe
	 * book keeping list
	 */
	LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
		struct ice_switch_info *sw = hw->switch_info;
		struct ice_sw_recipe *recp;

		recp = &sw->recp_list[entry->rid];
		recp->root_rid = entry->rid;
		ice_memcpy(&recp->ext_words, entry->r_group.pairs,
			   entry->r_group.n_val_pairs *
			   sizeof(struct ice_fv_word),
			   ICE_NONDMA_TO_NONDMA);

		recp->n_ext_words = entry->r_group.n_val_pairs;
		recp->chain_idx = entry->chain_idx;
		recp->recp_created = true;
		recp->big_recp = false;
	}
	/* Ownership of buf passes to rm; the caller releases rm->root_buf. */
	rm->root_buf = buf;
	ice_free(hw, tmp);
	return status;

err_unroll:
err_mem:
	/* ice_free() tolerates NULL, so buf may be freed unconditionally. */
	ice_free(hw, tmp);
	ice_free(hw, buf);
	return status;
}
4840
4841 /**
4842  * ice_create_recipe_group - creates recipe group
4843  * @hw: pointer to hardware structure
4844  * @rm: recipe management list entry
4845  * @lkup_exts: lookup elements
4846  */
4847 static enum ice_status
4848 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
4849                         struct ice_prot_lkup_ext *lkup_exts)
4850 {
4851         struct ice_recp_grp_entry *entry;
4852         struct ice_recp_grp_entry *tmp;
4853         enum ice_status status;
4854         u8 recp_count = 0;
4855         u16 groups, i;
4856
4857         rm->n_grp_count = 0;
4858
4859         /* Each switch recipe can match up to 5 words or metadata. One word in
4860          * each recipe is used to match the switch ID. Four words are left for
4861          * matching other values. If the new advanced recipe requires more than
4862          * 4 words, it needs to be split into multiple recipes which are chained
4863          * together using the intermediate result that each produces as input to
4864          * the other recipes in the sequence.
4865          */
4866         groups = ARRAY_SIZE(ice_recipe_pack);
4867
4868         /* Check if any of the preferred recipes from the grouping policy
4869          * matches.
4870          */
4871         for (i = 0; i < groups; i++)
4872                 /* Check if the recipe from the preferred grouping matches
4873                  * or is a subset of the fields that needs to be looked up.
4874                  */
4875                 if (ice_is_recipe_subset(lkup_exts, &ice_recipe_pack[i])) {
4876                         /* This recipe can be used by itself or grouped with
4877                          * other recipes.
4878                          */
4879                         entry = (struct ice_recp_grp_entry *)
4880                                 ice_malloc(hw, sizeof(*entry));
4881                         if (!entry) {
4882                                 status = ICE_ERR_NO_MEMORY;
4883                                 goto err_unroll;
4884                         }
4885                         entry->r_group = ice_recipe_pack[i];
4886                         LIST_ADD(&entry->l_entry, &rm->rg_list);
4887                         rm->n_grp_count++;
4888                 }
4889
4890         /* Create recipes for words that are marked not done by packing them
4891          * as best fit.
4892          */
4893         status = ice_create_first_fit_recp_def(hw, lkup_exts,
4894                                                &rm->rg_list, &recp_count);
4895         if (!status) {
4896                 rm->n_grp_count += recp_count;
4897                 rm->n_ext_words = lkup_exts->n_val_words;
4898                 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
4899                            sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
4900                 goto out;
4901         }
4902
4903 err_unroll:
4904         LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
4905                                  l_entry) {
4906                 LIST_DEL(&entry->l_entry);
4907                 ice_free(hw, entry);
4908         }
4909
4910 out:
4911         return status;
4912 }
4913
4914 /**
4915  * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
4916  * @hw: pointer to hardware structure
4917  * @lkups: lookup elements or match criteria for the advanced recipe, one
4918  *         structure per protocol header
4919  * @lkups_cnt: number of protocols
4920  * @fv_list: pointer to a list that holds the returned field vectors
4921  */
4922 static enum ice_status
4923 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
4924            struct LIST_HEAD_TYPE *fv_list)
4925 {
4926         enum ice_status status;
4927         u16 *prot_ids;
4928         u16 i;
4929
4930         prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
4931         if (!prot_ids)
4932                 return ICE_ERR_NO_MEMORY;
4933
4934         for (i = 0; i < lkups_cnt; i++)
4935                 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
4936                         status = ICE_ERR_CFG;
4937                         goto free_mem;
4938                 }
4939
4940         /* Find field vectors that include all specified protocol types */
4941         status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);
4942
4943 free_mem:
4944         ice_free(hw, prot_ids);
4945         return status;
4946 }
4947
/**
 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
 * @hw: pointer to hardware structure
 * @lkups: lookup elements or match criteria for the advanced recipe, one
 *  structure per protocol header
 * @lkups_cnt: number of protocols
 * @rinfo: other information regarding the rule e.g. priority and action info
 * @rid: return the recipe ID of the recipe created
 *
 * Reuses an existing recipe when one already matches the requested lookups;
 * otherwise builds recipe groups, programs them into firmware, and maps them
 * to the matching profiles. Note that the unroll labels below are shared by
 * the success path: rg_list/fv_list entries and the rm container are
 * temporary and are always released before returning.
 */
static enum ice_status
ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		   u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
{
	struct ice_prot_lkup_ext *lkup_exts;
	struct ice_recp_grp_entry *r_entry;
	struct ice_sw_fv_list_entry *fvit;
	struct ice_recp_grp_entry *r_tmp;
	struct ice_sw_fv_list_entry *tmp;
	enum ice_status status = ICE_SUCCESS;
	struct ice_sw_recipe *rm;
	bool match_tun = false;
	u8 i;

	if (!lkups_cnt)
		return ICE_ERR_PARAM;

	lkup_exts = (struct ice_prot_lkup_ext *)
		ice_malloc(hw, sizeof(*lkup_exts));
	if (!lkup_exts)
		return ICE_ERR_NO_MEMORY;

	/* Determine the number of words to be matched and if it exceeds a
	 * recipe's restrictions
	 */
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;
		}

		/* count is zero when the lookup contributed no valid words */
		count = ice_fill_valid_words(&lkups[i], lkup_exts);
		if (!count) {
			status = ICE_ERR_CFG;
			goto err_free_lkup_exts;
		}
	}

	*rid = ice_find_recp(hw, lkup_exts);
	if (*rid < ICE_MAX_NUM_RECIPES)
		/* Success if found a recipe that match the existing criteria */
		goto err_free_lkup_exts;

	/* Recipe we need does not exist, add a recipe */

	rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
	if (!rm) {
		status = ICE_ERR_NO_MEMORY;
		goto err_free_lkup_exts;
	}

	/* Get field vectors that contain fields extracted from all the protocol
	 * headers being programmed.
	 */
	INIT_LIST_HEAD(&rm->fv_list);
	INIT_LIST_HEAD(&rm->rg_list);

	status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);
	if (status)
		goto err_unroll;

	/* Group match words into recipes using preferred recipe grouping
	 * criteria.
	 */
	status = ice_create_recipe_group(hw, rm, lkup_exts);
	if (status)
		goto err_unroll;

	/* There is only profile for UDP tunnels. So, it is necessary to use a
	 * metadata ID flag to differentiate different tunnel types. A separate
	 * recipe needs to be used for the metadata.
	 */
	if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
	     rinfo->tun_type == ICE_SW_TUN_GENEVE ||
	     rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
		match_tun = true;

	/* set the recipe priority if specified */
	rm->priority = rinfo->priority ? rinfo->priority : 0;

	/* Find offsets from the field vector. Pick the first one for all the
	 * recipes.
	 */
	ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
	status = ice_add_sw_recipe(hw, rm, match_tun);
	if (status)
		goto err_unroll;

	/* Associate all the recipes created with all the profiles in the
	 * common field vector.
	 */
	LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
			    list_entry) {
		ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);

		/* Merge the new recipes into the profile's existing
		 * recipe-to-profile association before writing it back.
		 */
		status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)r_bitmap, NULL);
		if (status)
			goto err_unroll;

		ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
			      ICE_MAX_NUM_RECIPES);
		status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
		if (status)
			goto err_unroll;

		status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
						      (u8 *)rm->r_bitmap,
						      NULL);
		ice_release_change_lock(hw);

		if (status)
			goto err_unroll;
	}

	*rid = rm->root_rid;
	/* Cache the lookup extractions so ice_find_recp() can match this
	 * recipe for subsequent identical rules.
	 */
	ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
		   lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
err_unroll:
	/* Executed on success as well: the group and fv lists are scratch
	 * state and are always torn down here.
	 */
	LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
				 ice_recp_grp_entry, l_entry) {
		LIST_DEL(&r_entry->l_entry);
		ice_free(hw, r_entry);
	}

	LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
				 list_entry) {
		LIST_DEL(&fvit->list_entry);
		ice_free(hw, fvit);
	}

	if (rm->root_buf)
		ice_free(hw, rm->root_buf);

	ice_free(hw, rm);

err_free_lkup_exts:
	ice_free(hw, lkup_exts);

	return status;
}
5100
/* Byte offsets of the individual headers within the dummy packets used to
 * build advanced switch rules (OFOS = outermost, IL = inner-layer after a
 * tunnel header).
 */
#define ICE_MAC_HDR_OFFSET      0
#define ICE_IP_HDR_OFFSET       14
#define ICE_GRE_HDR_OFFSET      34
#define ICE_MAC_IL_HDR_OFFSET   42
#define ICE_IP_IL_HDR_OFFSET    56
#define ICE_L4_HDR_OFFSET       34
#define ICE_UDP_TUN_HDR_OFFSET  42
5108
5109 /**
5110  * ice_find_dummy_packet - find dummy packet with given match criteria
5111  *
5112  * @lkups: lookup elements or match criteria for the advanced recipe, one
5113  *         structure per protocol header
5114  * @lkups_cnt: number of protocols
5115  * @tun_type: tunnel type from the match criteria
5116  * @pkt: dummy packet to fill according to filter match criteria
5117  * @pkt_len: packet length of dummy packet
5118  */
5119 static void
5120 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5121                       enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5122                       u16 *pkt_len)
5123 {
5124         u16 i;
5125
5126         if (tun_type == ICE_SW_TUN_NVGRE || tun_type == ICE_ALL_TUNNELS) {
5127                 *pkt = dummy_gre_packet;
5128                 *pkt_len = sizeof(dummy_gre_packet);
5129                 return;
5130         }
5131
5132         if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5133             tun_type == ICE_SW_TUN_VXLAN_GPE) {
5134                 *pkt = dummy_udp_tun_packet;
5135                 *pkt_len = sizeof(dummy_udp_tun_packet);
5136                 return;
5137         }
5138
5139         for (i = 0; i < lkups_cnt; i++) {
5140                 if (lkups[i].type == ICE_UDP_ILOS) {
5141                         *pkt = dummy_udp_tun_packet;
5142                         *pkt_len = sizeof(dummy_udp_tun_packet);
5143                         return;
5144                 }
5145         }
5146
5147         *pkt = dummy_tcp_tun_packet;
5148         *pkt_len = sizeof(dummy_tcp_tun_packet);
5149 }
5150
5151 /**
5152  * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5153  *
5154  * @lkups: lookup elements or match criteria for the advanced recipe, one
5155  *         structure per protocol header
5156  * @lkups_cnt: number of protocols
5157  * @tun_type: to know if the dummy packet is supposed to be tunnel packet
5158  * @s_rule: stores rule information from the match criteria
5159  * @dummy_pkt: dummy packet to fill according to filter match criteria
5160  * @pkt_len: packet length of dummy packet
5161  */
5162 static void
5163 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5164                           enum ice_sw_tunnel_type tun_type,
5165                           struct ice_aqc_sw_rules_elem *s_rule,
5166                           const u8 *dummy_pkt, u16 pkt_len)
5167 {
5168         u8 *pkt;
5169         u16 i;
5170
5171         /* Start with a packet with a pre-defined/dummy content. Then, fill
5172          * in the header values to be looked up or matched.
5173          */
5174         pkt = s_rule->pdata.lkup_tx_rx.hdr;
5175
5176         ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5177
5178         for (i = 0; i < lkups_cnt; i++) {
5179                 u32 len, pkt_off, hdr_size, field_off;
5180
5181                 switch (lkups[i].type) {
5182                 case ICE_MAC_OFOS:
5183                 case ICE_MAC_IL:
5184                         pkt_off = offsetof(struct ice_ether_hdr, dst_addr) +
5185                                 ((lkups[i].type == ICE_MAC_IL) ?
5186                                  ICE_MAC_IL_HDR_OFFSET : 0);
5187                         len = sizeof(lkups[i].h_u.eth_hdr.dst_addr);
5188                         if ((tun_type == ICE_SW_TUN_VXLAN ||
5189                              tun_type == ICE_SW_TUN_GENEVE ||
5190                              tun_type == ICE_SW_TUN_VXLAN_GPE) &&
5191                              lkups[i].type == ICE_MAC_IL) {
5192                                 pkt_off += sizeof(struct ice_udp_tnl_hdr);
5193                         }
5194
5195                         ice_memcpy(&pkt[pkt_off],
5196                                    &lkups[i].h_u.eth_hdr.dst_addr, len,
5197                                    ICE_NONDMA_TO_NONDMA);
5198                         pkt_off = offsetof(struct ice_ether_hdr, src_addr) +
5199                                 ((lkups[i].type == ICE_MAC_IL) ?
5200                                  ICE_MAC_IL_HDR_OFFSET : 0);
5201                         len = sizeof(lkups[i].h_u.eth_hdr.src_addr);
5202                         if ((tun_type == ICE_SW_TUN_VXLAN ||
5203                              tun_type == ICE_SW_TUN_GENEVE ||
5204                              tun_type == ICE_SW_TUN_VXLAN_GPE) &&
5205                              lkups[i].type == ICE_MAC_IL) {
5206                                 pkt_off += sizeof(struct ice_udp_tnl_hdr);
5207                         }
5208                         ice_memcpy(&pkt[pkt_off],
5209                                    &lkups[i].h_u.eth_hdr.src_addr, len,
5210                                    ICE_NONDMA_TO_NONDMA);
5211                         if (lkups[i].h_u.eth_hdr.ethtype_id) {
5212                                 pkt_off = offsetof(struct ice_ether_hdr,
5213                                                    ethtype_id) +
5214                                         ((lkups[i].type == ICE_MAC_IL) ?
5215                                          ICE_MAC_IL_HDR_OFFSET : 0);
5216                                 len = sizeof(lkups[i].h_u.eth_hdr.ethtype_id);
5217                                 if ((tun_type == ICE_SW_TUN_VXLAN ||
5218                                      tun_type == ICE_SW_TUN_GENEVE ||
5219                                      tun_type == ICE_SW_TUN_VXLAN_GPE) &&
5220                                      lkups[i].type == ICE_MAC_IL) {
5221                                         pkt_off +=
5222                                                 sizeof(struct ice_udp_tnl_hdr);
5223                                 }
5224                                 ice_memcpy(&pkt[pkt_off],
5225                                            &lkups[i].h_u.eth_hdr.ethtype_id,
5226                                            len, ICE_NONDMA_TO_NONDMA);
5227                         }
5228                         break;
5229                 case ICE_IPV4_OFOS:
5230                         hdr_size = sizeof(struct ice_ipv4_hdr);
5231                         if (lkups[i].h_u.ipv4_hdr.dst_addr) {
5232                                 pkt_off = ICE_IP_HDR_OFFSET +
5233                                            offsetof(struct ice_ipv4_hdr,
5234                                                     dst_addr);
5235                                 field_off = offsetof(struct ice_ipv4_hdr,
5236                                                      dst_addr);
5237                                 len = hdr_size - field_off;
5238                                 ice_memcpy(&pkt[pkt_off],
5239                                            &lkups[i].h_u.ipv4_hdr.dst_addr,
5240                                            len, ICE_NONDMA_TO_NONDMA);
5241                         }
5242                         if (lkups[i].h_u.ipv4_hdr.src_addr) {
5243                                 pkt_off = ICE_IP_HDR_OFFSET +
5244                                            offsetof(struct ice_ipv4_hdr,
5245                                                     src_addr);
5246                                 field_off = offsetof(struct ice_ipv4_hdr,
5247                                                      src_addr);
5248                                 len = hdr_size - field_off;
5249                                 ice_memcpy(&pkt[pkt_off],
5250                                            &lkups[i].h_u.ipv4_hdr.src_addr,
5251                                            len, ICE_NONDMA_TO_NONDMA);
5252                         }
5253                         break;
5254                 case ICE_IPV4_IL:
5255                         break;
5256                 case ICE_TCP_IL:
5257                 case ICE_UDP_ILOS:
5258                 case ICE_SCTP_IL:
5259                         hdr_size = sizeof(struct ice_udp_tnl_hdr);
5260                         if (lkups[i].h_u.l4_hdr.dst_port) {
5261                                 pkt_off = ICE_L4_HDR_OFFSET +
5262                                            offsetof(struct ice_l4_hdr,
5263                                                     dst_port);
5264                                 field_off = offsetof(struct ice_l4_hdr,
5265                                                      dst_port);
5266                                 len =  hdr_size - field_off;
5267                                 ice_memcpy(&pkt[pkt_off],
5268                                            &lkups[i].h_u.l4_hdr.dst_port,
5269                                            len, ICE_NONDMA_TO_NONDMA);
5270                         }
5271                         if (lkups[i].h_u.l4_hdr.src_port) {
5272                                 pkt_off = ICE_L4_HDR_OFFSET +
5273                                         offsetof(struct ice_l4_hdr, src_port);
5274                                 field_off = offsetof(struct ice_l4_hdr,
5275                                                      src_port);
5276                                 len =  hdr_size - field_off;
5277                                 ice_memcpy(&pkt[pkt_off],
5278                                            &lkups[i].h_u.l4_hdr.src_port,
5279                                            len, ICE_NONDMA_TO_NONDMA);
5280                         }
5281                         break;
5282                 case ICE_VXLAN:
5283                 case ICE_GENEVE:
5284                 case ICE_VXLAN_GPE:
5285                         pkt_off = ICE_UDP_TUN_HDR_OFFSET +
5286                                    offsetof(struct ice_udp_tnl_hdr, vni);
5287                         field_off = offsetof(struct ice_udp_tnl_hdr, vni);
5288                         len =  sizeof(struct ice_udp_tnl_hdr) - field_off;
5289                         ice_memcpy(&pkt[pkt_off], &lkups[i].h_u.tnl_hdr.vni,
5290                                    len, ICE_NONDMA_TO_NONDMA);
5291                         break;
5292                 default:
5293                         break;
5294                 }
5295         }
5296         s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5297 }
5298
5299 /**
5300  * ice_find_adv_rule_entry - Search a rule entry
5301  * @hw: pointer to the hardware structure
5302  * @lkups: lookup elements or match criteria for the advanced recipe, one
5303  *         structure per protocol header
5304  * @lkups_cnt: number of protocols
5305  * @recp_id: recipe ID for which we are finding the rule
5306  * @rinfo: other information regarding the rule e.g. priority and action info
5307  *
5308  * Helper function to search for a given advance rule entry
5309  * Returns pointer to entry storing the rule if found
5310  */
5311 static struct ice_adv_fltr_mgmt_list_entry *
5312 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5313                         u16 lkups_cnt, u8 recp_id,
5314                         struct ice_adv_rule_info *rinfo)
5315 {
5316         struct ice_adv_fltr_mgmt_list_entry *list_itr;
5317         struct ice_switch_info *sw = hw->switch_info;
5318         int i;
5319
5320         LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5321                             ice_adv_fltr_mgmt_list_entry, list_entry) {
5322                 bool lkups_matched = true;
5323
5324                 if (lkups_cnt != list_itr->lkups_cnt)
5325                         continue;
5326                 for (i = 0; i < list_itr->lkups_cnt; i++)
5327                         if (memcmp(&list_itr->lkups[i], &lkups[i],
5328                                    sizeof(*lkups))) {
5329                                 lkups_matched = false;
5330                                 break;
5331                         }
5332                 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5333                     rinfo->tun_type == list_itr->rule_info.tun_type &&
5334                     lkups_matched)
5335                         return list_itr;
5336         }
5337         return NULL;
5338 }
5339
5340 /**
5341  * ice_adv_add_update_vsi_list
5342  * @hw: pointer to the hardware structure
5343  * @m_entry: pointer to current adv filter management list entry
5344  * @cur_fltr: filter information from the book keeping entry
5345  * @new_fltr: filter information with the new VSI to be added
5346  *
5347  * Call AQ command to add or update previously created VSI list with new VSI.
5348  *
5349  * Helper function to do book keeping associated with adding filter information
5350  * The algorithm to do the booking keeping is described below :
5351  * When a VSI needs to subscribe to a given advanced filter
5352  *      if only one VSI has been added till now
5353  *              Allocate a new VSI list and add two VSIs
5354  *              to this list using switch rule command
5355  *              Update the previously created switch rule with the
5356  *              newly created VSI list ID
5357  *      if a VSI list was previously created
5358  *              Add the new VSI to the previously created VSI list set
5359  *              using the update switch rule command
5360  */
5361 static enum ice_status
5362 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5363                             struct ice_adv_fltr_mgmt_list_entry *m_entry,
5364                             struct ice_adv_rule_info *cur_fltr,
5365                             struct ice_adv_rule_info *new_fltr)
5366 {
5367         enum ice_status status;
5368         u16 vsi_list_id = 0;
5369
5370         if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5371             cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5372                 return ICE_ERR_NOT_IMPL;
5373
5374         if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5375             new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5376                 return ICE_ERR_ALREADY_EXISTS;
5377
5378         if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5379              new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5380             (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5381              cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5382                 return ICE_ERR_NOT_IMPL;
5383
5384         if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5385                  /* Only one entry existed in the mapping and it was not already
5386                   * a part of a VSI list. So, create a VSI list with the old and
5387                   * new VSIs.
5388                   */
5389                 struct ice_fltr_info tmp_fltr;
5390                 u16 vsi_handle_arr[2];
5391
5392                 /* A rule already exists with the new VSI being added */
5393                 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5394                     new_fltr->sw_act.fwd_id.hw_vsi_id)
5395                         return ICE_ERR_ALREADY_EXISTS;
5396
5397                 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5398                 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5399                 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5400                                                   &vsi_list_id,
5401                                                   ICE_SW_LKUP_LAST);
5402                 if (status)
5403                         return status;
5404
5405                 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5406                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5407                 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5408                 /* Update the previous switch rule of "forward to VSI" to
5409                  * "fwd to VSI list"
5410                  */
5411                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5412                 if (status)
5413                         return status;
5414
5415                 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5416                 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5417                 m_entry->vsi_list_info =
5418                         ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5419                                                 vsi_list_id);
5420         } else {
5421                 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5422
5423                 if (!m_entry->vsi_list_info)
5424                         return ICE_ERR_CFG;
5425
5426                 /* A rule already exists with the new VSI being added */
5427                 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
5428                         return ICE_SUCCESS;
5429
5430                 /* Update the previously created VSI list set with
5431                  * the new VSI ID passed in
5432                  */
5433                 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5434
5435                 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5436                                                   vsi_list_id, false,
5437                                                   ice_aqc_opc_update_sw_rules,
5438                                                   ICE_SW_LKUP_LAST);
5439                 /* update VSI list mapping info with new VSI ID */
5440                 if (!status)
5441                         ice_set_bit(vsi_handle,
5442                                     m_entry->vsi_list_info->vsi_map);
5443         }
5444         if (!status)
5445                 m_entry->vsi_count++;
5446         return status;
5447 }
5448
5449 /**
5450  * ice_add_adv_rule - create an advanced switch rule
5451  * @hw: pointer to the hardware structure
5452  * @lkups: information on the words that needs to be looked up. All words
5453  * together makes one recipe
5454  * @lkups_cnt: num of entries in the lkups array
5455  * @rinfo: other information related to the rule that needs to be programmed
 5456  * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
 5457  *               ignored in case of error.
5458  *
5459  * This function can program only 1 rule at a time. The lkups is used to
5460  * describe the all the words that forms the "lookup" portion of the recipe.
5461  * These words can span multiple protocols. Callers to this function need to
5462  * pass in a list of protocol headers with lookup information along and mask
5463  * that determines which words are valid from the given protocol header.
5464  * rinfo describes other information related to this rule such as forwarding
5465  * IDs, priority of this rule, etc.
5466  */
5467 enum ice_status
5468 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5469                  u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5470                  struct ice_rule_query_data *added_entry)
5471 {
5472         struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5473         u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5474         struct ice_aqc_sw_rules_elem *s_rule;
5475         struct LIST_HEAD_TYPE *rule_head;
5476         struct ice_switch_info *sw;
5477         enum ice_status status;
5478         const u8 *pkt = NULL;
5479         u32 act = 0;
5480
5481         if (!lkups_cnt)
5482                 return ICE_ERR_PARAM;
5483
5484         for (i = 0; i < lkups_cnt; i++) {
5485                 u16 j, *ptr;
5486
5487                 /* Validate match masks to make sure they match complete 16-bit
5488                  * words.
5489                  */
5490                 ptr = (u16 *)&lkups->m_u;
5491                 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
5492                         if (ptr[j] != 0 && ptr[j] != 0xffff)
5493                                 return ICE_ERR_PARAM;
5494         }
5495
5496         if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5497               rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5498               rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5499                 return ICE_ERR_CFG;
5500
5501         vsi_handle = rinfo->sw_act.vsi_handle;
5502         if (!ice_is_vsi_valid(hw, vsi_handle))
5503                 return ICE_ERR_PARAM;
5504
5505         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5506                 rinfo->sw_act.fwd_id.hw_vsi_id =
5507                         ice_get_hw_vsi_num(hw, vsi_handle);
5508         if (rinfo->sw_act.flag & ICE_FLTR_TX)
5509                 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
5510
5511         status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5512         if (status)
5513                 return status;
5514         m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5515         if (m_entry) {
5516                 /* we have to add VSI to VSI_LIST and increment vsi_count.
5517                  * Also Update VSI list so that we can change forwarding rule
5518                  * if the rule already exists, we will check if it exists with
5519                  * same vsi_id, if not then add it to the VSI list if it already
5520                  * exists if not then create a VSI list and add the existing VSI
5521                  * ID and the new VSI ID to the list
5522                  * We will add that VSI to the list
5523                  */
5524                 status = ice_adv_add_update_vsi_list(hw, m_entry,
5525                                                      &m_entry->rule_info,
5526                                                      rinfo);
5527                 if (added_entry) {
5528                         added_entry->rid = rid;
5529                         added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5530                         added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5531                 }
5532                 return status;
5533         }
5534         ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
5535                               &pkt_len);
5536         rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5537         s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
5538         if (!s_rule)
5539                 return ICE_ERR_NO_MEMORY;
5540         act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
5541         switch (rinfo->sw_act.fltr_act) {
5542         case ICE_FWD_TO_VSI:
5543                 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5544                         ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5545                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5546                 break;
5547         case ICE_FWD_TO_Q:
5548                 act |= ICE_SINGLE_ACT_TO_Q;
5549                 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5550                        ICE_SINGLE_ACT_Q_INDEX_M;
5551                 break;
5552         case ICE_DROP_PACKET:
5553                 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5554                        ICE_SINGLE_ACT_VALID_BIT;
5555                 break;
5556         default:
5557                 status = ICE_ERR_CFG;
5558                 goto err_ice_add_adv_rule;
5559         }
5560
5561         /* set the rule LOOKUP type based on caller specified 'RX'
5562          * instead of hardcoding it to be either LOOKUP_TX/RX
5563          *
5564          * for 'RX' set the source to be the port number
5565          * for 'TX' set the source to be the source HW VSI number (determined
5566          * by caller)
5567          */
5568         if (rinfo->rx) {
5569                 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
5570                 s_rule->pdata.lkup_tx_rx.src =
5571                         CPU_TO_LE16(hw->port_info->lport);
5572         } else {
5573                 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
5574                 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
5575         }
5576
5577         s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
5578         s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
5579
5580         ice_fill_adv_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, s_rule,
5581                                   pkt, pkt_len);
5582
5583         status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5584                                  rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5585                                  NULL);
5586         if (status)
5587                 goto err_ice_add_adv_rule;
5588         adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
5589                 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
5590         if (!adv_fltr) {
5591                 status = ICE_ERR_NO_MEMORY;
5592                 goto err_ice_add_adv_rule;
5593         }
5594
5595         adv_fltr->lkups = (struct ice_adv_lkup_elem *)
5596                 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
5597                            ICE_NONDMA_TO_NONDMA);
5598         if (!adv_fltr->lkups) {
5599                 status = ICE_ERR_NO_MEMORY;
5600                 goto err_ice_add_adv_rule;
5601         }
5602
5603         adv_fltr->lkups_cnt = lkups_cnt;
5604         adv_fltr->rule_info = *rinfo;
5605         adv_fltr->rule_info.fltr_rule_id =
5606                 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5607         sw = hw->switch_info;
5608         sw->recp_list[rid].adv_rule = true;
5609         rule_head = &sw->recp_list[rid].filt_rules;
5610
5611         if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
5612                 struct ice_fltr_info tmp_fltr;
5613
5614                 tmp_fltr.fltr_rule_id =
5615                         LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5616                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5617                 tmp_fltr.fwd_id.hw_vsi_id =
5618                         ice_get_hw_vsi_num(hw, vsi_handle);
5619                 tmp_fltr.vsi_handle = vsi_handle;
5620                 /* Update the previous switch rule of "forward to VSI" to
5621                  * "fwd to VSI list"
5622                  */
5623                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5624                 if (status)
5625                         goto err_ice_add_adv_rule;
5626                 adv_fltr->vsi_count = 1;
5627         }
5628
5629         /* Add rule entry to book keeping list */
5630         LIST_ADD(&adv_fltr->list_entry, rule_head);
5631         if (added_entry) {
5632                 added_entry->rid = rid;
5633                 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5634                 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
5635         }
5636 err_ice_add_adv_rule:
5637         if (status && adv_fltr) {
5638                 ice_free(hw, adv_fltr->lkups);
5639                 ice_free(hw, adv_fltr);
5640         }
5641
5642         ice_free(hw, s_rule);
5643
5644         return status;
5645 }
5646
5647 /**
5648  * ice_adv_rem_update_vsi_list
5649  * @hw: pointer to the hardware structure
5650  * @vsi_handle: VSI handle of the VSI to remove
5651  * @fm_list: filter management entry for which the VSI list management needs to
5652  *           be done
5653  */
5654 static enum ice_status
5655 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5656                             struct ice_adv_fltr_mgmt_list_entry *fm_list)
5657 {
5658         struct ice_vsi_list_map_info *vsi_list_info;
5659         enum ice_sw_lkup_type lkup_type;
5660         enum ice_status status;
5661         u16 vsi_list_id;
5662
5663         if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5664             fm_list->vsi_count == 0)
5665                 return ICE_ERR_PARAM;
5666
5667         /* A rule with the VSI being removed does not exist */
5668         if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
5669                 return ICE_ERR_DOES_NOT_EXIST;
5670
5671         lkup_type = ICE_SW_LKUP_LAST;
5672         vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
5673         status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5674                                           ice_aqc_opc_update_sw_rules,
5675                                           lkup_type);
5676         if (status)
5677                 return status;
5678
5679         fm_list->vsi_count--;
5680         ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
5681         vsi_list_info = fm_list->vsi_list_info;
5682         if (fm_list->vsi_count == 1) {
5683                 struct ice_fltr_info tmp_fltr;
5684                 u16 rem_vsi_handle;
5685
5686                 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
5687                                                     ICE_MAX_VSI);
5688                 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
5689                         return ICE_ERR_OUT_OF_RANGE;
5690
5691                 /* Make sure VSI list is empty before removing it below */
5692                 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
5693                                                   vsi_list_id, true,
5694                                                   ice_aqc_opc_update_sw_rules,
5695                                                   lkup_type);
5696                 if (status)
5697                         return status;
5698                 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
5699                 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
5700                 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5701                 tmp_fltr.fwd_id.hw_vsi_id =
5702                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
5703                 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
5704                         ice_get_hw_vsi_num(hw, rem_vsi_handle);
5705
5706                 /* Update the previous switch rule of "MAC forward to VSI" to
5707                  * "MAC fwd to VSI list"
5708                  */
5709                 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5710                 if (status) {
5711                         ice_debug(hw, ICE_DBG_SW,
5712                                   "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
5713                                   tmp_fltr.fwd_id.hw_vsi_id, status);
5714                         return status;
5715                 }
5716         }
5717
5718         if (fm_list->vsi_count == 1) {
5719                 /* Remove the VSI list since it is no longer used */
5720                 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
5721                 if (status) {
5722                         ice_debug(hw, ICE_DBG_SW,
5723                                   "Failed to remove VSI list %d, error %d\n",
5724                                   vsi_list_id, status);
5725                         return status;
5726                 }
5727
5728                 LIST_DEL(&vsi_list_info->list_entry);
5729                 ice_free(hw, vsi_list_info);
5730                 fm_list->vsi_list_info = NULL;
5731         }
5732
5733         return status;
5734 }
5735
5736 /**
5737  * ice_rem_adv_rule - removes existing advanced switch rule
5738  * @hw: pointer to the hardware structure
5739  * @lkups: information on the words that needs to be looked up. All words
5740  *         together makes one recipe
5741  * @lkups_cnt: num of entries in the lkups array
5742  * @rinfo: Its the pointer to the rule information for the rule
5743  *
5744  * This function can be used to remove 1 rule at a time. The lkups is
5745  * used to describe all the words that forms the "lookup" portion of the
5746  * rule. These words can span multiple protocols. Callers to this function
5747  * need to pass in a list of protocol headers with lookup information along
5748  * and mask that determines which words are valid from the given protocol
5749  * header. rinfo describes other information related to this rule such as
5750  * forwarding IDs, priority of this rule, etc.
5751  */
enum ice_status
ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
		 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
{
	struct ice_adv_fltr_mgmt_list_entry *list_elem;
	struct ice_prot_lkup_ext lkup_exts;
	u16 rule_buf_sz, pkt_len, i, rid;
	enum ice_status status = ICE_SUCCESS;
	bool remove_rule = false;
	struct ice_lock *rule_lock; /* Lock to protect filter rule list */
	const u8 *pkt = NULL;
	u16 vsi_handle;

	/* Re-derive the recipe ID from the lookup words: extract the valid
	 * protocol words from each element and match them against the recipes
	 * already programmed.
	 */
	ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
	for (i = 0; i < lkups_cnt; i++) {
		u16 count;

		if (lkups[i].type >= ICE_PROTOCOL_LAST)
			return ICE_ERR_CFG;

		count = ice_fill_valid_words(&lkups[i], &lkup_exts);
		if (!count)
			return ICE_ERR_CFG;
	}
	rid = ice_find_recp(hw, &lkup_exts);
	/* If did not find a recipe that match the existing criteria */
	if (rid == ICE_MAX_NUM_RECIPES)
		return ICE_ERR_PARAM;

	rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
	/* NOTE(review): the entry lookup below happens before rule_lock is
	 * taken - presumably callers serialize add/remove externally; confirm.
	 */
	list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
	/* the rule is already removed */
	if (!list_elem)
		return ICE_SUCCESS;
	ice_acquire_lock(rule_lock);
	if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
		/* Rule does not fan out to a VSI list: delete it outright */
		remove_rule = true;
	} else if (list_elem->vsi_count > 1) {
		/* Other VSIs still subscribe: only detach this VSI from the
		 * VSI list; the switch rule itself stays programmed.
		 */
		list_elem->vsi_list_info->ref_cnt--;
		remove_rule = false;
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
	} else {
		/* Last subscriber: shrink the VSI list and, once empty,
		 * schedule the rule itself for deletion.
		 */
		vsi_handle = rinfo->sw_act.vsi_handle;
		status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
		if (status) {
			ice_release_lock(rule_lock);
			return status;
		}
		if (list_elem->vsi_count == 0)
			remove_rule = true;
	}
	ice_release_lock(rule_lock);
	if (remove_rule) {
		struct ice_aqc_sw_rules_elem *s_rule;

		/* Size the AQ buffer using the dummy packet length for this
		 * tunnel type, even though hdr_len is 0 for a remove.
		 */
		ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
				      &pkt_len);
		rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
		s_rule =
			(struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
								   rule_buf_sz);
		if (!s_rule)
			return ICE_ERR_NO_MEMORY;
		s_rule->pdata.lkup_tx_rx.act = 0;
		s_rule->pdata.lkup_tx_rx.index =
			CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
		s_rule->pdata.lkup_tx_rx.hdr_len = 0;
		status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
					 rule_buf_sz, 1,
					 ice_aqc_opc_remove_sw_rules, NULL);
		if (status == ICE_SUCCESS) {
			/* HW accepted the remove: drop the book-keeping entry
			 * under the lock as well.
			 */
			ice_acquire_lock(rule_lock);
			LIST_DEL(&list_elem->list_entry);
			ice_free(hw, list_elem->lkups);
			ice_free(hw, list_elem);
			ice_release_lock(rule_lock);
		}
		ice_free(hw, s_rule);
	}
	return status;
}
5834
5835 /**
5836  * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
5837  * @hw: pointer to the hardware structure
5838  * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
5839  *
5840  * This function is used to remove 1 rule at a time. The removal is based on
5841  * the remove_entry parameter. This function will remove rule for a given
5842  * vsi_handle with a given rule_id which is passed as parameter in remove_entry
5843  */
5844 enum ice_status
5845 ice_rem_adv_rule_by_id(struct ice_hw *hw,
5846                        struct ice_rule_query_data *remove_entry)
5847 {
5848         struct ice_adv_fltr_mgmt_list_entry *list_itr;
5849         struct LIST_HEAD_TYPE *list_head;
5850         struct ice_adv_rule_info rinfo;
5851         struct ice_switch_info *sw;
5852
5853         sw = hw->switch_info;
5854         if (!sw->recp_list[remove_entry->rid].recp_created)
5855                 return ICE_ERR_PARAM;
5856         list_head = &sw->recp_list[remove_entry->rid].filt_rules;
5857         LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
5858                             list_entry) {
5859                 if (list_itr->rule_info.fltr_rule_id ==
5860                     remove_entry->rule_id) {
5861                         rinfo = list_itr->rule_info;
5862                         rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
5863                         return ice_rem_adv_rule(hw, list_itr->lkups,
5864                                                 list_itr->lkups_cnt, &rinfo);
5865                 }
5866         }
5867         return ICE_ERR_PARAM;
5868 }
5869
/**
 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
 *                       given VSI handle
 * @hw: pointer to the hardware structure
 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
 *
 * This function is used to remove all the rules for a given VSI and as soon
 * as removing a rule fails, it will return immediately with the error code,
 * else it will return ICE_SUCCESS
 */
enum ice_status
ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
{
        struct ice_adv_fltr_mgmt_list_entry *list_itr;
        struct ice_vsi_list_map_info *map_info;
        struct LIST_HEAD_TYPE *list_head;
        struct ice_adv_rule_info rinfo;
        struct ice_switch_info *sw;
        enum ice_status status;
        u16 vsi_list_id = 0;
        u8 rid;

        sw = hw->switch_info;
        /* Walk every recipe that was created and carries advanced rules */
        for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
                if (!sw->recp_list[rid].recp_created)
                        continue;
                if (!sw->recp_list[rid].adv_rule)
                        continue;
                list_head = &sw->recp_list[rid].filt_rules;
                map_info = NULL;
                LIST_FOR_EACH_ENTRY(list_itr, list_head,
                                    ice_adv_fltr_mgmt_list_entry, list_entry) {
                        /* NOTE(review): this lookup uses only hw, rid and
                         * vsi_handle, none of which change across iterations;
                         * it looks hoistable out of the loop - confirm
                         * ice_find_vsi_list_entry() has no side effects.
                         */
                        map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
                                                           &vsi_list_id);
                        if (!map_info)
                                continue;
                        /* Re-target the stored rule info at this VSI */
                        rinfo = list_itr->rule_info;
                        rinfo.sw_act.vsi_handle = vsi_handle;
                        /* NOTE(review): ice_rem_adv_rule() may unlink and free
                         * the matching list entry while we are iterating with
                         * a non-safe list walker - verify the iterator is not
                         * advanced from freed memory (a *_SAFE variant may be
                         * needed here).
                         */
                        status = ice_rem_adv_rule(hw, list_itr->lkups,
                                                  list_itr->lkups_cnt, &rinfo);
                        if (status)
                                return status;
                        /* NOTE(review): dead store - map_info is reassigned at
                         * the top of every iteration and unused after the loop.
                         */
                        map_info = NULL;
                }
        }
        return ICE_SUCCESS;
}
5917
5918 /**
5919  * ice_replay_fltr - Replay all the filters stored by a specific list head
5920  * @hw: pointer to the hardware structure
5921  * @list_head: list for which filters needs to be replayed
5922  * @recp_id: Recipe ID for which rules need to be replayed
5923  */
5924 static enum ice_status
5925 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
5926 {
5927         struct ice_fltr_mgmt_list_entry *itr;
5928         struct LIST_HEAD_TYPE l_head;
5929         enum ice_status status = ICE_SUCCESS;
5930
5931         if (LIST_EMPTY(list_head))
5932                 return status;
5933
5934         /* Move entries from the given list_head to a temporary l_head so that
5935          * they can be replayed. Otherwise when trying to re-add the same
5936          * filter, the function will return already exists
5937          */
5938         LIST_REPLACE_INIT(list_head, &l_head);
5939
5940         /* Mark the given list_head empty by reinitializing it so filters
5941          * could be added again by *handler
5942          */
5943         LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
5944                             list_entry) {
5945                 struct ice_fltr_list_entry f_entry;
5946
5947                 f_entry.fltr_info = itr->fltr_info;
5948                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
5949                         status = ice_add_rule_internal(hw, recp_id, &f_entry);
5950                         if (status != ICE_SUCCESS)
5951                                 goto end;
5952                         continue;
5953                 }
5954
5955                 /* Add a filter per VSI separately */
5956                 while (1) {
5957                         u16 vsi_handle;
5958
5959                         vsi_handle =
5960                                 ice_find_first_bit(itr->vsi_list_info->vsi_map,
5961                                                    ICE_MAX_VSI);
5962                         if (!ice_is_vsi_valid(hw, vsi_handle))
5963                                 break;
5964
5965                         ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
5966                         f_entry.fltr_info.vsi_handle = vsi_handle;
5967                         f_entry.fltr_info.fwd_id.hw_vsi_id =
5968                                 ice_get_hw_vsi_num(hw, vsi_handle);
5969                         f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
5970                         if (recp_id == ICE_SW_LKUP_VLAN)
5971                                 status = ice_add_vlan_internal(hw, &f_entry);
5972                         else
5973                                 status = ice_add_rule_internal(hw, recp_id,
5974                                                                &f_entry);
5975                         if (status != ICE_SUCCESS)
5976                                 goto end;
5977                 }
5978         }
5979 end:
5980         /* Clear the filter management list */
5981         ice_rem_sw_rule_info(hw, &l_head);
5982         return status;
5983 }
5984
5985 /**
5986  * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
5987  * @hw: pointer to the hardware structure
5988  *
5989  * NOTE: This function does not clean up partially added filters on error.
5990  * It is up to caller of the function to issue a reset or fail early.
5991  */
5992 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
5993 {
5994         struct ice_switch_info *sw = hw->switch_info;
5995         enum ice_status status = ICE_SUCCESS;
5996         u8 i;
5997
5998         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
5999                 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
6000
6001                 status = ice_replay_fltr(hw, i, head);
6002                 if (status != ICE_SUCCESS)
6003                         return status;
6004         }
6005         return status;
6006 }
6007
6008 /**
6009  * ice_replay_vsi_fltr - Replay filters for requested VSI
6010  * @hw: pointer to the hardware structure
6011  * @vsi_handle: driver VSI handle
6012  * @recp_id: Recipe ID for which rules need to be replayed
6013  * @list_head: list for which filters need to be replayed
6014  *
6015  * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6016  * It is required to pass valid VSI handle.
6017  */
6018 static enum ice_status
6019 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6020                     struct LIST_HEAD_TYPE *list_head)
6021 {
6022         struct ice_fltr_mgmt_list_entry *itr;
6023         enum ice_status status = ICE_SUCCESS;
6024         u16 hw_vsi_id;
6025
6026         if (LIST_EMPTY(list_head))
6027                 return status;
6028         hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6029
6030         LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6031                             list_entry) {
6032                 struct ice_fltr_list_entry f_entry;
6033
6034                 f_entry.fltr_info = itr->fltr_info;
6035                 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6036                     itr->fltr_info.vsi_handle == vsi_handle) {
6037                         /* update the src in case it is VSI num */
6038                         if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6039                                 f_entry.fltr_info.src = hw_vsi_id;
6040                         status = ice_add_rule_internal(hw, recp_id, &f_entry);
6041                         if (status != ICE_SUCCESS)
6042                                 goto end;
6043                         continue;
6044                 }
6045                 if (!itr->vsi_list_info ||
6046                     !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6047                         continue;
6048                 /* Clearing it so that the logic can add it back */
6049                 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6050                 f_entry.fltr_info.vsi_handle = vsi_handle;
6051                 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6052                 /* update the src in case it is VSI num */
6053                 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6054                         f_entry.fltr_info.src = hw_vsi_id;
6055                 if (recp_id == ICE_SW_LKUP_VLAN)
6056                         status = ice_add_vlan_internal(hw, &f_entry);
6057                 else
6058                         status = ice_add_rule_internal(hw, recp_id, &f_entry);
6059                 if (status != ICE_SUCCESS)
6060                         goto end;
6061         }
6062 end:
6063         return status;
6064 }
6065
6066 /**
6067  * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6068  * @hw: pointer to the hardware structure
6069  * @vsi_handle: driver VSI handle
6070  * @list_head: list for which filters need to be replayed
6071  *
6072  * Replay the advanced rule for the given VSI.
6073  */
6074 static enum ice_status
6075 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6076                         struct LIST_HEAD_TYPE *list_head)
6077 {
6078         struct ice_rule_query_data added_entry = { 0 };
6079         struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6080         enum ice_status status = ICE_SUCCESS;
6081
6082         if (LIST_EMPTY(list_head))
6083                 return status;
6084         LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6085                             list_entry) {
6086                 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6087                 u16 lk_cnt = adv_fltr->lkups_cnt;
6088
6089                 if (vsi_handle != rinfo->sw_act.vsi_handle)
6090                         continue;
6091                 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6092                                           &added_entry);
6093                 if (status)
6094                         break;
6095         }
6096         return status;
6097 }
6098
6099 /**
6100  * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6101  * @hw: pointer to the hardware structure
6102  * @vsi_handle: driver VSI handle
6103  *
6104  * Replays filters for requested VSI via vsi_handle.
6105  */
6106 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6107 {
6108         struct ice_switch_info *sw = hw->switch_info;
6109         enum ice_status status;
6110         u8 i;
6111
6112         /* Update the recipes that were created */
6113         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6114                 struct LIST_HEAD_TYPE *head;
6115
6116                 head = &sw->recp_list[i].filt_replay_rules;
6117                 if (!sw->recp_list[i].adv_rule)
6118                         status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6119                 else
6120                         status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6121                 if (status != ICE_SUCCESS)
6122                         return status;
6123         }
6124
6125         return ICE_SUCCESS;
6126 }
6127
6128 /**
6129  * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6130  * @hw: pointer to the HW struct
6131  *
6132  * Deletes the filter replay rules.
6133  */
6134 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6135 {
6136         struct ice_switch_info *sw = hw->switch_info;
6137         u8 i;
6138
6139         if (!sw)
6140                 return;
6141
6142         for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6143                 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6144                         struct LIST_HEAD_TYPE *l_head;
6145
6146                         l_head = &sw->recp_list[i].filt_replay_rules;
6147                         if (!sw->recp_list[i].adv_rule)
6148                                 ice_rem_sw_rule_info(hw, l_head);
6149                         else
6150                                 ice_rem_adv_rule_info(hw, l_head);
6151                 }
6152         }
6153 }