1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2001-2019
5 #include "ice_switch.h"
6 #include "ice_flex_type.h"
10 #define ICE_ETH_DA_OFFSET 0
11 #define ICE_ETH_ETHTYPE_OFFSET 12
12 #define ICE_ETH_VLAN_TCI_OFFSET 14
13 #define ICE_MAX_VLAN_ID 0xFFF
15 /* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
16 * struct to configure any switch filter rules.
17 * {DA (6 bytes), SA(6 bytes),
18 * Ether type (2 bytes for header without VLAN tag) OR
19 * VLAN tag (4 bytes for header with VLAN tag) }
21 * Word on Hardcoded values
22 * byte 0 = 0x2: to identify it as locally administered DA MAC
23 * byte 6 = 0x2: to identify it as locally administered SA MAC
24 * byte 12 = 0x81 & byte 13 = 0x00:
25 * In case of VLAN filter first two bytes defines ether type (0x8100)
26 * and remaining two bytes are placeholder for programming a given VLAN ID
27 * In case of Ether type filter it is treated as header without VLAN tag
28 * and byte 12 and 13 is used to program a given Ether type instead
30 #define DUMMY_ETH_HDR_LEN 16
31 static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
35 #define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
36 (sizeof(struct ice_aqc_sw_rules_elem) - \
37 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
38 sizeof(struct ice_sw_rule_lkup_rx_tx) + DUMMY_ETH_HDR_LEN - 1)
39 #define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
40 (sizeof(struct ice_aqc_sw_rules_elem) - \
41 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
42 sizeof(struct ice_sw_rule_lkup_rx_tx) - 1)
43 #define ICE_SW_RULE_LG_ACT_SIZE(n) \
44 (sizeof(struct ice_aqc_sw_rules_elem) - \
45 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
46 sizeof(struct ice_sw_rule_lg_act) - \
47 sizeof(((struct ice_sw_rule_lg_act *)0)->act) + \
48 ((n) * sizeof(((struct ice_sw_rule_lg_act *)0)->act)))
49 #define ICE_SW_RULE_VSI_LIST_SIZE(n) \
50 (sizeof(struct ice_aqc_sw_rules_elem) - \
51 sizeof(((struct ice_aqc_sw_rules_elem *)0)->pdata) + \
52 sizeof(struct ice_sw_rule_vsi_list) - \
53 sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi) + \
54 ((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi)))
/* Maps one protocol header to its byte offset inside a dummy training
 * packet; the arrays below are terminated by an ICE_PROTOCOL_LAST entry.
 */
struct ice_dummy_pkt_offsets {
	enum ice_protocol_type type;
	u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
62 struct ice_dummy_pkt_offsets dummy_gre_packet_offsets[] = {
64 { ICE_IPV4_OFOS, 14 },
68 { ICE_PROTOCOL_LAST, 0 },
72 u8 dummy_gre_packet[] = { 0, 0, 0, 0, /* ICE_MAC_OFOS 0 */
76 0x45, 0, 0, 0x3E, /* ICE_IPV4_OFOS 14 */
81 0x80, 0, 0x65, 0x58, /* ICE_NVGRE 34 */
83 0, 0, 0, 0, /* ICE_MAC_IL 42 */
87 0x45, 0, 0, 0x14, /* ICE_IPV4_IL 54 */
95 struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
97 { ICE_IPV4_OFOS, 14 },
103 { ICE_PROTOCOL_LAST, 0 },
107 u8 dummy_udp_tun_tcp_packet[] = {
108 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
109 0x00, 0x00, 0x00, 0x00,
110 0x00, 0x00, 0x00, 0x00,
113 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
114 0x00, 0x01, 0x00, 0x00,
115 0x40, 0x11, 0x00, 0x00,
116 0x00, 0x00, 0x00, 0x00,
117 0x00, 0x00, 0x00, 0x00,
119 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
120 0x00, 0x46, 0x00, 0x00,
122 0x04, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
123 0x00, 0x00, 0x00, 0x00,
125 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
126 0x00, 0x00, 0x00, 0x00,
127 0x00, 0x00, 0x00, 0x00,
130 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
131 0x00, 0x01, 0x00, 0x00,
132 0x40, 0x06, 0x00, 0x00,
133 0x00, 0x00, 0x00, 0x00,
134 0x00, 0x00, 0x00, 0x00,
136 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
137 0x00, 0x00, 0x00, 0x00,
138 0x00, 0x00, 0x00, 0x00,
139 0x50, 0x02, 0x20, 0x00,
140 0x00, 0x00, 0x00, 0x00
144 struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
146 { ICE_IPV4_OFOS, 14 },
151 { ICE_UDP_ILOS, 84 },
152 { ICE_PROTOCOL_LAST, 0 },
156 u8 dummy_udp_tun_udp_packet[] = {
157 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
158 0x00, 0x00, 0x00, 0x00,
159 0x00, 0x00, 0x00, 0x00,
162 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
163 0x00, 0x01, 0x00, 0x00,
164 0x00, 0x11, 0x00, 0x00,
165 0x00, 0x00, 0x00, 0x00,
166 0x00, 0x00, 0x00, 0x00,
168 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
169 0x00, 0x3a, 0x00, 0x00,
171 0x0c, 0x00, 0x00, 0x03, /* ICE_VXLAN 42 */
172 0x00, 0x00, 0x00, 0x00,
174 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
175 0x00, 0x00, 0x00, 0x00,
176 0x00, 0x00, 0x00, 0x00,
179 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
180 0x00, 0x01, 0x00, 0x00,
181 0x00, 0x11, 0x00, 0x00,
182 0x00, 0x00, 0x00, 0x00,
183 0x00, 0x00, 0x00, 0x00,
185 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
186 0x00, 0x08, 0x00, 0x00,
190 struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
192 { ICE_IPV4_OFOS, 14 },
193 { ICE_UDP_ILOS, 34 },
194 { ICE_PROTOCOL_LAST, 0 },
198 dummy_udp_packet[] = {
199 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
200 0x00, 0x00, 0x00, 0x00,
201 0x00, 0x00, 0x00, 0x00,
204 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
205 0x00, 0x01, 0x00, 0x00,
206 0x00, 0x11, 0x00, 0x00,
207 0x00, 0x00, 0x00, 0x00,
208 0x00, 0x00, 0x00, 0x00,
210 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
211 0x00, 0x08, 0x00, 0x00,
213 0x00, 0x00, /* 2 bytes for 4 byte alignment */
217 struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
219 { ICE_IPV4_OFOS, 14 },
221 { ICE_PROTOCOL_LAST, 0 },
225 dummy_tcp_packet[] = {
226 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
227 0x00, 0x00, 0x00, 0x00,
228 0x00, 0x00, 0x00, 0x00,
231 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
232 0x00, 0x01, 0x00, 0x00,
233 0x00, 0x06, 0x00, 0x00,
234 0x00, 0x00, 0x00, 0x00,
235 0x00, 0x00, 0x00, 0x00,
237 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
238 0x00, 0x00, 0x00, 0x00,
239 0x00, 0x00, 0x00, 0x00,
240 0x50, 0x00, 0x00, 0x00,
241 0x00, 0x00, 0x00, 0x00,
243 0x00, 0x00, /* 2 bytes for 4 byte alignment */
/* this is a recipe to profile bitmap association: indexed by recipe ID,
 * each bitmap records which HW profiles the recipe is mapped to
 */
static ice_declare_bitmap(recipe_to_profile[ICE_MAX_NUM_RECIPES],
			  ICE_MAX_NUM_PROFILES);
/* pool of field-vector result indexes still free for chaining recipes;
 * bits are cleared as FW-programmed recipes claim result slots
 */
static ice_declare_bitmap(available_result_ids, ICE_CHAIN_FV_INDEX_START + 1);
/**
 * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
 * @hw: pointer to hardware structure
 * @recps: struct that we need to populate
 * @rid: recipe ID that we are populating
 *
 * This function is used to populate all the necessary entries into our
 * bookkeeping so that we have a current list of all the recipes that are
 * programmed in the firmware.
 *
 * NOTE(review): several lines (braces, NULL checks, `continue` statements)
 * appear to be missing from this extract — verify against the full source.
 */
static enum ice_status
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid)
	u16 i, sub_recps, fv_word_idx = 0, result_idx = 0;
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_PROFILES);
	u16 result_idxs[ICE_MAX_CHAIN_RECIPE] = { 0 };
	struct ice_aqc_recipe_data_elem *tmp;
	u16 num_recps = ICE_MAX_NUM_RECIPES;
	struct ice_prot_lkup_ext *lkup_exts;
	enum ice_status status;
	/* we need a buffer big enough to accommodate all the recipes */
	tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
		ICE_MAX_NUM_RECIPES, sizeof(*tmp));
		return ICE_ERR_NO_MEMORY;
	/* seed the query with the requested root recipe ID */
	tmp[0].recipe_indx = rid;
	status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
	/* non-zero status meaning recipe doesn't exist */
	lkup_exts = &recps[rid].lkup_exts;
	/* start populating all the entries for recps[rid] based on lkups from
	 * firmware
	 */
	for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
		struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
		struct ice_recp_grp_entry *rg_entry;
		u8 prof_id, prot = 0;
		rg_entry = (struct ice_recp_grp_entry *)
			ice_malloc(hw, sizeof(*rg_entry));
			status = ICE_ERR_NO_MEMORY;
		/* Avoid 8th bit since its result enable bit */
		result_idxs[result_idx] = root_bufs.content.result_indx &
			~ICE_AQ_RECIPE_RESULT_EN;
		/* Check if result enable bit is set */
		if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
			/* mark this result slot as consumed in the pool */
			ice_clear_bit(ICE_CHAIN_FV_INDEX_START -
				      result_idxs[result_idx++],
				      available_result_ids);
			  recipe_to_profile[tmp[sub_recps].recipe_indx],
			  sizeof(r_bitmap), ICE_NONDMA_TO_NONDMA);
		/* get the first profile that is associated with rid */
		prof_id = ice_find_first_bit(r_bitmap, ICE_MAX_NUM_PROFILES);
		for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
			/* lkup_indx[0] is skipped — slot 1..N hold lookups */
			u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
			rg_entry->fv_idx[i] = lkup_indx;
			rg_entry->fv_mask[i] =
				LE16_TO_CPU(root_bufs.content.mask[i + 1]);
			/* If the recipe is a chained recipe then all its
			 * child recipe's result will have a result index.
			 * To fill fv_words we should not use those result
			 * index, we only need the protocol ids and offsets.
			 * We will skip all the fv_idx which stores result
			 * index in them. We also need to skip any fv_idx which
			 * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
			 * valid offset value.
			 */
			if (result_idxs[0] == rg_entry->fv_idx[i] ||
			    result_idxs[1] == rg_entry->fv_idx[i] ||
			    result_idxs[2] == rg_entry->fv_idx[i] ||
			    result_idxs[3] == rg_entry->fv_idx[i] ||
			    result_idxs[4] == rg_entry->fv_idx[i] ||
			    rg_entry->fv_idx[i] == ICE_AQ_RECIPE_LKUP_IGNORE ||
			    rg_entry->fv_idx[i] == 0)
			/* translate the FV index into protocol ID + offset */
			ice_find_prot_off(hw, ICE_BLK_SW, prof_id,
					  rg_entry->fv_idx[i], &prot, &off);
			lkup_exts->fv_words[fv_word_idx].prot_id = prot;
			lkup_exts->fv_words[fv_word_idx].off = off;
		/* populate rg_list with the data from the child entry of this
		 * recipe
		 */
		LIST_ADD(&rg_entry->l_entry, &recps[rid].rg_list);
	lkup_exts->n_val_words = fv_word_idx;
	recps[rid].n_grp_count = num_recps;
	/* keep a private copy of the raw FW recipe buffers */
	recps[rid].root_buf = (struct ice_aqc_recipe_data_elem *)
		ice_calloc(hw, recps[rid].n_grp_count,
			   sizeof(struct ice_aqc_recipe_data_elem));
	if (!recps[rid].root_buf)
	ice_memcpy(recps[rid].root_buf, tmp, recps[rid].n_grp_count *
		   sizeof(*recps[rid].root_buf), ICE_NONDMA_TO_NONDMA);
	recps[rid].recp_created = true;
	if (tmp[sub_recps].content.rid & ICE_AQ_RECIPE_ID_IS_ROOT)
		recps[rid].root_rid = rid;
/**
 * ice_get_recp_to_prof_map - updates recipe to profile mapping
 * @hw: pointer to hardware structure
 *
 * This function is used to populate recipe_to_profile matrix where index to
 * this array is the recipe ID and the element is the mapping of which profiles
 * is this recipe mapped to.
 */
ice_get_recp_to_prof_map(struct ice_hw *hw)
	ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
	/* query FW once per profile; each response is a bitmap of recipes */
	for (i = 0; i < ICE_MAX_NUM_PROFILES; i++) {
		ice_zero_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
		if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
		/* invert the FW view: set profile i in each recipe's bitmap */
		for (j = 0; j < ICE_MAX_NUM_RECIPES; j++)
			if (ice_is_bit_set(r_bitmap, j))
				ice_set_bit(i, recipe_to_profile[j]);
/**
 * ice_init_def_sw_recp - initialize the recipe book keeping tables
 * @hw: pointer to the HW struct
 *
 * Allocate memory for the entire recipe table and initialize the structures/
 * entries corresponding to basic recipes.
 *
 * Returns ICE_ERR_NO_MEMORY if the table cannot be allocated.
 */
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
	struct ice_sw_recipe *recps;
	recps = (struct ice_sw_recipe *)
		ice_calloc(hw, ICE_MAX_NUM_RECIPES, sizeof(*recps));
		return ICE_ERR_NO_MEMORY;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		/* by default each recipe is its own root */
		recps[i].root_rid = i;
		INIT_LIST_HEAD(&recps[i].filt_rules);
		INIT_LIST_HEAD(&recps[i].filt_replay_rules);
		INIT_LIST_HEAD(&recps[i].rg_list);
		ice_init_lock(&recps[i].filt_rule_lock);
	hw->switch_info->recp_list = recps;
/**
 * ice_aq_get_sw_cfg - get switch configuration
 * @hw: pointer to the hardware structure
 * @buf: pointer to the result buffer
 * @buf_size: length of the buffer available for response
 * @req_desc: pointer to requested descriptor
 * @num_elems: pointer to number of elements
 * @cd: pointer to command details structure or NULL
 *
 * Get switch configuration (0x0200) to be placed in 'buff'.
 * This admin command returns information such as initial VSI/port number
 * and switch ID it belongs to.
 *
 * NOTE: *req_desc is both an input/output parameter.
 * The caller of this function first calls this function with *request_desc set
 * to 0. If the response from f/w has *req_desc set to 0, all the switch
 * configuration information has been returned; if non-zero (meaning not all
 * the information was returned), the caller should call this function again
 * with *req_desc set to the previous value returned by f/w to get the
 * next block of switch configuration information.
 *
 * *num_elems is output only parameter. This reflects the number of elements
 * in response buffer. The caller of this function to use *num_elems while
 * parsing the response buffer.
 */
static enum ice_status
ice_aq_get_sw_cfg(struct ice_hw *hw, struct ice_aqc_get_sw_cfg_resp *buf,
		  u16 buf_size, u16 *req_desc, u16 *num_elems,
		  struct ice_sq_cd *cd)
	struct ice_aqc_get_sw_cfg *cmd;
	enum ice_status status;
	struct ice_aq_desc desc;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_sw_cfg);
	cmd = &desc.params.get_sw_conf;
	cmd->element = CPU_TO_LE16(*req_desc);
	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	/* echo back the continuation descriptor and element count that
	 * firmware wrote into the command structure
	 */
	*req_desc = LE16_TO_CPU(cmd->element);
	*num_elems = LE16_TO_CPU(cmd->num_elems);
/**
 * ice_alloc_sw - allocate resources specific to switch
 * @hw: pointer to the HW struct
 * @ena_stats: true to turn on VEB stats
 * @shared_res: true for shared resource, false for dedicated resource
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * allocates switch resources (SWID and VEB counter) (0x0208)
 *
 * NOTE(review): some guard lines (allocation NULL checks, the
 * `if (ena_stats)` wrapper around the counter section) are not visible in
 * this extract — confirm against the complete source.
 */
ice_alloc_sw(struct ice_hw *hw, bool ena_stats, bool shared_res, u16 *sw_id,
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *sw_ele;
	enum ice_status status;
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* Prepare buffer for switch ID.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is allocated, and hence a single sw_id
	 * is requested.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
		CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID |
			    (shared_res ? ICE_AQC_RES_TYPE_FLAG_SHARED :
			    ICE_AQC_RES_TYPE_FLAG_DEDICATED));
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
		goto ice_alloc_sw_exit;
	/* firmware returns the allocated SWID in the response element */
	sw_ele = &sw_buf->elem[0];
	*sw_id = LE16_TO_CPU(sw_ele->e.sw_resp);
		/* Prepare buffer for VEB Counter */
		enum ice_adminq_opc opc = ice_aqc_opc_alloc_res;
		struct ice_aqc_alloc_free_res_elem *counter_buf;
		struct ice_aqc_res_elem *counter_ele;
		counter_buf = (struct ice_aqc_alloc_free_res_elem *)
			ice_malloc(hw, buf_len);
			status = ICE_ERR_NO_MEMORY;
			goto ice_alloc_sw_exit;
		/* The number of resource entries in buffer is passed as 1 since
		 * only a single switch/VEB instance is allocated, and hence a
		 * single VEB counter is requested.
		 */
		counter_buf->num_elems = CPU_TO_LE16(1);
		counter_buf->res_type =
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER |
				    ICE_AQC_RES_TYPE_FLAG_DEDICATED);
		status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
			ice_free(hw, counter_buf);
			goto ice_alloc_sw_exit;
		counter_ele = &counter_buf->elem[0];
		*counter_id = LE16_TO_CPU(counter_ele->e.sw_resp);
		ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
/**
 * ice_free_sw - free resources specific to switch
 * @hw: pointer to the HW struct
 * @sw_id: switch ID returned
 * @counter_id: VEB counter ID returned
 *
 * free switch resources (SWID and VEB counter) (0x0209)
 *
 * NOTE: This function frees multiple resources. It continues
 * releasing other resources even after it encounters error.
 * The error code returned is the last error it encountered.
 */
enum ice_status ice_free_sw(struct ice_hw *hw, u16 sw_id, u16 counter_id)
	struct ice_aqc_alloc_free_res_elem *sw_buf, *counter_buf;
	enum ice_status status, ret_status;
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	/* Prepare buffer to free for switch ID res.
	 * The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single sw_id
	 * is released.
	 */
	sw_buf->num_elems = CPU_TO_LE16(1);
	sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_SWID);
	sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(sw_id);
	ret_status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
					   ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
	/* Prepare buffer to free for VEB Counter resource */
	counter_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		ice_free(hw, sw_buf);
		return ICE_ERR_NO_MEMORY;
	/* The number of resource entries in buffer is passed as 1 since only a
	 * single switch/VEB instance is freed, and hence a single VEB counter
	 * is released.
	 */
	counter_buf->num_elems = CPU_TO_LE16(1);
	counter_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VEB_COUNTER);
	counter_buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
	status = ice_aq_alloc_free_res(hw, 1, counter_buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
		ice_debug(hw, ICE_DBG_SW,
			  "VEB counter resource could not be freed\n");
	/* both buffers are released regardless of AQ command outcome */
	ice_free(hw, counter_buf);
	ice_free(hw, sw_buf);
/**
 * ice_aq_add_vsi - add a VSI context to hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware (0x0210)
 */
ice_aq_add_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
	       struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *res;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	res = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_vsi);
	/* caller supplies an explicit VSI number instead of letting FW pick */
	if (!vsi_ctx->alloc_from_pool)
		cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num |
					   ICE_AQ_VSI_IS_VALID);
	cmd->vsi_flags = CPU_TO_LE16(vsi_ctx->flags);
	/* command carries a write buffer (the VSI properties) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* record the FW-assigned VSI number and pool accounting */
	vsi_ctx->vsi_num = LE16_TO_CPU(res->vsi_num) & ICE_AQ_VSI_NUM_M;
	vsi_ctx->vsis_allocd = LE16_TO_CPU(res->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(res->vsi_free);
/**
 * ice_aq_free_vsi - free a VSI context in hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware (0x0213)
 */
ice_aq_free_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		bool keep_vsi_alloc, struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_free_vsi);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* NOTE(review): this assignment is presumably guarded by
	 * `if (keep_vsi_alloc)` in the full source — the guard line is not
	 * visible in this extract; confirm before relying on it.
	 */
	cmd->cmd_flags = CPU_TO_LE16(ICE_AQ_VSI_KEEP_ALLOC);
	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
	/* refresh pool accounting from the FW response */
	vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
 * ice_aq_update_vsi - update a VSI context in hardware
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Update VSI context in the hardware (0x0211)
 */
ice_aq_update_vsi(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		  struct ice_sq_cd *cd)
	struct ice_aqc_add_update_free_vsi_resp *resp;
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.add_update_free_vsi_res;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_update_vsi);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* command carries a write buffer (the updated VSI properties) */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	/* refresh pool accounting from the FW response */
	vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
738 * ice_is_vsi_valid - check whether the VSI is valid or not
739 * @hw: pointer to the HW struct
740 * @vsi_handle: VSI handle
742 * check whether the VSI is valid or not
744 bool ice_is_vsi_valid(struct ice_hw *hw, u16 vsi_handle)
746 return vsi_handle < ICE_MAX_VSI && hw->vsi_ctx[vsi_handle];
750 * ice_get_hw_vsi_num - return the HW VSI number
751 * @hw: pointer to the HW struct
752 * @vsi_handle: VSI handle
754 * return the HW VSI number
755 * Caution: call this function only if VSI is valid (ice_is_vsi_valid)
757 u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle)
759 return hw->vsi_ctx[vsi_handle]->vsi_num;
763 * ice_get_vsi_ctx - return the VSI context entry for a given VSI handle
764 * @hw: pointer to the HW struct
765 * @vsi_handle: VSI handle
767 * return the VSI context entry for a given VSI handle
769 struct ice_vsi_ctx *ice_get_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
771 return (vsi_handle >= ICE_MAX_VSI) ? NULL : hw->vsi_ctx[vsi_handle];
775 * ice_save_vsi_ctx - save the VSI context for a given VSI handle
776 * @hw: pointer to the HW struct
777 * @vsi_handle: VSI handle
778 * @vsi: VSI context pointer
780 * save the VSI context entry for a given VSI handle
783 ice_save_vsi_ctx(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi)
785 hw->vsi_ctx[vsi_handle] = vsi;
/**
 * ice_clear_vsi_q_ctx - clear VSI queue contexts for all TCs
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * Frees the per-traffic-class LAN queue context arrays saved on the VSI.
 */
static void ice_clear_vsi_q_ctx(struct ice_hw *hw, u16 vsi_handle)
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	ice_for_each_traffic_class(i) {
		if (vsi->lan_q_ctx[i]) {
			ice_free(hw, vsi->lan_q_ctx[i]);
			/* defend against double-free on repeated clears */
			vsi->lan_q_ctx[i] = NULL;
/**
 * ice_clear_vsi_ctx - clear the VSI context entry
 * @hw: pointer to the HW struct
 * @vsi_handle: VSI handle
 *
 * clear the VSI context entry
 *
 * NOTE(review): freeing of the saved context structure itself is not
 * visible in this extract — confirm against the complete source.
 */
static void ice_clear_vsi_ctx(struct ice_hw *hw, u16 vsi_handle)
	struct ice_vsi_ctx *vsi;
	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	ice_clear_vsi_q_ctx(hw, vsi_handle);
	hw->vsi_ctx[vsi_handle] = NULL;
829 * ice_clear_all_vsi_ctx - clear all the VSI context entries
830 * @hw: pointer to the HW struct
832 void ice_clear_all_vsi_ctx(struct ice_hw *hw)
836 for (i = 0; i < ICE_MAX_VSI; i++)
837 ice_clear_vsi_ctx(hw, i);
/**
 * ice_add_vsi - add VSI context to the hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle provided by drivers
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Add a VSI context to the hardware also add it into the VSI handle list.
 * If this function gets called after reset for existing VSIs then update
 * with the new HW VSI number in the corresponding VSI handle list entry.
 */
ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	    struct ice_sq_cd *cd)
	struct ice_vsi_ctx *tmp_vsi_ctx;
	enum ice_status status;
	if (vsi_handle >= ICE_MAX_VSI)
		return ICE_ERR_PARAM;
	status = ice_aq_add_vsi(hw, vsi_ctx, cd);
	tmp_vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
		/* Create a new VSI context */
		tmp_vsi_ctx = (struct ice_vsi_ctx *)
			ice_malloc(hw, sizeof(*tmp_vsi_ctx));
			/* roll back the FW-side add on allocation failure */
			ice_aq_free_vsi(hw, vsi_ctx, false, cd);
			return ICE_ERR_NO_MEMORY;
		*tmp_vsi_ctx = *vsi_ctx;
		ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
		/* update with new HW VSI num */
		if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
			tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
/**
 * ice_free_vsi- free VSI context from hardware and VSI handle list
 * @hw: pointer to the HW struct
 * @vsi_handle: unique VSI handle
 * @vsi_ctx: pointer to a VSI context struct
 * @keep_vsi_alloc: keep VSI allocation as part of this PF's resources
 * @cd: pointer to command details structure or NULL
 *
 * Free VSI context info from hardware as well as from VSI handle list
 */
ice_free_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
	     bool keep_vsi_alloc, struct ice_sq_cd *cd)
	enum ice_status status;
	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;
	/* translate the driver handle into the HW VSI number for FW */
	vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
	status = ice_aq_free_vsi(hw, vsi_ctx, keep_vsi_alloc, cd);
		ice_clear_vsi_ctx(hw, vsi_handle);
911 * @hw: pointer to the HW struct
912 * @vsi_handle: unique VSI handle
913 * @vsi_ctx: pointer to a VSI context struct
914 * @cd: pointer to command details structure or NULL
916 * Update VSI context in the hardware
919 ice_update_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
920 struct ice_sq_cd *cd)
922 if (!ice_is_vsi_valid(hw, vsi_handle))
923 return ICE_ERR_PARAM;
924 vsi_ctx->vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
925 return ice_aq_update_vsi(hw, vsi_ctx, cd);
/**
 * ice_aq_get_vsi_params
 * @hw: pointer to the HW struct
 * @vsi_ctx: pointer to a VSI context struct
 * @cd: pointer to command details structure or NULL
 *
 * Get VSI context info from hardware (0x0212)
 */
ice_aq_get_vsi_params(struct ice_hw *hw, struct ice_vsi_ctx *vsi_ctx,
		      struct ice_sq_cd *cd)
	struct ice_aqc_add_get_update_free_vsi *cmd;
	struct ice_aqc_get_vsi_resp *resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	cmd = &desc.params.vsi_cmd;
	resp = &desc.params.get_vsi_resp;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_vsi_params);
	cmd->vsi_num = CPU_TO_LE16(vsi_ctx->vsi_num | ICE_AQ_VSI_IS_VALID);
	/* the VSI properties are read back into vsi_ctx->info */
	status = ice_aq_send_cmd(hw, &desc, &vsi_ctx->info,
				 sizeof(vsi_ctx->info), cd);
	vsi_ctx->vsi_num = LE16_TO_CPU(resp->vsi_num) &
	vsi_ctx->vsis_allocd = LE16_TO_CPU(resp->vsi_used);
	vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
/**
 * ice_aq_add_update_mir_rule - add/update a mirror rule
 * @hw: pointer to the HW struct
 * @rule_type: Rule Type
 * @dest_vsi: VSI number to which packets will be mirrored
 * @count: length of the list
 * @mr_buf: buffer for list of mirrored VSI numbers
 * @cd: pointer to command details structure or NULL
 * @rule_id: in/out - existing rule ID to update, or returned new rule ID
 *
 * Add/Update Mirror Rule (0x260).
 *
 * NOTE(review): the `switch (rule_type)` header, `break` statements and
 * several guard lines are not visible in this extract — verify against the
 * complete source.
 */
ice_aq_add_update_mir_rule(struct ice_hw *hw, u16 rule_type, u16 dest_vsi,
			   u16 count, struct ice_mir_rule_buf *mr_buf,
			   struct ice_sq_cd *cd, u16 *rule_id)
	struct ice_aqc_add_update_mir_rule *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	__le16 *mr_list = NULL;
	case ICE_AQC_RULE_TYPE_VPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_VPORT_EGRESS:
		/* Make sure count and mr_buf are set for these rule_types */
		if (!(count && mr_buf))
			return ICE_ERR_PARAM;
		buf_size = count * sizeof(__le16);
		mr_list = (_FORCE_ __le16 *)ice_malloc(hw, buf_size);
			return ICE_ERR_NO_MEMORY;
	case ICE_AQC_RULE_TYPE_PPORT_INGRESS:
	case ICE_AQC_RULE_TYPE_PPORT_EGRESS:
		/* Make sure count and mr_buf are not set for these
		 * rule_types
		 */
		if (count || mr_buf)
			return ICE_ERR_PARAM;
		ice_debug(hw, ICE_DBG_SW,
			  "Error due to unsupported rule_type %u\n", rule_type);
		return ICE_ERR_OUT_OF_RANGE;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_update_mir_rule);
	/* Pre-process 'mr_buf' items for add/update of virtual port
	 * ingress/egress mirroring (but not physical port ingress/egress
	 * mirroring)
	 */
	for (i = 0; i < count; i++) {
		id = mr_buf[i].vsi_idx & ICE_AQC_RULE_MIRRORED_VSI_M;
		/* Validate specified VSI number, make sure it is less
		 * than ICE_MAX_VSI, if not return with error.
		 */
		if (id >= ICE_MAX_VSI) {
			ice_debug(hw, ICE_DBG_SW,
				  "Error VSI index (%u) out-of-range\n",
			ice_free(hw, mr_list);
			return ICE_ERR_OUT_OF_RANGE;
		/* add VSI to mirror rule */
			CPU_TO_LE16(id | ICE_AQC_RULE_ACT_M);
		else /* remove VSI from mirror rule */
			mr_list[i] = CPU_TO_LE16(id);
	cmd = &desc.params.add_update_rule;
	/* a valid rule ID means this is an update of an existing rule */
	if ((*rule_id) != ICE_INVAL_MIRROR_RULE_ID)
		cmd->rule_id = CPU_TO_LE16(((*rule_id) & ICE_AQC_RULE_ID_M) |
					   ICE_AQC_RULE_ID_VALID_M);
	cmd->rule_type = CPU_TO_LE16(rule_type & ICE_AQC_RULE_TYPE_M);
	cmd->num_entries = CPU_TO_LE16(count);
	cmd->dest = CPU_TO_LE16(dest_vsi);
	status = ice_aq_send_cmd(hw, &desc, mr_list, buf_size, cd);
		/* return the FW-assigned rule ID to the caller */
		*rule_id = LE16_TO_CPU(cmd->rule_id) & ICE_AQC_RULE_ID_M;
	ice_free(hw, mr_list);
/**
 * ice_aq_delete_mir_rule - delete a mirror rule
 * @hw: pointer to the HW struct
 * @rule_id: Mirror rule ID (to be deleted)
 * @keep_allocd: if set, the VSI stays part of the PF allocated res,
 *		 otherwise it is returned to the shared pool
 * @cd: pointer to command details structure or NULL
 *
 * Delete Mirror Rule (0x261).
 */
ice_aq_delete_mir_rule(struct ice_hw *hw, u16 rule_id, bool keep_allocd,
		       struct ice_sq_cd *cd)
	struct ice_aqc_delete_mir_rule *cmd;
	struct ice_aq_desc desc;
	/* rule_id should be in the range 0...63 */
	if (rule_id >= ICE_MAX_NUM_MIRROR_RULES)
		return ICE_ERR_OUT_OF_RANGE;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_del_mir_rule);
	cmd = &desc.params.del_rule;
	rule_id |= ICE_AQC_RULE_ID_VALID_M;
	cmd->rule_id = CPU_TO_LE16(rule_id);
	/* NOTE(review): this assignment is presumably guarded by
	 * `if (keep_allocd)` — the guard line is not visible in this extract;
	 * confirm against the complete source.
	 */
		cmd->flags = CPU_TO_LE16(ICE_AQC_FLAG_KEEP_ALLOCD_M);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/**
 * ice_aq_alloc_free_vsi_list
 * @hw: pointer to the HW struct
 * @vsi_list_id: VSI list ID returned or used for lookup
 * @lkup_type: switch rule filter lookup type
 * @opc: switch rules population command type - pass in the command opcode
 *
 * allocates or free a VSI list resource
 */
static enum ice_status
ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
			   enum ice_sw_lkup_type lkup_type,
			   enum ice_adminq_opc opc)
	struct ice_aqc_alloc_free_res_elem *sw_buf;
	struct ice_aqc_res_elem *vsi_ele;
	enum ice_status status;
	buf_len = sizeof(*sw_buf);
	sw_buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
		return ICE_ERR_NO_MEMORY;
	sw_buf->num_elems = CPU_TO_LE16(1);
	/* pick the resource type from the lookup type: replication lists for
	 * MAC/ethertype/promisc lookups, prune lists for VLAN lookups
	 */
	if (lkup_type == ICE_SW_LKUP_MAC ||
	    lkup_type == ICE_SW_LKUP_MAC_VLAN ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE ||
	    lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
	    lkup_type == ICE_SW_LKUP_PROMISC ||
	    lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
	    lkup_type == ICE_SW_LKUP_LAST) {
		sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
	} else if (lkup_type == ICE_SW_LKUP_VLAN) {
			CPU_TO_LE16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
		status = ICE_ERR_PARAM;
		goto ice_aq_alloc_free_vsi_list_exit;
	/* on free, tell FW which list ID to release */
	if (opc == ice_aqc_opc_free_res)
		sw_buf->elem[0].e.sw_resp = CPU_TO_LE16(*vsi_list_id);
	status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
		goto ice_aq_alloc_free_vsi_list_exit;
	/* on alloc, report the FW-assigned list ID back to the caller */
	if (opc == ice_aqc_opc_alloc_res) {
		vsi_ele = &sw_buf->elem[0];
		*vsi_list_id = LE16_TO_CPU(vsi_ele->e.sw_resp);
ice_aq_alloc_free_vsi_list_exit:
	ice_free(hw, sw_buf);
/**
 * ice_aq_set_storm_ctrl - Sets storm control configuration
 * @hw: pointer to the HW struct
 * @bcast_thresh: represents the upper threshold for broadcast storm control
 * @mcast_thresh: represents the upper threshold for multicast storm control
 * @ctl_bitmask: storm control control knobs
 *
 * Sets the storm control configuration (0x0280)
 */
ice_aq_set_storm_ctrl(struct ice_hw *hw, u32 bcast_thresh, u32 mcast_thresh,
	struct ice_aqc_storm_cfg *cmd;
	struct ice_aq_desc desc;
	cmd = &desc.params.storm_conf;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_storm_cfg);
	/* thresholds are masked to the width firmware accepts */
	cmd->bcast_thresh_size = CPU_TO_LE32(bcast_thresh & ICE_AQ_THRESHOLD_M);
	cmd->mcast_thresh_size = CPU_TO_LE32(mcast_thresh & ICE_AQ_THRESHOLD_M);
	cmd->storm_ctrl_ctrl = CPU_TO_LE32(ctl_bitmask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1184 * ice_aq_get_storm_ctrl - gets storm control configuration
1185 * @hw: pointer to the HW struct
1186 * @bcast_thresh: represents the upper threshold for broadcast storm control
1187 * @mcast_thresh: represents the upper threshold for multicast storm control
1188 * @ctl_bitmask: storm control control knobs
1190 * Gets the storm control configuration (0x0281)
1193 ice_aq_get_storm_ctrl(struct ice_hw *hw, u32 *bcast_thresh, u32 *mcast_thresh,
1196 enum ice_status status;
1197 struct ice_aq_desc desc;
1199 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_storm_cfg);
1201 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
/* Response data lives in the descriptor itself (direct command).
 * NOTE(review): the out-pointers appear to be written conditionally in
 * the full source (NULL checks elided here) - confirm before relying on
 * unconditional writes.
 */
1203 struct ice_aqc_storm_cfg *resp = &desc.params.storm_conf;
1206 *bcast_thresh = LE32_TO_CPU(resp->bcast_thresh_size) &
1209 *mcast_thresh = LE32_TO_CPU(resp->mcast_thresh_size) &
1212 *ctl_bitmask = LE32_TO_CPU(resp->storm_ctrl_ctrl);
1219 * ice_aq_sw_rules - add/update/remove switch rules
1220 * @hw: pointer to the HW struct
1221 * @rule_list: pointer to switch rule population list
1222 * @rule_list_sz: total size of the rule list in bytes
1223 * @num_rules: number of switch rules in the rule_list
1224 * @opc: switch rules population command type - pass in the command opcode
1225 * @cd: pointer to command details structure or NULL
1227 * Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
1229 static enum ice_status
1230 ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
1231 u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1233 struct ice_aq_desc desc;
1235 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_sw_rules");
/* Reject any opcode other than the three rule-population commands */
1237 if (opc != ice_aqc_opc_add_sw_rules &&
1238 opc != ice_aqc_opc_update_sw_rules &&
1239 opc != ice_aqc_opc_remove_sw_rules)
1240 return ICE_ERR_PARAM;
1242 ice_fill_dflt_direct_cmd_desc(&desc, opc);
/* RD flag: the attached buffer carries data for FW to read */
1244 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1245 desc.params.sw_rules.num_rules_fltr_entry_index =
1246 CPU_TO_LE16(num_rules);
1247 return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
1251 * ice_aq_add_recipe - add switch recipe
1252 * @hw: pointer to the HW struct
1253 * @s_recipe_list: pointer to switch rule population list
1254 * @num_recipes: number of switch recipes in the list
1255 * @cd: pointer to command details structure or NULL
1260 ice_aq_add_recipe(struct ice_hw *hw,
1261 struct ice_aqc_recipe_data_elem *s_recipe_list,
1262 u16 num_recipes, struct ice_sq_cd *cd)
1264 struct ice_aqc_add_get_recipe *cmd;
1265 struct ice_aq_desc desc;
1268 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_add_recipe");
1269 cmd = &desc.params.add_get_recipe;
1270 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
1272 cmd->num_sub_recipes = CPU_TO_LE16(num_recipes);
/* RD flag: FW reads the recipe array from the attached buffer */
1273 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
/* Buffer size is the whole recipe array, one element per recipe */
1275 buf_size = num_recipes * sizeof(*s_recipe_list);
1277 return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
1281 * ice_aq_get_recipe - get switch recipe
1282 * @hw: pointer to the HW struct
1283 * @s_recipe_list: pointer to switch rule population list
1284 * @num_recipes: pointer to the number of recipes (input and output)
1285 * @recipe_root: root recipe number of recipe(s) to retrieve
1286 * @cd: pointer to command details structure or NULL
1290 * On input, *num_recipes should equal the number of entries in s_recipe_list.
1291 * On output, *num_recipes will equal the number of entries returned in
1294 * The caller must supply enough space in s_recipe_list to hold all possible
1295 * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
1298 ice_aq_get_recipe(struct ice_hw *hw,
1299 struct ice_aqc_recipe_data_elem *s_recipe_list,
1300 u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
1302 struct ice_aqc_add_get_recipe *cmd;
1303 struct ice_aq_desc desc;
1304 enum ice_status status;
/* Enforce the documented contract: caller must size for all recipes */
1307 if (*num_recipes != ICE_MAX_NUM_RECIPES)
1308 return ICE_ERR_PARAM;
1310 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe");
1311 cmd = &desc.params.add_get_recipe;
1312 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
1314 cmd->return_index = CPU_TO_LE16(recipe_root);
1315 cmd->num_sub_recipes = 0;
1317 buf_size = *num_recipes * sizeof(*s_recipe_list);
1319 status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
/* FW writes the returned count back into the descriptor; report it to
 * the caller regardless of status (visible lines show no status gate)
 */
1320 /* cppcheck-suppress constArgument */
1321 *num_recipes = LE16_TO_CPU(cmd->num_sub_recipes);
1327 * ice_aq_map_recipe_to_profile - Map recipe to packet profile
1328 * @hw: pointer to the HW struct
1329 * @profile_id: package profile ID to associate the recipe with
1330 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1331 * @cd: pointer to command details structure or NULL
1332 * Recipe to profile association (0x0291)
1335 ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1336 struct ice_sq_cd *cd)
1338 struct ice_aqc_recipe_to_profile *cmd;
1339 struct ice_aq_desc desc;
1341 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_assoc_recipe_to_prof");
1342 cmd = &desc.params.recipe_to_profile;
1343 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
/* NOTE(review): profile_id is u32 but the descriptor field is 16-bit;
 * the CPU_TO_LE16() silently truncates - confirm IDs never exceed 16 bits
 */
1344 cmd->profile_id = CPU_TO_LE16(profile_id);
1345 /* Set the recipe ID bit in the bitmask to let the device know which
1346 * profile we are associating the recipe to
1348 ice_memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc),
1349 ICE_NONDMA_TO_NONDMA);
1351 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1355 * ice_aq_get_recipe_to_profile - Map recipe to packet profile
1356 * @hw: pointer to the HW struct
1357 * @profile_id: package profile ID to associate the recipe with
1358 * @r_bitmap: Recipe bitmap filled in and need to be returned as response
1359 * @cd: pointer to command details structure or NULL
1360 * Associate profile ID with given recipe (0x0293)
1363 ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
1364 struct ice_sq_cd *cd)
1366 struct ice_aqc_recipe_to_profile *cmd;
1367 struct ice_aq_desc desc;
1368 enum ice_status status;
1370 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_recipe_to_prof");
1371 cmd = &desc.params.recipe_to_profile;
1372 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
/* NOTE(review): same u32 -> 16-bit truncation as the "map" variant */
1373 cmd->profile_id = CPU_TO_LE16(profile_id);
1375 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
/* Copy the recipe bitmap FW wrote into the descriptor back to caller.
 * NOTE(review): a success check before this copy is elided from view -
 * confirm r_bitmap is only updated on success in the full source.
 */
1377 ice_memcpy(r_bitmap, cmd->recipe_assoc,
1378 sizeof(cmd->recipe_assoc), ICE_NONDMA_TO_NONDMA);
1384 * ice_alloc_recipe - add recipe resource
1385 * @hw: pointer to the hardware structure
1386 * @rid: recipe ID returned as response to AQ call
1388 enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
1390 struct ice_aqc_alloc_free_res_elem *sw_buf;
1391 enum ice_status status;
1394 buf_len = sizeof(*sw_buf);
1395 sw_buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1397 return ICE_ERR_NO_MEMORY;
/* Request a single recipe resource, marked shared across functions */
1399 sw_buf->num_elems = CPU_TO_LE16(1);
1400 sw_buf->res_type = CPU_TO_LE16((ICE_AQC_RES_TYPE_RECIPE <<
1401 ICE_AQC_RES_TYPE_S) |
1402 ICE_AQC_RES_TYPE_FLAG_SHARED);
1403 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
1404 ice_aqc_opc_alloc_res, NULL);
/* FW returns the allocated recipe ID in the response element */
1406 *rid = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
1407 ice_free(hw, sw_buf);
1412 /* ice_init_port_info - Initialize port_info with switch configuration data
1413 * @pi: pointer to port_info
1414 * @vsi_port_num: VSI number or port number
1415 * @type: Type of switch element (port or VSI)
1416 * @swid: switch ID of the switch the element is attached to
1417 * @pf_vf_num: PF or VF number
1418 * @is_vf: true if the element is a VF, false otherwise
1421 ice_init_port_info(struct ice_port_info *pi, u16 vsi_port_num, u8 type,
1422 u16 swid, u16 pf_vf_num, bool is_vf)
1425 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
/* Physical port: record logical port number and ownership info,
 * and mark default Tx/Rx VSIs as not yet assigned
 */
1426 pi->lport = (u8)(vsi_port_num & ICE_LPORT_MASK);
1428 pi->pf_vf_num = pf_vf_num;
1430 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
1431 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
/* Any other element type is unexpected for port init - just log it */
1434 ice_debug(pi->hw, ICE_DBG_SW,
1435 "incorrect VSI/port type received\n");
1440 /* ice_get_initial_sw_cfg - Get initial port and default VSI data
1441 * @hw: pointer to the hardware structure
1443 enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
1445 struct ice_aqc_get_sw_cfg_resp *rbuf;
1446 enum ice_status status;
1447 u16 num_total_ports;
/* This implementation tracks a single physical port */
1453 num_total_ports = 1;
1455 rbuf = (struct ice_aqc_get_sw_cfg_resp *)
1456 ice_malloc(hw, ICE_SW_CFG_MAX_BUF_LEN);
1459 return ICE_ERR_NO_MEMORY;
1461 /* Multiple calls to ice_aq_get_sw_cfg may be required
1462 * to get all the switch configuration information. The need
1463 * for additional calls is indicated by ice_aq_get_sw_cfg
1464 * writing a non-zero value in req_desc
1467 status = ice_aq_get_sw_cfg(hw, rbuf, ICE_SW_CFG_MAX_BUF_LEN,
1468 &req_desc, &num_elems, NULL);
1473 for (i = 0; i < num_elems; i++) {
1474 struct ice_aqc_get_sw_cfg_resp_elem *ele;
1475 u16 pf_vf_num, swid, vsi_port_num;
1479 ele = rbuf[i].elements;
/* Decode the packed response element: port/VSI number, owning
 * function, switch ID, VF flag and element type
 */
1480 vsi_port_num = LE16_TO_CPU(ele->vsi_port_num) &
1481 ICE_AQC_GET_SW_CONF_RESP_VSI_PORT_NUM_M;
1483 pf_vf_num = LE16_TO_CPU(ele->pf_vf_num) &
1484 ICE_AQC_GET_SW_CONF_RESP_FUNC_NUM_M;
1486 swid = LE16_TO_CPU(ele->swid);
1488 if (LE16_TO_CPU(ele->pf_vf_num) &
1489 ICE_AQC_GET_SW_CONF_RESP_IS_VF)
1492 type = LE16_TO_CPU(ele->vsi_port_num) >>
1493 ICE_AQC_GET_SW_CONF_RESP_TYPE_S;
1496 case ICE_AQC_GET_SW_CONF_RESP_PHYS_PORT:
1497 case ICE_AQC_GET_SW_CONF_RESP_VIRT_PORT:
/* Guard against FW reporting more ports than we track */
1498 if (j == num_total_ports) {
1499 ice_debug(hw, ICE_DBG_SW,
1500 "more ports than expected\n");
1501 status = ICE_ERR_CFG;
1504 ice_init_port_info(hw->port_info,
1505 vsi_port_num, type, swid,
/* Keep fetching while FW signals more descriptors remain */
1513 } while (req_desc && !status);
1517 ice_free(hw, (void *)rbuf);
1523 * ice_fill_sw_info - Helper function to populate lb_en and lan_en
1524 * @hw: pointer to the hardware structure
1525 * @fi: filter info structure to fill/update
1527 * This helper function populates the lb_en and lan_en elements of the provided
1528 * ice_fltr_info struct using the switch's type and characteristics of the
1529 * switch rule being configured.
1531 static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
/* Only Tx-direction forwarding rules need lb_en/lan_en tuning */
1535 if ((fi->flag & ICE_FLTR_TX) &&
1536 (fi->fltr_act == ICE_FWD_TO_VSI ||
1537 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
1538 fi->fltr_act == ICE_FWD_TO_Q ||
1539 fi->fltr_act == ICE_FWD_TO_QGRP)) {
1540 /* Setting LB for prune actions will result in replicated
1541 * packets to the internal switch that will be dropped.
1543 if (fi->lkup_type != ICE_SW_LKUP_VLAN)
1546 /* Set lan_en to TRUE if
1547 * 1. The switch is a VEB AND
1549 * 2.1 The lookup is a directional lookup like ethertype,
1550 * promiscuous, ethertype-MAC, promiscuous-VLAN
1551 * and default-port OR
1552 * 2.2 The lookup is VLAN, OR
1553 * 2.3 The lookup is MAC with mcast or bcast addr for MAC, OR
1554 * 2.4 The lookup is MAC_VLAN with mcast or bcast addr for MAC.
1558 * The switch is a VEPA.
1560 * In all other cases, the LAN enable has to be set to false.
1563 if (fi->lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1564 fi->lkup_type == ICE_SW_LKUP_PROMISC ||
1565 fi->lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1566 fi->lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1567 fi->lkup_type == ICE_SW_LKUP_DFLT ||
1568 fi->lkup_type == ICE_SW_LKUP_VLAN ||
1569 (fi->lkup_type == ICE_SW_LKUP_MAC &&
1570 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)) ||
1571 (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
1572 !IS_UNICAST_ETHER_ADDR(fi->l_data.mac.mac_addr)))
1581 * ice_ilog2 - Calculates integer log base 2 of a number
1582 * @n: number on which to perform operation
1584 static int ice_ilog2(u64 n)
/* Scan from the most-significant bit down; the first set bit found is
 * floor(log2(n)). (The return statements are outside this view.)
 */
1588 for (i = 63; i >= 0; i--)
1589 if (((u64)1 << i) & n)
1596 * ice_fill_sw_rule - Helper function to fill switch rule structure
1597 * @hw: pointer to the hardware structure
1598 * @f_info: entry containing packet forwarding information
1599 * @s_rule: switch rule structure to be filled in based on mac_entry
1600 * @opc: switch rules population command type - pass in the command opcode
1603 ice_fill_sw_rule(struct ice_hw *hw, struct ice_fltr_info *f_info,
1604 struct ice_aqc_sw_rules_elem *s_rule, enum ice_adminq_opc opc)
/* Sentinel: any value > ICE_MAX_VLAN_ID means "no VLAN to program" */
1606 u16 vlan_id = ICE_MAX_VLAN_ID + 1;
/* Removal only needs the rule index; no header or action required */
1614 if (opc == ice_aqc_opc_remove_sw_rules) {
1615 s_rule->pdata.lkup_tx_rx.act = 0;
1616 s_rule->pdata.lkup_tx_rx.index =
1617 CPU_TO_LE16(f_info->fltr_rule_id);
1618 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
1622 eth_hdr_sz = sizeof(dummy_eth_header);
1623 eth_hdr = s_rule->pdata.lkup_tx_rx.hdr;
1625 /* initialize the ether header with a dummy header */
1626 ice_memcpy(eth_hdr, dummy_eth_header, eth_hdr_sz, ICE_NONDMA_TO_NONDMA);
1627 ice_fill_sw_info(hw, f_info);
/* Translate the abstract filter action into HW action bits */
1629 switch (f_info->fltr_act) {
1630 case ICE_FWD_TO_VSI:
1631 act |= (f_info->fwd_id.hw_vsi_id << ICE_SINGLE_ACT_VSI_ID_S) &
1632 ICE_SINGLE_ACT_VSI_ID_M;
1633 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1634 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1635 ICE_SINGLE_ACT_VALID_BIT;
1637 case ICE_FWD_TO_VSI_LIST:
1638 act |= ICE_SINGLE_ACT_VSI_LIST;
1639 act |= (f_info->fwd_id.vsi_list_id <<
1640 ICE_SINGLE_ACT_VSI_LIST_ID_S) &
1641 ICE_SINGLE_ACT_VSI_LIST_ID_M;
1642 if (f_info->lkup_type != ICE_SW_LKUP_VLAN)
1643 act |= ICE_SINGLE_ACT_VSI_FORWARDING |
1644 ICE_SINGLE_ACT_VALID_BIT;
1647 act |= ICE_SINGLE_ACT_TO_Q;
1648 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1649 ICE_SINGLE_ACT_Q_INDEX_M;
1651 case ICE_DROP_PACKET:
1652 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
1653 ICE_SINGLE_ACT_VALID_BIT;
1655 case ICE_FWD_TO_QGRP:
/* Queue-group size is encoded as a power-of-two region */
1656 q_rgn = f_info->qgrp_size > 0 ?
1657 (u8)ice_ilog2(f_info->qgrp_size) : 0;
1658 act |= ICE_SINGLE_ACT_TO_Q;
1659 act |= (f_info->fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
1660 ICE_SINGLE_ACT_Q_INDEX_M;
1661 act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
1662 ICE_SINGLE_ACT_Q_REGION_M;
1669 act |= ICE_SINGLE_ACT_LB_ENABLE;
1671 act |= ICE_SINGLE_ACT_LAN_ENABLE;
/* Pick the lookup-specific header fields (DA, VLAN, ethertype) */
1673 switch (f_info->lkup_type) {
1674 case ICE_SW_LKUP_MAC:
1675 daddr = f_info->l_data.mac.mac_addr;
1677 case ICE_SW_LKUP_VLAN:
1678 vlan_id = f_info->l_data.vlan.vlan_id;
1679 if (f_info->fltr_act == ICE_FWD_TO_VSI ||
1680 f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
1681 act |= ICE_SINGLE_ACT_PRUNE;
1682 act |= ICE_SINGLE_ACT_EGRESS | ICE_SINGLE_ACT_INGRESS;
1685 case ICE_SW_LKUP_ETHERTYPE_MAC:
1686 daddr = f_info->l_data.ethertype_mac.mac_addr;
/* NOTE(review): fallthrough to ETHERTYPE appears intentional so the
 * ethertype is also written for ETHERTYPE_MAC - confirm in full source
 */
1688 case ICE_SW_LKUP_ETHERTYPE:
1689 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_ETHTYPE_OFFSET);
1690 *off = CPU_TO_BE16(f_info->l_data.ethertype_mac.ethertype);
1692 case ICE_SW_LKUP_MAC_VLAN:
1693 daddr = f_info->l_data.mac_vlan.mac_addr;
1694 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1696 case ICE_SW_LKUP_PROMISC_VLAN:
1697 vlan_id = f_info->l_data.mac_vlan.vlan_id;
1699 case ICE_SW_LKUP_PROMISC:
1700 daddr = f_info->l_data.mac_vlan.mac_addr;
1706 s_rule->type = (f_info->flag & ICE_FLTR_RX) ?
1707 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX) :
1708 CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
1710 /* Recipe set depending on lookup type */
1711 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(f_info->lkup_type);
1712 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(f_info->src);
1713 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1716 ice_memcpy(eth_hdr + ICE_ETH_DA_OFFSET, daddr, ETH_ALEN,
1717 ICE_NONDMA_TO_NONDMA);
/* Program the VLAN TCI only when a valid VLAN ID was selected above */
1719 if (!(vlan_id > ICE_MAX_VLAN_ID)) {
1720 off = (_FORCE_ __be16 *)(eth_hdr + ICE_ETH_VLAN_TCI_OFFSET);
1721 *off = CPU_TO_BE16(vlan_id);
1724 /* Create the switch rule with the final dummy Ethernet header */
1725 if (opc != ice_aqc_opc_update_sw_rules)
1726 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(eth_hdr_sz);
1730 * ice_add_marker_act
1731 * @hw: pointer to the hardware structure
1732 * @m_ent: the management entry for which sw marker needs to be added
1733 * @sw_marker: sw marker to tag the Rx descriptor with
1734 * @l_id: large action resource ID
1736 * Create a large action to hold software marker and update the switch rule
1737 * entry pointed by m_ent with newly created large action
1739 static enum ice_status
1740 ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1741 u16 sw_marker, u16 l_id)
1743 struct ice_aqc_sw_rules_elem *lg_act, *rx_tx;
1744 /* For software marker we need 3 large actions
1745 * 1. FWD action: FWD TO VSI or VSI LIST
1746 * 2. GENERIC VALUE action to hold the profile ID
1747 * 3. GENERIC VALUE action to hold the software marker ID
1749 const u16 num_lg_acts = 3;
1750 enum ice_status status;
/* Large actions are only supported for MAC lookup rules here */
1756 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1757 return ICE_ERR_PARAM;
1759 /* Create two back-to-back switch rules and submit them to the HW using
1760 * one memory buffer:
1764 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
1765 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1766 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rules_size);
1768 return ICE_ERR_NO_MEMORY;
/* Second rule lives immediately after the large action in the buffer */
1770 rx_tx = (struct ice_aqc_sw_rules_elem *)((u8 *)lg_act + lg_act_size);
1772 /* Fill in the first switch rule i.e. large action */
1773 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1774 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1775 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_lg_acts);
1777 /* First action VSI forwarding or VSI list forwarding depending on how
1780 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1781 m_ent->fltr_info.fwd_id.hw_vsi_id;
1783 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1784 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1785 ICE_LG_ACT_VSI_LIST_ID_M;
1786 if (m_ent->vsi_count > 1)
1787 act |= ICE_LG_ACT_VSI_LIST;
1788 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1790 /* Second action descriptor type */
1791 act = ICE_LG_ACT_GENERIC;
1793 act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
1794 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1796 act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
1797 ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
1799 /* Third action Marker value */
1800 act |= ICE_LG_ACT_GENERIC;
1801 act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
1802 ICE_LG_ACT_GENERIC_VALUE_M;
1804 lg_act->pdata.lg_act.act[2] = CPU_TO_LE32(act);
1806 /* call the fill switch rule to fill the lookup Tx Rx structure */
1807 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1808 ice_aqc_opc_update_sw_rules);
1810 /* Update the action to point to the large action ID */
1811 rx_tx->pdata.lkup_tx_rx.act =
1812 CPU_TO_LE32(ICE_SINGLE_ACT_PTR |
1813 ((l_id << ICE_SINGLE_ACT_PTR_VAL_S) &
1814 ICE_SINGLE_ACT_PTR_VAL_M));
1816 /* Use the filter rule ID of the previously created rule with single
1817 * act. Once the update happens, hardware will treat this as large
1820 rx_tx->pdata.lkup_tx_rx.index =
1821 CPU_TO_LE16(m_ent->fltr_info.fltr_rule_id);
/* Submit both rules (large action + updated lookup) in one AQ call */
1823 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1824 ice_aqc_opc_update_sw_rules, NULL);
1826 m_ent->lg_act_idx = l_id;
1827 m_ent->sw_marker_id = sw_marker;
1830 ice_free(hw, lg_act);
1835 * ice_add_counter_act - add/update filter rule with counter action
1836 * @hw: pointer to the hardware structure
1837 * @m_ent: the management entry for which counter needs to be added
1838 * @counter_id: VLAN counter ID returned as part of allocate resource
1839 * @l_id: large action resource ID
1841 static enum ice_status
1842 ice_add_counter_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
1843 u16 counter_id, u16 l_id)
1845 struct ice_aqc_sw_rules_elem *lg_act;
1846 struct ice_aqc_sw_rules_elem *rx_tx;
1847 enum ice_status status;
1848 /* 2 actions will be added while adding a large action counter */
1849 const int num_acts = 2;
/* Counter large actions are only supported for MAC lookup rules */
1856 if (m_ent->fltr_info.lkup_type != ICE_SW_LKUP_MAC)
1857 return ICE_ERR_PARAM;
1859 /* Create two back-to-back switch rules and submit them to the HW using
1860 * one memory buffer:
1864 lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_acts);
1865 rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
1866 lg_act = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
1869 return ICE_ERR_NO_MEMORY;
/* Second rule lives immediately after the large action in the buffer */
1871 rx_tx = (struct ice_aqc_sw_rules_elem *)
1872 ((u8 *)lg_act + lg_act_size);
1874 /* Fill in the first switch rule i.e. large action */
1875 lg_act->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LG_ACT);
1876 lg_act->pdata.lg_act.index = CPU_TO_LE16(l_id);
1877 lg_act->pdata.lg_act.size = CPU_TO_LE16(num_acts);
1879 /* First action VSI forwarding or VSI list forwarding depending on how
1882 id = (m_ent->vsi_count > 1) ? m_ent->fltr_info.fwd_id.vsi_list_id :
1883 m_ent->fltr_info.fwd_id.hw_vsi_id;
1885 act = ICE_LG_ACT_VSI_FORWARDING | ICE_LG_ACT_VALID_BIT;
1886 act |= (id << ICE_LG_ACT_VSI_LIST_ID_S) &
1887 ICE_LG_ACT_VSI_LIST_ID_M;
1888 if (m_ent->vsi_count > 1)
1889 act |= ICE_LG_ACT_VSI_LIST;
1890 lg_act->pdata.lg_act.act[0] = CPU_TO_LE32(act);
1892 /* Second action counter ID */
1893 act = ICE_LG_ACT_STAT_COUNT;
1894 act |= (counter_id << ICE_LG_ACT_STAT_COUNT_S) &
1895 ICE_LG_ACT_STAT_COUNT_M;
1896 lg_act->pdata.lg_act.act[1] = CPU_TO_LE32(act);
1898 /* call the fill switch rule to fill the lookup Tx Rx structure */
1899 ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
1900 ice_aqc_opc_update_sw_rules);
/* Redirect the lookup rule's action to point at the large action */
1902 act = ICE_SINGLE_ACT_PTR;
1903 act |= (l_id << ICE_SINGLE_ACT_PTR_VAL_S) & ICE_SINGLE_ACT_PTR_VAL_M;
1904 rx_tx->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
1906 /* Use the filter rule ID of the previously created rule with single
1907 * act. Once the update happens, hardware will treat this as large
1910 f_rule_id = m_ent->fltr_info.fltr_rule_id;
1911 rx_tx->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_rule_id);
1913 status = ice_aq_sw_rules(hw, lg_act, rules_size, 2,
1914 ice_aqc_opc_update_sw_rules, NULL);
1916 m_ent->lg_act_idx = l_id;
1917 m_ent->counter_index = counter_id;
1920 ice_free(hw, lg_act);
1925 * ice_create_vsi_list_map
1926 * @hw: pointer to the hardware structure
1927 * @vsi_handle_arr: array of VSI handles to set in the VSI mapping
1928 * @num_vsi: number of VSI handles in the array
1929 * @vsi_list_id: VSI list ID generated as part of allocate resource
1931 * Helper function to create a new entry of VSI list ID to VSI mapping
1932 * using the given VSI list ID
1934 static struct ice_vsi_list_map_info *
1935 ice_create_vsi_list_map(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1938 struct ice_switch_info *sw = hw->switch_info;
1939 struct ice_vsi_list_map_info *v_map;
1942 v_map = (struct ice_vsi_list_map_info *)ice_calloc(hw, 1,
1947 v_map->vsi_list_id = vsi_list_id;
/* Record each member VSI handle in the map's bitmap */
1949 for (i = 0; i < num_vsi; i++)
1950 ice_set_bit(vsi_handle_arr[i], v_map->vsi_map);
/* Track the new map on the switch-wide list */
1952 LIST_ADD(&v_map->list_entry, &sw->vsi_list_map_head);
1957 * ice_update_vsi_list_rule
1958 * @hw: pointer to the hardware structure
1959 * @vsi_handle_arr: array of VSI handles to form a VSI list
1960 * @num_vsi: number of VSI handles in the array
1961 * @vsi_list_id: VSI list ID generated as part of allocate resource
1962 * @remove: Boolean value to indicate if this is a remove action
1963 * @opc: switch rules population command type - pass in the command opcode
1964 * @lkup_type: lookup type of the filter
1966 * Call AQ command to add a new switch rule or update existing switch rule
1967 * using the given VSI list ID
1969 static enum ice_status
1970 ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
1971 u16 vsi_list_id, bool remove, enum ice_adminq_opc opc,
1972 enum ice_sw_lkup_type lkup_type)
1974 struct ice_aqc_sw_rules_elem *s_rule;
1975 enum ice_status status;
1981 return ICE_ERR_PARAM;
/* Map lookup type to the rule type: VLAN rules manage a prune list,
 * all other supported lookups manage a plain VSI list
 */
1983 if (lkup_type == ICE_SW_LKUP_MAC ||
1984 lkup_type == ICE_SW_LKUP_MAC_VLAN ||
1985 lkup_type == ICE_SW_LKUP_ETHERTYPE ||
1986 lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
1987 lkup_type == ICE_SW_LKUP_PROMISC ||
1988 lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
1989 lkup_type == ICE_SW_LKUP_LAST)
1990 type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
1991 ICE_AQC_SW_RULES_T_VSI_LIST_SET;
1992 else if (lkup_type == ICE_SW_LKUP_VLAN)
1993 type = remove ? ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR :
1994 ICE_AQC_SW_RULES_T_PRUNE_LIST_SET;
1996 return ICE_ERR_PARAM;
1998 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(num_vsi);
1999 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2001 return ICE_ERR_NO_MEMORY;
2002 for (i = 0; i < num_vsi; i++) {
2003 if (!ice_is_vsi_valid(hw, vsi_handle_arr[i])) {
2004 status = ICE_ERR_PARAM;
2007 /* AQ call requires hw_vsi_id(s) */
2008 s_rule->pdata.vsi_list.vsi[i] =
2009 CPU_TO_LE16(ice_get_hw_vsi_num(hw, vsi_handle_arr[i]));
2012 s_rule->type = CPU_TO_LE16(type);
2013 s_rule->pdata.vsi_list.number_vsi = CPU_TO_LE16(num_vsi);
2014 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2016 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opc, NULL);
2019 ice_free(hw, s_rule);
2024 * ice_create_vsi_list_rule - Creates and populates a VSI list rule
2025 * @hw: pointer to the HW struct
2026 * @vsi_handle_arr: array of VSI handles to form a VSI list
2027 * @num_vsi: number of VSI handles in the array
2028 * @vsi_list_id: stores the ID of the VSI list to be created
2029 * @lkup_type: switch rule filter's lookup type
2031 static enum ice_status
2032 ice_create_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
2033 u16 *vsi_list_id, enum ice_sw_lkup_type lkup_type)
2035 enum ice_status status;
/* First allocate the VSI list resource; FW returns its ID */
2037 status = ice_aq_alloc_free_vsi_list(hw, vsi_list_id, lkup_type,
2038 ice_aqc_opc_alloc_res);
2042 /* Update the newly created VSI list to include the specified VSIs */
2043 return ice_update_vsi_list_rule(hw, vsi_handle_arr, num_vsi,
2044 *vsi_list_id, false,
2045 ice_aqc_opc_add_sw_rules, lkup_type);
2049 * ice_create_pkt_fwd_rule
2050 * @hw: pointer to the hardware structure
2051 * @f_entry: entry containing packet forwarding information
2053 * Create switch rule with given filter information and add an entry
2054 * to the corresponding filter management list to track this switch rule
2057 static enum ice_status
2058 ice_create_pkt_fwd_rule(struct ice_hw *hw,
2059 struct ice_fltr_list_entry *f_entry)
2061 struct ice_fltr_mgmt_list_entry *fm_entry;
2062 struct ice_aqc_sw_rules_elem *s_rule;
2063 enum ice_sw_lkup_type l_type;
2064 struct ice_sw_recipe *recp;
2065 enum ice_status status;
2067 s_rule = (struct ice_aqc_sw_rules_elem *)
2068 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2070 return ICE_ERR_NO_MEMORY;
2071 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2072 ice_malloc(hw, sizeof(*fm_entry));
2074 status = ICE_ERR_NO_MEMORY;
2075 goto ice_create_pkt_fwd_rule_exit;
2078 fm_entry->fltr_info = f_entry->fltr_info;
2080 /* Initialize all the fields for the management entry */
2081 fm_entry->vsi_count = 1;
2082 fm_entry->lg_act_idx = ICE_INVAL_LG_ACT_INDEX;
2083 fm_entry->sw_marker_id = ICE_INVAL_SW_MARKER_ID;
2084 fm_entry->counter_index = ICE_INVAL_COUNTER_ID;
2086 ice_fill_sw_rule(hw, &fm_entry->fltr_info, s_rule,
2087 ice_aqc_opc_add_sw_rules);
2089 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2090 ice_aqc_opc_add_sw_rules, NULL);
/* On AQ failure, drop the management entry - nothing was programmed */
2092 ice_free(hw, fm_entry);
2093 goto ice_create_pkt_fwd_rule_exit;
/* FW assigned the rule index; record it in both caller's entry and
 * the book-keeping entry
 */
2096 f_entry->fltr_info.fltr_rule_id =
2097 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2098 fm_entry->fltr_info.fltr_rule_id =
2099 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
2101 /* The book keeping entries will get removed when base driver
2102 * calls remove filter AQ command
2104 l_type = fm_entry->fltr_info.lkup_type;
2105 recp = &hw->switch_info->recp_list[l_type];
2106 LIST_ADD(&fm_entry->list_entry, &recp->filt_rules);
2108 ice_create_pkt_fwd_rule_exit:
2109 ice_free(hw, s_rule);
2114 * ice_update_pkt_fwd_rule
2115 * @hw: pointer to the hardware structure
2116 * @f_info: filter information for switch rule
2118 * Call AQ command to update a previously created switch rule with a
2121 static enum ice_status
2122 ice_update_pkt_fwd_rule(struct ice_hw *hw, struct ice_fltr_info *f_info)
2124 struct ice_aqc_sw_rules_elem *s_rule;
2125 enum ice_status status;
2127 s_rule = (struct ice_aqc_sw_rules_elem *)
2128 ice_malloc(hw, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE);
2130 return ICE_ERR_NO_MEMORY;
2132 ice_fill_sw_rule(hw, f_info, s_rule, ice_aqc_opc_update_sw_rules);
/* Target the existing rule by its FW-assigned index */
2134 s_rule->pdata.lkup_tx_rx.index = CPU_TO_LE16(f_info->fltr_rule_id);
2136 /* Update switch rule with new rule set to forward VSI list */
2137 status = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_ETH_HDR_SIZE, 1,
2138 ice_aqc_opc_update_sw_rules, NULL);
2140 ice_free(hw, s_rule);
2145 * ice_update_sw_rule_bridge_mode
2146 * @hw: pointer to the HW struct
2148 * Updates unicast switch filter rules based on VEB/VEPA mode
2150 enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
2152 struct ice_switch_info *sw = hw->switch_info;
2153 struct ice_fltr_mgmt_list_entry *fm_entry;
2154 enum ice_status status = ICE_SUCCESS;
2155 struct LIST_HEAD_TYPE *rule_head;
2156 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2158 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
2159 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
/* Hold the MAC-recipe rule lock while walking and updating entries */
2161 ice_acquire_lock(rule_lock);
2162 LIST_FOR_EACH_ENTRY(fm_entry, rule_head, ice_fltr_mgmt_list_entry,
2164 struct ice_fltr_info *fi = &fm_entry->fltr_info;
2165 u8 *addr = fi->l_data.mac.mac_addr;
2167 /* Update unicast Tx rules to reflect the selected
2170 if ((fi->flag & ICE_FLTR_TX) && IS_UNICAST_ETHER_ADDR(addr) &&
2171 (fi->fltr_act == ICE_FWD_TO_VSI ||
2172 fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
2173 fi->fltr_act == ICE_FWD_TO_Q ||
2174 fi->fltr_act == ICE_FWD_TO_QGRP)) {
/* Re-programs the rule; lb_en/lan_en are recomputed inside
 * ice_fill_sw_rule via ice_fill_sw_info
 */
2175 status = ice_update_pkt_fwd_rule(hw, fi);
2181 ice_release_lock(rule_lock);
2187 * ice_add_update_vsi_list
2188 * @hw: pointer to the hardware structure
2189 * @m_entry: pointer to current filter management list entry
2190 * @cur_fltr: filter information from the book keeping entry
2191 * @new_fltr: filter information with the new VSI to be added
2193 * Call AQ command to add or update previously created VSI list with new VSI.
2195 * Helper function to do book keeping associated with adding filter information
2196 * The algorithm to do the book keeping is described below :
2197 * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
2198 * if only one VSI has been added till now
2199 * Allocate a new VSI list and add two VSIs
2200 * to this list using switch rule command
2201 * Update the previously created switch rule with the
2202 * newly created VSI list ID
2203 * if a VSI list was previously created
2204 * Add the new VSI to the previously created VSI list set
2205 * using the update switch rule command
2207 static enum ice_status
2208 ice_add_update_vsi_list(struct ice_hw *hw,
2209 struct ice_fltr_mgmt_list_entry *m_entry,
2210 struct ice_fltr_info *cur_fltr,
2211 struct ice_fltr_info *new_fltr)
2213 enum ice_status status = ICE_SUCCESS;
2214 u16 vsi_list_id = 0;
/* Queue/queue-group destinations cannot be combined into VSI lists */
2216 if ((cur_fltr->fltr_act == ICE_FWD_TO_Q ||
2217 cur_fltr->fltr_act == ICE_FWD_TO_QGRP))
2218 return ICE_ERR_NOT_IMPL;
2220 if ((new_fltr->fltr_act == ICE_FWD_TO_Q ||
2221 new_fltr->fltr_act == ICE_FWD_TO_QGRP) &&
2222 (cur_fltr->fltr_act == ICE_FWD_TO_VSI ||
2223 cur_fltr->fltr_act == ICE_FWD_TO_VSI_LIST))
2224 return ICE_ERR_NOT_IMPL;
2226 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
2227 /* Only one entry existed in the mapping and it was not already
2228 * a part of a VSI list. So, create a VSI list with the old and
2231 struct ice_fltr_info tmp_fltr;
2232 u16 vsi_handle_arr[2];
2234 /* A rule already exists with the new VSI being added */
2235 if (cur_fltr->fwd_id.hw_vsi_id == new_fltr->fwd_id.hw_vsi_id)
2236 return ICE_ERR_ALREADY_EXISTS;
2238 vsi_handle_arr[0] = cur_fltr->vsi_handle;
2239 vsi_handle_arr[1] = new_fltr->vsi_handle;
2240 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
2242 new_fltr->lkup_type);
2246 tmp_fltr = *new_fltr;
2247 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
2248 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
2249 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
2250 /* Update the previous switch rule of "MAC forward to VSI" to
2251 * "MAC fwd to VSI list"
2253 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Switch the book-keeping entry over to VSI-list forwarding */
2257 cur_fltr->fwd_id.vsi_list_id = vsi_list_id;
2258 cur_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2259 m_entry->vsi_list_info =
2260 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
2263 /* If this entry was large action then the large action needs
2264 * to be updated to point to FWD to VSI list
2266 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID)
2268 ice_add_marker_act(hw, m_entry,
2269 m_entry->sw_marker_id,
2270 m_entry->lg_act_idx);
2272 u16 vsi_handle = new_fltr->vsi_handle;
2273 enum ice_adminq_opc opcode;
2275 if (!m_entry->vsi_list_info)
2278 /* A rule already exists with the new VSI being added */
2279 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
2282 /* Update the previously created VSI list set with
2283 * the new VSI ID passed in
2285 vsi_list_id = cur_fltr->fwd_id.vsi_list_id;
2286 opcode = ice_aqc_opc_update_sw_rules;
2288 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
2289 vsi_list_id, false, opcode,
2290 new_fltr->lkup_type);
2291 /* update VSI list mapping info with new VSI ID */
2293 ice_set_bit(vsi_handle,
2294 m_entry->vsi_list_info->vsi_map);
/* Track the subscriber count on success */
2297 m_entry->vsi_count++;
2302 * ice_find_rule_entry - Search a rule entry
2303 * @hw: pointer to the hardware structure
2304 * @recp_id: lookup type for which the specified rule needs to be searched
2305 * @f_info: rule information
2307 * Helper function to search for a given rule entry
2308 * Returns pointer to entry storing the rule if found
2310 static struct ice_fltr_mgmt_list_entry *
2311 ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
2313 struct ice_fltr_mgmt_list_entry *list_itr, *ret = NULL;
2314 struct ice_switch_info *sw = hw->switch_info;
2315 struct LIST_HEAD_TYPE *list_head;
/* Walk the per-recipe filter-rule list and match on lookup data
 * (l_data) plus the Rx/Tx flag; the caller must hold the recipe's
 * filt_rule_lock.
 */
2317 list_head = &sw->recp_list[recp_id].filt_rules;
2318 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
2320 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
2321 sizeof(f_info->l_data)) &&
2322 f_info->flag == list_itr->fltr_info.flag) {
2331 * ice_find_vsi_list_entry - Search VSI list map with VSI count 1
2332 * @hw: pointer to the hardware structure
2333 * @recp_id: lookup type for which VSI lists needs to be searched
2334 * @vsi_handle: VSI handle to be found in VSI list
2335 * @vsi_list_id: VSI list ID found containing vsi_handle
2337 * Helper function to search a VSI list with single entry containing given VSI
2338 * handle element. This can be extended further to search VSI list with more
2339 * than 1 vsi_count. Returns pointer to VSI list entry if found.
2341 static struct ice_vsi_list_map_info *
2342 ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
2345 struct ice_vsi_list_map_info *map_info = NULL;
2346 struct ice_switch_info *sw = hw->switch_info;
2347 struct LIST_HEAD_TYPE *list_head;
2349 list_head = &sw->recp_list[recp_id].filt_rules;
/* The recipe's filt_rules list holds either advanced or legacy
 * entries, never both; adv_rule selects which element type to walk.
 */
2350 if (sw->recp_list[recp_id].adv_rule) {
2351 struct ice_adv_fltr_mgmt_list_entry *list_itr;
2353 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2354 ice_adv_fltr_mgmt_list_entry,
2356 if (list_itr->vsi_list_info) {
2357 map_info = list_itr->vsi_list_info;
2358 if (ice_is_bit_set(map_info->vsi_map,
2360 *vsi_list_id = map_info->vsi_list_id;
2366 struct ice_fltr_mgmt_list_entry *list_itr;
2368 LIST_FOR_EACH_ENTRY(list_itr, list_head,
2369 ice_fltr_mgmt_list_entry,
/* Legacy path additionally requires vsi_count == 1, per the
 * function contract (search lists with a single VSI only).
 */
2371 if (list_itr->vsi_count == 1 &&
2372 list_itr->vsi_list_info) {
2373 map_info = list_itr->vsi_list_info;
2374 if (ice_is_bit_set(map_info->vsi_map,
2376 *vsi_list_id = map_info->vsi_list_id;
2386 * ice_add_rule_internal - add rule for a given lookup type
2387 * @hw: pointer to the hardware structure
2388 * @recp_id: lookup type (recipe ID) for which rule has to be added
2389 * @f_entry: structure containing MAC forwarding information
2391 * Adds or updates the rule lists for a given recipe
2393 static enum ice_status
2394 ice_add_rule_internal(struct ice_hw *hw, u8 recp_id,
2395 struct ice_fltr_list_entry *f_entry)
2397 struct ice_switch_info *sw = hw->switch_info;
2398 struct ice_fltr_info *new_fltr, *cur_fltr;
2399 struct ice_fltr_mgmt_list_entry *m_entry;
2400 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2401 enum ice_status status = ICE_SUCCESS;
2403 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2404 return ICE_ERR_PARAM;
2406 /* Load the hw_vsi_id only if the fwd action is fwd to VSI */
2407 if (f_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI)
2408 f_entry->fltr_info.fwd_id.hw_vsi_id =
2409 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2411 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2413 ice_acquire_lock(rule_lock);
2414 new_fltr = &f_entry->fltr_info;
/* Rule source: logical port for Rx rules, HW VSI number for Tx. */
2415 if (new_fltr->flag & ICE_FLTR_RX)
2416 new_fltr->src = hw->port_info->lport;
2417 else if (new_fltr->flag & ICE_FLTR_TX)
2419 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
/* No matching rule yet: program a fresh forwarding rule.
 * Otherwise fall through and fold this VSI into the existing
 * rule's VSI list.
 */
2421 m_entry = ice_find_rule_entry(hw, recp_id, new_fltr);
2423 status = ice_create_pkt_fwd_rule(hw, f_entry);
2424 goto exit_add_rule_internal;
2427 cur_fltr = &m_entry->fltr_info;
2428 status = ice_add_update_vsi_list(hw, m_entry, cur_fltr, new_fltr);
2430 exit_add_rule_internal:
2431 ice_release_lock(rule_lock);
2436 * ice_remove_vsi_list_rule
2437 * @hw: pointer to the hardware structure
2438 * @vsi_list_id: VSI list ID generated as part of allocate resource
2439 * @lkup_type: switch rule filter lookup type
2441 * The VSI list should be emptied before this function is called to remove the
2444 static enum ice_status
2445 ice_remove_vsi_list_rule(struct ice_hw *hw, u16 vsi_list_id,
2446 enum ice_sw_lkup_type lkup_type)
2448 struct ice_aqc_sw_rules_elem *s_rule;
2449 enum ice_status status;
/* VSI_LIST_SIZE(0): the clear command carries no VSI entries,
 * only the list index to clear.
 */
2452 s_rule_size = (u16)ICE_SW_RULE_VSI_LIST_SIZE(0);
2453 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
2455 return ICE_ERR_NO_MEMORY;
2457 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR);
2458 s_rule->pdata.vsi_list.index = CPU_TO_LE16(vsi_list_id);
2460 /* Free the vsi_list resource that we allocated. It is assumed that the
2461 * list is empty at this point.
2463 status = ice_aq_alloc_free_vsi_list(hw, &vsi_list_id, lkup_type,
2464 ice_aqc_opc_free_res);
2466 ice_free(hw, s_rule);
2471 * ice_rem_update_vsi_list
2472 * @hw: pointer to the hardware structure
2473 * @vsi_handle: VSI handle of the VSI to remove
2474 * @fm_list: filter management entry for which the VSI list management needs to
2477 static enum ice_status
2478 ice_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
2479 struct ice_fltr_mgmt_list_entry *fm_list)
2481 enum ice_sw_lkup_type lkup_type;
2482 enum ice_status status = ICE_SUCCESS;
2485 if (fm_list->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST ||
2486 fm_list->vsi_count == 0)
2487 return ICE_ERR_PARAM;
2489 /* A rule with the VSI being removed does not exist */
2490 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
2491 return ICE_ERR_DOES_NOT_EXIST;
2493 lkup_type = fm_list->fltr_info.lkup_type;
2494 vsi_list_id = fm_list->fltr_info.fwd_id.vsi_list_id;
/* Remove this VSI from the HW VSI list first, then mirror the
 * change in the software map/count book-keeping below.
 */
2495 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
2496 ice_aqc_opc_update_sw_rules,
2501 fm_list->vsi_count--;
2502 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
/* Non-VLAN filter left with a single subscriber: collapse the
 * VSI-list rule back into a plain forward-to-VSI rule for the
 * one remaining VSI.
 */
2504 if (fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) {
2505 struct ice_fltr_info tmp_fltr_info = fm_list->fltr_info;
2506 struct ice_vsi_list_map_info *vsi_list_info =
2507 fm_list->vsi_list_info;
2510 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
2512 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
2513 return ICE_ERR_OUT_OF_RANGE;
2515 /* Make sure VSI list is empty before removing it below */
2516 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
2518 ice_aqc_opc_update_sw_rules,
2523 tmp_fltr_info.fltr_act = ICE_FWD_TO_VSI;
2524 tmp_fltr_info.fwd_id.hw_vsi_id =
2525 ice_get_hw_vsi_num(hw, rem_vsi_handle);
2526 tmp_fltr_info.vsi_handle = rem_vsi_handle;
2527 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr_info);
2529 ice_debug(hw, ICE_DBG_SW,
2530 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
2531 tmp_fltr_info.fwd_id.hw_vsi_id, status);
2535 fm_list->fltr_info = tmp_fltr_info;
/* The list itself is no longer referenced: either a non-VLAN rule
 * just collapsed to a single VSI (count == 1), or a VLAN rule
 * emptied out completely (count == 0, VLAN rules always use lists).
 */
2538 if ((fm_list->vsi_count == 1 && lkup_type != ICE_SW_LKUP_VLAN) ||
2539 (fm_list->vsi_count == 0 && lkup_type == ICE_SW_LKUP_VLAN)) {
2540 struct ice_vsi_list_map_info *vsi_list_info =
2541 fm_list->vsi_list_info;
2543 /* Remove the VSI list since it is no longer used */
2544 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
2546 ice_debug(hw, ICE_DBG_SW,
2547 "Failed to remove VSI list %d, error %d\n",
2548 vsi_list_id, status);
2552 LIST_DEL(&vsi_list_info->list_entry);
2553 ice_free(hw, vsi_list_info);
2554 fm_list->vsi_list_info = NULL;
2561 * ice_remove_rule_internal - Remove a filter rule of a given type
2563 * @hw: pointer to the hardware structure
2564 * @recp_id: recipe ID for which the rule needs to be removed
2565 * @f_entry: rule entry containing filter information
2567 static enum ice_status
2568 ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
2569 struct ice_fltr_list_entry *f_entry)
2571 struct ice_switch_info *sw = hw->switch_info;
2572 struct ice_fltr_mgmt_list_entry *list_elem;
2573 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2574 enum ice_status status = ICE_SUCCESS;
2575 bool remove_rule = false;
2578 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2579 return ICE_ERR_PARAM;
2580 f_entry->fltr_info.fwd_id.hw_vsi_id =
2581 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2583 rule_lock = &sw->recp_list[recp_id].filt_rule_lock;
2584 ice_acquire_lock(rule_lock);
2585 list_elem = ice_find_rule_entry(hw, recp_id, &f_entry->fltr_info);
2587 status = ICE_ERR_DOES_NOT_EXIST;
/* Decide whether the HW rule itself can be removed, or only this
 * VSI's membership in a shared VSI list.
 */
2591 if (list_elem->fltr_info.fltr_act != ICE_FWD_TO_VSI_LIST) {
2593 } else if (!list_elem->vsi_list_info) {
2594 status = ICE_ERR_DOES_NOT_EXIST;
2596 } else if (list_elem->vsi_list_info->ref_cnt > 1) {
2597 /* a ref_cnt > 1 indicates that the vsi_list is being
2598 * shared by multiple rules. Decrement the ref_cnt and
2599 * remove this rule, but do not modify the list, as it
2600 * is in-use by other rules.
2602 list_elem->vsi_list_info->ref_cnt--;
2605 /* a ref_cnt of 1 indicates the vsi_list is only used
2606 * by one rule. However, the original removal request is only
2607 * for a single VSI. Update the vsi_list first, and only
2608 * remove the rule if there are no further VSIs in this list.
2610 vsi_handle = f_entry->fltr_info.vsi_handle;
2611 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
2614 /* if VSI count goes to zero after updating the VSI list */
2615 if (list_elem->vsi_count == 0)
2620 /* Remove the lookup rule */
2621 struct ice_aqc_sw_rules_elem *s_rule;
2623 s_rule = (struct ice_aqc_sw_rules_elem *)
2624 ice_malloc(hw, ICE_SW_RULE_RX_TX_NO_HDR_SIZE)
2626 status = ICE_ERR_NO_MEMORY;
2630 ice_fill_sw_rule(hw, &list_elem->fltr_info, s_rule,
2631 ice_aqc_opc_remove_sw_rules);
2633 status = ice_aq_sw_rules(hw, s_rule,
2634 ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
2635 ice_aqc_opc_remove_sw_rules, NULL);
2639 /* Remove the book-keeping entry from the tracking list */
2640 ice_free(hw, s_rule);
2642 LIST_DEL(&list_elem->list_entry);
2643 ice_free(hw, list_elem);
2646 ice_release_lock(rule_lock);
2651 * ice_aq_get_res_alloc - get allocated resources
2652 * @hw: pointer to the HW struct
2653 * @num_entries: pointer to u16 to store the number of resource entries returned
2654 * @buf: pointer to user-supplied buffer
2655 * @buf_size: size of buf
2656 * @cd: pointer to command details structure or NULL
2658 * The user-supplied buffer must be large enough to store the resource
2659 * information for all resource types. Each resource type is an
2660 * ice_aqc_get_res_resp_data_elem structure.
2663 ice_aq_get_res_alloc(struct ice_hw *hw, u16 *num_entries, void *buf,
2664 u16 buf_size, struct ice_sq_cd *cd)
2666 struct ice_aqc_get_res_alloc *resp;
2667 enum ice_status status;
2668 struct ice_aq_desc desc;
2671 return ICE_ERR_BAD_PTR;
2673 if (buf_size < ICE_AQ_GET_RES_ALLOC_BUF_LEN)
2674 return ICE_ERR_INVAL_SIZE;
2676 resp = &desc.params.get_res;
2678 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_res_alloc);
2679 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* num_entries is optional; report the FW element count only on
 * success and when the caller asked for it.
 */
2681 if (!status && num_entries)
2682 *num_entries = LE16_TO_CPU(resp->resp_elem_num);
2688 * ice_aq_get_res_descs - get allocated resource descriptors
2689 * @hw: pointer to the hardware structure
2690 * @num_entries: number of resource entries in buffer
2691 * @buf: Indirect buffer to hold data parameters and response
2692 * @buf_size: size of buffer for indirect commands
2693 * @res_type: resource type
2694 * @res_shared: is resource shared
2695 * @desc_id: input - first desc ID to start; output - next desc ID
2696 * @cd: pointer to command details structure or NULL
2699 ice_aq_get_res_descs(struct ice_hw *hw, u16 num_entries,
2700 struct ice_aqc_get_allocd_res_desc_resp *buf,
2701 u16 buf_size, u16 res_type, bool res_shared, u16 *desc_id,
2702 struct ice_sq_cd *cd)
2704 struct ice_aqc_get_allocd_res_desc *cmd;
2705 struct ice_aq_desc desc;
2706 enum ice_status status;
2708 ice_debug(hw, ICE_DBG_TRACE, "ice_aq_get_res_descs");
2710 cmd = &desc.params.get_res_desc;
2713 return ICE_ERR_PARAM;
/* Buffer must hold exactly num_entries response elements. */
2715 if (buf_size != (num_entries * sizeof(*buf)))
2716 return ICE_ERR_PARAM;
2718 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_allocd_res_desc);
2720 cmd->ops.cmd.res = CPU_TO_LE16(((res_type << ICE_AQC_RES_TYPE_S) &
2721 ICE_AQC_RES_TYPE_M) | (res_shared ?
2722 ICE_AQC_RES_TYPE_FLAG_SHARED : 0));
2723 cmd->ops.cmd.first_desc = CPU_TO_LE16(*desc_id);
2725 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2727 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
/* desc_id is in/out: FW returns the next descriptor to resume from,
 * enabling paged retrieval across multiple calls.
 */
2729 *desc_id = LE16_TO_CPU(cmd->ops.resp.next_desc);
2735 * ice_add_mac - Add a MAC address based filter rule
2736 * @hw: pointer to the hardware structure
2737 * @m_list: list of MAC addresses and forwarding information
2739 * IMPORTANT: When the ucast_shared flag is set to false and m_list has
2740 * multiple unicast addresses, the function assumes that all the
2741 * addresses are unique in a given add_mac call. It doesn't
2742 * check for duplicates in this case, removing duplicates from a given
2743 * list should be taken care of in the caller of this function.
2746 ice_add_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
2748 struct ice_aqc_sw_rules_elem *s_rule, *r_iter;
2749 struct ice_fltr_list_entry *m_list_itr;
2750 struct LIST_HEAD_TYPE *rule_head;
2751 u16 elem_sent, total_elem_left;
2752 struct ice_switch_info *sw;
2753 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2754 enum ice_status status = ICE_SUCCESS;
2755 u16 num_unicast = 0;
2759 return ICE_ERR_PARAM;
2761 sw = hw->switch_info;
2762 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
/* Pass 1: validate every entry; add multicast (and shared unicast)
 * rules one at a time via ice_add_rule_internal; exclusive unicast
 * entries are only counted here and batched in pass 2 below.
 */
2763 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2765 u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
2769 m_list_itr->fltr_info.flag = ICE_FLTR_TX;
2770 vsi_handle = m_list_itr->fltr_info.vsi_handle;
2771 if (!ice_is_vsi_valid(hw, vsi_handle))
2772 return ICE_ERR_PARAM;
2773 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
2774 m_list_itr->fltr_info.fwd_id.hw_vsi_id = hw_vsi_id;
2775 /* update the src in case it is VSI num */
2776 if (m_list_itr->fltr_info.src_id != ICE_SRC_ID_VSI)
2777 return ICE_ERR_PARAM;
2778 m_list_itr->fltr_info.src = hw_vsi_id;
2779 if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
2780 IS_ZERO_ETHER_ADDR(add))
2781 return ICE_ERR_PARAM;
2782 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
2783 /* Don't overwrite the unicast address */
2784 ice_acquire_lock(rule_lock);
2785 if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
2786 &m_list_itr->fltr_info)) {
2787 ice_release_lock(rule_lock);
2788 return ICE_ERR_ALREADY_EXISTS;
2790 ice_release_lock(rule_lock);
2792 } else if (IS_MULTICAST_ETHER_ADDR(add) ||
2793 (IS_UNICAST_ETHER_ADDR(add) && hw->ucast_shared)) {
2794 m_list_itr->status =
2795 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
2797 if (m_list_itr->status)
2798 return m_list_itr->status;
2802 ice_acquire_lock(rule_lock);
2803 /* Exit if no suitable entries were found for adding bulk switch rule */
2805 status = ICE_SUCCESS;
2806 goto ice_add_mac_exit;
2809 rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
2811 /* Allocate switch rule buffer for the bulk update for unicast */
2812 s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
2813 s_rule = (struct ice_aqc_sw_rules_elem *)
2814 ice_calloc(hw, num_unicast, s_rule_size);
2816 status = ICE_ERR_NO_MEMORY;
2817 goto ice_add_mac_exit;
/* Pass 2: serialize all exclusive-unicast rules into the bulk
 * buffer, one s_rule_size record per address.
 */
2821 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2823 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2824 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2826 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2827 ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
2828 ice_aqc_opc_add_sw_rules);
2829 r_iter = (struct ice_aqc_sw_rules_elem *)
2830 ((u8 *)r_iter + s_rule_size);
2834 /* Call AQ bulk switch rule update for all unicast addresses */
2836 /* Call AQ switch rule in AQ_MAX chunk */
2837 for (total_elem_left = num_unicast; total_elem_left > 0;
2838 total_elem_left -= elem_sent) {
2839 struct ice_aqc_sw_rules_elem *entry = r_iter;
/* Cap each AQ transaction at ICE_AQ_MAX_BUF_LEN bytes. */
2841 elem_sent = min(total_elem_left,
2842 (u16)(ICE_AQ_MAX_BUF_LEN / s_rule_size));
2843 status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
2844 elem_sent, ice_aqc_opc_add_sw_rules,
2847 goto ice_add_mac_exit;
2848 r_iter = (struct ice_aqc_sw_rules_elem *)
2849 ((u8 *)r_iter + (elem_sent * s_rule_size));
2852 /* Fill up rule ID based on the value returned from FW */
2854 LIST_FOR_EACH_ENTRY(m_list_itr, m_list, ice_fltr_list_entry,
2856 struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
2857 u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
2858 struct ice_fltr_mgmt_list_entry *fm_entry;
2860 if (IS_UNICAST_ETHER_ADDR(mac_addr)) {
2861 f_info->fltr_rule_id =
2862 LE16_TO_CPU(r_iter->pdata.lkup_tx_rx.index);
2863 f_info->fltr_act = ICE_FWD_TO_VSI;
2864 /* Create an entry to track this MAC address */
2865 fm_entry = (struct ice_fltr_mgmt_list_entry *)
2866 ice_malloc(hw, sizeof(*fm_entry));
2868 status = ICE_ERR_NO_MEMORY;
2869 goto ice_add_mac_exit;
2871 fm_entry->fltr_info = *f_info;
2872 fm_entry->vsi_count = 1;
2873 /* The book keeping entries will get removed when
2874 * base driver calls remove filter AQ command
2877 LIST_ADD(&fm_entry->list_entry, rule_head);
2878 r_iter = (struct ice_aqc_sw_rules_elem *)
2879 ((u8 *)r_iter + s_rule_size);
2884 ice_release_lock(rule_lock);
2886 ice_free(hw, s_rule);
2891 * ice_add_vlan_internal - Add one VLAN based filter rule
2892 * @hw: pointer to the hardware structure
2893 * @f_entry: filter entry containing one VLAN information
2895 static enum ice_status
2896 ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
2898 struct ice_switch_info *sw = hw->switch_info;
2899 struct ice_fltr_mgmt_list_entry *v_list_itr;
2900 struct ice_fltr_info *new_fltr, *cur_fltr;
2901 enum ice_sw_lkup_type lkup_type;
2902 u16 vsi_list_id = 0, vsi_handle;
2903 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
2904 enum ice_status status = ICE_SUCCESS;
2906 if (!ice_is_vsi_valid(hw, f_entry->fltr_info.vsi_handle))
2907 return ICE_ERR_PARAM;
2909 f_entry->fltr_info.fwd_id.hw_vsi_id =
2910 ice_get_hw_vsi_num(hw, f_entry->fltr_info.vsi_handle);
2911 new_fltr = &f_entry->fltr_info;
2913 /* VLAN ID should only be 12 bits */
2914 if (new_fltr->l_data.vlan.vlan_id > ICE_MAX_VLAN_ID)
2915 return ICE_ERR_PARAM;
2917 if (new_fltr->src_id != ICE_SRC_ID_VSI)
2918 return ICE_ERR_PARAM;
2920 new_fltr->src = new_fltr->fwd_id.hw_vsi_id;
2921 lkup_type = new_fltr->lkup_type;
2922 vsi_handle = new_fltr->vsi_handle;
2923 rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
2924 ice_acquire_lock(rule_lock);
/* Case 1: no existing rule for this VLAN — create one, reusing an
 * existing single-VSI list for this VSI when one is available.
 */
2925 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN, new_fltr);
2927 struct ice_vsi_list_map_info *map_info = NULL;
2929 if (new_fltr->fltr_act == ICE_FWD_TO_VSI) {
2930 /* All VLAN pruning rules use a VSI list. Check if
2931 * there is already a VSI list containing VSI that we
2932 * want to add. If found, use the same vsi_list_id for
2933 * this new VLAN rule or else create a new list.
2935 map_info = ice_find_vsi_list_entry(hw, ICE_SW_LKUP_VLAN,
2939 status = ice_create_vsi_list_rule(hw,
2947 /* Convert the action to forwarding to a VSI list. */
2948 new_fltr->fltr_act = ICE_FWD_TO_VSI_LIST;
2949 new_fltr->fwd_id.vsi_list_id = vsi_list_id;
2952 status = ice_create_pkt_fwd_rule(hw, f_entry);
2954 v_list_itr = ice_find_rule_entry(hw, ICE_SW_LKUP_VLAN,
2957 status = ICE_ERR_DOES_NOT_EXIST;
2960 /* reuse VSI list for new rule and increment ref_cnt */
2962 v_list_itr->vsi_list_info = map_info;
2963 map_info->ref_cnt++;
2965 v_list_itr->vsi_list_info =
2966 ice_create_vsi_list_map(hw, &vsi_handle,
/* Case 2: rule exists and owns its VSI list exclusively — just add
 * this VSI to that list.
 */
2970 } else if (v_list_itr->vsi_list_info->ref_cnt == 1) {
2971 /* Update existing VSI list to add new VSI ID only if it used
2974 cur_fltr = &v_list_itr->fltr_info;
2975 status = ice_add_update_vsi_list(hw, v_list_itr, cur_fltr,
2978 /* If VLAN rule exists and VSI list being used by this rule is
2979 * referenced by more than 1 VLAN rule. Then create a new VSI
2980 * list appending previous VSI with new VSI and update existing
2981 * VLAN rule to point to new VSI list ID
2983 struct ice_fltr_info tmp_fltr;
2984 u16 vsi_handle_arr[2];
2987 /* Current implementation only supports reusing VSI list with
2988 * one VSI count. We should never hit below condition
2990 if (v_list_itr->vsi_count > 1 &&
2991 v_list_itr->vsi_list_info->ref_cnt > 1) {
2992 ice_debug(hw, ICE_DBG_SW,
2993 "Invalid configuration: Optimization to reuse VSI list with more than one VSI is not being done yet\n");
2994 status = ICE_ERR_CFG;
2999 ice_find_first_bit(v_list_itr->vsi_list_info->vsi_map,
3002 /* A rule already exists with the new VSI being added */
3003 if (cur_handle == vsi_handle) {
3004 status = ICE_ERR_ALREADY_EXISTS;
3008 vsi_handle_arr[0] = cur_handle;
3009 vsi_handle_arr[1] = vsi_handle;
3010 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
3011 &vsi_list_id, lkup_type);
3015 tmp_fltr = v_list_itr->fltr_info;
3016 tmp_fltr.fltr_rule_id = v_list_itr->fltr_info.fltr_rule_id;
3017 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
3018 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
3019 /* Update the previous switch rule to a new VSI list which
3020 * includes current VSI that is requested
3022 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
3026 /* before overriding VSI list map info. decrement ref_cnt of
3029 v_list_itr->vsi_list_info->ref_cnt--;
3031 /* now update to newly created list */
3032 v_list_itr->fltr_info.fwd_id.vsi_list_id = vsi_list_id;
3033 v_list_itr->vsi_list_info =
3034 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
3036 v_list_itr->vsi_count++;
3040 ice_release_lock(rule_lock);
3045 * ice_add_vlan - Add VLAN based filter rule
3046 * @hw: pointer to the hardware structure
3047 * @v_list: list of VLAN entries and forwarding information
3050 ice_add_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3052 struct ice_fltr_list_entry *v_list_itr;
3055 return ICE_ERR_PARAM;
/* Fail fast on the first bad/failed entry; earlier entries in the
 * list are NOT rolled back — each entry carries its own status.
 */
3057 LIST_FOR_EACH_ENTRY(v_list_itr, v_list, ice_fltr_list_entry,
3059 if (v_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_VLAN)
3060 return ICE_ERR_PARAM;
3061 v_list_itr->fltr_info.flag = ICE_FLTR_TX;
3062 v_list_itr->status = ice_add_vlan_internal(hw, v_list_itr);
3063 if (v_list_itr->status)
3064 return v_list_itr->status;
3070 * ice_add_mac_vlan - Add MAC and VLAN pair based filter rule
3071 * @hw: pointer to the hardware structure
3072 * @mv_list: list of MAC and VLAN filters
3074 * If the VSI on which the MAC-VLAN pair has to be added has Rx and Tx VLAN
3075 * pruning bits enabled, then it is the responsibility of the caller to make
3076 * sure to add a VLAN only filter on the same VSI. Packets belonging to that
3077 * VLAN won't be received on that VSI otherwise.
3080 ice_add_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *mv_list)
3082 struct ice_fltr_list_entry *mv_list_itr;
3084 if (!mv_list || !hw)
3085 return ICE_ERR_PARAM;
/* Each entry must be a MAC+VLAN lookup; rules are added one at a
 * time and the loop aborts on the first failure (no rollback).
 */
3087 LIST_FOR_EACH_ENTRY(mv_list_itr, mv_list, ice_fltr_list_entry,
3089 enum ice_sw_lkup_type l_type =
3090 mv_list_itr->fltr_info.lkup_type;
3092 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3093 return ICE_ERR_PARAM;
3094 mv_list_itr->fltr_info.flag = ICE_FLTR_TX;
3095 mv_list_itr->status =
3096 ice_add_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3098 if (mv_list_itr->status)
3099 return mv_list_itr->status;
3105 * ice_add_eth_mac - Add ethertype and MAC based filter rule
3106 * @hw: pointer to the hardware structure
3107 * @em_list: list of ether type MAC filter, MAC is optional
3109 * This function requires the caller to populate the entries in
3110 * the filter list with the necessary fields (including flags to
3111 * indicate Tx or Rx rules).
3114 ice_add_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3116 struct ice_fltr_list_entry *em_list_itr;
3118 if (!em_list || !hw)
3119 return ICE_ERR_PARAM;
/* Accepts both ETHERTYPE and ETHERTYPE_MAC lookups; unlike the
 * MAC/VLAN add helpers, the caller supplies the Rx/Tx flag.
 */
3121 LIST_FOR_EACH_ENTRY(em_list_itr, em_list, ice_fltr_list_entry,
3123 enum ice_sw_lkup_type l_type =
3124 em_list_itr->fltr_info.lkup_type;
3126 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3127 l_type != ICE_SW_LKUP_ETHERTYPE)
3128 return ICE_ERR_PARAM;
3130 em_list_itr->status = ice_add_rule_internal(hw, l_type,
3132 if (em_list_itr->status)
3133 return em_list_itr->status;
3139 * ice_remove_eth_mac - Remove an ethertype (or MAC) based filter rule
3140 * @hw: pointer to the hardware structure
3141 * @em_list: list of ethertype or ethertype MAC entries
3144 ice_remove_eth_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *em_list)
3146 struct ice_fltr_list_entry *em_list_itr, *tmp;
3148 if (!em_list || !hw)
3149 return ICE_ERR_PARAM;
/* _SAFE iteration: removal may unlink entries while walking. */
3151 LIST_FOR_EACH_ENTRY_SAFE(em_list_itr, tmp, em_list, ice_fltr_list_entry,
3153 enum ice_sw_lkup_type l_type =
3154 em_list_itr->fltr_info.lkup_type;
3156 if (l_type != ICE_SW_LKUP_ETHERTYPE_MAC &&
3157 l_type != ICE_SW_LKUP_ETHERTYPE)
3158 return ICE_ERR_PARAM;
3160 em_list_itr->status = ice_remove_rule_internal(hw, l_type,
3162 if (em_list_itr->status)
3163 return em_list_itr->status;
3170 * ice_rem_sw_rule_info
3171 * @hw: pointer to the hardware structure
3172 * @rule_head: pointer to the switch list structure that we want to delete
3175 ice_rem_sw_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3177 if (!LIST_EMPTY(rule_head)) {
3178 struct ice_fltr_mgmt_list_entry *entry;
3179 struct ice_fltr_mgmt_list_entry *tmp;
/* Frees only the software book-keeping; does not touch HW rules. */
3181 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, rule_head,
3182 ice_fltr_mgmt_list_entry, list_entry) {
3183 LIST_DEL(&entry->list_entry);
3184 ice_free(hw, entry);
3190 * ice_rem_adv_rule_info
3191 * @hw: pointer to the hardware structure
3192 * @rule_head: pointer to the switch list structure that we want to delete
3195 ice_rem_adv_rule_info(struct ice_hw *hw, struct LIST_HEAD_TYPE *rule_head)
3197 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
3198 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
3200 if (LIST_EMPTY(rule_head))
/* Advanced entries own a separately-allocated lkups array; free it
 * before the entry itself.
 */
3203 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry, rule_head,
3204 ice_adv_fltr_mgmt_list_entry, list_entry) {
3205 LIST_DEL(&lst_itr->list_entry);
3206 ice_free(hw, lst_itr->lkups);
3207 ice_free(hw, lst_itr);
3212 * ice_rem_all_sw_rules_info
3213 * @hw: pointer to the hardware structure
3215 void ice_rem_all_sw_rules_info(struct ice_hw *hw)
3217 struct ice_switch_info *sw = hw->switch_info;
/* Dispatch per recipe: legacy vs. advanced filter lists use
 * different entry layouts and teardown helpers.
 */
3220 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
3221 struct LIST_HEAD_TYPE *rule_head;
3223 rule_head = &sw->recp_list[i].filt_rules;
3224 if (!sw->recp_list[i].adv_rule)
3225 ice_rem_sw_rule_info(hw, rule_head);
3227 ice_rem_adv_rule_info(hw, rule_head);
3232 * ice_cfg_dflt_vsi - change state of VSI to set/clear default
3233 * @pi: pointer to the port_info structure
3234 * @vsi_handle: VSI handle to set as default
3235 * @set: true to add the above mentioned switch rule, false to remove it
3236 * @direction: ICE_FLTR_RX or ICE_FLTR_TX
3238 * add filter rule to set/unset given VSI as default VSI for the switch
3239 * (represented by swid)
3242 ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
3245 struct ice_aqc_sw_rules_elem *s_rule;
3246 struct ice_fltr_info f_info;
3247 struct ice_hw *hw = pi->hw;
3248 enum ice_adminq_opc opcode;
3249 enum ice_status status;
3253 if (!ice_is_vsi_valid(hw, vsi_handle))
3254 return ICE_ERR_PARAM;
3255 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
/* Add needs room for the dummy Ethernet header; remove does not. */
3257 s_rule_size = set ? ICE_SW_RULE_RX_TX_ETH_HDR_SIZE :
3258 ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
3259 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, s_rule_size);
3261 return ICE_ERR_NO_MEMORY;
3263 ice_memset(&f_info, 0, sizeof(f_info), ICE_NONDMA_MEM);
3265 f_info.lkup_type = ICE_SW_LKUP_DFLT;
3266 f_info.flag = direction;
3267 f_info.fltr_act = ICE_FWD_TO_VSI;
3268 f_info.fwd_id.hw_vsi_id = hw_vsi_id;
/* Rx default rule is keyed on logical port; Tx on the HW VSI. */
3270 if (f_info.flag & ICE_FLTR_RX) {
3271 f_info.src = pi->lport;
3272 f_info.src_id = ICE_SRC_ID_LPORT;
3274 f_info.fltr_rule_id =
3275 pi->dflt_rx_vsi_rule_id;
3276 } else if (f_info.flag & ICE_FLTR_TX) {
3277 f_info.src_id = ICE_SRC_ID_VSI;
3278 f_info.src = hw_vsi_id;
3280 f_info.fltr_rule_id =
3281 pi->dflt_tx_vsi_rule_id;
3285 opcode = ice_aqc_opc_add_sw_rules;
3287 opcode = ice_aqc_opc_remove_sw_rules;
3289 ice_fill_sw_rule(hw, &f_info, s_rule, opcode);
3291 status = ice_aq_sw_rules(hw, s_rule, s_rule_size, 1, opcode, NULL);
3292 if (status || !(f_info.flag & ICE_FLTR_TX_RX))
/* On success, cache the default-VSI number and rule ID in port_info
 * so a later clear/re-set can find the rule; reset both on remove.
 */
3295 u16 index = LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
3297 if (f_info.flag & ICE_FLTR_TX) {
3298 pi->dflt_tx_vsi_num = hw_vsi_id;
3299 pi->dflt_tx_vsi_rule_id = index;
3300 } else if (f_info.flag & ICE_FLTR_RX) {
3301 pi->dflt_rx_vsi_num = hw_vsi_id;
3302 pi->dflt_rx_vsi_rule_id = index;
3305 if (f_info.flag & ICE_FLTR_TX) {
3306 pi->dflt_tx_vsi_num = ICE_DFLT_VSI_INVAL;
3307 pi->dflt_tx_vsi_rule_id = ICE_INVAL_ACT;
3308 } else if (f_info.flag & ICE_FLTR_RX) {
3309 pi->dflt_rx_vsi_num = ICE_DFLT_VSI_INVAL;
3310 pi->dflt_rx_vsi_rule_id = ICE_INVAL_ACT;
3315 ice_free(hw, s_rule);
3320 * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
3321 * @hw: pointer to the hardware structure
3322 * @recp_id: lookup type for which the specified rule needs to be searched
3323 * @f_info: rule information
3325 * Helper function to search for a unicast rule entry - this is to be used
3326 * to remove unicast MAC filter that is not shared with other VSIs on the
3329 * Returns pointer to entry storing the rule if found
3331 static struct ice_fltr_mgmt_list_entry *
3332 ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
3333 struct ice_fltr_info *f_info)
3335 struct ice_switch_info *sw = hw->switch_info;
3336 struct ice_fltr_mgmt_list_entry *list_itr;
3337 struct LIST_HEAD_TYPE *list_head;
/* Unlike ice_find_rule_entry(), this also matches on hw_vsi_id so
 * a unicast MAC owned by a *different* VSI is not treated as a hit.
 * Caller must hold the recipe's filt_rule_lock.
 */
3339 list_head = &sw->recp_list[recp_id].filt_rules;
3340 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_fltr_mgmt_list_entry,
3342 if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
3343 sizeof(f_info->l_data)) &&
3344 f_info->fwd_id.hw_vsi_id ==
3345 list_itr->fltr_info.fwd_id.hw_vsi_id &&
3346 f_info->flag == list_itr->fltr_info.flag)
3353 * ice_remove_mac - remove a MAC address based filter rule
3354 * @hw: pointer to the hardware structure
3355 * @m_list: list of MAC addresses and forwarding information
3357 * This function removes either a MAC filter rule or a specific VSI from a
3358 * VSI list for a multicast MAC address.
3360 * Returns ICE_ERR_DOES_NOT_EXIST if a given entry was not added by
3361 * ice_add_mac. Caller should be aware that this call will only work if all
3362 * the entries passed into m_list were added previously. It will not attempt to
3363 * do a partial remove of entries that were found.
3366 ice_remove_mac(struct ice_hw *hw, struct LIST_HEAD_TYPE *m_list)
3368 struct ice_fltr_list_entry *list_itr, *tmp;
3369 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3372 return ICE_ERR_PARAM;
3374 rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
3375 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, m_list, ice_fltr_list_entry,
3377 enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
3378 u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
3381 if (l_type != ICE_SW_LKUP_MAC)
3382 return ICE_ERR_PARAM;
3384 vsi_handle = list_itr->fltr_info.vsi_handle;
3385 if (!ice_is_vsi_valid(hw, vsi_handle))
3386 return ICE_ERR_PARAM;
3388 list_itr->fltr_info.fwd_id.hw_vsi_id =
3389 ice_get_hw_vsi_num(hw, vsi_handle);
3390 if (IS_UNICAST_ETHER_ADDR(add) && !hw->ucast_shared) {
3391 /* Don't remove the unicast address that belongs to
3392 * another VSI on the switch, since it is not being
3395 ice_acquire_lock(rule_lock);
/* Ownership check uses the VSI-aware lookup so only this
 * VSI's own unicast rule may be removed.
 */
3396 if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
3397 &list_itr->fltr_info)) {
3398 ice_release_lock(rule_lock);
3399 return ICE_ERR_DOES_NOT_EXIST;
3401 ice_release_lock(rule_lock);
3403 list_itr->status = ice_remove_rule_internal(hw,
3406 if (list_itr->status)
3407 return list_itr->status;
3413 * ice_remove_vlan - Remove VLAN based filter rule
3414 * @hw: pointer to the hardware structure
3415 * @v_list: list of VLAN entries and forwarding information
3418 ice_remove_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3420 struct ice_fltr_list_entry *v_list_itr, *tmp;
3423 return ICE_ERR_PARAM;
3425 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3427 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3429 if (l_type != ICE_SW_LKUP_VLAN)
3430 return ICE_ERR_PARAM;
3431 v_list_itr->status = ice_remove_rule_internal(hw,
3434 if (v_list_itr->status)
3435 return v_list_itr->status;
3441 * ice_remove_mac_vlan - Remove MAC VLAN based filter rule
3442 * @hw: pointer to the hardware structure
3443 * @v_list: list of MAC VLAN entries and forwarding information
3446 ice_remove_mac_vlan(struct ice_hw *hw, struct LIST_HEAD_TYPE *v_list)
3448 struct ice_fltr_list_entry *v_list_itr, *tmp;
3451 return ICE_ERR_PARAM;
3453 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3455 enum ice_sw_lkup_type l_type = v_list_itr->fltr_info.lkup_type;
3457 if (l_type != ICE_SW_LKUP_MAC_VLAN)
3458 return ICE_ERR_PARAM;
3459 v_list_itr->status =
3460 ice_remove_rule_internal(hw, ICE_SW_LKUP_MAC_VLAN,
3462 if (v_list_itr->status)
3463 return v_list_itr->status;
3469 * ice_vsi_uses_fltr - Determine if given VSI uses specified filter
3470 * @fm_entry: filter entry to inspect
3471 * @vsi_handle: VSI handle to compare with filter info
3474 ice_vsi_uses_fltr(struct ice_fltr_mgmt_list_entry *fm_entry, u16 vsi_handle)
3476 return ((fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI &&
3477 fm_entry->fltr_info.vsi_handle == vsi_handle) ||
3478 (fm_entry->fltr_info.fltr_act == ICE_FWD_TO_VSI_LIST &&
3479 (ice_is_bit_set(fm_entry->vsi_list_info->vsi_map,
3484 * ice_add_entry_to_vsi_fltr_list - Add copy of fltr_list_entry to remove list
3485 * @hw: pointer to the hardware structure
3486 * @vsi_handle: VSI handle to remove filters from
3487 * @vsi_list_head: pointer to the list to add entry to
3488 * @fi: pointer to fltr_info of filter entry to copy & add
3490 * Helper function, used when creating a list of filters to remove from
3491 * a specific VSI. The entry added to vsi_list_head is a COPY of the
3492 * original filter entry, with the exception of fltr_info.fltr_act and
3493 * fltr_info.fwd_id fields. These are set such that later logic can
3494 * extract which VSI to remove the fltr from, and pass on that information.
3496 static enum ice_status
3497 ice_add_entry_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3498 struct LIST_HEAD_TYPE *vsi_list_head,
3499 struct ice_fltr_info *fi)
3501 struct ice_fltr_list_entry *tmp;
3503 /* this memory is freed up in the caller function
3504 * once filters for this VSI are removed
3506 tmp = (struct ice_fltr_list_entry *)ice_malloc(hw, sizeof(*tmp));
3508 return ICE_ERR_NO_MEMORY;
3510 tmp->fltr_info = *fi;
3512 /* Overwrite these fields to indicate which VSI to remove filter from,
3513 * so find and remove logic can extract the information from the
3514 * list entries. Note that original entries will still have proper
3517 tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
3518 tmp->fltr_info.vsi_handle = vsi_handle;
3519 tmp->fltr_info.fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3521 LIST_ADD(&tmp->list_entry, vsi_list_head);
3527 * ice_add_to_vsi_fltr_list - Add VSI filters to the list
3528 * @hw: pointer to the hardware structure
3529 * @vsi_handle: VSI handle to remove filters from
3530 * @lkup_list_head: pointer to the list that has certain lookup type filters
3531 * @vsi_list_head: pointer to the list pertaining to VSI with vsi_handle
3533 * Locates all filters in lkup_list_head that are used by the given VSI,
3534 * and adds COPIES of those entries to vsi_list_head (intended to be used
3535 * to remove the listed filters).
3536 * Note that this means all entries in vsi_list_head must be explicitly
3537 * deallocated by the caller when done with list.
3539 static enum ice_status
3540 ice_add_to_vsi_fltr_list(struct ice_hw *hw, u16 vsi_handle,
3541 struct LIST_HEAD_TYPE *lkup_list_head,
3542 struct LIST_HEAD_TYPE *vsi_list_head)
3544 struct ice_fltr_mgmt_list_entry *fm_entry;
3545 enum ice_status status = ICE_SUCCESS;
3547 /* check to make sure VSI ID is valid and within boundary */
3548 if (!ice_is_vsi_valid(hw, vsi_handle))
3549 return ICE_ERR_PARAM;
3551 LIST_FOR_EACH_ENTRY(fm_entry, lkup_list_head,
3552 ice_fltr_mgmt_list_entry, list_entry) {
3553 struct ice_fltr_info *fi;
3555 fi = &fm_entry->fltr_info;
3556 if (!fi || !ice_vsi_uses_fltr(fm_entry, vsi_handle))
3559 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3569 * ice_determine_promisc_mask
3570 * @fi: filter info to parse
3572 * Helper function to determine which ICE_PROMISC_ mask corresponds
3573 * to given filter into.
3575 static u8 ice_determine_promisc_mask(struct ice_fltr_info *fi)
3577 u16 vid = fi->l_data.mac_vlan.vlan_id;
3578 u8 *macaddr = fi->l_data.mac.mac_addr;
3579 bool is_tx_fltr = false;
3580 u8 promisc_mask = 0;
3582 if (fi->flag == ICE_FLTR_TX)
3585 if (IS_BROADCAST_ETHER_ADDR(macaddr))
3586 promisc_mask |= is_tx_fltr ?
3587 ICE_PROMISC_BCAST_TX : ICE_PROMISC_BCAST_RX;
3588 else if (IS_MULTICAST_ETHER_ADDR(macaddr))
3589 promisc_mask |= is_tx_fltr ?
3590 ICE_PROMISC_MCAST_TX : ICE_PROMISC_MCAST_RX;
3591 else if (IS_UNICAST_ETHER_ADDR(macaddr))
3592 promisc_mask |= is_tx_fltr ?
3593 ICE_PROMISC_UCAST_TX : ICE_PROMISC_UCAST_RX;
3595 promisc_mask |= is_tx_fltr ?
3596 ICE_PROMISC_VLAN_TX : ICE_PROMISC_VLAN_RX;
3598 return promisc_mask;
3602 * ice_get_vsi_promisc - get promiscuous mode of given VSI
3603 * @hw: pointer to the hardware structure
3604 * @vsi_handle: VSI handle to retrieve info from
3605 * @promisc_mask: pointer to mask to be filled in
3606 * @vid: VLAN ID of promisc VLAN VSI
3609 ice_get_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3612 struct ice_switch_info *sw = hw->switch_info;
3613 struct ice_fltr_mgmt_list_entry *itr;
3614 struct LIST_HEAD_TYPE *rule_head;
3615 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3617 if (!ice_is_vsi_valid(hw, vsi_handle))
3618 return ICE_ERR_PARAM;
3622 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rules;
3623 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC].filt_rule_lock;
3625 ice_acquire_lock(rule_lock);
3626 LIST_FOR_EACH_ENTRY(itr, rule_head,
3627 ice_fltr_mgmt_list_entry, list_entry) {
3628 /* Continue if this filter doesn't apply to this VSI or the
3629 * VSI ID is not in the VSI map for this filter
3631 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3634 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3636 ice_release_lock(rule_lock);
3642 * ice_get_vsi_vlan_promisc - get VLAN promiscuous mode of given VSI
3643 * @hw: pointer to the hardware structure
3644 * @vsi_handle: VSI handle to retrieve info from
3645 * @promisc_mask: pointer to mask to be filled in
3646 * @vid: VLAN ID of promisc VLAN VSI
3649 ice_get_vsi_vlan_promisc(struct ice_hw *hw, u16 vsi_handle, u8 *promisc_mask,
3652 struct ice_switch_info *sw = hw->switch_info;
3653 struct ice_fltr_mgmt_list_entry *itr;
3654 struct LIST_HEAD_TYPE *rule_head;
3655 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3657 if (!ice_is_vsi_valid(hw, vsi_handle))
3658 return ICE_ERR_PARAM;
3662 rule_head = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rules;
3663 rule_lock = &sw->recp_list[ICE_SW_LKUP_PROMISC_VLAN].filt_rule_lock;
3665 ice_acquire_lock(rule_lock);
3666 LIST_FOR_EACH_ENTRY(itr, rule_head, ice_fltr_mgmt_list_entry,
3668 /* Continue if this filter doesn't apply to this VSI or the
3669 * VSI ID is not in the VSI map for this filter
3671 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3674 *promisc_mask |= ice_determine_promisc_mask(&itr->fltr_info);
3676 ice_release_lock(rule_lock);
3682 * ice_remove_promisc - Remove promisc based filter rules
3683 * @hw: pointer to the hardware structure
3684 * @recp_id: recipe ID for which the rule needs to removed
3685 * @v_list: list of promisc entries
3687 static enum ice_status
3688 ice_remove_promisc(struct ice_hw *hw, u8 recp_id,
3689 struct LIST_HEAD_TYPE *v_list)
3691 struct ice_fltr_list_entry *v_list_itr, *tmp;
3693 LIST_FOR_EACH_ENTRY_SAFE(v_list_itr, tmp, v_list, ice_fltr_list_entry,
3695 v_list_itr->status =
3696 ice_remove_rule_internal(hw, recp_id, v_list_itr);
3697 if (v_list_itr->status)
3698 return v_list_itr->status;
3704 * ice_clear_vsi_promisc - clear specified promiscuous mode(s) for given VSI
3705 * @hw: pointer to the hardware structure
3706 * @vsi_handle: VSI handle to clear mode
3707 * @promisc_mask: mask of promiscuous config bits to clear
3708 * @vid: VLAN ID to clear VLAN promiscuous
3711 ice_clear_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3714 struct ice_switch_info *sw = hw->switch_info;
3715 struct ice_fltr_list_entry *fm_entry, *tmp;
3716 struct LIST_HEAD_TYPE remove_list_head;
3717 struct ice_fltr_mgmt_list_entry *itr;
3718 struct LIST_HEAD_TYPE *rule_head;
3719 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3720 enum ice_status status = ICE_SUCCESS;
3723 if (!ice_is_vsi_valid(hw, vsi_handle))
3724 return ICE_ERR_PARAM;
3727 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3729 recipe_id = ICE_SW_LKUP_PROMISC;
3731 rule_head = &sw->recp_list[recipe_id].filt_rules;
3732 rule_lock = &sw->recp_list[recipe_id].filt_rule_lock;
3734 INIT_LIST_HEAD(&remove_list_head);
3736 ice_acquire_lock(rule_lock);
3737 LIST_FOR_EACH_ENTRY(itr, rule_head,
3738 ice_fltr_mgmt_list_entry, list_entry) {
3739 u8 fltr_promisc_mask = 0;
3741 if (!ice_vsi_uses_fltr(itr, vsi_handle))
3744 fltr_promisc_mask |=
3745 ice_determine_promisc_mask(&itr->fltr_info);
3747 /* Skip if filter is not completely specified by given mask */
3748 if (fltr_promisc_mask & ~promisc_mask)
3751 status = ice_add_entry_to_vsi_fltr_list(hw, vsi_handle,
3755 ice_release_lock(rule_lock);
3756 goto free_fltr_list;
3759 ice_release_lock(rule_lock);
3761 status = ice_remove_promisc(hw, recipe_id, &remove_list_head);
3764 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3765 ice_fltr_list_entry, list_entry) {
3766 LIST_DEL(&fm_entry->list_entry);
3767 ice_free(hw, fm_entry);
3774 * ice_set_vsi_promisc - set given VSI to given promiscuous mode(s)
3775 * @hw: pointer to the hardware structure
3776 * @vsi_handle: VSI handle to configure
3777 * @promisc_mask: mask of promiscuous config bits
3778 * @vid: VLAN ID to set VLAN promiscuous
3781 ice_set_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask, u16 vid)
3783 enum { UCAST_FLTR = 1, MCAST_FLTR, BCAST_FLTR };
3784 struct ice_fltr_list_entry f_list_entry;
3785 struct ice_fltr_info new_fltr;
3786 enum ice_status status = ICE_SUCCESS;
3792 ice_debug(hw, ICE_DBG_TRACE, "ice_set_vsi_promisc\n");
3794 if (!ice_is_vsi_valid(hw, vsi_handle))
3795 return ICE_ERR_PARAM;
3796 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3798 ice_memset(&new_fltr, 0, sizeof(new_fltr), ICE_NONDMA_MEM);
3800 if (promisc_mask & (ICE_PROMISC_VLAN_RX | ICE_PROMISC_VLAN_TX)) {
3801 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC_VLAN;
3802 new_fltr.l_data.mac_vlan.vlan_id = vid;
3803 recipe_id = ICE_SW_LKUP_PROMISC_VLAN;
3805 new_fltr.lkup_type = ICE_SW_LKUP_PROMISC;
3806 recipe_id = ICE_SW_LKUP_PROMISC;
3809 /* Separate filters must be set for each direction/packet type
3810 * combination, so we will loop over the mask value, store the
3811 * individual type, and clear it out in the input mask as it
3814 while (promisc_mask) {
3820 if (promisc_mask & ICE_PROMISC_UCAST_RX) {
3821 promisc_mask &= ~ICE_PROMISC_UCAST_RX;
3822 pkt_type = UCAST_FLTR;
3823 } else if (promisc_mask & ICE_PROMISC_UCAST_TX) {
3824 promisc_mask &= ~ICE_PROMISC_UCAST_TX;
3825 pkt_type = UCAST_FLTR;
3827 } else if (promisc_mask & ICE_PROMISC_MCAST_RX) {
3828 promisc_mask &= ~ICE_PROMISC_MCAST_RX;
3829 pkt_type = MCAST_FLTR;
3830 } else if (promisc_mask & ICE_PROMISC_MCAST_TX) {
3831 promisc_mask &= ~ICE_PROMISC_MCAST_TX;
3832 pkt_type = MCAST_FLTR;
3834 } else if (promisc_mask & ICE_PROMISC_BCAST_RX) {
3835 promisc_mask &= ~ICE_PROMISC_BCAST_RX;
3836 pkt_type = BCAST_FLTR;
3837 } else if (promisc_mask & ICE_PROMISC_BCAST_TX) {
3838 promisc_mask &= ~ICE_PROMISC_BCAST_TX;
3839 pkt_type = BCAST_FLTR;
3843 /* Check for VLAN promiscuous flag */
3844 if (promisc_mask & ICE_PROMISC_VLAN_RX) {
3845 promisc_mask &= ~ICE_PROMISC_VLAN_RX;
3846 } else if (promisc_mask & ICE_PROMISC_VLAN_TX) {
3847 promisc_mask &= ~ICE_PROMISC_VLAN_TX;
3851 /* Set filter DA based on packet type */
3852 mac_addr = new_fltr.l_data.mac.mac_addr;
3853 if (pkt_type == BCAST_FLTR) {
3854 ice_memset(mac_addr, 0xff, ETH_ALEN, ICE_NONDMA_MEM);
3855 } else if (pkt_type == MCAST_FLTR ||
3856 pkt_type == UCAST_FLTR) {
3857 /* Use the dummy ether header DA */
3858 ice_memcpy(mac_addr, dummy_eth_header, ETH_ALEN,
3859 ICE_NONDMA_TO_NONDMA);
3860 if (pkt_type == MCAST_FLTR)
3861 mac_addr[0] |= 0x1; /* Set multicast bit */
3864 /* Need to reset this to zero for all iterations */
3867 new_fltr.flag |= ICE_FLTR_TX;
3868 new_fltr.src = hw_vsi_id;
3870 new_fltr.flag |= ICE_FLTR_RX;
3871 new_fltr.src = hw->port_info->lport;
3874 new_fltr.fltr_act = ICE_FWD_TO_VSI;
3875 new_fltr.vsi_handle = vsi_handle;
3876 new_fltr.fwd_id.hw_vsi_id = hw_vsi_id;
3877 f_list_entry.fltr_info = new_fltr;
3879 status = ice_add_rule_internal(hw, recipe_id, &f_list_entry);
3880 if (status != ICE_SUCCESS)
3881 goto set_promisc_exit;
3889 * ice_set_vlan_vsi_promisc
3890 * @hw: pointer to the hardware structure
3891 * @vsi_handle: VSI handle to configure
3892 * @promisc_mask: mask of promiscuous config bits
3893 * @rm_vlan_promisc: Clear VLANs VSI promisc mode
3895 * Configure VSI with all associated VLANs to given promiscuous mode(s)
3898 ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
3899 bool rm_vlan_promisc)
3901 struct ice_switch_info *sw = hw->switch_info;
3902 struct ice_fltr_list_entry *list_itr, *tmp;
3903 struct LIST_HEAD_TYPE vsi_list_head;
3904 struct LIST_HEAD_TYPE *vlan_head;
3905 struct ice_lock *vlan_lock; /* Lock to protect filter rule list */
3906 enum ice_status status;
3909 INIT_LIST_HEAD(&vsi_list_head);
3910 vlan_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
3911 vlan_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
3912 ice_acquire_lock(vlan_lock);
3913 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, vlan_head,
3915 ice_release_lock(vlan_lock);
3917 goto free_fltr_list;
3919 LIST_FOR_EACH_ENTRY(list_itr, &vsi_list_head, ice_fltr_list_entry,
3921 vlan_id = list_itr->fltr_info.l_data.vlan.vlan_id;
3922 if (rm_vlan_promisc)
3923 status = ice_clear_vsi_promisc(hw, vsi_handle,
3924 promisc_mask, vlan_id);
3926 status = ice_set_vsi_promisc(hw, vsi_handle,
3927 promisc_mask, vlan_id);
3933 LIST_FOR_EACH_ENTRY_SAFE(list_itr, tmp, &vsi_list_head,
3934 ice_fltr_list_entry, list_entry) {
3935 LIST_DEL(&list_itr->list_entry);
3936 ice_free(hw, list_itr);
3942 * ice_remove_vsi_lkup_fltr - Remove lookup type filters for a VSI
3943 * @hw: pointer to the hardware structure
3944 * @vsi_handle: VSI handle to remove filters from
3945 * @lkup: switch rule filter lookup type
3948 ice_remove_vsi_lkup_fltr(struct ice_hw *hw, u16 vsi_handle,
3949 enum ice_sw_lkup_type lkup)
3951 struct ice_switch_info *sw = hw->switch_info;
3952 struct ice_fltr_list_entry *fm_entry;
3953 struct LIST_HEAD_TYPE remove_list_head;
3954 struct LIST_HEAD_TYPE *rule_head;
3955 struct ice_fltr_list_entry *tmp;
3956 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
3957 enum ice_status status;
3959 INIT_LIST_HEAD(&remove_list_head);
3960 rule_lock = &sw->recp_list[lkup].filt_rule_lock;
3961 rule_head = &sw->recp_list[lkup].filt_rules;
3962 ice_acquire_lock(rule_lock);
3963 status = ice_add_to_vsi_fltr_list(hw, vsi_handle, rule_head,
3965 ice_release_lock(rule_lock);
3970 case ICE_SW_LKUP_MAC:
3971 ice_remove_mac(hw, &remove_list_head);
3973 case ICE_SW_LKUP_VLAN:
3974 ice_remove_vlan(hw, &remove_list_head);
3976 case ICE_SW_LKUP_PROMISC:
3977 case ICE_SW_LKUP_PROMISC_VLAN:
3978 ice_remove_promisc(hw, lkup, &remove_list_head);
3980 case ICE_SW_LKUP_MAC_VLAN:
3981 ice_remove_mac_vlan(hw, &remove_list_head);
3983 case ICE_SW_LKUP_ETHERTYPE:
3984 case ICE_SW_LKUP_ETHERTYPE_MAC:
3985 ice_remove_eth_mac(hw, &remove_list_head);
3987 case ICE_SW_LKUP_DFLT:
3988 ice_debug(hw, ICE_DBG_SW,
3989 "Remove filters for this lookup type hasn't been implemented yet\n");
3991 case ICE_SW_LKUP_LAST:
3992 ice_debug(hw, ICE_DBG_SW, "Unsupported lookup type\n");
3996 LIST_FOR_EACH_ENTRY_SAFE(fm_entry, tmp, &remove_list_head,
3997 ice_fltr_list_entry, list_entry) {
3998 LIST_DEL(&fm_entry->list_entry);
3999 ice_free(hw, fm_entry);
4004 * ice_remove_vsi_fltr - Remove all filters for a VSI
4005 * @hw: pointer to the hardware structure
4006 * @vsi_handle: VSI handle to remove filters from
4008 void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle)
4010 ice_debug(hw, ICE_DBG_TRACE, "ice_remove_vsi_fltr\n");
4012 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC);
4013 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_MAC_VLAN);
4014 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC);
4015 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_VLAN);
4016 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_DFLT);
4017 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE);
4018 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_ETHERTYPE_MAC);
4019 ice_remove_vsi_lkup_fltr(hw, vsi_handle, ICE_SW_LKUP_PROMISC_VLAN);
4023 * ice_alloc_res_cntr - allocating resource counter
4024 * @hw: pointer to the hardware structure
4025 * @type: type of resource
4026 * @alloc_shared: if set it is shared else dedicated
4027 * @num_items: number of entries requested for FD resource type
4028 * @counter_id: counter index returned by AQ call
4031 ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4034 struct ice_aqc_alloc_free_res_elem *buf;
4035 enum ice_status status;
4038 /* Allocate resource */
4039 buf_len = sizeof(*buf);
4040 buf = (struct ice_aqc_alloc_free_res_elem *)
4041 ice_malloc(hw, buf_len);
4043 return ICE_ERR_NO_MEMORY;
4045 buf->num_elems = CPU_TO_LE16(num_items);
4046 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4047 ICE_AQC_RES_TYPE_M) | alloc_shared);
4049 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4050 ice_aqc_opc_alloc_res, NULL);
4054 *counter_id = LE16_TO_CPU(buf->elem[0].e.sw_resp);
4062 * ice_free_res_cntr - free resource counter
4063 * @hw: pointer to the hardware structure
4064 * @type: type of resource
4065 * @alloc_shared: if set it is shared else dedicated
4066 * @num_items: number of entries to be freed for FD resource type
4067 * @counter_id: counter ID resource which needs to be freed
4070 ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
4073 struct ice_aqc_alloc_free_res_elem *buf;
4074 enum ice_status status;
4078 buf_len = sizeof(*buf);
4079 buf = (struct ice_aqc_alloc_free_res_elem *)
4080 ice_malloc(hw, buf_len);
4082 return ICE_ERR_NO_MEMORY;
4084 buf->num_elems = CPU_TO_LE16(num_items);
4085 buf->res_type = CPU_TO_LE16(((type << ICE_AQC_RES_TYPE_S) &
4086 ICE_AQC_RES_TYPE_M) | alloc_shared);
4087 buf->elem[0].e.sw_resp = CPU_TO_LE16(counter_id);
4089 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
4090 ice_aqc_opc_free_res, NULL);
4092 ice_debug(hw, ICE_DBG_SW,
4093 "counter resource could not be freed\n");
4100 * ice_alloc_vlan_res_counter - obtain counter resource for VLAN type
4101 * @hw: pointer to the hardware structure
4102 * @counter_id: returns counter index
4104 enum ice_status ice_alloc_vlan_res_counter(struct ice_hw *hw, u16 *counter_id)
4106 return ice_alloc_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4107 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4112 * ice_free_vlan_res_counter - Free counter resource for VLAN type
4113 * @hw: pointer to the hardware structure
4114 * @counter_id: counter index to be freed
4116 enum ice_status ice_free_vlan_res_counter(struct ice_hw *hw, u16 counter_id)
4118 return ice_free_res_cntr(hw, ICE_AQC_RES_TYPE_VLAN_COUNTER,
4119 ICE_AQC_RES_TYPE_FLAG_DEDICATED, 1,
4124 * ice_alloc_res_lg_act - add large action resource
4125 * @hw: pointer to the hardware structure
4126 * @l_id: large action ID to fill it in
4127 * @num_acts: number of actions to hold with a large action entry
4129 static enum ice_status
4130 ice_alloc_res_lg_act(struct ice_hw *hw, u16 *l_id, u16 num_acts)
4132 struct ice_aqc_alloc_free_res_elem *sw_buf;
4133 enum ice_status status;
4136 if (num_acts > ICE_MAX_LG_ACT || num_acts == 0)
4137 return ICE_ERR_PARAM;
4139 /* Allocate resource for large action */
4140 buf_len = sizeof(*sw_buf);
4141 sw_buf = (struct ice_aqc_alloc_free_res_elem *)
4142 ice_malloc(hw, buf_len);
4144 return ICE_ERR_NO_MEMORY;
4146 sw_buf->num_elems = CPU_TO_LE16(1);
4148 /* If num_acts is 1, use ICE_AQC_RES_TYPE_WIDE_TABLE_1.
4149 * If num_acts is 2, use ICE_AQC_RES_TYPE_WIDE_TABLE_3.
4150 * If num_acts is greater than 2, then use
4151 * ICE_AQC_RES_TYPE_WIDE_TABLE_4.
4152 * The num_acts cannot exceed 4. This was ensured at the
4153 * beginning of the function.
4156 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_1);
4157 else if (num_acts == 2)
4158 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_2);
4160 sw_buf->res_type = CPU_TO_LE16(ICE_AQC_RES_TYPE_WIDE_TABLE_4);
4162 status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
4163 ice_aqc_opc_alloc_res, NULL);
4165 *l_id = LE16_TO_CPU(sw_buf->elem[0].e.sw_resp);
4167 ice_free(hw, sw_buf);
4172 * ice_add_mac_with_sw_marker - add filter with sw marker
4173 * @hw: pointer to the hardware structure
4174 * @f_info: filter info structure containing the MAC filter information
4175 * @sw_marker: sw marker to tag the Rx descriptor with
4178 ice_add_mac_with_sw_marker(struct ice_hw *hw, struct ice_fltr_info *f_info,
4181 struct ice_switch_info *sw = hw->switch_info;
4182 struct ice_fltr_mgmt_list_entry *m_entry;
4183 struct ice_fltr_list_entry fl_info;
4184 struct LIST_HEAD_TYPE l_head;
4185 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4186 enum ice_status ret;
4190 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4191 return ICE_ERR_PARAM;
4193 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4194 return ICE_ERR_PARAM;
4196 if (sw_marker == ICE_INVAL_SW_MARKER_ID)
4197 return ICE_ERR_PARAM;
4199 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4200 return ICE_ERR_PARAM;
4201 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4203 /* Add filter if it doesn't exist so then the adding of large
4204 * action always results in update
4207 INIT_LIST_HEAD(&l_head);
4208 fl_info.fltr_info = *f_info;
4209 LIST_ADD(&fl_info.list_entry, &l_head);
4211 entry_exists = false;
4212 ret = ice_add_mac(hw, &l_head);
4213 if (ret == ICE_ERR_ALREADY_EXISTS)
4214 entry_exists = true;
4218 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4219 ice_acquire_lock(rule_lock);
4220 /* Get the book keeping entry for the filter */
4221 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4225 /* If counter action was enabled for this rule then don't enable
4226 * sw marker large action
4228 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4229 ret = ICE_ERR_PARAM;
4233 /* if same marker was added before */
4234 if (m_entry->sw_marker_id == sw_marker) {
4235 ret = ICE_ERR_ALREADY_EXISTS;
4239 /* Allocate a hardware table entry to hold large act. Three actions
4240 * for marker based large action
4242 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 3);
4246 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4249 /* Update the switch rule to add the marker action */
4250 ret = ice_add_marker_act(hw, m_entry, sw_marker, lg_act_id);
4252 ice_release_lock(rule_lock);
4257 ice_release_lock(rule_lock);
4258 /* only remove entry if it did not exist previously */
4260 ret = ice_remove_mac(hw, &l_head);
4266 * ice_add_mac_with_counter - add filter with counter enabled
4267 * @hw: pointer to the hardware structure
4268 * @f_info: pointer to filter info structure containing the MAC filter
4272 ice_add_mac_with_counter(struct ice_hw *hw, struct ice_fltr_info *f_info)
4274 struct ice_switch_info *sw = hw->switch_info;
4275 struct ice_fltr_mgmt_list_entry *m_entry;
4276 struct ice_fltr_list_entry fl_info;
4277 struct LIST_HEAD_TYPE l_head;
4278 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
4279 enum ice_status ret;
4284 if (f_info->fltr_act != ICE_FWD_TO_VSI)
4285 return ICE_ERR_PARAM;
4287 if (f_info->lkup_type != ICE_SW_LKUP_MAC)
4288 return ICE_ERR_PARAM;
4290 if (!ice_is_vsi_valid(hw, f_info->vsi_handle))
4291 return ICE_ERR_PARAM;
4292 f_info->fwd_id.hw_vsi_id = ice_get_hw_vsi_num(hw, f_info->vsi_handle);
4294 entry_exist = false;
4296 rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
4298 /* Add filter if it doesn't exist so then the adding of large
4299 * action always results in update
4301 INIT_LIST_HEAD(&l_head);
4303 fl_info.fltr_info = *f_info;
4304 LIST_ADD(&fl_info.list_entry, &l_head);
4306 ret = ice_add_mac(hw, &l_head);
4307 if (ret == ICE_ERR_ALREADY_EXISTS)
4312 ice_acquire_lock(rule_lock);
4313 m_entry = ice_find_rule_entry(hw, ICE_SW_LKUP_MAC, f_info);
4315 ret = ICE_ERR_BAD_PTR;
4319 /* Don't enable counter for a filter for which sw marker was enabled */
4320 if (m_entry->sw_marker_id != ICE_INVAL_SW_MARKER_ID) {
4321 ret = ICE_ERR_PARAM;
4325 /* If a counter was already enabled then don't need to add again */
4326 if (m_entry->counter_index != ICE_INVAL_COUNTER_ID) {
4327 ret = ICE_ERR_ALREADY_EXISTS;
4331 /* Allocate a hardware table entry to VLAN counter */
4332 ret = ice_alloc_vlan_res_counter(hw, &counter_id);
4336 /* Allocate a hardware table entry to hold large act. Two actions for
4337 * counter based large action
4339 ret = ice_alloc_res_lg_act(hw, &lg_act_id, 2);
4343 if (lg_act_id == ICE_INVAL_LG_ACT_INDEX)
4346 /* Update the switch rule to add the counter action */
4347 ret = ice_add_counter_act(hw, m_entry, counter_id, lg_act_id);
4349 ice_release_lock(rule_lock);
4354 ice_release_lock(rule_lock);
4355 /* only remove entry if it did not exist previously */
4357 ret = ice_remove_mac(hw, &l_head);
4362 /* This is mapping table entry that maps every word within a given protocol
4363 * structure to the real byte offset as per the specification of that
4365 * for example dst address is 3 words in ethertype header and corresponding
4366 * bytes are 0, 2, 3 in the actual packet header and src address is at 4, 6, 8
4367 * IMPORTANT: Every structure part of "ice_prot_hdr" union should have a
4368 * matching entry describing its field. This needs to be updated if new
4369 * structure is added to that union.
4371 static const struct ice_prot_ext_tbl_entry ice_prot_ext[] = {
4372 { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
4373 { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
4374 { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4375 { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
4376 { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4377 26, 28, 30, 32, 34, 36, 38 } },
4378 { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
4379 26, 28, 30, 32, 34, 36, 38 } },
4380 { ICE_TCP_IL, { 0, 2 } },
4381 { ICE_UDP_OF, { 0, 2 } },
4382 { ICE_UDP_ILOS, { 0, 2 } },
4383 { ICE_SCTP_IL, { 0, 2 } },
4384 { ICE_VXLAN, { 8, 10, 12, 14 } },
4385 { ICE_GENEVE, { 8, 10, 12, 14 } },
4386 { ICE_VXLAN_GPE, { 0, 2, 4 } },
4387 { ICE_NVGRE, { 0, 2 } },
4388 { ICE_PROTOCOL_LAST, { 0 } }
4391 /* The following table describes preferred grouping of recipes.
4392 * If a recipe that needs to be programmed is a superset or matches one of the
4393 * following combinations, then the recipe needs to be chained as per the
4396 static const struct ice_pref_recipe_group ice_recipe_pack[] = {
4397 {3, { { ICE_MAC_OFOS_HW, 0, 0 }, { ICE_MAC_OFOS_HW, 2, 0 },
4398 { ICE_MAC_OFOS_HW, 4, 0 } }, { 0xffff, 0xffff, 0xffff, 0xffff } },
4399 {4, { { ICE_MAC_IL_HW, 0, 0 }, { ICE_MAC_IL_HW, 2, 0 },
4400 { ICE_MAC_IL_HW, 4, 0 }, { ICE_META_DATA_ID_HW, 44, 0 } },
4401 { 0xffff, 0xffff, 0xffff, 0xffff } },
4402 {2, { { ICE_IPV4_IL_HW, 0, 0 }, { ICE_IPV4_IL_HW, 2, 0 } },
4403 { 0xffff, 0xffff, 0xffff, 0xffff } },
4404 {2, { { ICE_IPV4_IL_HW, 12, 0 }, { ICE_IPV4_IL_HW, 14, 0 } },
4405 { 0xffff, 0xffff, 0xffff, 0xffff } },
4408 static const struct ice_protocol_entry ice_prot_id_tbl[] = {
4409 { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
4410 { ICE_MAC_IL, ICE_MAC_IL_HW },
4411 { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
4412 { ICE_IPV4_IL, ICE_IPV4_IL_HW },
4413 { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
4414 { ICE_IPV6_IL, ICE_IPV6_IL_HW },
4415 { ICE_TCP_IL, ICE_TCP_IL_HW },
4416 { ICE_UDP_OF, ICE_UDP_OF_HW },
4417 { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
4418 { ICE_SCTP_IL, ICE_SCTP_IL_HW },
4419 { ICE_VXLAN, ICE_UDP_OF_HW },
4420 { ICE_GENEVE, ICE_UDP_OF_HW },
4421 { ICE_VXLAN_GPE, ICE_UDP_OF_HW },
4422 { ICE_NVGRE, ICE_GRE_OF_HW },
4423 { ICE_PROTOCOL_LAST, 0 }
4427 * ice_find_recp - find a recipe
4428 * @hw: pointer to the hardware structure
4429 * @lkup_exts: extension sequence to match
4431 * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
4433 static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
4435 struct ice_sw_recipe *recp;
4438 ice_get_recp_to_prof_map(hw);
4439 /* Initialize available_result_ids which tracks available result idx */
4440 for (i = 0; i <= ICE_CHAIN_FV_INDEX_START; i++)
4441 ice_set_bit(ICE_CHAIN_FV_INDEX_START - i,
4442 available_result_ids);
4444 /* Walk through existing recipes to find a match */
4445 recp = hw->switch_info->recp_list;
4446 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
4447 /* If recipe was not created for this ID, in SW bookkeeping,
4448 * check if FW has an entry for this recipe. If the FW has an
4449 * entry update it in our SW bookkeeping and continue with the
4452 if (!recp[i].recp_created)
4453 if (ice_get_recp_frm_fw(hw,
4454 hw->switch_info->recp_list, i))
4457 /* if number of words we are looking for match */
4458 if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
4459 struct ice_fv_word *a = lkup_exts->fv_words;
4460 struct ice_fv_word *b = recp[i].lkup_exts.fv_words;
4464 for (p = 0; p < lkup_exts->n_val_words; p++) {
4465 for (q = 0; q < recp[i].lkup_exts.n_val_words;
4467 if (a[p].off == b[q].off &&
4468 a[p].prot_id == b[q].prot_id)
4469 /* Found the "p"th word in the
4474 /* After walking through all the words in the
4475 * "i"th recipe if "p"th word was not found then
4476 * this recipe is not what we are looking for.
4477 * So break out from this loop and try the next
4480 if (q >= recp[i].lkup_exts.n_val_words) {
4485 /* If for "i"th recipe the found was never set to false
4486 * then it means we found our match
4489 return i; /* Return the recipe ID */
4492 return ICE_MAX_NUM_RECIPES;
4496 * ice_prot_type_to_id - get protocol ID from protocol type
4497 * @type: protocol type
4498 * @id: pointer to variable that will receive the ID
4500 * Returns true if found, false otherwise
4502 static bool ice_prot_type_to_id(enum ice_protocol_type type, u16 *id)
/* Linear scan of the translation table; ice_prot_id_tbl is terminated by an
 * ICE_PROTOCOL_LAST sentinel (see the table's final entry earlier in this file).
 */
4506 for (i = 0; ice_prot_id_tbl[i].type != ICE_PROTOCOL_LAST; i++)
4507 if (ice_prot_id_tbl[i].type == type) {
4508 *id = ice_prot_id_tbl[i].protocol_id;
4515 * ice_fill_valid_words - count valid words
4516 * @rule: advanced rule with lookup information
4517 * @lkup_exts: byte offset extractions of the words that are valid
4519 * calculate valid words in a lookup rule using mask value
4522 ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
4523 struct ice_prot_lkup_ext *lkup_exts)
/* Appends one fv_word per non-zero 16-bit mask word in rule->m_u, recording
 * (protocol_id, byte offset, mask) into lkup_exts. ret_val is the count of
 * words added in this call (new total minus previous n_val_words);
 * NOTE(review): the return statement is outside this extract — confirm
 * ret_val is what is returned.
 */
4529 if (!ice_prot_type_to_id(rule->type, &prot_id))
4532 word = lkup_exts->n_val_words;
4534 for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
4535 if (((u16 *)&rule->m_u)[j] &&
4536 rule->type < ARRAY_SIZE(ice_prot_ext)) {
4537 /* No more space to accommodate */
4538 if (word >= ICE_MAX_CHAIN_WORDS)
4540 lkup_exts->fv_words[word].off =
4541 ice_prot_ext[rule->type].offs[j];
4542 lkup_exts->fv_words[word].prot_id =
4543 ice_prot_id_tbl[rule->type].protocol_id;
4544 lkup_exts->field_mask[word] = ((u16 *)&rule->m_u)[j];
4548 ret_val = word - lkup_exts->n_val_words;
4549 lkup_exts->n_val_words = word;
4555 * ice_find_prot_off_ind - check for specific ID and offset in rule
4556 * @lkup_exts: an array of protocol header extractions
4557 * @prot_type: protocol type to check
4558 * @off: expected offset of the extraction
4560 * Check if the prot_ext has given protocol ID and offset
4561 * Returns ICE_MAX_CHAIN_WORDS when no word matches; on a match the index
4562 * of the matching word is returned (return inside the loop is outside
4563 * this extract).
4563 ice_find_prot_off_ind(struct ice_prot_lkup_ext *lkup_exts, u8 prot_type,
4568 for (j = 0; j < lkup_exts->n_val_words; j++)
4569 if (lkup_exts->fv_words[j].off == off &&
4570 lkup_exts->fv_words[j].prot_id == prot_type)
4573 return ICE_MAX_CHAIN_WORDS;
4577 * ice_is_recipe_subset - check if recipe group policy is a subset of lookup
4578 * @lkup_exts: an array of protocol header extractions
4579 * @r_policy: preferred recipe grouping policy
4581 * Helper function to check if given recipe group is subset we need to check if
4582 * all the words described by the given recipe group exist in the advanced rule
4583 * look up information
4586 ice_is_recipe_subset(struct ice_prot_lkup_ext *lkup_exts,
4587 const struct ice_pref_recipe_group *r_policy)
/* ind[] buffers the matched word indexes so lkup_exts->done is only touched
 * once the WHOLE policy is known to match; NOTE(review): the lines storing
 * into ind[] / maintaining "count" and "in" are not visible in this extract.
 */
4589 u8 ind[ICE_NUM_WORDS_RECIPE];
4593 /* check if everything in the r_policy is part of the entire rule */
4594 for (i = 0; i < r_policy->n_val_pairs; i++) {
4597 j = ice_find_prot_off_ind(lkup_exts, r_policy->pairs[i].prot_id,
4598 r_policy->pairs[i].off);
4599 if (j >= ICE_MAX_CHAIN_WORDS)
4602 /* store the indexes temporarily found by the find function
4603 * this will be used to mark the words as 'done'
4608 /* If the entire policy recipe was a true match, then mark the fields
4609 * that are covered by the recipe as 'done' meaning that these words
4610 * will be clumped together in one recipe.
4611 * "Done" here means in our searching if certain recipe group
4612 * matches or is subset of the given rule, then we mark all
4613 * the corresponding offsets as found. So the remaining recipes should
4614 * be created with whatever words that were left.
4616 for (i = 0; i < count; i++) {
4619 ice_set_bit(in, lkup_exts->done);
4625 * ice_create_first_fit_recp_def - Create a recipe grouping
4626 * @hw: pointer to the hardware structure
4627 * @lkup_exts: an array of protocol header extractions
4628 * @rg_list: pointer to a list that stores new recipe groups
4629 * @recp_cnt: pointer to a variable that stores returned number of recipe groups
4631 * Using first fit algorithm, take all the words that are still not done
4632 * and start grouping them in 4-word groups. Each group makes up one
4635 static enum ice_status
4636 ice_create_first_fit_recp_def(struct ice_hw *hw,
4637 struct ice_prot_lkup_ext *lkup_exts,
4638 struct LIST_HEAD_TYPE *rg_list,
4641 struct ice_pref_recipe_group *grp = NULL;
4646 /* Walk through every word in the rule to check if it is not done. If so
4647 * then this word needs to be part of a new recipe.
4649 for (j = 0; j < lkup_exts->n_val_words; j++)
4650 if (!ice_is_bit_set(lkup_exts->done, j)) {
/* A fresh group entry is allocated when none exists yet or the
 * current group is full (ICE_NUM_WORDS_RECIPE pairs).
 */
4652 grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
4653 struct ice_recp_grp_entry *entry;
4655 entry = (struct ice_recp_grp_entry *)
4656 ice_malloc(hw, sizeof(*entry));
4658 return ICE_ERR_NO_MEMORY;
4659 LIST_ADD(&entry->l_entry, rg_list);
4660 grp = &entry->r_group;
/* Copy this not-yet-done word's (prot_id, off, mask) into the
 * current group's next free pair slot.
 */
4664 grp->pairs[grp->n_val_pairs].prot_id =
4665 lkup_exts->fv_words[j].prot_id;
4666 grp->pairs[grp->n_val_pairs].off =
4667 lkup_exts->fv_words[j].off;
4668 grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
4676 * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
4677 * @hw: pointer to the hardware structure
4678 * @fv_list: field vector with the extraction sequence information
4679 * @rg_list: recipe groupings with protocol-offset pairs
4681 * Helper function to fill in the field vector indices for protocol-offset
4682 * pairs. These indexes are then ultimately programmed into a recipe.
4685 ice_fill_fv_word_index(struct ice_hw *hw, struct LIST_HEAD_TYPE *fv_list,
4686 struct LIST_HEAD_TYPE *rg_list)
4688 struct ice_sw_fv_list_entry *fv;
4689 struct ice_recp_grp_entry *rg;
4690 struct ice_fv_word *fv_ext;
4692 if (LIST_EMPTY(fv_list))
/* Only the FIRST field vector on the list is consulted; all groups are
 * resolved against its extraction words (fv->fv_ptr->ew).
 */
4695 fv = LIST_FIRST_ENTRY(fv_list, struct ice_sw_fv_list_entry, list_entry);
4696 fv_ext = fv->fv_ptr->ew;
4698 LIST_FOR_EACH_ENTRY(rg, rg_list, ice_recp_grp_entry, l_entry) {
4701 for (i = 0; i < rg->r_group.n_val_pairs; i++) {
4702 struct ice_fv_word *pr;
4706 pr = &rg->r_group.pairs[i];
4707 mask = rg->r_group.mask[i];
4709 for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
4710 if (fv_ext[j].prot_id == pr->prot_id &&
4711 fv_ext[j].off == pr->off) {
4712 /* Store index of field vector */
4714 /* Mask is given by caller as big
4715 * endian, but sent to FW as little
4718 rg->fv_mask[i] = mask << 8 | mask >> 8;
4726 * ice_add_sw_recipe - function to call AQ calls to create switch recipe
4727 * @hw: pointer to hardware structure
4728 * @rm: recipe management list entry
4729 * @match_tun: if field vector index for tunnel needs to be programmed
4731 static enum ice_status
4732 ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
4735 struct ice_aqc_recipe_data_elem *tmp;
4736 struct ice_aqc_recipe_data_elem *buf;
4737 struct ice_recp_grp_entry *entry;
4738 enum ice_status status;
4743 /* When more than one recipe are required, another recipe is needed to
4744 * chain them together. Matching a tunnel metadata ID takes up one of
4745 * the match fields in the chaining recipe reducing the number of
4746 * chained recipes by one.
4748 if (rm->n_grp_count > 1)
4750 if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE ||
4751 (match_tun && rm->n_grp_count > (ICE_MAX_CHAIN_RECIPE - 1)))
4752 return ICE_ERR_MAX_LIMIT;
/* tmp holds a FW-provided template recipe (read back below via
 * ice_aq_get_recipe); buf is the batch submitted to ice_aq_add_recipe.
 */
4754 tmp = (struct ice_aqc_recipe_data_elem *)ice_calloc(hw,
4755 ICE_MAX_NUM_RECIPES,
4758 return ICE_ERR_NO_MEMORY;
4760 buf = (struct ice_aqc_recipe_data_elem *)
4761 ice_calloc(hw, rm->n_grp_count, sizeof(*buf));
4763 status = ICE_ERR_NO_MEMORY;
4767 ice_zero_bitmap(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
4768 recipe_count = ICE_MAX_NUM_RECIPES;
4769 status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
4771 if (status || recipe_count == 0)
4774 /* Allocate the recipe resources, and configure them according to the
4775 * match fields from protocol headers and extracted field vectors.
4777 chain_idx = ICE_CHAIN_FV_INDEX_START -
4778 ice_find_first_bit(available_result_ids,
4779 ICE_CHAIN_FV_INDEX_START + 1);
4780 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4783 status = ice_alloc_recipe(hw, &entry->rid);
4787 /* Clear the result index of the located recipe, as this will be
4788 * updated, if needed, later in the recipe creation process.
4790 tmp[0].content.result_indx = 0;
4792 buf[recps] = tmp[0];
4793 buf[recps].recipe_indx = (u8)entry->rid;
4794 /* if the recipe is a non-root recipe RID should be programmed
4795 * as 0 for the rules to be applied correctly.
4797 buf[recps].content.rid = 0;
4798 ice_memset(&buf[recps].content.lkup_indx, 0,
4799 sizeof(buf[recps].content.lkup_indx),
4802 /* All recipes use look-up index 0 to match switch ID. */
4803 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4804 buf[recps].content.mask[0] =
4805 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4806 /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
4809 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4810 buf[recps].content.lkup_indx[i] = 0x80;
4811 buf[recps].content.mask[i] = 0;
/* Overwrite the ignore defaults with this group's real field
 * vector indexes/masks (resolved by ice_fill_fv_word_index).
 */
4814 for (i = 0; i < entry->r_group.n_val_pairs; i++) {
4815 buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
4816 buf[recps].content.mask[i + 1] =
4817 CPU_TO_LE16(entry->fv_mask[i]);
4820 if (rm->n_grp_count > 1) {
4821 entry->chain_idx = chain_idx;
4822 buf[recps].content.result_indx =
4823 ICE_AQ_RECIPE_RESULT_EN |
4824 ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
4825 ICE_AQ_RECIPE_RESULT_DATA_M);
4826 ice_clear_bit(ICE_CHAIN_FV_INDEX_START - chain_idx,
4827 available_result_ids);
4828 chain_idx = ICE_CHAIN_FV_INDEX_START -
4829 ice_find_first_bit(available_result_ids,
4830 ICE_CHAIN_FV_INDEX_START +
4834 /* fill recipe dependencies */
4835 ice_zero_bitmap((ice_bitmap_t *)buf[recps].recipe_bitmap,
4836 ICE_MAX_NUM_RECIPES);
4837 ice_set_bit(buf[recps].recipe_indx,
4838 (ice_bitmap_t *)buf[recps].recipe_bitmap);
4839 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4843 if (rm->n_grp_count == 1) {
4844 rm->root_rid = buf[0].recipe_indx;
4845 ice_set_bit(buf[0].recipe_indx, rm->r_bitmap);
4846 buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
4847 if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
4848 ice_memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
4849 sizeof(buf[0].recipe_bitmap),
4850 ICE_NONDMA_TO_NONDMA);
4852 status = ICE_ERR_BAD_PTR;
4855 /* Applicable only for ROOT_RECIPE, set the fwd_priority for
4856 * the recipe which is getting created if specified
4857 * by user. Usually any advanced switch filter, which results
4858 * into new extraction sequence, ended up creating a new recipe
4859 * of type ROOT and usually recipes are associated with profiles
4860 * Switch rule referring newly created recipe, needs to have
4861 * either/or 'fwd' or 'join' priority, otherwise switch rule
4862 * evaluation will not happen correctly. In other words, if
4863 * switch rule to be evaluated on priority basis, then recipe
4864 * needs to have priority, otherwise it will be evaluated last.
4866 buf[0].content.act_ctrl_fwd_priority = rm->priority;
4868 struct ice_recp_grp_entry *last_chain_entry;
4871 /* Allocate the last recipe that will chain the outcomes of the
4872 * other recipes together
4874 status = ice_alloc_recipe(hw, &rid);
4878 buf[recps].recipe_indx = (u8)rid;
4879 buf[recps].content.rid = (u8)rid;
4880 buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
4881 /* the new entry created should also be part of rg_list to
4882 * make sure we have complete recipe
4884 last_chain_entry = (struct ice_recp_grp_entry *)ice_malloc(hw,
4885 sizeof(*last_chain_entry));
4886 if (!last_chain_entry) {
4887 status = ICE_ERR_NO_MEMORY;
4890 last_chain_entry->rid = rid;
4891 ice_memset(&buf[recps].content.lkup_indx, 0,
4892 sizeof(buf[recps].content.lkup_indx),
4894 /* All recipes use look-up index 0 to match switch ID. */
4895 buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
4896 buf[recps].content.mask[0] =
4897 CPU_TO_LE16(ICE_AQ_SW_ID_LKUP_MASK);
4898 for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
4899 buf[recps].content.lkup_indx[i] =
4900 ICE_AQ_RECIPE_LKUP_IGNORE;
4901 buf[recps].content.mask[i] = 0;
4905 /* update r_bitmap with the recp that is used for chaining */
4906 ice_set_bit(rid, rm->r_bitmap);
4907 /* this is the recipe that chains all the other recipes so it
4908 * should not have a chaining ID to indicate the same
4910 last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
/* Wire each sub-recipe's intermediate result (chain_idx) as a
 * lookup word of the chaining recipe, matched against 0xFFFF.
 */
4911 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry,
4913 last_chain_entry->fv_idx[i] = entry->chain_idx;
4914 buf[recps].content.lkup_indx[i] = entry->chain_idx;
4915 buf[recps].content.mask[i++] = CPU_TO_LE16(0xFFFF);
4916 ice_set_bit(entry->rid, rm->r_bitmap);
4918 LIST_ADD(&last_chain_entry->l_entry, &rm->rg_list);
4919 if (sizeof(buf[recps].recipe_bitmap) >=
4920 sizeof(rm->r_bitmap)) {
4921 ice_memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
4922 sizeof(buf[recps].recipe_bitmap),
4923 ICE_NONDMA_TO_NONDMA);
4925 status = ICE_ERR_BAD_PTR;
4928 buf[recps].content.act_ctrl_fwd_priority = rm->priority;
4930 /* To differentiate among different UDP tunnels, a meta data ID
4934 buf[recps].content.lkup_indx[i] = ICE_TUN_FLAG_FV_IND;
4935 buf[recps].content.mask[i] =
4936 CPU_TO_LE16(ICE_TUN_FLAG_MASK);
4940 rm->root_rid = (u8)rid;
/* Recipe programming requires the global change lock around the AQ call. */
4942 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
4946 status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
4947 ice_release_change_lock(hw);
4951 /* Every recipe that just got created add it to the recipe
4954 LIST_FOR_EACH_ENTRY(entry, &rm->rg_list, ice_recp_grp_entry, l_entry) {
4955 struct ice_switch_info *sw = hw->switch_info;
4956 struct ice_sw_recipe *recp;
4958 recp = &sw->recp_list[entry->rid];
4959 recp->root_rid = entry->rid;
4960 ice_memcpy(&recp->ext_words, entry->r_group.pairs,
4961 entry->r_group.n_val_pairs *
4962 sizeof(struct ice_fv_word),
4963 ICE_NONDMA_TO_NONDMA);
4965 recp->n_ext_words = entry->r_group.n_val_pairs;
4966 recp->chain_idx = entry->chain_idx;
4967 recp->recp_created = true;
4968 recp->big_recp = false;
4982 * ice_create_recipe_group - creates recipe group
4983 * @hw: pointer to hardware structure
4984 * @rm: recipe management list entry
4985 * @lkup_exts: lookup elements
4987 static enum ice_status
4988 ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
4989 struct ice_prot_lkup_ext *lkup_exts)
4991 struct ice_recp_grp_entry *entry;
4992 struct ice_recp_grp_entry *tmp;
4993 enum ice_status status;
4997 rm->n_grp_count = 0;
4999 /* Each switch recipe can match up to 5 words or metadata. One word in
5000 * each recipe is used to match the switch ID. Four words are left for
5001 * matching other values. If the new advanced recipe requires more than
5002 * 4 words, it needs to be split into multiple recipes which are chained
5003 * together using the intermediate result that each produces as input to
5004 * the other recipes in the sequence.
5006 groups = ARRAY_SIZE(ice_recipe_pack);
5008 /* Check if any of the preferred recipes from the grouping policy
5011 for (i = 0; i < groups; i++)
5012 /* Check if the recipe from the preferred grouping matches
5013 * or is a subset of the fields that needs to be looked up.
5015 if (ice_is_recipe_subset(lkup_exts, &ice_recipe_pack[i])) {
5016 /* This recipe can be used by itself or grouped with
5019 entry = (struct ice_recp_grp_entry *)
5020 ice_malloc(hw, sizeof(*entry));
5022 status = ICE_ERR_NO_MEMORY;
5025 entry->r_group = ice_recipe_pack[i];
5026 LIST_ADD(&entry->l_entry, &rm->rg_list);
5030 /* Create recipes for words that are marked not done by packing them
5033 status = ice_create_first_fit_recp_def(hw, lkup_exts,
5034 &rm->rg_list, &recp_count);
5036 rm->n_grp_count += recp_count;
5037 rm->n_ext_words = lkup_exts->n_val_words;
5038 ice_memcpy(&rm->ext_words, lkup_exts->fv_words,
5039 sizeof(rm->ext_words), ICE_NONDMA_TO_NONDMA);
5040 ice_memcpy(rm->word_masks, lkup_exts->field_mask,
5041 sizeof(rm->word_masks), ICE_NONDMA_TO_NONDMA);
/* Error path: tear down every group entry queued on rg_list so a failed
 * grouping leaves nothing allocated behind.
 */
5046 LIST_FOR_EACH_ENTRY_SAFE(entry, tmp, &rm->rg_list, ice_recp_grp_entry,
5048 LIST_DEL(&entry->l_entry);
5049 ice_free(hw, entry);
5057 * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
5058 * @hw: pointer to hardware structure
5059 * @lkups: lookup elements or match criteria for the advanced recipe, one
5060 * structure per protocol header
5061 * @lkups_cnt: number of protocols
5062 * @fv_list: pointer to a list that holds the returned field vectors
5064 static enum ice_status
5065 ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5066 struct LIST_HEAD_TYPE *fv_list)
5068 enum ice_status status;
/* Scratch array of protocol IDs, one per lookup element; freed below on
 * every path (goto cleanup pattern).
 */
5072 prot_ids = (u16 *)ice_calloc(hw, lkups_cnt, sizeof(*prot_ids));
5074 return ICE_ERR_NO_MEMORY;
5076 for (i = 0; i < lkups_cnt; i++)
5077 if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
5078 status = ICE_ERR_CFG;
5082 /* Find field vectors that include all specified protocol types */
5083 status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, fv_list);
5086 ice_free(hw, prot_ids);
5091 * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
5092 * @hw: pointer to hardware structure
5093 * @lkups: lookup elements or match criteria for the advanced recipe, one
5094 * structure per protocol header
5095 * @lkups_cnt: number of protocols
5096 * @rinfo: other information regarding the rule e.g. priority and action info
5097 * @rid: return the recipe ID of the recipe created
5099 static enum ice_status
5100 ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5101 u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
5103 struct ice_prot_lkup_ext *lkup_exts;
5104 struct ice_recp_grp_entry *r_entry;
5105 struct ice_sw_fv_list_entry *fvit;
5106 struct ice_recp_grp_entry *r_tmp;
5107 struct ice_sw_fv_list_entry *tmp;
5108 enum ice_status status = ICE_SUCCESS;
5109 struct ice_sw_recipe *rm;
5110 bool match_tun = false;
5114 return ICE_ERR_PARAM;
5116 lkup_exts = (struct ice_prot_lkup_ext *)
5117 ice_malloc(hw, sizeof(*lkup_exts));
5119 return ICE_ERR_NO_MEMORY;
5121 /* Determine the number of words to be matched and if it exceeds a
5122 * recipe's restrictions
5124 for (i = 0; i < lkups_cnt; i++) {
5127 if (lkups[i].type >= ICE_PROTOCOL_LAST) {
5128 status = ICE_ERR_CFG;
5129 goto err_free_lkup_exts;
5132 count = ice_fill_valid_words(&lkups[i], lkup_exts);
5134 status = ICE_ERR_CFG;
5135 goto err_free_lkup_exts;
/* Reuse an existing recipe when one already covers these exact words;
 * err_free_lkup_exts here is the SUCCESS exit too (status is ICE_SUCCESS).
 */
5139 *rid = ice_find_recp(hw, lkup_exts);
5140 if (*rid < ICE_MAX_NUM_RECIPES)
5141 /* Success if found a recipe that match the existing criteria */
5142 goto err_free_lkup_exts;
5144 /* Recipe we need does not exist, add a recipe */
5146 rm = (struct ice_sw_recipe *)ice_malloc(hw, sizeof(*rm));
5148 status = ICE_ERR_NO_MEMORY;
5149 goto err_free_lkup_exts;
5152 /* Get field vectors that contain fields extracted from all the protocol
5153 * headers being programmed.
5155 INIT_LIST_HEAD(&rm->fv_list);
5156 INIT_LIST_HEAD(&rm->rg_list);
5158 status = ice_get_fv(hw, lkups, lkups_cnt, &rm->fv_list);
5162 /* Group match words into recipes using preferred recipe grouping
5165 status = ice_create_recipe_group(hw, rm, lkup_exts);
5169 /* There is only profile for UDP tunnels. So, it is necessary to use a
5170 * metadata ID flag to differentiate different tunnel types. A separate
5171 * recipe needs to be used for the metadata.
5173 if ((rinfo->tun_type == ICE_SW_TUN_VXLAN_GPE ||
5174 rinfo->tun_type == ICE_SW_TUN_GENEVE ||
5175 rinfo->tun_type == ICE_SW_TUN_VXLAN) && rm->n_grp_count > 1)
5178 /* set the recipe priority if specified */
5179 rm->priority = rinfo->priority ? rinfo->priority : 0;
5181 /* Find offsets from the field vector. Pick the first one for all the
5184 ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
5185 status = ice_add_sw_recipe(hw, rm, match_tun);
5189 /* Associate all the recipes created with all the profiles in the
5190 * common field vector.
5192 LIST_FOR_EACH_ENTRY(fvit, &rm->fv_list, ice_sw_fv_list_entry,
5194 ice_declare_bitmap(r_bitmap, ICE_MAX_NUM_RECIPES);
5196 status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
5197 (u8 *)r_bitmap, NULL);
/* Merge the new recipes into the profile's existing association
 * bitmap before writing it back under the change lock.
 */
5201 ice_or_bitmap(rm->r_bitmap, r_bitmap, rm->r_bitmap,
5202 ICE_MAX_NUM_RECIPES);
5203 status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
5207 status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
5210 ice_release_change_lock(hw);
5216 *rid = rm->root_rid;
5217 ice_memcpy(&hw->switch_info->recp_list[*rid].lkup_exts,
5218 lkup_exts, sizeof(*lkup_exts), ICE_NONDMA_TO_NONDMA);
/* Cleanup: rg_list and fv_list entries are only book-keeping for this
 * call; free them regardless of status.
 */
5220 LIST_FOR_EACH_ENTRY_SAFE(r_entry, r_tmp, &rm->rg_list,
5221 ice_recp_grp_entry, l_entry) {
5222 LIST_DEL(&r_entry->l_entry);
5223 ice_free(hw, r_entry);
5226 LIST_FOR_EACH_ENTRY_SAFE(fvit, tmp, &rm->fv_list, ice_sw_fv_list_entry,
5228 LIST_DEL(&fvit->list_entry);
5233 ice_free(hw, rm->root_buf);
5238 ice_free(hw, lkup_exts);
5244 * ice_find_dummy_packet - find dummy packet by tunnel type
5246 * @lkups: lookup elements or match criteria for the advanced recipe, one
5247 * structure per protocol header
5248 * @lkups_cnt: number of protocols
5249 * @tun_type: tunnel type from the match criteria
5250 * @pkt: dummy packet to fill according to filter match criteria
5251 * @pkt_len: packet length of dummy packet
5252 * @offsets: pointer to receive the pointer to the offsets for the packet
5255 ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5256 enum ice_sw_tunnel_type tun_type, const u8 **pkt,
5258 const struct ice_dummy_pkt_offsets **offsets)
5260 bool tcp = false, udp = false;
/* Inspect the lookup list once to learn which L4 protocol the caller is
 * matching; that drives the dummy-packet template selected below.
 */
5263 for (i = 0; i < lkups_cnt; i++) {
5264 if (lkups[i].type == ICE_UDP_ILOS)
5266 else if (lkups[i].type == ICE_TCP_IL)
5270 if (tun_type == ICE_SW_TUN_NVGRE || tun_type == ICE_ALL_TUNNELS) {
5271 *pkt = dummy_gre_packet;
5272 *pkt_len = sizeof(dummy_gre_packet);
5273 *offsets = dummy_gre_packet_offsets;
5277 if (tun_type == ICE_SW_TUN_VXLAN || tun_type == ICE_SW_TUN_GENEVE ||
5278 tun_type == ICE_SW_TUN_VXLAN_GPE || tun_type == ICE_SW_TUN_UDP) {
5280 *pkt = dummy_udp_tun_tcp_packet;
5281 *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
5282 *offsets = dummy_udp_tun_tcp_packet_offsets;
5286 *pkt = dummy_udp_tun_udp_packet;
5287 *pkt_len = sizeof(dummy_udp_tun_udp_packet);
5288 *offsets = dummy_udp_tun_udp_packet_offsets;
/* Non-tunnel fallbacks: plain UDP template when UDP was requested,
 * otherwise the TCP template is the default.
 */
5293 *pkt = dummy_udp_packet;
5294 *pkt_len = sizeof(dummy_udp_packet);
5295 *offsets = dummy_udp_packet_offsets;
5299 *pkt = dummy_tcp_packet;
5300 *pkt_len = sizeof(dummy_tcp_packet);
5301 *offsets = dummy_tcp_packet_offsets;
5305 * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
5307 * @lkups: lookup elements or match criteria for the advanced recipe, one
5308 * structure per protocol header
5309 * @lkups_cnt: number of protocols
5310 * @s_rule: stores rule information from the match criteria
5311 * @dummy_pkt: dummy packet to fill according to filter match criteria
5312 * @pkt_len: packet length of dummy packet
5313 * @offsets: offset info for the dummy packet
5315 static enum ice_status
5316 ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
5317 struct ice_aqc_sw_rules_elem *s_rule,
5318 const u8 *dummy_pkt, u16 pkt_len,
5319 const struct ice_dummy_pkt_offsets *offsets)
5324 /* Start with a packet with a pre-defined/dummy content. Then, fill
5325 * in the header values to be looked up or matched.
5327 pkt = s_rule->pdata.lkup_tx_rx.hdr;
5329 ice_memcpy(pkt, dummy_pkt, pkt_len, ICE_NONDMA_TO_NONDMA);
5331 for (i = 0; i < lkups_cnt; i++) {
5332 enum ice_protocol_type type;
5333 u16 offset = 0, len = 0, j;
5336 /* find the start of this layer; it should be found since this
5337 * was already checked when search for the dummy packet
5339 type = lkups[i].type;
5340 for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
5341 if (type == offsets[j].type) {
5342 offset = offsets[j].offset;
5347 /* this should never happen in a correct calling sequence */
5349 return ICE_ERR_PARAM;
/* Header length depends on the protocol layer being written. */
5351 switch (lkups[i].type) {
5354 len = sizeof(struct ice_ether_hdr);
5358 len = sizeof(struct ice_ipv4_hdr);
5363 len = sizeof(struct ice_l4_hdr);
5366 len = sizeof(struct ice_sctp_hdr);
5369 len = sizeof(struct ice_nvgre);
5374 len = sizeof(struct ice_udp_tnl_hdr);
5377 return ICE_ERR_PARAM;
5380 /* the length should be a word multiple */
5381 if (len % ICE_BYTES_PER_WORD)
5384 /* We have the offset to the header start, the length, the
5385 * caller's header values and mask. Use this information to
5386 * copy the data into the dummy packet appropriately based on
5387 * the mask. Note that we need to only write the bits as
5388 * indicated by the mask to make sure we don't improperly write
5389 * over any significant packet data.
5391 for (j = 0; j < len / sizeof(u16); j++)
5392 if (((u16 *)&lkups[i].m_u)[j])
5393 ((u16 *)(pkt + offset))[j] =
5394 (((u16 *)(pkt + offset))[j] &
5395 ~((u16 *)&lkups[i].m_u)[j]) |
5396 (((u16 *)&lkups[i].h_u)[j] &
5397 ((u16 *)&lkups[i].m_u)[j]);
5400 s_rule->pdata.lkup_tx_rx.hdr_len = CPU_TO_LE16(pkt_len);
5406 * ice_find_adv_rule_entry - Search a rule entry
5407 * @hw: pointer to the hardware structure
5408 * @lkups: lookup elements or match criteria for the advanced recipe, one
5409 * structure per protocol header
5410 * @lkups_cnt: number of protocols
5411 * @recp_id: recipe ID for which we are finding the rule
5412 * @rinfo: other information regarding the rule e.g. priority and action info
5414 * Helper function to search for a given advance rule entry
5415 * Returns pointer to entry storing the rule if found
5417 static struct ice_adv_fltr_mgmt_list_entry *
5418 ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5419 u16 lkups_cnt, u8 recp_id,
5420 struct ice_adv_rule_info *rinfo)
5422 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5423 struct ice_switch_info *sw = hw->switch_info;
/* A rule matches only when count, every lookup element (memcmp), the
 * action flag, and the tunnel type all agree with the stored entry.
 */
5426 LIST_FOR_EACH_ENTRY(list_itr, &sw->recp_list[recp_id].filt_rules,
5427 ice_adv_fltr_mgmt_list_entry, list_entry) {
5428 bool lkups_matched = true;
5430 if (lkups_cnt != list_itr->lkups_cnt)
5432 for (i = 0; i < list_itr->lkups_cnt; i++)
5433 if (memcmp(&list_itr->lkups[i], &lkups[i],
5435 lkups_matched = false;
5438 if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
5439 rinfo->tun_type == list_itr->rule_info.tun_type &&
5447 * ice_adv_add_update_vsi_list
5448 * @hw: pointer to the hardware structure
5449 * @m_entry: pointer to current adv filter management list entry
5450 * @cur_fltr: filter information from the book keeping entry
5451 * @new_fltr: filter information with the new VSI to be added
5453 * Call AQ command to add or update previously created VSI list with new VSI.
5455 * Helper function to do book keeping associated with adding filter information
5456 * The algorithm to do the booking keeping is described below :
5457 * When a VSI needs to subscribe to a given advanced filter
5458 * if only one VSI has been added till now
5459 * Allocate a new VSI list and add two VSIs
5460 * to this list using switch rule command
5461 * Update the previously created switch rule with the
5462 * newly created VSI list ID
5463 * if a VSI list was previously created
5464 * Add the new VSI to the previously created VSI list set
5465 * using the update switch rule command
5467 static enum ice_status
5468 ice_adv_add_update_vsi_list(struct ice_hw *hw,
5469 struct ice_adv_fltr_mgmt_list_entry *m_entry,
5470 struct ice_adv_rule_info *cur_fltr,
5471 struct ice_adv_rule_info *new_fltr)
5473 enum ice_status status;
5474 u16 vsi_list_id = 0;
/* Guard clauses: queue/queue-group forwards never use VSI lists, duplicate
 * DROP rules are rejected, and mixing queue actions with VSI forwarding is
 * not implemented.
 */
5476 if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5477 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP)
5478 return ICE_ERR_NOT_IMPL;
5480 if (cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET &&
5481 new_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
5482 return ICE_ERR_ALREADY_EXISTS;
5484 if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
5485 new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
5486 (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5487 cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
5488 return ICE_ERR_NOT_IMPL;
5490 if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
5491 /* Only one entry existed in the mapping and it was not already
5492 * a part of a VSI list. So, create a VSI list with the old and
5495 struct ice_fltr_info tmp_fltr;
5496 u16 vsi_handle_arr[2];
5498 /* A rule already exists with the new VSI being added */
5499 if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
5500 new_fltr->sw_act.fwd_id.hw_vsi_id)
5501 return ICE_ERR_ALREADY_EXISTS;
5503 vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
5504 vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
5505 status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
5511 tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
5512 tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
5513 tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
5514 /* Update the previous switch rule of "forward to VSI" to
5517 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
/* Book-keeping: the current filter now forwards to the new
 * VSI list, and a handle->list map entry is created for it.
 */
5521 cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
5522 cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
5523 m_entry->vsi_list_info =
5524 ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
5527 u16 vsi_handle = new_fltr->sw_act.vsi_handle;
5529 if (!m_entry->vsi_list_info)
5532 /* A rule already exists with the new VSI being added */
5533 if (ice_is_bit_set(m_entry->vsi_list_info->vsi_map, vsi_handle))
5536 /* Update the previously created VSI list set with
5537 * the new VSI ID passed in
5539 vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
5541 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
5543 ice_aqc_opc_update_sw_rules,
5545 /* update VSI list mapping info with new VSI ID */
5547 ice_set_bit(vsi_handle,
5548 m_entry->vsi_list_info->vsi_map);
5551 m_entry->vsi_count++;
5556 * ice_add_adv_rule - helper function to create an advanced switch rule
5557 * @hw: pointer to the hardware structure
5558 * @lkups: information on the words that needs to be looked up. All words
5559 * together makes one recipe
5560 * @lkups_cnt: num of entries in the lkups array
5561 * @rinfo: other information related to the rule that needs to be programmed
5562 * @added_entry: this will return recipe_id, rule_id and vsi_handle. should be
5563 * ignored is case of error.
5565 * This function can program only 1 rule at a time. The lkups is used to
5566 * describe the all the words that forms the "lookup" portion of the recipe.
5567 * These words can span multiple protocols. Callers to this function need to
5568 * pass in a list of protocol headers with lookup information along and mask
5569 * that determines which words are valid from the given protocol header.
5570 * rinfo describes other information related to this rule such as forwarding
5571 * IDs, priority of this rule, etc.
/* Build and program one advanced switch rule from the caller's lookup
 * words (lkups) and rule metadata (rinfo), then record it on the owning
 * recipe's filter list.  On success, *added_entry reports the recipe ID,
 * rule ID and VSI handle that now cover this request.
 *
 * NOTE(review): this listing is gap-sampled; lines between the embedded
 * original line numbers are elided, so some conditions/braces are not
 * visible here.
 */
5574 ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5575 u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
5576 struct ice_rule_query_data *added_entry)
5578 struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
5579 u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
5580 const struct ice_dummy_pkt_offsets *pkt_offsets;
5581 struct ice_aqc_sw_rules_elem *s_rule = NULL;
5582 struct LIST_HEAD_TYPE *rule_head;
5583 struct ice_switch_info *sw;
5584 enum ice_status status;
5585 const u8 *pkt = NULL;
5590 return ICE_ERR_PARAM;
/* Each lookup must contribute at least one non-zero 16-bit mask word;
 * otherwise the rule would match nothing meaningful.
 */
5592 for (i = 0; i < lkups_cnt; i++) {
5595 /* Validate match masks to make sure that there is something
5598 ptr = (u16 *)&lkups[i].m_u;
5599 for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
5606 return ICE_ERR_PARAM;
5608 /* make sure that we can locate a dummy packet */
5609 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
5612 status = ICE_ERR_PARAM;
5613 goto err_ice_add_adv_rule;
/* Only forward-to-VSI, forward-to-queue and drop actions are accepted. */
5616 if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
5617 rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
5618 rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
5621 vsi_handle = rinfo->sw_act.vsi_handle;
5622 if (!ice_is_vsi_valid(hw, vsi_handle))
5623 return ICE_ERR_PARAM;
/* Resolve the software VSI handle to the HW VSI number where needed;
 * TX rules additionally use it as the packet source.
 */
5625 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
5626 rinfo->sw_act.fwd_id.hw_vsi_id =
5627 ice_get_hw_vsi_num(hw, vsi_handle);
5628 if (rinfo->sw_act.flag & ICE_FLTR_TX)
5629 rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
/* Create (or look up) the recipe matching these lookup words. */
5631 status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
5634 m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5636 /* An identical rule already exists: fold this VSI into it instead
5637 * of programming a duplicate.  If the existing rule forwards to a
5638 * single different VSI, a VSI list is created holding both VSIs;
5639 * if it already uses a VSI list, this VSI is simply added to that
5640 * list.  vsi_count is updated accordingly.
5641 */
5644 status = ice_adv_add_update_vsi_list(hw, m_entry,
5645 &m_entry->rule_info,
5648 added_entry->rid = rid;
5649 added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
5650 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* New rule: allocate an AQ buffer sized for the dummy packet header. */
5654 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5655 s_rule = (struct ice_aqc_sw_rules_elem *)ice_malloc(hw, rule_buf_sz);
5657 return ICE_ERR_NO_MEMORY;
/* Base action flags set unconditionally: loopback + LAN enable. */
5658 act |= ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
5659 switch (rinfo->sw_act.fltr_act) {
5660 case ICE_FWD_TO_VSI:
5661 act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
5662 ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
5663 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
5666 act |= ICE_SINGLE_ACT_TO_Q;
5667 act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
5668 ICE_SINGLE_ACT_Q_INDEX_M;
5670 case ICE_DROP_PACKET:
5671 act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
5672 ICE_SINGLE_ACT_VALID_BIT;
/* Unsupported action (default case of the switch, elided above). */
5675 status = ICE_ERR_CFG;
5676 goto err_ice_add_adv_rule;
5679 /* set the rule LOOKUP type based on caller specified 'RX'
5680 * instead of hardcoding it to be either LOOKUP_TX/RX
5682 * for 'RX' set the source to be the port number
5683 * for 'TX' set the source to be the source HW VSI number (determined
5687 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_RX);
5688 s_rule->pdata.lkup_tx_rx.src =
5689 CPU_TO_LE16(hw->port_info->lport);
5691 s_rule->type = CPU_TO_LE16(ICE_AQC_SW_RULES_T_LKUP_TX);
5692 s_rule->pdata.lkup_tx_rx.src = CPU_TO_LE16(rinfo->sw_act.src);
5695 s_rule->pdata.lkup_tx_rx.recipe_id = CPU_TO_LE16(rid);
5696 s_rule->pdata.lkup_tx_rx.act = CPU_TO_LE32(act);
/* Stamp the caller's match values into the dummy packet header,
 * then program the rule via the add-switch-rules AQ command.
 */
5698 ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt, pkt_len,
5701 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5702 rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
5705 goto err_ice_add_adv_rule;
/* Bookkeeping entry: keep a private copy of the lookup words so the
 * rule can later be found, replayed, or removed.
 */
5706 adv_fltr = (struct ice_adv_fltr_mgmt_list_entry *)
5707 ice_malloc(hw, sizeof(struct ice_adv_fltr_mgmt_list_entry));
5709 status = ICE_ERR_NO_MEMORY;
5710 goto err_ice_add_adv_rule;
5713 adv_fltr->lkups = (struct ice_adv_lkup_elem *)
5714 ice_memdup(hw, lkups, lkups_cnt * sizeof(*lkups),
5715 ICE_NONDMA_TO_NONDMA);
5716 if (!adv_fltr->lkups) {
5717 status = ICE_ERR_NO_MEMORY;
5718 goto err_ice_add_adv_rule;
5721 adv_fltr->lkups_cnt = lkups_cnt;
5722 adv_fltr->rule_info = *rinfo;
/* The HW-assigned rule index returned in the AQ buffer becomes the
 * filter rule ID used for later update/remove operations.
 */
5723 adv_fltr->rule_info.fltr_rule_id =
5724 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5725 sw = hw->switch_info;
5726 sw->recp_list[rid].adv_rule = true;
5727 rule_head = &sw->recp_list[rid].filt_rules;
5729 if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI) {
5730 struct ice_fltr_info tmp_fltr;
5732 tmp_fltr.fltr_rule_id =
5733 LE16_TO_CPU(s_rule->pdata.lkup_tx_rx.index);
5734 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5735 tmp_fltr.fwd_id.hw_vsi_id =
5736 ice_get_hw_vsi_num(hw, vsi_handle);
5737 tmp_fltr.vsi_handle = vsi_handle;
5738 /* Update the forwarding bookkeeping for this single-VSI rule
 * (original comment truncated in this excerpt — TODO confirm
 * intent against the full source).
 */
5741 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5743 goto err_ice_add_adv_rule;
5744 adv_fltr->vsi_count = 1;
5747 /* Add rule entry to book keeping list */
5748 LIST_ADD(&adv_fltr->list_entry, rule_head);
5750 added_entry->rid = rid;
5751 added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
5752 added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
/* Error unwind: on failure release the bookkeeping entry; the AQ
 * buffer is freed on all paths that reach here.
 */
5754 err_ice_add_adv_rule:
5755 if (status && adv_fltr) {
5756 ice_free(hw, adv_fltr->lkups);
5757 ice_free(hw, adv_fltr);
5760 ice_free(hw, s_rule);
5766 * ice_adv_rem_update_vsi_list
5767 * @hw: pointer to the hardware structure
5768 * @vsi_handle: VSI handle of the VSI to remove
5769 * @fm_list: filter management entry for which the VSI list management needs to
/* Remove vsi_handle from the VSI list referenced by fm_list's rule.
 * If exactly one VSI remains afterwards, convert the rule back into a
 * plain "forward to VSI" rule and delete the now-unused VSI list.
 *
 * NOTE(review): this listing is gap-sampled; some lines (including a
 * few local declarations and braces) are elided from view.
 */
5772 static enum ice_status
5773 ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
5774 struct ice_adv_fltr_mgmt_list_entry *fm_list)
5776 struct ice_vsi_list_map_info *vsi_list_info;
5777 enum ice_sw_lkup_type lkup_type;
5778 enum ice_status status;
/* Only rules that currently forward to a non-empty VSI list qualify. */
5781 if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
5782 fm_list->vsi_count == 0)
5783 return ICE_ERR_PARAM;
5785 /* A rule with the VSI being removed does not exist */
5786 if (!ice_is_bit_set(fm_list->vsi_list_info->vsi_map, vsi_handle))
5787 return ICE_ERR_DOES_NOT_EXIST;
5789 lkup_type = ICE_SW_LKUP_LAST;
5790 vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
/* Drop this VSI from the HW VSI list ('true' = remove), then mirror
 * the change in the software map and count.
 */
5791 status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
5792 ice_aqc_opc_update_sw_rules,
5797 fm_list->vsi_count--;
5798 ice_clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
5799 vsi_list_info = fm_list->vsi_list_info;
5800 if (fm_list->vsi_count == 1) {
5801 struct ice_fltr_info tmp_fltr;
/* Find the single remaining VSI in the list's bitmap. */
5804 rem_vsi_handle = ice_find_first_bit(vsi_list_info->vsi_map,
5806 if (!ice_is_vsi_valid(hw, rem_vsi_handle))
5807 return ICE_ERR_OUT_OF_RANGE;
5809 /* Make sure VSI list is empty before removing it below */
5810 status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
5812 ice_aqc_opc_update_sw_rules,
5816 tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
5817 fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
5818 tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
5819 tmp_fltr.fwd_id.hw_vsi_id =
5820 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5821 fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
5822 ice_get_hw_vsi_num(hw, rem_vsi_handle);
5824 /* Convert the previous "fwd to VSI list" rule into a plain
5825 * "fwd to VSI" rule for the one remaining VSI
5827 status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
5829 ice_debug(hw, ICE_DBG_SW,
5830 "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
5831 tmp_fltr.fwd_id.hw_vsi_id, status);
5836 if (fm_list->vsi_count == 1) {
5837 /* Remove the VSI list since it is no longer used */
5838 status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
5840 ice_debug(hw, ICE_DBG_SW,
5841 "Failed to remove VSI list %d, error %d\n",
5842 vsi_list_id, status);
/* Release the software map entry now that the HW list is gone. */
5846 LIST_DEL(&vsi_list_info->list_entry);
5847 ice_free(hw, vsi_list_info);
5848 fm_list->vsi_list_info = NULL;
5855 * ice_rem_adv_rule - removes existing advanced switch rule
5856 * @hw: pointer to the hardware structure
5857 * @lkups: information on the words that needs to be looked up. All words
5858 * together makes one recipe
5859 * @lkups_cnt: num of entries in the lkups array
5860 * @rinfo: pointer to the rule information for the rule
5862 * This function can be used to remove 1 rule at a time. The lkups is
5863 * used to describe all the words that forms the "lookup" portion of the
5864 * rule. These words can span multiple protocols. Callers to this function
5865 * need to pass in a list of protocol headers with lookup information along
5866 * and mask that determines which words are valid from the given protocol
5867 * header. rinfo describes other information related to this rule such as
5868 * forwarding IDs, priority of this rule, etc.
/* Remove one advanced switch rule identified by its lookup words and
 * rule info.  The rule's recipe is located from the lookups; VSI-list
 * rules shed only the requesting VSI, and the HW rule itself is deleted
 * only when no VSI still references it.
 *
 * NOTE(review): this listing is gap-sampled; some conditions/braces are
 * elided from view.
 */
5871 ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
5872 u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
5874 struct ice_adv_fltr_mgmt_list_entry *list_elem;
5875 const struct ice_dummy_pkt_offsets *offsets;
5876 struct ice_prot_lkup_ext lkup_exts;
5877 u16 rule_buf_sz, pkt_len, i, rid;
5878 struct ice_lock *rule_lock; /* Lock to protect filter rule list */
5879 enum ice_status status = ICE_SUCCESS;
5880 bool remove_rule = false;
5881 const u8 *pkt = NULL;
/* Translate the caller's lookups into protocol/offset words so the
 * owning recipe can be found.
 */
5884 ice_memset(&lkup_exts, 0, sizeof(lkup_exts), ICE_NONDMA_MEM);
5885 for (i = 0; i < lkups_cnt; i++) {
5888 if (lkups[i].type >= ICE_PROTOCOL_LAST)
5891 count = ice_fill_valid_words(&lkups[i], &lkup_exts);
5895 rid = ice_find_recp(hw, &lkup_exts);
5896 /* If did not find a recipe that match the existing criteria */
5897 if (rid == ICE_MAX_NUM_RECIPES)
5898 return ICE_ERR_PARAM;
5900 rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
5901 list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
5902 /* the rule is already removed */
/* Decide under the rule lock whether the HW rule must go away or the
 * requesting VSI is merely dropped from the rule's VSI list.
 */
5905 ice_acquire_lock(rule_lock);
5906 if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
5908 } else if (list_elem->vsi_count > 1) {
/* More than one VSI still uses the list: keep the rule, just
 * remove this VSI from the list.
 */
5909 list_elem->vsi_list_info->ref_cnt--;
5910 remove_rule = false;
5911 vsi_handle = rinfo->sw_act.vsi_handle;
5912 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5914 vsi_handle = rinfo->sw_act.vsi_handle;
5915 status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
5917 ice_release_lock(rule_lock);
5920 if (list_elem->vsi_count == 0)
5923 ice_release_lock(rule_lock);
/* Last reference gone: delete the HW rule via the remove-switch-rules
 * AQ command and drop the bookkeeping entry.
 */
5925 struct ice_aqc_sw_rules_elem *s_rule;
5927 ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt,
5928 &pkt_len, &offsets);
5929 rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
5931 (struct ice_aqc_sw_rules_elem *)ice_malloc(hw,
5934 return ICE_ERR_NO_MEMORY;
/* For removal only the rule index matters; action and header are 0. */
5935 s_rule->pdata.lkup_tx_rx.act = 0;
5936 s_rule->pdata.lkup_tx_rx.index =
5937 CPU_TO_LE16(list_elem->rule_info.fltr_rule_id);
5938 s_rule->pdata.lkup_tx_rx.hdr_len = 0;
5939 status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
5941 ice_aqc_opc_remove_sw_rules, NULL);
5942 if (status == ICE_SUCCESS) {
5943 ice_acquire_lock(rule_lock);
5944 LIST_DEL(&list_elem->list_entry);
5945 ice_free(hw, list_elem->lkups);
5946 ice_free(hw, list_elem);
5947 ice_release_lock(rule_lock);
5949 ice_free(hw, s_rule);
5955 * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
5956 * @hw: pointer to the hardware structure
5957 * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
5959 * This function is used to remove 1 rule at a time. The removal is based on
5960 * the remove_entry parameter. This function will remove rule for a given
5961 * vsi_handle with a given rule_id which is passed as parameter in remove_entry
/* Remove one advanced rule by (rid, rule_id, vsi_handle) as returned by
 * ice_add_adv_rule.  Looks the rule up on the recipe's filter list and
 * delegates the actual removal to ice_rem_adv_rule.
 */
5964 ice_rem_adv_rule_by_id(struct ice_hw *hw,
5965 struct ice_rule_query_data *remove_entry)
5967 struct ice_adv_fltr_mgmt_list_entry *list_itr;
5968 struct LIST_HEAD_TYPE *list_head;
5969 struct ice_adv_rule_info rinfo;
5970 struct ice_switch_info *sw;
5972 sw = hw->switch_info;
/* Reject IDs for recipes that were never created. */
5973 if (!sw->recp_list[remove_entry->rid].recp_created)
5974 return ICE_ERR_PARAM;
5975 list_head = &sw->recp_list[remove_entry->rid].filt_rules;
5976 LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
5978 if (list_itr->rule_info.fltr_rule_id ==
5979 remove_entry->rule_id) {
/* Use a copy of the stored rule info, but remove only the
 * caller's VSI, not the VSI the rule was stored with.
 */
5980 rinfo = list_itr->rule_info;
5981 rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
5982 return ice_rem_adv_rule(hw, list_itr->lkups,
5983 list_itr->lkups_cnt, &rinfo);
/* No rule with the requested ID exists on that recipe's list. */
5986 return ICE_ERR_PARAM;
5990 * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
5992 * @hw: pointer to the hardware structure
5993 * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
5995 * This function is used to remove all the rules for a given VSI and as soon
5996 * as removing a rule fails, it will return immediately with the error code,
5997 * else it will return ICE_SUCCESS
/* Remove every advanced rule that covers vsi_handle, across all created
 * recipes that carry advanced rules.  Stops and returns on the first
 * removal failure (error propagation lines elided in this excerpt).
 */
6000 ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
6002 struct ice_adv_fltr_mgmt_list_entry *list_itr;
6003 struct ice_vsi_list_map_info *map_info;
6004 struct LIST_HEAD_TYPE *list_head;
6005 struct ice_adv_rule_info rinfo;
6006 struct ice_switch_info *sw;
6007 enum ice_status status;
6008 u16 vsi_list_id = 0;
6011 sw = hw->switch_info;
/* Scan only recipes that exist and hold advanced (not basic) rules. */
6012 for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
6013 if (!sw->recp_list[rid].recp_created)
6015 if (!sw->recp_list[rid].adv_rule)
6017 list_head = &sw->recp_list[rid].filt_rules;
6019 LIST_FOR_EACH_ENTRY(list_itr, list_head,
6020 ice_adv_fltr_mgmt_list_entry, list_entry) {
/* Check whether this rule's VSI list covers vsi_handle. */
6021 map_info = ice_find_vsi_list_entry(hw, rid, vsi_handle,
/* Remove the rule on behalf of this VSI only. */
6025 rinfo = list_itr->rule_info;
6026 rinfo.sw_act.vsi_handle = vsi_handle;
6027 status = ice_rem_adv_rule(hw, list_itr->lkups,
6028 list_itr->lkups_cnt, &rinfo);
6038 * ice_replay_fltr - Replay all the filters stored by a specific list head
6039 * @hw: pointer to the hardware structure
6040 * @list_head: list for which filters needs to be replayed
6041 * @recp_id: Recipe ID for which rules need to be replayed
/* Re-program all filters of recipe recp_id that are stored on
 * list_head.  The entries are moved to a temporary list first so the
 * add path does not see them as duplicates; multi-VSI entries are
 * replayed as one filter per VSI.
 */
6043 static enum ice_status
6044 ice_replay_fltr(struct ice_hw *hw, u8 recp_id, struct LIST_HEAD_TYPE *list_head)
6046 struct ice_fltr_mgmt_list_entry *itr;
6047 struct LIST_HEAD_TYPE l_head;
6048 enum ice_status status = ICE_SUCCESS;
6050 if (LIST_EMPTY(list_head))
6053 /* Move entries from the given list_head to a temporary l_head so that
6054 * they can be replayed. Otherwise when trying to re-add the same
6055 * filter, the function would report that it already exists.
6057 LIST_REPLACE_INIT(list_head, &l_head);
6059 /* Mark the given list_head empty by reinitializing it so filters
6060 * can be added again by the replay below.
6062 LIST_FOR_EACH_ENTRY(itr, &l_head, ice_fltr_mgmt_list_entry,
6064 struct ice_fltr_list_entry f_entry;
6066 f_entry.fltr_info = itr->fltr_info;
/* Simple case: a single-VSI, non-VLAN filter replays as-is. */
6067 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN) {
6068 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6069 if (status != ICE_SUCCESS)
6074 /* Add a filter per VSI separately */
/* Walk the entry's VSI bitmap; each set bit becomes one
 * forward-to-VSI filter for that VSI.
 */
6079 ice_find_first_bit(itr->vsi_list_info->vsi_map,
6081 if (!ice_is_vsi_valid(hw, vsi_handle))
6084 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
6085 f_entry.fltr_info.vsi_handle = vsi_handle;
6086 f_entry.fltr_info.fwd_id.hw_vsi_id =
6087 ice_get_hw_vsi_num(hw, vsi_handle);
6088 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
/* VLAN filters take a dedicated add path. */
6089 if (recp_id == ICE_SW_LKUP_VLAN)
6090 status = ice_add_vlan_internal(hw, &f_entry);
6092 status = ice_add_rule_internal(hw, recp_id,
6094 if (status != ICE_SUCCESS)
6099 /* Clear the filter management list */
6100 ice_rem_sw_rule_info(hw, &l_head);
6105 * ice_replay_all_fltr - replay all filters stored in bookkeeping lists
6106 * @hw: pointer to the hardware structure
6108 * NOTE: This function does not clean up partially added filters on error.
6109 * It is up to caller of the function to issue a reset or fail early.
/* Replay the filter lists of every recipe.  Stops at the first recipe
 * whose replay fails; partially replayed state is left for the caller
 * to clean up (see the NOTE in the header comment above).
 */
6111 enum ice_status ice_replay_all_fltr(struct ice_hw *hw)
6113 struct ice_switch_info *sw = hw->switch_info;
6114 enum ice_status status = ICE_SUCCESS;
6117 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6118 struct LIST_HEAD_TYPE *head = &sw->recp_list[i].filt_rules;
/* The loop index doubles as the recipe ID for the replay. */
6120 status = ice_replay_fltr(hw, i, head);
6121 if (status != ICE_SUCCESS)
6128 * ice_replay_vsi_fltr - Replay filters for requested VSI
6129 * @hw: pointer to the hardware structure
6130 * @vsi_handle: driver VSI handle
6131 * @recp_id: Recipe ID for which rules need to be replayed
6132 * @list_head: list for which filters need to be replayed
6134 * Replays the filter of recipe recp_id for a VSI represented via vsi_handle.
6135 * It is required to pass valid VSI handle.
/* Replay the filters of recipe recp_id that apply to one VSI.  Entries
 * owned solely by this VSI replay directly; entries whose VSI bitmap
 * covers this VSI are re-added as a forward-to-VSI filter for it.
 */
6137 static enum ice_status
6138 ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
6139 struct LIST_HEAD_TYPE *list_head)
6141 struct ice_fltr_mgmt_list_entry *itr;
6142 enum ice_status status = ICE_SUCCESS;
6145 if (LIST_EMPTY(list_head))
6147 hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
6149 LIST_FOR_EACH_ENTRY(itr, list_head, ice_fltr_mgmt_list_entry,
6151 struct ice_fltr_list_entry f_entry;
6153 f_entry.fltr_info = itr->fltr_info;
/* Case 1: single-VSI, non-VLAN entry stored for exactly this VSI. */
6154 if (itr->vsi_count < 2 && recp_id != ICE_SW_LKUP_VLAN &&
6155 itr->fltr_info.vsi_handle == vsi_handle) {
6156 /* update the src in case it is VSI num */
6157 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6158 f_entry.fltr_info.src = hw_vsi_id;
6159 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6160 if (status != ICE_SUCCESS)
/* Case 2: entry's VSI list must include this VSI, else skip it. */
6164 if (!itr->vsi_list_info ||
6165 !ice_is_bit_set(itr->vsi_list_info->vsi_map, vsi_handle))
6167 /* Clearing it so that the logic can add it back */
6168 ice_clear_bit(vsi_handle, itr->vsi_list_info->vsi_map)
6169 f_entry.fltr_info.vsi_handle = vsi_handle;
6170 f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
6171 /* update the src in case it is VSI num */
6172 if (f_entry.fltr_info.src_id == ICE_SRC_ID_VSI)
6173 f_entry.fltr_info.src = hw_vsi_id;
/* VLAN filters take a dedicated add path. */
6174 if (recp_id == ICE_SW_LKUP_VLAN)
6175 status = ice_add_vlan_internal(hw, &f_entry);
6177 status = ice_add_rule_internal(hw, recp_id, &f_entry);
6178 if (status != ICE_SUCCESS)
6186 * ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
6187 * @hw: pointer to the hardware structure
6188 * @vsi_handle: driver VSI handle
6189 * @list_head: list for which filters need to be replayed
6191 * Replay the advanced rule for the given VSI.
/* Replay every advanced rule on list_head that was stored for
 * vsi_handle, by re-adding each via ice_add_adv_rule.
 */
6193 static enum ice_status
6194 ice_replay_vsi_adv_rule(struct ice_hw *hw, u16 vsi_handle,
6195 struct LIST_HEAD_TYPE *list_head)
6197 struct ice_rule_query_data added_entry = { 0 };
6198 struct ice_adv_fltr_mgmt_list_entry *adv_fltr;
6199 enum ice_status status = ICE_SUCCESS;
6201 if (LIST_EMPTY(list_head))
6203 LIST_FOR_EACH_ENTRY(adv_fltr, list_head, ice_adv_fltr_mgmt_list_entry,
6205 struct ice_adv_rule_info *rinfo = &adv_fltr->rule_info;
6206 u16 lk_cnt = adv_fltr->lkups_cnt;
/* Only rules recorded against this VSI are replayed. */
6208 if (vsi_handle != rinfo->sw_act.vsi_handle)
6210 status = ice_add_adv_rule(hw, adv_fltr->lkups, lk_cnt, rinfo,
6219 * ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
6220 * @hw: pointer to the hardware structure
6221 * @vsi_handle: driver VSI handle
6223 * Replays filters for requested VSI via vsi_handle.
/* Replay all filters (basic and advanced) recorded for vsi_handle,
 * walking every recipe's replay list and dispatching to the basic or
 * advanced replay helper based on the recipe's adv_rule flag.
 */
6225 enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle)
6227 struct ice_switch_info *sw = hw->switch_info;
6228 enum ice_status status;
6231 /* Update the recipes that were created */
6232 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6233 struct LIST_HEAD_TYPE *head;
6235 head = &sw->recp_list[i].filt_replay_rules;
/* Basic recipes replay per-VSI filters; advanced recipes replay
 * their stored advanced rules.
 */
6236 if (!sw->recp_list[i].adv_rule)
6237 status = ice_replay_vsi_fltr(hw, vsi_handle, i, head);
6239 status = ice_replay_vsi_adv_rule(hw, vsi_handle, head);
6240 if (status != ICE_SUCCESS)
6248 * ice_rm_all_sw_replay_rule_info - deletes filter replay rules
6249 * @hw: pointer to the HW struct
6251 * Deletes the filter replay rules.
6253 void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
6255 struct ice_switch_info *sw = hw->switch_info;
6261 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
6262 if (!LIST_EMPTY(&sw->recp_list[i].filt_replay_rules)) {
6263 struct LIST_HEAD_TYPE *l_head;
6265 l_head = &sw->recp_list[i].filt_replay_rules;
6266 if (!sw->recp_list[i].adv_rule)
6267 ice_rem_sw_rule_info(hw, l_head);
6269 ice_rem_adv_rule_info(hw, l_head);